Add snapshot hash of full accounts state (#8295)

* Add snapshot hash of full accounts state

* Use normal hashing for the accounts delta state

* Add merkle
This commit is contained in:
sakridge 2020-02-22 13:46:40 -08:00 committed by GitHub
parent edb18349c9
commit 947a339714
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 299 additions and 272 deletions

View File

@ -104,7 +104,7 @@ pub fn package_snapshot<P: AsRef<Path>, Q: AsRef<Path>>(
snapshot_hard_links_dir,
snapshot_storages,
snapshot_package_output_file.as_ref().to_path_buf(),
bank.hash(),
bank.get_accounts_hash(),
);
Ok(package)
@ -324,6 +324,7 @@ pub fn add_snapshot<P: AsRef<Path>>(
snapshot_storages: &[SnapshotStorage],
) -> Result<SlotSnapshotPaths> {
bank.purge_zero_lamport_accounts();
bank.update_accounts_hash();
let slot = bank.slot();
// snapshot_path/slot
let slot_snapshot_dir = get_bank_snapshot_dir(snapshot_path, slot);

View File

@ -72,5 +72,29 @@ fn test_accounts_hash_bank_hash(bencher: &mut Bencher) {
let mut pubkeys: Vec<Pubkey> = vec![];
create_test_accounts(&accounts, &mut pubkeys, 60000, 0);
let ancestors = vec![(0, 0)].into_iter().collect();
accounts.accounts_db.update_accounts_hash(0, &ancestors);
bencher.iter(|| assert!(accounts.verify_bank_hash(0, &ancestors)));
}
#[bench]
fn test_update_accounts_hash(bencher: &mut Bencher) {
    solana_logger::setup();
    // Fresh accounts store seeded with 50k test accounts, all written at slot 0.
    let accounts = Accounts::new(vec![PathBuf::from("update_accounts_hash")]);
    let mut seeded_keys: Vec<Pubkey> = Vec::new();
    create_test_accounts(&accounts, &mut seeded_keys, 50_000, 0);
    // Slot 0 is the only ancestor of itself.
    let ancestors = std::iter::once((0, 0)).collect();
    // Measure recomputing the full accounts-state hash for slot 0.
    bencher.iter(|| {
        accounts.accounts_db.update_accounts_hash(0, &ancestors);
    });
}
#[bench]
fn test_accounts_delta_hash(bencher: &mut Bencher) {
    solana_logger::setup();
    // Fresh accounts store seeded with 100k test accounts at slot 0.
    let accounts = Accounts::new(vec![PathBuf::from("accounts_delta_hash")]);
    let mut seeded_keys: Vec<Pubkey> = Vec::new();
    create_test_accounts(&accounts, &mut seeded_keys, 100_000, 0);
    // Measure hashing just the accounts written in slot 0 (the "delta").
    bencher.iter(|| {
        accounts.accounts_db.get_accounts_delta_hash(0);
    });
}

View File

@ -15,7 +15,6 @@ use log::*;
use rayon::slice::ParallelSliceMut;
use solana_sdk::{
account::Account,
bank_hash::BankHash,
clock::Slot,
hash::Hash,
native_loader,
@ -510,16 +509,19 @@ impl Accounts {
}
}
pub fn bank_hash_at(&self, slot_id: Slot) -> BankHash {
pub fn bank_hash_at(&self, slot_id: Slot) -> Hash {
self.bank_hash_info_at(slot_id).hash
}
pub fn bank_hash_info_at(&self, slot_id: Slot) -> BankHashInfo {
let delta_hash = self.accounts_db.get_accounts_delta_hash(slot_id);
let bank_hashes = self.accounts_db.bank_hashes.read().unwrap();
bank_hashes
let mut hash_info = bank_hashes
.get(&slot_id)
.expect("No bank hash was found for this bank, that should not be possible")
.clone()
.clone();
hash_info.hash = delta_hash;
hash_info
}
/// This function will prevent multiple threads from modifying the same account state at the
@ -1313,7 +1315,7 @@ mod tests {
#[should_panic]
fn test_accounts_empty_bank_hash() {
let accounts = Accounts::new(Vec::new());
accounts.bank_hash_at(0);
accounts.bank_hash_at(1);
}
fn check_accounts(accounts: &Accounts, pubkeys: &Vec<Pubkey>, num: usize) {

View File

@ -38,7 +38,6 @@ use solana_measure::measure::Measure;
use solana_rayon_threadlimit::get_thread_count;
use solana_sdk::{
account::Account,
bank_hash::BankHash,
clock::{Epoch, Slot},
hash::{Hash, Hasher},
pubkey::Pubkey,
@ -48,7 +47,7 @@ use std::{
fmt,
io::{BufReader, Cursor, Error as IOError, ErrorKind, Read, Result as IOResult},
path::{Path, PathBuf},
sync::atomic::{AtomicUsize, Ordering},
sync::atomic::{AtomicBool, AtomicUsize, Ordering},
sync::{Arc, RwLock},
};
use tempfile::TempDir;
@ -121,6 +120,22 @@ impl<'de> Visitor<'de> for AccountStorageVisitor {
}
}
/// Minimal abstraction over "a payload tagged with a write version", so
/// `merge` can keep whichever entry carries the higher version regardless
/// of the payload type (`Hash` or `AccountInfo`).
trait Versioned {
    /// Returns the write version associated with this entry.
    fn version(&self) -> u64;
}
impl Versioned for (u64, Hash) {
    fn version(&self) -> u64 {
        // First tuple element is the write version.
        self.0
    }
}
impl Versioned for (u64, AccountInfo) {
    fn version(&self) -> u64 {
        // First tuple element is the write version.
        self.0
    }
}
struct AccountStorageSerialize<'a> {
account_storage_entries: &'a [SnapshotStorage],
}
@ -396,7 +411,8 @@ impl BankHashStats {
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq)]
pub struct BankHashInfo {
pub hash: BankHash,
pub hash: Hash,
pub snapshot_hash: Hash,
pub stats: BankHashStats,
}
@ -436,6 +452,8 @@ impl Default for AccountsDB {
fn default() -> Self {
let num_threads = get_thread_count();
let mut bank_hashes = HashMap::new();
bank_hashes.insert(0, BankHashInfo::default());
AccountsDB {
accounts_index: RwLock::new(AccountsIndex::default()),
storage: RwLock::new(AccountStorage(HashMap::new())),
@ -449,7 +467,7 @@ impl Default for AccountsDB {
.build()
.unwrap(),
min_num_stores: num_threads,
bank_hashes: RwLock::new(HashMap::default()),
bank_hashes: RwLock::new(bank_hashes),
}
}
}
@ -773,9 +791,6 @@ impl AccountsDB {
pub fn set_hash(&self, slot: Slot, parent_slot: Slot) {
let mut bank_hashes = self.bank_hashes.write().unwrap();
let hash_info = bank_hashes
.get(&parent_slot)
.expect("accounts_db::set_hash::no parent slot");
if bank_hashes.get(&slot).is_some() {
error!(
"set_hash: already exists; multiple forks with shared slot {} as child (parent: {})!?",
@ -784,9 +799,9 @@ impl AccountsDB {
return;
}
let hash = hash_info.hash;
let new_hash_info = BankHashInfo {
hash,
hash: Hash::default(),
snapshot_hash: Hash::default(),
stats: BankHashStats::default(),
};
bank_hashes.insert(slot, new_hash_info);
@ -1041,6 +1056,120 @@ impl AccountsDB {
datapoint_info!("accounts_db-stores", ("total_count", total_count, i64));
}
/// Computes a merkle root over the given account hashes with the given
/// `fanout` (children per node). The pubkeys are discarded; ordering of the
/// input determines the tree, so callers must sort first (see
/// `accumulate_account_hashes`).
pub fn compute_merkle_root(hashes: Vec<(Pubkey, Hash)>, fanout: usize) -> Hash {
    // Only the hashes participate in the tree.
    let hashes: Vec<_> = hashes.into_iter().map(|(_pubkey, hash)| hash).collect();
    // Group leaves into fanout-sized chunks; each pass hashes every chunk in
    // parallel, producing one hash per chunk, until a single chunk remains.
    let mut hashes: Vec<Vec<Hash>> = hashes.chunks(fanout).map(|x| x.to_vec()).collect();
    while hashes.len() > 1 {
        let mut time = Measure::start("time");
        let new_hashes: Vec<Hash> = hashes
            .par_iter()
            .map(|h| {
                let mut hasher = Hasher::default();
                for v in h.iter() {
                    hasher.hash(v.as_ref());
                }
                hasher.result()
            })
            .collect();
        time.stop();
        debug!("hashing {} {}", hashes.len(), time);
        hashes = new_hashes.chunks(fanout).map(|x| x.to_vec()).collect();
    }
    // One (possibly partial) chunk left: hash it into the root.
    // (The original piped this through a no-op `.map(|hash| hash)`.)
    let mut hasher = Hasher::default();
    for hash in hashes.into_iter().flatten() {
        hasher.hash(hash.as_ref());
    }
    hasher.result()
}
/// Sorts account hashes by pubkey (for determinism) and folds them into a
/// single merkle root.
fn accumulate_account_hashes(mut hashes: Vec<(Pubkey, Hash)>) -> Hash {
    // Canonical ordering: without the sort the merkle root would depend on
    // scan order.
    let mut sort_time = Measure::start("sort");
    hashes.par_sort_by(|a, b| a.0.cmp(&b.0));
    sort_time.stop();

    const FANOUT: usize = 16;
    let mut hash_time = Measure::start("hash");
    let root = Self::compute_merkle_root(hashes, FANOUT);
    hash_time.stop();

    debug!("{} {}", sort_time, hash_time);
    root
}
/// Computes the hash of the full accounts state visible from `ancestors`.
///
/// Scans every pubkey in the index, resolves each to its newest live version,
/// skips zero-lamport accounts, and folds the stored per-account hashes into
/// a merkle root via `accumulate_account_hashes`.
///
/// When `check_hash` is true, each stored account's hash is recomputed and
/// compared against the stored hash; any mismatch aborts with
/// `MismatchedAccountHash`.
fn calculate_accounts_hash(
    &self,
    ancestors: &HashMap<Slot, usize>,
    check_hash: bool,
) -> Result<Hash, BankHashVerificationError> {
    use BankHashVerificationError::*;
    let mut scan = Measure::start("scan");
    // Hold both read locks for the whole scan so index and storage stay
    // consistent with each other.
    let accounts_index = self.accounts_index.read().unwrap();
    let storage = self.storage.read().unwrap();
    let keys: Vec<_> = accounts_index.account_maps.keys().collect();
    // Shared flag lets parallel workers short-circuit once any mismatch is seen.
    let mismatch_found = AtomicBool::new(false);
    let hashes: Vec<_> = keys
        .par_iter()
        .filter_map(|pubkey| {
            if let Some((list, index)) = accounts_index.get(pubkey, ancestors) {
                let (slot, account_info) = &list[index];
                // Zero-lamport accounts are dead and excluded from the hash.
                if account_info.lamports != 0 {
                    storage
                        .0
                        .get(&slot)
                        .and_then(|storage_map| storage_map.get(&account_info.store_id))
                        .and_then(|store| {
                            let account = store.accounts.get_account(account_info.offset)?.0;
                            if check_hash {
                                let hash = Self::hash_stored_account(*slot, &account);
                                if hash != *account.hash {
                                    mismatch_found.store(true, Ordering::Relaxed);
                                }
                                if mismatch_found.load(Ordering::Relaxed) {
                                    return None;
                                }
                            }
                            // Use the hash recorded at store time, not a fresh one.
                            Some((**pubkey, *account.hash))
                        })
                } else {
                    None
                }
            } else {
                None
            }
        })
        .collect();
    if mismatch_found.load(Ordering::Relaxed) {
        return Err(MismatchedAccountHash);
    }
    scan.stop();
    debug!("{}", scan);
    Ok(Self::accumulate_account_hashes(hashes))
}
/// Returns the last full accounts-state hash recorded for `slot` by
/// `update_accounts_hash`.
///
/// Panics if no `BankHashInfo` exists for the slot (callers are expected to
/// have stored one first); the `expect` names the failing operation instead
/// of a bare `unwrap`.
pub fn get_accounts_hash(&self, slot: Slot) -> Hash {
    let bank_hashes = self.bank_hashes.read().unwrap();
    let bank_hash_info = bank_hashes
        .get(&slot)
        .expect("get_accounts_hash: no BankHashInfo for slot");
    bank_hash_info.snapshot_hash
}
/// Recomputes the full accounts-state hash visible from `ancestors`, records
/// it as `snapshot_hash` for `slot`, and returns it.
///
/// Panics if the hash calculation fails or no `BankHashInfo` exists for the
/// slot. `check_hash` is false here: the stored per-account hashes are trusted.
pub fn update_accounts_hash(&self, slot: Slot, ancestors: &HashMap<Slot, usize>) -> Hash {
    let hash = self.calculate_accounts_hash(ancestors, false).unwrap();
    let mut bank_hashes = self.bank_hashes.write().unwrap();
    // `get_mut` already yields `&mut`; the original's `let mut` binding was
    // a needless-mut lint.
    let bank_hash_info = bank_hashes
        .get_mut(&slot)
        .expect("update_accounts_hash: no BankHashInfo for slot");
    bank_hash_info.snapshot_hash = hash;
    hash
}
pub fn verify_bank_hash(
&self,
slot: Slot,
@ -1048,39 +1177,16 @@ impl AccountsDB {
) -> Result<(), BankHashVerificationError> {
use BankHashVerificationError::*;
let (hashes, mismatch_found) = self.scan_accounts(
ancestors,
|(collector, mismatch_found): &mut (Vec<BankHash>, bool),
option: Option<(&Pubkey, Account, Slot)>| {
if let Some((pubkey, account, slot)) = option {
let hash = Self::hash_account(slot, &account, pubkey);
if hash != account.hash {
*mismatch_found = true;
}
if *mismatch_found {
return;
}
let hash = BankHash::from_hash(&hash);
debug!("xoring..{} key: {}", hash, pubkey);
collector.push(hash);
}
},
);
if mismatch_found {
return Err(MismatchedAccountHash);
}
let mut calculated_hash = BankHash::default();
for hash in hashes {
calculated_hash.xor(hash);
}
let calculated_hash = self.calculate_accounts_hash(ancestors, true)?;
let bank_hashes = self.bank_hashes.read().unwrap();
if let Some(found_hash_info) = bank_hashes.get(&slot) {
if calculated_hash == found_hash_info.hash {
if calculated_hash == found_hash_info.snapshot_hash {
Ok(())
} else {
warn!(
"mismatched bank hash for slot {}: {} (calculated) != {} (expected)",
slot, calculated_hash, found_hash_info.hash
slot, calculated_hash, found_hash_info.snapshot_hash
);
Err(MismatchedBankHash)
}
@ -1089,14 +1195,35 @@ impl AccountsDB {
}
}
pub fn xor_in_hash_state(&self, slot_id: Slot, hash: BankHash, stats: &BankHashStats) {
let mut bank_hashes = self.bank_hashes.write().unwrap();
let bank_hash = bank_hashes
.entry(slot_id)
.or_insert_with(BankHashInfo::default);
bank_hash.hash.xor(hash);
bank_hash.stats.merge(stats);
/// Hashes only the accounts written in `slot_id` (the "delta" for that slot).
///
/// Each storage for the slot is scanned into a per-storage map of
/// pubkey -> (write_version, hash); the maps are then merged keeping the
/// highest write version per pubkey (an account rewritten within the slot
/// contributes only its newest hash), and the surviving hashes are folded
/// into a merkle root.
pub fn get_accounts_delta_hash(&self, slot_id: Slot) -> Hash {
    let mut scan = Measure::start("scan");
    // One map per storage entry scanned in parallel by scan_account_storage.
    let mut accumulator: Vec<HashMap<Pubkey, (u64, Hash)>> = self.scan_account_storage(
        slot_id,
        |stored_account: &StoredAccount,
         _store_id: AppendVecId,
         accum: &mut HashMap<Pubkey, (u64, Hash)>| {
            accum.insert(
                stored_account.meta.pubkey,
                (stored_account.meta.write_version, *stored_account.hash),
            );
        },
    );
    scan.stop();
    let mut merge = Measure::start("merge");
    // NOTE(review): this pop().unwrap() assumes scan_account_storage returns
    // at least one map even for an empty slot — confirm against its impl.
    let mut account_maps = accumulator.pop().unwrap();
    while let Some(maps) = accumulator.pop() {
        // Keeps the entry with the higher write version on collision.
        AccountsDB::merge(&mut account_maps, &maps);
    }
    merge.stop();
    let mut accumulate = Measure::start("accumulate");
    // Drop the write versions; only (pubkey, hash) feed the merkle root.
    let hashes: Vec<_> = account_maps
        .into_iter()
        .map(|(pubkey, (_, hash))| (pubkey, hash))
        .collect();
    let ret = Self::accumulate_account_hashes(hashes);
    accumulate.stop();
    info!("{} {} {}", scan, merge, accumulate);
    ret
}
fn update_index(
@ -1180,34 +1307,21 @@ impl AccountsDB {
}
fn hash_accounts(&self, slot_id: Slot, accounts: &[(&Pubkey, &Account)]) -> Vec<Hash> {
let mut hash_state = BankHash::default();
let mut had_account = false;
let mut stats = BankHashStats::default();
let hashes: Vec<_> = accounts
.iter()
.map(|(pubkey, account)| {
let hash = BankHash::from_hash(&account.hash);
stats.update(account);
let new_hash = Self::hash_account(slot_id, account, pubkey);
let new_bank_hash = BankHash::from_hash(&new_hash);
debug!(
"hash_accounts: key: {} xor {} current: {}",
pubkey, hash, hash_state
);
if !had_account {
hash_state = hash;
had_account = true;
} else {
hash_state.xor(hash);
}
hash_state.xor(new_bank_hash);
new_hash
Self::hash_account(slot_id, account, pubkey)
})
.collect();
if had_account {
self.xor_in_hash_state(slot_id, hash_state, &stats);
}
let mut bank_hashes = self.bank_hashes.write().unwrap();
let slot_info = bank_hashes
.entry(slot_id)
.or_insert_with(BankHashInfo::default);
slot_info.stats.merge(&stats);
hashes
}
@ -1247,17 +1361,17 @@ impl AccountsDB {
.collect()
}
fn merge(
dest: &mut HashMap<Pubkey, (u64, AccountInfo)>,
source: &HashMap<Pubkey, (u64, AccountInfo)>,
) {
for (key, (source_version, source_info)) in source.iter() {
if let Some((dest_version, _)) = dest.get(key) {
if dest_version > source_version {
/// Merges `source` into `dest`, keeping for each pubkey the entry with the
/// higher write version. On a version tie the source entry wins (it
/// overwrites), matching last-writer semantics.
fn merge<X>(dest: &mut HashMap<Pubkey, X>, source: &HashMap<Pubkey, X>)
where
    X: Versioned + Clone,
{
    for (key, source_item) in source {
        // Overwrite unless dest already holds a strictly newer entry.
        let source_wins = match dest.get(key) {
            Some(dest_item) => dest_item.version() <= source_item.version(),
            None => true,
        };
        if source_wins {
            dest.insert(*key, source_item.clone());
        }
    }
}
@ -1340,6 +1454,14 @@ pub mod tests {
use std::{fs, str::FromStr};
use tempfile::TempDir;
fn linear_ancestors(end_slot: u64) -> HashMap<Slot, usize> {
let mut ancestors: HashMap<Slot, usize> = vec![(0, 0)].into_iter().collect();
for i in 1..end_slot {
ancestors.insert(i, (i - 1) as usize);
}
ancestors
}
#[test]
fn test_accountsdb_add_root() {
solana_logger::setup();
@ -1661,8 +1783,8 @@ pub mod tests {
accounts.store(0, &[(&key, &account)]);
keys.push(key);
}
let ancestors = vec![(0, 0)].into_iter().collect();
for (i, key) in keys.iter().enumerate() {
let ancestors = vec![(0, 0)].into_iter().collect();
assert_eq!(
accounts.load_slow(&ancestors, &key).unwrap().0.lamports,
(i as u64) + 1
@ -1810,11 +1932,16 @@ pub mod tests {
}
fn print_index(label: &'static str, accounts: &AccountsDB) {
info!(
"{}: accounts.accounts_index roots: {:?}",
label,
accounts.accounts_index.read().unwrap().roots
);
let mut roots: Vec<_> = accounts
.accounts_index
.read()
.unwrap()
.roots
.iter()
.cloned()
.collect();
roots.sort();
info!("{}: accounts.accounts_index roots: {:?}", label, roots,);
for (pubkey, list) in &accounts.accounts_index.read().unwrap().account_maps {
info!(" key: {}", pubkey);
info!(" slots: {:?}", *list.read().unwrap());
@ -1844,7 +1971,7 @@ pub mod tests {
}
#[test]
fn test_accounts_db_serialize() {
fn test_accounts_db_serialize1() {
solana_logger::setup();
let accounts = AccountsDB::new_single();
let mut pubkeys: Vec<Pubkey> = vec![];
@ -1905,7 +2032,7 @@ pub mod tests {
// Get the hash for the latest slot, which should be the only hash in the
// bank_hashes map on the deserialized AccountsDb
assert_eq!(daccounts.bank_hashes.read().unwrap().len(), 1);
assert_eq!(daccounts.bank_hashes.read().unwrap().len(), 2);
assert_eq!(
daccounts.bank_hashes.read().unwrap().get(&latest_slot),
accounts.bank_hashes.read().unwrap().get(&latest_slot)
@ -1919,6 +2046,12 @@ pub mod tests {
assert!(check_storage(&daccounts, 0, 78));
assert!(check_storage(&daccounts, 1, 11));
assert!(check_storage(&daccounts, 2, 31));
let ancestors = linear_ancestors(latest_slot);
assert_eq!(
daccounts.update_accounts_hash(latest_slot, &ancestors),
accounts.update_accounts_hash(latest_slot, &ancestors)
);
}
fn assert_load_account(
@ -2005,11 +2138,13 @@ pub mod tests {
current_slot += 1;
accounts.add_root(current_slot);
print_accounts("pre_purge", &accounts);
purge_zero_lamport_accounts(&accounts, current_slot);
print_accounts("post_purge", &accounts);
// Make sure the index is for pubkey cleared
// Make sure the index is not touched
assert_eq!(
accounts
.accounts_index
@ -2030,7 +2165,7 @@ pub mod tests {
}
#[test]
fn test_accounts_db_purge() {
fn test_accounts_db_purge1() {
solana_logger::setup();
let some_lamport = 223;
let zero_lamport = 0;
@ -2046,10 +2181,12 @@ pub mod tests {
accounts.add_root(0);
let mut current_slot = 1;
accounts.set_hash(current_slot, current_slot - 1);
accounts.store(current_slot, &[(&pubkey, &account)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.set_hash(current_slot, current_slot - 1);
accounts.store(current_slot, &[(&pubkey, &zero_lamport_account)]);
accounts.add_root(current_slot);
@ -2057,10 +2194,22 @@ pub mod tests {
// Otherwise slot 2 will not be removed
current_slot += 1;
accounts.set_hash(current_slot, current_slot - 1);
accounts.add_root(current_slot);
print_accounts("pre_purge", &accounts);
let ancestors = linear_ancestors(current_slot);
info!("ancestors: {:?}", ancestors);
let hash = accounts.update_accounts_hash(current_slot, &ancestors);
purge_zero_lamport_accounts(&accounts, current_slot);
assert_eq!(
accounts.update_accounts_hash(current_slot, &ancestors),
hash
);
print_accounts("post_purge", &accounts);
// Make sure the index is for pubkey cleared
@ -2390,6 +2539,7 @@ pub mod tests {
db.store(some_slot, &[(&key, &account)]);
db.add_root(some_slot);
db.update_accounts_hash(some_slot, &ancestors);
assert_matches!(db.verify_bank_hash(some_slot, &ancestors), Ok(_));
db.bank_hashes.write().unwrap().remove(&some_slot).unwrap();
@ -2398,9 +2548,10 @@ pub mod tests {
Err(MissingBankHash)
);
let some_bank_hash = BankHash::from_hash(&Hash::new(&[0xca; HASH_BYTES]));
let some_bank_hash = Hash::new(&[0xca; HASH_BYTES]);
let bank_hash_info = BankHashInfo {
hash: some_bank_hash,
snapshot_hash: Hash::new(&[0xca; HASH_BYTES]),
stats: BankHashStats::default(),
};
db.bank_hashes
@ -2426,6 +2577,7 @@ pub mod tests {
.unwrap()
.insert(some_slot, BankHashInfo::default());
db.add_root(some_slot);
db.update_accounts_hash(some_slot, &ancestors);
assert_matches!(db.verify_bank_hash(some_slot, &ancestors), Ok(_));
}

View File

@ -1893,6 +1893,17 @@ impl Bank {
}
}
/// Returns the full accounts-state hash last recorded for this bank's slot
/// (set by `update_accounts_hash`; consumed when packaging snapshots).
pub fn get_accounts_hash(&self) -> Hash {
    // Use the slot() accessor for consistency with update_accounts_hash
    // below (the original mixed field access and the accessor).
    self.rc.accounts.accounts_db.get_accounts_hash(self.slot())
}

/// Recomputes the full accounts-state hash for this bank's slot, records it
/// in the accounts DB, and returns it.
pub fn update_accounts_hash(&self) -> Hash {
    self.rc
        .accounts
        .accounts_db
        .update_accounts_hash(self.slot(), &self.ancestors)
}
/// A snapshot bank should be purged of 0 lamport accounts which are not part of the hash
/// calculation and could shield other real accounts.
pub fn verify_snapshot_bank(&self) -> bool {
@ -3131,7 +3142,9 @@ mod tests {
bank = Arc::new(new_from_parent(&bank));
}
let hash = bank.update_accounts_hash();
bank.purge_zero_lamport_accounts();
assert_eq!(bank.update_accounts_hash(), hash);
let bank0 = Arc::new(new_from_parent(&bank));
let blockhash = bank.last_blockhash();
@ -3147,10 +3160,16 @@ mod tests {
assert_eq!(bank0.get_account(&keypair.pubkey()).unwrap().lamports, 10);
assert_eq!(bank1.get_account(&keypair.pubkey()), None);
info!("bank0 purge");
let hash = bank0.update_accounts_hash();
bank0.purge_zero_lamport_accounts();
assert_eq!(bank0.update_accounts_hash(), hash);
assert_eq!(bank0.get_account(&keypair.pubkey()).unwrap().lamports, 10);
assert_eq!(bank1.get_account(&keypair.pubkey()), None);
info!("bank1 purge");
bank1.purge_zero_lamport_accounts();
assert_eq!(bank0.get_account(&keypair.pubkey()).unwrap().lamports, 10);
@ -3163,6 +3182,7 @@ mod tests {
assert!(bank0.verify_bank_hash());
bank1.squash();
bank1.update_accounts_hash();
assert!(bank1.verify_bank_hash());
// keypair should have 0 tokens on both forks
@ -3891,6 +3911,7 @@ mod tests {
let pubkey2 = Pubkey::new_rand();
info!("transfer 2 {}", pubkey2);
bank2.transfer(10, &mint_keypair, &pubkey2).unwrap();
bank2.update_accounts_hash();
assert!(bank2.verify_bank_hash());
}
@ -3907,22 +3928,25 @@ mod tests {
let bank0_state = bank0.hash_internal_state();
let bank0 = Arc::new(bank0);
// Checkpointing should result in a new state while freezing the parent
let bank2 = new_from_parent(&bank0);
let bank2 = Bank::new_from_parent(&bank0, &Pubkey::new_rand(), 1);
assert_ne!(bank0_state, bank2.hash_internal_state());
// Checkpointing should modify the checkpoint's state when freezed
assert_ne!(bank0_state, bank0.hash_internal_state());
// Checkpointing should never modify the checkpoint's state once frozen
let bank0_state = bank0.hash_internal_state();
bank2.update_accounts_hash();
assert!(bank2.verify_bank_hash());
let bank3 = new_from_parent(&bank0);
let bank3 = Bank::new_from_parent(&bank0, &Pubkey::new_rand(), 2);
assert_eq!(bank0_state, bank0.hash_internal_state());
assert!(bank2.verify_bank_hash());
bank3.update_accounts_hash();
assert!(bank3.verify_bank_hash());
let pubkey2 = Pubkey::new_rand();
info!("transfer 2 {}", pubkey2);
bank2.transfer(10, &mint_keypair, &pubkey2).unwrap();
bank2.update_accounts_hash();
assert!(bank2.verify_bank_hash());
assert!(bank3.verify_bank_hash());
}
@ -3943,6 +3967,7 @@ mod tests {
bank.transfer(1_000, &mint_keypair, &pubkey).unwrap();
bank.freeze();
bank.update_accounts_hash();
assert!(bank.verify_snapshot_bank());
// tamper the bank after freeze!
@ -4194,7 +4219,6 @@ mod tests {
#[test]
fn test_bank_update_sysvar_account() {
use solana_sdk::bank_hash::BankHash;
use sysvar::clock::Clock;
let dummy_clock_id = Pubkey::new_rand();
@ -4214,7 +4238,6 @@ mod tests {
.create_account(1)
});
let current_account = bank1.get_account(&dummy_clock_id).unwrap();
let removed_bank_hash = BankHash::from_hash(&current_account.hash);
assert_eq!(
expected_previous_slot,
Clock::from_account(&current_account).unwrap().slot
@ -4222,7 +4245,6 @@ mod tests {
// Updating should increment the clock's slot
let bank2 = Arc::new(Bank::new_from_parent(&bank1, &Pubkey::default(), 1));
let mut expected_bank_hash = bank2.rc.accounts.bank_hash_at(bank2.slot);
bank2.update_sysvar_account(&dummy_clock_id, |optional_account| {
let slot = Clock::from_account(optional_account.as_ref().unwrap())
.unwrap()
@ -4236,17 +4258,11 @@ mod tests {
.create_account(1)
});
let current_account = bank2.get_account(&dummy_clock_id).unwrap();
let added_bank_hash = BankHash::from_hash(&current_account.hash);
expected_bank_hash.xor(removed_bank_hash);
expected_bank_hash.xor(added_bank_hash);
//let added_bank_hash = BankHash::from_hash(&current_account.hash);
assert_eq!(
expected_next_slot,
Clock::from_account(&current_account).unwrap().slot
);
assert_eq!(
expected_bank_hash,
bank2.rc.accounts.bank_hash_at(bank2.slot)
);
// Updating again should give bank1's sysvar to the closure not bank2's.
// Thus, assert with same expected_next_slot as previously
@ -4267,10 +4283,6 @@ mod tests {
expected_next_slot,
Clock::from_account(&current_account).unwrap().slot
);
assert_eq!(
expected_bank_hash,
bank2.rc.accounts.bank_hash_at(bank2.slot)
);
}
#[test]
@ -4492,9 +4504,9 @@ mod tests {
let bank0 = Arc::new(Bank::new(&genesis_config));
// Bank 1
let bank1 = Arc::new(new_from_parent(&bank0));
let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::new_rand(), 1));
// Bank 2
let bank2 = new_from_parent(&bank0);
let bank2 = Bank::new_from_parent(&bank0, &Pubkey::new_rand(), 2);
// transfer a token
assert_eq!(
@ -4517,7 +4529,7 @@ mod tests {
assert_eq!(bank2.transaction_count(), 0);
assert_eq!(bank1.transaction_count(), 1);
let bank6 = new_from_parent(&bank1);
let bank6 = Bank::new_from_parent(&bank1, &Pubkey::new_rand(), 3);
assert_eq!(bank1.transaction_count(), 1);
assert_eq!(bank6.transaction_count(), 1);

View File

@ -1,162 +0,0 @@
use crate::hash::Hash;
use bincode::deserialize_from;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;
use serde::{Deserialize, Serialize, Serializer};
use std::{fmt, io::Cursor};
// Type representing a bank's accounts state.
// The lower 32 bytes are an xor of sha256 hashes of the accounts state; the
// remaining bytes are generated by a ChaCha RNG seeded with those 32 bytes.
// The extra width is what makes xor-accumulation resist a birthday attack:
// an attacker should not be able to find a value or set of values whose xor
// matches the bit pattern of an existing state value.
// NOTE(review): the original comment claims 440 bytes suffice, but the
// constant below is 448 (the next multiple of 32) — confirm the intended
// sizing argument.
const BANK_HASH_BYTES: usize = 448;
#[derive(Clone, Copy)]
pub struct BankHash([u8; BANK_HASH_BYTES]);
impl fmt::Debug for BankHash {
    // Only the first 32 bytes are shown; the remainder is RNG-derived from
    // them (see `from_hash`), so they add no information.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "BankHash {}", hex::encode(&self.0[..32]))
    }
}
impl fmt::Display for BankHash {
    // Display delegates to Debug.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}", self)
    }
}
impl PartialEq for BankHash {
    // Arrays larger than 32 elements lacked derived PartialEq at this
    // edition, hence the manual slice comparison over the full width.
    fn eq(&self, other: &Self) -> bool {
        self.0[..] == other.0[..]
    }
}
impl Eq for BankHash {}
impl Default for BankHash {
    // All-zero state: the identity element for `xor`.
    fn default() -> Self {
        BankHash([0u8; BANK_HASH_BYTES])
    }
}
impl BankHash {
    /// Expands a 32-byte `Hash` to full BankHash width: the hash occupies the
    /// first 32 bytes and seeds a ChaCha RNG that fills the remainder.
    pub fn from_hash(hash: &Hash) -> Self {
        // The default (all-zero) hash maps to the all-zero BankHash so it
        // stays a no-op under xor.
        if *hash == Hash::default() {
            return BankHash::default();
        }
        let mut expanded = BankHash::default();
        expanded.0[..32].copy_from_slice(hash.as_ref());
        let mut seed = [0u8; 32];
        seed.copy_from_slice(hash.as_ref());
        let mut rng = ChaChaRng::from_seed(seed);
        rng.fill(&mut expanded.0[32..]);
        expanded
    }

    /// True when this value is the all-zero default (checked via the seed
    /// bytes; the tail is fully determined by them).
    pub fn is_default(&self) -> bool {
        self.0[0..32] == Hash::default().as_ref()[0..32]
    }

    /// Folds `hash` into `self` byte-wise with xor.
    pub fn xor(self: &mut BankHash, hash: BankHash) {
        for (dst, src) in self.0.iter_mut().zip(hash.as_ref().iter()) {
            *dst ^= *src;
        }
    }
}
impl Serialize for BankHash {
    // Serialized as a raw byte string; `BankHashVisitor::visit_bytes` is the
    // matching decoder.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_bytes(&self.0[..])
    }
}
/// Serde visitor decoding the byte string produced by `BankHash::serialize`.
struct BankHashVisitor;
impl<'a> serde::de::Visitor<'a> for BankHashVisitor {
    type Value = BankHash;
    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        formatter.write_str("Expecting BankHash")
    }
    fn visit_bytes<E>(self, data: &[u8]) -> std::result::Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        // Bulk-copy the fixed-width payload. The original decoded one byte at
        // a time through bincode `deserialize_from`, which is equivalent but
        // costs a decoder call per byte; like the original, short input is an
        // error and any trailing bytes are ignored.
        // (A stray `#[allow(clippy::mutex_atomic)]` on this fn was dropped:
        // there is no mutex here.)
        if data.len() < BANK_HASH_BYTES {
            return Err(E::invalid_length(data.len(), &self));
        }
        let mut new = BankHash::default();
        new.0.copy_from_slice(&data[..BANK_HASH_BYTES]);
        Ok(new)
    }
}
impl<'de> Deserialize<'de> for BankHash {
    // Mirrors `Serialize`: the value was written with `serialize_bytes`, so
    // decode through the byte-string visitor.
    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
    where
        D: ::serde::Deserializer<'de>,
    {
        deserializer.deserialize_bytes(BankHashVisitor)
    }
}
impl AsRef<[u8]> for BankHash {
    // Borrow the full 448-byte payload.
    fn as_ref(&self) -> &[u8] {
        &self.0[..]
    }
}
impl AsMut<[u8]> for BankHash {
    // Mutable borrow of the full payload (used by `xor`).
    fn as_mut(&mut self) -> &mut [u8] {
        &mut self.0[..]
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::hash::hash;
    use bincode::{deserialize, serialize};
    use log::*;
    /// `from_hash` on a non-default hash yields a non-default BankHash in
    /// which every 32-byte segment is populated (the RNG filled the tail).
    #[test]
    fn test_bankhash() {
        let hash = hash(&[1, 2, 3, 4]);
        let bank_hash = BankHash::from_hash(&hash);
        assert!(!bank_hash.is_default());
        let default = BankHash::default();
        assert!(default.is_default());
        assert!(bank_hash != default);
        assert!(bank_hash == bank_hash);
        // No 32-byte window may be all zeros once seeded from a real hash.
        for i in 0..BANK_HASH_BYTES / 32 {
            let start = i * 32;
            let end = start + 32;
            assert!(bank_hash.0[start..end] != [0u8; 32]);
        }
    }
    /// Round-trip through bincode: serialize then deserialize must yield an
    /// equal BankHash.
    #[test]
    fn test_serialize() {
        solana_logger::setup();
        let hash = hash(&[1, 2, 3, 4]);
        let bank_hash = BankHash::from_hash(&hash);
        info!("{}", bank_hash);
        let bytes = serialize(&bank_hash).unwrap();
        let new: BankHash = deserialize(&bytes).unwrap();
        info!("{}", new);
        assert_eq!(new, bank_hash);
    }
}

View File

@ -64,8 +64,6 @@ pub mod program_error;
// Modules not usable by on-chain programs
#[cfg(not(feature = "program"))]
pub mod bank_hash;
#[cfg(not(feature = "program"))]
pub mod client;
#[cfg(not(feature = "program"))]
pub mod genesis_config;