Rename hash_queue and fix boundary condition (#3289)

This commit is contained in:
Sagar Dhawan 2019-03-14 11:56:36 -07:00 committed by GitHub
parent 9fac3b26ee
commit 3f2fc21bb3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 144 additions and 134 deletions

View File

@ -4,7 +4,7 @@
//! already been signed and verified.
use crate::accounts::{Accounts, ErrorCounters, InstructionAccounts, InstructionLoaders};
use crate::hash_queue::HashQueue;
use crate::blockhash_queue::BlockhashQueue;
use crate::runtime;
use crate::status_cache::StatusCache;
use bincode::serialize;
@ -121,7 +121,7 @@ pub struct Bank {
status_cache: RwLock<BankStatusCache>,
/// FIFO queue of `recent_blockhash` items
blockhash_queue: RwLock<HashQueue>,
blockhash_queue: RwLock<BlockhashQueue>,
/// Previous checkpoint of this bank
parent: RwLock<Option<Arc<Bank>>>,
@ -156,7 +156,7 @@ pub struct Bank {
is_delta: AtomicBool,
}
impl Default for HashQueue {
impl Default for BlockhashQueue {
fn default() -> Self {
Self::new(MAX_RECENT_BLOCKHASHES)
}
@ -475,7 +475,7 @@ impl Bank {
txs.iter()
.zip(lock_results.into_iter())
.map(|(tx, lock_res)| {
if lock_res.is_ok() && !hash_queue.check_entry_age(tx.recent_blockhash, max_age) {
if lock_res.is_ok() && !hash_queue.check_hash_age(tx.recent_blockhash, max_age) {
error_counters.reserve_blockhash += 1;
Err(TransactionError::BlockhashNotFound)
} else {

View File

@ -0,0 +1,139 @@
use hashbrown::HashMap;
use solana_sdk::hash::Hash;
use solana_sdk::timing::timestamp;
/// Per-hash bookkeeping: when a hash was registered and at what height.
#[derive(Debug, PartialEq, Eq, Clone)]
struct HashAge {
    // wall-clock value from `timing::timestamp()` at registration time
    timestamp: u64,
    // value of `BlockhashQueue::hash_height` when this hash was registered;
    // age = current hash_height - this field
    hash_height: u64,
}
/// Low memory overhead, so can be cloned for every checkpoint
#[derive(Clone)]
pub struct BlockhashQueue {
    /// updated whenever a hash is registered
    hash_height: u64,
    /// last hash to be registered
    last_hash: Option<Hash>,
    /// registration metadata for every live hash, keyed by the hash itself
    ages: HashMap<Hash, HashAge>,
    /// hashes older than `max_age` will be dropped from the queue
    max_age: usize,
}
impl BlockhashQueue {
    /// Create an empty queue that evicts hashes older than `max_age`.
    pub fn new(max_age: usize) -> Self {
        Self {
            hash_height: 0,
            last_hash: None,
            ages: HashMap::new(),
            max_age,
        }
    }

    /// Number of hashes registered so far.
    #[allow(dead_code)]
    pub fn hash_height(&self) -> u64 {
        self.hash_height
    }

    /// Most recently registered hash.
    ///
    /// Panics if no hash has been registered yet.
    pub fn last_hash(&self) -> Hash {
        self.last_hash.expect("no hash has been set")
    }

    /// Check if the age of the hash is within the max_age
    /// return false for any hashes with an age above max_age
    pub fn check_hash_age(&self, hash: Hash, max_age: usize) -> bool {
        self.ages
            .get(&hash)
            .map_or(false, |age| self.hash_height - age.hash_height <= max_age as u64)
    }

    /// check if hash is valid
    #[cfg(test)]
    pub fn check_hash(&self, hash: Hash) -> bool {
        self.ages.contains_key(&hash)
    }

    /// Record the genesis hash at height 0 without bumping `hash_height`.
    pub fn genesis_hash(&mut self, hash: &Hash) {
        let age = HashAge {
            hash_height: 0,
            timestamp: timestamp(),
        };
        self.ages.insert(*hash, age);
        self.last_hash = Some(*hash);
    }

    /// Register a new hash: bump the height, evict stale entries, record it.
    pub fn register_hash(&mut self, hash: &Hash) {
        self.hash_height += 1;
        let height = self.hash_height;
        let max_age = self.max_age as u64;
        // this clean up can be deferred until sigs gets larger
        // because we verify age.nth every place we check for validity
        if self.ages.len() >= self.max_age {
            self.ages
                .retain(|_, age| height - age.hash_height <= max_age);
        }
        let age = HashAge {
            hash_height: height,
            timestamp: timestamp(),
        };
        self.ages.insert(*hash, age);
        self.last_hash = Some(*hash);
    }

    /// Maps a hash height to a timestamp
    pub fn hash_height_to_timestamp(&self, hash_height: u64) -> Option<u64> {
        self.ages
            .values()
            .find(|age| age.hash_height == hash_height)
            .map(|age| age.timestamp)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use bincode::serialize;
    use solana_sdk::hash::hash;

    #[test]
    fn test_register_hash() {
        let initial = Hash::default();
        let mut queue = BlockhashQueue::new(100);
        assert!(!queue.check_hash(initial));
        queue.register_hash(&initial);
        assert!(queue.check_hash(initial));
        assert_eq!(queue.hash_height(), 1);
    }

    #[test]
    fn test_reject_old_last_hash() {
        let first_hash = Hash::default();
        let mut queue = BlockhashQueue::new(100);
        for i in 0..100 {
            let unique = hash(&serialize(&i).unwrap()); // Unique hash
            queue.register_hash(&unique);
        }
        // Assert we're no longer able to use the oldest hash.
        assert!(!queue.check_hash(first_hash));
    }

    /// test that when max age is 0, that a valid last_hash still passes the age check
    #[test]
    fn test_queue_init_blockhash() {
        let only_hash = Hash::default();
        let mut queue = BlockhashQueue::new(100);
        queue.register_hash(&only_hash);
        assert_eq!(only_hash, queue.last_hash());
        assert!(queue.check_hash_age(only_hash, 0));
    }
}

View File

@ -1,129 +0,0 @@
use hashbrown::HashMap;
use solana_sdk::hash::Hash;
use solana_sdk::timing::timestamp;
/// Per-entry bookkeeping: when a hash was registered and at what height.
#[derive(Debug, PartialEq, Eq, Clone)]
struct HashQueueEntry {
    // wall-clock value from `timing::timestamp()` at registration time
    timestamp: u64,
    // value of `HashQueue::hash_height` when this hash was registered;
    // age = current hash_height - this field
    hash_height: u64,
}
/// Low memory overhead, so can be cloned for every checkpoint
#[derive(Clone)]
pub struct HashQueue {
    /// updated whenever a hash is registered
    hash_height: u64,
    /// last hash to be registered
    last_hash: Option<Hash>,
    // registration metadata for every live hash, keyed by the hash itself
    entries: HashMap<Hash, HashQueueEntry>,
    // entries older than `max_entries` (in height) are evicted on register
    max_entries: usize,
}
impl HashQueue {
    /// Create an empty queue that evicts entries older than `max_entries`.
    pub fn new(max_entries: usize) -> Self {
        Self {
            entries: HashMap::new(),
            hash_height: 0,
            last_hash: None,
            max_entries,
        }
    }

    /// Number of hashes registered so far.
    #[allow(dead_code)]
    pub fn hash_height(&self) -> u64 {
        self.hash_height
    }

    /// Most recently registered hash.
    ///
    /// Panics if no hash has been registered yet.
    pub fn last_hash(&self) -> Hash {
        self.last_hash.expect("no hash has been set")
    }

    /// Check if the age of the entry is within the max_age
    /// return false for any entries with an age above max_age
    pub fn check_entry_age(&self, entry: Hash, max_age: usize) -> bool {
        let entry = self.entries.get(&entry);
        match entry {
            // Boundary fix: use `<=` (was `<`) so an entry exactly `max_age`
            // old is still valid. This matches the retention bound used in
            // `register_hash` (which keeps entries with age <= max) and lets
            // a just-registered hash pass a check with max_age == 0.
            Some(entry) => self.hash_height - entry.hash_height <= max_age as u64,
            _ => false,
        }
    }

    /// check if entry is valid
    #[cfg(test)]
    pub fn check_entry(&self, entry: Hash) -> bool {
        self.entries.get(&entry).is_some()
    }

    /// Record the genesis hash at height 0 without bumping `hash_height`.
    pub fn genesis_hash(&mut self, hash: &Hash) {
        self.entries.insert(
            *hash,
            HashQueueEntry {
                hash_height: 0,
                timestamp: timestamp(),
            },
        );
        self.last_hash = Some(*hash);
    }

    /// Register a new hash: bump the height, evict stale entries, record it.
    pub fn register_hash(&mut self, hash: &Hash) {
        self.hash_height += 1;
        let hash_height = self.hash_height;
        // this clean up can be deferred until sigs gets larger
        // because we verify entry.nth every place we check for validity
        let max_entries = self.max_entries;
        if self.entries.len() >= max_entries {
            self.entries
                .retain(|_, entry| hash_height - entry.hash_height <= max_entries as u64);
        }
        self.entries.insert(
            *hash,
            HashQueueEntry {
                hash_height,
                timestamp: timestamp(),
            },
        );
        self.last_hash = Some(*hash);
    }

    /// Maps a hash height to a timestamp
    pub fn hash_height_to_timestamp(&self, hash_height: u64) -> Option<u64> {
        for entry in self.entries.values() {
            if entry.hash_height == hash_height {
                return Some(entry.timestamp);
            }
        }
        None
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use bincode::serialize;
    use solana_sdk::hash::hash;

    #[test]
    fn test_register_hash() {
        let initial = Hash::default();
        let mut queue = HashQueue::new(100);
        assert!(!queue.check_entry(initial));
        queue.register_hash(&initial);
        assert!(queue.check_entry(initial));
        assert_eq!(queue.hash_height(), 1);
    }

    #[test]
    fn test_reject_old_last_hash() {
        let first_entry = Hash::default();
        let mut queue = HashQueue::new(100);
        for i in 0..100 {
            let unique = hash(&serialize(&i).unwrap()); // Unique hash
            queue.register_hash(&unique);
        }
        // Assert we're no longer able to use the oldest entry ID.
        assert!(!queue.check_entry(first_entry));
    }
}

View File

@ -1,8 +1,8 @@
mod accounts;
pub mod append_vec;
pub mod bank;
mod blockhash_queue;
pub mod bloom;
mod hash_queue;
pub mod loader_utils;
mod native_loader;
pub mod runtime;