Rename MAX_ENTRY_IDS

This commit is contained in:
Michael Vines 2019-03-01 12:16:20 -08:00
parent 8ec13d557f
commit 67b6be66c8
8 changed files with 24 additions and 23 deletions

View File

@ -15,7 +15,7 @@ use solana_sdk::hash::hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{KeypairUtil, Signature};
use solana_sdk::system_transaction::SystemTransaction;
use solana_sdk::timing::MAX_ENTRY_IDS;
use solana_sdk::timing::MAX_RECENT_TICK_HASHES;
use std::iter;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::{channel, Receiver};
@ -127,7 +127,7 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
);
let mut id = genesis_block.hash();
for _ in 0..MAX_ENTRY_IDS {
for _ in 0..MAX_RECENT_TICK_HASHES {
id = hash(&id.as_ref());
bank.register_tick(&id);
}
@ -236,7 +236,7 @@ fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
);
let mut id = genesis_block.hash();
for _ in 0..MAX_ENTRY_IDS {
for _ in 0..MAX_RECENT_TICK_HASHES {
id = hash(&id.as_ref());
bank.register_tick(&id);
}

View File

@ -7,7 +7,7 @@ use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::hash::hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction::SystemTransaction;
use solana_sdk::timing::MAX_ENTRY_IDS;
use solana_sdk::timing::MAX_RECENT_TICK_HASHES;
use test::Bencher;
#[bench]
@ -42,7 +42,7 @@ fn bench_process_transaction(bencher: &mut Bencher) {
let mut id = bank.last_id();
for _ in 0..(MAX_ENTRY_IDS - 1) {
for _ in 0..(MAX_RECENT_TICK_HASHES - 1) {
bank.register_tick(&id);
id = hash(&id.as_ref())
}

View File

@ -23,7 +23,7 @@ use solana_sdk::signature::{Keypair, Signature};
use solana_sdk::storage_program;
use solana_sdk::system_program;
use solana_sdk::system_transaction::SystemTransaction;
use solana_sdk::timing::{duration_as_us, MAX_ENTRY_IDS, NUM_TICKS_PER_SECOND};
use solana_sdk::timing::{duration_as_us, MAX_RECENT_TICK_HASHES, NUM_TICKS_PER_SECOND};
use solana_sdk::token_program;
use solana_sdk::transaction::Transaction;
use solana_sdk::vote_program::{self, VoteState};
@ -594,7 +594,7 @@ impl Bank {
#[must_use]
pub fn process_transactions(&self, txs: &[Transaction]) -> Vec<Result<()>> {
let lock_results = self.lock_accounts(txs);
let results = self.load_execute_and_commit_transactions(txs, lock_results, MAX_ENTRY_IDS);
let results = self.load_execute_and_commit_transactions(txs, lock_results, MAX_RECENT_TICK_HASHES);
self.unlock_accounts(txs, &results);
results
}
@ -1201,7 +1201,7 @@ mod tests {
let lock_result = bank.lock_accounts(&pay_alice);
let results_alice =
bank.load_execute_and_commit_transactions(&pay_alice, lock_result, MAX_ENTRY_IDS);
bank.load_execute_and_commit_transactions(&pay_alice, lock_result, MAX_RECENT_TICK_HASHES);
assert_eq!(results_alice[0], Ok(()));
// try executing an interleaved transfer twice

View File

@ -1,6 +1,6 @@
use hashbrown::HashMap;
use solana_sdk::hash::Hash;
use solana_sdk::timing::{timestamp, MAX_ENTRY_IDS};
use solana_sdk::timing::{timestamp, MAX_RECENT_TICK_HASHES};
#[derive(Debug, PartialEq, Eq, Clone)]
struct HashQueueEntry {
@ -71,9 +71,9 @@ impl HashQueue {
// this clean up can be deferred until sigs gets larger
// because we verify entry.nth every place we check for validity
if self.entries.len() >= MAX_ENTRY_IDS as usize {
if self.entries.len() >= MAX_RECENT_TICK_HASHES as usize {
self.entries
.retain(|_, entry| hash_height - entry.hash_height <= MAX_ENTRY_IDS as u64);
.retain(|_, entry| hash_height - entry.hash_height <= MAX_RECENT_TICK_HASHES as u64);
}
self.entries.insert(
@ -100,7 +100,7 @@ impl HashQueue {
let mut total = 0;
for (hash_height, stake) in hashes_and_stakes.iter() {
if current_hash_height >= *hash_height
&& ((current_hash_height - hash_height) as usize) < MAX_ENTRY_IDS
&& ((current_hash_height - hash_height) as usize) < MAX_RECENT_TICK_HASHES
{
total += stake;
if total > supermajority_stake {
@ -139,7 +139,7 @@ mod tests {
fn test_reject_old_last_hash() {
let last_hash = Hash::default();
let mut entry_queue = HashQueue::default();
for i in 0..MAX_ENTRY_IDS {
for i in 0..MAX_RECENT_TICK_HASHES {
let last_hash = hash(&serialize(&i).unwrap()); // Unique hash
entry_queue.register_hash(&last_hash);
}

View File

@ -2,14 +2,13 @@ use crate::bloom::{Bloom, BloomHashIndex};
use hashbrown::HashMap;
use solana_sdk::hash::Hash;
use solana_sdk::signature::Signature;
use solana_sdk::timing::{MAX_ENTRY_IDS, NUM_TICKS_PER_SECOND};
use std::collections::VecDeque;
use std::ops::Deref;
#[cfg(test)]
use std::ops::DerefMut;
/// This cache is designed to last 1 second
const MAX_CACHE_ENTRIES: usize = MAX_ENTRY_IDS / NUM_TICKS_PER_SECOND;
/// Each cache entry is designed to span ~1 second of signatures
const MAX_CACHE_ENTRIES: usize = solana_sdk::timing::MAX_HASH_AGE_IN_SECONDS;
type FailureMap<T> = HashMap<Signature, T>;

View File

@ -9,13 +9,15 @@ pub const NUM_TICKS_PER_SECOND: usize = 10;
pub const DEFAULT_TICKS_PER_SLOT: u64 = 80;
pub const DEFAULT_SLOTS_PER_EPOCH: u64 = 64;
/// The number of most recent `last_id` values that the bank will track the signatures
/// of. Once the bank discards a `last_id`, it will reject any transactions that use
/// The time window over which the bank will track signatures of recent `last_id`
/// values. Once the bank discards a `last_id`, it will reject any transactions that use
/// that `last_id` in a transaction. Lowering this value reduces memory consumption,
/// but requires clients to update their `last_id` more frequently. Raising the value
/// lengthens the time a client must wait to be certain a missing transaction will
/// not be processed by the network.
pub const MAX_ENTRY_IDS: usize = NUM_TICKS_PER_SECOND * 120;
pub const MAX_HASH_AGE_IN_SECONDS: usize = 120;
pub const MAX_RECENT_TICK_HASHES: usize = NUM_TICKS_PER_SECOND * MAX_HASH_AGE_IN_SECONDS;
pub fn duration_as_us(d: &Duration) -> u64 {
(d.as_secs() * 1000 * 1000) + (u64::from(d.subsec_nanos()) / 1_000)

View File

@ -14,7 +14,7 @@ use bincode::deserialize;
use solana_metrics::counter::Counter;
use solana_runtime::bank::{self, Bank, BankError};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::{self, duration_as_us, MAX_ENTRY_IDS};
use solana_sdk::timing::{self, duration_as_us, MAX_RECENT_TICK_HASHES};
use solana_sdk::transaction::Transaction;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError};
@ -173,7 +173,7 @@ impl BankingStage {
// TODO: Banking stage threads should be prioritized to complete faster then this queue
// expires.
let (loaded_accounts, results) =
bank.load_and_execute_transactions(txs, lock_results, MAX_ENTRY_IDS as usize / 2);
bank.load_and_execute_transactions(txs, lock_results, MAX_RECENT_TICK_HASHES as usize / 2);
let load_execute_time = now.elapsed();
let record_time = {

View File

@ -7,7 +7,7 @@ use solana_metrics::counter::Counter;
use solana_runtime::bank::{Bank, BankError, Result};
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::timing::duration_as_ms;
use solana_sdk::timing::MAX_ENTRY_IDS;
use solana_sdk::timing::MAX_RECENT_TICK_HASHES;
use std::sync::Arc;
use std::time::Instant;
@ -35,7 +35,7 @@ fn par_execute_entries(bank: &Bank, entries: &[(&Entry, Vec<Result<()>>)]) -> Re
let results = bank.load_execute_and_commit_transactions(
&e.transactions,
lock_results.to_vec(),
MAX_ENTRY_IDS,
MAX_RECENT_TICK_HASHES,
);
bank.unlock_accounts(&e.transactions, &results);
first_err(&results)