Remove executor cache from the codebase (#31566)

This commit is contained in:
Pankaj Garg 2023-05-10 07:11:04 -07:00 committed by GitHub
parent 8e9b064ab3
commit ab89e2cb15
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 44 additions and 945 deletions

View File

@ -1,625 +0,0 @@
use {
crate::loaded_programs::{LoadedProgram, LoadedProgramType},
log::*,
rand::Rng,
solana_sdk::{pubkey::Pubkey, saturating_add_assign, slot_history::Slot, stake_history::Epoch},
std::{
collections::HashMap,
fmt::Debug,
ops::Div,
sync::{
atomic::{AtomicU64, Ordering::Relaxed},
Arc,
},
},
};
/// A subset of the BankExecutorCache containing only the executors relevant to one transaction
///
/// The BankExecutorCache cannot be updated directly while transaction batches are
/// being processed in parallel, as that would cause a race condition.
#[derive(Default, Debug)]
pub struct TransactionExecutorCache {
/// Executors or tombstones which are visible during the transaction
pub visible: HashMap<Pubkey, Arc<LoadedProgram>>,
/// Executors of programs which were re-/deployed during the transaction
pub deployments: HashMap<Pubkey, Arc<LoadedProgram>>,
/// Executors which were missing in the cache and not re-/deployed during the transaction
pub add_to_cache: HashMap<Pubkey, Arc<LoadedProgram>>,
}
impl TransactionExecutorCache {
pub fn new(executable_keys: impl Iterator<Item = (Pubkey, Arc<LoadedProgram>)>) -> Self {
Self {
visible: executable_keys.collect(),
deployments: HashMap::new(),
add_to_cache: HashMap::new(),
}
}
pub fn get(&self, key: &Pubkey) -> Option<Arc<LoadedProgram>> {
self.visible.get(key).cloned()
}
pub fn set_tombstone(&mut self, key: Pubkey, slot: Slot) {
self.visible.insert(
key,
Arc::new(LoadedProgram::new_tombstone(
slot,
LoadedProgramType::Closed,
)),
);
}
pub fn set(
&mut self,
key: Pubkey,
executor: Arc<LoadedProgram>,
upgrade: bool,
delay_visibility_of_program_deployment: bool,
current_slot: Slot,
) {
if upgrade {
if delay_visibility_of_program_deployment {
// Place a tombstone in the cache so that
// we don't load the new version from the database as it should remain invisible
let tombstone =
LoadedProgram::new_tombstone(current_slot, LoadedProgramType::DelayVisibility);
tombstone
.usage_counter
.store(executor.usage_counter.load(Relaxed), Relaxed);
self.visible.insert(key, Arc::new(tombstone));
} else {
self.visible.insert(key, executor.clone());
}
self.deployments.insert(key, executor);
} else {
self.visible.insert(key, executor.clone());
self.add_to_cache.insert(key, executor);
}
}
pub fn get_executors_added_to_the_cache(&mut self) -> HashMap<Pubkey, Arc<LoadedProgram>> {
let mut executors = HashMap::new();
std::mem::swap(&mut executors, &mut self.add_to_cache);
executors
}
pub fn get_executors_which_were_deployed(&mut self) -> HashMap<Pubkey, Arc<LoadedProgram>> {
let mut executors = HashMap::new();
std::mem::swap(&mut executors, &mut self.deployments);
executors
}
}
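// Illustrative sketch, not part of the original file: one way a caller could use this
// per-transaction overlay so the shared BankExecutorCache is never mutated while a batch
// executes in parallel. `bank_cache`, `program_id`, `executor`, and `slot` are assumed to
// be supplied by the caller.
fn example_transaction_overlay(
    bank_cache: &mut BankExecutorCache,
    program_id: Pubkey,
    executor: Arc<LoadedProgram>,
    slot: Slot,
) {
    // Snapshot the executors visible to this transaction from the bank-wide cache.
    let mut tx_cache = TransactionExecutorCache::new(
        bank_cache.get(&program_id).map(|e| (program_id, e)).into_iter(),
    );
    // An executor that was missing from the bank cache is recorded locally, not in the
    // shared cache (upgrade = false routes it into `add_to_cache`).
    tx_cache.set(program_id, executor, false, true, slot);
    // After the batch finishes, the additions are merged back into the bank-wide cache;
    // in the Bank this happens under a write lock (see
    // store_executors_which_added_to_the_cache later in this commit).
    bank_cache.put(tx_cache.get_executors_added_to_the_cache().into_iter());
}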
/// Capacity of `BankExecutorCache`
pub const MAX_CACHED_EXECUTORS: usize = 256;
/// An entry of the BankExecutorCache
#[derive(Debug)]
pub struct BankExecutorCacheEntry {
prev_epoch_count: u64,
epoch_count: AtomicU64,
executor: Arc<LoadedProgram>,
pub hit_count: AtomicU64,
}
impl Clone for BankExecutorCacheEntry {
fn clone(&self) -> Self {
Self {
prev_epoch_count: self.prev_epoch_count,
epoch_count: AtomicU64::new(self.epoch_count.load(Relaxed)),
executor: self.executor.clone(),
hit_count: AtomicU64::new(self.hit_count.load(Relaxed)),
}
}
}
/// LFU Cache of executors which exists once per bank
#[derive(Debug)]
pub struct BankExecutorCache {
capacity: usize,
current_epoch: Epoch,
pub executors: HashMap<Pubkey, BankExecutorCacheEntry>,
pub stats: Stats,
}
impl Default for BankExecutorCache {
fn default() -> Self {
Self {
capacity: MAX_CACHED_EXECUTORS,
current_epoch: Epoch::default(),
executors: HashMap::default(),
stats: Stats::default(),
}
}
}
#[cfg(RUSTC_WITH_SPECIALIZATION)]
impl solana_frozen_abi::abi_example::AbiExample for BankExecutorCache {
fn example() -> Self {
// Delegate the AbiExample impl to Default instead of recursing into
// Arc<dyn Executor>, which is not easily impl-able due to Rust's coherence rules.
// This is safe because BankExecutorCache isn't serializable by definition.
Self::default()
}
}
impl BankExecutorCache {
pub fn new(max_capacity: usize, current_epoch: Epoch) -> Self {
Self {
capacity: max_capacity,
current_epoch,
executors: HashMap::new(),
stats: Stats::default(),
}
}
pub fn new_from_parent_bank_executors(
parent_bank_executors: &BankExecutorCache,
current_epoch: Epoch,
) -> Self {
let executors = if parent_bank_executors.current_epoch == current_epoch {
parent_bank_executors.executors.clone()
} else {
parent_bank_executors
.executors
.iter()
.map(|(&key, entry)| {
let entry = BankExecutorCacheEntry {
prev_epoch_count: entry.epoch_count.load(Relaxed),
epoch_count: AtomicU64::default(),
executor: entry.executor.clone(),
hit_count: AtomicU64::new(entry.hit_count.load(Relaxed)),
};
(key, entry)
})
.collect()
};
Self {
capacity: parent_bank_executors.capacity,
current_epoch,
executors,
stats: Stats::default(),
}
}
pub fn get(&self, pubkey: &Pubkey) -> Option<Arc<LoadedProgram>> {
if let Some(entry) = self.executors.get(pubkey) {
self.stats.hits.fetch_add(1, Relaxed);
entry.epoch_count.fetch_add(1, Relaxed);
entry.hit_count.fetch_add(1, Relaxed);
Some(entry.executor.clone())
} else {
self.stats.misses.fetch_add(1, Relaxed);
None
}
}
pub fn put(&mut self, executors: impl Iterator<Item = (Pubkey, Arc<LoadedProgram>)>) {
let mut new_executors: Vec<_> = executors
.filter_map(|(key, executor)| {
if let Some(mut entry) = self.remove(&key) {
self.stats.replacements.fetch_add(1, Relaxed);
entry.executor = executor;
let _ = self.executors.insert(key, entry);
None
} else {
self.stats.insertions.fetch_add(1, Relaxed);
Some((key, executor))
}
})
.collect();
if !new_executors.is_empty() {
let mut counts = self
.executors
.iter()
.map(|(key, entry)| {
let count = entry
.prev_epoch_count
.saturating_add(entry.epoch_count.load(Relaxed));
(key, count)
})
.collect::<Vec<_>>();
counts.sort_unstable_by_key(|(_, count)| *count);
let primer_counts = Self::get_primer_counts(counts.as_slice(), new_executors.len());
if self.executors.len() >= self.capacity {
let mut least_keys = counts
.iter()
.take(new_executors.len())
.map(|least| *least.0)
.collect::<Vec<_>>();
for least_key in least_keys.drain(..) {
let _ = self.remove(&least_key);
self.stats
.evictions
.entry(least_key)
.and_modify(|c| saturating_add_assign!(*c, 1))
.or_insert(1);
}
}
for ((key, executor), primer_count) in new_executors.drain(..).zip(primer_counts) {
let entry = BankExecutorCacheEntry {
prev_epoch_count: 0,
epoch_count: AtomicU64::new(primer_count),
executor: executor.clone(),
hit_count: AtomicU64::new(1),
};
let _ = self.executors.insert(key, entry);
}
}
}
pub fn remove(&mut self, pubkey: &Pubkey) -> Option<BankExecutorCacheEntry> {
let maybe_entry = self.executors.remove(pubkey);
if let Some(entry) = maybe_entry.as_ref() {
if entry.hit_count.load(Relaxed) == 1 {
self.stats.one_hit_wonders.fetch_add(1, Relaxed);
}
}
maybe_entry
}
pub fn clear(&mut self) {
*self = BankExecutorCache::default();
}
pub fn get_primer_count_upper_bound_inclusive(counts: &[(&Pubkey, u64)]) -> u64 {
const PRIMER_COUNT_TARGET_PERCENTILE: u64 = 85;
#[allow(clippy::assertions_on_constants)]
{
assert!(PRIMER_COUNT_TARGET_PERCENTILE <= 100);
}
// Executor use-frequencies are assumed to fit a Pareto distribution. Choose an
// upper-bound for our primer count as the actual count at the target rank to avoid
// an upward bias
let target_index = u64::try_from(counts.len().saturating_sub(1))
.ok()
.and_then(|counts| {
let index = counts
.saturating_mul(PRIMER_COUNT_TARGET_PERCENTILE)
.div(100); // switch to u64::saturating_div once stable
usize::try_from(index).ok()
})
.unwrap_or(0);
counts
.get(target_index)
.map(|(_, count)| *count)
.unwrap_or(0)
}
pub fn get_primer_counts(counts: &[(&Pubkey, u64)], num_counts: usize) -> Vec<u64> {
let max_primer_count = Self::get_primer_count_upper_bound_inclusive(counts);
let mut rng = rand::thread_rng();
(0..num_counts)
.map(|_| rng.gen_range(0, max_primer_count.saturating_add(1)))
.collect::<Vec<_>>()
}
}
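// Worked example, not part of the original file: put() ranks existing entries by
// prev_epoch_count + epoch_count and, once the cache is at capacity, evicts as many of the
// least-used entries as there are new executors. Each new entry is "primed" with a random
// count in 0..=bound, where bound is the count at the 85th-percentile rank: with ten
// entries holding counts 0..10, target_index = (10 - 1) * 85 / 100 = 7, so the bound is 7
// (this is what test_executor_cache_get_primer_count_upper_bound_inclusive asserts below).
// Priming presumably keeps a freshly inserted executor from starting at the very bottom of
// the eviction order on the next put().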
/// Statistics of the entire `BankExecutorCache`
#[derive(Debug, Default)]
pub struct Stats {
pub hits: AtomicU64,
pub misses: AtomicU64,
pub evictions: HashMap<Pubkey, u64>,
pub insertions: AtomicU64,
pub replacements: AtomicU64,
pub one_hit_wonders: AtomicU64,
}
impl Stats {
/// Logs the measurement values
pub fn submit(&self, slot: Slot) {
let hits = self.hits.load(Relaxed);
let misses = self.misses.load(Relaxed);
let insertions = self.insertions.load(Relaxed);
let replacements = self.replacements.load(Relaxed);
let one_hit_wonders = self.one_hit_wonders.load(Relaxed);
let evictions: u64 = self.evictions.values().sum();
datapoint_info!(
"bank-executor-cache-stats",
("slot", slot, i64),
("hits", hits, i64),
("misses", misses, i64),
("evictions", evictions, i64),
("insertions", insertions, i64),
("replacements", replacements, i64),
("one_hit_wonders", one_hit_wonders, i64),
);
debug!(
"Executor Cache Stats -- Hits: {}, Misses: {}, Evictions: {}, Insertions: {}, Replacements: {}, One-Hit-Wonders: {}",
hits, misses, evictions, insertions, replacements, one_hit_wonders,
);
if log_enabled!(log::Level::Trace) && !self.evictions.is_empty() {
let mut evictions = self.evictions.iter().collect::<Vec<_>>();
evictions.sort_by_key(|e| e.1);
let evictions = evictions
.into_iter()
.rev()
.map(|(program_id, evictions)| {
format!(" {:<44} {}", program_id.to_string(), evictions)
})
.collect::<Vec<_>>();
let evictions = evictions.join("\n");
trace!(
"Eviction Details:\n {:<44} {}\n{}",
"Program",
"Count",
evictions
);
}
}
}
#[allow(clippy::indexing_slicing)]
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_executor_cache() {
let key1 = solana_sdk::pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let key3 = solana_sdk::pubkey::new_rand();
let key4 = solana_sdk::pubkey::new_rand();
let executor = Arc::new(LoadedProgram::default());
let mut cache = BankExecutorCache::new(3, 0);
cache.put([(key1, executor.clone())].into_iter());
cache.put([(key2, executor.clone())].into_iter());
cache.put([(key3, executor.clone())].into_iter());
assert!(cache.get(&key1).is_some());
assert!(cache.get(&key2).is_some());
assert!(cache.get(&key3).is_some());
assert!(cache.get(&key1).is_some());
assert!(cache.get(&key1).is_some());
assert!(cache.get(&key2).is_some());
cache.put([(key4, executor.clone())].into_iter());
assert!(cache.get(&key4).is_some());
let num_retained = [&key1, &key2, &key3]
.iter()
.filter_map(|key| cache.get(key))
.count();
assert_eq!(num_retained, 2);
assert!(cache.get(&key4).is_some());
assert!(cache.get(&key4).is_some());
assert!(cache.get(&key4).is_some());
cache.put([(key3, executor)].into_iter());
assert!(cache.get(&key3).is_some());
let num_retained = [&key1, &key2, &key4]
.iter()
.filter_map(|key| cache.get(key))
.count();
assert_eq!(num_retained, 2);
}
#[test]
fn test_cached_executor_eviction() {
let key1 = solana_sdk::pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let key3 = solana_sdk::pubkey::new_rand();
let key4 = solana_sdk::pubkey::new_rand();
let executor = Arc::new(LoadedProgram::default());
let mut cache = BankExecutorCache::new(3, 0);
assert!(cache.current_epoch == 0);
cache.put([(key1, executor.clone())].into_iter());
cache.put([(key2, executor.clone())].into_iter());
cache.put([(key3, executor.clone())].into_iter());
assert!(cache.get(&key1).is_some());
assert!(cache.get(&key1).is_some());
assert!(cache.get(&key1).is_some());
let mut cache = BankExecutorCache::new_from_parent_bank_executors(&cache, 1);
assert!(cache.current_epoch == 1);
assert!(cache.get(&key2).is_some());
assert!(cache.get(&key2).is_some());
assert!(cache.get(&key3).is_some());
cache.put([(key4, executor.clone())].into_iter());
assert!(cache.get(&key4).is_some());
let num_retained = [&key1, &key2, &key3]
.iter()
.filter_map(|key| cache.get(key))
.count();
assert_eq!(num_retained, 2);
cache.put([(key1, executor.clone())].into_iter());
cache.put([(key3, executor.clone())].into_iter());
assert!(cache.get(&key1).is_some());
assert!(cache.get(&key3).is_some());
let num_retained = [&key2, &key4]
.iter()
.filter_map(|key| cache.get(key))
.count();
assert_eq!(num_retained, 1);
cache = BankExecutorCache::new_from_parent_bank_executors(&cache, 2);
assert!(cache.current_epoch == 2);
cache.put([(key3, executor)].into_iter());
assert!(cache.get(&key3).is_some());
}
#[test]
fn test_executor_cache_evicts_smallest() {
let key1 = solana_sdk::pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let key3 = solana_sdk::pubkey::new_rand();
let executor = Arc::new(LoadedProgram::default());
let mut cache = BankExecutorCache::new(2, 0);
cache.put([(key1, executor.clone())].into_iter());
for _ in 0..5 {
let _ = cache.get(&key1);
}
cache.put([(key2, executor.clone())].into_iter());
// make sure key1's use-count is greater than key2's
let _ = cache.get(&key1);
let mut entries = cache
.executors
.iter()
.map(|(k, v)| (*k, v.epoch_count.load(Relaxed)))
.collect::<Vec<_>>();
entries.sort_by_key(|(_, v)| *v);
assert!(entries[0].1 < entries[1].1);
cache.put([(key3, executor)].into_iter());
assert!(cache.get(&entries[0].0).is_none());
assert!(cache.get(&entries[1].0).is_some());
}
#[test]
fn test_executor_cache_one_hit_wonder_counter() {
let mut cache = BankExecutorCache::new(1, 0);
let one_hit_wonder = Pubkey::new_unique();
let popular = Pubkey::new_unique();
let executor = Arc::new(LoadedProgram::default());
// make sure we're starting from where we think we are
assert_eq!(cache.stats.one_hit_wonders.load(Relaxed), 0);
// add our one-hit-wonder
cache.put([(one_hit_wonder, executor.clone())].into_iter());
assert_eq!(cache.executors[&one_hit_wonder].hit_count.load(Relaxed), 1);
// displace the one-hit-wonder with "popular program"
cache.put([(popular, executor.clone())].into_iter());
assert_eq!(cache.executors[&popular].hit_count.load(Relaxed), 1);
// one-hit-wonder counter incremented
assert_eq!(cache.stats.one_hit_wonders.load(Relaxed), 1);
// make "popular program" popular
cache.get(&popular).unwrap();
assert_eq!(cache.executors[&popular].hit_count.load(Relaxed), 2);
// evict "popular program"
cache.put([(one_hit_wonder, executor)].into_iter());
assert_eq!(cache.executors[&one_hit_wonder].hit_count.load(Relaxed), 1);
// one-hit-wonder counter not incremented
assert_eq!(cache.stats.one_hit_wonders.load(Relaxed), 1);
}
#[test]
fn test_executor_cache_get_primer_count_upper_bound_inclusive() {
let pubkey = Pubkey::default();
let v = [];
assert_eq!(
BankExecutorCache::get_primer_count_upper_bound_inclusive(&v),
0
);
let v = [(&pubkey, 1)];
assert_eq!(
BankExecutorCache::get_primer_count_upper_bound_inclusive(&v),
1
);
let v = (0u64..10).map(|i| (&pubkey, i)).collect::<Vec<_>>();
assert_eq!(
BankExecutorCache::get_primer_count_upper_bound_inclusive(v.as_slice()),
7
);
}
#[test]
fn test_executor_cache_stats() {
#[derive(Debug, Default, PartialEq)]
struct ComparableStats {
hits: u64,
misses: u64,
evictions: HashMap<Pubkey, u64>,
insertions: u64,
replacements: u64,
one_hit_wonders: u64,
}
impl From<&Stats> for ComparableStats {
fn from(stats: &Stats) -> Self {
let Stats {
hits,
misses,
evictions,
insertions,
replacements,
one_hit_wonders,
} = stats;
ComparableStats {
hits: hits.load(Relaxed),
misses: misses.load(Relaxed),
evictions: evictions.clone(),
insertions: insertions.load(Relaxed),
replacements: replacements.load(Relaxed),
one_hit_wonders: one_hit_wonders.load(Relaxed),
}
}
}
const CURRENT_EPOCH: Epoch = 0;
let mut cache = BankExecutorCache::new(2, CURRENT_EPOCH);
let mut expected_stats = ComparableStats::default();
let program_id1 = Pubkey::new_unique();
let program_id2 = Pubkey::new_unique();
let executor = Arc::new(LoadedProgram::default());
// make sure we're starting from where we think we are
assert_eq!(ComparableStats::from(&cache.stats), expected_stats);
// insert some executors
cache.put([(program_id1, executor.clone())].into_iter());
cache.put([(program_id2, executor.clone())].into_iter());
expected_stats.insertions += 2;
assert_eq!(ComparableStats::from(&cache.stats), expected_stats);
// replace a one-hit-wonder executor
cache.put([(program_id1, executor.clone())].into_iter());
expected_stats.replacements += 1;
expected_stats.one_hit_wonders += 1;
assert_eq!(ComparableStats::from(&cache.stats), expected_stats);
// hit some executors
cache.get(&program_id1);
cache.get(&program_id1);
cache.get(&program_id2);
expected_stats.hits += 3;
assert_eq!(ComparableStats::from(&cache.stats), expected_stats);
// miss an executor
cache.get(&Pubkey::new_unique());
expected_stats.misses += 1;
assert_eq!(ComparableStats::from(&cache.stats), expected_stats);
// evict an executor
cache.put([(Pubkey::new_unique(), executor)].into_iter());
expected_stats.insertions += 1;
expected_stats.evictions.insert(program_id2, 1);
assert_eq!(ComparableStats::from(&cache.stats), expected_stats);
// make sure stats are cleared in new_from_parent
assert_eq!(
ComparableStats::from(
&BankExecutorCache::new_from_parent_bank_executors(&cache, CURRENT_EPOCH).stats
),
ComparableStats::default()
);
assert_eq!(
ComparableStats::from(
&BankExecutorCache::new_from_parent_bank_executors(&cache, CURRENT_EPOCH + 1).stats
),
ComparableStats::default()
);
}
}

View File

@ -13,7 +13,6 @@ pub use solana_rbpf;
pub mod accounts_data_meter;
pub mod builtin_program;
pub mod compute_budget;
pub mod executor_cache;
pub mod invoke_context;
pub mod loaded_programs;
pub mod log_collector;

View File

@ -255,6 +255,17 @@ impl LoadedProgram {
)
}
fn is_loaded(&self) -> bool {
match self.program {
LoadedProgramType::LegacyV0(_)
| LoadedProgramType::LegacyV1(_)
| LoadedProgramType::Typed(_) => true,
#[cfg(test)]
LoadedProgramType::TestLoaded => true,
_ => false,
}
}
fn is_implicit_delay_visibility_tombstone(&self, slot: Slot) -> bool {
self.effective_slot.saturating_sub(self.deployment_slot) == DELAY_VISIBILITY_SLOT_OFFSET
&& slot >= self.deployment_slot
@ -559,6 +570,21 @@ impl LoadedPrograms {
}
}
fn unload_program(&mut self, id: &Pubkey) {
if let Some(entries) = self.entries.get_mut(id) {
entries.iter_mut().for_each(|entry| {
if entry.is_loaded() {
*entry = Arc::new(entry.to_unloaded());
}
});
}
}
pub fn unload_all_programs(&mut self) {
let keys = self.entries.keys().copied().collect::<Vec<Pubkey>>();
keys.iter().for_each(|key| self.unload_program(key));
}
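// Note, not part of the patch itself: unload_program() converts each loaded entry in place
// to its unloaded form via to_unloaded() rather than removing it from the map, presumably
// dropping the compiled artifact while keeping the entry keyed under the program id.
// unload_all_programs() applies this to every key; the new Bank::clear_program_cache()
// further down in this commit relies on it.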
fn unload_program_entries<'a>(
&mut self,
remove: impl Iterator<Item = &'a (Pubkey, Arc<LoadedProgram>)>,

View File

@ -449,7 +449,7 @@ fn test_sol_alloc_free_no_longer_deployable() {
// Enable _sol_alloc_free syscall
bank.deactivate_feature(&solana_sdk::feature_set::disable_deploy_of_alloc_free_syscall::id());
bank.clear_signatures();
bank.clear_executors();
bank.clear_program_cache();
// Try and finalize the program now that sol_alloc_free is re-enabled
assert!(bank.process_transaction(&finalize_tx).is_ok());
@ -467,7 +467,7 @@ fn test_sol_alloc_free_no_longer_deployable() {
assert!(bank.process_transaction(&invoke_tx).is_ok());
bank.clear_signatures();
bank.clear_executors();
bank.clear_program_cache();
// invoke should still succeed on execute because the program is already deployed
assert!(bank.process_transaction(&invoke_tx).is_ok());

View File

@ -33,8 +33,6 @@
//! It offers a high-level API that signs transactions
//! on behalf of the caller, and a low-level API for when they have
//! already been signed and verified.
#[cfg(test)]
use solana_program_runtime::executor_cache::TransactionExecutorCache;
#[allow(deprecated)]
use solana_sdk::recent_blockhashes_account;
pub use solana_sdk::reward_type::RewardType;
@ -97,7 +95,6 @@ use {
accounts_data_meter::MAX_ACCOUNTS_DATA_LEN,
builtin_program::BuiltinPrograms,
compute_budget::{self, ComputeBudget},
executor_cache::{BankExecutorCache, MAX_CACHED_EXECUTORS},
loaded_programs::{
LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, LoadedPrograms,
LoadedProgramsForTxBatch, WorkingSlot,
@ -784,7 +781,6 @@ impl PartialEq for Bank {
cluster_type: _,
lazy_rent_collection: _,
rewards_pool_pubkeys: _,
executor_cache: _,
transaction_debug_keys: _,
transaction_log_collector_config: _,
transaction_log_collector: _,
@ -1017,9 +1013,6 @@ pub struct Bank {
// this is temporary field only to remove rewards_pool entirely
pub rewards_pool_pubkeys: Arc<HashSet<Pubkey>>,
/// Cached executors
executor_cache: RwLock<BankExecutorCache>,
transaction_debug_keys: Option<Arc<HashSet<Pubkey>>>,
// Global configuration for how transaction logs should be collected across all banks
@ -1269,7 +1262,6 @@ impl Bank {
cluster_type: Option::<ClusterType>::default(),
lazy_rent_collection: AtomicBool::default(),
rewards_pool_pubkeys: Arc::<HashSet<Pubkey>>::default(),
executor_cache: RwLock::<BankExecutorCache>::default(),
transaction_debug_keys: Option::<Arc<HashSet<Pubkey>>>::default(),
transaction_log_collector_config: Arc::<RwLock<TransactionLogCollectorConfig>>::default(
),
@ -1483,14 +1475,6 @@ impl Bank {
let (rewards_pool_pubkeys, rewards_pool_pubkeys_time_us) =
measure_us!(parent.rewards_pool_pubkeys.clone());
let (executor_cache, executor_cache_time_us) = measure_us!({
let parent_bank_executors = parent.executor_cache.read().unwrap();
RwLock::new(BankExecutorCache::new_from_parent_bank_executors(
&parent_bank_executors,
epoch,
))
});
let (transaction_debug_keys, transaction_debug_keys_time_us) =
measure_us!(parent.transaction_debug_keys.clone());
@ -1554,7 +1538,6 @@ impl Bank {
cluster_type: parent.cluster_type,
lazy_rent_collection: AtomicBool::new(parent.lazy_rent_collection.load(Relaxed)),
rewards_pool_pubkeys,
executor_cache,
transaction_debug_keys,
transaction_log_collector_config,
transaction_log_collector: Arc::new(RwLock::new(TransactionLogCollector::default())),
@ -1633,7 +1616,7 @@ impl Bank {
epoch_stakes_time_us,
builtin_programs_time_us,
rewards_pool_pubkeys_time_us,
executor_cache_time_us,
executor_cache_time_us: 0,
transaction_debug_keys_time_us,
transaction_log_collector_config_time_us,
feature_set_time_us,
@ -1644,13 +1627,6 @@ impl Bank {
},
);
parent
.executor_cache
.read()
.unwrap()
.stats
.submit(parent.slot());
new
}
@ -1896,7 +1872,6 @@ impl Bank {
cluster_type: Some(genesis_config.cluster_type),
lazy_rent_collection: new(),
rewards_pool_pubkeys: new(),
executor_cache: RwLock::new(BankExecutorCache::new(MAX_CACHED_EXECUTORS, fields.epoch)),
transaction_debug_keys: debug_keys,
transaction_log_collector_config: new(),
transaction_log_collector: new(),
@ -4077,76 +4052,12 @@ impl Bank {
balances
}
/// Get any cached executors needed by the transaction
#[cfg(test)]
fn get_tx_executor_cache(
&self,
accounts: &[TransactionAccount],
) -> Rc<RefCell<TransactionExecutorCache>> {
let executable_keys: Vec<_> = accounts
.iter()
.filter_map(|(key, account)| {
if account.executable() && !native_loader::check_id(account.owner()) {
Some(key)
} else {
None
}
})
.collect();
if executable_keys.is_empty() {
return Rc::new(RefCell::new(TransactionExecutorCache::default()));
}
let tx_executor_cache = {
let cache = self.executor_cache.read().unwrap();
TransactionExecutorCache::new(
executable_keys
.into_iter()
.filter_map(|key| cache.get(key).map(|executor| (*key, executor))),
)
};
Rc::new(RefCell::new(tx_executor_cache))
}
/// Add executors back to the bank's cache if they were missing and not re-/deployed
#[cfg(test)]
fn store_executors_which_added_to_the_cache(
&self,
tx_executor_cache: &RefCell<TransactionExecutorCache>,
) {
let executors = tx_executor_cache
.borrow_mut()
.get_executors_added_to_the_cache();
if !executors.is_empty() {
self.executor_cache
.write()
.unwrap()
.put(executors.into_iter());
}
}
/// Add re-/deployed executors to the bank's cache
#[cfg(test)]
fn store_executors_which_were_deployed(
&self,
tx_executor_cache: &RefCell<TransactionExecutorCache>,
) {
let executors = tx_executor_cache
.borrow_mut()
.get_executors_which_were_deployed();
if !executors.is_empty() {
self.executor_cache
.write()
.unwrap()
.put(executors.into_iter());
}
}
/// Remove an executor from the bank's cache
fn remove_executor(&self, pubkey: &Pubkey) {
let _ = self.executor_cache.write().unwrap().remove(pubkey);
/// Unload a program from the bank's cache
fn remove_program_from_cache(&self, pubkey: &Pubkey) {
self.loaded_programs_cache
.write()
.unwrap()
.remove_programs([*pubkey].into_iter());
}
fn program_modification_slot(&self, pubkey: &Pubkey) -> Result<Slot> {
@ -4248,8 +4159,11 @@ impl Bank {
.map_err(|err| TransactionError::InstructionError(0, err))
}
pub fn clear_executors(&self) {
self.executor_cache.write().unwrap().clear();
pub fn clear_program_cache(&self) {
self.loaded_programs_cache
.write()
.unwrap()
.unload_all_programs();
}
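// Note, not part of the patch itself: clear_program_cache() replaces the removed
// clear_executors(), which reset the LFU BankExecutorCache to its default state. The new
// method instead unloads every program in loaded_programs_cache (see unload_all_programs()
// earlier in this commit), so test call sites switch from bank.clear_executors() to
// bank.clear_program_cache().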
/// Execute a transaction using the provided loaded accounts and update
@ -4507,51 +4421,6 @@ impl Bank {
loaded_programs_for_txs
}
#[allow(dead_code)] // Preparation for BankExecutorCache rework
fn replenish_executor_cache(
&self,
program_accounts_map: &HashMap<Pubkey, &Pubkey>,
) -> HashMap<Pubkey, Arc<LoadedProgram>> {
let mut loaded_programs_for_txs = HashMap::new();
let mut filter_missing_programs_time = Measure::start("filter_missing_programs_accounts");
let missing_executors = program_accounts_map
.keys()
.filter_map(|key| {
self.executor_cache
.read()
.unwrap()
.get(key)
.map(|program| {
loaded_programs_for_txs.insert(*key, program.clone());
program
})
.is_none()
.then_some(key)
})
.collect::<Vec<_>>();
filter_missing_programs_time.stop();
let executors = missing_executors.iter().map(|pubkey| {
let program = self.load_program(pubkey, false).unwrap_or_else(|err| {
// Create a tombstone for the program in the cache
debug!("Failed to load program {}, error {:?}", pubkey, err);
Arc::new(LoadedProgram::new_tombstone(
self.slot,
LoadedProgramType::FailedVerification,
))
});
loaded_programs_for_txs.insert(**pubkey, program.clone());
(**pubkey, program)
});
// avoid locking the cache if there are no new executors
if executors.len() > 0 {
self.executor_cache.write().unwrap().put(executors);
}
loaded_programs_for_txs
}
#[allow(clippy::type_complexity)]
pub fn load_and_execute_transactions(
&self,
@ -7703,7 +7572,7 @@ impl Bank {
// Clear new account
self.store_account(new_address, &AccountSharedData::default());
self.remove_executor(old_address);
self.remove_program_from_cache(old_address);
self.calculate_and_update_accounts_data_size_delta_off_chain(
old_account.data().len(),

View File

@ -36,9 +36,8 @@ use {
builtin_program::create_builtin,
compute_budget::{self, ComputeBudget, MAX_COMPUTE_UNIT_LIMIT},
declare_process_instruction,
executor_cache::TransactionExecutorCache,
invoke_context::mock_process_instruction,
loaded_programs::{LoadedProgram, LoadedProgramType, DELAY_VISIBILITY_SLOT_OFFSET},
loaded_programs::{LoadedProgramType, DELAY_VISIBILITY_SLOT_OFFSET},
prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType},
timings::ExecuteTimings,
},
@ -48,7 +47,7 @@ use {
AccountSharedData, ReadableAccount, WritableAccount,
},
account_utils::StateMut,
bpf_loader, bpf_loader_deprecated,
bpf_loader,
bpf_loader_upgradeable::{self, UpgradeableLoaderState},
client::SyncClient,
clock::{
@ -7392,120 +7391,6 @@ fn test_reconfigure_token2_native_mint() {
assert_eq!(native_mint_account.owner(), &inline_spl_token::id());
}
#[test]
fn test_bank_executor_cache() {
solana_logger::setup();
let (genesis_config, _) = create_genesis_config(1);
let bank = Bank::new_for_tests(&genesis_config);
let key1 = solana_sdk::pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let key3 = solana_sdk::pubkey::new_rand();
let key4 = solana_sdk::pubkey::new_rand();
let key5 = solana_sdk::pubkey::new_rand();
let executor = Arc::new(LoadedProgram::default());
fn new_executable_account(owner: Pubkey) -> AccountSharedData {
AccountSharedData::from(Account {
owner,
executable: true,
..Account::default()
})
}
let accounts = &[
(key1, new_executable_account(bpf_loader_upgradeable::id())),
(key2, new_executable_account(bpf_loader::id())),
(key3, new_executable_account(bpf_loader_deprecated::id())),
(key4, new_executable_account(native_loader::id())),
(key5, AccountSharedData::default()),
];
// don't do any work if not dirty
let executors =
TransactionExecutorCache::new((0..4).map(|i| (accounts[i].0, executor.clone())));
let executors = Rc::new(RefCell::new(executors));
bank.store_executors_which_added_to_the_cache(&executors);
bank.store_executors_which_were_deployed(&executors);
let stored_executors = bank.get_tx_executor_cache(accounts);
assert_eq!(stored_executors.borrow().visible.len(), 0);
// do work
let mut executors =
TransactionExecutorCache::new((2..3).map(|i| (accounts[i].0, executor.clone())));
executors.set(key1, executor.clone(), false, true, bank.slot());
executors.set(key2, executor.clone(), false, true, bank.slot());
executors.set(key3, executor.clone(), true, true, bank.slot());
executors.set(key4, executor, false, true, bank.slot());
let executors = Rc::new(RefCell::new(executors));
// store Missing
bank.store_executors_which_added_to_the_cache(&executors);
let stored_executors = bank.get_tx_executor_cache(accounts);
assert_eq!(stored_executors.borrow().visible.len(), 2);
assert!(stored_executors.borrow().visible.contains_key(&key1));
assert!(stored_executors.borrow().visible.contains_key(&key2));
// store Updated
bank.store_executors_which_were_deployed(&executors);
let stored_executors = bank.get_tx_executor_cache(accounts);
assert_eq!(stored_executors.borrow().visible.len(), 3);
assert!(stored_executors.borrow().visible.contains_key(&key1));
assert!(stored_executors.borrow().visible.contains_key(&key2));
assert!(stored_executors.borrow().visible.contains_key(&key3));
// Check inheritance
let bank = Bank::new_from_parent(&Arc::new(bank), &solana_sdk::pubkey::new_rand(), 1);
let stored_executors = bank.get_tx_executor_cache(accounts);
assert_eq!(stored_executors.borrow().visible.len(), 3);
assert!(stored_executors.borrow().visible.contains_key(&key1));
assert!(stored_executors.borrow().visible.contains_key(&key2));
assert!(stored_executors.borrow().visible.contains_key(&key3));
// Force compilation of an executor
let mut file = File::open("../programs/bpf_loader/test_elfs/out/noop_aligned.so").unwrap();
let mut elf = Vec::new();
file.read_to_end(&mut elf).unwrap();
let programdata_key = solana_sdk::pubkey::new_rand();
let mut program_account = AccountSharedData::new_data(
40,
&UpgradeableLoaderState::Program {
programdata_address: programdata_key,
},
&bpf_loader_upgradeable::id(),
)
.unwrap();
program_account.set_executable(true);
program_account.set_rent_epoch(1);
let programdata_data_offset = UpgradeableLoaderState::size_of_programdata_metadata();
let mut programdata_account = AccountSharedData::new(
40,
programdata_data_offset + elf.len(),
&bpf_loader_upgradeable::id(),
);
programdata_account
.set_state(&UpgradeableLoaderState::ProgramData {
slot: 42,
upgrade_authority_address: None,
})
.unwrap();
let mut data = programdata_account.data().to_vec();
data[programdata_data_offset..].copy_from_slice(&elf);
programdata_account.set_data_from_slice(&data);
programdata_account.set_rent_epoch(1);
bank.store_account_and_update_capitalization(&key1, &program_account);
bank.store_account_and_update_capitalization(&programdata_key, &programdata_account);
// Remove all
bank.remove_executor(&key1);
bank.remove_executor(&key2);
bank.remove_executor(&key3);
bank.remove_executor(&key4);
let stored_executors = bank.get_tx_executor_cache(accounts);
assert_eq!(stored_executors.borrow().visible.len(), 0);
}
#[test]
fn test_bank_load_program() {
solana_logger::setup();
@ -7555,61 +7440,6 @@ fn test_bank_load_program() {
);
}
#[test]
fn test_bank_executor_cow() {
solana_logger::setup();
let (genesis_config, _) = create_genesis_config(1);
let root = Arc::new(Bank::new_for_tests(&genesis_config));
let key1 = solana_sdk::pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let executor = Arc::new(LoadedProgram::default());
let executable_account = AccountSharedData::from(Account {
owner: bpf_loader_upgradeable::id(),
executable: true,
..Account::default()
});
let accounts = &[
(key1, executable_account.clone()),
(key2, executable_account),
];
// add one to root bank
let mut executors = TransactionExecutorCache::default();
executors.set(key1, executor.clone(), false, true, root.slot());
let executors = Rc::new(RefCell::new(executors));
root.store_executors_which_added_to_the_cache(&executors);
let executors = root.get_tx_executor_cache(accounts);
assert_eq!(executors.borrow().visible.len(), 1);
let fork1 = Bank::new_from_parent(&root, &Pubkey::default(), 1);
let fork2 = Bank::new_from_parent(&root, &Pubkey::default(), 2);
let executors = fork1.get_tx_executor_cache(accounts);
assert_eq!(executors.borrow().visible.len(), 1);
let executors = fork2.get_tx_executor_cache(accounts);
assert_eq!(executors.borrow().visible.len(), 1);
let mut executors = TransactionExecutorCache::default();
executors.set(key2, executor, false, true, fork1.slot());
let executors = Rc::new(RefCell::new(executors));
fork1.store_executors_which_added_to_the_cache(&executors);
let executors = fork1.get_tx_executor_cache(accounts);
assert_eq!(executors.borrow().visible.len(), 2);
let executors = fork2.get_tx_executor_cache(accounts);
assert_eq!(executors.borrow().visible.len(), 1);
fork1.remove_executor(&key1);
let executors = fork1.get_tx_executor_cache(accounts);
assert_eq!(executors.borrow().visible.len(), 1);
let executors = fork2.get_tx_executor_cache(accounts);
assert_eq!(executors.borrow().visible.len(), 1);
}
#[test]
fn test_bpf_loader_upgradeable_deploy_with_max_len() {
let (genesis_config, mint_keypair) = create_genesis_config(1_000_000_000);
@ -12536,7 +12366,7 @@ fn test_calculate_fee_with_request_heap_frame_flag() {
}
#[test]
fn test_runtime_feature_enable_with_executor_cache() {
fn test_runtime_feature_enable_with_program_cache() {
solana_logger::setup();
// Bank Setup