2021-12-03 09:00:31 -08:00
|
|
|
use {
|
|
|
|
crate::{
|
2023-05-23 18:48:41 -07:00
|
|
|
block_error::BlockError,
|
|
|
|
blockstore::Blockstore,
|
|
|
|
blockstore_db::BlockstoreError,
|
|
|
|
blockstore_meta::SlotMeta,
|
|
|
|
entry_notifier_service::{EntryNotification, EntryNotifierSender},
|
|
|
|
leader_schedule_cache::LeaderScheduleCache,
|
|
|
|
token_balances::collect_token_balances,
|
2020-09-24 07:36:22 -07:00
|
|
|
},
|
2021-12-03 09:00:31 -08:00
|
|
|
chrono_humanize::{Accuracy, HumanTime, Tense},
|
2022-04-06 03:47:19 -07:00
|
|
|
crossbeam_channel::Sender,
|
2021-12-03 09:00:31 -08:00
|
|
|
itertools::Itertools,
|
|
|
|
log::*,
|
|
|
|
rand::{seq::SliceRandom, thread_rng},
|
|
|
|
rayon::{prelude::*, ThreadPool},
|
2023-03-29 13:11:29 -07:00
|
|
|
scopeguard::defer,
|
2021-12-03 09:00:31 -08:00
|
|
|
solana_entry::entry::{
|
|
|
|
self, create_ticks, Entry, EntrySlice, EntryType, EntryVerificationStatus, VerifyRecyclers,
|
2021-11-30 18:16:13 -08:00
|
|
|
},
|
2022-07-05 11:58:51 -07:00
|
|
|
solana_measure::{measure, measure::Measure},
|
2023-05-04 10:33:01 -07:00
|
|
|
solana_metrics::datapoint_error,
|
2022-07-05 11:58:51 -07:00
|
|
|
solana_program_runtime::timings::{ExecuteTimingType, ExecuteTimings, ThreadExecuteTimings},
|
2022-05-05 13:00:50 -07:00
|
|
|
solana_rayon_threadlimit::{get_max_thread_count, get_thread_count},
|
2021-12-03 09:00:31 -08:00
|
|
|
solana_runtime::{
|
2022-10-05 14:44:35 -07:00
|
|
|
accounts_background_service::{AbsRequestSender, SnapshotRequestType},
|
2021-12-03 09:00:31 -08:00
|
|
|
accounts_db::{AccountShrinkThreshold, AccountsDbConfig},
|
|
|
|
accounts_index::AccountSecondaryIndexes,
|
|
|
|
accounts_update_notifier_interface::AccountsUpdateNotifier,
|
|
|
|
bank::{
|
2023-04-14 12:41:10 -07:00
|
|
|
Bank, TransactionBalancesSet, TransactionExecutionDetails, TransactionExecutionResult,
|
2023-04-18 07:31:23 -07:00
|
|
|
TransactionResults,
|
2021-12-03 09:00:31 -08:00
|
|
|
},
|
|
|
|
bank_forks::BankForks,
|
|
|
|
bank_utils,
|
|
|
|
commitment::VOTE_THRESHOLD_SIZE,
|
2022-02-03 10:00:27 -08:00
|
|
|
cost_model::CostModel,
|
2022-11-17 08:01:01 -08:00
|
|
|
epoch_accounts_hash::EpochAccountsHash,
|
2022-08-31 06:00:55 -07:00
|
|
|
prioritization_fee_cache::PrioritizationFeeCache,
|
2023-04-14 12:41:10 -07:00
|
|
|
rent_debits::RentDebits,
|
2022-04-11 17:28:10 -07:00
|
|
|
runtime_config::RuntimeConfig,
|
2021-12-03 09:00:31 -08:00
|
|
|
transaction_batch::TransactionBatch,
|
2022-03-24 10:09:48 -07:00
|
|
|
vote_account::VoteAccountsHashMap,
|
2021-12-03 09:00:31 -08:00
|
|
|
vote_sender_types::ReplayVoteSender,
|
|
|
|
},
|
|
|
|
solana_sdk::{
|
|
|
|
clock::{Slot, MAX_PROCESSING_AGE},
|
2023-02-13 11:09:48 -08:00
|
|
|
feature_set,
|
2021-12-03 09:00:31 -08:00
|
|
|
genesis_config::GenesisConfig,
|
|
|
|
hash::Hash,
|
|
|
|
pubkey::Pubkey,
|
2023-04-25 10:04:11 -07:00
|
|
|
saturating_add_assign,
|
2021-12-03 09:00:31 -08:00
|
|
|
signature::{Keypair, Signature},
|
|
|
|
timing,
|
|
|
|
transaction::{
|
|
|
|
Result, SanitizedTransaction, TransactionError, TransactionVerificationMode,
|
|
|
|
VersionedTransaction,
|
|
|
|
},
|
|
|
|
},
|
2022-08-04 23:20:27 -07:00
|
|
|
solana_transaction_status::token_balances::TransactionTokenBalancesSet,
|
2021-12-03 09:00:31 -08:00
|
|
|
std::{
|
2022-02-03 10:00:27 -08:00
|
|
|
borrow::Cow,
|
2021-12-03 09:00:31 -08:00
|
|
|
collections::{HashMap, HashSet},
|
|
|
|
path::PathBuf,
|
|
|
|
result,
|
2022-10-05 14:44:35 -07:00
|
|
|
sync::{
|
|
|
|
atomic::{AtomicBool, Ordering::Relaxed},
|
|
|
|
Arc, Mutex, RwLock,
|
|
|
|
},
|
2021-12-03 09:00:31 -08:00
|
|
|
time::{Duration, Instant},
|
|
|
|
},
|
|
|
|
thiserror::Error,
|
2019-11-04 21:14:55 -08:00
|
|
|
};
|
2019-05-29 17:16:36 -07:00
|
|
|
|
2022-06-23 12:37:38 -07:00
|
|
|
/// A locked transaction batch paired with the index of each of its
/// transactions (used when reporting statuses downstream, e.g. via
/// `send_transaction_status_batch`).
struct TransactionBatchWithIndexes<'a, 'b> {
    // The batch of sanitized transactions (with account locks held).
    pub batch: TransactionBatch<'a, 'b>,
    // One index per transaction in `batch`, in the same order.
    pub transaction_indexes: Vec<usize>,
}
|
|
|
|
|
|
|
|
/// An entry queued for replay plus the index of its first transaction
/// relative to the bank's running transaction count.
struct ReplayEntry {
    // The verified entry (tick or transactions).
    entry: EntryType,
    // Index of this entry's first transaction within the bank.
    starting_index: usize,
}
|
|
|
|
|
2022-05-05 13:00:50 -07:00
|
|
|
// get_max_thread_count to match number of threads in the old code.
// see: https://github.com/solana-labs/solana/pull/24853
lazy_static! {
    // Shared rayon pool used by blockstore processing to replay transaction
    // batches in parallel; threads are named "solBstoreProcNN" for profiling.
    static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new()
        .num_threads(get_max_thread_count())
        .thread_name(|i| format!("solBstoreProc{i:02}"))
        .build()
        .unwrap();
}
|
2019-05-29 17:16:36 -07:00
|
|
|
|
2021-12-29 23:42:32 -08:00
|
|
|
/// Returns a clone of the first `Err` in `results`, or `Ok(())` when every
/// result succeeded.
fn first_err(results: &[Result<()>]) -> Result<()> {
    results
        .iter()
        .find(|result| result.is_err())
        .cloned()
        .unwrap_or(Ok(()))
}
|
|
|
|
|
2020-07-21 13:06:49 -07:00
|
|
|
// Includes transaction signature for unit-testing
|
|
|
|
fn get_first_error(
|
|
|
|
batch: &TransactionBatch,
|
|
|
|
fee_collection_results: Vec<Result<()>>,
|
|
|
|
) -> Option<(Result<()>, Signature)> {
|
|
|
|
let mut first_err = None;
|
2021-08-17 15:17:56 -07:00
|
|
|
for (result, transaction) in fee_collection_results
|
|
|
|
.iter()
|
|
|
|
.zip(batch.sanitized_transactions())
|
|
|
|
{
|
2020-07-21 13:06:49 -07:00
|
|
|
if let Err(ref err) = result {
|
|
|
|
if first_err.is_none() {
|
2021-08-17 15:17:56 -07:00
|
|
|
first_err = Some((result.clone(), *transaction.signature()));
|
2020-07-21 13:06:49 -07:00
|
|
|
}
|
|
|
|
warn!(
|
|
|
|
"Unexpected validator error: {:?}, transaction: {:?}",
|
|
|
|
err, transaction
|
|
|
|
);
|
|
|
|
datapoint_error!(
|
|
|
|
"validator_process_entry_error",
|
|
|
|
(
|
|
|
|
"error",
|
2022-12-06 06:30:06 -08:00
|
|
|
format!("error: {err:?}, transaction: {transaction:?}"),
|
2020-07-21 13:06:49 -07:00
|
|
|
String
|
|
|
|
)
|
|
|
|
);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
first_err
|
|
|
|
}
|
|
|
|
|
2019-11-20 15:43:10 -08:00
|
|
|
/// Loads, executes, and commits one locked transaction batch on `bank`.
///
/// Side effects, in order:
/// - collects pre-execution token balances (only when a status sender exists),
/// - executes and commits the batch, accumulating into `timings`,
/// - forwards any votes found in the batch via `replay_vote_sender`,
/// - sends statuses/balances/rent debits to `transaction_status_sender`,
/// - updates `prioritization_fee_cache` with the executed transactions.
///
/// Returns the first fee-collection error in the batch (after logging every
/// error via `get_first_error`), or `Ok(())`.
fn execute_batch(
    batch: &TransactionBatchWithIndexes,
    bank: &Arc<Bank>,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    timings: &mut ExecuteTimings,
    log_messages_bytes_limit: Option<usize>,
    prioritization_fee_cache: &PrioritizationFeeCache,
) -> Result<()> {
    let TransactionBatchWithIndexes {
        batch,
        transaction_indexes,
    } = batch;
    // Token balances are only worth collecting when someone will consume them.
    let record_token_balances = transaction_status_sender.is_some();

    let mut mint_decimals: HashMap<Pubkey, u8> = HashMap::new();

    // Pre-execution balances must be captured before the batch is committed.
    let pre_token_balances = if record_token_balances {
        collect_token_balances(bank, batch, &mut mint_decimals)
    } else {
        vec![]
    };

    // The repeated `transaction_status_sender.is_some()` flags enable the
    // various recording options only when a consumer for them exists.
    let (tx_results, balances) = batch.bank().load_execute_and_commit_transactions(
        batch,
        MAX_PROCESSING_AGE,
        transaction_status_sender.is_some(),
        transaction_status_sender.is_some(),
        transaction_status_sender.is_some(),
        transaction_status_sender.is_some(),
        timings,
        log_messages_bytes_limit,
    );

    bank_utils::find_and_send_votes(
        batch.sanitized_transactions(),
        &tx_results,
        replay_vote_sender,
    );

    let TransactionResults {
        fee_collection_results,
        execution_results,
        rent_debits,
        ..
    } = tx_results;

    // Keep only the transactions that actually executed, for the
    // prioritization-fee cache update below.
    let executed_transactions = execution_results
        .iter()
        .zip(batch.sanitized_transactions())
        .filter_map(|(execution_result, tx)| execution_result.was_executed().then_some(tx))
        .collect_vec();

    if let Some(transaction_status_sender) = transaction_status_sender {
        let transactions = batch.sanitized_transactions().to_vec();
        // Post-execution balances are captured after commit, mirroring the
        // pre-execution capture above.
        let post_token_balances = if record_token_balances {
            collect_token_balances(bank, batch, &mut mint_decimals)
        } else {
            vec![]
        };

        let token_balances =
            TransactionTokenBalancesSet::new(pre_token_balances, post_token_balances);

        transaction_status_sender.send_transaction_status_batch(
            bank.clone(),
            transactions,
            execution_results,
            balances,
            token_balances,
            rent_debits,
            transaction_indexes.to_vec(),
        );
    }

    prioritization_fee_cache.update(bank, executed_transactions.into_iter());

    let first_err = get_first_error(batch, fee_collection_results);
    first_err.map(|(result, _)| result).unwrap_or(Ok(()))
}
|
|
|
|
|
2022-07-05 11:58:51 -07:00
|
|
|
/// Metrics collected by `execute_batches_internal` for one call.
#[derive(Default)]
struct ExecuteBatchesInternalMetrics {
    // Execution timings accumulated per rayon thread index.
    execution_timings_per_thread: HashMap<usize, ThreadExecuteTimings>,
    // Number of batches executed in this call.
    total_batches_len: u64,
    // Wall-clock microseconds spent executing all batches.
    execute_batches_us: u64,
}
|
|
|
|
|
2022-02-03 10:00:27 -08:00
|
|
|
/// Executes every batch in parallel on `PAR_THREAD_POOL` via `execute_batch`,
/// accumulating execution timings per worker thread.
///
/// Returns metrics on success, or the first error produced by any batch.
/// Panics if `batches` is empty (callers are expected to check first).
fn execute_batches_internal(
    bank: &Arc<Bank>,
    batches: &[TransactionBatchWithIndexes],
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    log_messages_bytes_limit: Option<usize>,
    prioritization_fee_cache: &PrioritizationFeeCache,
) -> Result<ExecuteBatchesInternalMetrics> {
    assert!(!batches.is_empty());
    // Keyed by rayon thread index; guarded by a Mutex because worker threads
    // update it concurrently.
    let execution_timings_per_thread: Mutex<HashMap<usize, ThreadExecuteTimings>> =
        Mutex::new(HashMap::new());

    let mut execute_batches_elapsed = Measure::start("execute_batches_elapsed");
    let results: Vec<Result<()>> = PAR_THREAD_POOL.install(|| {
        batches
            .into_par_iter()
            .map(|transaction_batch| {
                let transaction_count =
                    transaction_batch.batch.sanitized_transactions().len() as u64;
                let mut timings = ExecuteTimings::default();
                let (result, execute_batches_time): (Result<()>, Measure) = measure!(
                    {
                        execute_batch(
                            transaction_batch,
                            bank,
                            transaction_status_sender,
                            replay_vote_sender,
                            &mut timings,
                            log_messages_bytes_limit,
                            prioritization_fee_cache,
                        )
                    },
                    "execute_batch",
                );

                // current_thread_index() is Some here because this closure
                // only runs inside the pool's install().
                let thread_index = PAR_THREAD_POOL.current_thread_index().unwrap();
                execution_timings_per_thread
                    .lock()
                    .unwrap()
                    .entry(thread_index)
                    .and_modify(|thread_execution_time| {
                        let ThreadExecuteTimings {
                            total_thread_us,
                            total_transactions_executed,
                            execute_timings: total_thread_execute_timings,
                        } = thread_execution_time;
                        *total_thread_us += execute_batches_time.as_us();
                        *total_transactions_executed += transaction_count;
                        total_thread_execute_timings
                            .saturating_add_in_place(ExecuteTimingType::TotalBatchesLen, 1);
                        total_thread_execute_timings.accumulate(&timings);
                    })
                    .or_insert(ThreadExecuteTimings {
                        total_thread_us: execute_batches_time.as_us(),
                        total_transactions_executed: transaction_count,
                        execute_timings: timings,
                    });
                result
            })
            .collect()
    });
    execute_batches_elapsed.stop();

    // Propagate the first failure, if any batch failed.
    first_err(&results)?;

    Ok(ExecuteBatchesInternalMetrics {
        execution_timings_per_thread: execution_timings_per_thread.into_inner().unwrap(),
        total_batches_len: batches.len() as u64,
        execute_batches_us: execute_batches_elapsed.as_us(),
    })
}
|
|
|
|
|
2022-05-10 13:39:08 -07:00
|
|
|
fn rebatch_transactions<'a>(
|
|
|
|
lock_results: &'a [Result<()>],
|
|
|
|
bank: &'a Arc<Bank>,
|
|
|
|
sanitized_txs: &'a [SanitizedTransaction],
|
|
|
|
start: usize,
|
|
|
|
end: usize,
|
2022-06-23 12:37:38 -07:00
|
|
|
transaction_indexes: &'a [usize],
|
|
|
|
) -> TransactionBatchWithIndexes<'a, 'a> {
|
2022-05-10 13:39:08 -07:00
|
|
|
let txs = &sanitized_txs[start..=end];
|
|
|
|
let results = &lock_results[start..=end];
|
|
|
|
let mut tx_batch = TransactionBatch::new(results.to_vec(), bank, Cow::from(txs));
|
|
|
|
tx_batch.set_needs_unlock(false);
|
|
|
|
|
2022-06-23 12:37:38 -07:00
|
|
|
let transaction_indexes = transaction_indexes[start..=end].to_vec();
|
|
|
|
TransactionBatchWithIndexes {
|
|
|
|
batch: tx_batch,
|
|
|
|
transaction_indexes,
|
|
|
|
}
|
2022-05-10 13:39:08 -07:00
|
|
|
}
|
|
|
|
|
2022-02-03 10:00:27 -08:00
|
|
|
/// Executes a set of already-locked transaction batches, first optionally
/// rebatching them by estimated compute cost so work spreads evenly across
/// the replay thread pool.
///
/// When the `apply_cost_tracker_during_replay` feature is active, each
/// transaction's cost is added to the bank's cost tracker; an over-limit
/// batch fails the whole call with the corresponding `TransactionError`.
fn execute_batches(
    bank: &Arc<Bank>,
    batches: &[TransactionBatchWithIndexes],
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    timing: &mut BatchExecutionTiming,
    log_messages_bytes_limit: Option<usize>,
    prioritization_fee_cache: &PrioritizationFeeCache,
) -> Result<()> {
    if batches.is_empty() {
        return Ok(());
    }

    // Flatten all batches into parallel vectors of lock results,
    // transactions, and indexes so they can be re-sliced below.
    let ((lock_results, sanitized_txs), transaction_indexes): ((Vec<_>, Vec<_>), Vec<_>) = batches
        .iter()
        .flat_map(|batch| {
            batch
                .batch
                .lock_results()
                .iter()
                .cloned()
                .zip(batch.batch.sanitized_transactions().to_vec())
                .zip(batch.transaction_indexes.to_vec())
        })
        .unzip();

    let mut minimal_tx_cost = u64::MAX;
    let mut total_cost: u64 = 0;
    // NOTE: this map closure mutates minimal_tx_cost/total_cost as a side
    // effect while building the per-transaction cost list.
    let tx_costs = sanitized_txs
        .iter()
        .map(|tx| {
            let tx_cost = CostModel::calculate_cost(tx, &bank.feature_set);
            let cost = tx_cost.sum();
            minimal_tx_cost = std::cmp::min(minimal_tx_cost, cost);
            total_cost = total_cost.saturating_add(cost);
            tx_cost
        })
        .collect::<Vec<_>>();

    if bank
        .feature_set
        .is_active(&feature_set::apply_cost_tracker_during_replay::id())
    {
        let mut cost_tracker = bank.write_cost_tracker().unwrap();
        for tx_cost in &tx_costs {
            cost_tracker
                .try_add(tx_cost)
                .map_err(TransactionError::from)?;
        }
    }

    let target_batch_count = get_thread_count() as u64;

    let mut tx_batches: Vec<TransactionBatchWithIndexes> = vec![];
    // Only rebatch when there is enough total cost to fill every target
    // batch with more than the cheapest transaction; otherwise keep the
    // original batching.
    let rebatched_txs = if total_cost > target_batch_count.saturating_mul(minimal_tx_cost) {
        let target_batch_cost = total_cost / target_batch_count;
        let mut batch_cost: u64 = 0;
        let mut slice_start = 0;
        // Greedily cut a new batch whenever the accumulated cost reaches the
        // target, or at the final transaction.
        tx_costs
            .into_iter()
            .enumerate()
            .for_each(|(index, tx_cost)| {
                let next_index = index + 1;
                batch_cost = batch_cost.saturating_add(tx_cost.sum());
                if batch_cost >= target_batch_cost || next_index == sanitized_txs.len() {
                    let tx_batch = rebatch_transactions(
                        &lock_results,
                        bank,
                        &sanitized_txs,
                        slice_start,
                        index,
                        &transaction_indexes,
                    );
                    slice_start = next_index;
                    tx_batches.push(tx_batch);
                    batch_cost = 0;
                }
            });
        &tx_batches[..]
    } else {
        batches
    };

    let execute_batches_internal_metrics = execute_batches_internal(
        bank,
        rebatched_txs,
        transaction_status_sender,
        replay_vote_sender,
        log_messages_bytes_limit,
        prioritization_fee_cache,
    )?;

    timing.accumulate(execute_batches_internal_metrics);
    Ok(())
}
|
|
|
|
|
2019-03-12 16:46:41 -07:00
|
|
|
/// Process an ordered list of entries in parallel
/// 1. In order lock accounts for each entry while the lock succeeds, up to a Tick entry
/// 2. Process the locked group in parallel
/// 3. Register the `Tick` if it's available
/// 4. Update the leader scheduler, goto 1
///
/// This method is for use testing against a single Bank, and assumes `Bank::transaction_count()`
/// represents the number of transactions executed in this Bank
pub fn process_entries_for_tests(
    bank: &Arc<Bank>,
    entries: Vec<Entry>,
    randomize: bool,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
) -> Result<()> {
    // Full signature/sanitization verification, since test entries come in
    // as raw versioned transactions.
    let verify_transaction = {
        let bank = bank.clone();
        move |versioned_tx: VersionedTransaction| -> Result<SanitizedTransaction> {
            bank.verify_transaction(versioned_tx, TransactionVerificationMode::FullVerification)
        }
    };

    // Seed the running transaction index from the bank's current count so
    // per-entry starting indexes line up with what the bank will record.
    let mut entry_starting_index: usize = bank.transaction_count().try_into().unwrap();
    let mut batch_timing = BatchExecutionTiming::default();
    let mut replay_entries: Vec<_> =
        entry::verify_transactions(entries, Arc::new(verify_transaction))?
            .into_iter()
            .map(|entry| {
                let starting_index = entry_starting_index;
                if let EntryType::Transactions(ref transactions) = entry {
                    entry_starting_index = entry_starting_index.saturating_add(transactions.len());
                }
                ReplayEntry {
                    entry,
                    starting_index,
                }
            })
            .collect();

    // Tests don't consume prioritization fees; a zero-capacity cache is
    // passed just to satisfy process_entries' signature.
    let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);
    let result = process_entries(
        bank,
        &mut replay_entries,
        randomize,
        transaction_status_sender,
        replay_vote_sender,
        &mut batch_timing,
        None,
        &ignored_prioritization_fee_cache,
    );

    debug!("process_entries: {:?}", batch_timing);
    result
}
|
|
|
|
|
2021-03-31 16:59:19 -07:00
|
|
|
// Note: If randomize is true this will shuffle entries' transactions in-place.
/// Replays verified entries on `bank`, batching consecutive non-conflicting
/// transaction entries for parallel execution and registering tick hashes at
/// block boundaries. Returns the first execution or lock error encountered.
fn process_entries(
    bank: &Arc<Bank>,
    entries: &mut [ReplayEntry],
    randomize: bool,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    batch_timing: &mut BatchExecutionTiming,
    log_messages_bytes_limit: Option<usize>,
    prioritization_fee_cache: &PrioritizationFeeCache,
) -> Result<()> {
    // accumulator for entries that can be processed in parallel
    let mut batches = vec![];
    let mut tick_hashes = vec![];
    let mut rng = thread_rng();

    for ReplayEntry {
        entry,
        starting_index,
    } in entries
    {
        match entry {
            EntryType::Tick(hash) => {
                // If it's a tick, save it for later
                tick_hashes.push(hash);
                if bank.is_block_boundary(bank.tick_height() + tick_hashes.len() as u64) {
                    // If it's a tick that will cause a new blockhash to be created,
                    // execute the group and register the tick
                    execute_batches(
                        bank,
                        &batches,
                        transaction_status_sender,
                        replay_vote_sender,
                        batch_timing,
                        log_messages_bytes_limit,
                        prioritization_fee_cache,
                    )?;
                    batches.clear();
                    for hash in &tick_hashes {
                        bank.register_tick(hash);
                    }
                    tick_hashes.clear();
                }
            }
            EntryType::Transactions(transactions) => {
                let starting_index = *starting_index;
                // When randomizing, shuffle the transactions together with
                // their original indexes so status reporting stays aligned.
                let transaction_indexes = if randomize {
                    let mut transactions_and_indexes: Vec<(SanitizedTransaction, usize)> =
                        transactions.drain(..).zip(starting_index..).collect();
                    transactions_and_indexes.shuffle(&mut rng);
                    let (txs, indexes): (Vec<_>, Vec<_>) =
                        transactions_and_indexes.into_iter().unzip();
                    *transactions = txs;
                    indexes
                } else {
                    (starting_index..starting_index.saturating_add(transactions.len())).collect()
                };

                loop {
                    // try to lock the accounts
                    let batch = bank.prepare_sanitized_batch(transactions);
                    let first_lock_err = first_err(batch.lock_results());

                    // if locking worked
                    if first_lock_err.is_ok() {
                        batches.push(TransactionBatchWithIndexes {
                            batch,
                            transaction_indexes,
                        });
                        // done with this entry
                        break;
                    }
                    // else we failed to lock, 2 possible reasons
                    if batches.is_empty() {
                        // An entry has account lock conflicts with *itself*, which should not happen
                        // if generated by a properly functioning leader
                        datapoint_error!(
                            "validator_process_entry_error",
                            (
                                "error",
                                format!(
                                    "Lock accounts error, entry conflicts with itself, txs: {transactions:?}"
                                ),
                                String
                            )
                        );
                        // bail
                        first_lock_err?;
                    } else {
                        // else we have an entry that conflicts with a prior entry
                        // execute the current queue and try to process this entry again
                        execute_batches(
                            bank,
                            &batches,
                            transaction_status_sender,
                            replay_vote_sender,
                            batch_timing,
                            log_messages_bytes_limit,
                            prioritization_fee_cache,
                        )?;
                        batches.clear();
                    }
                }
            }
        }
    }
    // Flush any remaining batches and register trailing ticks.
    execute_batches(
        bank,
        &batches,
        transaction_status_sender,
        replay_vote_sender,
        batch_timing,
        log_messages_bytes_limit,
        prioritization_fee_cache,
    )?;
    for hash in tick_hashes {
        bank.register_tick(hash);
    }
    Ok(())
}
|
|
|
|
|
2020-01-14 17:15:26 -08:00
|
|
|
/// Errors that can occur while processing the blockstore (loading entries,
/// validating blocks and transactions, and verifying root-bank state).
#[derive(Error, Debug)]
pub enum BlockstoreProcessorError {
    #[error("failed to load entries, error: {0}")]
    FailedToLoadEntries(#[from] BlockstoreError),

    #[error("failed to load meta")]
    FailedToLoadMeta,

    #[error("invalid block error: {0}")]
    InvalidBlock(#[from] BlockError),

    #[error("invalid transaction error: {0}")]
    InvalidTransaction(#[from] TransactionError),

    #[error("no valid forks found")]
    NoValidForksFound,

    #[error("invalid hard fork slot {0}")]
    InvalidHardFork(Slot),

    #[error("root bank with mismatched capitalization at {0}")]
    RootBankWithMismatchedCapitalization(Slot),
}
|
|
|
|
|
2020-01-13 13:13:52 -08:00
|
|
|
/// Callback for accessing bank state while processing the blockstore.
/// Must be `Sync + Send` since processing may invoke it from worker threads.
pub type ProcessCallback = Arc<dyn Fn(&Bank) + Sync + Send>;
|
2019-10-08 14:58:49 -07:00
|
|
|
|
2020-01-10 12:16:44 -08:00
|
|
|
/// Options controlling how the blockstore is processed at startup.
#[derive(Default, Clone)]
pub struct ProcessOptions {
    /// Run PoH, transaction signature and other transaction verifications on the entries.
    pub run_verification: bool,
    // Presumably caches leader schedules for all epochs — TODO confirm.
    pub full_leader_cache: bool,
    // Stop processing at this slot, if set.
    pub halt_at_slot: Option<Slot>,
    // Additional hard-fork slots to apply, if set.
    pub new_hard_forks: Option<Vec<Slot>>,
    // Pubkeys to emit extra debug output for, if set.
    pub debug_keys: Option<Arc<HashSet<Pubkey>>>,
    pub account_indexes: AccountSecondaryIndexes,
    pub limit_load_slot_count_from_snapshot: Option<usize>,
    pub allow_dead_slots: bool,
    pub accounts_db_test_hash_calculation: bool,
    pub accounts_db_skip_shrink: bool,
    pub accounts_db_config: Option<AccountsDbConfig>,
    pub verify_index: bool,
    pub shrink_ratio: AccountShrinkThreshold,
    pub runtime_config: RuntimeConfig,
    pub on_halt_store_hash_raw_data_for_debug: bool,
    /// true if after processing the contents of the blockstore at startup, we should run an accounts hash calc
    /// This is useful for debugging.
    pub run_final_accounts_hash_calc: bool,
}
|
|
|
|
|
2022-03-05 05:46:23 -08:00
|
|
|
/// Test helper: loads bank forks from `blockstore` and replays it from the
/// root, with a stand-in background thread answering EpochAccountsHash
/// requests so rooted banks never block in `Bank::freeze()`.
pub fn test_process_blockstore(
    genesis_config: &GenesisConfig,
    blockstore: &Blockstore,
    opts: &ProcessOptions,
    exit: &Arc<AtomicBool>,
) -> (Arc<RwLock<BankForks>>, LeaderScheduleCache) {
    // Spin up a thread to be a fake Accounts Background Service. Need to intercept and handle all
    // EpochAccountsHash requests so future rooted banks do not hang in Bank::freeze() waiting for
    // an in-flight EAH calculation to complete.
    let (snapshot_request_sender, snapshot_request_receiver) = crossbeam_channel::unbounded();
    let abs_request_sender = AbsRequestSender::new(snapshot_request_sender);
    let bg_exit = Arc::new(AtomicBool::new(false));
    let bg_thread = {
        let exit = Arc::clone(&bg_exit);
        std::thread::spawn(move || {
            while !exit.load(Relaxed) {
                // Answer every EAH request with a unique (fake) hash so
                // waiting banks can proceed.
                snapshot_request_receiver
                    .try_iter()
                    .filter(|snapshot_request| {
                        snapshot_request.request_type == SnapshotRequestType::EpochAccountsHash
                    })
                    .for_each(|snapshot_request| {
                        snapshot_request
                            .snapshot_root_bank
                            .rc
                            .accounts
                            .accounts_db
                            .epoch_accounts_hash_manager
                            .set_valid(
                                EpochAccountsHash::new(Hash::new_unique()),
                                snapshot_request.snapshot_root_bank.slot(),
                            )
                    });
                std::thread::sleep(Duration::from_millis(100));
            }
        })
    };

    let (bank_forks, leader_schedule_cache, ..) = crate::bank_forks_utils::load_bank_forks(
        genesis_config,
        blockstore,
        Vec::new(),
        None,
        None,
        opts,
        None,
        None,
        None,
        exit,
    );

    process_blockstore_from_root(
        blockstore,
        &bank_forks,
        &leader_schedule_cache,
        opts,
        None,
        None,
        None,
        &abs_request_sender,
    )
    .unwrap();

    // Shut down the fake background service before returning.
    bg_exit.store(true, Relaxed);
    bg_thread.join().unwrap();

    (bank_forks, leader_schedule_cache)
}
|
2019-08-13 17:20:14 -07:00
|
|
|
|
2022-03-05 05:46:23 -08:00
|
|
|
/// Creates the genesis bank (slot 0), replays slot 0's entries from `blockstore`, and returns
/// the resulting `BankForks` whose root is the processed bank 0.
///
/// * `account_paths` - on-disk locations for accounts storage, forwarded to `Bank::new_with_paths`.
/// * `cache_block_meta_sender` / `entry_notification_sender` - optional notification channels
///   forwarded to `process_bank_0`.
/// * `accounts_update_notifier` - optional accounts-db update hook (e.g. for plugins —
///   TODO confirm consumer).
pub(crate) fn process_blockstore_for_bank_0(
    genesis_config: &GenesisConfig,
    blockstore: &Blockstore,
    account_paths: Vec<PathBuf>,
    opts: &ProcessOptions,
    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
    entry_notification_sender: Option<&EntryNotifierSender>,
    accounts_update_notifier: Option<AccountsUpdateNotifier>,
    exit: &Arc<AtomicBool>,
) -> Arc<RwLock<BankForks>> {
    // Setup bank for slot 0
    let bank0 = Bank::new_with_paths(
        genesis_config,
        Arc::new(opts.runtime_config.clone()),
        account_paths,
        opts.debug_keys.clone(),
        None,
        opts.account_indexes.clone(),
        opts.shrink_ratio,
        false,
        opts.accounts_db_config.clone(),
        accounts_update_notifier,
        exit,
    );
    let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));

    info!("Processing ledger for slot 0...");
    // Bank 0 requires special handling (see process_bank_0); the root bank of the freshly
    // created forks IS bank 0.
    process_bank_0(
        &bank_forks.read().unwrap().root_bank(),
        blockstore,
        opts,
        &VerifyRecyclers::default(),
        cache_block_meta_sender,
        entry_notification_sender,
    );
    bank_forks
}
|
|
|
|
|
2021-09-07 13:43:43 -07:00
|
|
|
/// Process blockstore from a known root bank
///
/// Expects `bank_forks` to contain exactly one bank — the root — with no parent
/// (asserted below). Replays all qualifying descendants of that root found in
/// `blockstore`, reports timing/metrics datapoints, and asserts that no bank is
/// left unfrozen when replay completes.
#[allow(clippy::too_many_arguments)]
pub fn process_blockstore_from_root(
    blockstore: &Blockstore,
    bank_forks: &RwLock<BankForks>,
    leader_schedule_cache: &LeaderScheduleCache,
    opts: &ProcessOptions,
    transaction_status_sender: Option<&TransactionStatusSender>,
    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
    entry_notification_sender: Option<&EntryNotifierSender>,
    accounts_background_request_sender: &AbsRequestSender,
) -> result::Result<(), BlockstoreProcessorError> {
    let (start_slot, start_slot_hash) = {
        // Starting slot must be a root, and thus has no parents
        assert_eq!(bank_forks.read().unwrap().banks().len(), 1);
        let bank = bank_forks.read().unwrap().root_bank();
        assert!(bank.parent().is_none());
        (bank.slot(), bank.hash())
    };

    info!("Processing ledger from slot {}...", start_slot);
    let now = Instant::now();

    // Ensure start_slot is rooted for correct replay; also ensure start_slot and
    // qualifying children are marked as connected
    if blockstore.is_primary_access() {
        blockstore
            .mark_slots_as_if_rooted_normally_at_startup(
                vec![(start_slot, Some(start_slot_hash))],
                true,
            )
            .expect("Couldn't mark start_slot as root in startup");
        blockstore
            .set_and_chain_connected_on_root_and_next_slots(start_slot)
            .expect("Couldn't mark start_slot as connected during startup")
    } else {
        // Secondary access is read-only; the blockstore cannot be mutated here.
        info!(
            "Start slot {} isn't a root, and won't be updated due to secondary blockstore access",
            start_slot
        );
    }

    if let Ok(Some(highest_slot)) = blockstore.highest_slot() {
        info!("ledger holds data through slot {}", highest_slot);
    }

    let mut timing = ExecuteTimings::default();
    // Replay only happens when the start slot has a meta entry; otherwise we booted from a
    // snapshot with no overlapping ledger data and there is nothing to process.
    let (num_slots_processed, num_new_roots_found) = if let Some(start_slot_meta) = blockstore
        .meta(start_slot)
        .unwrap_or_else(|_| panic!("Failed to get meta for slot {start_slot}"))
    {
        load_frozen_forks(
            bank_forks,
            &start_slot_meta,
            blockstore,
            leader_schedule_cache,
            opts,
            transaction_status_sender,
            cache_block_meta_sender,
            entry_notification_sender,
            &mut timing,
            accounts_background_request_sender,
        )?
    } else {
        // If there's no meta in the blockstore for the input `start_slot`,
        // then we started from a snapshot and are unable to process anything.
        //
        // If the ledger has any data at all, the snapshot was likely taken at
        // a slot that is not within the range of ledger min/max slot(s).
        warn!(
            "Starting slot {} is not in Blockstore, unable to process",
            start_slot
        );
        (0, 0)
    };

    let processing_time = now.elapsed();

    datapoint_info!(
        "process_blockstore_from_root",
        ("total_time_us", processing_time.as_micros(), i64),
        (
            "frozen_banks",
            bank_forks.read().unwrap().frozen_banks().len(),
            i64
        ),
        ("slot", bank_forks.read().unwrap().root(), i64),
        ("num_slots_processed", num_slots_processed, i64),
        ("num_new_roots_found", num_new_roots_found, i64),
        ("forks", bank_forks.read().unwrap().banks().len(), i64),
    );

    info!("ledger processing timing: {:?}", timing);
    {
        let bank_forks = bank_forks.read().unwrap();
        let mut bank_slots = bank_forks.banks().keys().copied().collect::<Vec<_>>();
        bank_slots.sort_unstable();

        info!(
            "ledger processed in {}. root slot is {}, {} bank{}: {}",
            HumanTime::from(chrono::Duration::from_std(processing_time).unwrap())
                .to_text_en(Accuracy::Precise, Tense::Present),
            bank_forks.root(),
            bank_slots.len(),
            if bank_slots.len() > 1 { "s" } else { "" },
            bank_slots.iter().map(|slot| slot.to_string()).join(", "),
        );
        // Every bank should have been frozen by replay; an active bank here is a bug.
        assert!(bank_forks.active_bank_slots().is_empty());
    }

    Ok(())
}
|
|
|
|
|
2020-01-14 17:15:26 -08:00
|
|
|
/// Verify that a segment of entries has the correct number of ticks and hashes
|
2022-03-03 02:46:29 -08:00
|
|
|
fn verify_ticks(
|
|
|
|
bank: &Bank,
|
2019-08-13 17:20:14 -07:00
|
|
|
entries: &[Entry],
|
2020-01-14 17:15:26 -08:00
|
|
|
slot_full: bool,
|
|
|
|
tick_hash_count: &mut u64,
|
|
|
|
) -> std::result::Result<(), BlockError> {
|
|
|
|
let next_bank_tick_height = bank.tick_height() + entries.tick_count();
|
|
|
|
let max_bank_tick_height = bank.max_tick_height();
|
2020-08-20 21:56:25 -07:00
|
|
|
|
2020-01-14 17:15:26 -08:00
|
|
|
if next_bank_tick_height > max_bank_tick_height {
|
|
|
|
warn!("Too many entry ticks found in slot: {}", bank.slot());
|
2021-03-01 14:57:37 -08:00
|
|
|
return Err(BlockError::TooManyTicks);
|
2020-01-14 17:15:26 -08:00
|
|
|
}
|
2019-08-13 17:20:14 -07:00
|
|
|
|
2020-01-14 17:15:26 -08:00
|
|
|
if next_bank_tick_height < max_bank_tick_height && slot_full {
|
2021-03-01 14:57:37 -08:00
|
|
|
info!("Too few entry ticks found in slot: {}", bank.slot());
|
|
|
|
return Err(BlockError::TooFewTicks);
|
2020-01-14 17:15:26 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
if next_bank_tick_height == max_bank_tick_height {
|
|
|
|
let has_trailing_entry = entries.last().map(|e| !e.is_tick()).unwrap_or_default();
|
|
|
|
if has_trailing_entry {
|
2019-10-31 13:38:50 -07:00
|
|
|
warn!("Slot: {} did not end with a tick entry", bank.slot());
|
2020-01-14 17:15:26 -08:00
|
|
|
return Err(BlockError::TrailingEntry);
|
2019-10-31 13:38:50 -07:00
|
|
|
}
|
|
|
|
|
2020-01-14 17:15:26 -08:00
|
|
|
if !slot_full {
|
|
|
|
warn!("Slot: {} was not marked full", bank.slot());
|
|
|
|
return Err(BlockError::InvalidLastTick);
|
2019-10-31 13:38:50 -07:00
|
|
|
}
|
2020-01-14 17:15:26 -08:00
|
|
|
}
|
2019-10-31 13:38:50 -07:00
|
|
|
|
2020-01-14 17:15:26 -08:00
|
|
|
let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0);
|
|
|
|
if !entries.verify_tick_hash_count(tick_hash_count, hashes_per_tick) {
|
|
|
|
warn!(
|
|
|
|
"Tick with invalid number of hashes found in slot: {}",
|
|
|
|
bank.slot()
|
|
|
|
);
|
|
|
|
return Err(BlockError::InvalidTickHashCount);
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
fn confirm_full_slot(
|
|
|
|
blockstore: &Blockstore,
|
|
|
|
bank: &Arc<Bank>,
|
|
|
|
opts: &ProcessOptions,
|
|
|
|
recyclers: &VerifyRecyclers,
|
2020-03-23 12:19:11 -07:00
|
|
|
progress: &mut ConfirmationProgress,
|
2021-05-10 12:14:56 -07:00
|
|
|
transaction_status_sender: Option<&TransactionStatusSender>,
|
2023-05-10 16:20:51 -07:00
|
|
|
entry_notification_sender: Option<&EntryNotifierSender>,
|
2020-08-07 11:21:35 -07:00
|
|
|
replay_vote_sender: Option<&ReplayVoteSender>,
|
2021-03-05 09:01:52 -08:00
|
|
|
timing: &mut ExecuteTimings,
|
2020-01-14 17:15:26 -08:00
|
|
|
) -> result::Result<(), BlockstoreProcessorError> {
|
2021-03-05 09:01:52 -08:00
|
|
|
let mut confirmation_timing = ConfirmationTiming::default();
|
2023-03-22 11:03:30 -07:00
|
|
|
let skip_verification = !opts.run_verification;
|
2023-04-26 18:10:16 -07:00
|
|
|
let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);
|
2022-08-31 06:00:55 -07:00
|
|
|
|
2020-01-14 17:15:26 -08:00
|
|
|
confirm_slot(
|
|
|
|
blockstore,
|
|
|
|
bank,
|
2021-03-05 09:01:52 -08:00
|
|
|
&mut confirmation_timing,
|
2020-03-23 12:19:11 -07:00
|
|
|
progress,
|
2020-01-14 17:15:26 -08:00
|
|
|
skip_verification,
|
2020-07-14 20:14:48 -07:00
|
|
|
transaction_status_sender,
|
2023-05-10 16:20:51 -07:00
|
|
|
entry_notification_sender,
|
2020-08-07 11:21:35 -07:00
|
|
|
replay_vote_sender,
|
2020-01-14 17:15:26 -08:00
|
|
|
recyclers,
|
2021-02-06 17:26:42 -08:00
|
|
|
opts.allow_dead_slots,
|
2022-07-11 08:53:18 -07:00
|
|
|
opts.runtime_config.log_messages_bytes_limit,
|
2023-04-26 18:10:16 -07:00
|
|
|
&ignored_prioritization_fee_cache,
|
2020-01-14 17:15:26 -08:00
|
|
|
)?;
|
|
|
|
|
2023-03-28 15:37:34 -07:00
|
|
|
timing.accumulate(&confirmation_timing.batch_execute.totals);
|
2021-03-03 15:07:45 -08:00
|
|
|
|
2020-01-14 17:15:26 -08:00
|
|
|
if !bank.is_complete() {
|
|
|
|
Err(BlockstoreProcessorError::InvalidBlock(
|
|
|
|
BlockError::Incomplete,
|
|
|
|
))
|
|
|
|
} else {
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-03-20 13:50:58 -07:00
|
|
|
/// Measures different parts of the slot confirmation processing pipeline.
#[derive(Debug)]
pub struct ConfirmationTiming {
    /// Moment when the `ConfirmationTiming` instance was created.  Used to track the total wall
    /// clock time from the moment the first shard for the slot is received and to the moment the
    /// slot is complete.
    pub started: Instant,

    /// Wall clock time used by the slot confirmation code, including PoH/signature verification,
    /// and replay.  As replay can run in parallel with the verification, this value can not be
    /// recovered from the `replay_elapsed` and or `{poh,transaction}_verify_elapsed`.  This
    /// includes failed cases, when `confirm_slot_entries` exits with an error.  In microseconds.
    pub confirmation_elapsed: u64,

    /// Wall clock time used by the entry replay code.  Does not include the PoH or the
    /// transaction signature/precompiles verification, but can overlap with the PoH and
    /// signature verification.  In microseconds.
    pub replay_elapsed: u64,

    /// Wall clock times, used for the PoH verification of entries.  In microseconds.
    pub poh_verify_elapsed: u64,

    /// Wall clock time, used for the signature verification as well as precompiles verification.
    /// In microseconds.
    pub transaction_verify_elapsed: u64,

    /// Wall clock time spent loading data sets (and entries) from the blockstore.  This does not
    /// include the case when the blockstore load failed.  In microseconds.
    pub fetch_elapsed: u64,

    /// Same as `fetch_elapsed` above, but for the case when the blockstore load fails.  In
    /// microseconds.
    pub fetch_fail_elapsed: u64,

    /// `batch_execute()` measurements.
    pub batch_execute: BatchExecutionTiming,
}
|
|
|
|
|
|
|
|
impl Default for ConfirmationTiming {
|
|
|
|
fn default() -> Self {
|
|
|
|
Self {
|
|
|
|
started: Instant::now(),
|
2023-03-29 13:11:29 -07:00
|
|
|
confirmation_elapsed: 0,
|
2023-03-28 15:37:34 -07:00
|
|
|
replay_elapsed: 0,
|
|
|
|
poh_verify_elapsed: 0,
|
|
|
|
transaction_verify_elapsed: 0,
|
|
|
|
fetch_elapsed: 0,
|
|
|
|
fetch_fail_elapsed: 0,
|
|
|
|
batch_execute: BatchExecutionTiming::default(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Measures times related to transaction execution in a slot.
#[derive(Debug, Default)]
pub struct BatchExecutionTiming {
    /// Time used by transaction execution.  Accumulated across multiple threads that are running
    /// `execute_batch()`.
    pub totals: ExecuteTimings,

    /// Wall clock time used by the transaction execution part of pipeline.
    /// [`ConfirmationTiming::replay_elapsed`] includes this time.  In microseconds.
    pub wall_clock_us: u64,

    /// Time used to execute transactions, via `execute_batch()`, in the thread that consumed the
    /// most time.
    pub slowest_thread: ThreadExecuteTimings,
}
|
|
|
|
|
2023-03-28 15:37:34 -07:00
|
|
|
impl BatchExecutionTiming {
    /// Folds the metrics of one `execute_batches` invocation into this aggregate:
    /// wall clock time, batch counters, per-thread execution timings, and the
    /// timings of the slowest participating thread.
    fn accumulate(&mut self, new_batch: ExecuteBatchesInternalMetrics) {
        let Self {
            totals,
            wall_clock_us,
            slowest_thread,
        } = self;

        saturating_add_assign!(*wall_clock_us, new_batch.execute_batches_us);

        use ExecuteTimingType::{NumExecuteBatches, TotalBatchesLen};
        totals.saturating_add_in_place(TotalBatchesLen, new_batch.total_batches_len);
        totals.saturating_add_in_place(NumExecuteBatches, 1);

        // Sum execution timings across every thread that ran batches.
        for thread_times in new_batch.execution_timings_per_thread.values() {
            totals.accumulate(&thread_times.execute_timings);
        }

        // Track the thread that consumed the most time; its batch count is bumped so the
        // slowest-thread record stays comparable across accumulations.
        let slowest = new_batch
            .execution_timings_per_thread
            .values()
            .max_by_key(|thread_times| thread_times.total_thread_us);

        if let Some(slowest) = slowest {
            slowest_thread.accumulate(slowest);
            slowest_thread
                .execute_timings
                .saturating_add_in_place(NumExecuteBatches, 1);
        };
    }
}
|
|
|
|
|
|
|
|
/// Running state carried across repeated `confirm_slot` calls for the same slot, so replay can
/// resume from where the previous call left off.
#[derive(Default)]
pub struct ConfirmationProgress {
    // Hash of the last entry processed; the PoH verification chain continues from here.
    pub last_entry: Hash,
    // Running PoH hash count within the current tick, threaded through `verify_ticks`.
    pub tick_hash_count: u64,
    // Number of shreds already consumed; used as the fetch offset into the blockstore.
    pub num_shreds: u64,
    // Number of entries processed so far; used as the base for entry notification indexes.
    pub num_entries: usize,
    // Number of transactions processed so far; used as the base for per-entry tx start indexes.
    pub num_txs: usize,
}
|
|
|
|
|
|
|
|
impl ConfirmationProgress {
|
|
|
|
pub fn new(last_entry: Hash) -> Self {
|
|
|
|
Self {
|
|
|
|
last_entry,
|
|
|
|
..Self::default()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-08-13 17:20:14 -07:00
|
|
|
|
2021-02-06 17:26:42 -08:00
|
|
|
#[allow(clippy::too_many_arguments)]
|
2020-01-14 17:15:26 -08:00
|
|
|
pub fn confirm_slot(
|
|
|
|
blockstore: &Blockstore,
|
|
|
|
bank: &Arc<Bank>,
|
|
|
|
timing: &mut ConfirmationTiming,
|
|
|
|
progress: &mut ConfirmationProgress,
|
|
|
|
skip_verification: bool,
|
2021-05-10 12:14:56 -07:00
|
|
|
transaction_status_sender: Option<&TransactionStatusSender>,
|
2023-05-10 16:20:51 -07:00
|
|
|
entry_notification_sender: Option<&EntryNotifierSender>,
|
2020-08-07 11:21:35 -07:00
|
|
|
replay_vote_sender: Option<&ReplayVoteSender>,
|
2020-01-14 17:15:26 -08:00
|
|
|
recyclers: &VerifyRecyclers,
|
2021-02-06 17:26:42 -08:00
|
|
|
allow_dead_slots: bool,
|
2022-07-11 08:53:18 -07:00
|
|
|
log_messages_bytes_limit: Option<usize>,
|
2022-08-31 06:00:55 -07:00
|
|
|
prioritization_fee_cache: &PrioritizationFeeCache,
|
2020-01-14 17:15:26 -08:00
|
|
|
) -> result::Result<(), BlockstoreProcessorError> {
|
|
|
|
let slot = bank.slot();
|
|
|
|
|
2022-04-15 09:30:20 -07:00
|
|
|
let slot_entries_load_result = {
|
2020-01-14 17:15:26 -08:00
|
|
|
let mut load_elapsed = Measure::start("load_elapsed");
|
|
|
|
let load_result = blockstore
|
2021-02-06 17:26:42 -08:00
|
|
|
.get_slot_entries_with_shred_info(slot, progress.num_shreds, allow_dead_slots)
|
2020-01-14 17:15:26 -08:00
|
|
|
.map_err(BlockstoreProcessorError::FailedToLoadEntries);
|
|
|
|
load_elapsed.stop();
|
|
|
|
if load_result.is_err() {
|
|
|
|
timing.fetch_fail_elapsed += load_elapsed.as_us();
|
|
|
|
} else {
|
|
|
|
timing.fetch_elapsed += load_elapsed.as_us();
|
|
|
|
}
|
|
|
|
load_result
|
|
|
|
}?;
|
|
|
|
|
2022-04-15 09:30:20 -07:00
|
|
|
confirm_slot_entries(
|
|
|
|
bank,
|
|
|
|
slot_entries_load_result,
|
|
|
|
timing,
|
|
|
|
progress,
|
|
|
|
skip_verification,
|
|
|
|
transaction_status_sender,
|
2023-05-10 16:20:51 -07:00
|
|
|
entry_notification_sender,
|
2022-04-15 09:30:20 -07:00
|
|
|
replay_vote_sender,
|
|
|
|
recyclers,
|
2022-07-11 08:53:18 -07:00
|
|
|
log_messages_bytes_limit,
|
2022-08-31 06:00:55 -07:00
|
|
|
prioritization_fee_cache,
|
2022-04-15 09:30:20 -07:00
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Verifies and replays a batch of entries for `bank`'s slot.
///
/// Pipeline, with verification deliberately overlapped with replay:
/// 1. emit entry notifications and compute per-entry transaction start indexes,
/// 2. tick/hash validation (`verify_ticks`) unless `skip_verification`,
/// 3. kick off PoH entry verification and transaction signature verification
///    (possibly on the GPU — the `start_*`/`finish_verify` split below),
/// 4. replay the entries (`process_entries`) while verification may still be running,
/// 5. collect both verification results, then the replay result, and update `progress`.
#[allow(clippy::too_many_arguments)]
fn confirm_slot_entries(
    bank: &Arc<Bank>,
    slot_entries_load_result: (Vec<Entry>, u64, bool),
    timing: &mut ConfirmationTiming,
    progress: &mut ConfirmationProgress,
    skip_verification: bool,
    transaction_status_sender: Option<&TransactionStatusSender>,
    entry_notification_sender: Option<&EntryNotifierSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    recyclers: &VerifyRecyclers,
    log_messages_bytes_limit: Option<usize>,
    prioritization_fee_cache: &PrioritizationFeeCache,
) -> result::Result<(), BlockstoreProcessorError> {
    let ConfirmationTiming {
        confirmation_elapsed,
        replay_elapsed,
        poh_verify_elapsed,
        transaction_verify_elapsed,
        batch_execute: batch_execute_timing,
        ..
    } = timing;

    // Charge total wall clock time to confirmation_elapsed on every exit path,
    // including early `return Err(...)`s.
    let confirmation_elapsed_timer = Measure::start("confirmation_elapsed");
    defer! {
        *confirmation_elapsed += confirmation_elapsed_timer.end_as_us();
    };

    let slot = bank.slot();
    let (entries, num_shreds, slot_full) = slot_entries_load_result;
    let num_entries = entries.len();
    // For each entry, the index of its first transaction within the slot (continuing from
    // progress.num_txs); consumed below when building ReplayEntry values.
    let mut entry_tx_starting_indexes = Vec::with_capacity(num_entries);
    let mut entry_tx_starting_index = progress.num_txs;
    let num_txs = entries
        .iter()
        .enumerate()
        .map(|(i, entry)| {
            // Notification failure is logged but does not abort confirmation.
            if let Some(entry_notification_sender) = entry_notification_sender {
                let entry_index = progress.num_entries.saturating_add(i);
                if let Err(err) = entry_notification_sender.send(EntryNotification {
                    slot,
                    index: entry_index,
                    entry: entry.into(),
                }) {
                    warn!(
                        "Slot {}, entry {} entry_notification_sender send failed: {:?}",
                        slot, entry_index, err
                    );
                }
            }
            let num_txs = entry.transactions.len();
            let next_tx_starting_index = entry_tx_starting_index.saturating_add(num_txs);
            entry_tx_starting_indexes.push(entry_tx_starting_index);
            entry_tx_starting_index = next_tx_starting_index;
            num_txs
        })
        .sum::<usize>();
    trace!(
        "Fetched entries for slot {}, num_entries: {}, num_shreds: {}, num_txs: {}, slot_full: {}",
        slot,
        num_entries,
        num_shreds,
        num_txs,
        slot_full,
    );

    if !skip_verification {
        let tick_hash_count = &mut progress.tick_hash_count;
        verify_ticks(bank, &entries, slot_full, tick_hash_count).map_err(|err| {
            warn!(
                "{:#?}, slot: {}, entry len: {}, tick_height: {}, last entry: {}, \
                 last_blockhash: {}, shred_index: {}, slot_full: {}",
                err,
                slot,
                num_entries,
                bank.tick_height(),
                progress.last_entry,
                bank.last_blockhash(),
                num_shreds,
                slot_full,
            );
            err
        })?;
    }

    // Captured before `entries` is consumed by start_verify_transactions; written back into
    // progress only after everything succeeds.
    let last_entry_hash = entries.last().map(|e| e.hash);
    // Start PoH verification; an immediate Failure status means the CPU-side check already
    // rejected the chain. Otherwise completion is collected via finish_verify() further down.
    let verifier = if !skip_verification {
        datapoint_debug!("verify-batch-size", ("size", num_entries as i64, i64));
        let entry_state = entries.start_verify(&progress.last_entry, recyclers.clone());
        if entry_state.status() == EntryVerificationStatus::Failure {
            warn!("Ledger proof of history failed at slot: {}", slot);
            return Err(BlockError::InvalidEntryHash.into());
        }
        Some(entry_state)
    } else {
        None
    };

    let verify_transaction = {
        let bank = bank.clone();
        move |versioned_tx: VersionedTransaction,
              verification_mode: TransactionVerificationMode|
              -> Result<SanitizedTransaction> {
            bank.verify_transaction(versioned_tx, verification_mode)
        }
    };

    let transaction_verification_start = Instant::now();
    let transaction_verification_result = entry::start_verify_transactions(
        entries,
        skip_verification,
        recyclers.clone(),
        Arc::new(verify_transaction),
    );
    // CPU-side cost only; any GPU time is added separately via gpu_verify_duration() below.
    let transaction_cpu_duration_us =
        timing::duration_as_us(&transaction_verification_start.elapsed());

    let mut transaction_verification_result = match transaction_verification_result {
        Ok(transaction_verification_result) => transaction_verification_result,
        Err(err) => {
            warn!(
                "Ledger transaction signature verification failed at slot: {}",
                bank.slot()
            );
            return Err(err.into());
        }
    };

    // Take the (sanitized) entries back out of the verification result for replay.
    let entries = transaction_verification_result
        .entries()
        .expect("Transaction verification generates entries");

    let mut replay_timer = Measure::start("replay_elapsed");
    let mut replay_entries: Vec<_> = entries
        .into_iter()
        .zip(entry_tx_starting_indexes)
        .map(|(entry, tx_starting_index)| ReplayEntry {
            entry,
            starting_index: tx_starting_index,
        })
        .collect();
    // Note: This will shuffle entries' transactions in-place.
    let process_result = process_entries(
        bank,
        &mut replay_entries,
        true, // shuffle transactions.
        transaction_status_sender,
        replay_vote_sender,
        batch_execute_timing,
        log_messages_bytes_limit,
        prioritization_fee_cache,
    )
    .map_err(BlockstoreProcessorError::from);
    replay_timer.stop();
    *replay_elapsed += replay_timer.as_us();

    {
        // If running signature verification on the GPU, wait for that computation to finish, and
        // get the result of it. If we did the signature verification on the CPU, this just returns
        // the already-computed result produced in start_verify_transactions. Either way, check the
        // result of the signature verification.
        let valid = transaction_verification_result.finish_verify();

        // The GPU Entry verification (if any) is kicked off right when the CPU-side Entry
        // verification finishes, so these times should be disjoint
        *transaction_verify_elapsed +=
            transaction_cpu_duration_us + transaction_verification_result.gpu_verify_duration();

        if !valid {
            warn!(
                "Ledger transaction signature verification failed at slot: {}",
                bank.slot()
            );
            return Err(TransactionError::SignatureFailure.into());
        }
    }

    // Collect the PoH verification result started earlier (None when verification was skipped).
    if let Some(mut verifier) = verifier {
        let verified = verifier.finish_verify();
        *poh_verify_elapsed += verifier.poh_duration_us();
        if !verified {
            warn!("Ledger proof of history failed at slot: {}", bank.slot());
            return Err(BlockError::InvalidEntryHash.into());
        }
    }

    // Replay errors are surfaced only after both verifications passed.
    process_result?;

    progress.num_shreds += num_shreds;
    progress.num_entries += num_entries;
    progress.num_txs += num_txs;
    if let Some(last_entry_hash) = last_entry_hash {
        progress.last_entry = last_entry_hash;
    }

    Ok(())
}
|
|
|
|
|
|
|
|
// Special handling required for processing the entries in slot 0
//
// Replays slot 0 into `bank0` (which must BE the slot-0 bank — asserted), freezes it, records
// its bank hash when the blockstore is writable, and emits its block meta notification.
// Panics if replay fails, since a ledger whose slot 0 can't be processed is unusable without
// a snapshot.
fn process_bank_0(
    bank0: &Arc<Bank>,
    blockstore: &Blockstore,
    opts: &ProcessOptions,
    recyclers: &VerifyRecyclers,
    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
    entry_notification_sender: Option<&EntryNotifierSender>,
) {
    assert_eq!(bank0.slot(), 0);
    let mut progress = ConfirmationProgress::new(bank0.last_blockhash());
    confirm_full_slot(
        blockstore,
        bank0,
        opts,
        recyclers,
        &mut progress,
        None,
        entry_notification_sender,
        None,
        &mut ExecuteTimings::default(),
    )
    .expect("Failed to process bank 0 from ledger. Did you forget to provide a snapshot?");
    // The bank hash only exists after freeze; persist it only with primary (writable) access.
    bank0.freeze();
    if blockstore.is_primary_access() {
        blockstore.insert_bank_hash(bank0.slot(), bank0.hash(), false);
    }
    cache_block_meta(bank0, cache_block_meta_sender);
}
|
|
|
|
|
2020-01-10 12:16:44 -08:00
|
|
|
// Given a bank, add its children to the pending slots queue if those children slots are
|
2019-08-13 17:20:14 -07:00
|
|
|
// complete
|
|
|
|
fn process_next_slots(
|
|
|
|
bank: &Arc<Bank>,
|
|
|
|
meta: &SlotMeta,
|
2020-01-13 13:13:52 -08:00
|
|
|
blockstore: &Blockstore,
|
2019-08-13 17:20:14 -07:00
|
|
|
leader_schedule_cache: &LeaderScheduleCache,
|
2022-03-03 02:46:29 -08:00
|
|
|
pending_slots: &mut Vec<(SlotMeta, Bank, Hash)>,
|
2022-10-12 15:24:27 -07:00
|
|
|
halt_at_slot: Option<Slot>,
|
2020-01-13 13:13:52 -08:00
|
|
|
) -> result::Result<(), BlockstoreProcessorError> {
|
2019-08-13 17:20:14 -07:00
|
|
|
if meta.next_slots.is_empty() {
|
|
|
|
return Ok(());
|
|
|
|
}
|
|
|
|
|
|
|
|
// This is a fork point if there are multiple children, create a new child bank for each fork
|
|
|
|
for next_slot in &meta.next_slots {
|
2022-10-12 15:24:27 -07:00
|
|
|
let skip_next_slot = halt_at_slot
|
|
|
|
.map(|halt_at_slot| *next_slot > halt_at_slot)
|
|
|
|
.unwrap_or(false);
|
|
|
|
if skip_next_slot {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2020-01-13 13:13:52 -08:00
|
|
|
let next_meta = blockstore
|
2019-08-13 17:20:14 -07:00
|
|
|
.meta(*next_slot)
|
2019-02-20 15:42:35 -08:00
|
|
|
.map_err(|err| {
|
2019-08-13 17:20:14 -07:00
|
|
|
warn!("Failed to load meta for slot {}: {:?}", next_slot, err);
|
2020-01-13 13:13:52 -08:00
|
|
|
BlockstoreProcessorError::FailedToLoadMeta
|
2019-02-20 15:42:35 -08:00
|
|
|
})?
|
|
|
|
.unwrap();
|
|
|
|
|
2020-01-13 13:13:52 -08:00
|
|
|
// Only process full slots in blockstore_processor, replay_stage
|
2019-08-13 17:20:14 -07:00
|
|
|
// handles any partials
|
|
|
|
if next_meta.is_full() {
|
2022-03-03 02:46:29 -08:00
|
|
|
let next_bank = Bank::new_from_parent(
|
2021-06-18 06:34:46 -07:00
|
|
|
bank,
|
2019-08-13 17:20:14 -07:00
|
|
|
&leader_schedule_cache
|
2021-06-18 06:34:46 -07:00
|
|
|
.slot_leader_at(*next_slot, Some(bank))
|
2019-08-13 17:20:14 -07:00
|
|
|
.unwrap(),
|
|
|
|
*next_slot,
|
2022-03-03 02:46:29 -08:00
|
|
|
);
|
2020-01-01 11:19:20 -08:00
|
|
|
trace!(
|
2021-04-09 17:21:01 -07:00
|
|
|
"New bank for slot {}, parent slot is {}",
|
2020-01-01 11:19:20 -08:00
|
|
|
next_slot,
|
|
|
|
bank.slot(),
|
|
|
|
);
|
2020-01-10 12:16:44 -08:00
|
|
|
pending_slots.push((next_meta, next_bank, bank.last_blockhash()));
|
2019-08-13 17:20:14 -07:00
|
|
|
}
|
|
|
|
}
|
2019-05-20 19:04:18 -07:00
|
|
|
|
2019-08-13 17:20:14 -07:00
|
|
|
// Reverse sort by slot, so the next slot to be processed can be popped
|
2020-01-10 12:16:44 -08:00
|
|
|
pending_slots.sort_by(|a, b| b.1.slot().cmp(&a.1.slot()));
|
2019-08-13 17:20:14 -07:00
|
|
|
Ok(())
|
|
|
|
}
|
2019-04-19 02:39:44 -07:00
|
|
|
|
2023-05-04 10:33:01 -07:00
|
|
|
/// Starting with the root slot corresponding to `start_slot_meta`, iteratively
/// find and process children slots from the blockstore.
///
/// Returns a tuple (a, b) where a is the number of slots processed and b is
/// the number of newly found cluster roots.
///
/// Processing walks `pending_slots` (populated via `process_next_slots`) in
/// ascending slot order, replaying each complete slot into a bank. Slots that
/// fail replay are dropped from the bank forks. Once replay catches up to the
/// highest known root, newer roots are discovered from supermajority vote
/// accounts and applied via `BankForks::set_root`.
#[allow(clippy::too_many_arguments)]
fn load_frozen_forks(
    bank_forks: &RwLock<BankForks>,
    start_slot_meta: &SlotMeta,
    blockstore: &Blockstore,
    leader_schedule_cache: &LeaderScheduleCache,
    opts: &ProcessOptions,
    transaction_status_sender: Option<&TransactionStatusSender>,
    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
    entry_notification_sender: Option<&EntryNotifierSender>,
    timing: &mut ExecuteTimings,
    accounts_background_request_sender: &AbsRequestSender,
) -> result::Result<(u64, usize), BlockstoreProcessorError> {
    let blockstore_max_root = blockstore.max_root();
    let mut root = bank_forks.read().unwrap().root();
    let max_root = std::cmp::max(root, blockstore_max_root);
    info!(
        "load_frozen_forks() latest root from blockstore: {}, max_root: {}",
        blockstore_max_root, max_root,
    );

    // The total number of slots processed
    let mut total_slots_processed = 0;
    // The total number of newly identified root slots
    let mut total_rooted_slots = 0;

    // Seed the work queue with the children of the starting slot.
    let mut pending_slots = vec![];
    process_next_slots(
        &bank_forks
            .read()
            .unwrap()
            .get(start_slot_meta.slot)
            .unwrap(),
        start_slot_meta,
        blockstore,
        leader_schedule_cache,
        &mut pending_slots,
        opts.halt_at_slot,
    )?;

    let on_halt_store_hash_raw_data_for_debug = opts.on_halt_store_hash_raw_data_for_debug;
    // Skip the replay loop entirely if we are already halted at the root.
    if Some(bank_forks.read().unwrap().root()) != opts.halt_at_slot {
        let recyclers = VerifyRecyclers::default();
        let mut all_banks = HashMap::new();

        // Periodic progress reporting; counters below are reset after each report.
        const STATUS_REPORT_INTERVAL: Duration = Duration::from_secs(2);
        let mut last_status_report = Instant::now();
        let mut slots_processed = 0;
        let mut txs = 0;
        let mut set_root_us = 0;
        let mut root_retain_us = 0;
        let mut process_single_slot_us = 0;
        let mut voting_us = 0;

        while !pending_slots.is_empty() {
            timing.details.per_program_timings.clear();
            let (meta, bank, last_entry_hash) = pending_slots.pop().unwrap();
            let slot = bank.slot();
            if last_status_report.elapsed() > STATUS_REPORT_INTERVAL {
                let secs = last_status_report.elapsed().as_secs() as f32;
                let slots_per_sec = slots_processed as f32 / secs;
                let txs_per_sec = txs as f32 / secs;
                info!(
                    "processing ledger: slot={slot}, \
                    root_slot={root} \
                    slots={slots_processed}, \
                    slots/s={slots_per_sec}, \
                    txs/s={txs_per_sec}"
                );
                debug!(
                    "processing ledger timing: \
                    set_root_us={set_root_us}, \
                    root_retain_us={root_retain_us}, \
                    process_single_slot_us:{process_single_slot_us}, \
                    voting_us: {voting_us}"
                );

                last_status_report = Instant::now();
                slots_processed = 0;
                txs = 0;
                set_root_us = 0;
                root_retain_us = 0;
                process_single_slot_us = 0;
                voting_us = 0;
            }

            let mut progress = ConfirmationProgress::new(last_entry_hash);
            let mut m = Measure::start("process_single_slot");
            let bank = bank_forks.write().unwrap().insert_from_ledger(bank);
            if process_single_slot(
                blockstore,
                &bank,
                opts,
                &recyclers,
                &mut progress,
                transaction_status_sender,
                cache_block_meta_sender,
                entry_notification_sender,
                None,
                timing,
            )
            .is_err()
            {
                // Replay failed: evict the bank from bank forks and move on to
                // the next pending slot.
                assert!(bank_forks.write().unwrap().remove(bank.slot()).is_some());
                continue;
            }
            txs += progress.num_txs;

            // Block must be frozen by this point; otherwise,
            // process_single_slot() would have errored above.
            assert!(bank.is_frozen());
            all_banks.insert(bank.slot(), bank.clone());
            m.stop();
            process_single_slot_us += m.as_us();

            let mut m = Measure::start("voting");
            // If we've reached the last known root in blockstore, start looking
            // for newer cluster confirmed roots
            let new_root_bank = {
                if bank_forks.read().unwrap().root() >= max_root {
                    supermajority_root_from_vote_accounts(
                        bank.slot(),
                        bank.total_epoch_stake(),
                        &bank.vote_accounts(),
                    ).and_then(|supermajority_root| {
                        if supermajority_root > root {
                            // If there's a cluster confirmed root greater than our last
                            // replayed root, then because the cluster confirmed root should
                            // be descended from our last root, it must exist in `all_banks`
                            let cluster_root_bank = all_banks.get(&supermajority_root).unwrap();

                            // cluster root must be a descendant of our root, otherwise something
                            // is drastically wrong
                            assert!(cluster_root_bank.ancestors.contains_key(&root));
                            info!(
                                "blockstore processor found new cluster confirmed root: {}, observed in bank: {}",
                                cluster_root_bank.slot(), bank.slot()
                            );

                            // Ensure cluster-confirmed root and parents are set as root in blockstore
                            let mut rooted_slots = vec![];
                            let mut new_root_bank = cluster_root_bank.clone();
                            loop {
                                if new_root_bank.slot() == root { break; } // Found the last root in the chain, yay!
                                assert!(new_root_bank.slot() > root);

                                rooted_slots.push((new_root_bank.slot(), Some(new_root_bank.hash())));
                                // As noted, the cluster confirmed root should be descended from
                                // our last root; therefore parent should be set
                                new_root_bank = new_root_bank.parent().unwrap();
                            }
                            total_rooted_slots += rooted_slots.len();
                            if blockstore.is_primary_access() {
                                blockstore
                                    .mark_slots_as_if_rooted_normally_at_startup(rooted_slots, true)
                                    .expect("Blockstore::mark_slots_as_if_rooted_normally_at_startup() should succeed");
                            }
                            Some(cluster_root_bank)
                        } else {
                            None
                        }
                    })
                } else if blockstore.is_root(slot) {
                    Some(&bank)
                } else {
                    None
                }
            };
            m.stop();
            voting_us += m.as_us();

            if let Some(new_root_bank) = new_root_bank {
                let mut m = Measure::start("set_root");
                root = new_root_bank.slot();

                leader_schedule_cache.set_root(new_root_bank);
                let _ = bank_forks.write().unwrap().set_root(
                    root,
                    accounts_background_request_sender,
                    None,
                );
                m.stop();
                set_root_us += m.as_us();

                // Filter out all non descendants of the new root
                let mut m = Measure::start("filter pending slots");
                pending_slots
                    .retain(|(_, pending_bank, _)| pending_bank.ancestors.contains_key(&root));
                all_banks.retain(|_, bank| bank.ancestors.contains_key(&root));
                m.stop();
                root_retain_us += m.as_us();
            }

            slots_processed += 1;
            total_slots_processed += 1;

            trace!(
                "Bank for {}slot {} is complete",
                if root == slot { "root " } else { "" },
                slot,
            );

            // Stop once we have replayed up to (and including) the halt slot.
            let done_processing = opts
                .halt_at_slot
                .map(|halt_at_slot| slot >= halt_at_slot)
                .unwrap_or(false);
            if done_processing {
                if opts.run_final_accounts_hash_calc {
                    bank.run_final_hash_calc(on_halt_store_hash_raw_data_for_debug);
                }
                break;
            }

            // Queue up this bank's complete children for replay.
            process_next_slots(
                &bank,
                &meta,
                blockstore,
                leader_schedule_cache,
                &mut pending_slots,
                opts.halt_at_slot,
            )?;
        }
    } else if on_halt_store_hash_raw_data_for_debug {
        bank_forks
            .read()
            .unwrap()
            .root_bank()
            .run_final_hash_calc(on_halt_store_hash_raw_data_for_debug);
    }

    Ok((total_slots_processed, total_rooted_slots))
}
|
|
|
|
|
2020-09-17 21:33:08 -07:00
|
|
|
// `roots` is sorted largest to smallest by root slot
|
|
|
|
fn supermajority_root(roots: &[(Slot, u64)], total_epoch_stake: u64) -> Option<Slot> {
|
|
|
|
if roots.is_empty() {
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
|
2020-08-20 21:56:25 -07:00
|
|
|
// Find latest root
|
|
|
|
let mut total = 0;
|
2020-09-17 21:33:08 -07:00
|
|
|
let mut prev_root = roots[0].0;
|
|
|
|
for (root, stake) in roots.iter() {
|
|
|
|
assert!(*root <= prev_root);
|
2020-08-20 21:56:25 -07:00
|
|
|
total += stake;
|
|
|
|
if total as f64 / total_epoch_stake as f64 > VOTE_THRESHOLD_SIZE {
|
|
|
|
return Some(*root);
|
|
|
|
}
|
2020-09-17 21:33:08 -07:00
|
|
|
prev_root = *root;
|
2020-08-20 21:56:25 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
None
|
|
|
|
}
|
|
|
|
|
2021-08-30 08:54:01 -07:00
|
|
|
fn supermajority_root_from_vote_accounts(
|
2020-09-17 21:33:08 -07:00
|
|
|
bank_slot: Slot,
|
|
|
|
total_epoch_stake: u64,
|
2022-03-24 10:09:48 -07:00
|
|
|
vote_accounts: &VoteAccountsHashMap,
|
2021-08-30 08:54:01 -07:00
|
|
|
) -> Option<Slot> {
|
2020-11-30 09:18:33 -08:00
|
|
|
let mut roots_stakes: Vec<(Slot, u64)> = vote_accounts
|
2021-08-30 08:54:01 -07:00
|
|
|
.iter()
|
2020-08-20 21:56:25 -07:00
|
|
|
.filter_map(|(key, (stake, account))| {
|
2021-08-30 08:54:01 -07:00
|
|
|
if *stake == 0 {
|
2020-08-20 21:56:25 -07:00
|
|
|
return None;
|
|
|
|
}
|
|
|
|
|
2020-11-30 09:18:33 -08:00
|
|
|
match account.vote_state().as_ref() {
|
|
|
|
Err(_) => {
|
|
|
|
warn!(
|
|
|
|
"Unable to get vote_state from account {} in bank: {}",
|
|
|
|
key, bank_slot
|
|
|
|
);
|
|
|
|
None
|
|
|
|
}
|
2021-08-30 08:54:01 -07:00
|
|
|
Ok(vote_state) => Some((vote_state.root_slot?, *stake)),
|
2020-08-20 21:56:25 -07:00
|
|
|
}
|
|
|
|
})
|
|
|
|
.collect();
|
|
|
|
|
2020-09-17 21:33:08 -07:00
|
|
|
// Sort from greatest to smallest slot
|
|
|
|
roots_stakes.sort_unstable_by(|a, b| a.0.cmp(&b.0).reverse());
|
2020-08-20 21:56:25 -07:00
|
|
|
|
|
|
|
// Find latest root
|
2020-09-17 21:33:08 -07:00
|
|
|
supermajority_root(&roots_stakes, total_epoch_stake)
|
2020-08-20 21:56:25 -07:00
|
|
|
}
|
|
|
|
|
2020-01-10 12:16:44 -08:00
|
|
|
// Processes and replays the contents of a single slot, returns Error
// if failed to play the slot
//
// On success the bank is frozen, its hash is written to the blockstore
// (primary access only), and block metadata is forwarded to the cache.
// On replay failure, the slot is marked dead in the blockstore (primary
// access only) before the error is propagated to the caller.
#[allow(clippy::too_many_arguments)]
fn process_single_slot(
    blockstore: &Blockstore,
    bank: &Arc<Bank>,
    opts: &ProcessOptions,
    recyclers: &VerifyRecyclers,
    progress: &mut ConfirmationProgress,
    transaction_status_sender: Option<&TransactionStatusSender>,
    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
    entry_notification_sender: Option<&EntryNotifierSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    timing: &mut ExecuteTimings,
) -> result::Result<(), BlockstoreProcessorError> {
    // Mark corrupt slots as dead so validators don't replay this slot and
    // see AlreadyProcessed errors later in ReplayStage
    confirm_full_slot(
        blockstore,
        bank,
        opts,
        recyclers,
        progress,
        transaction_status_sender,
        entry_notification_sender,
        replay_vote_sender,
        timing,
    )
    .map_err(|err| {
        let slot = bank.slot();
        warn!("slot {} failed to verify: {}", slot, err);
        // Only a writable (primary) blockstore can record the dead slot.
        if blockstore.is_primary_access() {
            blockstore
                .set_dead_slot(slot)
                .expect("Failed to mark slot as dead in blockstore");
        } else {
            info!(
                "Failed slot {} won't be marked dead due to being secondary blockstore access",
                slot
            );
        }
        err
    })?;

    bank.freeze(); // all banks handled by this routine are created from complete slots
    if blockstore.is_primary_access() {
        blockstore.insert_bank_hash(bank.slot(), bank.hash(), false);
    }
    cache_block_meta(bank, cache_block_meta_sender);

    Ok(())
}
|
|
|
|
|
2021-10-22 21:25:54 -07:00
|
|
|
// Messages delivered through a `TransactionStatusSender`.
#[allow(clippy::large_enum_variant)]
pub enum TransactionStatusMessage {
    // A batch of replayed transactions with their status/balance metadata.
    Batch(TransactionStatusBatch),
    // The bank for this slot has been frozen.
    Freeze(Slot),
}
|
|
|
|
|
2019-11-20 15:43:10 -08:00
|
|
|
// One batch of transaction-status data, sent per replayed transaction batch.
// The vectors are parallel: entry i of each describes transaction i.
pub struct TransactionStatusBatch {
    // Bank the transactions in this batch were processed against.
    pub bank: Arc<Bank>,
    pub transactions: Vec<SanitizedTransaction>,
    // `None` for transactions that were not executed.
    pub execution_results: Vec<Option<TransactionExecutionDetails>>,
    pub balances: TransactionBalancesSet,
    pub token_balances: TransactionTokenBalancesSet,
    pub rent_debits: Vec<RentDebits>,
    // Position of each transaction within its block (assumption based on the
    // field name — confirm against the producer).
    pub transaction_indexes: Vec<usize>,
}
|
2020-09-24 07:36:22 -07:00
|
|
|
|
2021-02-01 13:00:51 -08:00
|
|
|
// Cloneable handle for publishing `TransactionStatusMessage`s on a channel.
#[derive(Clone)]
pub struct TransactionStatusSender {
    pub sender: Sender<TransactionStatusMessage>,
}
|
2019-11-20 15:43:10 -08:00
|
|
|
|
2021-03-26 15:47:35 -07:00
|
|
|
impl TransactionStatusSender {
|
|
|
|
pub fn send_transaction_status_batch(
|
|
|
|
&self,
|
|
|
|
bank: Arc<Bank>,
|
2021-08-17 15:17:56 -07:00
|
|
|
transactions: Vec<SanitizedTransaction>,
|
2022-02-11 20:29:07 -08:00
|
|
|
execution_results: Vec<TransactionExecutionResult>,
|
2021-03-26 15:47:35 -07:00
|
|
|
balances: TransactionBalancesSet,
|
|
|
|
token_balances: TransactionTokenBalancesSet,
|
2021-05-26 14:43:15 -07:00
|
|
|
rent_debits: Vec<RentDebits>,
|
2022-06-23 12:37:38 -07:00
|
|
|
transaction_indexes: Vec<usize>,
|
2021-03-26 15:47:35 -07:00
|
|
|
) {
|
|
|
|
let slot = bank.slot();
|
2022-02-11 20:29:07 -08:00
|
|
|
|
2021-03-26 15:47:35 -07:00
|
|
|
if let Err(e) = self
|
|
|
|
.sender
|
|
|
|
.send(TransactionStatusMessage::Batch(TransactionStatusBatch {
|
|
|
|
bank,
|
2021-04-12 23:28:08 -07:00
|
|
|
transactions,
|
2022-04-20 09:20:29 -07:00
|
|
|
execution_results: execution_results
|
|
|
|
.into_iter()
|
|
|
|
.map(|result| match result {
|
|
|
|
TransactionExecutionResult::Executed { details, .. } => Some(details),
|
|
|
|
TransactionExecutionResult::NotExecuted(_) => None,
|
|
|
|
})
|
|
|
|
.collect(),
|
2021-03-26 15:47:35 -07:00
|
|
|
balances,
|
|
|
|
token_balances,
|
2021-05-26 14:43:15 -07:00
|
|
|
rent_debits,
|
2022-06-23 12:37:38 -07:00
|
|
|
transaction_indexes,
|
2021-03-26 15:47:35 -07:00
|
|
|
}))
|
|
|
|
{
|
|
|
|
trace!(
|
|
|
|
"Slot {} transaction_status send batch failed: {:?}",
|
|
|
|
slot,
|
|
|
|
e
|
|
|
|
);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn send_transaction_status_freeze_message(&self, bank: &Arc<Bank>) {
|
|
|
|
let slot = bank.slot();
|
|
|
|
if let Err(e) = self.sender.send(TransactionStatusMessage::Freeze(slot)) {
|
|
|
|
trace!(
|
|
|
|
"Slot {} transaction_status send freeze message failed: {:?}",
|
|
|
|
slot,
|
|
|
|
e
|
|
|
|
);
|
|
|
|
}
|
2019-11-20 15:43:10 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-05-26 21:16:16 -07:00
|
|
|
pub type CacheBlockMetaSender = Sender<Arc<Bank>>;
|
2021-05-10 12:14:56 -07:00
|
|
|
|
2021-05-26 21:16:16 -07:00
|
|
|
pub fn cache_block_meta(bank: &Arc<Bank>, cache_block_meta_sender: Option<&CacheBlockMetaSender>) {
|
|
|
|
if let Some(cache_block_meta_sender) = cache_block_meta_sender {
|
|
|
|
cache_block_meta_sender
|
2021-05-10 12:14:56 -07:00
|
|
|
.send(bank.clone())
|
2021-05-26 21:16:16 -07:00
|
|
|
.unwrap_or_else(|err| warn!("cache_block_meta_sender failed: {:?}", err));
|
2021-05-10 12:14:56 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-31 14:15:07 -07:00
|
|
|
// used for tests only
|
2020-01-13 13:13:52 -08:00
|
|
|
pub fn fill_blockstore_slot_with_ticks(
|
|
|
|
blockstore: &Blockstore,
|
2019-10-20 08:54:38 -07:00
|
|
|
ticks_per_slot: u64,
|
|
|
|
slot: u64,
|
|
|
|
parent_slot: u64,
|
|
|
|
last_entry_hash: Hash,
|
|
|
|
) -> Hash {
|
2019-10-31 14:15:07 -07:00
|
|
|
// Only slot 0 can be equal to the parent_slot
|
|
|
|
assert!(slot.saturating_sub(1) >= parent_slot);
|
|
|
|
let num_slots = (slot - parent_slot).max(1);
|
2019-10-31 13:38:50 -07:00
|
|
|
let entries = create_ticks(num_slots * ticks_per_slot, 0, last_entry_hash);
|
2019-10-20 08:54:38 -07:00
|
|
|
let last_entry_hash = entries.last().unwrap().hash;
|
|
|
|
|
2020-01-13 13:13:52 -08:00
|
|
|
blockstore
|
2019-10-20 08:54:38 -07:00
|
|
|
.write_entries(
|
|
|
|
slot,
|
|
|
|
0,
|
|
|
|
0,
|
|
|
|
ticks_per_slot,
|
|
|
|
Some(parent_slot),
|
|
|
|
true,
|
|
|
|
&Arc::new(Keypair::new()),
|
|
|
|
entries,
|
2019-11-18 18:05:02 -08:00
|
|
|
0,
|
2019-10-20 08:54:38 -07:00
|
|
|
)
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
last_entry_hash
|
|
|
|
}
|
|
|
|
|
2019-02-16 02:26:21 -08:00
|
|
|
#[cfg(test)]
|
2019-05-20 23:09:00 -07:00
|
|
|
pub mod tests {
|
2021-12-03 09:00:31 -08:00
|
|
|
use {
|
|
|
|
super::*,
|
2022-04-29 18:05:39 -07:00
|
|
|
crate::{
|
2022-05-26 16:59:26 -07:00
|
|
|
blockstore_options::{AccessType, BlockstoreOptions},
|
2022-04-29 18:05:39 -07:00
|
|
|
genesis_utils::{
|
|
|
|
create_genesis_config, create_genesis_config_with_leader, GenesisConfigInfo,
|
|
|
|
},
|
2021-12-03 09:00:31 -08:00
|
|
|
},
|
|
|
|
matches::assert_matches,
|
|
|
|
rand::{thread_rng, Rng},
|
|
|
|
solana_entry::entry::{create_ticks, next_entry, next_entry_mut},
|
2023-05-18 13:18:28 -07:00
|
|
|
solana_program_runtime::declare_process_instruction,
|
2022-03-24 10:09:48 -07:00
|
|
|
solana_runtime::{
|
|
|
|
genesis_utils::{
|
|
|
|
self, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs,
|
|
|
|
},
|
|
|
|
vote_account::VoteAccount,
|
2021-12-03 09:00:31 -08:00
|
|
|
},
|
|
|
|
solana_sdk::{
|
|
|
|
account::{AccountSharedData, WritableAccount},
|
|
|
|
epoch_schedule::EpochSchedule,
|
|
|
|
hash::Hash,
|
2023-03-28 11:54:49 -07:00
|
|
|
instruction::{Instruction, InstructionError},
|
2022-06-15 18:35:33 -07:00
|
|
|
native_token::LAMPORTS_PER_SOL,
|
2021-12-03 09:00:31 -08:00
|
|
|
pubkey::Pubkey,
|
|
|
|
signature::{Keypair, Signer},
|
2022-08-05 06:55:41 -07:00
|
|
|
system_instruction::SystemError,
|
2021-12-03 09:00:31 -08:00
|
|
|
system_transaction,
|
|
|
|
transaction::{Transaction, TransactionError},
|
|
|
|
},
|
|
|
|
solana_vote_program::{
|
|
|
|
self,
|
|
|
|
vote_state::{VoteState, VoteStateVersions, MAX_LOCKOUT_HISTORY},
|
|
|
|
vote_transaction,
|
|
|
|
},
|
2022-01-11 02:44:46 -08:00
|
|
|
std::{collections::BTreeSet, sync::RwLock},
|
2021-12-03 09:00:31 -08:00
|
|
|
trees::tr,
|
2021-09-07 13:43:43 -07:00
|
|
|
};
|
2019-02-16 02:26:21 -08:00
|
|
|
|
2022-04-29 18:05:39 -07:00
|
|
|
    // Convenience wrapper to optionally process blockstore with Secondary access.
    //
    // Setting up the ledger for a test requires Primary access as items will need to be inserted.
    // However, once a Secondary access has been opened, it won't automatically see updates made by
    // the Primary access. So, open (and close) the Secondary access within this function to ensure
    // that "stale" Secondary accesses don't propagate.
    fn test_process_blockstore_with_custom_options(
        genesis_config: &GenesisConfig,
        blockstore: &Blockstore,
        opts: &ProcessOptions,
        access_type: AccessType,
    ) -> (Arc<RwLock<BankForks>>, LeaderScheduleCache) {
        match access_type {
            AccessType::Primary | AccessType::PrimaryForMaintenance => {
                // Attempting to open a second Primary access would fail, so
                // just pass the original session if it is a Primary variant
                test_process_blockstore(genesis_config, blockstore, opts, &Arc::default())
            }
            AccessType::Secondary => {
                // Open a fresh Secondary access over the same ledger path so it
                // sees everything the Primary access has written so far.
                let secondary_blockstore = Blockstore::open_with_options(
                    blockstore.ledger_path(),
                    BlockstoreOptions {
                        access_type,
                        ..BlockstoreOptions::default()
                    },
                )
                .expect("Unable to open access to blockstore");
                test_process_blockstore(
                    genesis_config,
                    &secondary_blockstore,
                    opts,
                    &Arc::default(),
                )
            }
        }
    }
|
|
|
|
|
2019-10-31 13:38:50 -07:00
|
|
|
    // Runs the missing-hashes scenario with a writable (Primary) blockstore.
    #[test]
    fn test_process_blockstore_with_missing_hashes() {
        do_test_process_blockstore_with_missing_hashes(AccessType::Primary);
    }
|
|
|
|
|
|
|
|
    // Runs the missing-hashes scenario with a read-only (Secondary) blockstore.
    #[test]
    fn test_process_blockstore_with_missing_hashes_secondary_access() {
        do_test_process_blockstore_with_missing_hashes(AccessType::Secondary);
    }
|
|
|
|
|
|
|
|
    // Intentionally make slot 1 faulty and ensure that processing sees it as dead
    //
    // Slot 1 is written with one PoH hash missing per tick, so entry
    // verification must reject it. Only slot 0 should end up frozen, and slot 1
    // should be marked dead — but only when the blockstore is writable.
    fn do_test_process_blockstore_with_missing_hashes(blockstore_access_type: AccessType) {
        solana_logger::setup();

        let hashes_per_tick = 2;
        let GenesisConfigInfo {
            mut genesis_config, ..
        } = create_genesis_config(10_000);
        genesis_config.poh_config.hashes_per_tick = Some(hashes_per_tick);
        let ticks_per_slot = genesis_config.ticks_per_slot;

        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Write slot 1 with one hash fewer than required per tick.
        let parent_slot = 0;
        let slot = 1;
        let entries = create_ticks(ticks_per_slot, hashes_per_tick - 1, blockhash);
        assert_matches!(
            blockstore.write_entries(
                slot,
                0,
                0,
                ticks_per_slot,
                Some(parent_slot),
                true,
                &Arc::new(Keypair::new()),
                entries,
                0,
            ),
            Ok(_)
        );

        let (bank_forks, ..) = test_process_blockstore_with_custom_options(
            &genesis_config,
            &blockstore,
            &ProcessOptions {
                run_verification: true,
                ..ProcessOptions::default()
            },
            blockstore_access_type.clone(),
        );
        // Only the genesis slot should survive verification.
        assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0]);

        let dead_slots: Vec<Slot> = blockstore.dead_slots_iterator(0).unwrap().collect();
        match blockstore_access_type {
            // Secondary access is immutable so even though a dead slot
            // will be identified, it won't actually be marked dead.
            AccessType::Secondary => {
                assert_eq!(dead_slots.len(), 0);
            }
            AccessType::Primary | AccessType::PrimaryForMaintenance => {
                assert_eq!(&dead_slots, &[1]);
            }
        }
    }
|
|
|
|
|
|
|
|
    // A slot with too few ticks must be rejected during processing, while a
    // sibling slot written correctly must still be replayed and frozen.
    #[test]
    fn test_process_blockstore_with_invalid_slot_tick_count() {
        solana_logger::setup();

        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let ticks_per_slot = genesis_config.ticks_per_slot;

        // Create a new ledger with slot 0 full of ticks
        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Write slot 1 with one tick missing
        let parent_slot = 0;
        let slot = 1;
        let entries = create_ticks(ticks_per_slot - 1, 0, blockhash);
        assert_matches!(
            blockstore.write_entries(
                slot,
                0,
                0,
                ticks_per_slot,
                Some(parent_slot),
                true,
                &Arc::new(Keypair::new()),
                entries,
                0,
            ),
            Ok(_)
        );

        // Should return slot 0, the last slot on the fork that is valid
        let (bank_forks, ..) = test_process_blockstore(
            &genesis_config,
            &blockstore,
            &ProcessOptions {
                run_verification: true,
                ..ProcessOptions::default()
            },
            &Arc::default(),
        );
        assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0]);

        // Write slot 2 fully
        let _last_slot2_entry_hash =
            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 0, blockhash);

        let (bank_forks, ..) = test_process_blockstore(
            &genesis_config,
            &blockstore,
            &ProcessOptions {
                run_verification: true,
                ..ProcessOptions::default()
            },
            &Arc::default(),
        );

        // One valid fork, one bad fork. process_blockstore() should only return the valid fork
        assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0, 2]);
        assert_eq!(bank_forks.read().unwrap().working_bank().slot(), 2);
        assert_eq!(bank_forks.read().unwrap().root(), 0);
    }
|
|
|
|
|
|
|
|
#[test]
fn test_process_blockstore_with_slot_with_trailing_entry() {
    solana_logger::setup();

    let GenesisConfigInfo {
        mint_keypair,
        genesis_config,
        ..
    } = create_genesis_config(10_000);
    let ticks_per_slot = genesis_config.ticks_per_slot;

    let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    // A full slot's worth of ticks, plus one extra transaction entry at the end.
    let mut entries = create_ticks(ticks_per_slot, 0, blockhash);
    let recipient = Keypair::new();
    let tx = system_transaction::transfer(&mint_keypair, &recipient.pubkey(), 1, blockhash);
    entries.push(next_entry(&blockhash, 1, vec![tx]));

    // Tricks blockstore into writing the trailing entry by lying that there is one more tick
    // per slot.
    let parent_slot = 0;
    let slot = 1;
    assert_matches!(
        blockstore.write_entries(
            slot,
            0,
            0,
            ticks_per_slot + 1,
            Some(parent_slot),
            true,
            &Arc::new(Keypair::new()),
            entries,
            0,
        ),
        Ok(_)
    );

    // Verified replay should reject slot 1 (extra entry after the final tick),
    // leaving only slot 0 frozen.
    let (bank_forks, ..) = test_process_blockstore(
        &genesis_config,
        &blockstore,
        &ProcessOptions {
            run_verification: true,
            accounts_db_test_hash_calculation: true,
            ..ProcessOptions::default()
        },
        &Arc::default(),
    );
    assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0]);
}
|
|
|
|
|
2019-02-26 09:18:24 -08:00
|
|
|
#[test]
fn test_process_blockstore_with_incomplete_slot() {
    solana_logger::setup();

    let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
    let ticks_per_slot = genesis_config.ticks_per_slot;

    /*
      Build a blockstore in the ledger with the following fork structure:

           slot 0 (all ticks)
             |
           slot 1 (all ticks but one)
             |
           slot 2 (all ticks)

       where slot 1 is incomplete (missing 1 tick at the end)
    */

    // Create a new ledger with slot 0 full of ticks
    let (ledger_path, mut blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
    debug!("ledger_path: {:?}", ledger_path);

    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    // Write slot 1, chained off slot 0, but missing its final tick so it is
    // never marked full.
    {
        let parent_slot = 0;
        let slot = 1;
        let mut entries = create_ticks(ticks_per_slot, 0, blockhash);
        blockhash = entries.last().unwrap().hash;

        // throw away last one
        entries.pop();

        assert_matches!(
            blockstore.write_entries(
                slot,
                0,
                0,
                ticks_per_slot,
                Some(parent_slot),
                false,
                &Arc::new(Keypair::new()),
                entries,
                0,
            ),
            Ok(_)
        );
    }

    // slot 2, points at slot 1
    fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, blockhash);

    let opts = ProcessOptions {
        run_verification: true,
        accounts_db_test_hash_calculation: true,
        ..ProcessOptions::default()
    };
    let (bank_forks, ..) =
        test_process_blockstore(&genesis_config, &blockstore, &opts, &Arc::default());

    // slot 1 isn't "full", so replay stops at slot zero
    assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0]);

    /* Add a complete slot such that the store looks like:

                                 slot 0 (all ticks)
                               /                  \
         slot 1 (all ticks but one)              slot 3 (all ticks)
                |
         slot 2 (all ticks)
    */
    fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 0, blockhash);

    let (bank_forks, ..) =
        test_process_blockstore(&genesis_config, &blockstore, &opts, &Arc::default());

    // The incomplete fork (1 -> 2) is still skipped, but the new complete
    // slot 3 chained off the root is replayed.
    assert_eq!(frozen_bank_slots(&bank_forks.read().unwrap()), vec![0, 3]);
}
|
|
|
|
|
2019-04-29 12:29:14 -07:00
|
|
|
/// Verifies that when the root is set on one fork (slot 4), replay ignores the
/// other fork entirely and the rooted bank has had all of its parents squashed
/// away.
#[test]
fn test_process_blockstore_with_two_forks_and_squash() {
    solana_logger::setup();

    let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
    let ticks_per_slot = genesis_config.ticks_per_slot;

    // Create a new ledger with slot 0 full of ticks
    let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
    debug!("ledger_path: {:?}", ledger_path);
    let mut last_entry_hash = blockhash;

    /*
        Build a blockstore in the ledger with the following fork structure:

             slot 0
               |
             slot 1
             /   \
        slot 2   |
           /     |
        slot 3   |
                 |
               slot 4 <-- set_root(true)

    */
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    // Fork 1, ending at slot 3
    let last_slot1_entry_hash =
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, last_entry_hash);
    last_entry_hash = fill_blockstore_slot_with_ticks(
        &blockstore,
        ticks_per_slot,
        2,
        1,
        last_slot1_entry_hash,
    );
    let last_fork1_entry_hash =
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 2, last_entry_hash);

    // Fork 2, ending at slot 4
    let last_fork2_entry_hash = fill_blockstore_slot_with_ticks(
        &blockstore,
        ticks_per_slot,
        4,
        1,
        last_slot1_entry_hash,
    );

    info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
    info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);

    blockstore.set_roots(vec![0, 1, 4].iter()).unwrap();

    let opts = ProcessOptions {
        run_verification: true,
        accounts_db_test_hash_calculation: true,
        ..ProcessOptions::default()
    };
    let (bank_forks, ..) =
        test_process_blockstore(&genesis_config, &blockstore, &opts, &Arc::default());
    let bank_forks = bank_forks.read().unwrap();

    // One fork, other one is ignored b/c not a descendant of the root
    assert_eq!(frozen_bank_slots(&bank_forks), vec![4]);

    // The latest root should have purged (squashed) all of its parents
    assert!(bank_forks[4].parents().is_empty());

    // Ensure bank_forks holds the right banks
    verify_fork_infos(&bank_forks);

    assert_eq!(bank_forks.root(), 4);
}
|
|
|
|
|
2019-02-19 19:40:23 -08:00
|
|
|
/// Verifies that with the root at slot 1, both descendant forks (ending at
/// slots 3 and 4) are replayed and their parent chains stop at the root.
#[test]
fn test_process_blockstore_with_two_forks() {
    solana_logger::setup();

    let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
    let ticks_per_slot = genesis_config.ticks_per_slot;

    // Create a new ledger with slot 0 full of ticks
    let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
    debug!("ledger_path: {:?}", ledger_path);
    let mut last_entry_hash = blockhash;

    /*
        Build a blockstore in the ledger with the following fork structure:

             slot 0
               |
             slot 1  <-- set_root(true)
             /   \
        slot 2   |
           /     |
        slot 3   |
                 |
               slot 4

    */
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    // Fork 1, ending at slot 3
    let last_slot1_entry_hash =
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, last_entry_hash);
    last_entry_hash = fill_blockstore_slot_with_ticks(
        &blockstore,
        ticks_per_slot,
        2,
        1,
        last_slot1_entry_hash,
    );
    let last_fork1_entry_hash =
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 2, last_entry_hash);

    // Fork 2, ending at slot 4
    let last_fork2_entry_hash = fill_blockstore_slot_with_ticks(
        &blockstore,
        ticks_per_slot,
        4,
        1,
        last_slot1_entry_hash,
    );

    info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
    info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);

    blockstore.set_roots(vec![0, 1].iter()).unwrap();

    let opts = ProcessOptions {
        run_verification: true,
        accounts_db_test_hash_calculation: true,
        ..ProcessOptions::default()
    };
    let (bank_forks, ..) =
        test_process_blockstore(&genesis_config, &blockstore, &opts, &Arc::default());
    let bank_forks = bank_forks.read().unwrap();

    // Both forks descend from the root, so all of slots 1..=4 are frozen.
    assert_eq!(frozen_bank_slots(&bank_forks), vec![1, 2, 3, 4]);
    assert_eq!(bank_forks.working_bank().slot(), 4);
    assert_eq!(bank_forks.root(), 1);

    // Each tip's parent chain reaches back to the root and no further.
    assert_eq!(
        &bank_forks[3]
            .parents()
            .iter()
            .map(|bank| bank.slot())
            .collect::<Vec<_>>(),
        &[2, 1]
    );
    assert_eq!(
        &bank_forks[4]
            .parents()
            .iter()
            .map(|bank| bank.slot())
            .collect::<Vec<_>>(),
        &[1]
    );

    // Ensure bank_forks holds the right banks
    verify_fork_infos(&bank_forks);
}
|
|
|
|
|
2019-11-12 12:38:26 -08:00
|
|
|
#[test]
fn test_process_blockstore_with_dead_slot() {
    solana_logger::setup();

    let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
    let ticks_per_slot = genesis_config.ticks_per_slot;
    let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
    debug!("ledger_path: {:?}", ledger_path);

    /*
               slot 0
                 |
               slot 1
                /   \
               /     \
      slot 2 (dead)   \
                       \
                      slot 3
    */
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    let slot1_blockhash =
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash);
    fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, slot1_blockhash);
    blockstore.set_dead_slot(2).unwrap();
    fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 1, slot1_blockhash);

    let (bank_forks, ..) = test_process_blockstore(
        &genesis_config,
        &blockstore,
        &ProcessOptions::default(),
        &Arc::default(),
    );
    let bank_forks = bank_forks.read().unwrap();

    // The dead slot 2 is skipped; the live fork through slot 3 survives.
    assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 1, 3]);
    assert_eq!(bank_forks.working_bank().slot(), 3);
    let slot3_parents: Vec<_> = bank_forks[3]
        .parents()
        .iter()
        .map(|bank| bank.slot())
        .collect();
    assert_eq!(slot3_parents, vec![1, 0]);
    verify_fork_infos(&bank_forks);
}
|
|
|
|
|
2020-01-10 12:16:44 -08:00
|
|
|
/// Verifies that when a leaf slot (4) is marked dead, its parent (slot 2)
/// is still replayed and frozen, while the dead child itself is skipped.
#[test]
fn test_process_blockstore_with_dead_child() {
    solana_logger::setup();

    let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
    let ticks_per_slot = genesis_config.ticks_per_slot;
    let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
    debug!("ledger_path: {:?}", ledger_path);

    /*
               slot 0
                 |
               slot 1
                /   \
               /     \
           slot 2     \
             /         \
       slot 4 (dead)   slot 3
    */
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    let slot1_blockhash =
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash);
    let slot2_blockhash =
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, slot1_blockhash);
    fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 4, 2, slot2_blockhash);
    blockstore.set_dead_slot(4).unwrap();
    fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 1, slot1_blockhash);

    let (bank_forks, ..) = test_process_blockstore(
        &genesis_config,
        &blockstore,
        &ProcessOptions::default(),
        &Arc::default(),
    );
    let bank_forks = bank_forks.read().unwrap();

    // Should see the parent of the dead child
    assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 1, 2, 3]);
    assert_eq!(bank_forks.working_bank().slot(), 3);

    assert_eq!(
        &bank_forks[3]
            .parents()
            .iter()
            .map(|bank| bank.slot())
            .collect::<Vec<_>>(),
        &[1, 0]
    );
    assert_eq!(
        &bank_forks[2]
            .parents()
            .iter()
            .map(|bank| bank.slot())
            .collect::<Vec<_>>(),
        &[1, 0]
    );
    verify_fork_infos(&bank_forks);
}
|
|
|
|
|
|
|
|
#[test]
fn test_root_with_all_dead_children() {
    solana_logger::setup();

    let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
    let ticks_per_slot = genesis_config.ticks_per_slot;
    let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
    debug!("ledger_path: {:?}", ledger_path);

    /*
               slot 0
                /   \
               /     \
      slot 1 (dead)  slot 2 (dead)
    */
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    for (slot, parent) in [(1, 0), (2, 0)] {
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, slot, parent, blockhash);
        blockstore.set_dead_slot(slot).unwrap();
    }

    let (bank_forks, ..) = test_process_blockstore(
        &genesis_config,
        &blockstore,
        &ProcessOptions::default(),
        &Arc::default(),
    );
    let bank_forks = bank_forks.read().unwrap();

    // Should see only the parent of the dead children
    assert_eq!(frozen_bank_slots(&bank_forks), vec![0]);
    verify_fork_infos(&bank_forks);
}
|
|
|
|
|
2019-05-20 19:04:18 -07:00
|
|
|
/// Verifies that replay restarts cleanly when the root lands just past an
/// epoch boundary, and that the rooted bank's parents have been purged.
#[test]
fn test_process_blockstore_epoch_boundary_root() {
    solana_logger::setup();

    let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
    let ticks_per_slot = genesis_config.ticks_per_slot;

    // Create a new ledger with slot 0 full of ticks
    let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
    let mut last_entry_hash = blockhash;

    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    // Let `last_slot` be the number of slots in the first two epochs
    let epoch_schedule = get_epoch_schedule(&genesis_config, Vec::new());
    let last_slot = epoch_schedule.get_last_slot_in_epoch(1);

    // Create a single chain of slots with all indexes in the range [0, last_slot + 1]
    for i in 1..=last_slot + 1 {
        last_entry_hash = fill_blockstore_slot_with_ticks(
            &blockstore,
            ticks_per_slot,
            i,
            i - 1,
            last_entry_hash,
        );
    }

    // Set a root on the last slot of the last confirmed epoch
    let rooted_slots: Vec<Slot> = (0..=last_slot).collect();
    blockstore.set_roots(rooted_slots.iter()).unwrap();

    // Set a root on the next slot of the confirmed epoch
    blockstore
        .set_roots(std::iter::once(&(last_slot + 1)))
        .unwrap();

    // Check that we can properly restart the ledger / leader scheduler doesn't fail
    let opts = ProcessOptions {
        run_verification: true,
        accounts_db_test_hash_calculation: true,
        ..ProcessOptions::default()
    };
    let (bank_forks, ..) =
        test_process_blockstore(&genesis_config, &blockstore, &opts, &Arc::default());
    let bank_forks = bank_forks.read().unwrap();

    // There is one fork, head is last_slot + 1
    assert_eq!(frozen_bank_slots(&bank_forks), vec![last_slot + 1]);

    // The latest root should have purged all its parents
    assert!(bank_forks[last_slot + 1].parents().is_empty());
}
|
|
|
|
|
2019-02-16 13:17:37 -08:00
|
|
|
#[test]
fn test_first_err() {
    // An all-Ok slice yields Ok.
    assert_eq!(first_err(&[Ok(())]), Ok(()));

    // A single trailing error is surfaced.
    assert_eq!(
        first_err(&[Ok(()), Err(TransactionError::AlreadyProcessed)]),
        Err(TransactionError::AlreadyProcessed)
    );

    // The earliest error in slice order wins, regardless of what follows...
    assert_eq!(
        first_err(&[
            Ok(()),
            Err(TransactionError::AlreadyProcessed),
            Err(TransactionError::AccountInUse)
        ]),
        Err(TransactionError::AlreadyProcessed)
    );

    // ...even when a different error kind appears later...
    assert_eq!(
        first_err(&[
            Ok(()),
            Err(TransactionError::AccountInUse),
            Err(TransactionError::AlreadyProcessed)
        ]),
        Err(TransactionError::AccountInUse)
    );

    // ...or when the error is the very first element.
    assert_eq!(
        first_err(&[
            Err(TransactionError::AccountInUse),
            Ok(()),
            Err(TransactionError::AlreadyProcessed)
        ]),
        Err(TransactionError::AccountInUse)
    );
}
|
|
|
|
|
|
|
|
#[test]
fn test_process_empty_entry_is_registered() {
    solana_logger::setup();

    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(2);
    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
    let recipient = Keypair::new();

    // A slot of pure ticks; the transfer references the hash of its final tick.
    let slot_entries = create_ticks(genesis_config.ticks_per_slot, 1, genesis_config.hash());
    let tx = system_transaction::transfer(
        &mint_keypair,
        &recipient.pubkey(),
        1,
        slot_entries.last().unwrap().hash,
    );

    // First, ensure the TX is rejected because of the unregistered last ID
    assert_eq!(
        bank.process_transaction(&tx),
        Err(TransactionError::BlockhashNotFound)
    );

    // Now ensure the TX is accepted despite pointing to the ID of an empty entry.
    process_entries_for_tests(&bank, slot_entries, true, None, None).unwrap();
    assert_eq!(bank.process_transaction(&tx), Ok(()));
}
|
|
|
|
|
2019-02-20 15:42:35 -08:00
|
|
|
#[test]
fn test_process_ledger_simple() {
    solana_logger::setup();
    let leader_pubkey = solana_sdk::pubkey::new_rand();
    let mint = 100;
    let hashes_per_tick = 10;
    let GenesisConfigInfo {
        mut genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config_with_leader(mint, &leader_pubkey, 50);
    genesis_config.poh_config.hashes_per_tick = Some(hashes_per_tick);
    let (ledger_path, mut last_entry_hash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
    debug!("ledger_path: {:?}", ledger_path);

    let deducted_from_mint = 3;
    let blockhash = genesis_config.hash();
    let mut entries = vec![];
    for _ in 0..deducted_from_mint {
        // Transfer one token from the mint to a random account
        let recipient = Keypair::new();
        let good_tx =
            system_transaction::transfer(&mint_keypair, &recipient.pubkey(), 1, blockhash);
        entries.push(next_entry_mut(&mut last_entry_hash, 1, vec![good_tx]));

        // Add a second Transaction that will produce a
        // InstructionError<0, ResultWithNegativeLamports> error when processed
        let overdrawn = Keypair::new();
        let bad_tx =
            system_transaction::transfer(&mint_keypair, &overdrawn.pubkey(), 101, blockhash);
        entries.push(next_entry_mut(&mut last_entry_hash, 1, vec![bad_tx]));
    }

    // Complete the first tick with however many hashes are still owed.
    let remaining_hashes = hashes_per_tick - entries.len() as u64;
    entries.push(next_entry_mut(&mut last_entry_hash, remaining_hashes, vec![]));

    // Fill up the rest of slot 1 with ticks
    entries.extend(create_ticks(
        genesis_config.ticks_per_slot - 1,
        genesis_config.poh_config.hashes_per_tick.unwrap(),
        last_entry_hash,
    ));
    let last_blockhash = entries.last().unwrap().hash;

    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    blockstore
        .write_entries(
            1,
            0,
            0,
            genesis_config.ticks_per_slot,
            None,
            true,
            &Arc::new(Keypair::new()),
            entries,
            0,
        )
        .unwrap();
    let opts = ProcessOptions {
        run_verification: true,
        accounts_db_test_hash_calculation: true,
        ..ProcessOptions::default()
    };
    let (bank_forks, ..) =
        test_process_blockstore(&genesis_config, &blockstore, &opts, &Arc::default());
    let bank_forks = bank_forks.read().unwrap();

    assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 1]);
    assert_eq!(bank_forks.root(), 0);
    assert_eq!(bank_forks.working_bank().slot(), 1);

    // Only the successful transfers should have drained the mint.
    let bank = bank_forks[1].clone();
    assert_eq!(
        bank.get_balance(&mint_keypair.pubkey()),
        mint - deducted_from_mint
    );
    assert_eq!(bank.tick_height(), 2 * genesis_config.ticks_per_slot);
    assert_eq!(bank.last_blockhash(), last_blockhash);
}
|
2019-02-16 13:17:37 -08:00
|
|
|
|
2019-02-28 12:09:19 -08:00
|
|
|
#[test]
fn test_process_ledger_with_one_tick_per_slot() {
    // Shrink slots down to a single tick and confirm replay still freezes
    // slot 0 with the expected tick height.
    let GenesisConfigInfo {
        mut genesis_config, ..
    } = create_genesis_config(123);
    genesis_config.ticks_per_slot = 1;
    let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);

    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    let opts = ProcessOptions {
        run_verification: true,
        accounts_db_test_hash_calculation: true,
        ..ProcessOptions::default()
    };
    let (bank_forks, ..) =
        test_process_blockstore(&genesis_config, &blockstore, &opts, &Arc::default());
    let bank_forks = bank_forks.read().unwrap();

    assert_eq!(frozen_bank_slots(&bank_forks), vec![0]);
    let genesis_bank = bank_forks[0].clone();
    assert_eq!(genesis_bank.tick_height(), 1);
}
|
|
|
|
|
2019-10-08 14:58:49 -07:00
|
|
|
#[test]
fn test_process_ledger_options_full_leader_cache() {
    // When `full_leader_cache` is set, blockstore processing should configure
    // the leader schedule cache with an unbounded schedule retention cap.
    let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123);
    let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);

    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    let opts = ProcessOptions {
        full_leader_cache: true,
        accounts_db_test_hash_calculation: true,
        ..ProcessOptions::default()
    };
    let (_bank_forks, leader_schedule) =
        test_process_blockstore(&genesis_config, &blockstore, &opts, &Arc::default());
    // `usize::MAX` (associated constant) replaces the soft-deprecated
    // `std::usize::MAX` legacy module constant; the value is identical.
    assert_eq!(leader_schedule.max_schedules(), usize::MAX);
}
|
|
|
|
|
2019-02-16 13:17:37 -08:00
|
|
|
#[test]
fn test_process_entries_tick() {
    // A bank must accept a single tick entry and advance its tick height.
    let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(1000);
    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
    assert_eq!(bank.tick_height(), 0);

    // One empty entry hashed off the genesis hash acts as the tick.
    let tick_entry = next_entry(&genesis_config.hash(), 1, vec![]);
    let result = process_entries_for_tests(&bank, vec![tick_entry], true, None, None);
    assert_eq!(result, Ok(()));
    assert_eq!(bank.tick_height(), 1);
}
|
|
|
|
|
|
|
|
#[test]
fn test_process_entries_2_entries_collision() {
    // Two entries whose transactions share the mint account must both apply,
    // and with no tick present the bank's blockhash must not advance.
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(1000);
    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
    let keypair1 = Keypair::new();
    let keypair2 = Keypair::new();

    let blockhash = bank.last_blockhash();

    // ensure bank can process 2 entries that have a common account and no tick is registered
    let tx1 = system_transaction::transfer(
        &mint_keypair,
        &keypair1.pubkey(),
        2,
        bank.last_blockhash(),
    );
    let entry_1 = next_entry(&blockhash, 1, vec![tx1]);
    let tx2 = system_transaction::transfer(
        &mint_keypair,
        &keypair2.pubkey(),
        2,
        bank.last_blockhash(),
    );
    let entry_2 = next_entry(&entry_1.hash, 1, vec![tx2]);

    let result = process_entries_for_tests(&bank, vec![entry_1, entry_2], true, None, None);
    assert_eq!(result, Ok(()));

    // Both transfers landed; blockhash unchanged because no tick registered.
    assert_eq!(bank.get_balance(&keypair1.pubkey()), 2);
    assert_eq!(bank.get_balance(&keypair2.pubkey()), 2);
    assert_eq!(bank.last_blockhash(), blockhash);
}
|
|
|
|
|
|
|
|
// Verifies that entry processing tolerates an account-lock collision between
// consecutive entries: the second entry's colliding transaction must still be
// applied correctly rather than dropped or double-applied.
#[test]
fn test_process_entries_2_txes_collision() {
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(1000);
    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
    let keypair1 = Keypair::new();
    let keypair2 = Keypair::new();
    let keypair3 = Keypair::new();

    // fund: put 4 in each of 1 and 2
    assert_matches!(bank.transfer(4, &mint_keypair, &keypair1.pubkey()), Ok(_));
    assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_));

    // construct an Entry whose 2nd transaction would cause a lock conflict with previous entry
    // First entry: keypair1 returns 1 lamport to the mint (locks keypair1 + mint).
    let entry_1_to_mint = next_entry(
        &bank.last_blockhash(),
        1,
        vec![system_transaction::transfer(
            &keypair1,
            &mint_keypair.pubkey(),
            1,
            bank.last_blockhash(),
        )],
    );

    // Second entry: one conflict-free transfer plus one transaction that
    // reuses keypair1 and the mint, colliding with the entry above.
    let entry_2_to_3_mint_to_1 = next_entry(
        &entry_1_to_mint.hash,
        1,
        vec![
            system_transaction::transfer(
                &keypair2,
                &keypair3.pubkey(),
                2,
                bank.last_blockhash(),
            ), // should be fine
            system_transaction::transfer(
                &keypair1,
                &mint_keypair.pubkey(),
                2,
                bank.last_blockhash(),
            ), // will collide
        ],
    );

    // Despite the cross-entry lock collision, processing both entries succeeds.
    assert_eq!(
        process_entries_for_tests(
            &bank,
            vec![entry_1_to_mint, entry_2_to_3_mint_to_1],
            false,
            None,
            None,
        ),
        Ok(())
    );

    // keypair1: 4 - 1 - 2 = 1; keypair2: 4 - 2 = 2; keypair3 received 2.
    assert_eq!(bank.get_balance(&keypair1.pubkey()), 1);
    assert_eq!(bank.get_balance(&keypair2.pubkey()), 2);
    assert_eq!(bank.get_balance(&keypair3.pubkey()), 2);
}
|
|
|
|
|
2019-04-02 03:55:42 -07:00
|
|
|
// Verifies that when one transaction in an entry fails (bad blockhash) while a
// later entry holds a lock conflict with it, processing reports an error, the
// successful transactions still land, and all account locks are released.
#[test]
fn test_process_entries_2_txes_collision_and_error() {
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(1000);
    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
    let keypair1 = Keypair::new();
    let keypair2 = Keypair::new();
    let keypair3 = Keypair::new();
    let keypair4 = Keypair::new();

    // fund: put 4 in each of 1 and 2
    assert_matches!(bank.transfer(4, &mint_keypair, &keypair1.pubkey()), Ok(_));
    assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_));
    assert_matches!(bank.transfer(4, &mint_keypair, &keypair4.pubkey()), Ok(_));

    // construct an Entry whose 2nd transaction would cause a lock conflict with previous entry
    let entry_1_to_mint = next_entry(
        &bank.last_blockhash(),
        1,
        vec![
            system_transaction::transfer(
                &keypair1,
                &mint_keypair.pubkey(),
                1,
                bank.last_blockhash(),
            ),
            system_transaction::transfer(
                &keypair4,
                &keypair4.pubkey(),
                1,
                Hash::default(), // Should cause a transaction failure with BlockhashNotFound
            ),
        ],
    );

    let entry_2_to_3_mint_to_1 = next_entry(
        &entry_1_to_mint.hash,
        1,
        vec![
            system_transaction::transfer(
                &keypair2,
                &keypair3.pubkey(),
                2,
                bank.last_blockhash(),
            ), // should be fine
            system_transaction::transfer(
                &keypair1,
                &mint_keypair.pubkey(),
                2,
                bank.last_blockhash(),
            ), // will collide
        ],
    );

    // Entries are cloned because their transactions are re-batched below.
    assert!(process_entries_for_tests(
        &bank,
        vec![entry_1_to_mint.clone(), entry_2_to_3_mint_to_1.clone()],
        false,
        None,
        None,
    )
    .is_err());

    // First transaction in first entry succeeded, so keypair1 lost 1 lamport
    assert_eq!(bank.get_balance(&keypair1.pubkey()), 3);
    assert_eq!(bank.get_balance(&keypair2.pubkey()), 4);

    // Check all accounts are unlocked
    let txs1 = entry_1_to_mint.transactions;
    let txs2 = entry_2_to_3_mint_to_1.transactions;
    let batch1 = bank.prepare_entry_batch(txs1).unwrap();
    for result in batch1.lock_results() {
        assert!(result.is_ok());
    }
    // txs1 and txs2 have accounts that conflict, so we must drop txs1 first
    drop(batch1);
    let batch2 = bank.prepare_entry_batch(txs2).unwrap();
    for result in batch2.lock_results() {
        assert!(result.is_ok());
    }
}
|
|
|
|
|
2023-03-28 11:54:49 -07:00
|
|
|
// Verifies that which particular InstructionError a transaction fails with
// does not change the resulting bank hash: every error variant must produce
// the same bank hash, which must still differ from the all-success hash
// (failed vs. succeeded is committed; the error detail is not).
#[test]
fn test_transaction_result_does_not_affect_bankhash() {
    solana_logger::setup();
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(1000);

    // Every InstructionError variant exercised below; the instruction data
    // byte indexes into this list.
    fn get_instruction_errors() -> Vec<InstructionError> {
        vec![
            InstructionError::GenericError,
            InstructionError::InvalidArgument,
            InstructionError::InvalidInstructionData,
            InstructionError::InvalidAccountData,
            InstructionError::AccountDataTooSmall,
            InstructionError::InsufficientFunds,
            InstructionError::IncorrectProgramId,
            InstructionError::MissingRequiredSignature,
            InstructionError::AccountAlreadyInitialized,
            InstructionError::UninitializedAccount,
            InstructionError::UnbalancedInstruction,
            InstructionError::ModifiedProgramId,
            InstructionError::ExternalAccountLamportSpend,
            InstructionError::ExternalAccountDataModified,
            InstructionError::ReadonlyLamportChange,
            InstructionError::ReadonlyDataModified,
            InstructionError::DuplicateAccountIndex,
            InstructionError::ExecutableModified,
            InstructionError::RentEpochModified,
            InstructionError::NotEnoughAccountKeys,
            InstructionError::AccountDataSizeChanged,
            InstructionError::AccountNotExecutable,
            InstructionError::AccountBorrowFailed,
            InstructionError::AccountBorrowOutstanding,
            InstructionError::DuplicateAccountOutOfSync,
            InstructionError::Custom(0),
            InstructionError::InvalidError,
            InstructionError::ExecutableDataModified,
            InstructionError::ExecutableLamportChange,
            InstructionError::ExecutableAccountNotRentExempt,
            InstructionError::UnsupportedProgramId,
            InstructionError::CallDepth,
            InstructionError::MissingAccount,
            InstructionError::ReentrancyNotAllowed,
            InstructionError::MaxSeedLengthExceeded,
            InstructionError::InvalidSeeds,
            InstructionError::InvalidRealloc,
            InstructionError::ComputationalBudgetExceeded,
            InstructionError::PrivilegeEscalation,
            InstructionError::ProgramEnvironmentSetupFailure,
            InstructionError::ProgramFailedToComplete,
            InstructionError::ProgramFailedToCompile,
            InstructionError::Immutable,
            InstructionError::IncorrectAuthority,
            InstructionError::BorshIoError("error".to_string()),
            InstructionError::AccountNotRentExempt,
            InstructionError::InvalidAccountOwner,
            InstructionError::ArithmeticOverflow,
            InstructionError::UnsupportedSysvar,
            InstructionError::IllegalOwner,
            InstructionError::MaxAccountsDataAllocationsExceeded,
            InstructionError::MaxAccountsExceeded,
            InstructionError::MaxInstructionTraceLengthExceeded,
            InstructionError::BuiltinProgramsMustConsumeComputeUnits,
        ]
    }

    declare_process_instruction!(mock_processor_ok, 1, |_invoke_context| {
        // Always succeeds
        Ok(())
    });

    let mock_program_id = solana_sdk::pubkey::new_rand();

    // Baseline bank: one successful mock-program transaction.
    let mut bank = Bank::new_for_tests(&genesis_config);
    bank.add_mockup_builtin(mock_program_id, mock_processor_ok);

    let tx = Transaction::new_signed_with_payer(
        &[Instruction::new_with_bincode(
            mock_program_id,
            &10,
            Vec::new(),
        )],
        Some(&mint_keypair.pubkey()),
        &[&mint_keypair],
        bank.last_blockhash(),
    );

    let entry = next_entry(&bank.last_blockhash(), 1, vec![tx]);
    let bank = Arc::new(bank);
    let result = process_entries_for_tests(&bank, vec![entry], false, None, None);
    bank.freeze();
    // Reference hashes for the all-success case.
    let blockhash_ok = bank.last_blockhash();
    let bankhash_ok = bank.hash();
    assert!(result.is_ok());

    // Processor that fails with the InstructionError selected by the first
    // byte of the instruction data.
    declare_process_instruction!(mock_processor_err, 1, |invoke_context| {
        let instruction_errors = get_instruction_errors();

        let err = invoke_context
            .transaction_context
            .get_current_instruction_context()
            .expect("Failed to get instruction context")
            .get_instruction_data()
            .first()
            .expect("Failed to get instruction data");
        Err(instruction_errors
            .get(*err as usize)
            .expect("Invalid error index")
            .clone())
    });

    // Bank hash observed for the previous error variant, for cross-variant
    // comparison; None on the first iteration.
    let mut bankhash_err = None;

    (0..get_instruction_errors().len()).for_each(|err| {
        // Fresh bank per error variant so each run is independent.
        let mut bank = Bank::new_for_tests(&genesis_config);
        bank.add_mockup_builtin(mock_program_id, mock_processor_err);

        let tx = Transaction::new_signed_with_payer(
            &[Instruction::new_with_bincode(
                mock_program_id,
                &(err as u8),
                Vec::new(),
            )],
            Some(&mint_keypair.pubkey()),
            &[&mint_keypair],
            bank.last_blockhash(),
        );

        let entry = next_entry(&bank.last_blockhash(), 1, vec![tx]);
        let bank = Arc::new(bank);
        let _result = process_entries_for_tests(&bank, vec![entry], false, None, None);
        bank.freeze();

        // Same blockhash as the success run, different bank hash from it,
        // but identical bank hash across all error variants.
        assert_eq!(blockhash_ok, bank.last_blockhash());
        assert!(bankhash_ok != bank.hash());
        if let Some(bankhash) = bankhash_err {
            assert_eq!(bankhash, bank.hash());
        }
        bankhash_err = Some(bank.hash());
    });
}
|
|
|
|
|
2019-05-23 17:35:15 -07:00
|
|
|
// Verifies that an entry containing two transactions that lock the same
// account (a self-collision within one entry) aborts processing, and that
// the already-applied earlier entries keep their effects.
#[test]
fn test_process_entries_2nd_entry_collision_with_self_and_error() {
    solana_logger::setup();

    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(1000);
    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
    let keypair1 = Keypair::new();
    let keypair2 = Keypair::new();
    let keypair3 = Keypair::new();

    // fund: put some money in each of 1 and 2
    assert_matches!(bank.transfer(5, &mint_keypair, &keypair1.pubkey()), Ok(_));
    assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_));

    // 3 entries: first has a transfer, 2nd has a conflict with 1st, 3rd has a conflict with itself
    let entry_1_to_mint = next_entry(
        &bank.last_blockhash(),
        1,
        vec![system_transaction::transfer(
            &keypair1,
            &mint_keypair.pubkey(),
            1,
            bank.last_blockhash(),
        )],
    );
    // should now be:
    // keypair1=4
    // keypair2=4
    // keypair3=0

    let entry_2_to_3_and_1_to_mint = next_entry(
        &entry_1_to_mint.hash,
        1,
        vec![
            system_transaction::transfer(
                &keypair2,
                &keypair3.pubkey(),
                2,
                bank.last_blockhash(),
            ), // should be fine
            system_transaction::transfer(
                &keypair1,
                &mint_keypair.pubkey(),
                2,
                bank.last_blockhash(),
            ), // will collide with predecessor
        ],
    );
    // should now be:
    // keypair1=2
    // keypair2=2
    // keypair3=2

    // Both transactions below are signed by keypair1, so the entry conflicts
    // with itself and cannot be lock-partitioned.
    let entry_conflict_itself = next_entry(
        &entry_2_to_3_and_1_to_mint.hash,
        1,
        vec![
            system_transaction::transfer(
                &keypair1,
                &keypair3.pubkey(),
                1,
                bank.last_blockhash(),
            ),
            system_transaction::transfer(
                &keypair1,
                &keypair2.pubkey(),
                1,
                bank.last_blockhash(),
            ), // should be fine
        ],
    );
    // would now be:
    // keypair1=0
    // keypair2=3
    // keypair3=3

    assert!(process_entries_for_tests(
        &bank,
        vec![
            entry_1_to_mint,
            entry_2_to_3_and_1_to_mint,
            entry_conflict_itself,
        ],
        false,
        None,
        None,
    )
    .is_err());

    // last entry should have been aborted before par_execute_entries
    assert_eq!(bank.get_balance(&keypair1.pubkey()), 2);
    assert_eq!(bank.get_balance(&keypair2.pubkey()), 2);
    assert_eq!(bank.get_balance(&keypair3.pubkey()), 2);
}
|
|
|
|
|
2019-02-16 13:17:37 -08:00
|
|
|
#[test]
fn test_process_entries_2_entries_par() {
    // Two entries whose transactions touch disjoint account sets should both
    // process successfully, and with no tick the blockhash must not advance.
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(1000);
    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
    let keypair1 = Keypair::new();
    let keypair2 = Keypair::new();
    let keypair3 = Keypair::new();
    let keypair4 = Keypair::new();

    // Seed keypair1 and keypair2 with one lamport apiece.
    for recipient in [&keypair1, &keypair2] {
        let fund_tx = system_transaction::transfer(
            &mint_keypair,
            &recipient.pubkey(),
            1,
            bank.last_blockhash(),
        );
        assert_eq!(bank.process_transaction(&fund_tx), Ok(()));
    }

    // ensure bank can process 2 entries that do not have a common account and no tick is registered
    let blockhash = bank.last_blockhash();
    let transfer_1_to_3 =
        system_transaction::transfer(&keypair1, &keypair3.pubkey(), 1, bank.last_blockhash());
    let entry_1 = next_entry(&blockhash, 1, vec![transfer_1_to_3]);
    let transfer_2_to_4 =
        system_transaction::transfer(&keypair2, &keypair4.pubkey(), 1, bank.last_blockhash());
    let entry_2 = next_entry(&entry_1.hash, 1, vec![transfer_2_to_4]);

    assert_eq!(
        process_entries_for_tests(&bank, vec![entry_1, entry_2], true, None, None),
        Ok(())
    );
    // Each recipient got its lamport; blockhash unchanged (no tick).
    assert_eq!(bank.get_balance(&keypair3.pubkey()), 1);
    assert_eq!(bank.get_balance(&keypair4.pubkey()), 1);
    assert_eq!(bank.last_blockhash(), blockhash);
}
|
|
|
|
|
2019-09-17 15:11:29 -07:00
|
|
|
// Stress test: many entries of independent transfers, each entry also carrying
// one transaction guaranteed to fail (create_account over an existing account),
// must still process to Ok overall.
#[test]
fn test_process_entry_tx_random_execution_with_error() {
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(1_000_000_000);
    let bank = Arc::new(Bank::new_for_tests(&genesis_config));

    const NUM_TRANSFERS_PER_ENTRY: usize = 8;
    const NUM_TRANSFERS: usize = NUM_TRANSFERS_PER_ENTRY * 32;
    // large enough to scramble locks and results

    // First NUM_TRANSFERS keypairs are senders; the rest are receivers.
    let keypairs: Vec<_> = (0..NUM_TRANSFERS * 2).map(|_| Keypair::new()).collect();

    // give everybody one lamport
    for keypair in &keypairs {
        bank.transfer(1, &mint_keypair, &keypair.pubkey())
            .expect("funding failed");
    }
    // Mutable entry hash threaded through next_entry_mut below.
    let mut hash = bank.last_blockhash();

    // Pre-store this account so the create_account tx against it fails.
    let present_account_key = Keypair::new();
    let present_account = AccountSharedData::new(1, 10, &Pubkey::default());
    bank.store_account(&present_account_key.pubkey(), &present_account);

    let entries: Vec<_> = (0..NUM_TRANSFERS)
        .step_by(NUM_TRANSFERS_PER_ENTRY)
        .map(|i| {
            // Each entry: NUM_TRANSFERS_PER_ENTRY disjoint 1-lamport transfers.
            let mut transactions = (0..NUM_TRANSFERS_PER_ENTRY)
                .map(|j| {
                    system_transaction::transfer(
                        &keypairs[i + j],
                        &keypairs[i + j + NUM_TRANSFERS].pubkey(),
                        1,
                        bank.last_blockhash(),
                    )
                })
                .collect::<Vec<_>>();

            transactions.push(system_transaction::create_account(
                &mint_keypair,
                &present_account_key, // puts a TX error in results
                bank.last_blockhash(),
                1,
                0,
                &solana_sdk::pubkey::new_rand(),
            ));

            next_entry_mut(&mut hash, 0, transactions)
        })
        .collect();
    assert_eq!(
        process_entries_for_tests(&bank, entries, true, None, None),
        Ok(())
    );
}
|
|
|
|
|
2019-08-28 08:38:32 -07:00
|
|
|
// Stress test: one large entry of pairwise transfers among many accounts must
// leave a consistent final state regardless of internal execution order.
#[test]
fn test_process_entry_tx_random_execution_no_error() {
    // entropy multiplier should be big enough to provide sufficient entropy
    // but small enough to not take too much time while executing the test.
    let entropy_multiplier: usize = 25;
    let initial_lamports = 100;

    // number of accounts need to be in multiple of 4 for correct
    // execution of the test.
    let num_accounts = entropy_multiplier * 4;
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config((num_accounts + 1) as u64 * initial_lamports);

    let bank = Arc::new(Bank::new_for_tests(&genesis_config));

    let mut keypairs: Vec<Keypair> = vec![];

    // Create each account (0-lamport transfer) and fund it.
    for _ in 0..num_accounts {
        let keypair = Keypair::new();
        let create_account_tx = system_transaction::transfer(
            &mint_keypair,
            &keypair.pubkey(),
            0,
            bank.last_blockhash(),
        );
        assert_eq!(bank.process_transaction(&create_account_tx), Ok(()));
        assert_matches!(
            bank.transfer(initial_lamports, &mint_keypair, &keypair.pubkey()),
            Ok(_)
        );
        keypairs.push(keypair);
    }

    let mut tx_vector: Vec<Transaction> = vec![];

    // In every group of 4: odd-index accounts send their full initial balance
    // to the even-index account just before them.
    for i in (0..num_accounts).step_by(4) {
        tx_vector.append(&mut vec![
            system_transaction::transfer(
                &keypairs[i + 1],
                &keypairs[i].pubkey(),
                initial_lamports,
                bank.last_blockhash(),
            ),
            system_transaction::transfer(
                &keypairs[i + 3],
                &keypairs[i + 2].pubkey(),
                initial_lamports,
                bank.last_blockhash(),
            ),
        ]);
    }

    // Transfer lamports to each other
    let entry = next_entry(&bank.last_blockhash(), 1, tx_vector);
    assert_eq!(
        process_entries_for_tests(&bank, vec![entry], true, None, None),
        Ok(())
    );
    bank.squash();

    // Even number keypair should have balance of 2 * initial_lamports and
    // odd number keypair should have balance of 0, which proves
    // that even in case of random order of execution, overall state remains
    // consistent.
    for (i, keypair) in keypairs.iter().enumerate() {
        if i % 2 == 0 {
            assert_eq!(bank.get_balance(&keypair.pubkey()), 2 * initial_lamports);
        } else {
            assert_eq!(bank.get_balance(&keypair.pubkey()), 0);
        }
    }
}
|
|
|
|
|
2019-02-16 13:17:37 -08:00
|
|
|
// Verifies processing of two non-conflicting transaction entries interleaved
// with a tick entry, and that a subsequent transfer from a drained account
// fails with AccountNotFound.
#[test]
fn test_process_entries_2_entries_tick() {
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(1000);
    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
    let keypair1 = Keypair::new();
    let keypair2 = Keypair::new();
    let keypair3 = Keypair::new();
    let keypair4 = Keypair::new();

    //load accounts
    let tx = system_transaction::transfer(
        &mint_keypair,
        &keypair1.pubkey(),
        1,
        bank.last_blockhash(),
    );
    assert_eq!(bank.process_transaction(&tx), Ok(()));
    let tx = system_transaction::transfer(
        &mint_keypair,
        &keypair2.pubkey(),
        1,
        bank.last_blockhash(),
    );
    assert_eq!(bank.process_transaction(&tx), Ok(()));

    // Register ticks until the bank's blockhash actually changes, so the
    // saved `blockhash` below is distinct from the current one.
    let blockhash = bank.last_blockhash();
    while blockhash == bank.last_blockhash() {
        bank.register_tick(&Hash::default());
    }

    // ensure bank can process 2 entries that do not have a common account and tick is registered
    let tx = system_transaction::transfer(&keypair2, &keypair3.pubkey(), 1, blockhash);
    let entry_1 = next_entry(&blockhash, 1, vec![tx]);
    let tick = next_entry(&entry_1.hash, 1, vec![]);
    let tx =
        system_transaction::transfer(&keypair1, &keypair4.pubkey(), 1, bank.last_blockhash());
    let entry_2 = next_entry(&tick.hash, 1, vec![tx]);
    assert_eq!(
        process_entries_for_tests(
            &bank,
            vec![entry_1, tick, entry_2.clone()],
            true,
            None,
            None,
        ),
        Ok(())
    );
    assert_eq!(bank.get_balance(&keypair3.pubkey()), 1);
    assert_eq!(bank.get_balance(&keypair4.pubkey()), 1);

    // ensure that an error is returned for an empty account (keypair2)
    let tx =
        system_transaction::transfer(&keypair2, &keypair3.pubkey(), 1, bank.last_blockhash());
    let entry_3 = next_entry(&entry_2.hash, 1, vec![tx]);
    assert_eq!(
        process_entries_for_tests(&bank, vec![entry_3], true, None, None),
        Err(TransactionError::AccountNotFound)
    );
}
|
2019-04-11 11:51:34 -07:00
|
|
|
|
|
|
|
#[test]
fn test_update_transaction_statuses() {
    // Make sure instruction errors still update the signature cache
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(11_000);
    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
    let pubkey = solana_sdk::pubkey::new_rand();
    bank.transfer(1_000, &mint_keypair, &pubkey).unwrap();
    assert_eq!(bank.transaction_count(), 1);
    assert_eq!(bank.get_balance(&pubkey), 1_000);
    // Transfer more than the mint holds: the instruction fails, but the
    // signature should still land in the status cache.
    assert_eq!(
        bank.transfer(10_001, &mint_keypair, &pubkey),
        Err(TransactionError::InstructionError(
            0,
            SystemError::ResultWithNegativeLamports.into(),
        ))
    );
    // Re-submitting the identical transfer must now hit the status cache.
    assert_eq!(
        bank.transfer(10_001, &mint_keypair, &pubkey),
        Err(TransactionError::AlreadyProcessed)
    );

    // Make sure other errors don't update the signature cache
    let tx = system_transaction::transfer(&mint_keypair, &pubkey, 1000, Hash::default());
    let signature = tx.signatures[0];

    // Should fail with blockhash not found
    assert_eq!(
        bank.process_transaction(&tx).map(|_| signature),
        Err(TransactionError::BlockhashNotFound)
    );

    // Should fail again with blockhash not found
    assert_eq!(
        bank.process_transaction(&tx).map(|_| signature),
        Err(TransactionError::BlockhashNotFound)
    );
}
|
|
|
|
|
|
|
|
#[test]
fn test_update_transaction_statuses_fail() {
    // A transaction that fails with AccountInUse must not be recorded in the
    // status cache, so it can be replayed successfully later.
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(11_000);
    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
    let keypair1 = Keypair::new();
    let keypair2 = Keypair::new();
    let success_tx = system_transaction::transfer(
        &mint_keypair,
        &keypair1.pubkey(),
        1,
        bank.last_blockhash(),
    );
    let fail_tx = system_transaction::transfer(
        &mint_keypair,
        &keypair2.pubkey(),
        2,
        bank.last_blockhash(),
    );

    // Both transactions share `mint_keypair` in one entry, so the second one
    // conflicts on account locks.
    let entry_1_to_mint = next_entry(
        &bank.last_blockhash(),
        1,
        vec![
            success_tx,
            fail_tx.clone(), // will collide
        ],
    );

    assert_eq!(
        process_entries_for_tests(&bank, vec![entry_1_to_mint], false, None, None),
        Err(TransactionError::AccountInUse)
    );

    // Should not see duplicate signature error
    assert_eq!(bank.process_transaction(&fail_tx), Ok(()));
}
|
2019-05-01 09:27:13 -07:00
|
|
|
|
2020-12-04 21:14:59 -08:00
|
|
|
#[test]
fn test_halt_at_slot_starting_snapshot_root() {
    // When `halt_at_slot` points at a slot at or below a later blockstore
    // root, processing must stop there and keep that bank reachable.
    let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123);

    // Create roots at slots 0, 1
    let forks = tr(0) / tr(1);
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    blockstore.add_tree(
        forks,
        false,
        true,
        genesis_config.ticks_per_slot,
        genesis_config.hash(),
    );
    // A slice literal is sufficient here; `vec![..]` only added a needless
    // heap allocation (clippy::useless_vec).
    blockstore.set_roots([0, 1].iter()).unwrap();

    // Specify halting at slot 0
    let opts = ProcessOptions {
        run_verification: true,
        halt_at_slot: Some(0),
        accounts_db_test_hash_calculation: true,
        ..ProcessOptions::default()
    };
    let (bank_forks, ..) =
        test_process_blockstore(&genesis_config, &blockstore, &opts, &Arc::default());
    let bank_forks = bank_forks.read().unwrap();

    // Should be able to fetch slot 0 because we specified halting at slot 0, even
    // if there is a greater root at slot 1.
    assert!(bank_forks.get(0).is_some());
}
|
|
|
|
|
2019-08-13 17:20:14 -07:00
|
|
|
#[test]
fn test_process_blockstore_from_root() {
    // Verifies that replay starting from a non-genesis root prunes forks
    // below the newest blockstore root and leaves a consistent BankForks.
    let GenesisConfigInfo {
        mut genesis_config, ..
    } = create_genesis_config(123);

    let ticks_per_slot = 1;
    genesis_config.ticks_per_slot = ticks_per_slot;
    let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    /*
      Build a blockstore in the ledger with the following fork structure:

           slot 0 (all ticks)
             |
           slot 1 (all ticks)
             |
           slot 2 (all ticks)
             |
           slot 3 (all ticks) -> root
             |
           slot 4 (all ticks)
             |
           slot 5 (all ticks) -> root
             |
           slot 6 (all ticks)
    */
    let mut last_hash = blockhash;
    for i in 0..6 {
        last_hash =
            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, i + 1, i, last_hash);
    }
    blockstore.set_roots(vec![3, 5].iter()).unwrap();

    // Set up bank1
    let mut bank_forks = BankForks::new(Bank::new_for_tests(&genesis_config));
    let bank0 = bank_forks.get(0).unwrap();
    let opts = ProcessOptions {
        run_verification: true,
        accounts_db_test_hash_calculation: true,
        ..ProcessOptions::default()
    };
    let recyclers = VerifyRecyclers::default();
    process_bank_0(&bank0, &blockstore, &opts, &recyclers, None, None);
    let bank1 = bank_forks.insert(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
    // Replay slot 1's entries into bank1 before rooting it.
    confirm_full_slot(
        &blockstore,
        &bank1,
        &opts,
        &recyclers,
        &mut ConfirmationProgress::new(bank0.last_blockhash()),
        None,
        None,
        None,
        &mut ExecuteTimings::default(),
    )
    .unwrap();
    bank_forks.set_root(
        1,
        &solana_runtime::accounts_background_service::AbsRequestSender::default(),
        None,
    );

    let leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank1);

    // Test process_blockstore_from_root() from slot 1 onwards
    let bank_forks = RwLock::new(bank_forks);
    process_blockstore_from_root(
        &blockstore,
        &bank_forks,
        &leader_schedule_cache,
        &opts,
        None,
        None,
        None,
        &AbsRequestSender::default(),
    )
    .unwrap();

    let bank_forks = bank_forks.read().unwrap();

    // Blockstore root 5 should win; only slots 5 and 6 remain frozen.
    assert_eq!(frozen_bank_slots(&bank_forks), vec![5, 6]);
    assert_eq!(bank_forks.working_bank().slot(), 6);
    assert_eq!(bank_forks.root(), 5);

    // Verify the parents of the head of the fork
    assert_eq!(
        &bank_forks[6]
            .parents()
            .iter()
            .map(|bank| bank.slot())
            .collect::<Vec<_>>(),
        &[5]
    );

    // Check that bank forks has the correct banks
    verify_fork_infos(&bank_forks);
}
|
|
|
|
|
2019-05-01 09:27:13 -07:00
|
|
|
#[test]
#[ignore]
fn test_process_entries_stress() {
    // this test throws lots of rayon threads at process_entries()
    //  finds bugs in very low-layer stuff
    solana_logger::setup();
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(1_000_000_000);
    let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));

    const NUM_TRANSFERS_PER_ENTRY: usize = 8;
    const NUM_TRANSFERS: usize = NUM_TRANSFERS_PER_ENTRY * 32;

    // First half of the keypairs pay, second half receive (then refund).
    let keypairs: Vec<_> = (0..NUM_TRANSFERS * 2).map(|_| Keypair::new()).collect();

    // give everybody one lamport
    for keypair in &keypairs {
        bank.transfer(1, &mint_keypair, &keypair.pubkey())
            .expect("funding failed");
    }

    // Pre-existing account used below to force a create_account failure
    // so every entry batch contains at least one TX error.
    let present_account_key = Keypair::new();
    let present_account = AccountSharedData::new(1, 10, &Pubkey::default());
    bank.store_account(&present_account_key.pubkey(), &present_account);

    let mut i = 0;
    let mut hash = bank.last_blockhash();
    let mut root: Option<Arc<Bank>> = None;
    // NOTE: intentionally runs forever; the test is #[ignore]d and meant to
    // be run manually to shake out low-level concurrency bugs.
    loop {
        let entries: Vec<_> = (0..NUM_TRANSFERS)
            .step_by(NUM_TRANSFERS_PER_ENTRY)
            .map(|i| {
                next_entry_mut(&mut hash, 0, {
                    let mut transactions = (i..i + NUM_TRANSFERS_PER_ENTRY)
                        .map(|i| {
                            system_transaction::transfer(
                                &keypairs[i],
                                &keypairs[i + NUM_TRANSFERS].pubkey(),
                                1,
                                bank.last_blockhash(),
                            )
                        })
                        .collect::<Vec<_>>();

                    transactions.push(system_transaction::create_account(
                        &mint_keypair,
                        &present_account_key, // puts a TX error in results
                        bank.last_blockhash(),
                        100,
                        100,
                        &solana_sdk::pubkey::new_rand(),
                    ));
                    transactions
                })
            })
            .collect();
        info!("paying iteration {}", i);
        process_entries_for_tests(&bank, entries, true, None, None).expect("paying failed");

        // Reverse direction: receivers send the lamport back.
        let entries: Vec<_> = (0..NUM_TRANSFERS)
            .step_by(NUM_TRANSFERS_PER_ENTRY)
            .map(|i| {
                next_entry_mut(
                    &mut hash,
                    0,
                    (i..i + NUM_TRANSFERS_PER_ENTRY)
                        .map(|i| {
                            system_transaction::transfer(
                                &keypairs[i + NUM_TRANSFERS],
                                &keypairs[i].pubkey(),
                                1,
                                bank.last_blockhash(),
                            )
                        })
                        .collect::<Vec<_>>(),
                )
            })
            .collect();

        info!("refunding iteration {}", i);
        process_entries_for_tests(&bank, entries, true, None, None).expect("refunding failed");

        // advance to next block
        process_entries_for_tests(
            &bank,
            (0..bank.ticks_per_slot())
                .map(|_| next_entry_mut(&mut hash, 1, vec![]))
                .collect::<Vec<_>>(),
            true,
            None,
            None,
        )
        .expect("process ticks failed");

        // Periodically squash an older root to exercise pruning while replaying.
        if i % 16 == 0 {
            if let Some(old_root) = root {
                old_root.squash();
            }
            root = Some(bank.clone());
        }
        i += 1;

        // New child bank at a randomly advanced slot.
        bank = Arc::new(Bank::new_from_parent(
            &bank,
            &Pubkey::default(),
            bank.slot() + thread_rng().gen_range(1, 3),
        ));
    }
}
|
2019-11-08 11:29:41 -08:00
|
|
|
|
|
|
|
#[test]
fn test_process_ledger_ticks_ordering() {
    // A transaction that references a blockhash produced by ticks earlier in
    // the same batch must still be processable.
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(100);
    let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
    let genesis_hash = genesis_config.hash();
    let recipient = Keypair::new();

    // Simulate a full slot of virtual ticks; the hash of the final tick
    // becomes the slot's new blockhash.
    let mut slot_entries = create_ticks(genesis_config.ticks_per_slot, 1, genesis_hash);
    let new_blockhash = slot_entries.last().unwrap().hash;

    // Append a transfer that uses the brand-new blockhash, in the same batch
    // as the ticks that produced it.
    let transfer_tx =
        system_transaction::transfer(&mint_keypair, &recipient.pubkey(), 1, new_blockhash);
    slot_entries.push(next_entry(&new_blockhash, 1, vec![transfer_tx]));

    process_entries_for_tests(&bank0, slot_entries, true, None, None).unwrap();
    assert_eq!(bank0.get_balance(&recipient.pubkey()), 1)
}
|
2019-05-20 19:04:18 -07:00
|
|
|
|
|
|
|
fn get_epoch_schedule(
|
2019-11-08 20:56:57 -08:00
|
|
|
genesis_config: &GenesisConfig,
|
2019-12-05 18:41:29 -08:00
|
|
|
account_paths: Vec<PathBuf>,
|
2019-05-20 19:04:18 -07:00
|
|
|
) -> EpochSchedule {
|
2021-08-06 07:30:40 -07:00
|
|
|
let bank = Bank::new_with_paths_for_tests(
|
2021-06-18 06:34:46 -07:00
|
|
|
genesis_config,
|
2022-08-05 12:49:00 -07:00
|
|
|
Arc::<RuntimeConfig>::default(),
|
2020-12-31 18:06:03 -08:00
|
|
|
account_paths,
|
2021-05-10 07:22:48 -07:00
|
|
|
AccountSecondaryIndexes::default(),
|
2021-06-09 21:21:32 -07:00
|
|
|
AccountShrinkThreshold::default(),
|
2020-12-31 18:06:03 -08:00
|
|
|
);
|
2020-05-15 09:35:43 -07:00
|
|
|
*bank.epoch_schedule()
|
2019-05-20 19:04:18 -07:00
|
|
|
}
|
2019-08-13 17:20:14 -07:00
|
|
|
|
2020-05-06 08:24:59 -07:00
|
|
|
// Returns the slots of all frozen banks in `bank_forks`, in ascending order.
fn frozen_bank_slots(bank_forks: &BankForks) -> Vec<Slot> {
    let mut frozen: Vec<Slot> = bank_forks.frozen_banks().keys().copied().collect();
    frozen.sort_unstable();
    frozen
}
|
|
|
|
|
2019-08-13 17:20:14 -07:00
|
|
|
// Check that `bank_forks` contains all the ancestors and banks for each fork identified in
|
|
|
|
// `bank_forks_info`
|
2020-05-06 08:24:59 -07:00
|
|
|
fn verify_fork_infos(bank_forks: &BankForks) {
|
|
|
|
for slot in frozen_bank_slots(bank_forks) {
|
|
|
|
let head_bank = &bank_forks[slot];
|
2019-08-13 17:20:14 -07:00
|
|
|
let mut parents = head_bank.parents();
|
|
|
|
parents.push(head_bank.clone());
|
|
|
|
|
|
|
|
// Ensure the tip of each fork and all its parents are in the given bank_forks
|
|
|
|
for parent in parents {
|
|
|
|
let parent_bank = &bank_forks[parent.slot()];
|
|
|
|
assert_eq!(parent_bank.slot(), parent.slot());
|
|
|
|
assert!(parent_bank.is_frozen());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-07-21 13:06:49 -07:00
|
|
|
|
|
|
|
#[test]
fn test_get_first_error() {
    // get_first_error should report the earliest failed transaction in a
    // batch along with its signature.
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(1_000_000_000);
    let bank = Arc::new(Bank::new_for_tests(&genesis_config));

    let present_account_key = Keypair::new();
    let present_account = AccountSharedData::new(1, 10, &Pubkey::default());
    bank.store_account(&present_account_key.pubkey(), &present_account);

    let keypair = Keypair::new();

    // Create array of two transactions which throw different errors
    let account_not_found_tx = system_transaction::transfer(
        &keypair,
        &solana_sdk::pubkey::new_rand(),
        42,
        bank.last_blockhash(),
    );
    let account_not_found_sig = account_not_found_tx.signatures[0];
    let invalid_blockhash_tx = system_transaction::transfer(
        &mint_keypair,
        &solana_sdk::pubkey::new_rand(),
        42,
        Hash::default(),
    );
    let txs = vec![account_not_found_tx, invalid_blockhash_tx];
    let batch = bank.prepare_batch_for_tests(txs);
    let (
        TransactionResults {
            fee_collection_results,
            ..
        },
        _balances,
    ) = batch.bank().load_execute_and_commit_transactions(
        &batch,
        MAX_PROCESSING_AGE,
        false,
        false,
        false,
        false,
        &mut ExecuteTimings::default(),
        None,
    );
    // The AccountNotFound transaction is first in the batch, so it must be
    // the one reported.
    let (err, signature) = get_first_error(&batch, fee_collection_results).unwrap();
    assert_eq!(err.unwrap_err(), TransactionError::AccountNotFound);
    assert_eq!(signature, account_not_found_sig);
}
|
2020-07-29 23:17:40 -07:00
|
|
|
|
|
|
|
#[test]
fn test_replay_vote_sender() {
    // Only votes that execute successfully should be forwarded on the
    // replay-vote channel; malformed or invalid votes must be dropped.
    let validator_keypairs: Vec<_> =
        (0..10).map(|_| ValidatorVoteKeypairs::new_rand()).collect();
    let GenesisConfigInfo {
        genesis_config,
        voting_keypair: _,
        ..
    } = create_genesis_config_with_vote_accounts(
        1_000_000_000,
        &validator_keypairs,
        vec![100; validator_keypairs.len()],
    );
    let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
    bank0.freeze();

    let bank1 = Arc::new(Bank::new_from_parent(
        &bank0,
        &solana_sdk::pubkey::new_rand(),
        1,
    ));

    // The new blockhash is going to be the hash of the last tick in the block
    let bank_1_blockhash = bank1.last_blockhash();

    // Create an transaction that references the new blockhash, should still
    // be able to find the blockhash if we process transactions all in the same
    // batch
    let mut expected_successful_voter_pubkeys = BTreeSet::new();
    let vote_txs: Vec<_> = validator_keypairs
        .iter()
        .enumerate()
        .map(|(i, validator_keypairs)| {
            if i % 3 == 0 {
                // These votes are correct
                expected_successful_voter_pubkeys
                    .insert(validator_keypairs.vote_keypair.pubkey());
                vote_transaction::new_vote_transaction(
                    vec![0],
                    bank0.hash(),
                    bank_1_blockhash,
                    &validator_keypairs.node_keypair,
                    &validator_keypairs.vote_keypair,
                    &validator_keypairs.vote_keypair,
                    None,
                )
            } else if i % 3 == 1 {
                // These have the wrong authorized voter
                vote_transaction::new_vote_transaction(
                    vec![0],
                    bank0.hash(),
                    bank_1_blockhash,
                    &validator_keypairs.node_keypair,
                    &validator_keypairs.vote_keypair,
                    &Keypair::new(),
                    None,
                )
            } else {
                // These have an invalid vote for non-existent bank 2
                vote_transaction::new_vote_transaction(
                    vec![bank1.slot() + 1],
                    bank0.hash(),
                    bank_1_blockhash,
                    &validator_keypairs.node_keypair,
                    &validator_keypairs.vote_keypair,
                    &validator_keypairs.vote_keypair,
                    None,
                )
            }
        })
        .collect();
    let entry = next_entry(&bank_1_blockhash, 1, vote_txs);
    let (replay_vote_sender, replay_vote_receiver) = crossbeam_channel::unbounded();
    let _ =
        process_entries_for_tests(&bank1, vec![entry], true, None, Some(&replay_vote_sender));
    // Drain the channel: exactly the well-formed votes should have been sent.
    let successes: BTreeSet<Pubkey> = replay_vote_receiver
        .try_iter()
        .map(|(vote_pubkey, ..)| vote_pubkey)
        .collect();
    assert_eq!(successes, expected_successful_voter_pubkeys);
}
|
2020-08-20 21:56:25 -07:00
|
|
|
|
|
|
|
// Writes a slot to `blockstore` whose first entry carries `vote_tx`, followed
// by a full slot of ticks, signed by `slot_leader_keypair`.
//
// - `tx_landed_slot`: the slot being written (where the vote lands)
// - `parent_slot` / `parent_blockhash`: the slot this one chains from
fn make_slot_with_vote_tx(
    blockstore: &Blockstore,
    ticks_per_slot: u64,
    tx_landed_slot: Slot,
    parent_slot: Slot,
    parent_blockhash: &Hash,
    vote_tx: Transaction,
    slot_leader_keypair: &Arc<Keypair>,
) {
    // Add votes to `last_slot` so that `root` will be confirmed
    let vote_entry = next_entry(parent_blockhash, 1, vec![vote_tx]);
    let mut entries = create_ticks(ticks_per_slot, 0, vote_entry.hash);
    // The vote entry must come first so the ticks chain off its hash.
    entries.insert(0, vote_entry);
    blockstore
        .write_entries(
            tx_landed_slot,
            0,
            0,
            ticks_per_slot,
            Some(parent_slot),
            true,
            slot_leader_keypair,
            entries,
            0,
        )
        .unwrap();
}
|
|
|
|
|
2022-04-29 18:05:39 -07:00
|
|
|
// Shared driver for the supermajority-root tests: builds a forked ledger,
// replays it (optionally with a pre-set blockstore root), adds supermajority
// votes, and checks that replay advances the root accordingly.
fn run_test_process_blockstore_with_supermajority_root(
    blockstore_root: Option<Slot>,
    blockstore_access_type: AccessType,
) {
    solana_logger::setup();
    /*
        Build fork structure:

             slot 0
               |
             slot 1  <- (blockstore root)
             /    \
        slot 2    |
           |      |
        slot 4    |
                slot 5
                  |
             `expected_root_slot`
                 /    \
               ...   minor fork
               /
        `last_slot`
               |
        `really_last_slot`
    */
    let starting_fork_slot = 5;
    let mut main_fork = tr(starting_fork_slot);
    let mut main_fork_ref = main_fork.root_mut().get_mut();

    // Make enough slots to make a root slot > blockstore_root
    let expected_root_slot = starting_fork_slot + blockstore_root.unwrap_or(0);
    let really_expected_root_slot = expected_root_slot + 1;
    let last_main_fork_slot = expected_root_slot + MAX_LOCKOUT_HISTORY as u64 + 1;
    let really_last_main_fork_slot = last_main_fork_slot + 1;

    // Make `minor_fork`
    let last_minor_fork_slot = really_last_main_fork_slot + 1;
    let minor_fork = tr(last_minor_fork_slot);

    // Make 'main_fork`
    for slot in starting_fork_slot + 1..last_main_fork_slot {
        // Hang the minor fork off the slot right above `expected_root_slot`.
        if slot - 1 == expected_root_slot {
            main_fork_ref.push_front(minor_fork.clone());
        }
        main_fork_ref.push_front(tr(slot));
        main_fork_ref = main_fork_ref.front_mut().unwrap().get_mut();
    }
    let forks = tr(0) / (tr(1) / (tr(2) / (tr(4))) / main_fork);
    let validator_keypairs = ValidatorVoteKeypairs::new_rand();
    let GenesisConfigInfo { genesis_config, .. } =
        genesis_utils::create_genesis_config_with_vote_accounts(
            10_000,
            &[&validator_keypairs],
            vec![100],
        );
    let ticks_per_slot = genesis_config.ticks_per_slot();
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    blockstore.add_tree(forks, false, true, ticks_per_slot, genesis_config.hash());

    if let Some(blockstore_root) = blockstore_root {
        blockstore
            .set_roots(std::iter::once(&blockstore_root))
            .unwrap();
    }

    let opts = ProcessOptions {
        run_verification: true,
        accounts_db_test_hash_calculation: true,
        ..ProcessOptions::default()
    };

    // First replay: no votes yet, root should stay where the blockstore says.
    let (bank_forks, ..) = test_process_blockstore_with_custom_options(
        &genesis_config,
        &blockstore,
        &opts,
        blockstore_access_type.clone(),
    );
    let bank_forks = bank_forks.read().unwrap();

    // prepare to add votes
    let last_vote_bank_hash = bank_forks.get(last_main_fork_slot - 1).unwrap().hash();
    let last_vote_blockhash = bank_forks
        .get(last_main_fork_slot - 1)
        .unwrap()
        .last_blockhash();
    let slots: Vec<_> = (expected_root_slot..last_main_fork_slot).collect();
    let vote_tx = vote_transaction::new_vote_transaction(
        slots,
        last_vote_bank_hash,
        last_vote_blockhash,
        &validator_keypairs.node_keypair,
        &validator_keypairs.vote_keypair,
        &validator_keypairs.vote_keypair,
        None,
    );

    // Add votes to `last_slot` so that `root` will be confirmed
    let leader_keypair = Arc::new(validator_keypairs.node_keypair);
    make_slot_with_vote_tx(
        &blockstore,
        ticks_per_slot,
        last_main_fork_slot,
        last_main_fork_slot - 1,
        &last_vote_blockhash,
        vote_tx,
        &leader_keypair,
    );

    // Second replay: votes now give `expected_root_slot` supermajority.
    let (bank_forks, ..) = test_process_blockstore_with_custom_options(
        &genesis_config,
        &blockstore,
        &opts,
        blockstore_access_type.clone(),
    );
    let bank_forks = bank_forks.read().unwrap();

    assert_eq!(bank_forks.root(), expected_root_slot);
    assert_eq!(
        bank_forks.frozen_banks().len() as u64,
        last_minor_fork_slot - really_expected_root_slot + 1
    );

    // Minor fork at `last_main_fork_slot + 1` was above the `expected_root_slot`
    // so should not have been purged
    //
    // Fork at slot 2 was purged because it was below the `expected_root_slot`
    for slot in 0..=last_minor_fork_slot {
        // this slot will be created below
        if slot == really_last_main_fork_slot {
            continue;
        }
        if slot >= expected_root_slot {
            let bank = bank_forks.get(slot).unwrap();
            assert_eq!(bank.slot(), slot);
            assert!(bank.is_frozen());
        } else {
            assert!(bank_forks.get(slot).is_none());
        }
    }

    // really prepare to add votes
    let last_vote_bank_hash = bank_forks.get(last_main_fork_slot).unwrap().hash();
    let last_vote_blockhash = bank_forks
        .get(last_main_fork_slot)
        .unwrap()
        .last_blockhash();
    let slots: Vec<_> = vec![last_main_fork_slot];
    let vote_tx = vote_transaction::new_vote_transaction(
        slots,
        last_vote_bank_hash,
        last_vote_blockhash,
        &leader_keypair,
        &validator_keypairs.vote_keypair,
        &validator_keypairs.vote_keypair,
        None,
    );

    // Add votes to `really_last_slot` so that `root` will be confirmed again
    make_slot_with_vote_tx(
        &blockstore,
        ticks_per_slot,
        really_last_main_fork_slot,
        last_main_fork_slot,
        &last_vote_blockhash,
        vote_tx,
        &leader_keypair,
    );

    // Third replay: root advances once more to `really_expected_root_slot`.
    let (bank_forks, ..) = test_process_blockstore_with_custom_options(
        &genesis_config,
        &blockstore,
        &opts,
        blockstore_access_type,
    );
    let bank_forks = bank_forks.read().unwrap();

    assert_eq!(bank_forks.root(), really_expected_root_slot);
}
|
|
|
|
|
|
|
|
#[test]
fn test_process_blockstore_with_supermajority_root_without_blockstore_root() {
    // No pre-set blockstore root, primary (read-write) blockstore access.
    run_test_process_blockstore_with_supermajority_root(None, AccessType::Primary);
}
|
|
|
|
|
|
|
|
#[test]
fn test_process_blockstore_with_supermajority_root_without_blockstore_root_secondary_access() {
    // No pre-set blockstore root, secondary (read-only) blockstore access.
    run_test_process_blockstore_with_supermajority_root(None, AccessType::Secondary);
}
|
|
|
|
|
|
|
|
#[test]
fn test_process_blockstore_with_supermajority_root_with_blockstore_root() {
    // `Some(1)` supplies a pre-existing blockstore root for the scenario;
    // primary (read-write) access.
    run_test_process_blockstore_with_supermajority_root(Some(1), AccessType::Primary)
}
|
2020-08-25 09:49:15 -07:00
|
|
|
|
2020-09-17 21:33:08 -07:00
|
|
|
#[test]
// `vote_state` is built via `default()` and then has `root_slot` reassigned,
// which trips this clippy lint; the reassignment is intentional here.
#[allow(clippy::field_reassign_with_default)]
fn test_supermajority_root_from_vote_accounts() {
    // Helper: turn a list of (root slot, stake) pairs into a
    // `VoteAccountsHashMap`. Each pair becomes a vote account under a random
    // pubkey whose serialized `VoteState` carries the given root slot, paired
    // with the given stake amount.
    let convert_to_vote_accounts = |roots_stakes: Vec<(Slot, u64)>| -> VoteAccountsHashMap {
        roots_stakes
            .into_iter()
            .map(|(root, stake)| {
                let mut vote_state = VoteState::default();
                vote_state.root_slot = Some(root);
                let mut vote_account =
                    AccountSharedData::new(1, VoteState::size_of(), &solana_vote_program::id());
                let versioned = VoteStateVersions::new_current(vote_state);
                VoteState::serialize(&versioned, vote_account.data_as_mut_slice()).unwrap();
                (
                    solana_sdk::pubkey::new_rand(),
                    (stake, VoteAccount::try_from(vote_account).unwrap()),
                )
            })
            .collect()
    };

    let total_stake = 10;
    let slot = 100;

    // Supermajority root should be None: no vote accounts at all.
    assert!(
        supermajority_root_from_vote_accounts(slot, total_stake, &HashMap::default()).is_none()
    );

    // Supermajority root should be None: only 4 of 10 total stake is
    // represented, not enough for any root to reach a supermajority.
    let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 1)];
    let accounts = convert_to_vote_accounts(roots_stakes);
    assert!(supermajority_root_from_vote_accounts(slot, total_stake, &accounts).is_none());

    // Supermajority root should be 4, has 7/10 of the stake.
    // Stake rooted at >= 8 is only 6/10; stake rooted at >= 4 is 7/10.
    let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 5)];
    let accounts = convert_to_vote_accounts(roots_stakes);
    assert_eq!(
        supermajority_root_from_vote_accounts(slot, total_stake, &accounts).unwrap(),
        4
    );

    // Supermajority root should be 8, it has 7/10 of the stake
    // (1 + 6 rooted at slot 8).
    let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 6)];
    let accounts = convert_to_vote_accounts(roots_stakes);
    assert_eq!(
        supermajority_root_from_vote_accounts(slot, total_stake, &accounts).unwrap(),
        8
    );
}
|
2022-04-15 09:30:20 -07:00
|
|
|
|
|
|
|
fn confirm_slot_entries_for_tests(
|
|
|
|
bank: &Arc<Bank>,
|
|
|
|
slot_entries: Vec<Entry>,
|
|
|
|
slot_full: bool,
|
|
|
|
prev_entry_hash: Hash,
|
|
|
|
) -> result::Result<(), BlockstoreProcessorError> {
|
|
|
|
confirm_slot_entries(
|
|
|
|
bank,
|
|
|
|
(slot_entries, 0, slot_full),
|
|
|
|
&mut ConfirmationTiming::default(),
|
|
|
|
&mut ConfirmationProgress::new(prev_entry_hash),
|
|
|
|
false,
|
|
|
|
None,
|
|
|
|
None,
|
2023-05-10 16:20:51 -07:00
|
|
|
None,
|
2022-04-15 09:30:20 -07:00
|
|
|
&VerifyRecyclers::default(),
|
2022-07-11 08:53:18 -07:00
|
|
|
None,
|
2022-08-31 06:00:55 -07:00
|
|
|
&PrioritizationFeeCache::new(0u64),
|
2022-04-15 09:30:20 -07:00
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
fn test_confirm_slot_entries_without_fix() {
    // Exercises `confirm_slot_entries` with the `fix_recent_blockhashes`
    // feature deactivated (see `deactivate_feature` below). In this mode,
    // processing a skipped slot's entries registers that slot's hash as the
    // bank's `last_blockhash`, and transactions may reference it — contrast
    // with `test_confirm_slot_entries_with_fix`.
    const HASHES_PER_TICK: u64 = 10;
    const TICKS_PER_SLOT: u64 = 2;

    let collector_id = Pubkey::new_unique();

    let GenesisConfigInfo {
        mut genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(10_000);
    genesis_config.poh_config.hashes_per_tick = Some(HASHES_PER_TICK);
    genesis_config.ticks_per_slot = TICKS_PER_SLOT;
    let genesis_hash = genesis_config.hash();

    let mut slot_0_bank = Bank::new_for_tests(&genesis_config);
    // Deactivate the fix so legacy (pre-fix) blockhash behavior is tested.
    slot_0_bank.deactivate_feature(&feature_set::fix_recent_blockhashes::id());
    let slot_0_bank = Arc::new(slot_0_bank);
    assert_eq!(slot_0_bank.slot(), 0);
    assert_eq!(slot_0_bank.tick_height(), 0);
    assert_eq!(slot_0_bank.max_tick_height(), 2);
    assert_eq!(slot_0_bank.last_blockhash(), genesis_hash);
    assert_eq!(slot_0_bank.get_hash_age(&genesis_hash), Some(0));

    // Confirm a full slot 0 of pure ticks: tick height reaches the max and
    // slot 0's final entry hash becomes the bank's last blockhash.
    let slot_0_entries = entry::create_ticks(TICKS_PER_SLOT, HASHES_PER_TICK, genesis_hash);
    let slot_0_hash = slot_0_entries.last().unwrap().hash;
    confirm_slot_entries_for_tests(&slot_0_bank, slot_0_entries, true, genesis_hash).unwrap();
    assert_eq!(slot_0_bank.tick_height(), slot_0_bank.max_tick_height());
    assert_eq!(slot_0_bank.last_blockhash(), slot_0_hash);
    assert_eq!(slot_0_bank.get_hash_age(&genesis_hash), Some(1));
    assert_eq!(slot_0_bank.get_hash_age(&slot_0_hash), Some(0));

    // Create slot 2 directly on top of slot 0, skipping slot 1.
    let slot_2_bank = Arc::new(Bank::new_from_parent(&slot_0_bank, &collector_id, 2));
    assert_eq!(slot_2_bank.slot(), 2);
    assert_eq!(slot_2_bank.tick_height(), 2);
    assert_eq!(slot_2_bank.max_tick_height(), 6);
    assert_eq!(slot_2_bank.last_blockhash(), slot_0_hash);

    // Replay the skipped slot 1's ticks into the slot 2 bank (slot not full).
    // Without the fix, slot 1's hash is registered as the last blockhash.
    let slot_1_entries = entry::create_ticks(TICKS_PER_SLOT, HASHES_PER_TICK, slot_0_hash);
    let slot_1_hash = slot_1_entries.last().unwrap().hash;
    confirm_slot_entries_for_tests(&slot_2_bank, slot_1_entries, false, slot_0_hash).unwrap();
    assert_eq!(slot_2_bank.tick_height(), 4);
    assert_eq!(slot_2_bank.last_blockhash(), slot_1_hash);
    assert_eq!(slot_2_bank.get_hash_age(&genesis_hash), Some(2));
    assert_eq!(slot_2_bank.get_hash_age(&slot_0_hash), Some(1));
    assert_eq!(slot_2_bank.get_hash_age(&slot_1_hash), Some(0));

    // Check that slot 2 transactions can use any previous slot hash, including the
    // hash for slot 1 which is just ticks.
    let slot_2_entries = {
        let to_pubkey = Pubkey::new_unique();
        let mut prev_entry_hash = slot_1_hash;
        // Track how many PoH hashes remain before the next tick boundary so
        // the entries below add up to exactly HASHES_PER_TICK per tick.
        let mut remaining_entry_hashes = HASHES_PER_TICK;
        // One transfer per recent blockhash (genesis, slot 0, slot 1); each
        // entry consumes one hash.
        let mut entries: Vec<Entry> = [genesis_hash, slot_0_hash, slot_1_hash]
            .into_iter()
            .map(|recent_hash| {
                let tx =
                    system_transaction::transfer(&mint_keypair, &to_pubkey, 1, recent_hash);
                remaining_entry_hashes = remaining_entry_hashes.checked_sub(1).unwrap();
                next_entry_mut(&mut prev_entry_hash, 1, vec![tx])
            })
            .collect();

        // First tick: spend the remaining hashes of this tick's budget.
        entries.push(next_entry_mut(
            &mut prev_entry_hash,
            remaining_entry_hashes,
            vec![],
        ));
        // Second (final) tick for the slot.
        entries.push(next_entry_mut(
            &mut prev_entry_hash,
            HASHES_PER_TICK,
            vec![],
        ));
        entries
    };
    let slot_2_hash = slot_2_entries.last().unwrap().hash;
    confirm_slot_entries_for_tests(&slot_2_bank, slot_2_entries, true, slot_1_hash).unwrap();
    assert_eq!(slot_2_bank.tick_height(), slot_2_bank.max_tick_height());
    assert_eq!(slot_2_bank.last_blockhash(), slot_2_hash);
    assert_eq!(slot_2_bank.get_hash_age(&genesis_hash), Some(3));
    assert_eq!(slot_2_bank.get_hash_age(&slot_0_hash), Some(2));
    assert_eq!(slot_2_bank.get_hash_age(&slot_1_hash), Some(1));
    assert_eq!(slot_2_bank.get_hash_age(&slot_2_hash), Some(0));
}
|
2022-04-21 06:05:29 -07:00
|
|
|
|
2022-06-23 12:37:38 -07:00
|
|
|
#[test]
fn test_confirm_slot_entries_progress_num_txs_indexes() {
    // Verifies that successive `confirm_slot_entries` calls (1) accumulate
    // `progress.num_txs` across calls and (2) report globally increasing
    // transaction indexes through the `TransactionStatusSender` channel.
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(100 * LAMPORTS_PER_SOL);
    let genesis_hash = genesis_config.hash();
    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
    let mut timing = ConfirmationTiming::default();
    let mut progress = ConfirmationProgress::new(genesis_hash);
    let amount = genesis_config.rent.minimum_balance(0);
    let keypair1 = Keypair::new();
    let keypair2 = Keypair::new();
    let keypair3 = Keypair::new();
    let keypair4 = Keypair::new();
    // Fund the two sender keypairs used by the test transactions below.
    bank.transfer(LAMPORTS_PER_SOL, &mint_keypair, &keypair1.pubkey())
        .unwrap();
    bank.transfer(LAMPORTS_PER_SOL, &mint_keypair, &keypair2.pubkey())
        .unwrap();

    // Capture transaction-status batches via an in-process channel.
    let (transaction_status_sender, transaction_status_receiver) =
        crossbeam_channel::unbounded();
    let transaction_status_sender = TransactionStatusSender {
        sender: transaction_status_sender,
    };

    // First batch: two transfers in a single entry.
    let blockhash = bank.last_blockhash();
    let tx1 = system_transaction::transfer(
        &keypair1,
        &keypair3.pubkey(),
        amount,
        bank.last_blockhash(),
    );
    let tx2 = system_transaction::transfer(
        &keypair2,
        &keypair4.pubkey(),
        amount,
        bank.last_blockhash(),
    );
    let entry = next_entry(&blockhash, 1, vec![tx1, tx2]);
    let new_hash = entry.hash;

    confirm_slot_entries(
        &bank,
        (vec![entry], 0, false),
        &mut timing,
        &mut progress,
        false,
        Some(&transaction_status_sender),
        None,
        None,
        &VerifyRecyclers::default(),
        None,
        &PrioritizationFeeCache::new(0u64),
    )
    .unwrap();
    assert_eq!(progress.num_txs, 2);
    let batch = transaction_status_receiver.recv().unwrap();
    if let TransactionStatusMessage::Batch(batch) = batch {
        assert_eq!(batch.transactions.len(), 2);
        assert_eq!(batch.transaction_indexes.len(), 2);
        // Assert contains instead of the actual vec due to randomize
        assert!(batch.transaction_indexes.contains(&0));
        assert!(batch.transaction_indexes.contains(&1));
    } else {
        panic!("batch should have been sent");
    }

    // Second batch: three transfers. `amount + 1` makes these distinct from
    // the first batch's transfers (which used the same blockhash).
    let tx1 = system_transaction::transfer(
        &keypair1,
        &keypair3.pubkey(),
        amount + 1,
        bank.last_blockhash(),
    );
    let tx2 = system_transaction::transfer(
        &keypair2,
        &keypair4.pubkey(),
        amount + 1,
        bank.last_blockhash(),
    );
    let tx3 = system_transaction::transfer(
        &mint_keypair,
        &Pubkey::new_unique(),
        amount,
        bank.last_blockhash(),
    );
    let entry = next_entry(&new_hash, 1, vec![tx1, tx2, tx3]);

    confirm_slot_entries(
        &bank,
        (vec![entry], 0, false),
        &mut timing,
        &mut progress,
        false,
        Some(&transaction_status_sender),
        None,
        None,
        &VerifyRecyclers::default(),
        None,
        &PrioritizationFeeCache::new(0u64),
    )
    .unwrap();
    // num_txs accumulates: 2 from the first call + 3 from this one.
    assert_eq!(progress.num_txs, 5);
    let batch = transaction_status_receiver.recv().unwrap();
    if let TransactionStatusMessage::Batch(batch) = batch {
        assert_eq!(batch.transactions.len(), 3);
        assert_eq!(batch.transaction_indexes.len(), 3);
        // Assert contains instead of the actual vec due to randomize
        // Indexes continue from the previous batch (2, 3, 4).
        assert!(batch.transaction_indexes.contains(&2));
        assert!(batch.transaction_indexes.contains(&3));
        assert!(batch.transaction_indexes.contains(&4));
    } else {
        panic!("batch should have been sent");
    }
}
|
|
|
|
|
2022-04-21 06:05:29 -07:00
|
|
|
#[test]
fn test_rebatch_transactions() {
    // Verifies that `rebatch_transactions` carves a sub-batch out of an
    // existing locked batch without taking over the original batch's account
    // locks, and that it slices `transaction_indexes` with inclusive
    // start/end bounds (evidenced by the (0, 0) -> [42] and (1, 2) ->
    // [43, 44] assertions below).
    let dummy_leader_pubkey = solana_sdk::pubkey::new_rand();
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config_with_leader(500, &dummy_leader_pubkey, 100);
    let bank = Arc::new(Bank::new_for_tests(&genesis_config));

    let pubkey = solana_sdk::pubkey::new_rand();
    let keypair2 = Keypair::new();
    let pubkey2 = solana_sdk::pubkey::new_rand();
    let keypair3 = Keypair::new();
    let pubkey3 = solana_sdk::pubkey::new_rand();

    // Three independent transfers so the batch has three lockable entries.
    let txs = vec![
        SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer(
            &mint_keypair,
            &pubkey,
            1,
            genesis_config.hash(),
        )),
        SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer(
            &keypair2,
            &pubkey2,
            1,
            genesis_config.hash(),
        )),
        SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer(
            &keypair3,
            &pubkey3,
            1,
            genesis_config.hash(),
        )),
    ];

    let batch = bank.prepare_sanitized_batch(&txs);
    assert!(batch.needs_unlock());
    // Arbitrary non-zero indexes to confirm they are carried through, not
    // regenerated, by rebatching.
    let transaction_indexes = vec![42, 43, 44];

    // Sub-batch covering only the first transaction (inclusive 0..=0).
    let batch2 = rebatch_transactions(
        batch.lock_results(),
        &bank,
        batch.sanitized_transactions(),
        0,
        0,
        &transaction_indexes,
    );
    // The original batch still owns the locks; the rebatched one must not
    // try to unlock them on drop.
    assert!(batch.needs_unlock());
    assert!(!batch2.batch.needs_unlock());
    assert_eq!(batch2.transaction_indexes, vec![42]);

    // Sub-batch covering the remaining two transactions (inclusive 1..=2).
    let batch3 = rebatch_transactions(
        batch.lock_results(),
        &bank,
        batch.sanitized_transactions(),
        1,
        2,
        &transaction_indexes,
    );
    assert!(!batch3.batch.needs_unlock());
    assert_eq!(batch3.transaction_indexes, vec![43, 44]);
}
|
|
|
|
|
|
|
|
#[test]
fn test_confirm_slot_entries_with_fix() {
    // Counterpart to `test_confirm_slot_entries_without_fix`: with the
    // `fix_recent_blockhashes` feature active (the default for
    // `Bank::new_for_tests` here — no `deactivate_feature` call), replaying a
    // skipped slot's entries does NOT register that slot's hash, so
    // transactions referencing it fail with `BlockhashNotFound`.
    const HASHES_PER_TICK: u64 = 10;
    const TICKS_PER_SLOT: u64 = 2;

    let collector_id = Pubkey::new_unique();

    let GenesisConfigInfo {
        mut genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(10_000);
    genesis_config.poh_config.hashes_per_tick = Some(HASHES_PER_TICK);
    genesis_config.ticks_per_slot = TICKS_PER_SLOT;
    let genesis_hash = genesis_config.hash();

    let slot_0_bank = Arc::new(Bank::new_for_tests(&genesis_config));
    assert_eq!(slot_0_bank.slot(), 0);
    assert_eq!(slot_0_bank.tick_height(), 0);
    assert_eq!(slot_0_bank.max_tick_height(), 2);
    assert_eq!(slot_0_bank.last_blockhash(), genesis_hash);
    assert_eq!(slot_0_bank.get_hash_age(&genesis_hash), Some(0));

    // Confirm a full slot 0 of pure ticks; its final hash becomes the
    // bank's last blockhash.
    let slot_0_entries = entry::create_ticks(TICKS_PER_SLOT, HASHES_PER_TICK, genesis_hash);
    let slot_0_hash = slot_0_entries.last().unwrap().hash;
    confirm_slot_entries_for_tests(&slot_0_bank, slot_0_entries, true, genesis_hash).unwrap();
    assert_eq!(slot_0_bank.tick_height(), slot_0_bank.max_tick_height());
    assert_eq!(slot_0_bank.last_blockhash(), slot_0_hash);
    assert_eq!(slot_0_bank.get_hash_age(&genesis_hash), Some(1));
    assert_eq!(slot_0_bank.get_hash_age(&slot_0_hash), Some(0));

    // Create slot 2 directly on top of slot 0, skipping slot 1.
    let slot_2_bank = Arc::new(Bank::new_from_parent(&slot_0_bank, &collector_id, 2));
    assert_eq!(slot_2_bank.slot(), 2);
    assert_eq!(slot_2_bank.tick_height(), 2);
    assert_eq!(slot_2_bank.max_tick_height(), 6);
    assert_eq!(slot_2_bank.last_blockhash(), slot_0_hash);

    // Replay the skipped slot 1's ticks (slot not full). With the fix, the
    // last blockhash stays slot 0's hash and slot 1's hash is never
    // registered — unlike the "without fix" test.
    let slot_1_entries = entry::create_ticks(TICKS_PER_SLOT, HASHES_PER_TICK, slot_0_hash);
    let slot_1_hash = slot_1_entries.last().unwrap().hash;
    confirm_slot_entries_for_tests(&slot_2_bank, slot_1_entries, false, slot_0_hash).unwrap();
    assert_eq!(slot_2_bank.tick_height(), 4);
    assert_eq!(slot_2_bank.last_blockhash(), slot_0_hash);
    assert_eq!(slot_2_bank.get_hash_age(&genesis_hash), Some(1));
    assert_eq!(slot_2_bank.get_hash_age(&slot_0_hash), Some(0));

    // Each case submits a transfer with the given recent blockhash and
    // expects the given confirmation outcome.
    struct TestCase {
        recent_blockhash: Hash,
        expected_result: result::Result<(), BlockstoreProcessorError>,
    }

    let test_cases = [
        // Slot 1 was skipped and (with the fix) its hash was not registered,
        // so a transaction referencing it must be rejected.
        TestCase {
            recent_blockhash: slot_1_hash,
            expected_result: Err(BlockstoreProcessorError::InvalidTransaction(
                TransactionError::BlockhashNotFound,
            )),
        },
        // Slot 0 completed, so its hash is a valid recent blockhash.
        TestCase {
            recent_blockhash: slot_0_hash,
            expected_result: Ok(()),
        },
    ];

    // Check that slot 2 transactions can only use hashes for completed blocks.
    for TestCase {
        recent_blockhash,
        expected_result,
    } in test_cases
    {
        let slot_2_entries = {
            let to_pubkey = Pubkey::new_unique();
            let mut prev_entry_hash = slot_1_hash;
            // Hash budget remaining in the current tick; the tx entry below
            // consumes one hash.
            let mut remaining_entry_hashes = HASHES_PER_TICK;

            let tx =
                system_transaction::transfer(&mint_keypair, &to_pubkey, 1, recent_blockhash);
            remaining_entry_hashes = remaining_entry_hashes.checked_sub(1).unwrap();
            let mut entries = vec![next_entry_mut(&mut prev_entry_hash, 1, vec![tx])];

            // First tick: spend the rest of this tick's hash budget.
            entries.push(next_entry_mut(
                &mut prev_entry_hash,
                remaining_entry_hashes,
                vec![],
            ));
            // Second (final) tick for the slot.
            entries.push(next_entry_mut(
                &mut prev_entry_hash,
                HASHES_PER_TICK,
                vec![],
            ));

            entries
        };

        let slot_2_hash = slot_2_entries.last().unwrap().hash;
        let result =
            confirm_slot_entries_for_tests(&slot_2_bank, slot_2_entries, true, slot_1_hash);
        match (result, expected_result) {
            (Ok(()), Ok(())) => {
                assert_eq!(slot_2_bank.tick_height(), slot_2_bank.max_tick_height());
                assert_eq!(slot_2_bank.last_blockhash(), slot_2_hash);
                assert_eq!(slot_2_bank.get_hash_age(&genesis_hash), Some(2));
                assert_eq!(slot_2_bank.get_hash_age(&slot_0_hash), Some(1));
                assert_eq!(slot_2_bank.get_hash_age(&slot_2_hash), Some(0));
            }
            (
                Err(BlockstoreProcessorError::InvalidTransaction(err)),
                Err(BlockstoreProcessorError::InvalidTransaction(expected_err)),
            ) => {
                assert_eq!(err, expected_err);
            }
            (result, expected_result) => {
                panic!("actual result {result:?} != expected result {expected_result:?}");
            }
        }
    }
}
|
2019-02-16 02:26:21 -08:00
|
|
|
}
|