From ee0a40937e191e4e5bcbeb9d1691bd0339db7986 Mon Sep 17 00:00:00 2001
From: Nicholas Clarke
Date: Mon, 11 Jul 2022 08:53:18 -0700
Subject: [PATCH] Add validator argument log_messages_bytes_limit to change the
 log truncation limit

Add a new CLI argument, --log-messages-bytes-limit, to solana-validator to
control how many bytes of program logs can be written before they are
truncated.
---
 banking-bench/src/main.rs                  |  8 +++++
 core/benches/banking_stage.rs              |  2 ++
 core/src/banking_stage.rs                  | 35 ++++++++++++++++++++++
 core/src/replay_stage.rs                   |  7 +++++
 core/src/tpu.rs                            |  2 ++
 core/src/tvu.rs                            |  3 ++
 core/src/validator.rs                      |  2 ++
 ledger/src/blockstore_processor.rs         | 20 +++++++++++++
 multinode-demo/bootstrap-validator.sh      |  3 ++
 programs/bpf/tests/programs.rs             |  2 ++
 runtime/src/bank.rs                        | 19 +++++++++++-
 runtime/src/runtime_config.rs              |  1 +
 test-validator/src/lib.rs                  |  3 ++
 validator/src/bin/solana-test-validator.rs |  9 ++++++
 validator/src/main.rs                      |  9 ++++++
 15 files changed, 124 insertions(+), 1 deletion(-)

diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs
index a3c9019ea9..ced15423aa 100644
--- a/banking-bench/src/main.rs
+++ b/banking-bench/src/main.rs
@@ -213,6 +213,12 @@ fn main() {
                 .takes_value(true)
                 .help("Number of threads to use in the banking stage"),
         )
+        .arg(
+            Arg::new("log_messages_bytes_limit")
+                .long("log-messages-bytes-limit")
+                .takes_value(true)
+                .help("Maximum number of bytes written to the program log before truncation"),
+        )
         .arg(
             Arg::new("tpu_use_quic")
                 .long("tpu-use-quic")
@@ -236,6 +242,7 @@ fn main() {
     let write_lock_contention = matches
         .value_of_t::<WriteLockContention>("write_lock_contention")
         .unwrap_or(WriteLockContention::None);
+    let log_messages_bytes_limit = matches.value_of_t::<usize>("log_messages_bytes_limit").ok();

     let mint_total = 1_000_000_000_000;
     let GenesisConfigInfo {
@@ -356,6 +363,7 @@ fn main() {
             None,
             replay_vote_sender,
             Arc::new(RwLock::new(CostModel::default())),
+            log_messages_bytes_limit,
             Arc::new(connection_cache),
             bank_forks.clone(),
         );
diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs
index fc01bdf1ac..fb506be8c5 100644
--- a/core/benches/banking_stage.rs
+++ b/core/benches/banking_stage.rs
@@ -99,6 +99,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
             &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1),
             &mut LeaderSlotMetricsTracker::new(0),
             10,
+            None,
         );
     });

@@ -232,6 +233,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
         None,
         s,
         Arc::new(RwLock::new(CostModel::default())),
+        None,
         Arc::new(ConnectionCache::default()),
         bank_forks,
     );
diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs
index f366c46f6b..1ddb82dcbc 100644
--- a/core/src/banking_stage.rs
+++ b/core/src/banking_stage.rs
@@ -396,6 +396,7 @@ impl BankingStage {
         transaction_status_sender: Option<TransactionStatusSender>,
         gossip_vote_sender: ReplayVoteSender,
         cost_model: Arc<RwLock<CostModel>>,
+        log_messages_bytes_limit: Option<usize>,
         connection_cache: Arc<ConnectionCache>,
         bank_forks: Arc<RwLock<BankForks>>,
     ) -> Self {
@@ -409,6 +410,7 @@ impl BankingStage {
             transaction_status_sender,
             gossip_vote_sender,
             cost_model,
+            log_messages_bytes_limit,
             connection_cache,
             bank_forks,
         )
@@ -425,6 +427,7 @@ impl BankingStage {
         transaction_status_sender: Option<TransactionStatusSender>,
         gossip_vote_sender: ReplayVoteSender,
         cost_model: Arc<RwLock<CostModel>>,
+        log_messages_bytes_limit: Option<usize>,
         connection_cache: Arc<ConnectionCache>,
         bank_forks: Arc<RwLock<BankForks>>,
     ) -> Self {
@@ -475,6 +478,7 @@ impl BankingStage {
                         gossip_vote_sender,
                         &data_budget,
                         cost_model,
+                        log_messages_bytes_limit,
                         connection_cache,
                         &bank_forks,
                     );
@@ -645,6 +649,7 @@ impl BankingStage {
         qos_service: &QosService,
         slot_metrics_tracker:
&mut LeaderSlotMetricsTracker, num_packets_to_process_per_iteration: usize, + log_messages_bytes_limit: Option, ) { let mut rebuffered_packet_count = 0; let mut consumed_buffered_packets_count = 0; @@ -692,6 +697,7 @@ impl BankingStage { banking_stage_stats, qos_service, slot_metrics_tracker, + log_messages_bytes_limit ), "process_packets_transactions", ); @@ -873,6 +879,7 @@ impl BankingStage { data_budget: &DataBudget, qos_service: &QosService, slot_metrics_tracker: &mut LeaderSlotMetricsTracker, + log_messages_bytes_limit: Option, connection_cache: &ConnectionCache, tracer_packet_stats: &mut TracerPacketStats, bank_forks: &Arc>, @@ -935,6 +942,7 @@ impl BankingStage { qos_service, slot_metrics_tracker, UNPROCESSED_BUFFER_STEP_SIZE, + log_messages_bytes_limit ), "consume_buffered_packets", ); @@ -1091,6 +1099,7 @@ impl BankingStage { gossip_vote_sender: ReplayVoteSender, data_budget: &DataBudget, cost_model: Arc>, + log_messages_bytes_limit: Option, connection_cache: Arc, bank_forks: &Arc>, ) { @@ -1124,6 +1133,7 @@ impl BankingStage { data_budget, &qos_service, &mut slot_metrics_tracker, + log_messages_bytes_limit, &connection_cache, &mut tracer_packet_stats, bank_forks, @@ -1235,6 +1245,7 @@ impl BankingStage { batch: &TransactionBatch, transaction_status_sender: Option, gossip_vote_sender: &ReplayVoteSender, + log_messages_bytes_limit: Option, ) -> ExecuteAndCommitTransactionsOutput { let mut execute_and_commit_timings = LeaderExecuteAndCommitTimings::default(); let mut mint_decimals: HashMap = HashMap::new(); @@ -1272,6 +1283,7 @@ impl BankingStage { transaction_status_sender.is_some(), &mut execute_and_commit_timings.execute_timings, None, // account_overrides + log_messages_bytes_limit ), "load_execute", ); @@ -1476,6 +1488,7 @@ impl BankingStage { transaction_status_sender: Option, gossip_vote_sender: &ReplayVoteSender, qos_service: &QosService, + log_messages_bytes_limit: Option, ) -> ProcessTransactionBatchOutput { let mut cost_model_time = Measure::start("cost_model"); @@ -1511,6 +1524,7 @@ impl BankingStage { &batch, transaction_status_sender, gossip_vote_sender, + log_messages_bytes_limit, ); let mut unlock_time = Measure::start("unlock_time"); @@ -1667,6 +1681,7 @@ impl BankingStage { transaction_status_sender: Option, gossip_vote_sender: &ReplayVoteSender, qos_service: &QosService, + log_messages_bytes_limit: Option, ) -> ProcessTransactionsSummary { let mut chunk_start = 0; let mut all_retryable_tx_indexes = vec![]; @@ -1698,6 +1713,7 @@ impl BankingStage { transaction_status_sender.clone(), gossip_vote_sender, qos_service, + log_messages_bytes_limit, ); let ProcessTransactionBatchOutput { @@ -1883,6 +1899,7 @@ impl BankingStage { banking_stage_stats: &'a BankingStageStats, qos_service: &'a QosService, slot_metrics_tracker: &'a mut LeaderSlotMetricsTracker, + log_messages_bytes_limit: Option, ) -> ProcessTransactionsSummary { // Convert packets to transactions let ((transactions, transaction_to_packet_indexes), packet_conversion_time): ( @@ -1921,6 +1938,7 @@ impl BankingStage { transaction_status_sender, gossip_vote_sender, qos_service, + log_messages_bytes_limit, ), "process_transaction_time", ); @@ -2272,6 +2290,7 @@ mod tests { None, gossip_vote_sender, Arc::new(RwLock::new(CostModel::default())), + None, Arc::new(ConnectionCache::default()), bank_forks, ); @@ -2325,6 +2344,7 @@ mod tests { None, gossip_vote_sender, Arc::new(RwLock::new(CostModel::default())), + None, Arc::new(ConnectionCache::default()), bank_forks, ); @@ -2403,6 +2423,7 @@ mod tests { None, 
gossip_vote_sender, Arc::new(RwLock::new(CostModel::default())), + None, Arc::new(ConnectionCache::default()), bank_forks, ); @@ -2558,6 +2579,7 @@ mod tests { None, gossip_vote_sender, Arc::new(RwLock::new(CostModel::default())), + None, Arc::new(ConnectionCache::default()), bank_forks, ); @@ -2867,6 +2889,7 @@ mod tests { None, &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), + None, ); let ExecuteAndCommitTransactionsOutput { @@ -2919,6 +2942,7 @@ mod tests { None, &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), + None, ); let ExecuteAndCommitTransactionsOutput { @@ -3002,6 +3026,7 @@ mod tests { None, &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), + None, ); let ExecuteAndCommitTransactionsOutput { @@ -3093,6 +3118,7 @@ mod tests { None, &gossip_vote_sender, &qos_service, + None, ); let ExecuteAndCommitTransactionsOutput { @@ -3132,6 +3158,7 @@ mod tests { None, &gossip_vote_sender, &qos_service, + None, ); let ExecuteAndCommitTransactionsOutput { @@ -3228,6 +3255,7 @@ mod tests { None, &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), + None, ); poh_recorder @@ -3393,6 +3421,7 @@ mod tests { None, &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), + None, ); let ProcessTransactionsSummary { @@ -3459,6 +3488,7 @@ mod tests { None, &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), + None, ); poh_recorder @@ -3681,6 +3711,7 @@ mod tests { }), &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), + None, ); transaction_status_service.join().unwrap(); @@ -3842,6 +3873,7 @@ mod tests { }), &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), + None, ); transaction_status_service.join().unwrap(); @@ -3964,6 +3996,7 @@ mod tests { &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), &mut LeaderSlotMetricsTracker::new(0), num_conflicting_transactions, + None, ); assert_eq!(buffered_packet_batches.len(), num_conflicting_transactions); // When the poh recorder has a bank, should process all non conflicting buffered packets. @@ -3984,6 +4017,7 @@ mod tests { &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), &mut LeaderSlotMetricsTracker::new(0), num_packets_to_process_per_iteration, + None, ); if num_expected_unprocessed == 0 { assert!(buffered_packet_batches.is_empty()) @@ -4057,6 +4091,7 @@ mod tests { &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), &mut LeaderSlotMetricsTracker::new(0), num_packets_to_process_per_iteration, + None, ); // Check everything is correct. 
All indexes after `interrupted_iteration` diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 007f218784..a38e0e2cc2 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -375,6 +375,7 @@ impl ReplayStage { drop_bank_sender: Sender>>, block_metadata_notifier: Option, transaction_cost_metrics_sender: Option, + log_messages_bytes_limit: Option, ) -> Self { let mut tower = if let Some(process_blockstore) = maybe_process_blockstore { let tower = process_blockstore.process_to_create_tower(); @@ -507,6 +508,7 @@ impl ReplayStage { block_metadata_notifier.clone(), transaction_cost_metrics_sender.as_ref(), &mut replay_timing, + log_messages_bytes_limit ); replay_active_banks_time.stop(); @@ -1690,6 +1692,7 @@ impl ReplayStage { replay_vote_sender: &ReplayVoteSender, transaction_cost_metrics_sender: Option<&TransactionCostMetricsSender>, verify_recyclers: &VerifyRecyclers, + log_messages_bytes_limit: Option, ) -> result::Result { let tx_count_before = bank_progress.replay_progress.num_txs; // All errors must lead to marking the slot as dead, otherwise, @@ -1707,6 +1710,7 @@ impl ReplayStage { None, verify_recyclers, false, + log_messages_bytes_limit, )?; let tx_count_after = bank_progress.replay_progress.num_txs; let tx_count = tx_count_after - tx_count_before; @@ -2207,6 +2211,7 @@ impl ReplayStage { block_metadata_notifier: Option, transaction_cost_metrics_sender: Option<&TransactionCostMetricsSender>, replay_timing: &mut ReplayTiming, + log_messages_bytes_limit: Option, ) -> bool { let mut did_complete_bank = false; let mut tx_count = 0; @@ -2259,6 +2264,7 @@ impl ReplayStage { replay_vote_sender, transaction_cost_metrics_sender, verify_recyclers, + log_messages_bytes_limit, ); replay_blockstore_time.stop(); replay_timing.replay_blockstore_us += replay_blockstore_time.as_us(); @@ -3879,6 +3885,7 @@ pub(crate) mod tests { &replay_vote_sender, None, &VerifyRecyclers::default(), + None, ); let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests( diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 41b3f434e3..9872c1e4e1 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -95,6 +95,7 @@ impl Tpu { cost_model: &Arc>, connection_cache: &Arc, keypair: &Keypair, + log_messages_bytes_limit: Option, enable_quic_servers: bool, ) -> Self { let TpuSockets { @@ -229,6 +230,7 @@ impl Tpu { transaction_status_sender, replay_vote_sender, cost_model.clone(), + log_messages_bytes_limit, connection_cache.clone(), bank_forks.clone(), ); diff --git a/core/src/tvu.rs b/core/src/tvu.rs index e6deed99fe..7fa62f6f74 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -133,6 +133,7 @@ impl Tvu { block_metadata_notifier: Option, wait_to_vote_slot: Option, accounts_background_request_sender: AbsRequestSender, + log_messages_bytes_limit: Option, connection_cache: &Arc, ) -> Self { let TvuSockets { @@ -300,6 +301,7 @@ impl Tvu { drop_bank_sender, block_metadata_notifier, transaction_cost_metrics_sender, + log_messages_bytes_limit, ); let ledger_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| { @@ -462,6 +464,7 @@ pub mod tests { None, None, AbsRequestSender::default(), + None, &Arc::new(ConnectionCache::default()), ); exit.store(true, Ordering::Relaxed); diff --git a/core/src/validator.rs b/core/src/validator.rs index 684791ff30..fc4228cc11 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -982,6 +982,7 @@ impl Validator { block_metadata_notifier, 
config.wait_to_vote_slot, accounts_background_request_sender, + config.runtime_config.log_messages_bytes_limit, &connection_cache, ); @@ -1028,6 +1029,7 @@ impl Validator { &cost_model, &connection_cache, &identity_keypair, + config.runtime_config.log_messages_bytes_limit, enable_quic_servers, ); diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 7bfad1943a..d39bea53ad 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -174,6 +174,7 @@ fn execute_batch( timings: &mut ExecuteTimings, cost_capacity_meter: Arc>, tx_cost: u64, + log_messages_bytes_limit: Option, ) -> Result<()> { let TransactionBatchWithIndexes { batch, @@ -199,6 +200,7 @@ fn execute_batch( transaction_status_sender.is_some(), transaction_status_sender.is_some(), timings, + log_messages_bytes_limit, ); if bank @@ -281,6 +283,7 @@ fn execute_batches_internal( replay_vote_sender: Option<&ReplayVoteSender>, cost_capacity_meter: Arc>, tx_costs: &[u64], + log_messages_bytes_limit: Option, ) -> Result { inc_new_counter_debug!("bank-par_execute_entries-count", batches.len()); let execution_timings_per_thread: Mutex> = @@ -307,6 +310,7 @@ fn execute_batches_internal( &mut timings, cost_capacity_meter.clone(), tx_costs[index], + log_messages_bytes_limit, ); if let Some(entry_callback) = entry_callback { entry_callback(bank); @@ -382,6 +386,7 @@ fn execute_batches( confirmation_timing: &mut ConfirmationTiming, cost_capacity_meter: Arc>, cost_model: &CostModel, + log_messages_bytes_limit: Option, ) -> Result<()> { let ((lock_results, sanitized_txs), transaction_indexes): ((Vec<_>, Vec<_>), Vec<_>) = batches .iter() @@ -466,6 +471,7 @@ fn execute_batches( replay_vote_sender, cost_capacity_meter, &tx_batch_costs, + log_messages_bytes_limit, )?; confirmation_timing.process_execute_batches_internal_metrics(execute_batches_internal_metrics); @@ -521,6 +527,7 @@ pub fn process_entries_for_tests( None, &mut confirmation_timing, Arc::new(RwLock::new(BlockCostCapacityMeter::default())), + None, ); debug!("process_entries: {:?}", confirmation_timing); @@ -539,6 +546,7 @@ fn process_entries_with_callback( transaction_cost_metrics_sender: Option<&TransactionCostMetricsSender>, confirmation_timing: &mut ConfirmationTiming, cost_capacity_meter: Arc>, + log_messages_bytes_limit: Option, ) -> Result<()> { // accumulator for entries that can be processed in parallel let mut batches = vec![]; @@ -567,6 +575,7 @@ fn process_entries_with_callback( confirmation_timing, cost_capacity_meter.clone(), &cost_model, + log_messages_bytes_limit, )?; batches.clear(); for hash in &tick_hashes { @@ -637,6 +646,7 @@ fn process_entries_with_callback( confirmation_timing, cost_capacity_meter.clone(), &cost_model, + log_messages_bytes_limit, )?; batches.clear(); } @@ -653,6 +663,7 @@ fn process_entries_with_callback( confirmation_timing, cost_capacity_meter, &cost_model, + log_messages_bytes_limit, )?; for hash in tick_hashes { bank.register_tick(hash); @@ -960,6 +971,7 @@ fn confirm_full_slot( opts.entry_callback.as_ref(), recyclers, opts.allow_dead_slots, + opts.runtime_config.log_messages_bytes_limit, )?; timing.accumulate(&confirmation_timing.execute_timings); @@ -1086,6 +1098,7 @@ pub fn confirm_slot( entry_callback: Option<&ProcessCallback>, recyclers: &VerifyRecyclers, allow_dead_slots: bool, + log_messages_bytes_limit: Option, ) -> result::Result<(), BlockstoreProcessorError> { let slot = bank.slot(); @@ -1114,6 +1127,7 @@ pub fn confirm_slot( transaction_cost_metrics_sender, 
entry_callback, recyclers, + log_messages_bytes_limit, ) } @@ -1129,6 +1143,7 @@ fn confirm_slot_entries( transaction_cost_metrics_sender: Option<&TransactionCostMetricsSender>, entry_callback: Option<&ProcessCallback>, recyclers: &VerifyRecyclers, + log_messages_bytes_limit: Option, ) -> result::Result<(), BlockstoreProcessorError> { let slot = bank.slot(); let (entries, num_shreds, slot_full) = slot_entries_load_result; @@ -1230,6 +1245,7 @@ fn confirm_slot_entries( transaction_cost_metrics_sender, timing, cost_capacity_meter, + log_messages_bytes_limit, ) .map_err(BlockstoreProcessorError::from); replay_elapsed.stop(); @@ -3729,6 +3745,7 @@ pub mod tests { false, false, &mut ExecuteTimings::default(), + None, ); let (err, signature) = get_first_error(&batch, fee_collection_results).unwrap(); assert_eq!(err.unwrap_err(), TransactionError::AccountNotFound); @@ -4107,6 +4124,7 @@ pub mod tests { None, None, &VerifyRecyclers::default(), + None, ) } @@ -4250,6 +4268,7 @@ pub mod tests { None, None, &VerifyRecyclers::default(), + None, ) .unwrap(); assert_eq!(progress.num_txs, 2); @@ -4295,6 +4314,7 @@ pub mod tests { None, None, &VerifyRecyclers::default(), + None, ) .unwrap(); assert_eq!(progress.num_txs, 5); diff --git a/multinode-demo/bootstrap-validator.sh b/multinode-demo/bootstrap-validator.sh index 52244ba349..9245f507c3 100755 --- a/multinode-demo/bootstrap-validator.sh +++ b/multinode-demo/bootstrap-validator.sh @@ -103,6 +103,9 @@ while [[ -n $1 ]]; do elif [[ $1 == --skip-require-tower ]]; then maybeRequireTower=false shift + elif [[ $1 = --log-messages-bytes-limit ]]; then + args+=("$1" "$2") + shift 2 else echo "Unknown argument: $1" $program --help diff --git a/programs/bpf/tests/programs.rs b/programs/bpf/tests/programs.rs index 187df92f40..0439316dc6 100644 --- a/programs/bpf/tests/programs.rs +++ b/programs/bpf/tests/programs.rs @@ -336,6 +336,7 @@ fn process_transaction_and_record_inner( false, false, &mut ExecuteTimings::default(), + None, ) .0; let result = results @@ -378,6 +379,7 @@ fn execute_transactions( true, true, &mut timings, + None, ); let tx_post_token_balances = collect_token_balances(&bank, &batch, &mut mint_decimals); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 2d539506d1..50e8aed5fa 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3963,6 +3963,7 @@ impl Bank { true, &mut timings, Some(&account_overrides), + None, ); let post_simulation_accounts = loaded_transactions @@ -4259,6 +4260,7 @@ impl Bank { enable_return_data_recording: bool, timings: &mut ExecuteTimings, error_counters: &mut TransactionErrorMetrics, + log_messages_bytes_limit: Option, ) -> TransactionExecutionResult { let mut get_executors_time = Measure::start("get_executors_time"); let executors = self.get_executors(&loaded_transaction.accounts); @@ -4280,7 +4282,12 @@ impl Bank { self.get_transaction_account_state_info(&transaction_context, tx.message()); let log_collector = if enable_log_recording { - Some(LogCollector::new_ref()) + match log_messages_bytes_limit { + None => Some(LogCollector::new_ref()), + Some(log_messages_bytes_limit) => Some(LogCollector::new_ref_with_limit(Some( + log_messages_bytes_limit, + ))), + } } else { None }; @@ -4410,6 +4417,7 @@ impl Bank { enable_return_data_recording: bool, timings: &mut ExecuteTimings, account_overrides: Option<&AccountOverrides>, + log_messages_bytes_limit: Option, ) -> LoadAndExecuteTransactionsOutput { let sanitized_txs = batch.sanitized_transactions(); debug!("processing transactions: {}", 
sanitized_txs.len()); @@ -4510,6 +4518,7 @@ impl Bank { enable_return_data_recording, timings, &mut error_counters, + log_messages_bytes_limit, ) } }) @@ -6035,6 +6044,7 @@ impl Bank { enable_log_recording: bool, enable_return_data_recording: bool, timings: &mut ExecuteTimings, + log_messages_bytes_limit: Option, ) -> (TransactionResults, TransactionBalancesSet) { let pre_balances = if collect_balances { self.collect_balances(batch) @@ -6057,6 +6067,7 @@ impl Bank { enable_return_data_recording, timings, None, + log_messages_bytes_limit, ); let (last_blockhash, lamports_per_signature) = @@ -6109,6 +6120,7 @@ impl Bank { true, false, &mut ExecuteTimings::default(), + None, ); tx.signatures .get(0) @@ -6170,6 +6182,7 @@ impl Bank { false, false, &mut ExecuteTimings::default(), + None, ) .0 .fee_collection_results @@ -11106,6 +11119,7 @@ pub(crate) mod tests { false, false, &mut ExecuteTimings::default(), + None, ) .0 .fee_collection_results; @@ -13785,6 +13799,7 @@ pub(crate) mod tests { false, false, &mut ExecuteTimings::default(), + None, ); assert_eq!(transaction_balances_set.pre_balances.len(), 3); @@ -16809,6 +16824,7 @@ pub(crate) mod tests { true, false, &mut ExecuteTimings::default(), + None, ) .0 .execution_results; @@ -16917,6 +16933,7 @@ pub(crate) mod tests { false, true, &mut ExecuteTimings::default(), + None, ) .0 .execution_results[0] diff --git a/runtime/src/runtime_config.rs b/runtime/src/runtime_config.rs index 18acbd131e..106e326027 100644 --- a/runtime/src/runtime_config.rs +++ b/runtime/src/runtime_config.rs @@ -5,4 +5,5 @@ use solana_program_runtime::compute_budget::ComputeBudget; pub struct RuntimeConfig { pub bpf_jit: bool, pub compute_budget: Option, + pub log_messages_bytes_limit: Option, } diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs index 5410f7024e..48091892d1 100644 --- a/test-validator/src/lib.rs +++ b/test-validator/src/lib.rs @@ -119,6 +119,7 @@ pub struct TestValidatorGenesis { pub accounts_db_caching_enabled: bool, deactivate_feature_set: HashSet, compute_unit_limit: Option, + pub log_messages_bytes_limit: Option, } impl Default for TestValidatorGenesis { @@ -147,6 +148,7 @@ impl Default for TestValidatorGenesis { accounts_db_caching_enabled: bool::default(), deactivate_feature_set: HashSet::::default(), compute_unit_limit: Option::::default(), + log_messages_bytes_limit: Option::::default(), } } } @@ -702,6 +704,7 @@ impl TestValidator { compute_unit_limit, ..ComputeBudget::default() }), + log_messages_bytes_limit: config.log_messages_bytes_limit, }; let mut validator_config = ValidatorConfig { diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs index a05a09dc64..1c95ac13b3 100644 --- a/validator/src/bin/solana-test-validator.rs +++ b/validator/src/bin/solana-test-validator.rs @@ -386,6 +386,14 @@ fn main() { .takes_value(true) .help("Override the runtime's compute unit limit per transaction") ) + .arg( + Arg::with_name("log_messages_bytes_limit") + .long("log-messages-bytes-limit") + .value_name("BYTES") + .validator(is_parsable::) + .takes_value(true) + .help("Maximum number of bytes written to the program log before truncation") + ) .get_matches(); let output = if matches.is_present("quiet") { @@ -643,6 +651,7 @@ fn main() { genesis.max_ledger_shreds = value_of(&matches, "limit_ledger_size"); genesis.max_genesis_archive_unpacked_size = Some(u64::MAX); genesis.accounts_db_caching_enabled = !matches.is_present("no_accounts_db_caching"); + genesis.log_messages_bytes_limit = 
value_t!(matches, "log_messages_bytes_limit", usize).ok();

     let tower_storage = Arc::new(FileTowerStorage::new(ledger_path.clone()));
diff --git a/validator/src/main.rs b/validator/src/main.rs
index 21d3e6cc7b..eb6c098de6 100644
--- a/validator/src/main.rs
+++ b/validator/src/main.rs
@@ -1768,6 +1768,14 @@ pub fn main() {
                 .help("Allow contacting private ip addresses")
                 .hidden(true),
         )
+        .arg(
+            Arg::with_name("log_messages_bytes_limit")
+                .long("log-messages-bytes-limit")
+                .takes_value(true)
+                .validator(is_parsable::<usize>)
+                .value_name("BYTES")
+                .help("Maximum number of bytes written to the program log before truncation")
+        )
         .after_help("The default subcommand is run")
         .subcommand(
             SubCommand::with_name("exit")
@@ -2637,6 +2645,7 @@ pub fn main() {
         accounts_shrink_ratio,
         runtime_config: RuntimeConfig {
             bpf_jit: !matches.is_present("no_bpf_jit"),
+            log_messages_bytes_limit: value_t!(matches, "log_messages_bytes_limit", usize).ok(),
             ..RuntimeConfig::default()
         },
         enable_quic_servers,
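
How the limit behaves at runtime: the patch threads log_messages_bytes_limit from the CLI through RuntimeConfig, BankingStage, ReplayStage, and the blockstore processor into Bank::execute_loaded_transaction, where None keeps the existing LogCollector::new_ref() behavior and Some(n) switches to LogCollector::new_ref_with_limit(Some(n)). The standalone Rust sketch below only illustrates the kind of byte-capped collection such a limit implies; it is not the solana-program-runtime LogCollector, and the names ByteLimitedLog, bytes_used, and the "Log truncated" marker are invented for the example.

// Illustrative sketch only, under the assumptions stated above.
struct ByteLimitedLog {
    messages: Vec<String>,
    bytes_used: usize,
    bytes_limit: Option<usize>, // None mirrors the "no explicit limit" case in the patch
    truncated: bool,
}

impl ByteLimitedLog {
    fn new(bytes_limit: Option<usize>) -> Self {
        Self { messages: Vec::new(), bytes_used: 0, bytes_limit, truncated: false }
    }

    fn log(&mut self, message: &str) {
        // Once the byte budget is exhausted, record a single truncation marker
        // and drop all further messages.
        if self.truncated {
            return;
        }
        if let Some(limit) = self.bytes_limit {
            if self.bytes_used.saturating_add(message.len()) > limit {
                self.truncated = true;
                self.messages.push("Log truncated".to_string());
                return;
            }
        }
        self.bytes_used += message.len();
        self.messages.push(message.to_string());
    }
}

fn main() {
    let mut log = ByteLimitedLog::new(Some(16));
    log.log("Program abc invoke [1]"); // 22 bytes, exceeds the 16-byte budget -> truncated
    log.log("Program abc success");    // ignored after truncation
    assert!(log.truncated);
    println!("{:?}", log.messages);
}

With the flag in place, an operator who needs longer program logs for debugging can start the validator with, for example, solana-validator --log-messages-bytes-limit 100000 (solana-test-validator accepts the same flag); omitting the flag leaves log_messages_bytes_limit as None, so the current truncation behavior is unchanged.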