Add validator argument log_messages_bytes_limit to change log truncation limit.

Add a new CLI argument, log_messages_bytes_limit, to solana-validator to control how long program logs can grow before they are truncated.
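For example, an operator could pass the flag when starting a validator or a test validator; the byte value below is illustrative, omitting the flag keeps the existing truncation behavior, and the node's other required arguments are elided:

$ solana-validator --log-messages-bytes-limit 20000 ...
$ solana-test-validator --log-messages-bytes-limit 20000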
Nicholas Clarke 2022-07-11 08:53:18 -07:00 committed by GitHub
parent 458a872234
commit ee0a40937e
15 changed files with 124 additions and 1 deletion

View File

@@ -213,6 +213,12 @@ fn main() {
.takes_value(true)
.help("Number of threads to use in the banking stage"),
)
+.arg(
+Arg::new("log_messages_bytes_limit")
+.long("log-messages-bytes-limit")
+.takes_value(false)
+.help("Maximum number of bytes written to the program log before truncation"),
+)
.arg(
Arg::new("tpu_use_quic")
.long("tpu-use-quic")
@@ -236,6 +242,7 @@ fn main() {
let write_lock_contention = matches
.value_of_t::<WriteLockContention>("write_lock_contention")
.unwrap_or(WriteLockContention::None);
+let log_messages_bytes_limit = matches.value_of_t::<usize>("log_messages_bytes_limit").ok();
let mint_total = 1_000_000_000_000;
let GenesisConfigInfo {
@@ -356,6 +363,7 @@ fn main() {
None,
replay_vote_sender,
Arc::new(RwLock::new(CostModel::default())),
+log_messages_bytes_limit,
Arc::new(connection_cache),
bank_forks.clone(),
);

View File

@@ -99,6 +99,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
&QosService::new(Arc::new(RwLock::new(CostModel::default())), 1),
&mut LeaderSlotMetricsTracker::new(0),
10,
+None,
);
});
@@ -232,6 +233,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
None,
s,
Arc::new(RwLock::new(CostModel::default())),
+None,
Arc::new(ConnectionCache::default()),
bank_forks,
);

View File

@@ -396,6 +396,7 @@ impl BankingStage {
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: ReplayVoteSender,
cost_model: Arc<RwLock<CostModel>>,
+log_messages_bytes_limit: Option<usize>,
connection_cache: Arc<ConnectionCache>,
bank_forks: Arc<RwLock<BankForks>>,
) -> Self {
@@ -409,6 +410,7 @@ impl BankingStage {
transaction_status_sender,
gossip_vote_sender,
cost_model,
+log_messages_bytes_limit,
connection_cache,
bank_forks,
)
@@ -425,6 +427,7 @@ impl BankingStage {
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: ReplayVoteSender,
cost_model: Arc<RwLock<CostModel>>,
+log_messages_bytes_limit: Option<usize>,
connection_cache: Arc<ConnectionCache>,
bank_forks: Arc<RwLock<BankForks>>,
) -> Self {
@@ -475,6 +478,7 @@ impl BankingStage {
gossip_vote_sender,
&data_budget,
cost_model,
+log_messages_bytes_limit,
connection_cache,
&bank_forks,
);
@@ -645,6 +649,7 @@ impl BankingStage {
qos_service: &QosService,
slot_metrics_tracker: &mut LeaderSlotMetricsTracker,
num_packets_to_process_per_iteration: usize,
+log_messages_bytes_limit: Option<usize>,
) {
let mut rebuffered_packet_count = 0;
let mut consumed_buffered_packets_count = 0;
@@ -692,6 +697,7 @@ impl BankingStage {
banking_stage_stats,
qos_service,
slot_metrics_tracker,
+log_messages_bytes_limit
),
"process_packets_transactions",
);
@@ -873,6 +879,7 @@ impl BankingStage {
data_budget: &DataBudget,
qos_service: &QosService,
slot_metrics_tracker: &mut LeaderSlotMetricsTracker,
+log_messages_bytes_limit: Option<usize>,
connection_cache: &ConnectionCache,
tracer_packet_stats: &mut TracerPacketStats,
bank_forks: &Arc<RwLock<BankForks>>,
@@ -935,6 +942,7 @@ impl BankingStage {
qos_service,
slot_metrics_tracker,
UNPROCESSED_BUFFER_STEP_SIZE,
+log_messages_bytes_limit
),
"consume_buffered_packets",
);
@@ -1091,6 +1099,7 @@ impl BankingStage {
gossip_vote_sender: ReplayVoteSender,
data_budget: &DataBudget,
cost_model: Arc<RwLock<CostModel>>,
+log_messages_bytes_limit: Option<usize>,
connection_cache: Arc<ConnectionCache>,
bank_forks: &Arc<RwLock<BankForks>>,
) {
@@ -1124,6 +1133,7 @@ impl BankingStage {
data_budget,
&qos_service,
&mut slot_metrics_tracker,
+log_messages_bytes_limit,
&connection_cache,
&mut tracer_packet_stats,
bank_forks,
@@ -1235,6 +1245,7 @@ impl BankingStage {
batch: &TransactionBatch,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
+log_messages_bytes_limit: Option<usize>,
) -> ExecuteAndCommitTransactionsOutput {
let mut execute_and_commit_timings = LeaderExecuteAndCommitTimings::default();
let mut mint_decimals: HashMap<Pubkey, u8> = HashMap::new();
@@ -1272,6 +1283,7 @@ impl BankingStage {
transaction_status_sender.is_some(),
&mut execute_and_commit_timings.execute_timings,
None, // account_overrides
+log_messages_bytes_limit
),
"load_execute",
);
@@ -1476,6 +1488,7 @@ impl BankingStage {
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
qos_service: &QosService,
+log_messages_bytes_limit: Option<usize>,
) -> ProcessTransactionBatchOutput {
let mut cost_model_time = Measure::start("cost_model");
@@ -1511,6 +1524,7 @@ impl BankingStage {
&batch,
transaction_status_sender,
gossip_vote_sender,
+log_messages_bytes_limit,
);
let mut unlock_time = Measure::start("unlock_time");
@@ -1667,6 +1681,7 @@ impl BankingStage {
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
qos_service: &QosService,
+log_messages_bytes_limit: Option<usize>,
) -> ProcessTransactionsSummary {
let mut chunk_start = 0;
let mut all_retryable_tx_indexes = vec![];
@@ -1698,6 +1713,7 @@ impl BankingStage {
transaction_status_sender.clone(),
gossip_vote_sender,
qos_service,
+log_messages_bytes_limit,
);
let ProcessTransactionBatchOutput {
@@ -1883,6 +1899,7 @@ impl BankingStage {
banking_stage_stats: &'a BankingStageStats,
qos_service: &'a QosService,
slot_metrics_tracker: &'a mut LeaderSlotMetricsTracker,
+log_messages_bytes_limit: Option<usize>,
) -> ProcessTransactionsSummary {
// Convert packets to transactions
let ((transactions, transaction_to_packet_indexes), packet_conversion_time): (
@@ -1921,6 +1938,7 @@ impl BankingStage {
transaction_status_sender,
gossip_vote_sender,
qos_service,
+log_messages_bytes_limit,
),
"process_transaction_time",
);
@@ -2272,6 +2290,7 @@ mod tests {
None,
gossip_vote_sender,
Arc::new(RwLock::new(CostModel::default())),
+None,
Arc::new(ConnectionCache::default()),
bank_forks,
);
@@ -2325,6 +2344,7 @@ mod tests {
None,
gossip_vote_sender,
Arc::new(RwLock::new(CostModel::default())),
+None,
Arc::new(ConnectionCache::default()),
bank_forks,
);
@@ -2403,6 +2423,7 @@ mod tests {
None,
gossip_vote_sender,
Arc::new(RwLock::new(CostModel::default())),
+None,
Arc::new(ConnectionCache::default()),
bank_forks,
);
@@ -2558,6 +2579,7 @@ mod tests {
None,
gossip_vote_sender,
Arc::new(RwLock::new(CostModel::default())),
+None,
Arc::new(ConnectionCache::default()),
bank_forks,
);
@@ -2867,6 +2889,7 @@ mod tests {
None,
&gossip_vote_sender,
&QosService::new(Arc::new(RwLock::new(CostModel::default())), 1),
+None,
);
let ExecuteAndCommitTransactionsOutput {
@@ -2919,6 +2942,7 @@ mod tests {
None,
&gossip_vote_sender,
&QosService::new(Arc::new(RwLock::new(CostModel::default())), 1),
+None,
);
let ExecuteAndCommitTransactionsOutput {
@@ -3002,6 +3026,7 @@ mod tests {
None,
&gossip_vote_sender,
&QosService::new(Arc::new(RwLock::new(CostModel::default())), 1),
+None,
);
let ExecuteAndCommitTransactionsOutput {
@@ -3093,6 +3118,7 @@ mod tests {
None,
&gossip_vote_sender,
&qos_service,
+None,
);
let ExecuteAndCommitTransactionsOutput {
@@ -3132,6 +3158,7 @@ mod tests {
None,
&gossip_vote_sender,
&qos_service,
+None,
);
let ExecuteAndCommitTransactionsOutput {
@@ -3228,6 +3255,7 @@ mod tests {
None,
&gossip_vote_sender,
&QosService::new(Arc::new(RwLock::new(CostModel::default())), 1),
+None,
);
poh_recorder
@@ -3393,6 +3421,7 @@ mod tests {
None,
&gossip_vote_sender,
&QosService::new(Arc::new(RwLock::new(CostModel::default())), 1),
+None,
);
let ProcessTransactionsSummary {
@@ -3459,6 +3488,7 @@ mod tests {
None,
&gossip_vote_sender,
&QosService::new(Arc::new(RwLock::new(CostModel::default())), 1),
+None,
);
poh_recorder
@@ -3681,6 +3711,7 @@ mod tests {
}),
&gossip_vote_sender,
&QosService::new(Arc::new(RwLock::new(CostModel::default())), 1),
+None,
);
transaction_status_service.join().unwrap();
@@ -3842,6 +3873,7 @@ mod tests {
}),
&gossip_vote_sender,
&QosService::new(Arc::new(RwLock::new(CostModel::default())), 1),
+None,
);
transaction_status_service.join().unwrap();
@@ -3964,6 +3996,7 @@ mod tests {
&QosService::new(Arc::new(RwLock::new(CostModel::default())), 1),
&mut LeaderSlotMetricsTracker::new(0),
num_conflicting_transactions,
+None,
);
assert_eq!(buffered_packet_batches.len(), num_conflicting_transactions);
// When the poh recorder has a bank, should process all non conflicting buffered packets.
@@ -3984,6 +4017,7 @@ mod tests {
&QosService::new(Arc::new(RwLock::new(CostModel::default())), 1),
&mut LeaderSlotMetricsTracker::new(0),
num_packets_to_process_per_iteration,
+None,
);
if num_expected_unprocessed == 0 {
assert!(buffered_packet_batches.is_empty())
@@ -4057,6 +4091,7 @@ mod tests {
&QosService::new(Arc::new(RwLock::new(CostModel::default())), 1),
&mut LeaderSlotMetricsTracker::new(0),
num_packets_to_process_per_iteration,
+None,
);
// Check everything is correct. All indexes after `interrupted_iteration`

View File

@@ -375,6 +375,7 @@ impl ReplayStage {
drop_bank_sender: Sender<Vec<Arc<Bank>>>,
block_metadata_notifier: Option<BlockMetadataNotifierLock>,
transaction_cost_metrics_sender: Option<TransactionCostMetricsSender>,
+log_messages_bytes_limit: Option<usize>,
) -> Self {
let mut tower = if let Some(process_blockstore) = maybe_process_blockstore {
let tower = process_blockstore.process_to_create_tower();
@@ -507,6 +508,7 @@ impl ReplayStage {
block_metadata_notifier.clone(),
transaction_cost_metrics_sender.as_ref(),
&mut replay_timing,
+log_messages_bytes_limit
);
replay_active_banks_time.stop();
@@ -1690,6 +1692,7 @@ impl ReplayStage {
replay_vote_sender: &ReplayVoteSender,
transaction_cost_metrics_sender: Option<&TransactionCostMetricsSender>,
verify_recyclers: &VerifyRecyclers,
+log_messages_bytes_limit: Option<usize>,
) -> result::Result<usize, BlockstoreProcessorError> {
let tx_count_before = bank_progress.replay_progress.num_txs;
// All errors must lead to marking the slot as dead, otherwise,
@@ -1707,6 +1710,7 @@ impl ReplayStage {
None,
verify_recyclers,
false,
+log_messages_bytes_limit,
)?;
let tx_count_after = bank_progress.replay_progress.num_txs;
let tx_count = tx_count_after - tx_count_before;
@@ -2207,6 +2211,7 @@ impl ReplayStage {
block_metadata_notifier: Option<BlockMetadataNotifierLock>,
transaction_cost_metrics_sender: Option<&TransactionCostMetricsSender>,
replay_timing: &mut ReplayTiming,
+log_messages_bytes_limit: Option<usize>,
) -> bool {
let mut did_complete_bank = false;
let mut tx_count = 0;
@@ -2259,6 +2264,7 @@ impl ReplayStage {
replay_vote_sender,
transaction_cost_metrics_sender,
verify_recyclers,
+log_messages_bytes_limit,
);
replay_blockstore_time.stop();
replay_timing.replay_blockstore_us += replay_blockstore_time.as_us();
@@ -3879,6 +3885,7 @@ pub(crate) mod tests {
&replay_vote_sender,
None,
&VerifyRecyclers::default(),
+None,
);
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests(

View File

@@ -95,6 +95,7 @@ impl Tpu {
cost_model: &Arc<RwLock<CostModel>>,
connection_cache: &Arc<ConnectionCache>,
keypair: &Keypair,
+log_messages_bytes_limit: Option<usize>,
enable_quic_servers: bool,
) -> Self {
let TpuSockets {
@@ -229,6 +230,7 @@ impl Tpu {
transaction_status_sender,
replay_vote_sender,
cost_model.clone(),
+log_messages_bytes_limit,
connection_cache.clone(),
bank_forks.clone(),
);

View File

@@ -133,6 +133,7 @@ impl Tvu {
block_metadata_notifier: Option<BlockMetadataNotifierLock>,
wait_to_vote_slot: Option<Slot>,
accounts_background_request_sender: AbsRequestSender,
+log_messages_bytes_limit: Option<usize>,
connection_cache: &Arc<ConnectionCache>,
) -> Self {
let TvuSockets {
@@ -300,6 +301,7 @@ impl Tvu {
drop_bank_sender,
block_metadata_notifier,
transaction_cost_metrics_sender,
+log_messages_bytes_limit,
);
let ledger_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| {
@@ -462,6 +464,7 @@ pub mod tests {
None,
None,
AbsRequestSender::default(),
+None,
&Arc::new(ConnectionCache::default()),
);
exit.store(true, Ordering::Relaxed);

View File

@@ -982,6 +982,7 @@ impl Validator {
block_metadata_notifier,
config.wait_to_vote_slot,
accounts_background_request_sender,
+config.runtime_config.log_messages_bytes_limit,
&connection_cache,
);
@@ -1028,6 +1029,7 @@ impl Validator {
&cost_model,
&connection_cache,
&identity_keypair,
+config.runtime_config.log_messages_bytes_limit,
enable_quic_servers,
);

View File

@@ -174,6 +174,7 @@ fn execute_batch(
timings: &mut ExecuteTimings,
cost_capacity_meter: Arc<RwLock<BlockCostCapacityMeter>>,
tx_cost: u64,
+log_messages_bytes_limit: Option<usize>,
) -> Result<()> {
let TransactionBatchWithIndexes {
batch,
@@ -199,6 +200,7 @@ fn execute_batch(
transaction_status_sender.is_some(),
transaction_status_sender.is_some(),
timings,
+log_messages_bytes_limit,
);
if bank
@@ -281,6 +283,7 @@ fn execute_batches_internal(
replay_vote_sender: Option<&ReplayVoteSender>,
cost_capacity_meter: Arc<RwLock<BlockCostCapacityMeter>>,
tx_costs: &[u64],
+log_messages_bytes_limit: Option<usize>,
) -> Result<ExecuteBatchesInternalMetrics> {
inc_new_counter_debug!("bank-par_execute_entries-count", batches.len());
let execution_timings_per_thread: Mutex<HashMap<usize, ThreadExecuteTimings>> =
@@ -307,6 +310,7 @@ fn execute_batches_internal(
&mut timings,
cost_capacity_meter.clone(),
tx_costs[index],
+log_messages_bytes_limit,
);
if let Some(entry_callback) = entry_callback {
entry_callback(bank);
@@ -382,6 +386,7 @@ fn execute_batches(
confirmation_timing: &mut ConfirmationTiming,
cost_capacity_meter: Arc<RwLock<BlockCostCapacityMeter>>,
cost_model: &CostModel,
+log_messages_bytes_limit: Option<usize>,
) -> Result<()> {
let ((lock_results, sanitized_txs), transaction_indexes): ((Vec<_>, Vec<_>), Vec<_>) = batches
.iter()
@@ -466,6 +471,7 @@ fn execute_batches(
replay_vote_sender,
cost_capacity_meter,
&tx_batch_costs,
+log_messages_bytes_limit,
)?;
confirmation_timing.process_execute_batches_internal_metrics(execute_batches_internal_metrics);
@@ -521,6 +527,7 @@ pub fn process_entries_for_tests(
None,
&mut confirmation_timing,
Arc::new(RwLock::new(BlockCostCapacityMeter::default())),
+None,
);
debug!("process_entries: {:?}", confirmation_timing);
@@ -539,6 +546,7 @@ fn process_entries_with_callback(
transaction_cost_metrics_sender: Option<&TransactionCostMetricsSender>,
confirmation_timing: &mut ConfirmationTiming,
cost_capacity_meter: Arc<RwLock<BlockCostCapacityMeter>>,
+log_messages_bytes_limit: Option<usize>,
) -> Result<()> {
// accumulator for entries that can be processed in parallel
let mut batches = vec![];
@@ -567,6 +575,7 @@ fn process_entries_with_callback(
confirmation_timing,
cost_capacity_meter.clone(),
&cost_model,
+log_messages_bytes_limit,
)?;
batches.clear();
for hash in &tick_hashes {
@@ -637,6 +646,7 @@ fn process_entries_with_callback(
confirmation_timing,
cost_capacity_meter.clone(),
&cost_model,
+log_messages_bytes_limit,
)?;
batches.clear();
}
@@ -653,6 +663,7 @@ fn process_entries_with_callback(
confirmation_timing,
cost_capacity_meter,
&cost_model,
+log_messages_bytes_limit,
)?;
for hash in tick_hashes {
bank.register_tick(hash);
@@ -960,6 +971,7 @@ fn confirm_full_slot(
opts.entry_callback.as_ref(),
recyclers,
opts.allow_dead_slots,
+opts.runtime_config.log_messages_bytes_limit,
)?;
timing.accumulate(&confirmation_timing.execute_timings);
@@ -1086,6 +1098,7 @@ pub fn confirm_slot(
entry_callback: Option<&ProcessCallback>,
recyclers: &VerifyRecyclers,
allow_dead_slots: bool,
+log_messages_bytes_limit: Option<usize>,
) -> result::Result<(), BlockstoreProcessorError> {
let slot = bank.slot();
@@ -1114,6 +1127,7 @@ pub fn confirm_slot(
transaction_cost_metrics_sender,
entry_callback,
recyclers,
+log_messages_bytes_limit,
)
}
@@ -1129,6 +1143,7 @@ fn confirm_slot_entries(
transaction_cost_metrics_sender: Option<&TransactionCostMetricsSender>,
entry_callback: Option<&ProcessCallback>,
recyclers: &VerifyRecyclers,
+log_messages_bytes_limit: Option<usize>,
) -> result::Result<(), BlockstoreProcessorError> {
let slot = bank.slot();
let (entries, num_shreds, slot_full) = slot_entries_load_result;
@@ -1230,6 +1245,7 @@ fn confirm_slot_entries(
transaction_cost_metrics_sender,
timing,
cost_capacity_meter,
+log_messages_bytes_limit,
)
.map_err(BlockstoreProcessorError::from);
replay_elapsed.stop();
@@ -3729,6 +3745,7 @@ pub mod tests {
false,
false,
&mut ExecuteTimings::default(),
+None,
);
let (err, signature) = get_first_error(&batch, fee_collection_results).unwrap();
assert_eq!(err.unwrap_err(), TransactionError::AccountNotFound);
@@ -4107,6 +4124,7 @@ pub mod tests {
None,
None,
&VerifyRecyclers::default(),
+None,
)
}
@@ -4250,6 +4268,7 @@ pub mod tests {
None,
None,
&VerifyRecyclers::default(),
+None,
)
.unwrap();
assert_eq!(progress.num_txs, 2);
@@ -4295,6 +4314,7 @@ pub mod tests {
None,
None,
&VerifyRecyclers::default(),
+None,
)
.unwrap();
assert_eq!(progress.num_txs, 5);

View File

@@ -103,6 +103,9 @@ while [[ -n $1 ]]; do
elif [[ $1 == --skip-require-tower ]]; then
maybeRequireTower=false
shift
+elif [[ $1 = --log-messages-bytes-limit ]]; then
+args+=("$1" "$2")
+shift 2
else
echo "Unknown argument: $1"
$program --help

View File

@@ -336,6 +336,7 @@ fn process_transaction_and_record_inner(
false,
false,
&mut ExecuteTimings::default(),
+None,
)
.0;
let result = results
@@ -378,6 +379,7 @@ fn execute_transactions(
true,
true,
&mut timings,
+None,
);
let tx_post_token_balances = collect_token_balances(&bank, &batch, &mut mint_decimals);

View File

@@ -3963,6 +3963,7 @@ impl Bank {
true,
&mut timings,
Some(&account_overrides),
+None,
);
let post_simulation_accounts = loaded_transactions
@@ -4259,6 +4260,7 @@ impl Bank {
enable_return_data_recording: bool,
timings: &mut ExecuteTimings,
error_counters: &mut TransactionErrorMetrics,
+log_messages_bytes_limit: Option<usize>,
) -> TransactionExecutionResult {
let mut get_executors_time = Measure::start("get_executors_time");
let executors = self.get_executors(&loaded_transaction.accounts);
@@ -4280,7 +4282,12 @@ impl Bank {
self.get_transaction_account_state_info(&transaction_context, tx.message());
let log_collector = if enable_log_recording {
-Some(LogCollector::new_ref())
+match log_messages_bytes_limit {
+None => Some(LogCollector::new_ref()),
+Some(log_messages_bytes_limit) => Some(LogCollector::new_ref_with_limit(Some(
+log_messages_bytes_limit,
+))),
+}
} else {
None
};
@@ -4410,6 +4417,7 @@ impl Bank {
enable_return_data_recording: bool,
timings: &mut ExecuteTimings,
account_overrides: Option<&AccountOverrides>,
+log_messages_bytes_limit: Option<usize>,
) -> LoadAndExecuteTransactionsOutput {
let sanitized_txs = batch.sanitized_transactions();
debug!("processing transactions: {}", sanitized_txs.len());
@@ -4510,6 +4518,7 @@ impl Bank {
enable_return_data_recording,
timings,
&mut error_counters,
+log_messages_bytes_limit,
)
}
})
@@ -6035,6 +6044,7 @@ impl Bank {
enable_log_recording: bool,
enable_return_data_recording: bool,
timings: &mut ExecuteTimings,
+log_messages_bytes_limit: Option<usize>,
) -> (TransactionResults, TransactionBalancesSet) {
let pre_balances = if collect_balances {
self.collect_balances(batch)
@@ -6057,6 +6067,7 @@ impl Bank {
enable_return_data_recording,
timings,
None,
+log_messages_bytes_limit,
);
let (last_blockhash, lamports_per_signature) =
@@ -6109,6 +6120,7 @@ impl Bank {
true,
false,
&mut ExecuteTimings::default(),
+None,
);
tx.signatures
.get(0)
@@ -6170,6 +6182,7 @@ impl Bank {
false,
false,
&mut ExecuteTimings::default(),
+None,
)
.0
.fee_collection_results
@@ -11106,6 +11119,7 @@ pub(crate) mod tests {
false,
false,
&mut ExecuteTimings::default(),
+None,
)
.0
.fee_collection_results;
@@ -13785,6 +13799,7 @@ pub(crate) mod tests {
false,
false,
&mut ExecuteTimings::default(),
+None,
);
assert_eq!(transaction_balances_set.pre_balances.len(), 3);
@@ -16809,6 +16824,7 @@ pub(crate) mod tests {
true,
false,
&mut ExecuteTimings::default(),
+None,
)
.0
.execution_results;
@@ -16917,6 +16933,7 @@ pub(crate) mod tests {
false,
true,
&mut ExecuteTimings::default(),
+None,
)
.0
.execution_results[0]

View File

@@ -5,4 +5,5 @@ use solana_program_runtime::compute_budget::ComputeBudget;
pub struct RuntimeConfig {
pub bpf_jit: bool,
pub compute_budget: Option<ComputeBudget>,
+pub log_messages_bytes_limit: Option<usize>,
}

View File

@@ -119,6 +119,7 @@ pub struct TestValidatorGenesis {
pub accounts_db_caching_enabled: bool,
deactivate_feature_set: HashSet<Pubkey>,
compute_unit_limit: Option<u64>,
+pub log_messages_bytes_limit: Option<usize>,
}
impl Default for TestValidatorGenesis {
@@ -147,6 +148,7 @@ impl Default for TestValidatorGenesis {
accounts_db_caching_enabled: bool::default(),
deactivate_feature_set: HashSet::<Pubkey>::default(),
compute_unit_limit: Option::<u64>::default(),
+log_messages_bytes_limit: Option::<usize>::default(),
}
}
}
@@ -702,6 +704,7 @@ impl TestValidator {
compute_unit_limit,
..ComputeBudget::default()
}),
+log_messages_bytes_limit: config.log_messages_bytes_limit,
};
let mut validator_config = ValidatorConfig {

View File

@@ -386,6 +386,14 @@ fn main() {
.takes_value(true)
.help("Override the runtime's compute unit limit per transaction")
)
+.arg(
+Arg::with_name("log_messages_bytes_limit")
+.long("log-messages-bytes-limit")
+.value_name("BYTES")
+.validator(is_parsable::<usize>)
+.takes_value(true)
+.help("Maximum number of bytes written to the program log before truncation")
+)
.get_matches();
let output = if matches.is_present("quiet") {
@@ -643,6 +651,7 @@ fn main() {
genesis.max_ledger_shreds = value_of(&matches, "limit_ledger_size");
genesis.max_genesis_archive_unpacked_size = Some(u64::MAX);
genesis.accounts_db_caching_enabled = !matches.is_present("no_accounts_db_caching");
+genesis.log_messages_bytes_limit = value_t!(matches, "log_messages_bytes_limit", usize).ok();
let tower_storage = Arc::new(FileTowerStorage::new(ledger_path.clone()));

View File

@@ -1768,6 +1768,14 @@ pub fn main() {
.help("Allow contacting private ip addresses")
.hidden(true),
)
+.arg(
+Arg::with_name("log_messages_bytes_limit")
+.long("log-messages-bytes-limit")
+.takes_value(true)
+.validator(is_parsable::<usize>)
+.value_name("BYTES")
+.help("Maximum number of bytes written to the program log before truncation")
+)
.after_help("The default subcommand is run")
.subcommand(
SubCommand::with_name("exit")
@@ -2637,6 +2645,7 @@ pub fn main() {
accounts_shrink_ratio,
runtime_config: RuntimeConfig {
bpf_jit: !matches.is_present("no_bpf_jit"),
+log_messages_bytes_limit: value_t!(matches, "log_messages_bytes_limit", usize).ok(),
..RuntimeConfig::default()
},
enable_quic_servers,