diff --git a/Cargo.lock b/Cargo.lock
index 6a07f5f38..16480d198 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2563,12 +2563,12 @@ dependencies = [
 name = "solana-metrics"
 version = "0.15.0"
 dependencies = [
+ "env_logger 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "influx_db_client 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "reqwest 0.9.17 (registry+https://github.com/rust-lang/crates.io-index)",
- "solana-logger 0.15.0",
  "solana-sdk 0.15.0",
  "sys-info 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
diff --git a/bench-exchange/src/bench.rs b/bench-exchange/src/bench.rs
index 2cf9865e1..afbce89fc 100644
--- a/bench-exchange/src/bench.rs
+++ b/bench-exchange/src/bench.rs
@@ -11,7 +11,7 @@ use solana_drone::drone::request_airdrop_transaction;
 use solana_exchange_api::exchange_instruction;
 use solana_exchange_api::exchange_state::*;
 use solana_exchange_api::id;
-use solana_metrics::datapoint;
+use solana_metrics::datapoint_info;
 use solana_sdk::client::Client;
 use solana_sdk::client::SyncClient;
 use solana_sdk::pubkey::Pubkey;
@@ -262,7 +262,7 @@ fn do_tx_transfers(
         let duration = now.elapsed();
         total_txs_sent_count.fetch_add(n, Ordering::Relaxed);

-        datapoint!(
+        datapoint_info!(
             "bench-exchange-do_tx_transfers",
             ("duration", duration_as_ms(&duration), i64),
             ("count", n, i64)
@@ -409,7 +409,7 @@ fn swapper(
             txs = 0;
         }

-        datapoint!("bench-exchange-swaps", ("count", to_swap_txs.len(), i64));
+        datapoint_info!("bench-exchange-swaps", ("count", to_swap_txs.len(), i64));

         let chunks: Vec<_> = to_swap_txs.chunks(chunk_size).collect();
         {
@@ -541,7 +541,7 @@ fn trader(
             txs = 0;
         }

-        datapoint!("bench-exchange-trades", ("count", trades_txs.len(), i64));
+        datapoint_info!("bench-exchange-trades", ("count", trades_txs.len(), i64));

         {
             let mut shared_txs_wl = shared_txs
diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs
index 8e11d0f32..7c2104af9 100644
--- a/bench-tps/src/bench.rs
+++ b/bench-tps/src/bench.rs
@@ -5,7 +5,7 @@ use rayon::prelude::*;
 use solana::gen_keys::GenKeys;
 use solana_client::perf_utils::{sample_txs, SampleStats};
 use solana_drone::drone::request_airdrop_transaction;
-use solana_metrics::datapoint;
+use solana_metrics::datapoint_info;
 use solana_sdk::client::Client;
 use solana_sdk::hash::Hash;
 use solana_sdk::signature::{Keypair, KeypairUtil};
@@ -209,7 +209,7 @@ where

 fn metrics_submit_lamport_balance(lamport_balance: u64) {
     println!("Token balance: {}", lamport_balance);
-    datapoint!(
+    datapoint_info!(
         "bench-tps-lamport_balance",
         ("balance", lamport_balance, i64)
     );
@@ -253,7 +253,7 @@ fn generate_txs(
         duration_as_ms(&duration),
         blockhash,
     );
-    datapoint!(
+    datapoint_info!(
         "bench-tps-generate_txs",
         ("duration", duration_as_ms(&duration), i64)
     );
@@ -310,7 +310,7 @@ fn do_tx_transfers(
             duration_as_ms(&transfer_start.elapsed()),
             tx_len as f32 / duration_as_s(&transfer_start.elapsed()),
         );
-        datapoint!(
+        datapoint_info!(
             "bench-tps-do_tx_transfers",
             ("duration", duration_as_ms(&transfer_start.elapsed()), i64),
             ("count", tx_len, i64)
diff --git a/core/src/blocktree.rs b/core/src/blocktree.rs
index d8b1140d1..8c689cb2c 100644
--- a/core/src/blocktree.rs
+++ b/core/src/blocktree.rs
@@ -16,7 +16,7 @@ use hashbrown::HashMap;

 #[cfg(not(feature = "kvstore"))]
 use rocksdb;

-use solana_metrics::datapoint;
+use solana_metrics::{datapoint_error, datapoint_info};
 use solana_sdk::genesis_block::GenesisBlock;
 use solana_sdk::hash::Hash;
@@ -398,17 +398,14 @@ impl Blocktree {
         for (signal, slots) in self.completed_slots_senders.iter().zip(slots.into_iter()) {
             let res = signal.try_send(slots);
             if let Err(TrySendError::Full(_)) = res {
-                solana_metrics::submit(
-                    solana_metrics::influxdb::Point::new("blocktree_error")
-                        .add_field(
-                            "error",
-                            solana_metrics::influxdb::Value::String(
-                                "Unable to send newly completed slot because channel is full"
-                                    .to_string(),
-                            ),
-                        )
-                        .to_owned(),
-                    log::Level::Error,
+                datapoint_error!(
+                    "blocktree_error",
+                    (
+                        "error",
+                        "Unable to send newly completed slot because channel is full"
+                            .to_string(),
+                        String
+                    ),
                 );
             }
         }
@@ -1031,7 +1028,7 @@
     // for the slot
     let last_index = slot.last_index;
     if blob_index >= last_index {
-        datapoint!(
+        datapoint_error!(
             "blocktree_error",
             (
                 "error",
@@ -1048,7 +1045,7 @@
     // Check that we do not receive a blob with "last_index" true, but index
     // less than our current received
     if blob.is_last_in_slot() && blob_index < slot.received {
-        datapoint!(
+        datapoint_error!(
             "blocktree_error",
             (
                 "error",
@@ -1322,7 +1319,7 @@ fn try_erasure_recover(
     let (data_end_index, _) = erasure_meta.end_indexes();

     let submit_metrics = |attempted: bool, status: String| {
-        datapoint!(
+        datapoint_info!(
             "blocktree-erasure",
             ("slot", slot as i64, i64),
             ("start_index", start_index as i64, i64),
diff --git a/core/src/cluster_info.rs b/core/src/cluster_info.rs
index 091e153ab..32cf8751d 100644
--- a/core/src/cluster_info.rs
+++ b/core/src/cluster_info.rs
@@ -29,7 +29,7 @@ use core::cmp;
 use hashbrown::HashMap;
 use rand::{thread_rng, Rng};
 use rayon::prelude::*;
-use solana_metrics::{datapoint, inc_new_counter_debug, inc_new_counter_error};
+use solana_metrics::{datapoint_debug, inc_new_counter_debug, inc_new_counter_error};
 use solana_netutil::{
     bind_in_range, bind_to, find_available_port_in_range, multi_bind_in_range, PortRange,
 };
@@ -850,7 +850,7 @@ impl ClusterInfo {
         let out = {
             match repair_request {
                 RepairType::Blob(slot, blob_index) => {
-                    datapoint!(
+                    datapoint_debug!(
                         "cluster_info-repair",
                         ("repair-slot", *slot, i64),
                         ("repair-ix", *blob_index, i64)
@@ -858,7 +858,7 @@ impl ClusterInfo {
                     self.window_index_request_bytes(*slot, *blob_index)?
                 }
                 RepairType::HighestBlob(slot, blob_index) => {
-                    datapoint!(
+                    datapoint_debug!(
                         "cluster_info-repair_highest",
                         ("repair-highest-slot", *slot, i64),
                         ("repair-highest-ix", *blob_index, i64)
@@ -866,7 +866,7 @@ impl ClusterInfo {
                     self.window_highest_index_request_bytes(*slot, *blob_index)?
                 }
                 RepairType::Orphan(slot) => {
-                    datapoint!("cluster_info-repair_orphan", ("repair-orphan", *slot, i64));
+                    datapoint_debug!("cluster_info-repair_orphan", ("repair-orphan", *slot, i64));
                     self.orphan_bytes(*slot)?
                 }
             }
diff --git a/core/src/locktower.rs b/core/src/locktower.rs
index 8e8a6a1b7..a20fe6f3e 100644
--- a/core/src/locktower.rs
+++ b/core/src/locktower.rs
@@ -1,7 +1,7 @@
 use crate::bank_forks::BankForks;
 use crate::staking_utils;
 use hashbrown::{HashMap, HashSet};
-use solana_metrics::datapoint;
+use solana_metrics::datapoint_info;
 use solana_runtime::bank::Bank;
 use solana_sdk::account::Account;
 use solana_sdk::pubkey::Pubkey;
@@ -125,7 +125,7 @@ impl Locktower {
             vote_state.nth_recent_vote(0).map(|v| v.slot).unwrap_or(0) as i64
         );
         debug!("observed root {}", vote_state.root_slot.unwrap_or(0) as i64);
-        datapoint!(
+        datapoint_info!(
             "locktower-observed",
             (
                 "slot",
@@ -211,7 +211,7 @@ impl Locktower {
             self.epoch_stakes.epoch
         );
         self.epoch_stakes = EpochStakes::new_from_bank(bank, &self.epoch_stakes.delegate_id);
-        datapoint!(
+        datapoint_info!(
             "locktower-epoch",
             ("epoch", self.epoch_stakes.epoch, i64),
             ("self_staked", self.epoch_stakes.self_staked, i64),
@@ -223,7 +223,7 @@ impl Locktower {
     pub fn record_vote(&mut self, slot: u64) -> Option<u64> {
         let root_slot = self.lockouts.root_slot;
         self.lockouts.process_vote(&Vote { slot });
-        datapoint!(
+        datapoint_info!(
             "locktower-vote",
             ("latest", slot, i64),
             ("root", self.lockouts.root_slot.unwrap_or(0), i64)
diff --git a/core/src/repair_service.rs b/core/src/repair_service.rs
index 3d7dc7cc9..0193d6213 100644
--- a/core/src/repair_service.rs
+++ b/core/src/repair_service.rs
@@ -6,7 +6,7 @@ use crate::blocktree::{Blocktree, CompletedSlotsReceiver, SlotMeta};
 use crate::cluster_info::ClusterInfo;
 use crate::result::Result;
 use crate::service::Service;
-use solana_metrics::datapoint;
+use solana_metrics::datapoint_info;
 use solana_runtime::epoch_schedule::EpochSchedule;
 use solana_sdk::pubkey::Pubkey;
 use std::collections::HashSet;
@@ -156,7 +156,7 @@ impl RepairService {

             for ((to, req), repair_request) in reqs {
                 if let Ok(local_addr) = repair_socket.local_addr() {
-                    datapoint!(
+                    datapoint_info!(
                         "repair_service",
                         ("repair_request", format!("{:?}", repair_request), String),
                         ("to", to.to_string(), String),
diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs
index b9b7a9b3c..5400c664e 100644
--- a/core/src/replay_stage.rs
+++ b/core/src/replay_stage.rs
@@ -14,7 +14,7 @@ use crate::result::{Error, Result};
 use crate::rpc_subscriptions::RpcSubscriptions;
 use crate::service::Service;
 use hashbrown::HashMap;
-use solana_metrics::{datapoint, inc_new_counter_error, inc_new_counter_info};
+use solana_metrics::{datapoint_warn, inc_new_counter_error, inc_new_counter_info};
 use solana_runtime::bank::Bank;
 use solana_sdk::hash::Hash;
 use solana_sdk::pubkey::Pubkey;
@@ -245,7 +245,7 @@ impl ReplayStage {
                 cluster_info.write().unwrap().set_leader(&next_leader);
             if next_leader == *my_id && reached_leader_tick {
                 debug!("{} starting tpu for slot {}", my_id, poh_slot);
-                datapoint!(
+                datapoint_warn!(
                     "replay_stage-new_leader",
                     ("count", poh_slot, i64),
                     ("grace", grace_ticks, i64));
@@ -468,7 +468,7 @@ impl ReplayStage {
                 .unwrap_or(true)
             {
                 info!("validator fork confirmed {} {}", *slot, duration);
-                datapoint!("validator-confirmation", ("duration_ms", duration, i64));
+                datapoint_warn!("validator-confirmation", ("duration_ms", duration, i64));
                 false
             } else {
                 debug!(
diff --git a/core/src/retransmit_stage.rs b/core/src/retransmit_stage.rs
index bc8c69c73..88550cfa0 100644
--- a/core/src/retransmit_stage.rs
+++ b/core/src/retransmit_stage.rs
@@ -10,7 +10,7 @@ use crate::service::Service;
 use crate::staking_utils;
 use crate::streamer::BlobReceiver;
 use crate::window_service::{should_retransmit_and_persist, WindowService};
-use solana_metrics::{datapoint, inc_new_counter_error};
+use solana_metrics::{datapoint_info, inc_new_counter_error};
 use solana_runtime::epoch_schedule::EpochSchedule;
 use solana_sdk::hash::Hash;
 use std::net::UdpSocket;
@@ -34,7 +34,7 @@ fn retransmit(
         blobs.append(&mut nq);
     }

-    datapoint!("retransmit-stage", ("count", blobs.len(), i64));
+    datapoint_info!("retransmit-stage", ("count", blobs.len(), i64));

     let r_bank = bank_forks.read().unwrap().working_bank();
     let bank_epoch = r_bank.get_stakers_epoch(r_bank.slot());
diff --git a/core/src/sigverify_stage.rs b/core/src/sigverify_stage.rs
index 410a1aae5..c1d6fd058 100644
--- a/core/src/sigverify_stage.rs
+++ b/core/src/sigverify_stage.rs
@@ -10,7 +10,7 @@ use crate::result::{Error, Result};
 use crate::service::Service;
 use crate::sigverify;
 use crate::streamer::{self, PacketReceiver};
-use solana_metrics::{datapoint, inc_new_counter_info};
+use solana_metrics::{datapoint_info, inc_new_counter_info};
 use solana_sdk::timing;
 use std::sync::mpsc::{Receiver, RecvTimeoutError, Sender};
 use std::sync::{Arc, Mutex};
@@ -95,7 +95,7 @@ impl SigVerifyStage {
             (len as f32 / total_time_s)
         );

-        datapoint!(
+        datapoint_info!(
             "sigverify_stage-total_verify_time",
             ("batch_len", batch_len, i64),
             ("len", len, i64),
diff --git a/drone/src/drone.rs b/drone/src/drone.rs
index 8b3df6954..c5ea5d4c0 100644
--- a/drone/src/drone.rs
+++ b/drone/src/drone.rs
@@ -9,7 +9,7 @@ use byteorder::{ByteOrder, LittleEndian};
 use bytes::{Bytes, BytesMut};
 use log::*;
 use serde_derive::{Deserialize, Serialize};
-use solana_metrics::datapoint;
+use solana_metrics::datapoint_info;
 use solana_sdk::hash::Hash;
 use solana_sdk::message::Message;
 use solana_sdk::packet::PACKET_DATA_SIZE;
@@ -113,7 +113,7 @@ impl Drone {
             } => {
                 if self.check_request_limit(lamports) {
                     self.request_current += lamports;
-                    datapoint!(
+                    datapoint_info!(
                         "drone-airdrop",
                         ("request_amount", lamports, i64),
                         ("request_current", self.request_current, i64)
diff --git a/metrics/Cargo.toml b/metrics/Cargo.toml
index d1bf689d7..4cc050b1f 100644
--- a/metrics/Cargo.toml
+++ b/metrics/Cargo.toml
@@ -9,12 +9,12 @@ homepage = "https://solana.com/"
 edition = "2018"

 [dependencies]
+env_logger = "0.6.1"
 influx_db_client = "0.3.6"
 log = "0.4.2"
 reqwest = "0.9.17"
 lazy_static = "1.3.0"
 sys-info = "0.5.6"
-solana-logger = { path = "../logger", version = "0.15.0" }
 solana-sdk = { path = "../sdk", version = "0.15.0" }

 [dev-dependencies]
diff --git a/metrics/src/counter.rs b/metrics/src/counter.rs
index 81d6ac705..2a0548d0c 100644
--- a/metrics/src/counter.rs
+++ b/metrics/src/counter.rs
@@ -37,29 +37,39 @@ macro_rules! create_counter {
 #[macro_export]
 macro_rules! inc_counter {
     ($name:expr, $level:expr, $count:expr) => {
-        unsafe { $name.inc($level, $count) };
+        unsafe {
+            if log_enabled!($level) {
+                $name.inc($level, $count)
+            }
+        };
     };
 }

 #[macro_export]
 macro_rules! inc_counter_info {
     ($name:expr, $count:expr) => {
-        unsafe { $name.inc(log::Level::Info, $count) };
+        unsafe {
+            if log_enabled!(log::Level::Info) {
+                $name.inc(log::Level::Info, $count)
+            }
+        };
     };
 }

 #[macro_export]
 macro_rules! inc_new_counter {
     ($name:expr, $count:expr, $level:expr, $lograte:expr, $metricsrate:expr) => {{
-        static mut INC_NEW_COUNTER: $crate::counter::Counter =
-            create_counter!($name, $lograte, $metricsrate);
-        static INIT_HOOK: std::sync::Once = std::sync::ONCE_INIT;
-        unsafe {
-            INIT_HOOK.call_once(|| {
-                INC_NEW_COUNTER.init();
-            });
+        if log_enabled!($level) {
+            static mut INC_NEW_COUNTER: $crate::counter::Counter =
+                create_counter!($name, $lograte, $metricsrate);
+            static INIT_HOOK: std::sync::Once = std::sync::ONCE_INIT;
+            unsafe {
+                INIT_HOOK.call_once(|| {
+                    INC_NEW_COUNTER.init();
+                });
+            }
+            inc_counter!(INC_NEW_COUNTER, $level, $count);
         }
-        inc_counter!(INC_NEW_COUNTER, $level, $count);
     }};
 }
@@ -134,10 +144,6 @@ impl Counter {
         );
     }
     pub fn inc(&mut self, level: log::Level, events: usize) {
-        if !log_enabled!(level) {
-            return;
-        }
-
         let counts = self.counts.fetch_add(events, Ordering::Relaxed);
         let times = self.times.fetch_add(1, Ordering::Relaxed);
         let mut lograte = self.lograte.load(Ordering::Relaxed);
@@ -187,7 +193,7 @@
 mod tests {
     use crate::counter::{Counter, DEFAULT_LOG_RATE};
     use log::Level;
-    use solana_logger;
+    use log::*;
     use std::env;
     use std::sync::atomic::Ordering;
     use std::sync::{Once, RwLock, ONCE_INIT};
@@ -206,8 +212,8 @@

     #[test]
     fn test_counter() {
-        env::set_var("RUST_LOG", "info");
-        solana_logger::setup();
+        env_logger::Builder::from_env(env_logger::Env::new().default_filter_or("solana=info"))
+            .init();
         let _readlock = get_env_lock().read();
         static mut COUNTER: Counter = create_counter!("test", 1000, 1);
         let count = 1;
@@ -241,8 +247,6 @@
     }
     #[test]
     fn test_lograte() {
-        env::set_var("RUST_LOG", "info");
-        solana_logger::setup();
         let _readlock = get_env_lock().read();
         assert_eq!(
             Counter::default_log_rate(),
@@ -252,7 +256,7 @@
             DEFAULT_LOG_RATE,
         );
         static mut COUNTER: Counter = create_counter!("test_lograte", 0, 1);
-        inc_counter!(COUNTER, Level::Info, 2);
+        inc_counter!(COUNTER, Level::Error, 2);
         unsafe {
             assert_eq!(COUNTER.lograte.load(Ordering::Relaxed), DEFAULT_LOG_RATE);
         }
@@ -260,20 +264,18 @@
     }
     #[test]
     fn test_lograte_env() {
-        env::set_var("RUST_LOG", "info");
-        solana_logger::setup();
         assert_ne!(DEFAULT_LOG_RATE, 0);
         let _writelock = get_env_lock().write();
         static mut COUNTER: Counter = create_counter!("test_lograte_env", 0, 1);
         env::set_var("SOLANA_DEFAULT_LOG_RATE", "50");
-        inc_counter!(COUNTER, Level::Info, 2);
+        inc_counter!(COUNTER, Level::Error, 2);
         unsafe {
             assert_eq!(COUNTER.lograte.load(Ordering::Relaxed), 50);
         }
         static mut COUNTER2: Counter = create_counter!("test_lograte_env", 0, 1);
         env::set_var("SOLANA_DEFAULT_LOG_RATE", "0");
-        inc_counter!(COUNTER2, Level::Info, 2);
+        inc_counter!(COUNTER2, Level::Error, 2);
         unsafe {
             assert_eq!(COUNTER2.lograte.load(Ordering::Relaxed), DEFAULT_LOG_RATE);
         }
     }
diff --git a/metrics/src/metrics.rs b/metrics/src/metrics.rs
index ee7adaefa..e9380734c 100644
--- a/metrics/src/metrics.rs
+++ b/metrics/src/metrics.rs
@@ -53,9 +53,46 @@ macro_rules! datapoint {
             point
         }
     };
-    ($name:expr, $($fields:tt)+) => {
-        $crate::submit($crate::datapoint!(@point $name, $($fields)+), log::Level::Info);
+    ($name:expr, $($fields:tt)+) => {
+        if log_enabled!(log::Level::Debug) {
+            $crate::submit($crate::datapoint!(@point $name, $($fields)+), log::Level::Debug);
+        }
+    };
+}
+
+#[macro_export]
+macro_rules! datapoint_error {
+    ($name:expr, $($fields:tt)+) => {
+        if log_enabled!(log::Level::Error) {
+            $crate::submit($crate::datapoint!(@point $name, $($fields)+), log::Level::Error);
+        }
+    };
+}
+
+#[macro_export]
+macro_rules! datapoint_warn {
+    ($name:expr, $($fields:tt)+) => {
+        if log_enabled!(log::Level::Warn) {
+            $crate::submit($crate::datapoint!(@point $name, $($fields)+), log::Level::Warn);
+        }
+    };
+}
+
+#[macro_export]
+macro_rules! datapoint_info {
+    ($name:expr, $($fields:tt)+) => {
+        if log_enabled!(log::Level::Info) {
+            $crate::submit($crate::datapoint!(@point $name, $($fields)+), log::Level::Info);
+        }
+    };
+}
+
+#[macro_export]
+macro_rules! datapoint_debug {
+    ($name:expr, $($fields:tt)+) => {
+        if log_enabled!(log::Level::Debug) {
+            $crate::submit($crate::datapoint!(@point $name, $($fields)+), log::Level::Debug);
+        }
     };
 }
diff --git a/programs/vote_api/src/vote_instruction.rs b/programs/vote_api/src/vote_instruction.rs
index 480c4c09a..70e905a54 100644
--- a/programs/vote_api/src/vote_instruction.rs
+++ b/programs/vote_api/src/vote_instruction.rs
@@ -6,7 +6,7 @@ use crate::vote_state::{self, Vote, VoteState};
 use bincode::deserialize;
 use log::*;
 use serde_derive::{Deserialize, Serialize};
-use solana_metrics::datapoint;
+use solana_metrics::datapoint_warn;
 use solana_sdk::account::KeyedAccount;
 use solana_sdk::instruction::{AccountMeta, Instruction, InstructionError};
 use solana_sdk::pubkey::Pubkey;
@@ -88,7 +88,7 @@ pub fn process_instruction(
             vote_state::authorize_voter(vote_account, other_signers, &voter_id)
         }
         VoteInstruction::Vote(votes) => {
-            datapoint!("vote-native", ("count", 1, i64));
+            datapoint_warn!("vote-native", ("count", 1, i64));
             let (vote_account, other_signers) = keyed_accounts.split_at_mut(1);
             let vote_account = &mut vote_account[0];
diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index d6a85fa49..5f8300096 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -15,7 +15,7 @@ use bincode::serialize;
 use hashbrown::HashMap;
 use log::*;
 use solana_metrics::{
-    datapoint, inc_new_counter_debug, inc_new_counter_error, inc_new_counter_info,
+    datapoint_info, inc_new_counter_debug, inc_new_counter_error, inc_new_counter_info,
 };
 use solana_sdk::account::Account;
 use solana_sdk::fee_calculator::FeeCalculator;
@@ -151,7 +151,7 @@ impl Bank {
         bank.slot = slot;
         bank.max_tick_height = (bank.slot + 1) * bank.ticks_per_slot - 1;

-        datapoint!(
+        datapoint_info!(
             "bank-new_from_parent-heights",
             ("slot_height", slot, i64),
             ("bank_height", bank.bank_height, i64)
@@ -232,7 +232,7 @@ impl Bank {
             .for_each(|p| self.status_cache.write().unwrap().add_root(p.slot()));
         let squash_cache_ms = duration_as_ms(&squash_cache_start.elapsed());

-        datapoint!(
+        datapoint_info!(
             "locktower-observed",
             ("squash_accounts_ms", squash_accounts_ms, i64),
             ("squash_cache_ms", squash_cache_ms, i64)
@@ -547,6 +547,57 @@ impl Bank {
         self.check_signatures(txs, age_results, &mut error_counters)
     }

+    fn update_error_counters(error_counters: &ErrorCounters) {
+        if 0 != error_counters.blockhash_not_found {
+            inc_new_counter_error!(
+                "bank-process_transactions-error-blockhash_not_found",
+                error_counters.blockhash_not_found,
+                0,
+                1000
+            );
+        }
+        if 0 != error_counters.invalid_account_index {
+            inc_new_counter_error!(
+                "bank-process_transactions-error-invalid_account_index",
+                error_counters.invalid_account_index,
+                0,
+                1000
+            );
+        }
+        if 0 != error_counters.reserve_blockhash {
+            inc_new_counter_error!(
+                "bank-process_transactions-error-reserve_blockhash",
+                error_counters.reserve_blockhash,
+                0,
+                1000
+            );
+        }
+        if 0 != error_counters.duplicate_signature {
+            inc_new_counter_error!(
+                "bank-process_transactions-error-duplicate_signature",
+                error_counters.duplicate_signature,
+                0,
+                1000
+            );
+        }
+        if 0 != error_counters.insufficient_funds {
+            inc_new_counter_error!(
+                "bank-process_transactions-error-insufficient_funds",
+                error_counters.insufficient_funds,
+                0,
+                1000
+            );
+        }
+        if 0 != error_counters.account_loaded_twice {
+            inc_new_counter_error!(
+                "bank-process_transactions-account_loaded_twice",
+                error_counters.account_loaded_twice,
+                0,
+                1000
+            );
+        }
+    }
+
     #[allow(clippy::type_complexity)]
     pub fn load_and_execute_transactions(
         &self,
@@ -617,54 +668,7 @@ impl Bank {
         self.increment_transaction_count(tx_count);

         inc_new_counter_info!("bank-process_transactions-txs", tx_count, 0, 1000);
-        if 0 != error_counters.blockhash_not_found {
-            inc_new_counter_error!(
-                "bank-process_transactions-error-blockhash_not_found",
-                error_counters.blockhash_not_found,
-                0,
-                1000
-            );
-        }
-        if 0 != error_counters.invalid_account_index {
-            inc_new_counter_error!(
-                "bank-process_transactions-error-invalid_account_index",
-                error_counters.invalid_account_index,
-                0,
-                1000
-            );
-        }
-        if 0 != error_counters.reserve_blockhash {
-            inc_new_counter_error!(
-                "bank-process_transactions-error-reserve_blockhash",
-                error_counters.reserve_blockhash,
-                0,
-                1000
-            );
-        }
-        if 0 != error_counters.duplicate_signature {
-            inc_new_counter_error!(
-                "bank-process_transactions-error-duplicate_signature",
-                error_counters.duplicate_signature,
-                0,
-                1000
-            );
-        }
-        if 0 != error_counters.insufficient_funds {
-            inc_new_counter_error!(
-                "bank-process_transactions-error-insufficient_funds",
-                error_counters.insufficient_funds,
-                0,
-                1000
-            );
-        }
-        if 0 != error_counters.account_loaded_twice {
-            inc_new_counter_error!(
-                "bank-process_transactions-account_loaded_twice",
-                error_counters.account_loaded_twice,
-                0,
-                1000
-            );
-        }
+        Self::update_error_counters(&error_counters);

         (loaded_accounts, executed)
     }
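For reference, the sketch below shows the level-gating pattern the new `datapoint_*` macros and the reworked `inc_new_counter!` rely on, using only the `log` and `env_logger` crates that this patch already depends on. The macro `example_datapoint_info!` and the `submit` function are illustrative stand-ins, not APIs from `solana-metrics`; the real macros build an `influx_db_client` point instead of the string rendered here.

```rust
use log::{log_enabled, Level};

// Stand-in for solana_metrics::submit(): the real function queues an
// InfluxDB point for the metrics agent; here we just log the rendered string.
fn submit(point: String, level: Level) {
    log::log!(level, "submitting datapoint: {}", point);
}

// Illustrative macro mirroring datapoint_info!: the log_enabled! guard is the
// point of the change -- when Info is filtered out, the field formatting
// below never runs.
macro_rules! example_datapoint_info {
    ($name:expr, ($field:expr, $value:expr)) => {
        if log_enabled!(Level::Info) {
            submit(format!("{} {}={}", $name, $field, $value), Level::Info);
        }
    };
}

fn main() {
    // Same filter style the updated counter tests use.
    env_logger::Builder::from_env(env_logger::Env::new().default_filter_or("info")).init();

    let lamport_balance: u64 = 42;
    example_datapoint_info!("bench-tps-lamport_balance", ("balance", lamport_balance));
}
```

The same guard is what `inc_new_counter!` gains above: the counter is only initialized and incremented when the requested level is enabled, which is why `Counter::inc` no longer needs its own `log_enabled!` early return.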