diff --git a/core/src/block_processor.rs b/core/src/block_processor.rs
index c8d55192..86d80b36 100644
--- a/core/src/block_processor.rs
+++ b/core/src/block_processor.rs
@@ -1,3 +1,4 @@
+use anyhow::Context;
 use log::{info, warn};
 use solana_rpc_client::nonblocking::rpc_client::RpcClient;
 use solana_rpc_client_api::config::RpcBlockConfig;
@@ -76,7 +77,8 @@ impl BlockProcessor {
                     rewards: Some(true),
                 },
             )
-            .await?;
+            .await
+            .context("failed to get block")?;
 
         let Some(block_height) = block.block_height else {
             return Ok(BlockProcessorResult::invalid());
diff --git a/core/src/block_store.rs b/core/src/block_store.rs
index d4a86ced..1118a462 100644
--- a/core/src/block_store.rs
+++ b/core/src/block_store.rs
@@ -68,7 +68,8 @@ impl BlockStore {
                 RpcRequest::GetLatestBlockhash,
                 json!([commitment_config]),
             )
-            .await?;
+            .await
+            .context("failed to poll latest blockhash")?;
 
         let processed_blockhash = response.value.blockhash;
         let processed_block = BlockInformation {
@@ -91,7 +92,8 @@
     ) -> anyhow::Result<(String, BlockInformation)> {
         let slot = rpc_client
             .get_slot_with_commitment(commitment_config)
-            .await?;
+            .await
+            .context("failed to fetch latest slot")?;
 
         let block = rpc_client
             .get_block_with_config(
@@ -104,7 +106,8 @@
                     max_supported_transaction_version: Some(0),
                 },
             )
-            .await?;
+            .await
+            .context("failed to fetch latest blockhash")?;
 
         let latest_block_hash = block.blockhash;
         let block_height = block
diff --git a/core/src/leader_schedule.rs b/core/src/leader_schedule.rs
index e2dd222b..cb3c52ad 100644
--- a/core/src/leader_schedule.rs
+++ b/core/src/leader_schedule.rs
@@ -1,3 +1,4 @@
+use anyhow::Context;
 use std::{collections::VecDeque, str::FromStr, sync::Arc};
 
 use dashmap::DashMap;
@@ -36,7 +37,10 @@
     }
 
     pub async fn load_cluster_info(&self, rpc_client: Arc<RpcClient>) -> anyhow::Result<()> {
-        let cluster_nodes = rpc_client.get_cluster_nodes().await?;
+        let cluster_nodes = rpc_client
+            .get_cluster_nodes()
+            .await
+            .context("failed to get cluster nodes")?;
         cluster_nodes.iter().for_each(|x| {
             if let Ok(pubkey) = Pubkey::from_str(x.pubkey.as_str()) {
                 self.cluster_nodes.insert(pubkey, Arc::new(x.clone()));
@@ -70,14 +74,17 @@
         let first_slot_to_fetch = queue_end_slot + 1;
         let leaders = rpc_client
             .get_slot_leaders(first_slot_to_fetch, last_slot_needed - first_slot_to_fetch)
-            .await?;
+            .await
+            .context("failed to get slot leaders")?;
 
         let mut leader_queue = self.leader_schedule.write().await;
         for i in first_slot_to_fetch..last_slot_needed {
             let current_leader = (i - first_slot_to_fetch) as usize;
             let leader = leaders[current_leader];
             if !self.cluster_nodes.contains_key(&leader) {
-                self.load_cluster_info(rpc_client.clone()).await?;
+                self.load_cluster_info(rpc_client.clone())
+                    .await
+                    .context("failed to load cluster info")?;
             }
 
             match self.cluster_nodes.get(&leader) {
diff --git a/lite-rpc/src/bridge.rs b/lite-rpc/src/bridge.rs
index 436e5a9b..9d6bf231 100644
--- a/lite-rpc/src/bridge.rs
+++ b/lite-rpc/src/bridge.rs
@@ -17,7 +17,7 @@ use solana_lite_rpc_services::{
     tx_sender::{TxSender, TXS_IN_CHANNEL},
 };
 
-use anyhow::bail;
+use anyhow::{bail, Context};
 use jsonrpsee::{core::SubscriptionResult, server::ServerBuilder, PendingSubscriptionSink};
 use log::{error, info};
 use prometheus::{opts, register_int_counter, IntCounter};
@@ -83,7 +83,10 @@ impl LiteBridge {
         max_retries: usize,
     ) -> anyhow::Result<Self> {
         let rpc_client = Arc::new(RpcClient::new(rpc_url.clone()));
-        let current_slot = rpc_client.get_slot().await?;
+        let current_slot = rpc_client
+            .get_slot()
+            .await
+            .context("failed to get initial slot")?;
 
         let tx_store = empty_tx_store();
 
@@ -101,7 +104,7 @@
                 max_number_of_connections: 10,
                 unistream_timeout: Duration::from_millis(500),
                 write_timeout: Duration::from_secs(1),
-                number_of_transactions_per_unistream: 10,
+                number_of_transactions_per_unistream: 8,
             },
         };
 
@@ -331,6 +334,7 @@ impl LiteRpcServer for LiteBridge {
             .rpc_client
             .is_blockhash_valid(&blockhash, commitment)
             .await
+            .context("failed to get blockhash validity")
         {
             Ok(is_valid) => is_valid,
             Err(err) => {
@@ -407,6 +411,7 @@
             .rpc_client
             .request_airdrop_with_config(&pubkey, lamports, config.unwrap_or_default())
             .await
+            .context("failed to request airdrop")
         {
             Ok(airdrop_sig) => airdrop_sig.to_string(),
             Err(err) => {
@@ -417,6 +422,7 @@
             .rpc_client
             .get_latest_blockhash_with_commitment(CommitmentConfig::finalized())
             .await
+            .context("failed to get latest blockhash")
         {
             self.tx_store.insert(
                 airdrop_sig.clone(),
diff --git a/services/src/block_listenser.rs b/services/src/block_listenser.rs
index 2f7af353..2b911b2b 100644
--- a/services/src/block_listenser.rs
+++ b/services/src/block_listenser.rs
@@ -238,7 +238,11 @@ impl BlockListener {
         // TODO insert if not exists leader_id into accountaddrs
 
         // fetch cluster time from rpc
-        let block_time = self.rpc_client.get_block_time(slot).await?;
+        let block_time = self
+            .rpc_client
+            .get_block_time(slot)
+            .await
+            .context("failed to get block time")?;
 
         // fetch local time from blockstore
         let block_info = self
diff --git a/services/src/tpu_utils/tpu_connection_manager.rs b/services/src/tpu_utils/tpu_connection_manager.rs
index 2a3f04cd..68a9b5f5 100644
--- a/services/src/tpu_utils/tpu_connection_manager.rs
+++ b/services/src/tpu_utils/tpu_connection_manager.rs
@@ -146,19 +146,11 @@ impl ActiveConnection {
                 }
             }
 
-            if txs.len() >= number_of_transactions_per_unistream - 1 {
-                // queue getting full and a connection poll is getting slower
-                // add more connections to the pool
-                if connection_pool.len() < max_number_of_connections {
-                    connection_pool.add_connection().await;
-                    NB_QUIC_CONNECTIONS.inc();
-                }
-            } else if txs.len() == 1 {
-                // low traffic / reduce connection till minimum 1
-                if connection_pool.len() > 1 {
-                    connection_pool.remove_connection().await;
-                    NB_QUIC_CONNECTIONS.dec();
-                }
+            // queue getting full and a connection poll is getting slower
+            // add more connections to the pool
+            if connection_pool.len() < max_number_of_connections {
+                connection_pool.add_connection().await;
+                NB_QUIC_CONNECTIONS.inc();
             }
 
             let task_counter = task_counter.clone();
diff --git a/services/src/tpu_utils/tpu_service.rs b/services/src/tpu_utils/tpu_service.rs
index f646c3ae..7b925033 100644
--- a/services/src/tpu_utils/tpu_service.rs
+++ b/services/src/tpu_utils/tpu_service.rs
@@ -1,4 +1,4 @@
-use anyhow::bail;
+use anyhow::{bail, Context};
 use log::{error, info};
 use prometheus::{core::GenericGauge, opts, register_int_gauge};
 use solana_client::nonblocking::rpc_client::RpcClient;
@@ -202,7 +202,8 @@ impl TpuService {
         // setup
         self.leader_schedule
             .load_cluster_info(self.rpc_client.clone())
-            .await?;
+            .await
+            .context("failed to load initial cluster info")?;
         self.update_current_stakes().await?;
         self.update_leader_schedule().await?;
         self.update_quic_connections().await;