From 95f923f7c7dd31b64dcd3ad6f578dc6e2c2cb566 Mon Sep 17 00:00:00 2001
From: Riordan Panayides
Date: Wed, 31 May 2023 14:19:29 +0100
Subject: [PATCH] cargo fmt

---
 src/worker/candle_batching/mod.rs    |  8 ++++++--
 src/worker/main.rs                   | 11 ++++++++---
 src/worker/metrics/mod.rs            | 20 ++++++++++++--------
 src/worker/trade_fetching/parsing.rs |  9 +++++++--
 src/worker/trade_fetching/scrape.rs  |  8 ++++++--
 5 files changed, 39 insertions(+), 17 deletions(-)

diff --git a/src/worker/candle_batching/mod.rs b/src/worker/candle_batching/mod.rs
index 3a8cd0b..d2c12b1 100644
--- a/src/worker/candle_batching/mod.rs
+++ b/src/worker/candle_batching/mod.rs
@@ -50,14 +50,18 @@ async fn batch_inner(
     let market_name = &market.name.clone();
     let candles = batch_1m_candles(pool, market).await?;
     send_candles(candles.clone(), candles_sender).await;
-    METRIC_CANDLES_TOTAL.with_label_values(&[market.name.as_str()]).inc_by(candles.clone().len() as u64);
+    METRIC_CANDLES_TOTAL
+        .with_label_values(&[market.name.as_str()])
+        .inc_by(candles.clone().len() as u64);
     for resolution in Resolution::iter() {
         if resolution == Resolution::R1m {
             continue;
         }
         let candles = batch_higher_order_candles(pool, market_name, resolution).await?;
         send_candles(candles.clone(), candles_sender).await;
-        METRIC_CANDLES_TOTAL.with_label_values(&[market.name.as_str()]).inc_by(candles.clone().len() as u64);
+        METRIC_CANDLES_TOTAL
+            .with_label_values(&[market.name.as_str()])
+            .inc_by(candles.clone().len() as u64);
     }
     Ok(())
 }
diff --git a/src/worker/main.rs b/src/worker/main.rs
index f8b4550..05137d2 100644
--- a/src/worker/main.rs
+++ b/src/worker/main.rs
@@ -2,7 +2,10 @@ use openbook_candles::structs::candle::Candle;
 use openbook_candles::structs::markets::{fetch_market_infos, load_markets};
 use openbook_candles::structs::openbook::OpenBookFillEvent;
 use openbook_candles::utils::Config;
-use openbook_candles::worker::metrics::{serve_metrics, METRIC_DB_POOL_AVAILABLE, METRIC_DB_POOL_SIZE, METRIC_CANDLES_QUEUE_LENGTH, METRIC_FILLS_QUEUE_LENGTH};
+use openbook_candles::worker::metrics::{
+    serve_metrics, METRIC_CANDLES_QUEUE_LENGTH, METRIC_DB_POOL_AVAILABLE, METRIC_DB_POOL_SIZE,
+    METRIC_FILLS_QUEUE_LENGTH,
+};
 use openbook_candles::worker::trade_fetching::scrape::scrape;
 use openbook_candles::{
     database::{
@@ -91,8 +94,10 @@ async fn main() -> anyhow::Result<()> {
             METRIC_DB_POOL_AVAILABLE.set(pool_status.available as i64);
             METRIC_DB_POOL_SIZE.set(pool_status.size as i64);
 
-            METRIC_CANDLES_QUEUE_LENGTH.set((candles_queue_max_size - monitor_candle_channel.capacity()) as i64);
-            METRIC_FILLS_QUEUE_LENGTH.set((fills_queue_max_size - monitor_fill_channel.capacity()) as i64);
+            METRIC_CANDLES_QUEUE_LENGTH
+                .set((candles_queue_max_size - monitor_candle_channel.capacity()) as i64);
+            METRIC_FILLS_QUEUE_LENGTH
+                .set((fills_queue_max_size - monitor_fill_channel.capacity()) as i64);
 
             tokio::time::sleep(WaitDuration::from_secs(10)).await;
         }
diff --git a/src/worker/metrics/mod.rs b/src/worker/metrics/mod.rs
index 8b65d48..2302784 100644
--- a/src/worker/metrics/mod.rs
+++ b/src/worker/metrics/mod.rs
@@ -1,7 +1,10 @@
 use actix_web::{dev::Server, http::StatusCode, App, HttpServer};
 use actix_web_prom::PrometheusMetricsBuilder;
 use lazy_static::lazy_static;
-use prometheus::{register_int_counter_vec_with_registry, IntCounterVec, Registry, IntGauge, register_int_gauge_with_registry, register_int_counter_with_registry, IntCounter};
+use prometheus::{
+    register_int_counter_vec_with_registry, register_int_gauge_with_registry, IntCounterVec,
+    IntGauge, Registry,
+};
 
 lazy_static! {
     static ref METRIC_REGISTRY: Registry =
@@ -39,13 +42,14 @@ lazy_static! {
         METRIC_REGISTRY
     )
     .unwrap();
-    pub static ref METRIC_RPC_ERRORS_TOTAL: IntCounterVec = register_int_counter_vec_with_registry!(
-        "rpc_errors_total",
-        "RPC errors while scraping",
-        &["method"],
-        METRIC_REGISTRY
-    )
-    .unwrap();
+    pub static ref METRIC_RPC_ERRORS_TOTAL: IntCounterVec =
+        register_int_counter_vec_with_registry!(
+            "rpc_errors_total",
+            "RPC errors while scraping",
+            &["method"],
+            METRIC_REGISTRY
+        )
+        .unwrap();
     pub static ref METRIC_DB_POOL_SIZE: IntGauge = register_int_gauge_with_registry!(
         "db_pool_size",
         "Current size of the DB connection pool",
diff --git a/src/worker/trade_fetching/parsing.rs b/src/worker/trade_fetching/parsing.rs
index 3d1981b..fea4e0d 100644
--- a/src/worker/trade_fetching/parsing.rs
+++ b/src/worker/trade_fetching/parsing.rs
@@ -5,7 +5,10 @@ use solana_transaction_status::{
 };
 use std::{collections::HashMap, io::Error};
 
-use crate::{structs::openbook::{OpenBookFillEvent, OpenBookFillEventRaw}, worker::metrics::METRIC_RPC_ERRORS_TOTAL};
+use crate::{
+    structs::openbook::{OpenBookFillEvent, OpenBookFillEventRaw},
+    worker::metrics::METRIC_RPC_ERRORS_TOTAL,
+};
 
 const PROGRAM_DATA: &str = "Program data: ";
 
@@ -35,7 +38,9 @@ pub fn parse_trades_from_openbook_txns(
                 }
             }
             Err(_) => {
-                METRIC_RPC_ERRORS_TOTAL.with_label_values(&["getTransaction"]).inc();
+                METRIC_RPC_ERRORS_TOTAL
+                    .with_label_values(&["getTransaction"])
+                    .inc();
             }
         }
     }
diff --git a/src/worker/trade_fetching/scrape.rs b/src/worker/trade_fetching/scrape.rs
index 9dd7553..f038781 100644
--- a/src/worker/trade_fetching/scrape.rs
+++ b/src/worker/trade_fetching/scrape.rs
@@ -9,7 +9,9 @@ use std::{collections::HashMap, str::FromStr, time::Duration as WaitDuration};
 use tokio::sync::mpsc::Sender;
 
 use crate::{
-    structs::openbook::OpenBookFillEvent, utils::Config, worker::metrics::{METRIC_FILLS_TOTAL, METRIC_RPC_ERRORS_TOTAL},
+    structs::openbook::OpenBookFillEvent,
+    utils::Config,
+    worker::metrics::{METRIC_FILLS_TOTAL, METRIC_RPC_ERRORS_TOTAL},
 };
 
 use super::parsing::parse_trades_from_openbook_txns;
@@ -60,7 +62,9 @@ pub async fn scrape_transactions(
         Ok(s) => s,
         Err(e) => {
             println!("Error in get_signatures_for_address_with_config: {}", e);
-            METRIC_RPC_ERRORS_TOTAL.with_label_values(&["getSignaturesForAddress"]).inc();
+            METRIC_RPC_ERRORS_TOTAL
+                .with_label_values(&["getSignaturesForAddress"])
+                .inc();
             return before_sig;
         }
     };