Update to solana 1.16.14 and anchor 0.28.0 (#718)

- Change fixed to be a git dependency (no more submodules!)
- Upgrade fixed to a version compatible with borsh 0.10
- Upgrade openbook-v2 dependency (for anchor compat)
- Move services from mango-feeds repo into bin/
- Update mango-feeds-connector

Co-authored-by: Christian Kamm <mail@ckamm.de>
Co-authored-by: Riordan Panayides <riordan@panayid.es>
This commit is contained in:
Steve 2023-10-05 10:56:45 +02:00 committed by GitHub
parent 2ad6ec4166
commit f625284593
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
60 changed files with 8324 additions and 1703 deletions

View File

@ -19,4 +19,6 @@ programs/mango-v4/src/lib-expanded.rs
keeper/.env
.env
ts/client/src/scripts/archive/ts.ts
ts/client/src/scripts/archive/ts.ts
**/.git

View File

@ -31,8 +31,8 @@ on:
env:
CARGO_TERM_COLOR: always
SOLANA_VERSION: '1.14.9'
RUST_TOOLCHAIN: '1.65.0'
SOLANA_VERSION: '1.16.14'
RUST_TOOLCHAIN: '1.69.0'
LOG_PROGRAM: '4MangoMjqJ2firMokCjjGgoK8d4MXcrgL7XJaL3w6fVg'
jobs:

View File

@ -11,6 +11,10 @@ on:
'bin/keeper/**',
'bin/liquidator/**',
'bin/settler/**',
'bin/service-mango-crank/**',
'bin/service-mango-fills/**',
'bin/service-mango-orderbook/**',
'bin/service-mango-pnl/**',
]
workflow_dispatch:

View File

@ -10,8 +10,8 @@ on:
env:
CARGO_TERM_COLOR: always
SOLANA_VERSION: '1.14.9'
RUST_TOOLCHAIN: '1.65.0'
SOLANA_VERSION: '1.16.7'
RUST_TOOLCHAIN: '1.69.0'
jobs:
build:
@ -46,8 +46,8 @@ jobs:
solana --version
echo "Generating keypair..."
solana-keygen new -o "$HOME/.config/solana/id.json" --no-passphrase --silent
echo Installing bpf toolchain...
(cd /home/runner/.local/share/solana/install/active_release/bin/sdk/bpf/scripts; ./install.sh)
echo Installing sbf toolchain...
(cd /home/runner/.local/share/solana/install/active_release/bin/sdk/sbf/scripts; ./install.sh)
- name: Install Soteria
run: |

3
.gitmodules vendored
View File

@ -1,3 +0,0 @@
[submodule "3rdparty/fixed"]
path = 3rdparty/fixed
url = https://gitlab.com/ckamm/fixed.git

1
3rdparty/fixed vendored

@ -1 +0,0 @@
Subproject commit 95bf614b09742333451e073704f11ea502d4563b

4810
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -6,20 +6,24 @@ members = [
]
[workspace.dependencies]
anchor-client = "0.27.0"
anchor-lang = "0.27.0"
anchor-spl = "0.27.0"
fixed = { path = "./3rdparty/fixed", version = "1.11.0" }
pyth-sdk-solana = "0.7.0"
serum_dex = { git = "https://github.com/openbook-dex/program.git" }
solana-address-lookup-table-program = "~1.14.9"
solana-account-decoder = "~1.14.9"
solana-client = "~1.14.9"
solana-logger = "~1.14.9"
solana-program = "~1.14.9"
solana-program-test = "~1.14.9"
solana-rpc = "~1.14.9"
solana-sdk = "~1.14.9"
anchor-client = "0.28.0"
anchor-lang = "0.28.0"
anchor-spl = "0.28.0"
fixed = { git = "https://github.com/blockworks-foundation/fixed.git", branch = "v1.11.0-borsh0_10-mango" }
pyth-sdk-solana = "0.8.0"
# commit c85e56d (0.5.10 plus dependency updates)
serum_dex = { git = "https://github.com/openbook-dex/program.git", default-features=false }
mango-feeds-connector = "0.2.0"
# 1.16.7+ is required due to this: https://github.com/blockworks-foundation/mango-v4/issues/712
solana-address-lookup-table-program = "~1.16.7"
solana-account-decoder = "~1.16.7"
solana-client = "~1.16.7"
solana-logger = "~1.16.7"
solana-program = "~1.16.7"
solana-program-test = "~1.16.7"
solana-rpc = "~1.16.7"
solana-sdk = { version = "~1.16.7", default-features = false }
[profile.release]
overflow-checks = true

View File

@ -1,29 +1,36 @@
# syntax = docker/dockerfile:1.2
# Base image containing all binaries, deployed to ghcr.io/blockworks-foundation/mango-v4:latest
FROM rust:1.65 as base
# RUN cargo install cargo-chef --locked
FROM rust:1.69.0-bullseye as base
RUN cargo install cargo-chef --locked
RUN rustup component add rustfmt
RUN apt-get update && apt-get -y install clang cmake
WORKDIR /app
FROM base as plan
COPY . .
# Hack to prevent a ghost member lib/init
RUN sed -i 's|lib/\*|lib/checked_math|' Cargo.toml
# Hack to prevent local serum_dex manifests conflicting with cargo dependency
RUN rm -rf anchor/tests
# RUN cargo chef prepare --recipe-path recipe.json
RUN cargo chef prepare --recipe-path recipe.json
FROM base as build
# COPY --from=plan /app/recipe.json .
COPY --from=plan /app/recipe.json .
RUN cargo chef cook --release --recipe-path recipe.json
COPY . .
# RUN cargo chef cook --release --recipe-path recipe.json
RUN cargo build --release --bins
FROM debian:bullseye-slim as run
RUN apt-get update && apt-get -y install ca-certificates libc6
COPY --from=build /app/target/release/keeper /usr/local/bin/
COPY --from=build /app/target/release/liquidator /usr/local/bin/
COPY --from=build /app/target/release/settler /usr/local/bin/
COPY --from=build /app/target/release/service-mango-* /usr/local/bin/
COPY --from=build /app/bin/service-mango-pnl/conf/template-config.toml ./pnl-config.toml
COPY --from=build /app/bin/service-mango-fills/conf/template-config.toml ./fills-config.toml
COPY --from=build /app/bin/service-mango-orderbook/conf/template-config.toml ./orderbook-config.toml
COPY --from=build /app/bin/service-mango-pnl/conf/template-config.toml ./pnl-config.toml
COPY --from=build /app/bin/service-mango-fills/conf//template-config.toml ./fills-config.toml
COPY --from=build /app/bin/service-mango-orderbook/conf/template-config.toml ./orderbook-config.toml
RUN adduser --system --group --no-create-home mangouser
USER mangouser

View File

@ -25,17 +25,12 @@ See DEVELOPING.md
### Dependencies
- rust version 1.65.0
- solana-cli 1.14.9
- anchor-cli 0.27.0
- rust version 1.69.0
- solana-cli 1.16.7
- anchor-cli 0.28.0
- npm 8.1.2
- node v16.13.1
### Submodules
After cloning this repo you'll need to init and update its git submodules.
Consider setting the git option `submodule.recurse=true`.
### Deployments
- devnet: 4MangoMjqJ2firMokCjjGgoK8d4MXcrgL7XJaL3w6fVg

View File

@ -21,7 +21,7 @@ futures = "0.3.21"
mango-v4 = { path = "../../programs/mango-v4", features = ["client"] }
mango-v4-client = { path = "../../lib/client" }
pyth-sdk-solana = { workspace = true }
serum_dex = { workspace = true, default-features=false,features = ["no-entrypoint", "program"] }
serum_dex = { workspace = true, features = ["no-entrypoint", "program"] }
solana-client = { workspace = true }
solana-sdk = { workspace = true }
tokio = { version = "1.14.1", features = ["rt-multi-thread", "time", "macros", "sync"] }

View File

@ -22,7 +22,7 @@ itertools = "0.10.3"
mango-v4 = { path = "../../programs/mango-v4", features = ["client"] }
mango-v4-client = { path = "../../lib/client" }
pyth-sdk-solana = { workspace = true }
serum_dex = { workspace = true, default-features=false,features = ["no-entrypoint", "program"] }
serum_dex = { workspace = true, features = ["no-entrypoint", "program"] }
solana-client = { workspace = true }
solana-sdk = { workspace = true }
tokio = { version = "1.14.1", features = ["rt-multi-thread", "time", "macros", "sync"] }

View File

@ -39,7 +39,7 @@ rand = "0.7"
serde = "1.0.130"
serde_derive = "1.0.130"
serde_json = "1.0.68"
serum_dex = { workspace = true, default-features=false,features = ["no-entrypoint", "program"] }
serum_dex = { workspace = true, features = ["no-entrypoint", "program"] }
shellexpand = "2.1.0"
solana-account-decoder = { workspace = true }
solana-client = { workspace = true }

View File

@ -0,0 +1,45 @@
[package]
name = "service-mango-crank"
version = "0.1.0"
authors = ["Maximilian Schneider <max@mango.markets>"]
license = "AGPL-3.0-or-later"
edition = "2018"
[dependencies]
mango-feeds-lib = { path = "../../lib/mango-feeds-lib" }
mango-feeds-connector = { workspace = true }
solana-client = { workspace = true }
solana-logger = { workspace = true }
solana-sdk = { workspace = true }
anchor-lang = { workspace = true }
anchor-client = { workspace = true }
mango-v4 = { path = "../../programs/mango-v4", features = ["client"] }
mango-v4-client = { path = "../../lib/client" }
serum_dex = { workspace = true }
bs58 = "0.3.1"
log = "0.4"
anyhow = "1.0"
toml = "0.5"
serde = "1.0.130"
serde_derive = "1.0.130"
serde_json = "1.0.68"
futures-channel = "0.3"
futures-util = "0.3"
ws = "^0.9.2"
async-channel = "1.6"
async-trait = "0.1"
bytemuck = "^1.7.2"
itertools = "0.10.3"
tokio = { version = "1", features = ["full"] }
tokio-tungstenite = "0.17"

View File

@ -0,0 +1,45 @@
use log::*;
use solana_client::nonblocking::rpc_client::RpcClient;
use solana_sdk::{clock::DEFAULT_MS_PER_SLOT, commitment_config::CommitmentConfig, hash::Hash};
use std::{
sync::{Arc, RwLock},
time::Duration,
};
use tokio::{spawn, time::sleep};
const RETRY_INTERVAL: Duration = Duration::from_millis(5 * DEFAULT_MS_PER_SLOT);
/// Periodically refreshes the shared `blockhash` from the RPC node.
///
/// Polls at `RETRY_INTERVAL` forever; the write lock is only taken when the
/// fetched blockhash actually differs from the cached one.
pub async fn poll_loop(blockhash: Arc<RwLock<Hash>>, client: Arc<RpcClient>) {
    let commitment = CommitmentConfig::processed();
    loop {
        let cached = *blockhash.read().unwrap();
        match client.get_latest_blockhash_with_commitment(commitment).await {
            Ok((latest, _slot)) if latest != cached => {
                debug!("new blockhash ({:?})", blockhash);
                *blockhash.write().unwrap() = latest;
            }
            _ => {}
        }
        // Retry every few slots
        sleep(RETRY_INTERVAL).await;
    }
}
/// Fetches an initial blockhash (panicking if the first fetch fails) and
/// spawns a background task that keeps it fresh via [`poll_loop`].
///
/// Returns the shared, continuously-updated blockhash.
pub async fn init(client: Arc<RpcClient>) -> Arc<RwLock<Hash>> {
    // The very first blockhash must exist before anyone reads the lock.
    let initial = client
        .get_latest_blockhash()
        .await
        .expect("fetch initial blockhash");
    let blockhash = Arc::new(RwLock::new(initial));

    // The poller runs for the lifetime of the process; its join handle is
    // intentionally dropped (fire-and-forget).
    let _join_hdl = spawn(poll_loop(blockhash.clone(), client));

    blockhash
}

View File

@ -0,0 +1,182 @@
mod blockhash_poller;
mod mango_v4_perp_crank_sink;
mod openbook_crank_sink;
mod transaction_builder;
mod transaction_sender;
use anchor_client::Cluster;
use bytemuck::bytes_of;
use log::*;
use mango_v4_client::{Client, MangoGroupContext, TransactionBuilderConfig};
use solana_client::nonblocking::rpc_client::RpcClient;
use solana_sdk::commitment_config::CommitmentConfig;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Keypair;
use std::iter::FromIterator;
use std::{
collections::HashSet,
convert::TryFrom,
fs::File,
io::Read,
str::FromStr,
sync::{atomic::AtomicBool, Arc},
time::Duration,
};
use mango_feeds_connector::EntityFilter::FilterByAccountIds;
use mango_feeds_connector::FilterConfig;
use mango_feeds_connector::{
grpc_plugin_source, metrics, websocket_source, MetricsConfig, SourceConfig,
};
use serde::Deserialize;
/// Runtime configuration for the crank service, deserialized from the TOML
/// file whose path is passed as the first CLI argument.
#[derive(Clone, Debug, Deserialize)]
pub struct Config {
    /// Account/slot feed configuration (gRPC geyser sources + snapshot RPC).
    pub source: SourceConfig,
    /// Metrics output configuration (stdout/http).
    pub metrics: MetricsConfig,
    // NOTE(review): not referenced in this service's main() as shown —
    // possibly carried over from service-mango-fills; confirm before relying on it.
    pub bind_ws_addr: String,
    /// JSON-RPC HTTP endpoint; also used to derive the websocket URL.
    pub rpc_http_url: String,
    /// Mango group pubkey (base58) whose markets are cranked.
    pub mango_group: String,
    /// Signer keypair bytes, parsed with `Keypair::from_bytes` in main().
    pub keypair: Vec<u8>,
}
/// Entry point for the crank service.
///
/// Loads the TOML config given as the first CLI argument, discovers all perp
/// and openbook event queues of the configured mango group, wires the
/// account-write pipeline (source -> transaction_builder -> transaction_sender),
/// and then blocks on the account/slot event source.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    solana_logger::setup_with_default("info");
    let exit: Arc<AtomicBool> = Arc::new(AtomicBool::new(false));

    // The one required CLI argument is the path to the TOML config file.
    let args: Vec<String> = std::env::args().collect();
    if args.len() < 2 {
        error!("Please enter a config file path argument.");
        return Ok(());
    }
    let config: Config = {
        let mut file = File::open(&args[1])?;
        let mut contents = String::new();
        file.read_to_string(&mut contents)?;
        toml::from_str(&contents).unwrap()
    };

    let rpc_client = Arc::new(RpcClient::new(config.rpc_http_url.clone()));
    let blockhash = blockhash_poller::init(rpc_client.clone()).await;
    let metrics_tx = metrics::start(config.metrics, "crank".into());

    let rpc_url = config.rpc_http_url;
    let ws_url = rpc_url.replace("https", "wss");
    let rpc_timeout = Duration::from_secs(10);
    let cluster = Cluster::Custom(rpc_url.clone(), ws_url.clone());
    // A throwaway keypair suffices: this client is only used for reads;
    // signing uses config.keypair below.
    let client = Client::new(
        cluster.clone(),
        CommitmentConfig::processed(),
        Arc::new(Keypair::new()),
        Some(rpc_timeout),
        TransactionBuilderConfig {
            prioritization_micro_lamports: None,
        },
    );
    let group_pk = Pubkey::from_str(&config.mango_group).unwrap();
    let group_context =
        Arc::new(MangoGroupContext::new_from_rpc(&client.rpc_async(), group_pk).await?);

    // (perp market address, perp event queue) for every perp market.
    let perp_queue_pks: Vec<_> = group_context
        .perp_markets
        .values()
        .map(|context| (context.address, context.market.event_queue))
        .collect();

    // fetch all serum/openbook markets to find their event queues
    let serum_market_pks: Vec<_> = group_context
        .serum3_markets
        .values()
        .map(|context| context.market.serum_market_external)
        .collect();
    let serum_market_ais = client
        .rpc_async()
        .get_multiple_accounts(serum_market_pks.as_slice())
        .await?;

    // Zip pubkeys with the fetched accounts *before* dropping missing ones,
    // so a None entry cannot shift the pubkey/account pairing. (The previous
    // filter-then-index-by-position approach misaligned the two lists when
    // any account was missing.)
    let serum_queue_pks: Vec<_> = serum_market_pks
        .iter()
        .zip(serum_market_ais.iter())
        .filter_map(|(market_pk, maybe_ai)| maybe_ai.as_ref().map(|ai| (market_pk, ai)))
        .map(|(market_pk, ai)| {
            // MarketState is framed by 5 bytes of serum account padding.
            let market_state: serum_dex::state::MarketState = *bytemuck::from_bytes(
                &ai.data[5..5 + std::mem::size_of::<serum_dex::state::MarketState>()],
            );
            let event_q = market_state.event_q;
            (*market_pk, Pubkey::try_from(bytes_of(&event_q)).unwrap())
        })
        .collect();

    let (account_write_queue_sender, slot_queue_sender, instruction_receiver) =
        transaction_builder::init(
            perp_queue_pks.clone(),
            serum_queue_pks.clone(),
            group_pk,
            metrics_tx.clone(),
        )
        .expect("init transaction builder");

    transaction_sender::init(
        instruction_receiver,
        blockhash,
        rpc_client,
        Keypair::from_bytes(&config.keypair).expect("valid keypair in config"),
    );

    info!(
        "connect: {}",
        config
            .source
            .grpc_sources
            .iter()
            .map(|c| c.connection_string.clone())
            .collect::<String>()
    );
    let use_geyser = true;
    // Subscribe only to the event-queue accounts we intend to crank.
    let all_queue_pks: HashSet<Pubkey> = perp_queue_pks
        .iter()
        .chain(serum_queue_pks.iter())
        .map(|mkt| mkt.1)
        .collect();
    let filter_config = FilterConfig {
        entity_filter: FilterByAccountIds(Vec::from_iter(all_queue_pks)),
    };
    if use_geyser {
        grpc_plugin_source::process_events(
            &config.source,
            &filter_config,
            account_write_queue_sender,
            slot_queue_sender,
            metrics_tx.clone(),
            exit.clone(),
        )
        .await;
    } else {
        websocket_source::process_events(
            &config.source,
            &filter_config,
            account_write_queue_sender,
            slot_queue_sender,
        )
        .await;
    }
    Ok(())
}

View File

@ -0,0 +1,112 @@
use std::{
borrow::BorrowMut,
collections::{BTreeMap, BTreeSet},
convert::TryFrom,
};
use async_channel::Sender;
use async_trait::async_trait;
use log::*;
use mango_feeds_connector::{account_write_filter::AccountWriteSink, chain_data::AccountData};
use solana_sdk::{
account::ReadableAccount,
instruction::{AccountMeta, Instruction},
pubkey::Pubkey,
};
use bytemuck::cast_ref;
use anchor_lang::AccountDeserialize;
const MAX_BACKLOG: usize = 2;
/// Account-write sink that turns mango-v4 perp event-queue updates into
/// `PerpConsumeEvents` instructions, forwarded on `instruction_sender`.
pub struct MangoV4PerpCrankSink {
    // Built as-is from the (Pubkey, Pubkey) pairs passed to `new`; queried
    // with the written account's pubkey in `process`.
    pks: BTreeMap<Pubkey, Pubkey>,
    // Mango group the perp markets belong to.
    group_pk: Pubkey,
    // Instructions are sent here for signing/submission elsewhere.
    instruction_sender: Sender<Vec<Instruction>>,
}
impl MangoV4PerpCrankSink {
    /// Creates a sink for the given (Pubkey, Pubkey) pairs belonging to
    /// `group_pk`; generated instructions are emitted on `instruction_sender`.
    pub fn new(
        pks: Vec<(Pubkey, Pubkey)>,
        group_pk: Pubkey,
        instruction_sender: Sender<Vec<Instruction>>,
    ) -> Self {
        let pks = pks.into_iter().collect();
        Self {
            pks,
            group_pk,
            instruction_sender,
        }
    }
}
#[async_trait]
impl AccountWriteSink for MangoV4PerpCrankSink {
    /// Inspects a perp event-queue account write and, when warranted, emits a
    /// `PerpConsumeEvents` instruction (limit 10) for the owning market.
    ///
    /// Returns `Err("throttled")` when there is no fill event and the backlog
    /// is small; other `Err` values come from the instruction channel.
    async fn process(&self, pk: &Pubkey, account: &AccountData) -> Result<(), String> {
        let account = &account.account;
        let event_queue: mango_v4::state::EventQueue =
            mango_v4::state::EventQueue::try_deserialize(account.data().borrow_mut()).unwrap();
        // Only crank if at least one fill, or sufficiently many events of
        // other kinds, are buffered.
        let contains_fill_events = event_queue
            .iter()
            .any(|e| e.event_type == mango_v4::state::EventType::Fill as u8);
        let has_backlog = event_queue.iter().count() > MAX_BACKLOG;
        if !contains_fill_events && !has_backlog {
            return Err("throttled".into());
        }
        // Collect the mango accounts touched by the first 10 events; these
        // get appended as remaining accounts on the instruction below.
        let mango_accounts: BTreeSet<_> = event_queue
            .iter()
            .take(10)
            .flat_map(|e| {
                match mango_v4::state::EventType::try_from(e.event_type).expect("mango v4 event") {
                    mango_v4::state::EventType::Fill => {
                        let fill: &mango_v4::state::FillEvent = cast_ref(e);
                        vec![fill.maker, fill.taker]
                    }
                    mango_v4::state::EventType::Out => {
                        let out: &mango_v4::state::OutEvent = cast_ref(e);
                        vec![out.owner]
                    }
                    mango_v4::state::EventType::Liquidate => vec![],
                }
            })
            .collect();
        // Resolve the perp market for this account write.
        // NOTE(review): self.pks is collected as-is from the (Pubkey, Pubkey)
        // pairs passed to `new` — confirm the key side matches the pubkey
        // `process` is invoked with, otherwise this panics.
        let mkt_pk = self
            .pks
            .get(pk)
            .unwrap_or_else(|| panic!("{:?} is a known public key", pk));
        let mut ams: Vec<_> = anchor_lang::ToAccountMetas::to_account_metas(
            &mango_v4::accounts::PerpConsumeEvents {
                group: self.group_pk,
                perp_market: *mkt_pk,
                event_queue: *pk,
            },
            None,
        );
        // Affected mango accounts are passed as writable remaining accounts.
        ams.append(
            &mut mango_accounts
                .iter()
                .map(|pk| AccountMeta::new(*pk, false))
                .collect(),
        );
        let ix = Instruction {
            program_id: mango_v4::id(),
            accounts: ams,
            data: anchor_lang::InstructionData::data(&mango_v4::instruction::PerpConsumeEvents {
                limit: 10,
            }),
        };
        info!("evq={pk:?} count={} limit=10", event_queue.iter().count());
        if let Err(e) = self.instruction_sender.send(vec![ix]).await {
            return Err(e.to_string());
        }
        Ok(())
    }
}

View File

@ -0,0 +1,108 @@
use std::{
collections::{BTreeMap, BTreeSet},
str::FromStr,
};
use async_channel::Sender;
use async_trait::async_trait;
use log::*;
use mango_feeds_connector::{account_write_filter::AccountWriteSink, chain_data::AccountData};
use mango_feeds_lib::serum::SerumEventQueueHeader;
use serum_dex::{instruction::MarketInstruction, state::EventView};
use solana_sdk::{
account::ReadableAccount,
instruction::{AccountMeta, Instruction},
pubkey::Pubkey,
};
const MAX_BACKLOG: usize = 2;
/// Account-write sink that turns openbook (serum) event-queue updates into
/// `ConsumeEvents` instructions, forwarded on `instruction_sender`.
pub struct OpenbookCrankSink {
    // Built as-is from the (Pubkey, Pubkey) pairs passed to `new`; queried
    // with the written account's pubkey in `process`.
    pks: BTreeMap<Pubkey, Pubkey>,
    // Instructions are sent here for signing/submission elsewhere.
    instruction_sender: Sender<Vec<Instruction>>,
}
impl OpenbookCrankSink {
    /// Creates a sink for the given (Pubkey, Pubkey) pairs; generated
    /// instructions are emitted on `instruction_sender`.
    pub fn new(pks: Vec<(Pubkey, Pubkey)>, instruction_sender: Sender<Vec<Instruction>>) -> Self {
        let pks = pks.into_iter().collect();
        Self {
            pks,
            instruction_sender,
        }
    }
}
#[async_trait]
impl AccountWriteSink for OpenbookCrankSink {
    /// Inspects an openbook event-queue account write and, when warranted,
    /// emits a `ConsumeEvents` instruction for that market.
    ///
    /// Returns `Err("throttled")` when there is no fill event and the backlog
    /// is small; other `Err` values come from the instruction channel.
    async fn process(&self, pk: &Pubkey, account: &AccountData) -> Result<(), String> {
        let account = &account.account;
        // Strip the account framing: 5 bytes in front, 7 at the end.
        // NOTE(review): assumes serum's standard account padding layout —
        // confirm against the serum_dex account format.
        let inner_data = &account.data()[5..&account.data().len() - 7];
        let header_span = std::mem::size_of::<SerumEventQueueHeader>();
        let header: SerumEventQueueHeader = *bytemuck::from_bytes(&inner_data[..header_span]);
        let count = header.count;
        // Events live in a ring buffer after the header; drop any trailing
        // bytes that don't form a whole event.
        let rest = &inner_data[header_span..];
        let event_size = std::mem::size_of::<serum_dex::state::Event>();
        let slop = rest.len() % event_size;
        let end = rest.len() - slop;
        let events = bytemuck::cast_slice::<u8, serum_dex::state::Event>(&rest[..end]);
        let seq_num = header.seq_num;
        // Materialize the `count` pending events in queue order, wrapping
        // around the ring buffer.
        let events: Vec<_> = (0..count)
            .map(|i| {
                let offset = (seq_num - count + i) % events.len() as u64;
                let event: serum_dex::state::Event = events[offset as usize];
                event.as_view().unwrap()
            })
            .collect();
        // Only crank if at least one fill, or sufficiently many events of
        // other kinds, are buffered.
        let contains_fill_events = events
            .iter()
            .any(|e| matches!(e, serum_dex::state::EventView::Fill { .. }));
        let has_backlog = events.len() > MAX_BACKLOG;
        if !contains_fill_events && !has_backlog {
            return Err("throttled".into());
        }
        // ConsumeEvents needs the open-orders accounts of affected owners.
        let oo_pks: BTreeSet<_> = events
            .iter()
            .map(|e| match e {
                EventView::Fill { owner, .. } | EventView::Out { owner, .. } => {
                    bytemuck::cast_slice::<u64, Pubkey>(owner)[0]
                }
            })
            .collect();
        let mut ams: Vec<_> = oo_pks
            .iter()
            .map(|pk| AccountMeta::new(*pk, false))
            .collect();
        // pass two times evq_pk instead of deprecated fee receivers to reduce encoded tx size
        let mkt_pk = self
            .pks
            .get(pk)
            .unwrap_or_else(|| panic!("{:?} is a known public key", pk));
        ams.append(
            &mut [mkt_pk, pk, /*coin_pk*/ pk, /*pc_pk*/ pk]
                .iter()
                .map(|pk| AccountMeta::new(**pk, false))
                .collect(),
        );
        let ix = Instruction {
            program_id: Pubkey::from_str("srmqPvymJeFKQ4zGQed1GFppgkRHL9kaELCbyksJtPX").unwrap(),
            accounts: ams,
            data: MarketInstruction::ConsumeEvents(count as u16).pack(),
        };
        info!("evq={pk:?} count={count}");
        if let Err(e) = self.instruction_sender.send(vec![ix]).await {
            return Err(e.to_string());
        }
        Ok(())
    }
}

View File

@ -0,0 +1,56 @@
use mango_feeds_connector::{
account_write_filter::{self, AccountWriteRoute},
metrics::Metrics,
AccountWrite, SlotUpdate,
};
use solana_sdk::{instruction::Instruction, pubkey::Pubkey};
use std::{sync::Arc, time::Duration};
use crate::{
mango_v4_perp_crank_sink::MangoV4PerpCrankSink, openbook_crank_sink::OpenbookCrankSink,
};
/// Wires the account-write pipeline: registers one crank sink per market
/// family (openbook and mango-v4 perp), each matching on its event-queue
/// pubkeys, and returns the channel endpoints the caller plugs into the
/// event source and the transaction sender.
#[allow(clippy::type_complexity)]
pub fn init(
    perp_queue_pks: Vec<(Pubkey, Pubkey)>,
    serum_queue_pks: Vec<(Pubkey, Pubkey)>,
    group_pk: Pubkey,
    metrics_sender: Metrics,
) -> anyhow::Result<(
    async_channel::Sender<AccountWrite>,
    async_channel::Sender<SlotUpdate>,
    async_channel::Receiver<Vec<Instruction>>,
)> {
    // Event queue updates can be consumed by client connections
    let (instruction_sender, instruction_receiver) = async_channel::unbounded::<Vec<Instruction>>();

    let openbook_route = AccountWriteRoute {
        matched_pubkeys: serum_queue_pks.iter().map(|(_, evq_pk)| *evq_pk).collect(),
        sink: Arc::new(OpenbookCrankSink::new(
            serum_queue_pks,
            instruction_sender.clone(),
        )),
        timeout_interval: Duration::default(),
    };
    let perp_route = AccountWriteRoute {
        matched_pubkeys: perp_queue_pks.iter().map(|(_, evq_pk)| *evq_pk).collect(),
        sink: Arc::new(MangoV4PerpCrankSink::new(
            perp_queue_pks,
            group_pk,
            instruction_sender,
        )),
        timeout_interval: Duration::default(),
    };

    let (account_write_queue_sender, slot_queue_sender) =
        account_write_filter::init(vec![openbook_route, perp_route], metrics_sender)?;

    Ok((
        account_write_queue_sender,
        slot_queue_sender,
        instruction_receiver,
    ))
}

View File

@ -0,0 +1,47 @@
use log::*;
use solana_client::{nonblocking::rpc_client::RpcClient, rpc_config::RpcSendTransactionConfig};
use solana_sdk::{
hash::Hash, instruction::Instruction, signature::Keypair, signature::Signer,
transaction::Transaction,
};
use std::sync::{Arc, RwLock};
use tokio::spawn;
/// Signs and submits every instruction batch received on `ixs_rx`.
///
/// Each batch is packed into one transaction paid and signed by `keypair`,
/// using the shared `blockhash`, and sent with preflight checks skipped; the
/// send result is only logged. Runs until the channel is closed.
pub async fn send_loop(
    ixs_rx: async_channel::Receiver<Vec<Instruction>>,
    blockhash: Arc<RwLock<Hash>>,
    client: Arc<RpcClient>,
    keypair: Keypair,
) {
    info!("signing with keypair pk={:?}", keypair.pubkey());
    let cfg = RpcSendTransactionConfig {
        skip_preflight: true,
        ..RpcSendTransactionConfig::default()
    };
    // `while let` (rather than `loop { if let … }`) exits once all senders
    // are dropped; the old form spun hot on the immediate Err that recv()
    // returns on a closed channel.
    while let Ok(ixs) = ixs_rx.recv().await {
        // TODO add priority fee
        let tx = Transaction::new_signed_with_payer(
            &ixs,
            Some(&keypair.pubkey()),
            &[&keypair],
            *blockhash.read().unwrap(),
        );
        // TODO: collect metrics
        info!(
            "send tx={:?} ok={:?}",
            tx.signatures[0],
            client.send_transaction_with_config(&tx, cfg).await
        );
    }
}
/// Spawns the background task that signs and submits instruction batches
/// from `ixs_rx` (see [`send_loop`]). Fire-and-forget: the task's handle is
/// intentionally dropped.
pub fn init(
    ixs_rx: async_channel::Receiver<Vec<Instruction>>,
    blockhash: Arc<RwLock<Hash>>,
    client: Arc<RpcClient>,
    keypair: Keypair,
) {
    spawn(send_loop(ixs_rx, blockhash, client, keypair));
}

View File

@ -0,0 +1,60 @@
[package]
name = "service-mango-fills"
version = "0.1.0"
authors = ["Christian Kamm <mail@ckamm.de>", "Maximilian Schneider <max@mango.markets>"]
edition = "2018"
license = "AGPL-3.0-or-later"
[dependencies]
mango-feeds-lib = { path = "../../lib/mango-feeds-lib" }
mango-feeds-connector = { workspace = true }
solana-client = { workspace = true }
solana-logger = { workspace = true }
solana-sdk = { workspace = true }
anchor-lang = { workspace = true }
anchor-client = { workspace = true }
mango-v4 = { path = "../../programs/mango-v4", features = ["client"] }
mango-v4-client = { path = "../../lib/client" }
serum_dex = { workspace = true }
bs58 = "0.3.1"
log = "0.4"
anyhow = "1.0"
toml = "0.5"
serde = "1.0.130"
serde_derive = "1.0.130"
serde_json = "1.0.68"
futures = "0.3.17"
futures-core = "0.3"
futures-channel = "0.3"
futures-util = "0.3"
ws = "^0.9.2"
async-channel = "1.6"
async-trait = "0.1"
bytemuck = "^1.7.2"
itertools = "0.10.3"
jemallocator = "0.3.2"
chrono = "0.4.23"
base64 = "0.21"
tokio = { version = "1", features = ["full"] }
tokio-tungstenite = "0.17"
#jsonrpc-core = "18.0.0"
#jsonrpc-core-client = { version = "18.0.0", features = ["ws", "http"] }
tokio-postgres = { version = "0.7", features = ["with-chrono-0_4"] }
tokio-postgres-rustls = "0.9.0"
postgres-types = { version = "0.2", features = ["array-impls", "derive", "with-chrono-0_4"] }
postgres-native-tls = "0.5"
native-tls = "0.2"
rustls = "0.20.8"
# postgres_query hasn't updated its crate in a while
postgres_query = { git = "https://github.com/nolanderc/rust-postgres-query", rev = "b4422051c8a31fbba4a35f88004c1cefb1878dd5" }

View File

@ -0,0 +1,111 @@
# service-mango-fills
This module parses event queues and exposes individual fills on a websocket.
Public API: `https://api.mngo.cloud/fills/v1/`
## API Reference
Get a list of markets
```
{
"command": "getMarkets"
}
```
```
{
"ESdnpnNLgTkBCZRuTJkZLi5wKEZ2z47SG3PJrhundSQ2": "SOL-PERP",
"HwhVGkfsSQ9JSQeQYu2CbkRCLvsh3qRZxG6m4oMVwZpN": "BTC-PERP",
"Fgh9JSZ2qfSjCw9RPJ85W2xbihsp2muLvfRztzoVR7f1": "ETH-PERP",
}
```
Subscribe to markets
```
{
"command": "subscribe"
"marketIds": ["MARKET_PUBKEY"]
}
```
```
{
"success": true,
"message": "subscribed to market MARKET_PUBKEY"
}
```
Subscribe to account
```
{
"command": "subscribe"
"account": ["MANGO_ACCOUNT_PUBKEY"]
}
```
```
{
"success": true,
"message": "subscribed to account MANGO_ACCOUNT_PUBKEY"
}
```
Fill Event
```
{
"event": {
"eventType": "perp",
"maker": "MAKER_MANGO_ACCOUNT_PUBKEY",
"taker": "TAKER_MANGO_ACCOUNT_PUBKEY",
"takerSide": "bid",
"timestamp": "2023-04-06T13:00:00+00:00",
"seqNum": 132420,
"makerClientOrderId": 1680786677648,
"takerClientOrderId": 1680786688080,
"makerFee": -0.0003,
"takerFee": 0.0006,
"price": 20.72,
"quantity": 0.45
},
"marketKey": "ESdnpnNLgTkBCZRuTJkZLi5wKEZ2z47SG3PJrhundSQ2",
"marketName": "SOL-PERP",
"status": "new",
"slot": 186869253,
"writeVersion": 662992260539
}
```
If the fill occurred on a fork, an event will be sent with the 'status' field set to 'revoke'.
## Setup
## Local
1. Prepare the connector configuration file.
[Here is an example](service-mango-fills/conf/example-config.toml).
- `bind_ws_addr` is the listen port for the websocket clients
- `rpc_ws_url` is unused and can stay empty.
- `connection_string` for your `grpc_sources` must point to the gRPC server
address configured for the plugin.
- `rpc_http_url` must point to the JSON-RPC URL.
- `program_id` must match what is configured for the gRPC plugin
2. Start the service binary.
Pass the path to the config file as the first argument. It logs to stdout. It
should be restarted on exit.
3. Monitor the logs
`WARN` messages can be recovered from. `ERROR` messages need attention. The
logs are very spammy; changing the default log level is recommended when you
don't want to analyze performance of the service.
## fly.io

View File

@ -0,0 +1,35 @@
bind_ws_addr = "0.0.0.0:8080"
rpc_http_url = "http://mango.rpcpool.com/<token>"
mango_group = "78b8f4cGCwmZ9ysPFMWLaLTkkaYnUjwMJYStWe5RTSSX"
[metrics]
output_stdout = true
output_http = true
# [postgres]
# connection_string = "$PG_CONNECTION_STRING"
# connection_count = 1
# max_batch_size = 1
# max_queue_size = 50000
# retry_query_max_count = 10
# retry_query_sleep_secs = 2
# retry_connection_sleep_secs = 10
# fatal_connection_timeout_secs = 30
# allow_invalid_certs = true
# # [postgres.tls]
# # ca_cert_path = "$PG_CA_CERT"
# # client_key_path = "$PG_CLIENT_KEY"
[source]
dedup_queue_size = 50000
rpc_ws_url = "wss://mango.rpcpool.com/<token>"
[[source.grpc_sources]]
name = "accountsdb-client"
connection_string = "http://tyo64.rpcpool.com/"
retry_connection_sleep_secs = 30
[source.snapshot]
rpc_http_url = "http://mango.rpcpool.com/<token>"
program_id = "4MangoMjqJ2firMokCjjGgoK8d4MXcrgL7XJaL3w6fVg"

View File

@ -0,0 +1,36 @@
bind_ws_addr = "[::]:8080"
rpc_http_url = "$RPC_HTTP_URL"
mango_group = "78b8f4cGCwmZ9ysPFMWLaLTkkaYnUjwMJYStWe5RTSSX"
[metrics]
output_stdout = true
output_http = true
[postgres]
connection_string = "$PG_CONNECTION_STRING"
connection_count = 1
max_batch_size = 1
max_queue_size = 50000
retry_query_max_count = 10
retry_query_sleep_secs = 2
retry_connection_sleep_secs = 10
fatal_connection_timeout_secs = 30
allow_invalid_certs = true
[postgres.tls]
ca_cert_path = "$PG_CA_CERT"
client_key_path = "$PG_CLIENT_KEY"
[source]
dedup_queue_size = 50000
rpc_ws_url = "$RPC_WS_URL"
[[source.grpc_sources]]
name = "accountsdb-client"
connection_string = "$GEYSER_CONNECTION_STRING"
token = "$GEYSER_TOKEN"
retry_connection_sleep_secs = 30
[source.snapshot]
rpc_http_url = "$RPC_HTTP_URL"
program_id = "srmqPvymJeFKQ4zGQed1GFppgkRHL9kaELCbyksJtPX"

View File

@ -0,0 +1,625 @@
use log::*;
use mango_feeds_connector::{
chain_data::{AccountData, ChainData, ChainDataMetrics, SlotData},
metrics::{MetricType, Metrics},
AccountWrite, SlotUpdate,
};
use mango_feeds_lib::serum::SerumEventQueueHeader;
use mango_feeds_lib::MarketConfig;
use solana_sdk::{
account::{ReadableAccount, WritableAccount},
clock::Epoch,
pubkey::Pubkey,
};
use std::{
borrow::BorrowMut,
cmp::max,
collections::{HashMap, HashSet},
iter::FromIterator,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
};
use crate::metrics::MetricU64;
use anchor_lang::AccountDeserialize;
use mango_v4::state::{
AnyEvent, EventQueue, EventQueueHeader, EventType, FillEvent as PerpFillEvent,
OutEvent as PerpOutEvent, QueueHeader, MAX_NUM_EVENTS,
};
use service_mango_fills::*;
// couldn't compile the correct struct size / math on m1, fixed sizes resolve this issue
type EventQueueEvents = [AnyEvent; MAX_NUM_EVENTS as usize];
#[allow(clippy::too_many_arguments)]
fn publish_changes_perp(
slot: u64,
write_version: u64,
mkt: &(Pubkey, MarketConfig),
header: &EventQueueHeader,
events: &EventQueueEvents,
prev_seq_num: u64,
prev_head: usize,
prev_events: &EventQueueEvents,
fill_update_sender: &async_channel::Sender<FillEventFilterMessage>,
metric_events_new: &mut MetricU64,
metric_events_change: &mut MetricU64,
metric_events_drop: &mut MetricU64,
metric_head_update: &mut MetricU64,
) {
// seq_num = N means that events (N-QUEUE_LEN) until N-1 are available
let start_seq_num = max(prev_seq_num, header.seq_num).saturating_sub(MAX_NUM_EVENTS as u64);
let mut checkpoint = Vec::new();
let mkt_pk_string = mkt.0.to_string();
let evq_pk_string = mkt.1.event_queue.to_string();
for seq_num in start_seq_num..header.seq_num {
let idx = (seq_num % MAX_NUM_EVENTS as u64) as usize;
// there are three possible cases:
// 1) the event is past the old seq num, hence guaranteed new event
// 2) the event is not matching the old event queue
// 3) all other events are matching the old event queue
// the order of these checks is important so they are exhaustive
if seq_num >= prev_seq_num {
debug!(
"found new event {} idx {} type {} slot {} write_version {}",
mkt_pk_string, idx, events[idx].event_type as u32, slot, write_version
);
metric_events_new.increment();
// new fills are published and recorded in checkpoint
if events[idx].event_type == EventType::Fill as u8 {
let fill: PerpFillEvent = bytemuck::cast(events[idx]);
let fill = FillEvent::new_from_perp(fill, &mkt.1);
fill_update_sender
.try_send(FillEventFilterMessage::Update(FillUpdate {
slot,
write_version,
event: fill.clone(),
status: FillUpdateStatus::New,
market_key: mkt_pk_string.clone(),
market_name: mkt.1.name.clone(),
}))
.unwrap(); // TODO: use anyhow to bubble up error
checkpoint.push(fill);
}
} else if prev_events[idx].event_type != events[idx].event_type
|| prev_events[idx].padding != events[idx].padding
{
debug!(
"found changed event {} idx {} seq_num {} header seq num {} old seq num {}",
mkt_pk_string, idx, seq_num, header.seq_num, prev_seq_num
);
metric_events_change.increment();
// first revoke old event if a fill
if prev_events[idx].event_type == EventType::Fill as u8 {
let fill: PerpFillEvent = bytemuck::cast(prev_events[idx]);
let fill = FillEvent::new_from_perp(fill, &mkt.1);
fill_update_sender
.try_send(FillEventFilterMessage::Update(FillUpdate {
slot,
write_version,
event: fill,
status: FillUpdateStatus::Revoke,
market_key: mkt_pk_string.clone(),
market_name: mkt.1.name.clone(),
}))
.unwrap(); // TODO: use anyhow to bubble up error
}
// then publish new if its a fill and record in checkpoint
if events[idx].event_type == EventType::Fill as u8 {
let fill: PerpFillEvent = bytemuck::cast(events[idx]);
let fill = FillEvent::new_from_perp(fill, &mkt.1);
fill_update_sender
.try_send(FillEventFilterMessage::Update(FillUpdate {
slot,
write_version,
event: fill.clone(),
status: FillUpdateStatus::New,
market_key: mkt_pk_string.clone(),
market_name: mkt.1.name.clone(),
}))
.unwrap(); // TODO: use anyhow to bubble up error
checkpoint.push(fill);
}
} else {
// every already published event is recorded in checkpoint if a fill
if events[idx].event_type == EventType::Fill as u8 {
let fill: PerpFillEvent = bytemuck::cast(events[idx]);
let fill = FillEvent::new_from_perp(fill, &mkt.1);
checkpoint.push(fill);
}
}
}
// in case queue size shrunk due to a fork we need revoke all previous fills
for seq_num in header.seq_num..prev_seq_num {
let idx = (seq_num % MAX_NUM_EVENTS as u64) as usize;
debug!(
"found dropped event {} idx {} seq_num {} header seq num {} old seq num {} slot {} write_version {}",
mkt_pk_string, idx, seq_num, header.seq_num, prev_seq_num, slot, write_version
);
metric_events_drop.increment();
if prev_events[idx].event_type == EventType::Fill as u8 {
let fill: PerpFillEvent = bytemuck::cast(prev_events[idx]);
let fill = FillEvent::new_from_perp(fill, &mkt.1);
fill_update_sender
.try_send(FillEventFilterMessage::Update(FillUpdate {
slot,
event: fill,
write_version,
status: FillUpdateStatus::Revoke,
market_key: mkt_pk_string.clone(),
market_name: mkt.1.name.clone(),
}))
.unwrap(); // TODO: use anyhow to bubble up error
}
}
let head = header.head();
let head_seq_num = if events[head - 1].event_type == EventType::Fill as u8 {
let event: PerpFillEvent = bytemuck::cast(events[head - 1]);
event.seq_num + 1
} else if events[head - 1].event_type == EventType::Out as u8 {
let event: PerpOutEvent = bytemuck::cast(events[head - 1]);
event.seq_num + 1
} else {
0
};
let prev_head_seq_num = if prev_events[prev_head - 1].event_type == EventType::Fill as u8 {
let event: PerpFillEvent = bytemuck::cast(prev_events[prev_head - 1]);
event.seq_num + 1
} else if prev_events[prev_head - 1].event_type == EventType::Out as u8 {
let event: PerpOutEvent = bytemuck::cast(prev_events[prev_head - 1]);
event.seq_num + 1
} else {
0
};
// publish a head update event if the head changed (events were consumed)
if head != prev_head {
metric_head_update.increment();
fill_update_sender
.try_send(FillEventFilterMessage::HeadUpdate(HeadUpdate {
head,
prev_head,
head_seq_num,
prev_head_seq_num,
status: FillUpdateStatus::New,
market_key: mkt_pk_string.clone(),
market_name: mkt.1.name.clone(),
slot,
write_version,
}))
.unwrap(); // TODO: use anyhow to bubble up error
}
fill_update_sender
.try_send(FillEventFilterMessage::Checkpoint(FillCheckpoint {
slot,
write_version,
events: checkpoint,
market: mkt_pk_string,
queue: evq_pk_string,
}))
.unwrap()
}
/// Placeholder for serum (spot) fill publishing.
///
/// The previous diff-and-publish implementation for serum event queues was
/// disabled and only survived as a large block of commented-out code; that
/// dead code has been removed here — consult the git history of this file if
/// the old implementation is needed as a reference. The signature is kept
/// unchanged so the perp/spot dispatch in `init` continues to compile and the
/// caller keeps maintaining the serum event caches.
#[allow(clippy::too_many_arguments)]
fn publish_changes_serum(
    _slot: u64,
    _write_version: u64,
    _mkt: &(Pubkey, MarketConfig),
    _header: &SerumEventQueueHeader,
    _events: &[serum_dex::state::Event],
    _prev_seq_num: u64,
    _prev_events: &[serum_dex::state::Event],
    _fill_update_sender: &async_channel::Sender<FillEventFilterMessage>,
    _metric_events_new: &mut MetricU64,
    _metric_events_change: &mut MetricU64,
    _metric_events_drop: &mut MetricU64,
) {
    // Intentionally a no-op: serum fill publishing is currently disabled.
}
/// Initialize the fill event filter service.
///
/// Spawns a background task that consumes account writes and slot updates for
/// the event queue accounts of the given perp and spot markets, keeps the
/// observed state in a `ChainData` cache, and diffs each new queue state
/// against the previously seen one to emit fill updates, head updates and
/// checkpoints on the returned receiver.
///
/// Returns the sender ends for account writes and slot updates, and the
/// receiver end for the produced [`FillEventFilterMessage`]s.
pub async fn init(
    perp_market_configs: Vec<(Pubkey, MarketConfig)>,
    spot_market_configs: Vec<(Pubkey, MarketConfig)>,
    metrics_sender: Metrics,
    exit: Arc<AtomicBool>,
) -> anyhow::Result<(
    async_channel::Sender<AccountWrite>,
    async_channel::Sender<SlotUpdate>,
    async_channel::Receiver<FillEventFilterMessage>,
)> {
    // NOTE(review): this rebinding is a no-op and could be removed.
    let metrics_sender = metrics_sender;

    let mut metric_events_new =
        metrics_sender.register_u64("fills_feed_events_new".into(), MetricType::Counter);
    let mut metric_events_new_serum =
        metrics_sender.register_u64("fills_feed_events_new_serum".into(), MetricType::Counter);
    let mut metric_events_change =
        metrics_sender.register_u64("fills_feed_events_change".into(), MetricType::Counter);
    let mut metric_events_change_serum =
        metrics_sender.register_u64("fills_feed_events_change_serum".into(), MetricType::Counter);
    // NOTE(review): naming is inconsistent with the other counters
    // ("metrics_events_drop" vs "metric_events_*").
    let mut metrics_events_drop =
        metrics_sender.register_u64("fills_feed_events_drop".into(), MetricType::Counter);
    let mut metrics_events_drop_serum =
        metrics_sender.register_u64("fills_feed_events_drop_serum".into(), MetricType::Counter);
    let mut metrics_head_update =
        metrics_sender.register_u64("fills_feed_head_update".into(), MetricType::Counter);

    // The actual message may want to also contain a retry count, if it self-reinserts on failure?
    let (account_write_queue_sender, account_write_queue_receiver) =
        async_channel::unbounded::<AccountWrite>();

    // Slot updates flowing from the outside into the single processing thread. From
    // there they'll flow into the postgres sending thread.
    let (slot_queue_sender, slot_queue_receiver) = async_channel::unbounded::<SlotUpdate>();

    // Fill updates can be consumed by client connections, they contain all fills for all markets
    let (fill_update_sender, fill_update_receiver) =
        async_channel::unbounded::<FillEventFilterMessage>();

    let account_write_queue_receiver_c = account_write_queue_receiver;

    let mut chain_cache = ChainData::new();
    let mut chain_data_metrics = ChainDataMetrics::new(&metrics_sender);
    // Previously seen event queue contents, keyed by the queue pubkey string.
    let mut perp_events_cache: HashMap<String, EventQueueEvents> = HashMap::new();
    let mut serum_events_cache: HashMap<String, Vec<serum_dex::state::Event>> = HashMap::new();
    let mut seq_num_cache = HashMap::new();
    let mut head_cache = HashMap::new();
    // Last processed (slot, write_version) per queue, to skip stale/unchanged states.
    let mut last_evq_versions = HashMap::<String, (u64, u64)>::new();

    let all_market_configs = [perp_market_configs.clone(), spot_market_configs.clone()].concat();
    let perp_queue_pks: Vec<Pubkey> = perp_market_configs
        .iter()
        .map(|x| x.1.event_queue)
        .collect();
    let spot_queue_pks: Vec<Pubkey> = spot_market_configs
        .iter()
        .map(|x| x.1.event_queue)
        .collect();
    let all_queue_pks: HashSet<Pubkey> =
        HashSet::from_iter([perp_queue_pks, spot_queue_pks].concat());

    // Update handling thread, reads both slot and account updates
    tokio::spawn(async move {
        loop {
            if exit.load(Ordering::Relaxed) {
                warn!("shutting down fill_event_filter...");
                break;
            }
            tokio::select! {
                Ok(account_write) = account_write_queue_receiver_c.recv() => {
                    // Ignore writes for accounts that are not tracked event queues.
                    if !all_queue_pks.contains(&account_write.pubkey) {
                        continue;
                    }
                    chain_cache.update_account(
                        account_write.pubkey,
                        AccountData {
                            slot: account_write.slot,
                            write_version: account_write.write_version,
                            account: WritableAccount::create(
                                account_write.lamports,
                                account_write.data.clone(),
                                account_write.owner,
                                account_write.executable,
                                account_write.rent_epoch as Epoch,
                            ),
                        },
                    );
                }
                Ok(slot_update) = slot_queue_receiver.recv() => {
                    chain_cache.update_slot(SlotData {
                        slot: slot_update.slot,
                        parent: slot_update.parent,
                        status: slot_update.status,
                        chain: 0,
                    });
                }
                // NOTE(review): each select! branch polls its own recv() future;
                // the Err arms only match once a channel is closed, and mainly
                // serve to log that the upstream sender went away — confirm
                // this is the intended shutdown behavior.
                Err(e) = slot_queue_receiver.recv() => {
                    warn!("slot update channel err {:?}", e);
                }
                Err(e) = account_write_queue_receiver_c.recv() => {
                    warn!("write update channel err {:?}", e);
                }
            }

            chain_data_metrics.report(&chain_cache);

            for mkt in all_market_configs.iter() {
                let evq_pk = mkt.1.event_queue;
                let evq_pk_string = evq_pk.to_string();
                let last_evq_version = last_evq_versions
                    .get(&mkt.1.event_queue.to_string())
                    .unwrap_or(&(0, 0));

                match chain_cache.account(&evq_pk) {
                    Ok(account_info) => {
                        // only process if the account state changed
                        let evq_version = (account_info.slot, account_info.write_version);
                        if evq_version == *last_evq_version {
                            continue;
                        }
                        if evq_version.0 < last_evq_version.0 {
                            debug!("evq version slot was old");
                            continue;
                        }
                        if evq_version.0 == last_evq_version.0 && evq_version.1 < last_evq_version.1
                        {
                            info!("evq version slot was same and write version was old");
                            continue;
                        }
                        last_evq_versions.insert(evq_pk_string.clone(), evq_version);

                        let account = &account_info.account;
                        // Dispatch on the owning program: mango-v4 queues are perp,
                        // everything else is treated as a serum spot queue.
                        let is_perp = mango_v4::check_id(account.owner());
                        if is_perp {
                            let event_queue =
                                EventQueue::try_deserialize(account.data().borrow_mut()).unwrap();
                            match (
                                seq_num_cache.get(&evq_pk_string),
                                head_cache.get(&evq_pk_string),
                            ) {
                                (Some(prev_seq_num), Some(prev_head)) => match perp_events_cache
                                    .get(&evq_pk_string)
                                {
                                    Some(prev_events) => publish_changes_perp(
                                        account_info.slot,
                                        account_info.write_version,
                                        mkt,
                                        &event_queue.header,
                                        &event_queue.buf,
                                        *prev_seq_num,
                                        *prev_head,
                                        prev_events,
                                        &fill_update_sender,
                                        &mut metric_events_new,
                                        &mut metric_events_change,
                                        &mut metrics_events_drop,
                                        &mut metrics_head_update,
                                    ),
                                    _ => {
                                        info!("perp_events_cache could not find {}", evq_pk_string)
                                    }
                                },
                                _ => info!("seq_num/head cache could not find {}", evq_pk_string),
                            }

                            seq_num_cache.insert(evq_pk_string.clone(), event_queue.header.seq_num);
                            head_cache.insert(evq_pk_string.clone(), event_queue.header.head());
                            perp_events_cache.insert(evq_pk_string.clone(), event_queue.buf);
                        } else {
                            // Strip the serum account padding (5-byte prefix, 7-byte
                            // suffix) before reading the queue header and events.
                            let inner_data = &account.data()[5..&account.data().len() - 7];
                            let header_span = std::mem::size_of::<SerumEventQueueHeader>();
                            let header: SerumEventQueueHeader =
                                *bytemuck::from_bytes(&inner_data[..header_span]);
                            let seq_num = header.seq_num;
                            let count = header.count;
                            let rest = &inner_data[header_span..];
                            // Drop any trailing bytes that don't form a whole Event.
                            let slop = rest.len() % std::mem::size_of::<serum_dex::state::Event>();
                            let new_len = rest.len() - slop;
                            let events = &rest[..new_len];
                            debug!("evq {} header_span {} header_seq_num {} header_count {} inner_len {} events_len {} sizeof Event {}", evq_pk_string, header_span, seq_num, count, inner_data.len(), events.len(), std::mem::size_of::<serum_dex::state::Event>());
                            let events: &[serum_dex::state::Event] = bytemuck::cast_slice(events);

                            match seq_num_cache.get(&evq_pk_string) {
                                Some(prev_seq_num) => {
                                    match serum_events_cache.get(&evq_pk_string) {
                                        Some(prev_events) => publish_changes_serum(
                                            account_info.slot,
                                            account_info.write_version,
                                            mkt,
                                            &header,
                                            events,
                                            *prev_seq_num,
                                            prev_events,
                                            &fill_update_sender,
                                            &mut metric_events_new_serum,
                                            &mut metric_events_change_serum,
                                            &mut metrics_events_drop_serum,
                                        ),
                                        _ => {
                                            debug!(
                                                "serum_events_cache could not find {}",
                                                evq_pk_string
                                            )
                                        }
                                    }
                                }
                                _ => debug!("seq_num_cache could not find {}", evq_pk_string),
                            }

                            seq_num_cache.insert(evq_pk_string.clone(), seq_num);
                            head_cache.insert(evq_pk_string.clone(), header.head as usize);
                            serum_events_cache.insert(evq_pk_string.clone(), events.to_vec());
                        }
                    }
                    Err(_) => debug!("chain_cache could not find {}", mkt.1.event_queue),
                }
            }
        }
    });

    Ok((
        account_write_queue_sender,
        slot_queue_sender,
        fill_update_receiver,
    ))
}

View File

@ -0,0 +1,273 @@
use crate::postgres_config::PostgresConfig;
use chrono::{TimeZone, Utc};
use log::*;
use mango_feeds_connector::metrics::{MetricType, MetricU64, Metrics};
use native_tls::{Certificate, Identity, TlsConnector};
use postgres_native_tls::MakeTlsConnector;
use postgres_query::Caching;
use service_mango_fills::*;
use std::{
env, fs,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::Duration,
};
use tokio_postgres::Client;
/// Establish (and keep re-establishing) a postgres connection.
///
/// Returns a channel that yields `Some(client)` whenever a fresh connection
/// becomes available and `None` when the current connection died, so consumers
/// can invalidate their cached client.
///
/// TLS material and the connection string can be given as file paths / literal
/// values, or — when the value starts with `$` — as the name of an environment
/// variable (TLS values are expected base64-encoded in that case).
async fn postgres_connection(
    config: &PostgresConfig,
    metric_retries: MetricU64,
    metric_live: MetricU64,
    exit: Arc<AtomicBool>,
) -> anyhow::Result<async_channel::Receiver<Option<tokio_postgres::Client>>> {
    let (tx, rx) = async_channel::unbounded();

    // openssl pkcs12 -export -in client.cer -inkey client-key.cer -out client.pks
    // base64 -i ca.cer -o ca.cer.b64 && base64 -i client.pks -o client.pks.b64
    // fly secrets set PG_CA_CERT=- < ./ca.cer.b64 -a mango-fills
    // fly secrets set PG_CLIENT_KEY=- < ./client.pks.b64 -a mango-fills
    let tls = match &config.tls {
        Some(tls) => {
            use base64::{engine::general_purpose, Engine as _};
            // NOTE(review): `.chars().next().unwrap()` panics if the configured
            // path is an empty string.
            let ca_cert = match &tls.ca_cert_path.chars().next().unwrap() {
                '$' => general_purpose::STANDARD
                    .decode(
                        env::var(&tls.ca_cert_path[1..])
                            .expect("reading client cert from env")
                            .into_bytes(),
                    )
                    .expect("decoding client cert"),
                _ => fs::read(&tls.ca_cert_path).expect("reading client cert from file"),
            };
            let client_key = match &tls.client_key_path.chars().next().unwrap() {
                '$' => general_purpose::STANDARD
                    .decode(
                        env::var(&tls.client_key_path[1..])
                            .expect("reading client key from env")
                            .into_bytes(),
                    )
                    .expect("decoding client key"),
                _ => fs::read(&tls.client_key_path).expect("reading client key from file"),
            };
            MakeTlsConnector::new(
                TlsConnector::builder()
                    .add_root_certificate(Certificate::from_pem(&ca_cert)?)
                    .identity(Identity::from_pkcs12(&client_key, "pass")?)
                    .danger_accept_invalid_certs(config.allow_invalid_certs)
                    .build()?,
            )
        }
        None => MakeTlsConnector::new(
            TlsConnector::builder()
                .danger_accept_invalid_certs(config.allow_invalid_certs)
                .build()?,
        ),
    };

    let config = config.clone();
    let connection_string = match &config.connection_string.chars().next().unwrap() {
        '$' => {
            env::var(&config.connection_string[1..]).expect("reading connection string from env")
        }
        _ => config.connection_string.clone(),
    };
    // Connect once up front so a misconfiguration fails init via `?` instead of
    // retrying forever in the background.
    let mut initial = Some(tokio_postgres::connect(&connection_string, tls.clone()).await?);
    let mut metric_retries = metric_retries;
    let mut metric_live = metric_live;
    tokio::spawn(async move {
        loop {
            // don't acquire a new connection if we're shutting down
            if exit.load(Ordering::Relaxed) {
                warn!("shutting down fill_event_postgres_target...");
                break;
            }
            // Use the eagerly-created connection first, then reconnect on demand.
            let (client, connection) = match initial.take() {
                Some(v) => v,
                None => {
                    let result = tokio_postgres::connect(&connection_string, tls.clone()).await;
                    match result {
                        Ok(v) => v,
                        Err(err) => {
                            warn!("could not connect to postgres: {:?}", err);
                            tokio::time::sleep(Duration::from_secs(
                                config.retry_connection_sleep_secs,
                            ))
                            .await;
                            continue;
                        }
                    }
                }
            };

            tx.send(Some(client)).await.expect("send success");
            metric_live.increment();

            // `connection` drives the wire protocol; it resolves only when the
            // connection dies, at which point we publish `None` and retry.
            let result = connection.await;
            metric_retries.increment();
            metric_live.decrement();
            tx.send(None).await.expect("send success");
            warn!("postgres connection error: {:?}", result);
            tokio::time::sleep(Duration::from_secs(config.retry_connection_sleep_secs)).await;
        }
    });

    Ok(rx)
}
async fn update_postgres_client<'a>(
client: &'a mut Option<postgres_query::Caching<tokio_postgres::Client>>,
rx: &async_channel::Receiver<Option<tokio_postgres::Client>>,
config: &PostgresConfig,
) -> &'a postgres_query::Caching<tokio_postgres::Client> {
// get the most recent client, waiting if there's a disconnect
while !rx.is_empty() || client.is_none() {
tokio::select! {
client_raw_opt = rx.recv() => {
*client = client_raw_opt.expect("not closed").map(postgres_query::Caching::new);
},
_ = tokio::time::sleep(Duration::from_secs(config.fatal_connection_timeout_secs)) => {
error!("waited too long for new postgres client");
std::process::exit(1);
},
}
}
client.as_ref().expect("must contain value")
}
/// Persist a single fill update: insert on `New`, delete on `Revoke`.
async fn process_update(client: &Caching<Client>, update: &FillUpdate) -> anyhow::Result<()> {
    // Flatten the update into the column values stored in postgres.
    let market = &update.market_key;
    let seq_num = update.event.seq_num as i64;
    let fill_timestamp = Utc.timestamp_opt(update.event.timestamp as i64, 0).unwrap();
    let price = update.event.price;
    let quantity = update.event.quantity;
    let slot = update.slot as i64;
    let write_version = update.write_version as i64;

    match update.status {
        FillUpdateStatus::New => {
            // insert new events
            let query = postgres_query::query!(
                "INSERT INTO transactions_v4.perp_fills_feed_events
            (market, seq_num, fill_timestamp, price,
             quantity, slot, write_version)
            VALUES
            ($market, $seq_num, $fill_timestamp, $price,
             $quantity, $slot, $write_version)
            ON CONFLICT (market, seq_num) DO NOTHING",
                market,
                seq_num,
                fill_timestamp,
                price,
                quantity,
                slot,
                write_version,
            );
            let _ = query.execute(&client).await?;
        }
        FillUpdateStatus::Revoke => {
            // delete revoked events
            let query = postgres_query::query!(
                "DELETE FROM transactions_v4.perp_fills_feed_events
            WHERE market=$market
            AND seq_num=$seq_num",
                market,
                seq_num,
            );
            let _ = query.execute(&client).await?;
        }
    }
    Ok(())
}
/// Initialize the postgres fill-update sink.
///
/// Opens `config.connection_count` postgres connections and spawns one worker
/// per connection. Each worker batches incoming [`FillUpdate`]s (up to
/// `config.max_batch_size`), writes them to the database, and retries failed
/// updates up to `config.retry_query_max_count` times before aborting the
/// process.
///
/// Returns the sender end of the bounded fill-update queue.
pub async fn init(
    config: &PostgresConfig,
    metrics_sender: Metrics,
    exit: Arc<AtomicBool>,
) -> anyhow::Result<async_channel::Sender<FillUpdate>> {
    // The actual message may want to also contain a retry count, if it self-reinserts on failure?
    let (fill_update_queue_sender, fill_update_queue_receiver) =
        async_channel::bounded::<FillUpdate>(config.max_queue_size);
    let metric_con_retries = metrics_sender.register_u64(
        "fills_postgres_connection_retries".into(),
        MetricType::Counter,
    );
    let metric_con_live =
        metrics_sender.register_u64("fills_postgres_connections_alive".into(), MetricType::Gauge);

    // postgres fill update sending worker threads
    for _ in 0..config.connection_count {
        let postgres_account_writes = postgres_connection(
            config,
            metric_con_retries.clone(),
            metric_con_live.clone(),
            exit.clone(),
        )
        .await?;
        let fill_update_queue_receiver_c = fill_update_queue_receiver.clone();
        let config = config.clone();
        let mut metric_retries =
            metrics_sender.register_u64("fills_postgres_retries".into(), MetricType::Counter);
        tokio::spawn(async move {
            let mut client_opt = None;
            loop {
                // Retrieve up to batch_size updates: block for the first one,
                // then opportunistically drain whatever else is queued.
                let mut batch = Vec::new();
                batch.push(
                    fill_update_queue_receiver_c
                        .recv()
                        .await
                        .expect("sender must stay alive"),
                );
                while batch.len() < config.max_batch_size {
                    match fill_update_queue_receiver_c.try_recv() {
                        Ok(update) => batch.push(update),
                        Err(async_channel::TryRecvError::Empty) => break,
                        Err(async_channel::TryRecvError::Closed) => {
                            panic!("sender must stay alive")
                        }
                    };
                }

                info!(
                    "updates, batch {}, channel size {}",
                    batch.len(),
                    fill_update_queue_receiver_c.len(),
                );

                let mut error_count = 0;
                loop {
                    let client =
                        update_postgres_client(&mut client_opt, &postgres_account_writes, &config)
                            .await;
                    let mut results = futures::future::join_all(
                        batch.iter().map(|update| process_update(client, update)),
                    )
                    .await;
                    // Keep only the updates whose write failed, so they get retried.
                    let mut iter = results.iter();
                    batch.retain(|_| iter.next().unwrap().is_err());
                    if !batch.is_empty() {
                        metric_retries.add(batch.len() as u64);
                        error_count += 1;
                        if error_count - 1 < config.retry_query_max_count {
                            results.retain(|r| r.is_err());
                            warn!("failed to process fill update, retrying: {:?}", results);
                            tokio::time::sleep(Duration::from_secs(config.retry_query_sleep_secs))
                                .await;
                            continue;
                        } else {
                            // NOTE(review): message says "account write" but this is
                            // the fill update path — likely copy/paste from a sibling
                            // service; confirm before changing the log text.
                            error!("failed to process account write, exiting");
                            std::process::exit(1);
                        }
                    };
                    break;
                }
            }
        });
    }
    Ok(fill_update_queue_sender)
}

View File

@ -0,0 +1,338 @@
use std::convert::{identity, TryFrom};
use anchor_lang::prelude::Pubkey;
use bytemuck::cast_slice;
use chrono::{TimeZone, Utc};
use mango_feeds_lib::{base_lots_to_ui_perp, price_lots_to_ui_perp, MarketConfig, OrderbookSide};
use mango_v4::state::{FillEvent as PerpFillEvent, Side};
use serde::{ser::SerializeStruct, Deserialize, Serialize, Serializer};
use serum_dex::state::EventView as SpotEvent;
/// Whether a fill is being added to the stream (`New`) or retracted again
/// (`Revoke`), e.g. after the event queue contents changed under us.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum FillUpdateStatus {
    New,
    Revoke,
}
impl Serialize for FillUpdateStatus {
    /// Serialize as a lowercase unit-variant string: `"new"` / `"revoke"`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let (variant_index, variant_name) = match self {
            FillUpdateStatus::New => (0, "new"),
            FillUpdateStatus::Revoke => (1, "revoke"),
        };
        serializer.serialize_unit_variant("FillUpdateStatus", variant_index, variant_name)
    }
}
/// Origin of a fill: a serum spot market or a mango-v4 perp market.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum FillEventType {
    Spot,
    Perp,
}
impl Serialize for FillEventType {
    /// Serialize as a lowercase unit-variant string: `"spot"` / `"perp"`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let (variant_index, variant_name) = match self {
            FillEventType::Spot => (0, "spot"),
            FillEventType::Perp => (1, "perp"),
        };
        serializer.serialize_unit_variant("FillEventType", variant_index, variant_name)
    }
}
/// A normalized fill shared between the perp and spot sources.
#[derive(Clone, Debug)]
pub struct FillEvent {
    pub event_type: FillEventType,
    /// Maker account address, stringified.
    pub maker: String,
    /// Taker account address, stringified.
    pub taker: String,
    pub taker_side: OrderbookSide,
    pub timestamp: u64, // unix seconds; original TODO: make all strings
    pub seq_num: u64,
    pub maker_client_order_id: u64,
    pub taker_client_order_id: u64,
    pub maker_fee: f32,
    pub taker_fee: f32,
    /// Price in UI units (converted from lots / native amounts).
    pub price: f64,
    /// Quantity in UI units (converted from lots / native amounts).
    pub quantity: f64,
}
impl Serialize for FillEvent {
    /// Serialize with camelCase keys; the timestamp goes out as an RFC 3339
    /// string rather than raw epoch seconds.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let rfc3339_timestamp = Utc
            .timestamp_opt(self.timestamp as i64, 0)
            .unwrap()
            .to_rfc3339();
        let mut s = serializer.serialize_struct("FillEvent", 12)?;
        s.serialize_field("eventType", &self.event_type)?;
        s.serialize_field("maker", &self.maker)?;
        s.serialize_field("taker", &self.taker)?;
        s.serialize_field("takerSide", &self.taker_side)?;
        s.serialize_field("timestamp", &rfc3339_timestamp)?;
        s.serialize_field("seqNum", &self.seq_num)?;
        s.serialize_field("makerClientOrderId", &self.maker_client_order_id)?;
        s.serialize_field("takerClientOrderId", &self.taker_client_order_id)?; // make string
        s.serialize_field("makerFee", &self.maker_fee)?;
        s.serialize_field("takerFee", &self.taker_fee)?;
        s.serialize_field("price", &self.price)?;
        s.serialize_field("quantity", &self.quantity)?;
        s.end()
    }
}
impl FillEvent {
    /// Convert a mango-v4 perp [`PerpFillEvent`] into the feed's [`FillEvent`],
    /// converting lot-denominated price/quantity into UI units via the market
    /// config.
    pub fn new_from_perp(event: PerpFillEvent, config: &MarketConfig) -> Self {
        let taker_side = match event.taker_side() {
            Side::Ask => OrderbookSide::Ask,
            Side::Bid => OrderbookSide::Bid,
        };
        let price = price_lots_to_ui_perp(
            event.price,
            config.base_decimals,
            config.quote_decimals,
            config.base_lot_size,
            config.quote_lot_size,
        );
        let quantity =
            base_lots_to_ui_perp(event.quantity, config.base_decimals, config.base_lot_size);
        FillEvent {
            event_type: FillEventType::Perp,
            maker: event.maker.to_string(),
            taker: event.taker.to_string(),
            taker_side,
            timestamp: event.timestamp,
            seq_num: event.seq_num,
            maker_client_order_id: event.maker_client_order_id,
            taker_client_order_id: event.taker_client_order_id,
            maker_fee: event.maker_fee,
            taker_fee: event.taker_fee,
            price,
            quantity,
        }
    }

    /// Build a [`FillEvent`] from the maker and taker halves of a serum spot
    /// fill.
    ///
    /// Price and quantity are derived from the maker side: the fee/rebate is
    /// added back (maker bid) or subtracted (maker ask) to recover the pre-fee
    /// amounts, then scaled from native units using the market's base/quote
    /// decimals.
    ///
    /// # Panics
    /// Panics if either event is a `SpotEvent::Out` or carries an invalid side.
    pub fn new_from_spot(
        maker_event: SpotEvent,
        taker_event: SpotEvent,
        timestamp: u64, // scrape time, not on-chain time
        seq_num: u64,
        config: &MarketConfig,
    ) -> Self {
        match (maker_event, taker_event) {
            (
                SpotEvent::Fill {
                    side: maker_side,
                    client_order_id: maker_client_order_id,
                    native_qty_paid: maker_native_qty_paid,
                    native_fee_or_rebate: maker_native_fee_or_rebate,
                    native_qty_received: maker_native_qty_received,
                    owner: maker_owner,
                    ..
                },
                SpotEvent::Fill {
                    side: taker_side,
                    client_order_id: taker_client_order_id,
                    native_fee_or_rebate: taker_native_fee_or_rebate,
                    owner: taker_owner,
                    ..
                },
            ) => {
                // serum encodes sides as 0 = bid, 1 = ask.
                let maker_side = match maker_side as u8 {
                    0 => OrderbookSide::Bid,
                    1 => OrderbookSide::Ask,
                    _ => panic!("invalid side"),
                };
                let taker_side = match taker_side as u8 {
                    0 => OrderbookSide::Bid,
                    1 => OrderbookSide::Ask,
                    _ => panic!("invalid side"),
                };
                let maker_client_order_id: u64 = match maker_client_order_id {
                    Some(id) => id.into(),
                    None => 0u64,
                };
                let taker_client_order_id: u64 = match taker_client_order_id {
                    Some(id) => id.into(),
                    None => 0u64,
                };

                let base_multiplier = 10u64.pow(config.base_decimals.into());
                let quote_multiplier = 10u64.pow(config.quote_decimals.into());
                let (price, quantity) = match maker_side {
                    OrderbookSide::Bid => {
                        // Maker bid: paid quote, received base; add the fee back
                        // to recover the pre-fee quote amount.
                        let price_before_fees = maker_native_qty_paid + maker_native_fee_or_rebate;
                        let top = price_before_fees * base_multiplier;
                        let bottom = quote_multiplier * maker_native_qty_received;
                        let price = top as f64 / bottom as f64;
                        let quantity = maker_native_qty_received as f64 / base_multiplier as f64;
                        (price, quantity)
                    }
                    OrderbookSide::Ask => {
                        // Maker ask: received quote, paid base; subtract the
                        // rebate to recover the pre-fee quote amount.
                        let price_before_fees =
                            maker_native_qty_received - maker_native_fee_or_rebate;
                        let top = price_before_fees * base_multiplier;
                        let bottom = quote_multiplier * maker_native_qty_paid;
                        let price = top as f64 / bottom as f64;
                        let quantity = maker_native_qty_paid as f64 / base_multiplier as f64;
                        (price, quantity)
                    }
                };
                let maker_fee = maker_native_fee_or_rebate as f32 / quote_multiplier as f32;
                let taker_fee = taker_native_fee_or_rebate as f32 / quote_multiplier as f32;

                FillEvent {
                    event_type: FillEventType::Spot,
                    // serum stores owners as word arrays; reinterpret as pubkey bytes.
                    maker: Pubkey::try_from(cast_slice(&identity(maker_owner) as &[_]))
                        .unwrap()
                        .to_string(),
                    taker: Pubkey::try_from(cast_slice(&identity(taker_owner) as &[_]))
                        .unwrap()
                        .to_string(),
                    taker_side,
                    timestamp,
                    seq_num,
                    maker_client_order_id,
                    taker_client_order_id,
                    taker_fee,
                    maker_fee,
                    price,
                    quantity,
                }
            }
            (_, _) => {
                panic!("Can't build FillEvent from SpotEvent::Out")
            }
        }
    }
}
/// A single fill being added to or revoked from a market's fill stream.
#[derive(Clone, Debug)]
pub struct FillUpdate {
    pub event: FillEvent,
    pub status: FillUpdateStatus,
    /// Market pubkey, stringified.
    pub market_key: String,
    pub market_name: String,
    /// Slot the event-queue account state was observed at.
    pub slot: u64,
    pub write_version: u64,
}
impl Serialize for FillUpdate {
    /// Serialize with camelCase keys for the websocket JSON API.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut state = serializer.serialize_struct("FillUpdate", 6)?;
        state.serialize_field("event", &self.event)?;
        state.serialize_field("marketKey", &self.market_key)?;
        state.serialize_field("marketName", &self.market_name)?;
        state.serialize_field("status", &self.status)?;
        state.serialize_field("slot", &self.slot)?;
        state.serialize_field("writeVersion", &self.write_version)?;
        state.end()
    }
}
/// Notification that an event queue's head moved (events were consumed),
/// including the old and new head positions and sequence numbers.
#[derive(Clone, Debug)]
pub struct HeadUpdate {
    /// New head index into the event queue ring buffer.
    pub head: usize,
    pub prev_head: usize,
    pub head_seq_num: u64,
    pub prev_head_seq_num: u64,
    pub status: FillUpdateStatus,
    pub market_key: String,
    pub market_name: String,
    pub slot: u64,
    pub write_version: u64,
}
impl Serialize for HeadUpdate {
    /// Serialize with camelCase keys for the websocket JSON API.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Bug fix: the length hint must match the number of fields actually
        // serialized (9, it was wrongly declared as 6). serde_json ignores the
        // hint, but non-self-describing serializers rely on it being exact.
        let mut state = serializer.serialize_struct("HeadUpdate", 9)?;
        state.serialize_field("head", &self.head)?;
        state.serialize_field("previousHead", &self.prev_head)?;
        state.serialize_field("headSeqNum", &self.head_seq_num)?;
        state.serialize_field("previousHeadSeqNum", &self.prev_head_seq_num)?;
        state.serialize_field("marketKey", &self.market_key)?;
        state.serialize_field("marketName", &self.market_name)?;
        state.serialize_field("status", &self.status)?;
        state.serialize_field("slot", &self.slot)?;
        state.serialize_field("writeVersion", &self.write_version)?;
        state.end()
    }
}
/// Snapshot of all currently-live fills for one market's event queue,
/// sent to clients so they can initialize state before streaming updates.
#[derive(Clone, Debug)]
pub struct FillCheckpoint {
    /// Market pubkey, stringified.
    pub market: String,
    /// Event queue pubkey, stringified.
    pub queue: String,
    pub events: Vec<FillEvent>,
    pub slot: u64,
    pub write_version: u64,
}
impl Serialize for FillCheckpoint {
    /// Serialize for the websocket JSON API.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Bug fix: the length hint must match the number of fields actually
        // serialized (5, it was wrongly declared as 3). serde_json ignores the
        // hint, but non-self-describing serializers rely on it being exact.
        let mut state = serializer.serialize_struct("FillCheckpoint", 5)?;
        state.serialize_field("events", &self.events)?;
        state.serialize_field("market", &self.market)?;
        state.serialize_field("queue", &self.queue)?;
        state.serialize_field("slot", &self.slot)?;
        // NOTE(review): key is snake_case while the other messages use
        // camelCase ("writeVersion"); left unchanged to preserve the wire
        // format consumers may already depend on.
        state.serialize_field("write_version", &self.write_version)?;
        state.end()
    }
}
/// Messages produced by the fill event filter and fanned out to consumers
/// (websocket peers, postgres sink).
pub enum FillEventFilterMessage {
    Update(FillUpdate),
    HeadUpdate(HeadUpdate),
    Checkpoint(FillCheckpoint),
}
/// Client-to-server websocket commands, tagged by a `"command"` field in the
/// incoming JSON.
#[derive(Clone, Debug, Deserialize)]
#[serde(tag = "command")]
pub enum Command {
    #[serde(rename = "subscribe")]
    Subscribe(SubscribeCommand),
    #[serde(rename = "unsubscribe")]
    Unsubscribe(UnsubscribeCommand),
    #[serde(rename = "getMarkets")]
    GetMarkets,
}
/// Payload of a `subscribe` command. All fields are optional; a client may
/// subscribe by single market id, a list of market ids, or a list of accounts,
/// and may opt into head updates.
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SubscribeCommand {
    pub market_id: Option<String>,
    pub market_ids: Option<Vec<String>>,
    pub account_ids: Option<Vec<String>>,
    pub head_updates: Option<bool>,
}
/// Payload of an `unsubscribe` command: drop the subscription for one market.
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct UnsubscribeCommand {
    pub market_id: String,
}

View File

@ -0,0 +1,644 @@
mod fill_event_filter;
mod fill_event_postgres_target;
mod postgres_config;
use anchor_client::Cluster;
use anchor_lang::prelude::Pubkey;
use futures_channel::mpsc::{unbounded, UnboundedSender};
use futures_util::{
future::{self, Ready},
pin_mut, SinkExt, StreamExt, TryStreamExt,
};
use log::*;
use mango_feeds_connector::{
grpc_plugin_source, metrics,
metrics::{MetricType, MetricU64},
websocket_source, EntityFilter, FilterConfig, MetricsConfig, SourceConfig,
};
use mango_feeds_lib::MarketConfig;
use mango_feeds_lib::StatusResponse;
use mango_v4_client::{Client, MangoGroupContext, TransactionBuilderConfig};
use service_mango_fills::{Command, FillCheckpoint, FillEventFilterMessage, FillEventType};
use solana_sdk::commitment_config::CommitmentConfig;
use solana_sdk::signature::Keypair;
use std::{
collections::{HashMap, HashSet},
env,
fs::File,
io::Read,
net::SocketAddr,
str::FromStr,
sync::Arc,
sync::{
atomic::{AtomicBool, Ordering},
Mutex,
},
time::Duration,
};
use tokio::{
net::{TcpListener, TcpStream},
pin, time,
};
use tokio_tungstenite::tungstenite::{protocol::Message, Error};
use crate::postgres_config::PostgresConfig;
use serde::Deserialize;
/// Latest fill checkpoint per market, keyed by market id (pubkey string).
type CheckpointMap = Arc<Mutex<HashMap<String, FillCheckpoint>>>;
/// All connected websocket clients, keyed by their remote socket address.
type PeerMap = Arc<Mutex<HashMap<SocketAddr, Peer>>>;
// jemalloc seems to be better at keeping the memory footprint reasonable over
// longer periods of time
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
/// Per-client state; broadcast tasks clone snapshots of the peer map.
#[derive(Clone, Debug)]
pub struct Peer {
    /// Outbound queue, drained by the connection's forward task.
    pub sender: UnboundedSender<Message>,
    /// Market ids (pubkey strings) this peer receives fill updates for.
    pub market_subscriptions: HashSet<String>,
    /// Account pubkeys matched against a fill's taker/maker.
    pub account_subscriptions: HashSet<String>,
    /// Whether to also forward event-queue head updates.
    pub head_updates: bool,
}
#[allow(clippy::too_many_arguments)]
async fn handle_connection_error(
checkpoint_map: CheckpointMap,
peer_map: PeerMap,
market_ids: HashMap<String, String>,
raw_stream: TcpStream,
addr: SocketAddr,
metrics_opened_connections: MetricU64,
metrics_closed_connections: MetricU64,
) {
metrics_opened_connections.clone().increment();
let result = handle_connection(
checkpoint_map,
peer_map.clone(),
market_ids,
raw_stream,
addr,
)
.await;
if result.is_err() {
error!("connection {} error {}", addr, result.unwrap_err());
};
metrics_closed_connections.clone().increment();
peer_map.lock().unwrap().remove(&addr);
}
/// Runs the websocket protocol for one client until either direction ends.
///
/// Registers the peer (with an unbounded outbound channel) in `peer_map`,
/// then concurrently processes incoming commands and forwards queued
/// outbound messages; on exit the peer is removed from the map.
async fn handle_connection(
    checkpoint_map: CheckpointMap,
    peer_map: PeerMap,
    market_ids: HashMap<String, String>,
    raw_stream: TcpStream,
    addr: SocketAddr,
) -> Result<(), Error> {
    info!("ws connected: {}", addr);
    let ws_stream = tokio_tungstenite::accept_async(raw_stream).await?;
    let (ws_tx, ws_rx) = ws_stream.split();
    // 1: publish channel in peer map
    let (chan_tx, chan_rx) = unbounded();
    {
        // scope the lock so it is released before awaiting below
        peer_map.lock().unwrap().insert(
            addr,
            Peer {
                sender: chan_tx,
                market_subscriptions: HashSet::<String>::new(),
                account_subscriptions: HashSet::<String>::new(),
                head_updates: false,
            },
        );
    }
    // 2: answer commands and pings; everything else is ignored
    let receive_commands = ws_rx.try_for_each(|msg| match msg {
        Message::Text(_) => handle_commands(
            addr,
            msg,
            peer_map.clone(),
            checkpoint_map.clone(),
            market_ids.clone(),
        ),
        Message::Ping(_) => {
            let peers = peer_map.clone();
            let mut peers_lock = peers.lock().unwrap();
            let peer = peers_lock.get_mut(&addr).expect("peer should be in map");
            peer.sender
                .unbounded_send(Message::Pong(Vec::new()))
                .unwrap();
            future::ready(Ok(()))
        }
        _ => future::ready(Ok(())),
    });
    // 3: drain the outbound channel into the websocket
    let forward_updates = chan_rx.map(Ok).forward(ws_tx);
    pin_mut!(receive_commands, forward_updates);
    // run both directions until one finishes (client closed or send failed)
    future::select(receive_commands, forward_updates).await;
    peer_map.lock().unwrap().remove(&addr);
    info!("ws disconnected: {}", &addr);
    Ok(())
}
/// Parses one text message as a `Command` and mutates the sending peer's
/// subscriptions accordingly, replying with `StatusResponse` JSON.
///
/// Runs synchronously while holding the peer-map lock, so responses are sent
/// via the peer's unbounded channel rather than awaited.
fn handle_commands(
    addr: SocketAddr,
    msg: Message,
    peer_map: PeerMap,
    checkpoint_map: CheckpointMap,
    market_ids: HashMap<String, String>,
) -> Ready<Result<(), Error>> {
    let msg_str = msg.into_text().unwrap();
    let command: Result<Command, serde_json::Error> = serde_json::from_str(&msg_str);
    let mut peers = peer_map.lock().unwrap();
    let peer = peers.get_mut(&addr).expect("peer should be in map");
    match command {
        Ok(Command::Subscribe(cmd)) => {
            // if no market/account filter is supplied, subscribe to all markets
            let mut wildcard = true;
            // DEPRECATED
            if let Some(market_id) = cmd.market_id {
                wildcard = false;
                if market_ids.get(&market_id).is_none() {
                    let res = StatusResponse {
                        success: false,
                        message: "market not found",
                    };
                    peer.sender
                        .unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
                        .unwrap();
                    return future::ok(());
                }
                let subscribed = peer.market_subscriptions.insert(market_id.clone());
                let res = if subscribed {
                    StatusResponse {
                        success: true,
                        message: "subscribed",
                    }
                } else {
                    StatusResponse {
                        success: false,
                        message: "already subscribed",
                    }
                };
                peer.sender
                    .unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
                    .unwrap();
                // new subscribers get the current checkpoint so they start
                // from a consistent state
                if subscribed {
                    let checkpoint_map = checkpoint_map.lock().unwrap();
                    let checkpoint = checkpoint_map.get(&market_id);
                    match checkpoint {
                        Some(checkpoint) => {
                            peer.sender
                                .unbounded_send(Message::Text(
                                    serde_json::to_string(&checkpoint).unwrap(),
                                ))
                                .unwrap();
                        }
                        None => info!(
                            "no checkpoint available on client subscription for market {}",
                            &market_id
                        ),
                    };
                }
            }
            if let Some(cmd_market_ids) = cmd.market_ids {
                wildcard = false;
                for market_id in cmd_market_ids {
                    // any unknown market aborts the whole batch
                    if market_ids.get(&market_id).is_none() {
                        let res = StatusResponse {
                            success: false,
                            message: &format!("market {} not found", &market_id),
                        };
                        peer.sender
                            .unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
                            .unwrap();
                        return future::ok(());
                    }
                    if peer.market_subscriptions.insert(market_id.clone()) {
                        let checkpoint_map = checkpoint_map.lock().unwrap();
                        let checkpoint = checkpoint_map.get(&market_id);
                        let res = StatusResponse {
                            success: true,
                            message: &format!("subscribed to market {}", &market_id),
                        };
                        peer.sender
                            .unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
                            .unwrap();
                        match checkpoint {
                            Some(checkpoint) => {
                                peer.sender
                                    .unbounded_send(Message::Text(
                                        serde_json::to_string(&checkpoint).unwrap(),
                                    ))
                                    .unwrap();
                            }
                            None => info!(
                                "no checkpoint available on client subscription for market {}",
                                &market_id
                            ),
                        };
                    }
                }
            }
            if let Some(account_ids) = cmd.account_ids {
                wildcard = false;
                // account subscriptions are not validated against any list
                for account_id in account_ids {
                    if peer.account_subscriptions.insert(account_id.clone()) {
                        let res = StatusResponse {
                            success: true,
                            message: &format!("subscribed to account {}", &account_id),
                        };
                        peer.sender
                            .unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
                            .unwrap();
                    }
                }
            }
            if wildcard {
                for (market_id, market_name) in market_ids {
                    if peer.market_subscriptions.insert(market_id.clone()) {
                        let res = StatusResponse {
                            success: true,
                            message: &format!("subscribed to market {}", &market_name),
                        };
                        peer.sender
                            .unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
                            .unwrap();
                    }
                }
            }
            if let Some(head_updates) = cmd.head_updates {
                peer.head_updates = head_updates;
            }
        }
        Ok(Command::Unsubscribe(cmd)) => {
            info!("unsubscribe {}", cmd.market_id);
            let unsubscribed = peer.market_subscriptions.remove(&cmd.market_id);
            let res = if unsubscribed {
                StatusResponse {
                    success: true,
                    message: "unsubscribed",
                }
            } else {
                StatusResponse {
                    success: false,
                    message: "not subscribed",
                }
            };
            peer.sender
                .unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
                .unwrap();
        }
        Ok(Command::GetMarkets) => {
            info!("getMarkets");
            // replies with the market-id -> market-name map as JSON
            peer.sender
                .unbounded_send(Message::Text(serde_json::to_string(&market_ids).unwrap()))
                .unwrap();
        }
        Err(err) => {
            info!("error deserializing user input {:?}", err);
            let res = StatusResponse {
                success: false,
                message: "invalid input",
            };
            peer.sender
                .unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
                .unwrap();
        }
    };
    future::ok(())
}
/// Service configuration, read from the TOML file given as the first CLI arg.
#[derive(Clone, Debug, Deserialize)]
pub struct Config {
    /// Account/slot feed source (gRPC geyser plus snapshot settings)
    pub source: SourceConfig,
    pub metrics: MetricsConfig,
    /// Optional postgres sink for perp fills
    pub postgres: Option<PostgresConfig>,
    /// Listen address for the websocket server, e.g. "[::]:8080"
    pub bind_ws_addr: String,
    /// JSON-RPC URL; a leading '$' means "read from this env var"
    pub rpc_http_url: String,
    /// Mango group pubkey whose markets are served
    pub mango_group: String,
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let args: Vec<String> = std::env::args().collect();
let exit: Arc<AtomicBool> = Arc::new(AtomicBool::new(false));
if args.len() < 2 {
eprintln!("Please enter a config file path argument.");
return Ok(());
}
let config: Config = {
let mut file = File::open(&args[1])?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
toml::from_str(&contents).unwrap()
};
solana_logger::setup_with_default("info");
let metrics_tx = metrics::start(config.metrics, "fills".into());
let metrics_opened_connections =
metrics_tx.register_u64("fills_feed_opened_connections".into(), MetricType::Counter);
let metrics_closed_connections =
metrics_tx.register_u64("fills_feed_closed_connections".into(), MetricType::Counter);
let rpc_url = match &config.rpc_http_url.chars().next().unwrap() {
'$' => env::var(&config.rpc_http_url[1..]).expect("reading rpc http url from env"),
_ => config.rpc_http_url.clone(),
};
let ws_url = rpc_url.replace("https", "wss");
let rpc_timeout = Duration::from_secs(10);
let cluster = Cluster::Custom(rpc_url.clone(), ws_url.clone());
let client = Client::new(
cluster.clone(),
CommitmentConfig::processed(),
Arc::new(Keypair::new()),
Some(rpc_timeout),
TransactionBuilderConfig {
prioritization_micro_lamports: None,
},
);
let group_context = Arc::new(
MangoGroupContext::new_from_rpc(
&client.rpc_async(),
Pubkey::from_str(&config.mango_group).unwrap(),
)
.await?,
);
// todo: reload markets at intervals
let perp_market_configs: Vec<(Pubkey, MarketConfig)> = group_context
.perp_markets
.values()
.map(|context| {
let quote_decimals = match group_context.tokens.get(&context.market.settle_token_index)
{
Some(token) => token.decimals,
None => panic!("token not found for market"), // todo: default to 6 for usdc?
};
(
context.address,
MarketConfig {
name: context.market.name().to_owned(),
bids: context.market.bids,
asks: context.market.asks,
event_queue: context.market.event_queue,
oracle: context.market.oracle,
base_decimals: context.market.base_decimals,
quote_decimals,
base_lot_size: context.market.base_lot_size,
quote_lot_size: context.market.quote_lot_size,
},
)
})
.collect();
let spot_market_configs: Vec<(Pubkey, MarketConfig)> = group_context
.serum3_markets
.values()
.map(|context| {
let base_decimals = match group_context.tokens.get(&context.market.base_token_index) {
Some(token) => token.decimals,
None => panic!("token not found for market"), // todo: default?
};
let quote_decimals = match group_context.tokens.get(&context.market.quote_token_index) {
Some(token) => token.decimals,
None => panic!("token not found for market"), // todo: default to 6 for usdc?
};
(
context.market.serum_market_external,
MarketConfig {
name: context.market.name().to_owned(),
bids: context.bids,
asks: context.asks,
event_queue: context.event_q,
oracle: Pubkey::default(), // serum markets don't support oracle peg
base_decimals,
quote_decimals,
base_lot_size: context.pc_lot_size as i64,
quote_lot_size: context.coin_lot_size as i64,
},
)
})
.collect();
let perp_queue_pks: Vec<(Pubkey, Pubkey)> = group_context
.perp_markets
.values()
.map(|context| (context.address, context.market.event_queue))
.collect();
let _a: Vec<(String, String)> = group_context
.serum3_markets
.values()
.map(|context| {
(
context.market.serum_market_external.to_string(),
context.market.name().to_owned(),
)
})
.collect();
let b: Vec<(String, String)> = group_context
.perp_markets
.values()
.map(|context| {
(
context.address.to_string(),
context.market.name().to_owned(),
)
})
.collect();
let market_pubkey_strings: HashMap<String, String> = [b].concat().into_iter().collect();
let postgres_update_sender = match config.postgres {
Some(postgres_config) => Some(
fill_event_postgres_target::init(&postgres_config, metrics_tx.clone(), exit.clone())
.await?,
),
None => None,
};
let (account_write_queue_sender, slot_queue_sender, fill_receiver) = fill_event_filter::init(
perp_market_configs.clone(),
spot_market_configs.clone(),
metrics_tx.clone(),
exit.clone(),
)
.await?;
let checkpoints = CheckpointMap::new(Mutex::new(HashMap::new()));
let peers = PeerMap::new(Mutex::new(HashMap::new()));
let checkpoints_ref_thread = checkpoints.clone();
let peers_ref_thread = peers.clone();
let peers_ref_thread1 = peers.clone();
// filleventfilter websocket sink
tokio::spawn(async move {
pin!(fill_receiver);
loop {
let message = fill_receiver.recv().await.unwrap();
match message {
FillEventFilterMessage::Update(update) => {
debug!(
"ws update {} {:?} {:?} fill",
update.market_name, update.status, update.event.event_type
);
let mut peer_copy = peers_ref_thread.lock().unwrap().clone();
for (addr, peer) in peer_copy.iter_mut() {
let json = serde_json::to_string(&update.clone()).unwrap();
let is_subscribed = peer.market_subscriptions.contains(&update.market_key)
|| peer.account_subscriptions.contains(&update.event.taker)
|| peer.account_subscriptions.contains(&update.event.maker);
// only send updates if the peer is subscribed
if is_subscribed {
let result = peer.sender.send(Message::Text(json)).await;
if result.is_err() {
error!(
"ws update {} fill could not reach {}",
update.market_name, addr
);
}
}
}
// send fills to db
let update_c = update.clone();
if let (Some(sender), FillEventType::Perp) =
(postgres_update_sender.clone(), update_c.event.event_type)
{
sender.send(update_c).await.unwrap();
}
}
FillEventFilterMessage::Checkpoint(checkpoint) => {
checkpoints_ref_thread
.lock()
.unwrap()
.insert(checkpoint.queue.clone(), checkpoint);
}
FillEventFilterMessage::HeadUpdate(update) => {
debug!(
"ws update {} {:?} {} {} head",
update.market_name, update.status, update.head, update.prev_head
);
let mut peer_copy = peers_ref_thread.lock().unwrap().clone();
for (addr, peer) in peer_copy.iter_mut() {
let json = serde_json::to_string(&update.clone()).unwrap();
let is_subscribed = peer.market_subscriptions.contains(&update.market_key);
// only send updates if the peer is subscribed
if peer.head_updates && is_subscribed {
let result = peer.sender.send(Message::Text(json)).await;
if result.is_err() {
error!(
"ws update {} head could not reach {}",
update.market_name, addr
);
}
}
}
}
}
}
});
// websocket listener
info!("ws listen: {}", config.bind_ws_addr);
let try_socket = TcpListener::bind(&config.bind_ws_addr).await;
let listener = try_socket.expect("Failed to bind");
{
tokio::spawn(async move {
// Let's spawn the handling of each connection in a separate task.
while let Ok((stream, addr)) = listener.accept().await {
tokio::spawn(handle_connection_error(
checkpoints.clone(),
peers.clone(),
market_pubkey_strings.clone(),
stream,
addr,
metrics_opened_connections.clone(),
metrics_closed_connections.clone(),
));
}
});
}
// keepalive
{
tokio::spawn(async move {
let mut write_interval = time::interval(time::Duration::from_secs(30));
loop {
write_interval.tick().await;
let peers_copy = peers_ref_thread1.lock().unwrap().clone();
for (addr, peer) in peers_copy.iter() {
let pl = Vec::new();
let result = peer.clone().sender.send(Message::Ping(pl)).await;
if result.is_err() {
error!("ws ping could not reach {}", addr);
}
}
}
});
}
// handle sigint
{
let exit = exit.clone();
tokio::spawn(async move {
tokio::signal::ctrl_c().await.unwrap();
info!("Received SIGINT, shutting down...");
exit.store(true, Ordering::Relaxed);
});
}
info!(
"rpc connect: {}",
config
.source
.grpc_sources
.iter()
.map(|c| c.connection_string.clone())
.collect::<String>()
);
let use_geyser = true;
let all_queue_pks = [perp_queue_pks.clone()].concat();
let relevant_pubkeys = all_queue_pks.iter().map(|m| m.1).collect();
let filter_config = FilterConfig {
entity_filter: EntityFilter::FilterByAccountIds(relevant_pubkeys),
};
if use_geyser {
grpc_plugin_source::process_events(
&config.source,
&filter_config,
account_write_queue_sender,
slot_queue_sender,
metrics_tx.clone(),
exit.clone(),
)
.await;
} else {
websocket_source::process_events(
&config.source,
&filter_config,
account_write_queue_sender,
slot_queue_sender,
)
.await;
}
Ok(())
}

View File

@ -0,0 +1,31 @@
use serde_derive::Deserialize;
/// Settings for the optional postgres fill-event sink.
#[derive(Clone, Debug, Deserialize)]
pub struct PostgresConfig {
    /// Postgres connection string
    pub connection_string: String,
    /// Number of parallel postgres connections used for insertions
    pub connection_count: u64,
    /// Maximum batch size for inserts over one connection
    pub max_batch_size: usize,
    /// Max size of queues
    pub max_queue_size: usize,
    /// Number of queries retries before fatal error
    pub retry_query_max_count: u64,
    /// Seconds to sleep between query retries
    pub retry_query_sleep_secs: u64,
    /// Seconds to sleep between connection attempts
    pub retry_connection_sleep_secs: u64,
    /// Fatal error when the connection can't be reestablished this long
    pub fatal_connection_timeout_secs: u64,
    /// Allow invalid TLS certificates, passed to native_tls danger_accept_invalid_certs
    pub allow_invalid_certs: bool,
    /// Optional client certificate configuration; plain TLS settings above
    /// still apply when this is None
    pub tls: Option<PostgresTlsConfig>,
}
/// Client-certificate material for TLS connections to postgres.
#[derive(Clone, Debug, Deserialize)]
pub struct PostgresTlsConfig {
    /// CA Cert file or env var
    pub ca_cert_path: String,
    /// PKCS12 client cert path
    pub client_key_path: String,
}

View File

@ -0,0 +1,45 @@
[package]
name = "service-mango-orderbook"
version = "0.1.0"
authors = ["Riordan Panayides <riordan@panayid.es>"]
edition = "2021"
license = "AGPL-3.0-or-later"
[dependencies]
mango-feeds-lib = { path = "../../lib/mango-feeds-lib" }
mango-feeds-connector = { workspace = true }
solana-client = { workspace = true }
solana-logger = { workspace = true }
solana-sdk = { workspace = true }
anchor-lang = { workspace = true }
anchor-client = { workspace = true }
mango-v4 = { path = "../../programs/mango-v4", features = ["client"] }
mango-v4-client = { path = "../../lib/client" }
# note: we use a patched version of serum-dex to expose iterators - the mango program still uses the version 0.5.10+updates (commit c85e56d)
# 'groovie/v0.5.10-updates-expose-things' is a merge between https://github.com/jup-ag/openbook-program/tree/feat/expose-things and commit c85e56d
serum_dex = { git = "https://github.com/grooviegermanikus/program.git", branch = "groovie/v0.5.10-updates-expose-things" }
fixed = { workspace = true, features = ["serde"] }
bs58 = "0.3.1"
log = "0.4"
anyhow = "1.0"
toml = "0.5"
serde = "1.0.130"
serde_derive = "1.0.130"
serde_json = "1.0.68"
futures-channel = "0.3"
futures-util = "0.3"
ws = "^0.9.2"
async-channel = "1.6"
async-trait = "0.1"
bytemuck = "^1.7.2"
itertools = "0.10.3"
tokio = { version = "1", features = ["full"] }
tokio-tungstenite = "0.17"

View File

@ -0,0 +1,193 @@
# service-mango-orderbook
This module parses bookside accounts and exposes L2 and L3 data and updates on a websocket
Public API: `https://api.mngo.cloud/orderbook/v1/`
## API Reference
Get a list of markets
```
{
"command": "getMarkets"
}
```
```
{
"ESdnpnNLgTkBCZRuTJkZLi5wKEZ2z47SG3PJrhundSQ2": "SOL-PERP",
"HwhVGkfsSQ9JSQeQYu2CbkRCLvsh3qRZxG6m4oMVwZpN": "BTC-PERP",
"Fgh9JSZ2qfSjCw9RPJ85W2xbihsp2muLvfRztzoVR7f1": "ETH-PERP",
}
```
### L2 Data
Subscribe to L2 updates
```
{
"command": "subscribe",
"marketId": "MARKET_PUBKEY",
"subscriptionType": "level",
}
```
```
{
"success": true,
"message": "subscribed to level updates for MARKET_PUBKEY"
}
```
L2 Checkpoint - Sent upon initial subscription
```
{
"market": "ESdnpnNLgTkBCZRuTJkZLi5wKEZ2z47SG3PJrhundSQ2",
  "bids": [
[22.17, 8.86],
[22.15, 88.59],
],
"asks": [
[22.19, 9.17],
[22.21, 91.7],
],
"slot": 190826373,
"write_version": 688377208758
}
```
L2 Update - Sent per side
```
{
"market": "ESdnpnNLgTkBCZRuTJkZLi5wKEZ2z47SG3PJrhundSQ2",
  "bids": [ // or asks
[22.18, 6], // new level added
[22.17, 1], // level changed
[22.15, 0], // level removed
],
"slot": 190826375,
"write_version": 688377208759
}
```
### L3 Data
Subscribe to L3 updates
:warning: If the subscribed market is a perp market, `ownerPubkey` corresponds to a `mangoAccount`, if the subscribed market is a spot market, `ownerPubkey` corresponds to an open orders account.
```
{
"command": "subscribe",
"marketId": "MARKET_PUBKEY",
"subscriptionType": "book",
}
```
```
{
"success": true,
"message": "subscribed to book updates for MARKET_PUBKEY"
}
```
L3 Checkpoint - Sent upon initial subscription
```
{
"market": "ESdnpnNLgTkBCZRuTJkZLi5wKEZ2z47SG3PJrhundSQ2",
"bids": [
{
"price": 20.81,
"quantity": 1.3,
"ownerPubkey": "F1SZxEDxxCSLVjEBbMEjDYqajWRJQRCZBwPQnmcVvTLV"
},
{
"price": 20.81,
"quantity": 62.22,
"ownerPubkey": "BGYWnqfaauCeebFQXEfYuDCktiVG8pqpprrsD4qfqL53"
},
{
"price": 20.8,
"quantity": 8,
"ownerPubkey": "CtHuPg2ctVVV7nqmvVEcMtcWyJAgtZw9YcNHFQidjPgF"
}
],
"asks": [
{
"price": 20.94,
"quantity": 62.22,
"ownerPubkey": "BGYWnqfaauCeebFQXEfYuDCktiVG8pqpprrsD4qfqL53"
},
{
"price": 20.95,
"quantity": 1.3,
"ownerPubkey": "F1SZxEDxxCSLVjEBbMEjDYqajWRJQRCZBwPQnmcVvTLV"
},
{
"price": 21.31,
"quantity": 30,
"ownerPubkey": "5gHsqmFsMaguM3HMyEmnME4NMQKj6NrJWUGv6VKnc2Hk"
}
],
"slot": 190826373,
"write_version": 688377208758
}
```
L3 Update - Sent per side
```
{
"market": "ESdnpnNLgTkBCZRuTJkZLi5wKEZ2z47SG3PJrhundSQ2",
"side": "ask",
"additions": [
{
"price": 20.92,
"quantity": 61.93,
"ownerPubkey": "BGYWnqfaauCeebFQXEfYuDCktiVG8pqpprrsD4qfqL53"
}
],
"removals": [
{
"price": 20.92,
"quantity": 61.910000000000004,
"ownerPubkey": "BGYWnqfaauCeebFQXEfYuDCktiVG8pqpprrsD4qfqL53"
}
],
"slot": 197077534,
"write_version": 727782187614
}
```
## Setup
## Local
1. Prepare the connector configuration file.
[Here is an example](service-mango-orderbook/conf/example-config.toml).
- `bind_ws_addr` is the listen port for the websocket clients
- `rpc_ws_url` is unused and can stay empty.
- `connection_string` for your `grpc_sources` must point to the gRPC server
address configured for the plugin.
- `rpc_http_url` must point to the JSON-RPC URL.
- `program_id` must match what is configured for the gRPC plugin
2. Start the service binary.
Pass the path to the config file as the first argument. It logs to stdout. It
should be restarted on exit.
3. Monitor the logs
`WARN` messages can be recovered from. `ERROR` messages need attention. The
logs are very spammy; changing the default log level is recommended when you
don't want to analyze performance of the service.
## fly.io

View File

@ -0,0 +1,20 @@
bind_ws_addr = "0.0.0.0:8080"
rpc_http_url = "http://mango.rpcpool.com/<token>"
mango_group = "78b8f4cGCwmZ9ysPFMWLaLTkkaYnUjwMJYStWe5RTSSX"
[metrics]
output_stdout = true
output_http = true
[source]
dedup_queue_size = 50000
rpc_ws_url = "wss://mango.rpcpool.com/<token>"
[[source.grpc_sources]]
name = "accountsdb-client"
connection_string = "http://tyo64.rpcpool.com/"
retry_connection_sleep_secs = 30
[source.snapshot]
rpc_http_url = "http://mango.rpcpool.com/<token>"
program_id = "4MangoMjqJ2firMokCjjGgoK8d4MXcrgL7XJaL3w6fVg"

View File

@ -0,0 +1,21 @@
bind_ws_addr = "[::]:8080"
rpc_http_url = "$RPC_HTTP_URL"
mango_group = "78b8f4cGCwmZ9ysPFMWLaLTkkaYnUjwMJYStWe5RTSSX"
[metrics]
output_stdout = true
output_http = true
[source]
dedup_queue_size = 50000
rpc_ws_url = "$RPC_WS_URL"
[[source.grpc_sources]]
name = "accountsdb-client"
connection_string = "$GEYSER_CONNECTION_STRING"
token = "$GEYSER_TOKEN"
retry_connection_sleep_secs = 30
[source.snapshot]
rpc_http_url = "$RPC_HTTP_URL"
program_id = "srmqPvymJeFKQ4zGQed1GFppgkRHL9kaELCbyksJtPX"

View File

@ -0,0 +1,122 @@
use mango_feeds_lib::OrderbookSide;
use serde::{ser::SerializeStruct, Serialize, Serializer};
/// One aggregated L2 price level: `[price, quantity]`.
pub type OrderbookLevel = [f64; 2];
/// One side of an L3 book: the individual resting orders.
pub type Orderbook = Vec<Order>;
/// A single resting order as exposed on the L3 feed.
#[derive(Clone, Debug, Serialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct Order {
    pub price: f64,
    pub quantity: f64,
    /// Perp markets: the mango account; spot markets: the open orders account.
    /// (per the README) — serialized as `ownerPubkey`
    pub owner_pubkey: String,
}
/// Incremental L2 update for one side of one market; a quantity of 0 in
/// `update` removes the level (per the README examples).
#[derive(Clone, Debug)]
pub struct LevelUpdate {
    pub market: String,
    pub side: OrderbookSide,
    pub update: Vec<OrderbookLevel>,
    pub slot: u64,
    pub write_version: u64,
}
// Manual impl (instead of derive) so field names stay snake_case on the
// wire, e.g. "write_version".
impl Serialize for LevelUpdate {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut state = serializer.serialize_struct("LevelUpdate", 5)?;
        state.serialize_field("market", &self.market)?;
        state.serialize_field("side", &self.side)?;
        state.serialize_field("update", &self.update)?;
        state.serialize_field("slot", &self.slot)?;
        state.serialize_field("write_version", &self.write_version)?;
        state.end()
    }
}
/// Full L2 snapshot for one market, sent on initial "level" subscription.
#[derive(Clone, Debug)]
pub struct LevelCheckpoint {
    pub market: String,
    pub bids: Vec<OrderbookLevel>,
    pub asks: Vec<OrderbookLevel>,
    pub slot: u64,
    pub write_version: u64,
}
// Manual impl so field names stay snake_case on the wire.
impl Serialize for LevelCheckpoint {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // the length hint must match the number of serialize_field calls;
        // it was erroneously 3, which breaks formats that trust the hint
        let mut state = serializer.serialize_struct("LevelCheckpoint", 5)?;
        state.serialize_field("market", &self.market)?;
        state.serialize_field("bids", &self.bids)?;
        state.serialize_field("asks", &self.asks)?;
        state.serialize_field("slot", &self.slot)?;
        state.serialize_field("write_version", &self.write_version)?;
        state.end()
    }
}
/// Incremental L3 update for one side of one market: orders that appeared
/// and orders that disappeared since the last update.
#[derive(Clone, Debug)]
pub struct BookUpdate {
    pub market: String,
    pub side: OrderbookSide,
    pub additions: Vec<Order>,
    pub removals: Vec<Order>,
    pub slot: u64,
    pub write_version: u64,
}
// Manual impl so field names stay snake_case on the wire.
impl Serialize for BookUpdate {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut state = serializer.serialize_struct("BookUpdate", 6)?;
        state.serialize_field("market", &self.market)?;
        state.serialize_field("side", &self.side)?;
        state.serialize_field("additions", &self.additions)?;
        state.serialize_field("removals", &self.removals)?;
        state.serialize_field("slot", &self.slot)?;
        state.serialize_field("write_version", &self.write_version)?;
        state.end()
    }
}
/// Full L3 snapshot for one market, sent on initial "book" subscription.
#[derive(Clone, Debug)]
pub struct BookCheckpoint {
    pub market: String,
    pub bids: Vec<Order>,
    pub asks: Vec<Order>,
    pub slot: u64,
    pub write_version: u64,
}
// Manual impl so field names stay snake_case on the wire.
impl Serialize for BookCheckpoint {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // struct name corrected: it was copy-pasted as "LevelCheckpoint",
        // which mislabels BookCheckpoint in self-describing formats
        let mut state = serializer.serialize_struct("BookCheckpoint", 5)?;
        state.serialize_field("market", &self.market)?;
        state.serialize_field("bids", &self.bids)?;
        state.serialize_field("asks", &self.asks)?;
        state.serialize_field("slot", &self.slot)?;
        state.serialize_field("write_version", &self.write_version)?;
        state.end()
    }
}
/// Messages emitted by the orderbook filter and fanned out to websocket
/// consumers: incremental updates and full snapshots, for L2 and L3.
pub enum OrderbookFilterMessage {
    LevelUpdate(LevelUpdate),
    LevelCheckpoint(LevelCheckpoint),
    BookUpdate(BookUpdate),
    BookCheckpoint(BookCheckpoint),
}

View File

@ -0,0 +1,631 @@
mod orderbook_filter;
use anchor_client::Cluster;
use futures_channel::mpsc::{unbounded, UnboundedSender};
use futures_util::{
future::{self, Ready},
pin_mut, SinkExt, StreamExt, TryStreamExt,
};
use itertools::Itertools;
use log::*;
use mango_v4_client::{Client, MangoGroupContext, TransactionBuilderConfig};
use solana_sdk::commitment_config::CommitmentConfig;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Keypair;
use std::{
collections::{HashMap, HashSet},
env, fmt,
fs::File,
io::Read,
net::SocketAddr,
str::FromStr,
sync::{
atomic::{AtomicBool, Ordering},
Arc, Mutex,
},
time::Duration,
};
use tokio::{
net::{TcpListener, TcpStream},
pin, time,
};
use tokio_tungstenite::tungstenite::{protocol::Message, Error};
use mango_feeds_connector::EntityFilter::FilterByAccountIds;
use mango_feeds_connector::{
grpc_plugin_source, metrics, websocket_source, MetricsConfig, SourceConfig,
};
use mango_feeds_connector::{
metrics::{MetricType, MetricU64},
FilterConfig,
};
use mango_feeds_lib::MarketConfig;
use mango_feeds_lib::StatusResponse;
use serde::{Deserialize, Serialize};
use service_mango_orderbook::{BookCheckpoint, LevelCheckpoint, OrderbookFilterMessage};
type LevelCheckpointMap = Arc<Mutex<HashMap<String, LevelCheckpoint>>>;
type BookCheckpointMap = Arc<Mutex<HashMap<String, BookCheckpoint>>>;
type PeerMap = Arc<Mutex<HashMap<SocketAddr, Peer>>>;
/// Client -> server websocket commands, deserialized from JSON tagged by the
/// "command" field.
#[derive(Clone, Debug, Deserialize)]
#[serde(tag = "command")]
pub enum Command {
    #[serde(rename = "subscribe")]
    Subscribe(SubscribeCommand),
    #[serde(rename = "unsubscribe")]
    Unsubscribe(UnsubscribeCommand),
    #[serde(rename = "getMarkets")]
    GetMarkets,
}
/// Payload of the "subscribe" command: one market id plus the subscription
/// kind (defaults to L2 "level" when omitted).
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SubscribeCommand {
    pub market_id: String,
    pub subscription_type: Option<SubscriptionType>,
}
/// Which feed a client subscribes to: aggregated L2 levels or per-order L3.
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum SubscriptionType {
    #[serde(rename = "level")]
    Level,
    #[serde(rename = "book")]
    Book,
}
impl fmt::Display for SubscriptionType {
    /// Human-readable name, matching the wire form used in status messages.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let label = match self {
            SubscriptionType::Level => "level",
            SubscriptionType::Book => "book",
        };
        write!(f, "{}", label)
    }
}
/// Payload of the "unsubscribe" command: the single market id to drop.
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct UnsubscribeCommand {
    pub market_id: String,
}
/// Per-client state; broadcast tasks clone snapshots of the peer map.
#[derive(Clone, Debug)]
pub struct Peer {
    /// Outbound queue, drained by the connection's forward task.
    pub sender: UnboundedSender<Message>,
    /// Market ids subscribed for L2 level updates.
    pub level_subscriptions: HashSet<String>,
    /// Market ids subscribed for L3 book updates.
    pub book_subscriptions: HashSet<String>,
}
/// Service configuration, read from the TOML file given as the first CLI arg.
#[derive(Clone, Debug, Deserialize)]
pub struct Config {
    /// Account/slot feed source (gRPC geyser plus snapshot settings)
    pub source: SourceConfig,
    pub metrics: MetricsConfig,
    /// Listen address for the websocket server, e.g. "[::]:8080"
    pub bind_ws_addr: String,
    /// JSON-RPC URL; a leading '$' means "read from this env var"
    pub rpc_http_url: String,
    /// Mango group pubkey whose markets are served
    pub mango_group: String,
}
#[allow(clippy::too_many_arguments)]
async fn handle_connection_error(
level_checkpoint_map: LevelCheckpointMap,
book_checkpoint_map: BookCheckpointMap,
peer_map: PeerMap,
market_ids: HashMap<String, String>,
raw_stream: TcpStream,
addr: SocketAddr,
metrics_opened_connections: MetricU64,
metrics_closed_connections: MetricU64,
) {
metrics_opened_connections.clone().increment();
let result = handle_connection(
level_checkpoint_map,
book_checkpoint_map,
peer_map.clone(),
market_ids,
raw_stream,
addr,
)
.await;
if result.is_err() {
error!("connection {} error {}", addr, result.unwrap_err());
};
metrics_closed_connections.clone().increment();
peer_map.lock().unwrap().remove(&addr);
}
/// Runs the websocket protocol for one client until either direction ends.
///
/// Registers the peer (with an unbounded outbound channel) in `peer_map`,
/// then concurrently processes incoming commands and forwards queued
/// outbound messages; on exit the peer is removed from the map.
async fn handle_connection(
    level_checkpoint_map: LevelCheckpointMap,
    book_checkpoint_map: BookCheckpointMap,
    peer_map: PeerMap,
    market_ids: HashMap<String, String>,
    raw_stream: TcpStream,
    addr: SocketAddr,
) -> Result<(), Error> {
    info!("ws connected: {}", addr);
    let ws_stream = tokio_tungstenite::accept_async(raw_stream).await?;
    let (ws_tx, ws_rx) = ws_stream.split();
    // 1: publish channel in peer map
    let (chan_tx, chan_rx) = unbounded();
    {
        // scope the lock so it is released before awaiting below
        peer_map.lock().unwrap().insert(
            addr,
            Peer {
                sender: chan_tx,
                level_subscriptions: HashSet::<String>::new(),
                book_subscriptions: HashSet::<String>::new(),
            },
        );
    }
    // 2: answer commands and pings; everything else is ignored
    let receive_commands = ws_rx.try_for_each(|msg| match msg {
        Message::Text(_) => handle_commands(
            addr,
            msg,
            peer_map.clone(),
            level_checkpoint_map.clone(),
            book_checkpoint_map.clone(),
            market_ids.clone(),
        ),
        Message::Ping(_) => {
            let peers = peer_map.clone();
            let mut peers_lock = peers.lock().unwrap();
            let peer = peers_lock.get_mut(&addr).expect("peer should be in map");
            peer.sender
                .unbounded_send(Message::Pong(Vec::new()))
                .unwrap();
            future::ready(Ok(()))
        }
        _ => future::ready(Ok(())),
    });
    // 3: drain the outbound channel into the websocket
    let forward_updates = chan_rx.map(Ok).forward(ws_tx);
    pin_mut!(receive_commands, forward_updates);
    // run both directions until one finishes (client closed or send failed)
    future::select(receive_commands, forward_updates).await;
    peer_map.lock().unwrap().remove(&addr);
    info!("ws disconnected: {}", &addr);
    Ok(())
}
/// Parses one text message as a `Command` and mutates the sending peer's
/// subscriptions accordingly, replying with `StatusResponse` JSON.
///
/// Runs synchronously while holding the peer-map lock, so responses are sent
/// via the peer's unbounded channel rather than awaited.
fn handle_commands(
    addr: SocketAddr,
    msg: Message,
    peer_map: PeerMap,
    level_checkpoint_map: LevelCheckpointMap,
    book_checkpoint_map: BookCheckpointMap,
    market_ids: HashMap<String, String>,
) -> Ready<Result<(), Error>> {
    let msg_str = msg.into_text().unwrap();
    let command: Result<Command, serde_json::Error> = serde_json::from_str(&msg_str);
    let mut peers = peer_map.lock().unwrap();
    let peer = peers.get_mut(&addr).expect("peer should be in map");
    match command {
        Ok(Command::Subscribe(cmd)) => {
            let market_id = cmd.market_id;
            if market_ids.get(&market_id).is_none() {
                let res = StatusResponse {
                    success: false,
                    message: "market not found",
                };
                peer.sender
                    .unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
                    .unwrap();
                return future::ok(());
            }
            // default to level subscription
            let subscription_type = match cmd.subscription_type {
                Some(subscription) => subscription,
                None => SubscriptionType::Level,
            };
            let subscribed = match subscription_type {
                SubscriptionType::Level => peer.level_subscriptions.insert(market_id.clone()),
                SubscriptionType::Book => peer.book_subscriptions.insert(market_id.clone()),
            };
            let message = format!(
                "subscribed to {} updates for {}",
                subscription_type, market_id
            );
            let res = if subscribed {
                StatusResponse {
                    success: true,
                    message: &message,
                }
            } else {
                StatusResponse {
                    success: false,
                    message: "already subscribed",
                }
            };
            peer.sender
                .unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
                .unwrap();
            // new subscribers get the matching checkpoint so they start from
            // a consistent snapshot
            if subscribed {
                match subscription_type {
                    SubscriptionType::Level => {
                        send_checkpoint(&level_checkpoint_map, &market_id, peer);
                    }
                    SubscriptionType::Book => {
                        send_checkpoint(&book_checkpoint_map, &market_id, peer);
                    }
                };
            }
        }
        Ok(Command::Unsubscribe(cmd)) => {
            info!("unsubscribe {}", cmd.market_id);
            // remove the market from BOTH subscription kinds; previously only
            // level subscriptions were removed, so "book" subscribers could
            // never unsubscribe
            let level_unsubscribed = peer.level_subscriptions.remove(&cmd.market_id);
            let book_unsubscribed = peer.book_subscriptions.remove(&cmd.market_id);
            let unsubscribed = level_unsubscribed || book_unsubscribed;
            let res = if unsubscribed {
                StatusResponse {
                    success: true,
                    message: "unsubscribed",
                }
            } else {
                StatusResponse {
                    success: false,
                    message: "not subscribed",
                }
            };
            peer.sender
                .unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
                .unwrap();
        }
        Ok(Command::GetMarkets) => {
            info!("getMarkets");
            // replies with the market-id -> market-name map as JSON
            peer.sender
                .unbounded_send(Message::Text(serde_json::to_string(&market_ids).unwrap()))
                .unwrap();
        }
        Err(err) => {
            info!("error deserializing user input {:?}", err);
            let res = StatusResponse {
                success: false,
                message: "invalid input",
            };
            peer.sender
                .unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
                .unwrap();
        }
    };
    future::ok(())
}
/// Push the latest stored checkpoint for `market_id` to a freshly subscribed peer.
///
/// If no checkpoint has been produced for the market yet, nothing is sent and
/// an informational message is logged instead.
fn send_checkpoint<T>(checkpoint_map: &Mutex<HashMap<String, T>>, market_id: &str, peer: &Peer)
where
    T: Serialize,
{
    let guard = checkpoint_map.lock().unwrap();
    if let Some(checkpoint) = guard.get(market_id) {
        let payload = serde_json::to_string(&checkpoint).unwrap();
        peer.sender.unbounded_send(Message::Text(payload)).unwrap();
    } else {
        info!("no checkpoint available on client subscription"); // todo: what to do here?
    }
}
/// Entry point for the mango orderbook websocket service.
///
/// Reads a TOML config (path = first CLI argument), loads the mango group and
/// market metadata over RPC, then wires together:
///   - an orderbook filter task (account updates -> L2/L3 diff messages),
///   - a task fanning those messages out to subscribed websocket peers,
///   - the websocket accept loop, a keepalive pinger and a SIGINT handler,
///   - a geyser gRPC event source feeding the filter (blocks until shutdown).
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    solana_logger::setup_with_default("info");
    // Shared shutdown flag; set by the SIGINT handler and polled by every task.
    let exit: Arc<AtomicBool> = Arc::new(AtomicBool::new(false));
    // load config
    let args: Vec<String> = std::env::args().collect();
    if args.len() < 2 {
        eprintln!("Please enter a config file path argument");
        return Ok(());
    }
    let config: Config = {
        let mut file = File::open(&args[1])?;
        let mut contents = String::new();
        file.read_to_string(&mut contents)?;
        toml::from_str(&contents).unwrap()
    };
    // setup metrics
    let metrics_tx = metrics::start(config.metrics, "orderbook".into());
    let metrics_opened_connections =
        metrics_tx.register_u64("orderbook_opened_connections".into(), MetricType::Counter);
    let metrics_closed_connections =
        metrics_tx.register_u64("orderbook_closed_connections".into(), MetricType::Counter);
    // load mango group and markets from rpc
    // A leading '$' means "read the URL from this environment variable".
    // NOTE(review): chars().next().unwrap() panics on an empty rpc_http_url.
    let rpc_url = match &config.rpc_http_url.chars().next().unwrap() {
        '$' => env::var(&config.rpc_http_url[1..]).expect("reading rpc url from env"),
        _ => config.rpc_http_url.clone(),
    };
    // NOTE(review): assumes the configured URL starts with "https"; a plain
    // "http" URL would not be rewritten to a websocket scheme.
    let ws_url = rpc_url.replace("https", "wss");
    let rpc_timeout = Duration::from_secs(10);
    let cluster = Cluster::Custom(rpc_url.clone(), ws_url.clone());
    // Read-only usage: a throwaway keypair is fine since nothing is signed.
    let client = Client::new(
        cluster.clone(),
        CommitmentConfig::processed(),
        Arc::new(Keypair::new()),
        Some(rpc_timeout),
        TransactionBuilderConfig {
            prioritization_micro_lamports: None,
        },
    );
    let group_context = Arc::new(
        MangoGroupContext::new_from_rpc(
            &client.rpc_async(),
            Pubkey::from_str(&config.mango_group).unwrap(),
        )
        .await?,
    );
    // todo: reload markets at intervals
    // Perp markets: quote decimals come from the settle token.
    let market_configs: Vec<(Pubkey, MarketConfig)> = group_context
        .perp_markets
        .values()
        .map(|context| {
            let quote_decimals = match group_context.tokens.get(&context.market.settle_token_index)
            {
                Some(token) => token.decimals,
                None => panic!("token not found for market"), // todo: default to 6 for usdc?
            };
            (
                context.address,
                MarketConfig {
                    name: context.market.name().to_owned(),
                    bids: context.market.bids,
                    asks: context.market.asks,
                    event_queue: context.market.event_queue,
                    oracle: context.market.oracle,
                    base_decimals: context.market.base_decimals,
                    quote_decimals,
                    base_lot_size: context.market.base_lot_size,
                    quote_lot_size: context.market.quote_lot_size,
                },
            )
        })
        .collect();
    // Spot (serum/openbook) markets, keyed by the external serum market address.
    let serum_market_configs: Vec<(Pubkey, MarketConfig)> = group_context
        .serum3_markets
        .values()
        .map(|context| {
            let base_decimals = match group_context.tokens.get(&context.market.base_token_index) {
                Some(token) => token.decimals,
                None => panic!("token not found for market"), // todo: default?
            };
            let quote_decimals = match group_context.tokens.get(&context.market.quote_token_index) {
                Some(token) => token.decimals,
                None => panic!("token not found for market"), // todo: default to 6 for usdc?
            };
            (
                context.market.serum_market_external,
                MarketConfig {
                    name: context.market.name().to_owned(),
                    bids: context.bids,
                    asks: context.asks,
                    event_queue: context.event_q,
                    oracle: Pubkey::default(), // serum markets dont support oracle peg
                    base_decimals,
                    quote_decimals,
                    base_lot_size: context.coin_lot_size as i64,
                    quote_lot_size: context.pc_lot_size as i64,
                },
            )
        })
        .collect();
    // Map of market pubkey (as string) -> market name, served on getMarkets.
    let market_pubkey_strings: HashMap<String, String> =
        [market_configs.clone(), serum_market_configs.clone()]
            .concat()
            .iter()
            .map(|market| (market.0.to_string(), market.1.name.clone()))
            .collect::<Vec<(String, String)>>()
            .into_iter()
            .collect();
    let (account_write_queue_sender, slot_queue_sender, orderbook_receiver) =
        orderbook_filter::init(
            market_configs.clone(),
            serum_market_configs.clone(),
            metrics_tx.clone(),
            exit.clone(),
        )
        .await?;
    // Latest checkpoint per market, sent to peers right after they subscribe.
    let level_checkpoints = LevelCheckpointMap::new(Mutex::new(HashMap::new()));
    let book_checkpoints = BookCheckpointMap::new(Mutex::new(HashMap::new()));
    let peers = PeerMap::new(Mutex::new(HashMap::new()));
    // orderbook receiver
    // Fans filter output out to subscribed peers and records the latest
    // checkpoint per market for newly subscribing clients.
    {
        let level_checkpoints = level_checkpoints.clone();
        let book_checkpoints = book_checkpoints.clone();
        let peers = peers.clone();
        let exit = exit.clone();
        tokio::spawn(async move {
            pin!(orderbook_receiver);
            loop {
                if exit.load(Ordering::Relaxed) {
                    warn!("shutting down orderbook receiver...");
                    break;
                }
                // NOTE(review): panics if the filter task closes the channel.
                let message: OrderbookFilterMessage = orderbook_receiver.recv().await.unwrap();
                match message {
                    OrderbookFilterMessage::LevelUpdate(update) => {
                        debug!("ws level update {} {:?}", update.market, update.side);
                        // Clone the peer map so the mutex isn't held across awaits.
                        let mut peer_copy = peers.lock().unwrap().clone();
                        for (addr, peer) in peer_copy.iter_mut() {
                            let json = serde_json::to_string(&update).unwrap();
                            // only send updates if the peer is subscribed
                            if peer.level_subscriptions.contains(&update.market) {
                                let result = peer.sender.send(Message::Text(json)).await;
                                if result.is_err() {
                                    error!(
                                        "ws level update {} {:?} could not reach {}",
                                        update.market, update.side, addr
                                    );
                                }
                            }
                        }
                    }
                    OrderbookFilterMessage::LevelCheckpoint(checkpoint) => {
                        debug!("ws level checkpoint {}", checkpoint.market);
                        level_checkpoints
                            .lock()
                            .unwrap()
                            .insert(checkpoint.market.clone(), checkpoint);
                    }
                    OrderbookFilterMessage::BookUpdate(update) => {
                        debug!("ws book update {} {:?}", update.market, update.side);
                        let mut peer_copy = peers.lock().unwrap().clone();
                        for (addr, peer) in peer_copy.iter_mut() {
                            let json = serde_json::to_string(&update).unwrap();
                            // only send updates if the peer is subscribed
                            if peer.book_subscriptions.contains(&update.market) {
                                let result = peer.sender.send(Message::Text(json)).await;
                                if result.is_err() {
                                    error!(
                                        "ws book update {} {:?} could not reach {}",
                                        update.market, update.side, addr
                                    );
                                }
                            }
                        }
                    }
                    OrderbookFilterMessage::BookCheckpoint(checkpoint) => {
                        debug!("ws book checkpoint {}", checkpoint.market);
                        book_checkpoints
                            .lock()
                            .unwrap()
                            .insert(checkpoint.market.clone(), checkpoint);
                    }
                }
            }
        });
    }
    // websocket server
    {
        info!("ws listen: {}", config.bind_ws_addr);
        let try_socket = TcpListener::bind(&config.bind_ws_addr).await;
        let listener = try_socket.expect("Failed to bind");
        let exit = exit.clone();
        let peers = peers.clone();
        tokio::spawn(async move {
            // Let's spawn the handling of each connection in a separate task.
            while let Ok((stream, addr)) = listener.accept().await {
                if exit.load(Ordering::Relaxed) {
                    warn!("shutting down websocket server...");
                    break;
                }
                tokio::spawn(handle_connection_error(
                    level_checkpoints.clone(),
                    book_checkpoints.clone(),
                    peers.clone(),
                    market_pubkey_strings.clone(),
                    stream,
                    addr,
                    metrics_opened_connections.clone(),
                    metrics_closed_connections.clone(),
                ));
            }
        });
    }
    // keepalive
    // Pings every connected peer every 30s so dead connections get noticed.
    {
        let exit = exit.clone();
        let peers = peers.clone();
        tokio::spawn(async move {
            let mut write_interval = time::interval(time::Duration::from_secs(30));
            loop {
                if exit.load(Ordering::Relaxed) {
                    warn!("shutting down keepalive...");
                    break;
                }
                write_interval.tick().await;
                let peers_copy = peers.lock().unwrap().clone();
                for (addr, peer) in peers_copy.iter() {
                    let pl = Vec::new();
                    let result = peer.clone().sender.send(Message::Ping(pl)).await;
                    if result.is_err() {
                        error!("ws ping could not reach {}", addr);
                    }
                }
            }
        });
    }
    // handle sigint
    {
        let exit = exit.clone();
        tokio::spawn(async move {
            tokio::signal::ctrl_c().await.unwrap();
            info!("Received SIGINT, shutting down...");
            exit.store(true, Ordering::Relaxed);
        });
    }
    info!(
        "rpc connect: {}",
        config
            .source
            .grpc_sources
            .iter()
            .map(|c| c.connection_string.clone())
            .collect::<String>()
    );
    // Subscribe only to accounts we actually read: every bookside plus the
    // perp-market oracles.
    let relevant_pubkeys = [market_configs.clone(), serum_market_configs.clone()]
        .concat()
        .iter()
        .flat_map(|m| [m.1.bids, m.1.asks])
        .collect_vec();
    let filter_config = FilterConfig {
        entity_filter: FilterByAccountIds(
            [
                relevant_pubkeys,
                market_configs
                    .iter()
                    .map(|(_, mkt)| mkt.oracle)
                    .collect_vec(),
            ]
            .concat()
            .to_vec(),
        ),
    };
    let use_geyser = true;
    if use_geyser {
        // Blocks until the event stream ends or `exit` is set.
        grpc_plugin_source::process_events(
            &config.source,
            &filter_config,
            account_write_queue_sender,
            slot_queue_sender,
            metrics_tx.clone(),
            exit.clone(),
        )
        .await;
    } else {
        websocket_source::process_events(
            &config.source,
            &filter_config,
            account_write_queue_sender,
            slot_queue_sender,
        )
        .await;
    }
    Ok(())
}

View File

@ -0,0 +1,543 @@
use anchor_lang::AccountDeserialize;
use fixed::types::I80F48;
use itertools::Itertools;
use log::*;
use mango_feeds_connector::metrics::MetricU64;
use mango_feeds_connector::{
chain_data::{AccountData, ChainData, ChainDataMetrics, SlotData},
metrics::{MetricType, Metrics},
AccountWrite, SlotUpdate,
};
use mango_feeds_lib::{
base_lots_to_ui, base_lots_to_ui_perp, price_lots_to_ui, price_lots_to_ui_perp, MarketConfig,
OrderbookSide,
};
use mango_v4::accounts_zerocopy::{AccountReader, KeyedAccountReader};
use mango_v4::state::oracle_state_unchecked;
use mango_v4::state::OracleConfigParams;
use mango_v4::{
serum3_cpi::OrderBookStateHeader,
state::{BookSide, OrderTreeType},
};
use serum_dex::critbit::Slab;
use service_mango_orderbook::{
BookCheckpoint, BookUpdate, LevelCheckpoint, LevelUpdate, Order, OrderbookFilterMessage,
OrderbookLevel,
};
use solana_sdk::account::AccountSharedData;
use solana_sdk::{
account::{ReadableAccount, WritableAccount},
clock::Epoch,
pubkey::Pubkey,
};
use std::borrow::BorrowMut;
use std::{
collections::{HashMap, HashSet},
mem::size_of,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::{SystemTime, UNIX_EPOCH},
};
/// Adapter that exposes a solana `AccountSharedData` (plus its pubkey)
/// through the mango-v4 `AccountReader`/`KeyedAccountReader` traits.
struct KeyedSharedDataAccountReader {
    pub key: Pubkey,
    pub shared: AccountSharedData,
}
impl AccountReader for KeyedSharedDataAccountReader {
    // Both methods delegate to the ReadableAccount impl of the wrapped account.
    fn owner(&self) -> &Pubkey {
        ReadableAccount::owner(&self.shared)
    }
    fn data(&self) -> &[u8] {
        ReadableAccount::data(&self.shared)
    }
}
impl KeyedAccountReader for KeyedSharedDataAccountReader {
    // The address of the wrapped account.
    fn key(&self) -> &Pubkey {
        &self.key
    }
}
/// Diff one side of an orderbook against its previously published state and
/// publish the changes on `orderbook_update_sender`:
///   - `LevelUpdate` (L2 price-level deltas; vanished levels are sent with
///     quantity 0) when any level changed,
///   - `BookUpdate` (L3 order additions/removals) when any order changed,
///   - full `BookCheckpoint`/`LevelCheckpoint` snapshots whenever the opposite
///     side of the book (`maybe_other_orders`) is available in the cache.
///
/// `current_orders`/`previous_orders` are the same side (`side`) of market
/// `mkt`. The corresponding metrics are incremented per published delta.
#[allow(clippy::too_many_arguments)]
#[allow(clippy::ptr_arg)]
fn publish_changes(
    slot: u64,
    write_version: u64,
    mkt: &(Pubkey, MarketConfig),
    side: OrderbookSide,
    current_orders: &Vec<Order>,
    previous_orders: &Vec<Order>,
    maybe_other_orders: Option<&Vec<Order>>,
    orderbook_update_sender: &async_channel::Sender<OrderbookFilterMessage>,
    metric_book_updates: &mut MetricU64,
    metric_level_updates: &mut MetricU64,
) {
    let mut level_update: Vec<OrderbookLevel> = vec![];
    let mut book_additions: Vec<Order> = vec![];
    let mut book_removals: Vec<Order> = vec![];
    // Aggregate orders into L2 levels: [price, summed quantity] per price.
    let current_bookside: Vec<OrderbookLevel> = current_orders
        .iter()
        .group_by(|order| order.price)
        .into_iter()
        .map(|(price, group)| [price, group.map(|o| o.quantity).sum()])
        .collect();
    let previous_bookside: Vec<OrderbookLevel> = previous_orders
        .iter()
        .group_by(|order| order.price)
        .into_iter()
        .map(|(price, group)| [price, group.map(|o| o.quantity).sum()])
        .collect();
    // push diff for levels that are no longer present
    if current_bookside.len() != previous_bookside.len() {
        debug!(
            "L {}",
            current_bookside.len() as i64 - previous_bookside.len() as i64
        )
    }
    // L3: orders that disappeared since the previous snapshot.
    for prev_order in previous_orders.iter() {
        let peer = current_orders.iter().find(|order| prev_order == *order);
        match peer {
            None => {
                debug!("R {:?}", prev_order);
                book_removals.push(prev_order.clone());
            }
            _ => continue,
        }
    }
    // L2: levels that disappeared entirely are published with quantity 0.
    for previous_level in previous_bookside.iter() {
        let peer = current_bookside
            .iter()
            .find(|level| previous_level[0] == level[0]);
        match peer {
            None => {
                debug!("R {} {}", previous_level[0], previous_level[1]);
                level_update.push([previous_level[0], 0f64]);
            }
            _ => continue,
        }
    }
    // push diff where there's a new level or size has changed
    for current_level in &current_bookside {
        let peer = previous_bookside
            .iter()
            .find(|item| item[0] == current_level[0]);
        match peer {
            Some(previous_level) => {
                if previous_level[1] == current_level[1] {
                    continue;
                }
                debug!(
                    "C {} {} -> {}",
                    current_level[0], previous_level[1], current_level[1]
                );
                level_update.push(*current_level);
            }
            None => {
                debug!("A {} {}", current_level[0], current_level[1]);
                level_update.push(*current_level)
            }
        }
    }
    // L3: orders that are new since the previous snapshot.
    for current_order in current_orders {
        let peer = previous_orders.iter().find(|order| current_order == *order);
        match peer {
            Some(_) => {
                continue;
            }
            None => {
                debug!("A {:?}", current_order);
                book_additions.push(current_order.clone())
            }
        }
    }
    // Full snapshots can only be built when the opposite side is cached too.
    match maybe_other_orders {
        Some(other_orders) => {
            let (bids, asks) = match side {
                OrderbookSide::Bid => (current_orders, other_orders),
                OrderbookSide::Ask => (other_orders, current_orders),
            };
            orderbook_update_sender
                .try_send(OrderbookFilterMessage::BookCheckpoint(BookCheckpoint {
                    slot,
                    write_version,
                    bids: bids.clone(),
                    asks: asks.clone(),
                    market: mkt.0.to_string(),
                }))
                .unwrap();
            let bid_levels = bids
                .iter()
                .group_by(|order| order.price)
                .into_iter()
                .map(|(price, group)| [price, group.map(|o| o.quantity).sum()])
                .collect();
            let ask_levels = asks
                .iter()
                .group_by(|order| order.price)
                .into_iter()
                .map(|(price, group)| [price, group.map(|o| o.quantity).sum()])
                .collect();
            orderbook_update_sender
                .try_send(OrderbookFilterMessage::LevelCheckpoint(LevelCheckpoint {
                    slot,
                    write_version,
                    bids: bid_levels,
                    asks: ask_levels,
                    market: mkt.0.to_string(),
                }))
                .unwrap()
        }
        None => info!("other bookside not in cache"),
    }
    if !level_update.is_empty() {
        orderbook_update_sender
            .try_send(OrderbookFilterMessage::LevelUpdate(LevelUpdate {
                market: mkt.0.to_string(),
                side: side.clone(),
                update: level_update,
                slot,
                write_version,
            }))
            .unwrap(); // TODO: use anyhow to bubble up error
        metric_level_updates.increment();
    }
    // An L3 delta is relevant whenever anything was added OR removed;
    // requiring both would drop pure-add (new order) and pure-remove
    // (cancellation) updates and leave client books stale.
    if !book_additions.is_empty() || !book_removals.is_empty() {
        orderbook_update_sender
            .try_send(OrderbookFilterMessage::BookUpdate(BookUpdate {
                market: mkt.0.to_string(),
                side,
                additions: book_additions,
                removals: book_removals,
                slot,
                write_version,
            }))
            .unwrap();
        metric_book_updates.increment();
    }
}
/// Build the orderbook filter pipeline.
///
/// Returns the sender halves for account writes and slot updates (to be fed
/// by the grpc/websocket source) and the receiver half carrying the
/// `OrderbookFilterMessage`s (L2/L3 deltas and checkpoints) produced by the
/// processing task spawned here.
pub async fn init(
    market_configs: Vec<(Pubkey, MarketConfig)>,
    serum_market_configs: Vec<(Pubkey, MarketConfig)>,
    metrics_sender: Metrics,
    exit: Arc<AtomicBool>,
) -> anyhow::Result<(
    async_channel::Sender<AccountWrite>,
    async_channel::Sender<SlotUpdate>,
    async_channel::Receiver<OrderbookFilterMessage>,
)> {
    let mut metric_book_events_new =
        metrics_sender.register_u64("orderbook_book_updates".into(), MetricType::Counter);
    let mut metric_level_events_new =
        metrics_sender.register_u64("orderbook_level_updates".into(), MetricType::Counter);
    // The actual message may want to also contain a retry count, if it self-reinserts on failure?
    let (account_write_queue_sender, account_write_queue_receiver) =
        async_channel::unbounded::<AccountWrite>();
    // Slot updates flowing from the outside into the single processing task below.
    let (slot_queue_sender, slot_queue_receiver) = async_channel::unbounded::<SlotUpdate>();
    // Book updates can be consumed by client connections, they contain L2 and L3 updates for all markets
    let (book_update_sender, book_update_receiver) =
        async_channel::unbounded::<OrderbookFilterMessage>();
    let mut chain_cache = ChainData::new();
    let mut chain_data_metrics = ChainDataMetrics::new(&metrics_sender);
    // Last published L3 state per bookside pubkey (perp and serum kept separate).
    let mut bookside_cache: HashMap<String, Vec<Order>> = HashMap::new();
    let mut serum_bookside_cache: HashMap<String, Vec<Order>> = HashMap::new();
    // (slot, write_version) of the last processed update per account pubkey.
    let mut last_write_versions = HashMap::<String, (u64, u64)>::new();
    let mut relevant_pubkeys = [market_configs.clone(), serum_market_configs.clone()]
        .concat()
        .iter()
        .flat_map(|m| [m.1.bids, m.1.asks])
        .collect::<HashSet<Pubkey>>();
    // Perp booksides also need their oracle accounts (used below to evaluate
    // order validity via oracle_state_unchecked / iter_valid).
    relevant_pubkeys.extend(market_configs.iter().map(|(_, cfg)| cfg.oracle));
    info!("relevant_pubkeys {:?}", relevant_pubkeys);
    // update handling thread, reads both slots and account updates
    tokio::spawn(async move {
        loop {
            if exit.load(Ordering::Relaxed) {
                warn!("shutting down orderbook_filter...");
                break;
            }
            // Fold the next account write or slot update into the chain cache.
            tokio::select! {
                Ok(account_write) = account_write_queue_receiver.recv() => {
                    if !relevant_pubkeys.contains(&account_write.pubkey) {
                        continue;
                    }
                    chain_cache.update_account(
                        account_write.pubkey,
                        AccountData {
                            slot: account_write.slot,
                            write_version: account_write.write_version,
                            account: WritableAccount::create(
                                account_write.lamports,
                                account_write.data.clone(),
                                account_write.owner,
                                account_write.executable,
                                account_write.rent_epoch as Epoch,
                            ),
                        },
                    );
                }
                Ok(slot_update) = slot_queue_receiver.recv() => {
                    chain_cache.update_slot(SlotData {
                        slot: slot_update.slot,
                        parent: slot_update.parent,
                        status: slot_update.status,
                        chain: 0,
                    });
                }
            }
            chain_data_metrics.report(&chain_cache);
            // Perp markets: re-publish a side when either the bookside account
            // or its oracle changed (oracle-pegged orders depend on the price).
            for mkt in market_configs.iter() {
                for side in 0..2 {
                    let mkt_pk = mkt.0;
                    let side_pk = if side == 0 { mkt.1.bids } else { mkt.1.asks };
                    let other_side_pk = if side == 0 { mkt.1.asks } else { mkt.1.bids };
                    let oracle_pk = mkt.1.oracle;
                    let last_side_write_version = last_write_versions
                        .get(&side_pk.to_string())
                        .unwrap_or(&(0, 0));
                    let last_oracle_write_version = last_write_versions
                        .get(&oracle_pk.to_string())
                        .unwrap_or(&(0, 0));
                    match (
                        chain_cache.account(&side_pk),
                        chain_cache.account(&oracle_pk),
                    ) {
                        (Ok(side_info), Ok(oracle_info)) => {
                            let side_pk_string = side_pk.to_string();
                            let oracle_pk_string = oracle_pk.to_string();
                            if !side_info
                                .is_newer_than(last_side_write_version.0, last_side_write_version.1)
                                && !oracle_info.is_newer_than(
                                    last_oracle_write_version.0,
                                    last_oracle_write_version.1,
                                )
                            {
                                // neither bookside nor oracle was updated
                                continue;
                            }
                            last_write_versions.insert(
                                side_pk_string.clone(),
                                (side_info.slot, side_info.write_version),
                            );
                            last_write_versions.insert(
                                oracle_pk_string.clone(),
                                (oracle_info.slot, oracle_info.write_version),
                            );
                            let keyed_account = KeyedSharedDataAccountReader {
                                key: oracle_pk,
                                shared: oracle_info.account.clone(),
                            };
                            let oracle_config = OracleConfigParams {
                                conf_filter: 100_000.0, // use a large value to never fail the confidence check
                                max_staleness_slots: None, // don't check oracle staleness to get an orderbook
                            };
                            if let Ok(unchecked_oracle_state) =
                                oracle_state_unchecked(&keyed_account, mkt.1.base_decimals)
                            {
                                if unchecked_oracle_state
                                    .check_confidence_and_maybe_staleness(
                                        &oracle_pk,
                                        &oracle_config.to_oracle_config(),
                                        None, // force this to always return a price no matter how stale
                                    )
                                    .is_ok()
                                {
                                    let oracle_price = unchecked_oracle_state.price;
                                    let account = &side_info.account;
                                    let bookside: BookSide = BookSide::try_deserialize(
                                        solana_sdk::account::ReadableAccount::data(account)
                                            .borrow_mut(),
                                    )
                                    .unwrap();
                                    let side = match bookside.nodes.order_tree_type() {
                                        OrderTreeType::Bids => OrderbookSide::Bid,
                                        OrderTreeType::Asks => OrderbookSide::Ask,
                                    };
                                    let time_now = SystemTime::now()
                                        .duration_since(UNIX_EPOCH)
                                        .unwrap()
                                        .as_secs();
                                    // Convert the oracle price to lots for iter_valid.
                                    let oracle_price_lots = (oracle_price
                                        * I80F48::from_num(mkt.1.base_lot_size)
                                        / I80F48::from_num(mkt.1.quote_lot_size))
                                    .to_num();
                                    // Flatten the currently-valid orders into UI-priced L3 entries.
                                    let bookside: Vec<Order> = bookside
                                        .iter_valid(time_now, oracle_price_lots)
                                        .map(|item| Order {
                                            price: price_lots_to_ui_perp(
                                                item.price_lots,
                                                mkt.1.base_decimals,
                                                mkt.1.quote_decimals,
                                                mkt.1.base_lot_size,
                                                mkt.1.quote_lot_size,
                                            ),
                                            quantity: base_lots_to_ui_perp(
                                                item.node.quantity,
                                                mkt.1.base_decimals,
                                                mkt.1.base_lot_size,
                                            ),
                                            owner_pubkey: item.node.owner.to_string(),
                                        })
                                        .collect();
                                    let other_bookside =
                                        bookside_cache.get(&other_side_pk.to_string());
                                    // Only publish diffs once a previous state exists;
                                    // the first snapshot just seeds the cache.
                                    match bookside_cache.get(&side_pk_string) {
                                        Some(old_bookside) => publish_changes(
                                            side_info.slot,
                                            side_info.write_version,
                                            mkt,
                                            side,
                                            &bookside,
                                            old_bookside,
                                            other_bookside,
                                            &book_update_sender,
                                            &mut metric_book_events_new,
                                            &mut metric_level_events_new,
                                        ),
                                        _ => info!(
                                            "bookside_cache could not find {}",
                                            side_pk_string
                                        ),
                                    }
                                    bookside_cache.insert(side_pk_string.clone(), bookside.clone());
                                }
                            }
                        }
                        (side, oracle) => debug!(
                            "chain_cache could not find for mkt={} side={} oracle={}",
                            mkt_pk,
                            side.is_err(),
                            oracle.is_err()
                        ),
                    }
                }
            }
            // Serum/openbook markets: no oracle involved, plain slab decoding.
            for mkt in serum_market_configs.iter() {
                for side in 0..2 {
                    let side_pk = if side == 0 { mkt.1.bids } else { mkt.1.asks };
                    let other_side_pk = if side == 0 { mkt.1.asks } else { mkt.1.bids };
                    let last_write_version = last_write_versions
                        .get(&side_pk.to_string())
                        .unwrap_or(&(0, 0));
                    match chain_cache.account(&side_pk) {
                        Ok(account_info) => {
                            let side_pk_string = side_pk.to_string();
                            let write_version = (account_info.slot, account_info.write_version);
                            // Skip anything not strictly newer than the last
                            // processed update (also guards against late stale data).
                            if write_version <= *last_write_version {
                                continue;
                            }
                            last_write_versions.insert(side_pk_string.clone(), write_version);
                            debug!("W {}", mkt.1.name);
                            let account = &mut account_info.account.clone();
                            let data = account.data_as_mut_slice();
                            let len = data.len();
                            // Strip serum's account padding (5 bytes front, 7 bytes
                            // back), then skip the orderbook state header to reach
                            // the slab itself.
                            let inner = &mut data[5..len - 7];
                            let slab = Slab::new(&mut inner[size_of::<OrderBookStateHeader>()..]);
                            let bookside: Vec<Order> = slab
                                .iter(side == 0)
                                .map(|item| {
                                    let owner_bytes: [u8; 32] = bytemuck::cast(item.owner());
                                    Order {
                                        price: price_lots_to_ui(
                                            u64::from(item.price()) as i64,
                                            mkt.1.base_decimals,
                                            mkt.1.quote_decimals,
                                            mkt.1.base_lot_size,
                                            mkt.1.quote_lot_size,
                                        ),
                                        quantity: base_lots_to_ui(
                                            item.quantity() as i64,
                                            mkt.1.base_decimals,
                                            mkt.1.quote_decimals,
                                            mkt.1.base_lot_size,
                                            mkt.1.quote_lot_size,
                                        ),
                                        owner_pubkey: Pubkey::new_from_array(owner_bytes)
                                            .to_string(),
                                    }
                                })
                                .collect();
                            let other_bookside =
                                serum_bookside_cache.get(&other_side_pk.to_string());
                            match serum_bookside_cache.get(&side_pk_string) {
                                Some(old_bookside) => publish_changes(
                                    account_info.slot,
                                    account_info.write_version,
                                    mkt,
                                    if side == 0 {
                                        OrderbookSide::Bid
                                    } else {
                                        OrderbookSide::Ask
                                    },
                                    &bookside,
                                    old_bookside,
                                    other_bookside,
                                    &book_update_sender,
                                    &mut metric_book_events_new,
                                    &mut metric_level_events_new,
                                ),
                                _ => info!("bookside_cache could not find {}", side_pk_string),
                            }
                            serum_bookside_cache.insert(side_pk_string.clone(), bookside);
                        }
                        Err(_) => debug!("chain_cache could not find {}", side_pk),
                    }
                }
            }
        }
    });
    Ok((
        account_write_queue_sender,
        slot_queue_sender,
        book_update_receiver,
    ))
}

View File

@ -0,0 +1,35 @@
# Manifest for the pnl service: computes settleable perp pnl for all mango
# accounts and serves rankings over JSON-RPC.
[package]
name = "service-mango-pnl"
version = "0.1.0"
authors = ["Christian Kamm <mail@ckamm.de>"]
edition = "2021"
license = "AGPL-3.0-or-later"
# Solana/anchor stack is pinned at the workspace level; mango crates come from
# this repository.
[dependencies]
mango-feeds-connector = { workspace = true }
solana-client = { workspace = true }
solana-logger = { workspace = true }
solana-sdk = { workspace = true }
anchor-lang = { workspace = true }
anchor-client = { workspace = true }
mango-v4 = { path = "../../programs/mango-v4", features = ["client"] }
mango-v4-client = { path = "../../lib/client" }
bs58 = "0.3.1"
log = "0.4"
anyhow = "1.0"
toml = "0.5"
fixed = { workspace = true }
# "http-server" provides the HttpServerBuilder/HttpServerHandle used in main.rs.
jsonrpsee = { version = "0.9.0", features = ["http-server"] }
async-trait = "0.1"
async-channel = "1.6"
tokio = { version = "1", features = ["full"] }
serde = "1.0.130"
serde_derive = "1.0.130"

View File

@ -0,0 +1,31 @@
# Example configuration for service-mango-pnl (local development).
[metrics]
output_stdout = true
output_http = true
[source]
dedup_queue_size = 50000
rpc_ws_url = ""
# Geyser gRPC feed providing account writes and slot updates.
[[source.grpc_sources]]
name = "server"
connection_string = "http://[::1]:10000"
retry_connection_sleep_secs = 30
#[source.grpc_sources.tls]
#ca_cert_path = "ca.pem"
#client_cert_path = "client.pem"
#client_key_path = "client.pem"
#domain_name = "example.com"
[source.snapshot]
rpc_http_url = ""
program_id = "mv3ekLzLbnVPNxjSKvqBpU3ZeZXPQdEC3bp5MDEBG68"
[pnl]
update_interval_millis = 5000
mango_program = "mv3ekLzLbnVPNxjSKvqBpU3ZeZXPQdEC3bp5MDEBG68"
mango_group = "98pjRuQjK3qA6gXts96PqZT4Ze5QmnCmt3QYjhbUSPue"
# NOTE(review): mango_cache is not a field of PnlConfig and appears unused —
# confirm before removing.
mango_cache = "EBDRoayCDDUvDgCimta45ajQeXbexv7aKqJubruqpyvu"
[jsonrpc_server]
bind_address = "127.0.0.1:8889"

View File

@ -0,0 +1,31 @@
[metrics]
output_stdout = true
output_http = true
[source]
dedup_queue_size = 50000
rpc_ws_url = ""
[[source.grpc_sources]]
name = "accountsdb-client"
connection_string = "$GEYSER_CONNECTION_STRING"
retry_connection_sleep_secs = 30
[source.grpc_sources.tls]
ca_cert_path = "$GEYSER_CA_CERT"
client_cert_path = "$GEYSER_CLIENT_CERT"
client_key_path = "$GEYSER_CLIENT_CERT"
domain_name = "$GEYSER_CERT_DOMAIN"
[source.snapshot]
rpc_http_url = "$RPC_HTTP_URL"
program_id = "mv3ekLzLbnVPNxjSKvqBpU3ZeZXPQdEC3bp5MDEBG68"
[pnl]
update_interval_millis = 5000
mango_program = "mv3ekLzLbnVPNxjSKvqBpU3ZeZXPQdEC3bp5MDEBG68"
mango_group = "98pjRuQjK3qA6gXts96PqZT4Ze5QmnCmt3QYjhbUSPue"
mango_cache = "EBDRoayCDDUvDgCimta45ajQeXbexv7aKqJubruqpyvu"
[jsonrpc_server]
bind_address = "0.0.0.0:2052"

View File

@ -0,0 +1,329 @@
mod memory_target;
use {
log::*,
mango_feeds_connector::chain_data::ChainData,
serde_derive::{Deserialize, Serialize},
solana_sdk::pubkey::Pubkey,
std::str::FromStr,
std::{
fs::File,
io::Read,
mem::size_of,
sync::{atomic::AtomicBool, Arc, RwLock},
time::Duration,
},
};
use anchor_client::Cluster;
use anchor_lang::Discriminator;
use fixed::types::I80F48;
use mango_feeds_connector::metrics::*;
use mango_v4::state::{MangoAccount, MangoAccountValue, PerpMarketIndex};
use mango_v4_client::{
chain_data, health_cache, AccountFetcher, Client, MangoGroupContext, TransactionBuilderConfig,
};
use solana_sdk::commitment_config::CommitmentConfig;
use solana_sdk::{account::ReadableAccount, signature::Keypair};
/// Settings for the pnl computation loop ([pnl] section of the config file).
#[derive(Clone, Debug, Deserialize)]
pub struct PnlConfig {
    /// How often to recompute pnl from the chain-data snapshot, in milliseconds.
    pub update_interval_millis: u64,
    /// Mango program id (base58).
    pub mango_program: String,
    /// Mango group address (base58).
    pub mango_group: String,
}
/// Settings for the JSON-RPC server ([jsonrpc_server] section of the config file).
#[derive(Clone, Debug, Deserialize)]
pub struct JsonRpcConfig {
    /// host:port the HTTP server binds to.
    pub bind_address: String,
}
/// Top-level layout of the TOML config file.
#[derive(Clone, Debug, Deserialize)]
pub struct Config {
    pub source: SourceConfig,
    pub metrics: MetricsConfig,
    pub pnl: PnlConfig,
    pub jsonrpc_server: JsonRpcConfig,
}
/// Per-account list of (perp market, settleable pnl) pairs, refreshed periodically.
type PnlData = Vec<(Pubkey, Vec<(PerpMarketIndex, I80F48)>)>;
/// Compute the settleable perp pnl for one mango account.
///
/// Positions with a non-zero base position are skipped. Positive quote pnl is
/// reported as-is; negative pnl is capped by the account's `perp_max_settle`
/// (settle health) and skipped entirely when nothing could be settled.
async fn compute_pnl(
    context: Arc<MangoGroupContext>,
    account_fetcher: Arc<impl AccountFetcher>,
    account: &MangoAccountValue,
) -> anyhow::Result<Vec<(PerpMarketIndex, I80F48)>> {
    let health_cache = health_cache::new(&context, account_fetcher.as_ref(), account).await?;
    let pnls = account
        .active_perp_positions()
        .filter_map(|pp| {
            // Only consider pure quote positions.
            if pp.base_position_lots() != 0 {
                return None;
            }
            let pnl = pp.quote_position_native();
            let settle_token_index = context
                .perp_markets
                .get(&pp.market_index)
                .unwrap()
                .market
                .settle_token_index;
            let perp_settle_health = health_cache.perp_max_settle(settle_token_index).unwrap();
            let settleable_pnl = if pnl > 0 {
                pnl
            } else if pnl < 0 && perp_settle_health > 0 {
                // Negative pnl can only be settled up to the settle health.
                pnl.max(-perp_settle_health)
            } else {
                return None;
            };
            // Re-wrap the raw bits to convert between the two I80F48 types
            // (mango-v4's `fixed` version vs. this crate's).
            Some((pp.market_index, I80F48::from_bits(settleable_pnl.to_bits())))
        })
        .collect::<Vec<(PerpMarketIndex, I80F48)>>();
    Ok(pnls)
}
// regularly updates pnl_data from chain_data
/// Spawns a background task that, every `update_interval_millis`, snapshots
/// `chain_data`, recomputes settleable pnl for every mango account of the
/// configured group, and replaces `pnl_data` wholesale. Also updates the
/// tracked-accounts gauge.
fn start_pnl_updater(
    config: PnlConfig,
    context: Arc<MangoGroupContext>,
    account_fetcher: Arc<impl AccountFetcher + 'static>,
    chain_data: Arc<RwLock<ChainData>>,
    pnl_data: Arc<RwLock<PnlData>>,
    metrics_pnls_tracked: MetricU64,
) {
    let program_pk = Pubkey::from_str(&config.mango_program).unwrap();
    let group_pk = Pubkey::from_str(&config.mango_group).unwrap();
    tokio::spawn(async move {
        loop {
            tokio::time::sleep(std::time::Duration::from_millis(
                config.update_interval_millis,
            ))
            .await;
            let snapshot = chain_data.read().unwrap().accounts_snapshot();
            // get the group and cache now
            let group = snapshot.get(&group_pk);
            if group.is_none() {
                continue;
            }
            let mut pnls = Vec::with_capacity(snapshot.len());
            for (pubkey, account) in snapshot.iter() {
                let owner = account.account.owner();
                let data = account.account.data();
                // Filter for mango accounts: exact size, discriminator, owner.
                // NOTE(review): the exact size_of::<MangoAccount>() check
                // presumably matches only default-layout accounts — confirm
                // that resized accounts aren't silently skipped here.
                if data.len() != size_of::<MangoAccount>()
                    || data[0..8] != MangoAccount::discriminator()
                    || owner != &program_pk
                {
                    continue;
                }
                let mango_account = MangoAccountValue::from_bytes(&data[8..]).unwrap();
                if mango_account.fixed.group != group_pk {
                    continue;
                }
                // NOTE(review): unwrap panics the updater task on any pnl
                // computation error (e.g. missing oracle in the fetcher).
                let pnl_vals =
                    compute_pnl(context.clone(), account_fetcher.clone(), &mango_account)
                        .await
                        .unwrap();
                // Alternatively, we could prepare the sorted and limited lists for each
                // market here. That would be faster and cause less contention on the pnl_data
                // lock, but it looks like it's very far from being an issue.
                pnls.push((*pubkey, pnl_vals));
            }
            *pnl_data.write().unwrap() = pnls;
            metrics_pnls_tracked
                .clone()
                .set(pnl_data.read().unwrap().len() as u64)
        }
    });
}
/// Request parameters for the `unsettledPnlRanked` JSON-RPC method.
#[derive(Serialize, Deserialize, Debug)]
struct UnsettledPnlRankedRequest {
    /// Perp market index to rank by.
    market_index: u8,
    /// Maximum number of entries to return (server rejects values > 20).
    limit: u8,
    /// Sort direction: "ASC" or "DESC".
    order: String,
}
/// One entry of the ranked `unsettledPnlRanked` response.
#[derive(Serialize, Deserialize)]
struct PnlResponseItem {
    pnl: f64,
    pubkey: String,
}
use jsonrpsee::http_server::HttpServerHandle;
use mango_feeds_connector::{
grpc_plugin_source, metrics, EntityFilter, FilterConfig, MetricsConfig, SourceConfig,
};
/// Start the JSON-RPC HTTP server exposing `unsettledPnlRanked`.
///
/// The method returns the top `limit` accounts ranked by unsettled pnl in the
/// requested perp market, ascending or descending. The returned handle keeps
/// the server alive; dropping it shuts the server down.
fn start_jsonrpc_server(
    config: JsonRpcConfig,
    pnl_data: Arc<RwLock<PnlData>>,
    metrics_reqs: MetricU64,
    metrics_invalid_reqs: MetricU64,
) -> anyhow::Result<HttpServerHandle> {
    use jsonrpsee::core::Error;
    use jsonrpsee::http_server::{HttpServerBuilder, RpcModule};
    use jsonrpsee::types::error::CallError;
    use std::net::SocketAddr;
    let server = HttpServerBuilder::default().build(config.bind_address.parse::<SocketAddr>()?)?;
    let mut module = RpcModule::new(());
    module.register_method("unsettledPnlRanked", move |params, _| {
        let req = params.parse::<UnsettledPnlRankedRequest>()?;
        metrics_reqs.clone().increment();
        // Helper to reject a request with an invalid-params error.
        let invalid =
            |s: &'static str| Err(Error::Call(CallError::InvalidParams(anyhow::anyhow!(s))));
        let limit = req.limit as usize;
        if limit > 20 {
            metrics_invalid_reqs.clone().increment();
            return invalid("'limit' must be <= 20");
        }
        let market_index = req.market_index as u16;
        // if market_index >= MAX_PAIRS {
        //     metrics_invalid_reqs.clone().increment();
        //     return invalid("'market_index' must be < MAX_PAIRS");
        // }
        if req.order != "ASC" && req.order != "DESC" {
            metrics_invalid_reqs.clone().increment();
            return invalid("'order' must be ASC or DESC");
        }
        // write lock, because we sort in-place...
        let mut pnls = pnl_data.write().unwrap();
        // Rank by this market's pnl entry; accounts without an entry compare
        // as `None`.
        if req.order == "ASC" {
            pnls.sort_unstable_by(|a, b| {
                a.1.iter()
                    .find(|x| x.0 == market_index)
                    .cmp(&b.1.iter().find(|x| x.0 == market_index))
            });
        } else {
            pnls.sort_unstable_by(|a, b| {
                b.1.iter()
                    .find(|x| x.0 == market_index)
                    .cmp(&a.1.iter().find(|x| x.0 == market_index))
            });
        }
        // NOTE(review): the unwrap below panics if one of the top `limit`
        // accounts has no entry for this market — since `None` sorts first in
        // ASC order, confirm such accounts can't land within `limit`.
        let response = pnls
            .iter()
            .take(limit)
            .map(|p| PnlResponseItem {
                pnl: p
                    .1
                    .iter()
                    .find(|x| x.0 == market_index)
                    .unwrap()
                    .1
                    .to_num::<f64>(),
                pubkey: p.0.to_string(),
            })
            .collect::<Vec<_>>();
        Ok(response)
    })?;
    Ok(server.start(module)?)
}
/// Entry point for the pnl service: loads config, continuously computes
/// settleable perp pnl for all mango accounts of the configured group, and
/// serves rankings over JSON-RPC.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let exit: Arc<AtomicBool> = Arc::new(AtomicBool::new(false));
    let args: Vec<String> = std::env::args().collect();
    if args.len() < 2 {
        println!("requires a config file argument");
        return Ok(());
    }
    let config: Config = {
        let mut file = File::open(&args[1])?;
        let mut contents = String::new();
        file.read_to_string(&mut contents)?;
        toml::from_str(&contents).unwrap()
    };
    solana_logger::setup_with_default("info");
    info!("startup");
    let rpc_url = &config.source.snapshot.rpc_http_url;
    // NOTE(review): assumes an "https" URL; plain http would not be rewritten.
    let ws_url = rpc_url.replace("https", "wss");
    let rpc_timeout = Duration::from_secs(10);
    let cluster = Cluster::Custom(rpc_url.clone(), ws_url.clone());
    let commitment = CommitmentConfig::processed();
    // Read-only usage: a throwaway keypair is fine since nothing is signed.
    let client = Client::new(
        cluster.clone(),
        commitment,
        Arc::new(Keypair::new()),
        Some(rpc_timeout),
        TransactionBuilderConfig {
            prioritization_micro_lamports: None,
        },
    );
    let group_context = Arc::new(
        MangoGroupContext::new_from_rpc(
            &client.rpc_async(),
            Pubkey::from_str(&config.pnl.mango_group).unwrap(),
        )
        .await?,
    );
    let chain_data = Arc::new(RwLock::new(chain_data::ChainData::new()));
    let account_fetcher = Arc::new(chain_data::AccountFetcher {
        chain_data: chain_data.clone(),
        rpc: client.rpc_async(),
    });
    let metrics_tx = metrics::start(config.metrics, "pnl".into());
    let metrics_reqs =
        metrics_tx.register_u64("pnl_jsonrpc_reqs_total".into(), MetricType::Counter);
    let metrics_invalid_reqs =
        metrics_tx.register_u64("pnl_jsonrpc_reqs_invalid_total".into(), MetricType::Counter);
    let metrics_pnls_tracked = metrics_tx.register_u64("pnl_num_tracked".into(), MetricType::Gauge);
    // BUG: This shadows the previous chain_data and means this can't actually get data!
    // `account_fetcher` above still holds the first (never-filled)
    // `chain_data::ChainData`, while `memory_target::init` below populates this
    // second, `mango_feeds_connector` one. The two appear to be distinct types
    // from different crates, so unifying them needs a conversion/bridge rather
    // than simply deleting this line — TODO fix.
    let chain_data = Arc::new(RwLock::new(ChainData::new()));
    let pnl_data = Arc::new(RwLock::new(PnlData::new()));
    start_pnl_updater(
        config.pnl.clone(),
        group_context.clone(),
        account_fetcher.clone(),
        chain_data.clone(),
        pnl_data.clone(),
        metrics_pnls_tracked,
    );
    // dropping the handle would exit the server
    let _http_server_handle = start_jsonrpc_server(
        config.jsonrpc_server.clone(),
        pnl_data,
        metrics_reqs,
        metrics_invalid_reqs,
    )?;
    // start filling chain_data from the grpc plugin source
    let (account_write_queue_sender, slot_queue_sender) = memory_target::init(chain_data).await?;
    let filter_config = FilterConfig {
        entity_filter: EntityFilter::filter_by_program_id(
            "4MangoMjqJ2firMokCjjGgoK8d4MXcrgL7XJaL3w6fVg",
        ),
    };
    // Blocks until the event stream ends or `exit` is set.
    grpc_plugin_source::process_events(
        &config.source,
        &filter_config,
        account_write_queue_sender,
        slot_queue_sender,
        metrics_tx.clone(),
        exit.clone(),
    )
    .await;
    Ok(())
}

View File

@ -0,0 +1,52 @@
use mango_feeds_connector::chain_data::*;
use mango_feeds_connector::*;
use solana_sdk::{account::WritableAccount, clock::Epoch};
use std::sync::{Arc, RwLock};
/// Set up an in-memory sink that applies streamed account and slot updates
/// to the shared `ChainData`.
///
/// Returns the sender halves of two unbounded channels (account writes and
/// slot updates); a background task owns the receiver halves and applies
/// every message under the `chain_data` write lock.
pub async fn init(
    chain_data: Arc<RwLock<ChainData>>,
) -> anyhow::Result<(
    async_channel::Sender<AccountWrite>,
    async_channel::Sender<SlotUpdate>,
)> {
    let (account_write_queue_sender, account_write_queue_receiver) =
        async_channel::unbounded::<AccountWrite>();
    let (slot_queue_sender, slot_queue_receiver) = async_channel::unbounded::<SlotUpdate>();

    // Update handling task, reads both slots and account updates.
    tokio::spawn(async move {
        loop {
            tokio::select! {
                Ok(account_write) = account_write_queue_receiver.recv() => {
                    let mut chain = chain_data.write().unwrap();
                    chain.update_account(
                        account_write.pubkey,
                        AccountData {
                            slot: account_write.slot,
                            write_version: account_write.write_version,
                            account: WritableAccount::create(
                                account_write.lamports,
                                // Move the data instead of cloning it: the
                                // AccountWrite is owned and not used afterwards.
                                account_write.data,
                                account_write.owner,
                                account_write.executable,
                                account_write.rent_epoch as Epoch,
                            ),
                        },
                    );
                }
                Ok(slot_update) = slot_queue_receiver.recv() => {
                    let mut chain = chain_data.write().unwrap();
                    chain.update_slot(SlotData {
                        slot: slot_update.slot,
                        parent: slot_update.parent,
                        status: slot_update.status,
                        chain: 0,
                    });
                }
                // Both channels closed (all senders dropped): exit the task
                // cleanly instead of panicking with "all branches are disabled
                // and there is no else branch".
                else => break,
            }
        }
    });

    Ok((account_write_queue_sender, slot_queue_sender))
}

View File

@ -40,7 +40,7 @@ rand = "0.7"
serde = "1.0.130"
serde_derive = "1.0.130"
serde_json = "1.0.68"
serum_dex = { workspace = true, default-features=false,features = ["no-entrypoint", "program"] }
serum_dex = { workspace = true, features = ["no-entrypoint", "program"] }
shellexpand = "2.1.0"
solana-account-decoder = { workspace = true }
solana-client = { workspace = true }

23
cd/fills.toml Normal file
View File

@ -0,0 +1,23 @@
# Fly.io deployment config for the mango-fills websocket service.
app = "mango-fills"

# SIGTERM with a 30s grace period lets connected clients drain on deploy.
# NOTE(review): the orderbook/pnl configs use SIGINT with a 5s timeout —
# confirm whether this difference is intentional.
kill_signal = "SIGTERM"
kill_timeout = 30

[build]
dockerfile = "../Dockerfile"

[experimental]
# Binary and its config file are baked into the image by ../Dockerfile.
cmd = ["service-mango-fills", "fills-config.toml"]

[[services]]
internal_port = 8080
processes = ["app"]
protocol = "tcp"

[services.concurrency]
# Cap concurrent connections per instance.
hard_limit = 1024
soft_limit = 1024
type = "connections"

[metrics]
# Prometheus scrape endpoint exposed by the service.
path = "/metrics"
port = 9091

23
cd/orderbook.toml Normal file
View File

@ -0,0 +1,23 @@
# Fly.io deployment config for the mango-orderbook websocket service.
app = "mango-orderbook"

kill_signal = "SIGINT"
kill_timeout = 5

[build]
dockerfile = "../Dockerfile"

[experimental]
# Binary and its config file are baked into the image by ../Dockerfile.
cmd = ["service-mango-orderbook", "orderbook-config.toml"]

[[services]]
internal_port = 8080
processes = ["app"]
protocol = "tcp"

[services.concurrency]
# Cap concurrent connections per instance.
hard_limit = 1024
soft_limit = 1024
type = "connections"

[metrics]
# Prometheus scrape endpoint exposed by the service.
path = "/metrics"
port = 9091

23
cd/pnl.toml Normal file
View File

@ -0,0 +1,23 @@
# Fly.io deployment config for the mango-pnl jsonrpc service.
app = "mango-pnl"

kill_signal = "SIGINT"
kill_timeout = 5

[build]
dockerfile = "../Dockerfile"

[experimental]
# Binary and its config file are baked into the image by ../Dockerfile.
cmd = ["service-mango-pnl", "pnl-config.toml"]

[[services]]
# NOTE(review): 8081 here vs 8080 for fills/orderbook — presumably the
# jsonrpc server's bind port; confirm against pnl-config.toml.
internal_port = 8081
processes = ["app"]
protocol = "tcp"

[services.concurrency]
# Cap concurrent connections per instance.
hard_limit = 1024
soft_limit = 1024
type = "connections"

[metrics]
# Prometheus scrape endpoint exposed by the service.
path = "/metrics"
port = 9091

View File

@ -24,17 +24,18 @@ jsonrpc-core = "18.0.0"
jsonrpc-core-client = { version = "18.0.0", features = ["ws", "http", "tls"] }
mango-v4 = { path = "../../programs/mango-v4", features = ["client"] }
pyth-sdk-solana = { workspace = true }
serum_dex = { workspace = true, default-features=false,features = ["no-entrypoint", "program"] }
serum_dex = { workspace = true, features = ["no-entrypoint", "program"] }
shellexpand = "2.1.0"
solana-account-decoder = { workspace = true }
solana-client = { workspace = true }
solana-rpc = { workspace = true }
solana-sdk = { workspace = true }
solana-address-lookup-table-program = { workspace = true }
mango-feeds-connector = "0.1.1"
mango-feeds-connector = { workspace = true }
spl-associated-token-account = "1.0.3"
thiserror = "1.0.31"
reqwest = "0.11.11"
# note: should match the version used in solana
reqwest = "0.11.17"
tokio = { version = "1", features = ["full"] }
tokio-stream = { version = "0.1.9"}
serde = "1.0.141"
@ -42,4 +43,4 @@ serde_json = "1.0.82"
base64 = "0.13.0"
bincode = "1.3.3"
tracing = { version = "0.1", features = ["log"] }
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
tracing-subscriber = { version = "0.3", features = ["env-filter"] }

View File

@ -7,11 +7,13 @@ use solana_client::{
rpc_config::{RpcAccountInfoConfig, RpcContextConfig, RpcProgramAccountsConfig},
rpc_response::{OptionalContext, Response, RpcKeyedAccount},
};
use solana_rpc::rpc::{rpc_accounts::AccountsDataClient, rpc_minimal::MinimalClient};
use solana_rpc::rpc::rpc_minimal::MinimalClient;
use solana_sdk::{account::AccountSharedData, commitment_config::CommitmentConfig, pubkey::Pubkey};
use anyhow::Context;
use futures::{stream, StreamExt};
use solana_rpc::rpc::rpc_accounts::AccountsDataClient;
use solana_rpc::rpc::rpc_accounts_scan::AccountsScanClient;
use std::str::FromStr;
use std::time::Duration;
use tokio::time;
@ -93,9 +95,18 @@ async fn feed_snapshots(
mango_oracles: Vec<Pubkey>,
sender: &async_channel::Sender<Message>,
) -> anyhow::Result<()> {
let rpc_client = http::connect_with_options::<AccountsDataClient>(&config.rpc_http_url, true)
.await
.map_err_anyhow()?;
// TODO replace the following with mango-feeds connector's snapshot.rs
// note: with solana 1.15 the gPA (get_program_accounts) rpc call was moved to the new rpc_accounts_scan module
let rpc_client_data =
http::connect_with_options::<AccountsDataClient>(&config.rpc_http_url, true)
.await
.map_err_anyhow()?;
let rpc_client_scan =
http::connect_with_options::<AccountsScanClient>(&config.rpc_http_url, true)
.await
.map_err_anyhow()?;
let account_info_config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Base64),
@ -114,7 +125,7 @@ async fn feed_snapshots(
let mut snapshot = AccountSnapshot::default();
// Get all accounts of the mango program
let response = rpc_client
let response = rpc_client_scan
.get_program_accounts(
mango_v4::id().to_string(),
Some(all_accounts_config.clone()),
@ -135,7 +146,7 @@ async fn feed_snapshots(
)> = stream::iter(mango_oracles)
.chunks(config.get_multiple_accounts_count)
.map(|keys| {
let rpc_client = &rpc_client;
let rpc_client = &rpc_client_data;
let account_info_config = account_info_config.clone();
async move {
let string_keys = keys.iter().map(|k| k.to_string()).collect::<Vec<_>>();
@ -179,7 +190,7 @@ async fn feed_snapshots(
)> = stream::iter(oo_account_pubkeys)
.chunks(config.get_multiple_accounts_count)
.map(|keys| {
let rpc_client = &rpc_client;
let rpc_client = &rpc_client_data;
let account_info_config = account_info_config.clone();
async move {
let string_keys = keys.iter().map(|k| k.to_string()).collect::<Vec<_>>();

View File

@ -0,0 +1,40 @@
[package]
name = "mango-feeds-lib"
version = "0.1.0"
authors = ["Christian Kamm <mail@ckamm.de>"]
edition = "2021"
license = "AGPL-3.0-or-later"

[lib]

[dependencies]
# Keep the solana crates on the same ~1.16.x line as the rest of the workspace.
solana-client = "~1.16.7"
solana-account-decoder = "~1.16.7"
solana-sdk = "~1.16.7"

# Workspace-wide fixed-point crate (shared with the mango-v4 program).
fixed = { workspace = true }

bs58 = "0.5"
base64 = "0.21.0"
log = "0.4"
rand = "0.7"
anyhow = "1.0"
bytes = "1.0"
itertools = "0.10.5"

serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"

futures = "0.3.17"
futures-core = "0.3"

async-channel = "1.6"
async-trait = "0.1"

bytemuck = "1.7.2"
chrono = "0.4.23"

[build-dependencies]
# NOTE(review): no build.rs or .proto files are visible in this library —
# confirm this build-dependency is actually needed.
tonic-build = { version = "0.6", features = ["compression"] }

View File

@ -0,0 +1,107 @@
pub mod serum;
use serde::{ser::SerializeStruct, Serialize, Serializer};
use solana_sdk::pubkey::Pubkey;
/// Success/message payload sent back to clients; serialized by the custom
/// `Serialize` impl as a struct named "Status".
#[derive(Clone, Debug)]
pub struct StatusResponse<'a> {
    pub success: bool,
    pub message: &'a str,
}
impl<'a> Serialize for StatusResponse<'a> {
    /// Emit a two-field struct named "Status": `{"success": …, "message": …}`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut s = serializer.serialize_struct("Status", 2)?;
        s.serialize_field("success", &self.success)?;
        s.serialize_field("message", self.message)?;
        s.end()
    }
}
/// Side of the orderbook; the explicit discriminants (bid = 0, ask = 1)
/// match the variant indices used by the custom `Serialize` impl.
#[derive(Clone, Debug)]
pub enum OrderbookSide {
    Bid = 0,
    Ask = 1,
}
impl Serialize for OrderbookSide {
    /// Serialize as a unit variant of an externally tagged enum named "Side",
    /// with lowercase wire names ("bid" / "ask").
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let (index, name) = match self {
            OrderbookSide::Bid => (0, "bid"),
            OrderbookSide::Ask => (1, "ask"),
        };
        serializer.serialize_unit_variant("Side", index, name)
    }
}
/// Static description of one market, used to locate its accounts and to
/// convert between native lots and UI numbers.
#[derive(Clone, Debug)]
pub struct MarketConfig {
    pub name: String,
    // Orderbook and event accounts of the market.
    pub bids: Pubkey,
    pub asks: Pubkey,
    pub event_queue: Pubkey,
    pub oracle: Pubkey,
    // Token decimals for base/quote, consumed by the conversion helpers below.
    pub base_decimals: u8,
    pub quote_decimals: u8,
    // Native units per lot, as configured on the market.
    pub base_lot_size: i64,
    pub quote_lot_size: i64,
}
/// Convert a quantity in base lots to a UI number of base tokens.
///
/// `_quote_decimals` and `_quote_lot_size` are unused; they are kept so the
/// signature matches the other lot-conversion helpers.
pub fn base_lots_to_ui(
    native: i64,
    base_decimals: u8,
    _quote_decimals: u8,
    base_lot_size: i64,
    _quote_lot_size: i64,
) -> f64 {
    // Multiply in f64: `native * base_lot_size` in i64 could overflow for
    // large quantities; the f64 product only loses precision, never wraps.
    native as f64 * base_lot_size as f64 / 10f64.powi(base_decimals.into())
}
/// Convert a perp quantity in base lots to a UI number of base tokens.
pub fn base_lots_to_ui_perp(native: i64, decimals: u8, base_lot_size: i64) -> f64 {
    let divisor = 10i64.pow(decimals.into()) as f64;
    let lot_size = base_lot_size as f64;
    native as f64 * (lot_size / divisor)
}
/// Convert a spot price in price lots to a UI price (quote per base token).
///
/// price = native * quote_lot_size * 10^base_decimals
///         / (base_lot_size * 10^quote_decimals)
pub fn price_lots_to_ui(
    native: i64,
    base_decimals: u8,
    quote_decimals: u8,
    base_lot_size: i64,
    quote_lot_size: i64,
) -> f64 {
    // Intermediate products are widened to u128 so they cannot overflow.
    let base_multiplier = 10i64.pow(base_decimals.into());
    let quote_multiplier = 10i64.pow(quote_decimals.into());
    let numerator = native as u128 * quote_lot_size as u128 * base_multiplier as u128;
    let denominator = base_lot_size as u128 * quote_multiplier as u128;
    numerator as f64 / denominator as f64
}
/// Convert a spot fill's native (price, size) pair to a UI price.
///
/// The previous version divided in i64 before casting to f64, which truncated
/// the result (any price below 1 came out as 0.0) and panicked on
/// `native_size == 0`. Dividing in f64 keeps the fractional part.
pub fn spot_price_to_ui(
    native: i64,
    native_size: i64,
    base_decimals: u8,
    quote_decimals: u8,
) -> f64 {
    // TODO: account for fees
    (native as f64 * 10f64.powi(base_decimals.into()))
        / (10f64.powi(quote_decimals.into()) * native_size as f64)
}
/// Convert a perp price in price lots to a UI price (quote per base token).
///
/// The previous version used `base_decimals.checked_sub(quote_decimals)
/// .unwrap()`, which panicked for markets where quote_decimals >
/// base_decimals. Using a signed exponent handles both directions; behavior
/// is unchanged when base_decimals >= quote_decimals.
pub fn price_lots_to_ui_perp(
    native: i64,
    base_decimals: u8,
    quote_decimals: u8,
    base_lot_size: i64,
    quote_lot_size: i64,
) -> f64 {
    let decimals = i32::from(base_decimals) - i32::from(quote_decimals);
    let multiplier = if decimals >= 0 {
        10u64.pow(decimals as u32) as f64
    } else {
        1.0 / 10u64.pow((-decimals) as u32) as f64
    };
    native as f64 * ((multiplier * quote_lot_size as f64) / base_lot_size as f64)
}

View File

@ -0,0 +1,12 @@
use bytemuck::{Pod, Zeroable};
/// Header of a serum event-queue account.
/// `#[repr(packed)]` removes all padding so the struct can be reinterpreted
/// directly from the raw on-chain account bytes via bytemuck.
#[derive(Copy, Clone, Debug)]
#[repr(packed)]
pub struct SerumEventQueueHeader {
    pub _account_flags: u64, // Initialized, EventQueue
    pub head: u64,
    pub count: u64,
    pub seq_num: u64,
}
// SAFETY: every field is a u64 (all bit patterns valid) and #[repr(packed)]
// guarantees there are no padding bytes, so Zeroable and Pod hold.
unsafe impl Zeroable for SerumEventQueueHeader {}
unsafe impl Pod for SerumEventQueueHeader {}

View File

@ -20,7 +20,7 @@ default = []
test-bpf = ["client"]
client = ["solana-sdk", "no-entrypoint"]
# Enables GPL-licensed parts of the code. See LICENSE file.
enable-gpl = []
enable-gpl = ["openbook-v2/enable-gpl"]
[dependencies]
# todo: when to fix, when to use caret? need a regular chore to bump dependencies
@ -29,7 +29,7 @@ anchor-lang = { workspace = true }
anchor-spl = { workspace = true }
arrayref = "0.3.6"
bincode = "1.3.3"
borsh = { version = "0.9.3", features = ["const-generics"] }
borsh = { version = "0.10.3", features = ["const-generics"] }
bytemuck = { version = "^1.7.2", features = ["min_const_generics"] }
default-env = "0.1.1"
derivative = "2.2.0"
@ -37,15 +37,20 @@ fixed = { workspace = true, features = ["serde", "borsh", "debug-assert-in-relea
num_enum = "0.5.1"
pyth-sdk-solana = { workspace = true }
serde = "^1.0"
serum_dex = { workspace = true, default-features=false, features = ["no-entrypoint", "program"] }
serum_dex = { workspace = true, features = ["no-entrypoint", "program"] }
solana-address-lookup-table-program = { workspace = true }
solana-program = { workspace = true }
solana-sdk = { workspace = true, default-features = false, optional = true }
solana-security-txt = "1.1.0"
static_assertions = "1.1"
switchboard-program = ">=0.2.0"
switchboard-v2 = "0.1.17"
openbook-v2 = { git = "https://github.com/openbook-dex/openbook-v2.git", default-features=false, features = ["no-entrypoint"] }
# note: switchboard-common 0.8.19 is broken - use 0.8.18 instead
switchboard-program = "0.2"
switchboard-v2 = { package = "switchboard-solana", version = "0.28" }
openbook-v2 = { git = "https://github.com/openbook-dex/openbook-v2.git", features = ["no-entrypoint"] }
[dev-dependencies]
solana-sdk = { workspace = true, default-features = false }

View File

@ -35,7 +35,7 @@ pub struct OpenbookV2LiqForceCancelOrders<'info> {
#[account(
has_one = bids,
has_one = asks,
has_one = event_queue,
has_one = event_heap,
)]
pub openbook_v2_market_external: AccountLoader<'info, Market>,
@ -49,7 +49,7 @@ pub struct OpenbookV2LiqForceCancelOrders<'info> {
#[account(mut)]
/// CHECK: event will be checked by openbook_v2
pub event_queue: UncheckedAccount<'info>,
pub event_heap: UncheckedAccount<'info>,
#[account(mut)]
pub market_base_vault: Box<Account<'info, TokenAccount>>,

View File

@ -33,7 +33,7 @@ pub struct OpenbookV2PlaceOrder<'info> {
mut,
has_one = bids,
has_one = asks,
has_one = event_queue,
has_one = event_heap,
)]
pub openbook_v2_market_external: AccountLoader<'info, Market>,
@ -47,7 +47,7 @@ pub struct OpenbookV2PlaceOrder<'info> {
#[account(mut)]
/// CHECK: event queue will be checked by openbook_v2
pub event_queue: UncheckedAccount<'info>,
pub event_heap: UncheckedAccount<'info>,
#[account(mut)]
/// CHECK: base vault will be checked by openbook_v2

View File

@ -34,7 +34,7 @@ pub struct OpenbookV2PlaceTakeOrder<'info> {
mut,
has_one = bids,
has_one = asks,
has_one = event_queue,
has_one = event_heap,
)]
pub openbook_v2_market_external: AccountLoader<'info, Market>,
@ -48,7 +48,7 @@ pub struct OpenbookV2PlaceTakeOrder<'info> {
#[account(mut)]
/// CHECK: Validated by the openbook_v2 cpi call
pub event_queue: UncheckedAccount<'info>,
pub event_heap: UncheckedAccount<'info>,
#[account(mut)]
/// CHECK: Validated by the openbook_v2 cpi call

View File

@ -296,4 +296,13 @@ mod tests {
}
}
}
// regression test for https://gitlab.com/tspiteri/fixed/-/issues/57
// see https://github.com/blockworks-foundation/fixed/issues/1
#[test]
fn bug_fixed_comparison_u64() {
let a: u64 = 66000;
let b: u64 = 1000;
assert!(I80F48::from(a) > b); // fails!
}
}

View File

@ -28,6 +28,7 @@ pub mod types;
pub mod instructions;
#[cfg(all(not(feature = "no-entrypoint"), not(feature = "enable-gpl")))]
compile_error!("compiling the program entrypoint without 'enable-gpl' makes no sense, enable it or use the 'cpi' or 'client' features");
use state::{

View File

@ -5,7 +5,7 @@ use mango_v4::accounts_ix::{Serum3OrderType, Serum3SelfTradeBehavior, Serum3Side
#[tokio::test]
async fn test_liq_tokens_force_cancel() -> Result<(), TransportError> {
let mut test_builder = TestContextBuilder::new();
test_builder.test().set_compute_max_units(95_000); // Serum3PlaceOrder needs 92.8k
test_builder.test().set_compute_max_units(105_000); // force cancel needs >100k
let context = test_builder.start_default().await;
let solana = &context.solana.clone();

View File

@ -314,7 +314,6 @@ async fn test_perp_fixed() -> Result<(), TransportError> {
)
.await
.is_err());
solana.advance_by_slots(1).await;
// Trade again to bring base_position_lots to 0
send_tx(

View File

@ -42,6 +42,7 @@ impl SolanaCookie {
self.logger_capture.write().unwrap().clear();
let mut context = self.context.borrow_mut();
let blockhash = context.get_new_latest_blockhash().await?;
let mut transaction =
Transaction::new_with_payer(&instructions, Some(&context.payer.pubkey()));
@ -57,10 +58,7 @@ impl SolanaCookie {
all_signers.extend(signer_keypair_refs.iter());
}
// This fails when warping is involved - https://gitmemory.com/issue/solana-labs/solana/18201/868325078
// let recent_blockhash = self.context.banks_client.get_recent_blockhash().await.unwrap();
transaction.sign(&all_signers, context.last_blockhash);
transaction.sign(&all_signers, blockhash);
let result = context
.banks_client
@ -75,10 +73,6 @@ impl SolanaCookie {
drop(tx_log_lock);
drop(context);
// This makes sure every transaction gets a new blockhash, avoiding issues where sending
// the same transaction again would lead to it being skipped.
self.advance_by_slots(1).await;
result
}

View File

@ -1,3 +1,3 @@
[toolchain]
channel = "1.65"
channel = "1.69"
components = ["rustfmt", "clippy"]