Merge pull request #16 from blockworks-foundation/update-solana-1-16-anchor-28
Update solana 1.16 anchor 28
This commit is contained in:
commit
a3b7391c63
|
@ -1,3 +0,0 @@
|
|||
target
|
||||
node_modules
|
||||
dist
|
|
@ -1,59 +0,0 @@
|
|||
name: Publish Docker Image
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
tags:
|
||||
# use this tag to test CI build for this workflow without merging into main branch
|
||||
- experimental-github-actions
|
||||
|
||||
env:
|
||||
IMAGE: mango-feeds
|
||||
ORG: blockworks-foundation
|
||||
REGISTRY: ghcr.io
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Set rust version
|
||||
run: |
|
||||
source ci/rust-version.sh
|
||||
echo "RUST_STABLE=$rust_stable" | tee -a $GITHUB_ENV
|
||||
|
||||
# Use docker buildx
|
||||
- name: Use docker buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
id: buildx
|
||||
with:
|
||||
install: true
|
||||
buildkitd-flags: --debug
|
||||
|
||||
# Login to Registry
|
||||
- name: Login to Registry
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
# Build and push the image
|
||||
- name: Build and Push Image
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: .
|
||||
build-args: |
|
||||
RUST_TOOLCHAIN_VERSION=${{ env.RUST_STABLE }}
|
||||
push: true
|
||||
tags: |
|
||||
${{ env.REGISTRY }}/${{ env.ORG }}/${{ env.IMAGE }}:${{ github.sha }}
|
||||
${{ env.REGISTRY }}/${{ env.ORG }}/${{ env.IMAGE }}:latest
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
|
@ -1,31 +0,0 @@
|
|||
name: Deploy to Fly
|
||||
|
||||
on:
|
||||
workflow_run:
|
||||
workflows: ["Publish Docker Image"]
|
||||
branches: [production]
|
||||
types:
|
||||
- completed
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
imageTag:
|
||||
description: 'Docker Image Tag'
|
||||
required: true
|
||||
type: string
|
||||
default: 'latest'
|
||||
|
||||
env:
|
||||
FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }}
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Setup Fly
|
||||
uses: superfly/flyctl-actions/setup-flyctl@master
|
||||
|
||||
- name: Deploy
|
||||
run: flyctl deploy
|
File diff suppressed because it is too large
Load Diff
57
Cargo.toml
57
Cargo.toml
|
@ -1,19 +1,54 @@
|
|||
[workspace]
|
||||
members = [
|
||||
"connector",
|
||||
"lib",
|
||||
"service-mango-crank",
|
||||
"service-mango-fills",
|
||||
"service-mango-pnl",
|
||||
"service-mango-orderbook",
|
||||
]
|
||||
|
||||
|
||||
[patch.crates-io]
|
||||
# for gzip encoded responses
|
||||
jsonrpc-core-client = { git = "https://github.com/ckamm/jsonrpc.git", branch = "ckamm/http-with-gzip-default-v18.0.0" }
|
||||
# force usage of mango-v4 submodules with 1.14.9 support
|
||||
anchor-spl = { git = "https://github.com/blockworks-foundation/mango-v4", branch = "dev" }
|
||||
anchor-lang = { git = "https://github.com/blockworks-foundation/mango-v4", branch = "dev" }
|
||||
anchor-client = { git = "https://github.com/blockworks-foundation/mango-v4", branch = "dev" }
|
||||
switchboard-v2 = { git = "https://github.com/blockworks-foundation/sbv2-solana", branch = "mango-v4" }
|
||||
|
||||
[workspace.dependencies]
|
||||
solana-rpc = "~1.16.7"
|
||||
solana-client = "~1.16.7"
|
||||
solana-account-decoder = "~1.16.7"
|
||||
solana-sdk = "~1.16.7"
|
||||
solana-logger = "~1.16.7"
|
||||
|
||||
yellowstone-grpc-client = "1.9.0"
|
||||
yellowstone-grpc-proto = "1.9.0"
|
||||
|
||||
jsonrpc-core = "18.0.0"
|
||||
jsonrpc-core-client = { version = "18.0.0", features = ["ws", "http"] }
|
||||
|
||||
bs58 = "0.5"
|
||||
base64 = "0.21.0"
|
||||
log = "0.4"
|
||||
rand = "0.7"
|
||||
anyhow = "1.0"
|
||||
toml = "0.5"
|
||||
bytes = "1.0"
|
||||
itertools = "0.10.5"
|
||||
|
||||
serde = "1.0"
|
||||
serde_derive = "1.0"
|
||||
serde_json = "1.0"
|
||||
futures = "0.3.17"
|
||||
futures-core = "0.3"
|
||||
futures-channel = "0.3"
|
||||
futures-util = "0.3"
|
||||
ws = "^0.9.2"
|
||||
async-channel = "1.6"
|
||||
async-trait = "0.1"
|
||||
bytemuck = "1.7.2"
|
||||
jemallocator = "0.3.2"
|
||||
chrono = "0.4.23"
|
||||
clap = { version = "3.1.8", features = ["derive", "env"] }
|
||||
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio-tungstenite = "0.17"
|
||||
rustls = "0.20.8"
|
||||
|
||||
warp = "0.3"
|
||||
|
||||
|
||||
|
||||
|
|
26
Dockerfile
26
Dockerfile
|
@ -1,26 +0,0 @@
|
|||
# syntax = docker/dockerfile:1.2
|
||||
# Base image containing all binaries, deployed to gcr.io/mango-markets/mango-geyser-services:latest
|
||||
ARG RUST_TOOLCHAIN_VERSION
|
||||
FROM rust:${RUST_TOOLCHAIN_VERSION}-bullseye as base
|
||||
RUN cargo install cargo-chef
|
||||
RUN rustup component add rustfmt
|
||||
RUN apt-get update && apt-get install -y clang cmake ssh
|
||||
WORKDIR /app
|
||||
|
||||
FROM base AS plan
|
||||
COPY . .
|
||||
WORKDIR /app
|
||||
RUN cargo chef prepare --recipe-path recipe.json
|
||||
|
||||
FROM base as build
|
||||
COPY --from=plan /app/recipe.json recipe.json
|
||||
RUN cargo chef cook --release --recipe-path recipe.json
|
||||
COPY . .
|
||||
RUN cargo build --release --bin service-mango-fills --bin service-mango-pnl --bin service-mango-orderbook
|
||||
|
||||
FROM debian:bullseye-slim as run
|
||||
RUN apt-get update && apt-get -y install ca-certificates libc6
|
||||
COPY --from=build /app/target/release/service-mango-* /usr/local/bin/
|
||||
COPY --from=build /app/service-mango-pnl/conf/template-config.toml ./pnl-config.toml
|
||||
COPY --from=build /app/service-mango-fills/conf/template-config.toml ./fills-config.toml
|
||||
COPY --from=build /app/service-mango-orderbook/conf/template-config.toml ./orderbook-config.toml
|
19
README.md
19
README.md
|
@ -1,22 +1,9 @@
|
|||
# mango-geyser-services
|
||||
# mango-feeds-connector
|
||||
|
||||
Mango v4 Geyser Services
|
||||
Solana Websocket and Geyser Services
|
||||
|
||||
# Components
|
||||
|
||||
- [`lib/`](lib/)
|
||||
- [`connector/`](connector/)
|
||||
|
||||
Tools for building services
|
||||
|
||||
- [`service-mango-fills/`](service-mango-fills/)
|
||||
|
||||
A service providing lowest-latency, bandwidth conserving access to fill events for Mango V4 Perp and Openbook markets
|
||||
as they are processed by the rpc node.
|
||||
|
||||
- [`service-mango-pnl/`](service-mango-pnl/)
|
||||
|
||||
A service providing pre-computed account lists ordered by unsettled PnL per market
|
||||
|
||||
- [`service-mango-orderbook/`](service-mango-pnl/)
|
||||
|
||||
A service providing Orderbook L2/L3 state and delta updates for Mango V4 Perp and Openbook Spot markets
|
||||
|
|
|
@ -1,23 +0,0 @@
|
|||
app = "mango-fills"
|
||||
kill_signal = "SIGTERM"
|
||||
kill_timeout = 30
|
||||
|
||||
[build]
|
||||
dockerfile = "../Dockerfile"
|
||||
|
||||
[experimental]
|
||||
cmd = ["service-mango-fills", "fills-config.toml"]
|
||||
|
||||
[[services]]
|
||||
internal_port = 8080
|
||||
processes = ["app"]
|
||||
protocol = "tcp"
|
||||
|
||||
[services.concurrency]
|
||||
hard_limit = 1024
|
||||
soft_limit = 1024
|
||||
type = "connections"
|
||||
|
||||
[metrics]
|
||||
path = "/metrics"
|
||||
port = 9091
|
|
@ -1,23 +0,0 @@
|
|||
app = "mango-orderbook"
|
||||
kill_signal = "SIGINT"
|
||||
kill_timeout = 5
|
||||
|
||||
[build]
|
||||
dockerfile = "../Dockerfile"
|
||||
|
||||
[experimental]
|
||||
cmd = ["service-mango-orderbook", "orderbook-config.toml"]
|
||||
|
||||
[[services]]
|
||||
internal_port = 8080
|
||||
processes = ["app"]
|
||||
protocol = "tcp"
|
||||
|
||||
[services.concurrency]
|
||||
hard_limit = 1024
|
||||
soft_limit = 1024
|
||||
type = "connections"
|
||||
|
||||
[metrics]
|
||||
path = "/metrics"
|
||||
port = 9091
|
23
cd/pnl.toml
23
cd/pnl.toml
|
@ -1,23 +0,0 @@
|
|||
app = "mango-pnl"
|
||||
kill_signal = "SIGINT"
|
||||
kill_timeout = 5
|
||||
|
||||
[build]
|
||||
dockerfile = "../Dockerfile"
|
||||
|
||||
[experimental]
|
||||
cmd = ["service-mango-pnl", "pnl-config.toml"]
|
||||
|
||||
[[services]]
|
||||
internal_port = 8081
|
||||
processes = ["app"]
|
||||
protocol = "tcp"
|
||||
|
||||
[services.concurrency]
|
||||
hard_limit = 1024
|
||||
soft_limit = 1024
|
||||
type = "connections"
|
||||
|
||||
[metrics]
|
||||
path = "/metrics"
|
||||
port = 9091
|
40
cd/ws.toml
40
cd/ws.toml
|
@ -1,40 +0,0 @@
|
|||
app = "mango-geyser-services"
|
||||
|
||||
kill_signal = "SIGINT"
|
||||
kill_timeout = 5
|
||||
|
||||
[processes]
|
||||
fills = "service-mango-fills fills-config.toml"
|
||||
orderbook = "service-mango-orderbook orderbook-config.toml"
|
||||
|
||||
[[services]]
|
||||
processes = ["fills"]
|
||||
internal_port = 8080
|
||||
protocol = "tcp"
|
||||
|
||||
[[services.ports]]
|
||||
handlers = ["tls", "http"]
|
||||
port = "8080"
|
||||
|
||||
[services.concurrency]
|
||||
type = "connections"
|
||||
hard_limit = 1024
|
||||
soft_limit = 1024
|
||||
|
||||
[[services]]
|
||||
processes = ["orderbook"]
|
||||
internal_port = 8082
|
||||
protocol = "tcp"
|
||||
|
||||
[[services.ports]]
|
||||
handlers = ["tls", "http"]
|
||||
port = "8082"
|
||||
|
||||
[services.concurrency]
|
||||
type = "connections"
|
||||
hard_limit = 1024
|
||||
soft_limit = 1024
|
||||
|
||||
[metrics]
|
||||
port = 9091
|
||||
path = "/metrics"
|
|
@ -23,7 +23,7 @@
|
|||
if [[ -n $RUST_STABLE_VERSION ]]; then
|
||||
stable_version="$RUST_STABLE_VERSION"
|
||||
else
|
||||
stable_version=1.71.1
|
||||
stable_version=1.70.0
|
||||
fi
|
||||
|
||||
if [[ -n $RUST_NIGHTLY_VERSION ]]; then
|
||||
|
@ -91,7 +91,7 @@ export rust_nightly_docker_image=solanalabs/rust-nightly:"$nightly_version"
|
|||
if [[ -n $RUST_STABLE_VERSION ]]; then
|
||||
stable_version="$RUST_STABLE_VERSION"
|
||||
else
|
||||
stable_version=1.71.1
|
||||
stable_version=1.70.0
|
||||
fi
|
||||
|
||||
if [[ -n $RUST_NIGHTLY_VERSION ]]; then
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "mango-feeds-connector"
|
||||
version = "0.1.1"
|
||||
version = "0.2.0"
|
||||
authors = ["Christian Kamm <mail@ckamm.de>"]
|
||||
edition = "2021"
|
||||
license = "AGPL-3.0-or-later"
|
||||
|
@ -9,40 +9,39 @@ description = "Listen to Solana account updates via geyser or websockets"
|
|||
[lib]
|
||||
|
||||
[features]
|
||||
default = ["solana-1-14"]
|
||||
solana-1-14 = []
|
||||
solana-1-15 = []
|
||||
default = []
|
||||
|
||||
[dependencies]
|
||||
jsonrpc-core = "18.0.0"
|
||||
jsonrpc-core-client = { version = "18.0.0", features = ["ws", "http"] }
|
||||
jsonrpc-core = { workspace = true }
|
||||
jsonrpc-core-client = { workspace = true }
|
||||
|
||||
solana-rpc = "1.14.9"
|
||||
solana-client = "1.14.9"
|
||||
solana-account-decoder = "1.14.9"
|
||||
solana-sdk = "1.14.9"
|
||||
solana-rpc = { workspace = true }
|
||||
solana-client = { workspace = true }
|
||||
solana-account-decoder = { workspace = true }
|
||||
solana-sdk = { workspace = true }
|
||||
solana-logger = { workspace = true }
|
||||
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
rustls = "0.20.8"
|
||||
tokio = { workspace = true }
|
||||
rustls = { workspace = true }
|
||||
|
||||
serde = "1.0.130"
|
||||
serde_derive = "1.0.130"
|
||||
serde = { workspace = true }
|
||||
serde_derive = { workspace = true }
|
||||
|
||||
log = "0.4"
|
||||
anyhow = "1.0"
|
||||
log = { workspace = true }
|
||||
anyhow = { workspace = true }
|
||||
|
||||
itertools = "0.10.5"
|
||||
itertools = { workspace = true }
|
||||
|
||||
futures = "0.3.17"
|
||||
futures = { workspace = true }
|
||||
|
||||
async-channel = "1.6"
|
||||
async-trait = "0.1"
|
||||
async-channel = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
|
||||
warp = "0.3"
|
||||
warp = { workspace = true }
|
||||
|
||||
yellowstone-grpc-proto = "1.1.0"
|
||||
# 1.9.0+solana.1.16.1
|
||||
yellowstone-grpc-client = { workspace = true }
|
||||
yellowstone-grpc-proto = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
solana-logger = "*"
|
||||
clap = { version = "4.4.2", features = ["derive", "env"] }
|
||||
|
||||
clap = { workspace = true }
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
# mango-feeds-connector
|
||||
|
||||
## Installation
|
||||
```
|
||||
cargo add mango-feeds-connector
|
||||
```
|
|
@ -1,16 +1,14 @@
|
|||
#![allow(unused_variables)]
|
||||
|
||||
use clap::Parser;
|
||||
|
||||
use jsonrpc_core_client::transports::http;
|
||||
use mango_feeds_connector::GetProgramAccountsClient;
|
||||
use solana_account_decoder::UiAccountEncoding;
|
||||
use solana_client::rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig};
|
||||
use solana_client::rpc_response::OptionalContext;
|
||||
use solana_rpc::rpc::rpc_accounts::AccountsDataClient as GetProgramAccountsClient;
|
||||
use solana_sdk::commitment_config::CommitmentConfig;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
// use solana_rpc::rpc::rpc_accounts_scan::AccountsScanClient as GetProgramAccountsClient;
|
||||
|
||||
/// this tool tests the differences between rpc_accounts and rpc_accounts_scan (should be same)
|
||||
|
||||
#[derive(Parser, Debug, Clone)]
|
||||
#[clap()]
|
||||
|
@ -33,7 +31,7 @@ async fn main() -> anyhow::Result<()> {
|
|||
let rpc_http_url = cli.rpc_url;
|
||||
let program_id = cli.program_account;
|
||||
|
||||
let rpc_client = http::connect::<GetProgramAccountsClient>(&rpc_http_url)
|
||||
let rpc_client_scan = http::connect::<GetProgramAccountsClient>(&rpc_http_url)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
|
@ -50,7 +48,7 @@ async fn main() -> anyhow::Result<()> {
|
|||
with_context: Some(true),
|
||||
};
|
||||
|
||||
let snapshot = rpc_client
|
||||
let snapshot = rpc_client_scan
|
||||
.get_program_accounts(program_id.to_string(), Some(program_info_config))
|
||||
.await;
|
||||
if let OptionalContext::Context(snapshot_data) = snapshot.unwrap() {
|
||||
|
|
|
@ -17,9 +17,8 @@ use std::sync::Arc;
|
|||
use std::{collections::HashMap, env, str::FromStr, time::Duration};
|
||||
|
||||
use yellowstone_grpc_proto::prelude::{
|
||||
geyser_client::GeyserClient, subscribe_update, SubscribeRequest,
|
||||
geyser_client::GeyserClient, subscribe_update, CommitmentLevel, SubscribeRequest,
|
||||
SubscribeRequestFilterAccounts, SubscribeRequestFilterSlots, SubscribeUpdate,
|
||||
SubscribeUpdateSlotStatus,
|
||||
};
|
||||
|
||||
use crate::snapshot::{get_snapshot_gma, get_snapshot_gpa};
|
||||
|
@ -122,8 +121,11 @@ async fn feed_data_geyser(
|
|||
accounts,
|
||||
blocks,
|
||||
blocks_meta,
|
||||
entry: Default::default(),
|
||||
commitment: None,
|
||||
slots,
|
||||
transactions,
|
||||
accounts_data_slice: vec![],
|
||||
};
|
||||
info!("Going to send request: {:?}", request);
|
||||
|
||||
|
@ -195,7 +197,7 @@ async fn feed_data_geyser(
|
|||
match update.update_oneof.as_mut().expect("invalid grpc") {
|
||||
UpdateOneof::Slot(slot_update) => {
|
||||
let status = slot_update.status;
|
||||
if status == SubscribeUpdateSlotStatus::Finalized as i32 {
|
||||
if status == CommitmentLevel::Finalized as i32 {
|
||||
if first_full_slot == u64::MAX {
|
||||
// TODO: is this equivalent to before? what was highesy_write_slot?
|
||||
first_full_slot = slot_update.slot + 1;
|
||||
|
@ -263,6 +265,7 @@ async fn feed_data_geyser(
|
|||
UpdateOneof::Block(_) => {},
|
||||
UpdateOneof::Transaction(_) => {},
|
||||
UpdateOneof::BlockMeta(_) => {},
|
||||
UpdateOneof::Entry(_) => {},
|
||||
UpdateOneof::Ping(_) => {},
|
||||
}
|
||||
sender.send(Message::GrpcUpdate(update)).await.expect("send success");
|
||||
|
@ -503,12 +506,11 @@ pub async fn process_events(
|
|||
metric_slot_updates.increment();
|
||||
metric_slot_queue.set(slot_queue_sender.len() as u64);
|
||||
|
||||
let status =
|
||||
SubscribeUpdateSlotStatus::from_i32(update.status).map(|v| match v {
|
||||
SubscribeUpdateSlotStatus::Processed => SlotStatus::Processed,
|
||||
SubscribeUpdateSlotStatus::Confirmed => SlotStatus::Confirmed,
|
||||
SubscribeUpdateSlotStatus::Finalized => SlotStatus::Rooted,
|
||||
});
|
||||
let status = CommitmentLevel::from_i32(update.status).map(|v| match v {
|
||||
CommitmentLevel::Processed => SlotStatus::Processed,
|
||||
CommitmentLevel::Confirmed => SlotStatus::Confirmed,
|
||||
CommitmentLevel::Finalized => SlotStatus::Rooted,
|
||||
});
|
||||
if status.is_none() {
|
||||
error!("unexpected slot status: {}", update.status);
|
||||
continue;
|
||||
|
@ -527,6 +529,7 @@ pub async fn process_events(
|
|||
UpdateOneof::Block(_) => {}
|
||||
UpdateOneof::Transaction(_) => {}
|
||||
UpdateOneof::BlockMeta(_) => {}
|
||||
UpdateOneof::Entry(_) => {}
|
||||
UpdateOneof::Ping(_) => {}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -12,15 +12,7 @@ use {
|
|||
solana_sdk::{account::Account, pubkey::Pubkey},
|
||||
};
|
||||
|
||||
#[cfg(all(feature = "solana-1-14", feature = "solana-1-15"))]
|
||||
compile_error!(
|
||||
"feature \"solana-1-14\" and feature \"solana-1-15\" cannot be enabled at the same time"
|
||||
);
|
||||
|
||||
#[cfg(feature = "solana-1-14")]
|
||||
use solana_rpc::rpc::rpc_accounts::AccountsDataClient as GetProgramAccountsClient;
|
||||
#[cfg(feature = "solana-1-15")]
|
||||
use solana_rpc::rpc::rpc_accounts_scan::AccountsScanClient as GetProgramAccountsClient;
|
||||
pub use solana_rpc::rpc::rpc_accounts_scan::AccountsScanClient as GetProgramAccountsClient;
|
||||
|
||||
pub use solana_sdk;
|
||||
|
||||
|
|
|
@ -1,61 +0,0 @@
|
|||
[package]
|
||||
name = "mango-feeds-lib"
|
||||
version = "0.1.0"
|
||||
authors = ["Christian Kamm <mail@ckamm.de>"]
|
||||
edition = "2021"
|
||||
license = "AGPL-3.0-or-later"
|
||||
|
||||
[lib]
|
||||
|
||||
|
||||
[dependencies]
|
||||
mango-feeds-connector = { path = "../connector" }
|
||||
|
||||
jsonrpc-core = "18.0.0"
|
||||
jsonrpc-core-client = { version = "18.0.0", features = ["ws", "http"] }
|
||||
|
||||
solana-rpc = "~1.14.9"
|
||||
solana-client = "~1.14.9"
|
||||
solana-account-decoder = "~1.14.9"
|
||||
solana-sdk = "~1.14.9"
|
||||
|
||||
mango-v4 = { git = "https://github.com/blockworks-foundation/mango-v4", branch = "dev", features = ["client"] }
|
||||
bytemuck = "*"
|
||||
fixed = { version = "*", features = ["serde"] }
|
||||
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio-postgres = { version = "0.7", features = ["with-chrono-0_4"] }
|
||||
tokio-postgres-rustls = "0.9.0"
|
||||
postgres-types = { version = "0.2", features = ["array-impls", "derive", "with-chrono-0_4"] }
|
||||
postgres-native-tls = "0.5"
|
||||
native-tls = "0.2"
|
||||
|
||||
# postgres_query hasn't updated its crate in a while
|
||||
postgres_query = { git = "https://github.com/nolanderc/rust-postgres-query", rev = "b4422051c8a31fbba4a35f88004c1cefb1878dd5" }
|
||||
|
||||
serde = "1.0.130"
|
||||
serde_derive = "1.0.130"
|
||||
serde_json = "1.0.68"
|
||||
|
||||
bs58 = "*"
|
||||
base64 = "0.21.0"
|
||||
log = "0.4"
|
||||
rand = "0.7"
|
||||
anyhow = "1.0"
|
||||
bytes = "1.0"
|
||||
itertools = "0.10.5"
|
||||
chrono = "0.4.23"
|
||||
|
||||
futures = "0.3.17"
|
||||
futures-core = "0.3"
|
||||
|
||||
async-channel = "1.6"
|
||||
async-trait = "0.1"
|
||||
|
||||
anchor-lang = "0.25.0"
|
||||
|
||||
serum_dex = { git = "https://github.com/jup-ag/openbook-program", branch = "feat/expose-things", features = ["no-entrypoint"] }
|
||||
|
||||
[build-dependencies]
|
||||
tonic-build = { version = "0.6", features = ["compression"] }
|
||||
|
149
lib/src/lib.rs
149
lib/src/lib.rs
|
@ -1,149 +0,0 @@
|
|||
pub mod memory_target;
|
||||
pub mod postgres_types_numeric;
|
||||
pub mod serum;
|
||||
|
||||
use anchor_lang::prelude::Pubkey;
|
||||
use serde::{ser::SerializeStruct, Serialize, Serializer};
|
||||
use serde_derive::Deserialize;
|
||||
|
||||
pub use mango_feeds_connector::*;
|
||||
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
pub struct PostgresConfig {
|
||||
pub connection_string: String,
|
||||
/// Number of parallel postgres connections used for insertions
|
||||
pub connection_count: u64,
|
||||
/// Maximum batch size for inserts over one connection
|
||||
pub max_batch_size: usize,
|
||||
/// Max size of queues
|
||||
pub max_queue_size: usize,
|
||||
/// Number of queries retries before fatal error
|
||||
pub retry_query_max_count: u64,
|
||||
/// Seconds to sleep between query retries
|
||||
pub retry_query_sleep_secs: u64,
|
||||
/// Seconds to sleep between connection attempts
|
||||
pub retry_connection_sleep_secs: u64,
|
||||
/// Fatal error when the connection can't be reestablished this long
|
||||
pub fatal_connection_timeout_secs: u64,
|
||||
/// Allow invalid TLS certificates, passed to native_tls danger_accept_invalid_certs
|
||||
pub allow_invalid_certs: bool,
|
||||
pub tls: Option<PostgresTlsConfig>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
pub struct PostgresTlsConfig {
|
||||
/// CA Cert file or env var
|
||||
pub ca_cert_path: String,
|
||||
/// PKCS12 client cert path
|
||||
pub client_key_path: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
pub struct Config {
|
||||
pub postgres_target: PostgresConfig,
|
||||
pub source: SourceConfig,
|
||||
pub metrics: MetricsConfig,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct StatusResponse<'a> {
|
||||
pub success: bool,
|
||||
pub message: &'a str,
|
||||
}
|
||||
|
||||
impl<'a> Serialize for StatusResponse<'a> {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let mut state = serializer.serialize_struct("Status", 2)?;
|
||||
state.serialize_field("success", &self.success)?;
|
||||
state.serialize_field("message", &self.message)?;
|
||||
|
||||
state.end()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum OrderbookSide {
|
||||
Bid = 0,
|
||||
Ask = 1,
|
||||
}
|
||||
|
||||
impl Serialize for OrderbookSide {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
match *self {
|
||||
OrderbookSide::Bid => serializer.serialize_unit_variant("Side", 0, "bid"),
|
||||
OrderbookSide::Ask => serializer.serialize_unit_variant("Side", 1, "ask"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct MarketConfig {
|
||||
pub name: String,
|
||||
pub bids: Pubkey,
|
||||
pub asks: Pubkey,
|
||||
pub event_queue: Pubkey,
|
||||
pub oracle: Pubkey,
|
||||
pub base_decimals: u8,
|
||||
pub quote_decimals: u8,
|
||||
pub base_lot_size: i64,
|
||||
pub quote_lot_size: i64,
|
||||
}
|
||||
|
||||
pub fn base_lots_to_ui(
|
||||
native: i64,
|
||||
base_decimals: u8,
|
||||
_quote_decimals: u8,
|
||||
base_lot_size: i64,
|
||||
_quote_lot_size: i64,
|
||||
) -> f64 {
|
||||
(native * base_lot_size) as f64 / 10i64.pow(base_decimals.into()) as f64
|
||||
}
|
||||
|
||||
pub fn base_lots_to_ui_perp(native: i64, decimals: u8, base_lot_size: i64) -> f64 {
|
||||
native as f64 * (base_lot_size as f64 / (10i64.pow(decimals.into()) as f64))
|
||||
}
|
||||
|
||||
pub fn price_lots_to_ui(
|
||||
native: i64,
|
||||
base_decimals: u8,
|
||||
quote_decimals: u8,
|
||||
base_lot_size: i64,
|
||||
quote_lot_size: i64,
|
||||
) -> f64 {
|
||||
let base_multiplier = 10i64.pow(base_decimals.into());
|
||||
let quote_multiplier = 10i64.pow(quote_decimals.into());
|
||||
|
||||
let left: u128 = native as u128 * quote_lot_size as u128 * base_multiplier as u128;
|
||||
let right: u128 = base_lot_size as u128 * quote_multiplier as u128;
|
||||
|
||||
left as f64 / right as f64
|
||||
}
|
||||
|
||||
pub fn spot_price_to_ui(
|
||||
native: i64,
|
||||
native_size: i64,
|
||||
base_decimals: u8,
|
||||
quote_decimals: u8,
|
||||
) -> f64 {
|
||||
// TODO: account for fees
|
||||
((native * 10i64.pow(base_decimals.into())) / (10i64.pow(quote_decimals.into()) * native_size))
|
||||
as f64
|
||||
}
|
||||
|
||||
pub fn price_lots_to_ui_perp(
|
||||
native: i64,
|
||||
base_decimals: u8,
|
||||
quote_decimals: u8,
|
||||
base_lot_size: i64,
|
||||
quote_lot_size: i64,
|
||||
) -> f64 {
|
||||
let decimals = base_decimals.checked_sub(quote_decimals).unwrap();
|
||||
let multiplier = 10u64.pow(decimals.into()) as f64;
|
||||
native as f64 * ((multiplier * quote_lot_size as f64) / base_lot_size as f64)
|
||||
}
|
|
@ -1,54 +0,0 @@
|
|||
use crate::{
|
||||
chain_data::{AccountData, ChainData, SlotData},
|
||||
AccountWrite, SlotUpdate,
|
||||
};
|
||||
use solana_sdk::{account::WritableAccount, clock::Epoch};
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
pub async fn init(
|
||||
chain_data: Arc<RwLock<ChainData>>,
|
||||
) -> anyhow::Result<(
|
||||
async_channel::Sender<AccountWrite>,
|
||||
async_channel::Sender<SlotUpdate>,
|
||||
)> {
|
||||
let (account_write_queue_sender, account_write_queue_receiver) =
|
||||
async_channel::unbounded::<AccountWrite>();
|
||||
|
||||
let (slot_queue_sender, slot_queue_receiver) = async_channel::unbounded::<SlotUpdate>();
|
||||
|
||||
// update handling thread, reads both slots and account updates
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
tokio::select! {
|
||||
Ok(account_write) = account_write_queue_receiver.recv() => {
|
||||
let mut chain = chain_data.write().unwrap();
|
||||
chain.update_account(
|
||||
account_write.pubkey,
|
||||
AccountData {
|
||||
slot: account_write.slot,
|
||||
write_version: account_write.write_version,
|
||||
account: WritableAccount::create(
|
||||
account_write.lamports,
|
||||
account_write.data.clone(),
|
||||
account_write.owner,
|
||||
account_write.executable,
|
||||
account_write.rent_epoch as Epoch,
|
||||
),
|
||||
},
|
||||
);
|
||||
}
|
||||
Ok(slot_update) = slot_queue_receiver.recv() => {
|
||||
let mut chain = chain_data.write().unwrap();
|
||||
chain.update_slot(SlotData {
|
||||
slot: slot_update.slot,
|
||||
parent: slot_update.parent,
|
||||
status: slot_update.status,
|
||||
chain: 0,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Ok((account_write_queue_sender, slot_queue_sender))
|
||||
}
|
|
@ -1,222 +0,0 @@
|
|||
use {
|
||||
bytes::{BufMut, BytesMut},
|
||||
fixed::types::I80F48,
|
||||
postgres_types::{IsNull, ToSql, Type},
|
||||
std::{cmp, error},
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct SqlNumericI80F48(pub I80F48);
|
||||
|
||||
impl ToSql for SqlNumericI80F48 {
|
||||
fn to_sql(
|
||||
&self,
|
||||
_: &postgres_types::Type,
|
||||
out: &mut BytesMut,
|
||||
) -> Result<IsNull, Box<dyn error::Error + 'static + Sync + Send>> {
|
||||
if self.0 == 0 {
|
||||
out.reserve(10);
|
||||
out.put_u16(1); // num groups
|
||||
out.put_i16(0); // first group weight
|
||||
out.put_u16(0); // sign
|
||||
out.put_u16(0); // dscale
|
||||
out.put_i16(0); // first group
|
||||
return Ok(IsNull::No);
|
||||
}
|
||||
|
||||
let abs_val = self.0.abs();
|
||||
let decimals = abs_val.int_log10();
|
||||
let first_group_weight = ((decimals as f64) / 4.0f64).floor() as i16;
|
||||
let last_group_weight = -4i16;
|
||||
let num_groups = (first_group_weight - last_group_weight + 1) as usize;
|
||||
|
||||
// Reserve bytes
|
||||
out.reserve(8 + num_groups * 2);
|
||||
|
||||
// Number of groups
|
||||
out.put_u16(num_groups as u16);
|
||||
// Weight of first group
|
||||
out.put_i16(first_group_weight);
|
||||
// Sign
|
||||
out.put_u16(if self.0 < 0 { 0x4000 } else { 0x0000 });
|
||||
// DScale
|
||||
out.put_u16(16);
|
||||
|
||||
let mut int_part = abs_val.int().to_num::<u128>();
|
||||
let mut frac_part = (abs_val.frac() * I80F48::from_num(1e16)).to_num::<u64>();
|
||||
|
||||
//info!("i80f48 {} {} {} {} {}", self.0, decimals, first_group_weight, int_part, frac_part);
|
||||
|
||||
for weight in (0..=first_group_weight).rev() {
|
||||
let decimal_shift = 10000u128.pow(weight as u32);
|
||||
let v = (int_part / decimal_shift) & 0xFFFF;
|
||||
out.put_i16(v as i16);
|
||||
//info!("int {} {} {}", weight, v, int_part);
|
||||
int_part -= v * decimal_shift;
|
||||
}
|
||||
for weight in (last_group_weight..=cmp::min(first_group_weight, -1)).rev() {
|
||||
let decimal_shift = 10000u64.pow((4 + weight) as u32);
|
||||
let v = (frac_part / decimal_shift) & 0xFFFF;
|
||||
out.put_i16(v as i16);
|
||||
//info!("frac {} {} {}", weight, v, frac_part);
|
||||
frac_part -= v * decimal_shift;
|
||||
}
|
||||
|
||||
Ok(IsNull::No)
|
||||
}
|
||||
|
||||
fn accepts(ty: &Type) -> bool {
|
||||
matches!(*ty, Type::NUMERIC)
|
||||
}
|
||||
|
||||
postgres_types::to_sql_checked!();
|
||||
}
|
||||
|
||||
// from https://github.com/rust-lang/rust/pull/86930
|
||||
mod int_log {
|
||||
// 0 < val < 100_000_000
|
||||
const fn less_than_8(mut val: u32) -> u32 {
|
||||
let mut log = 0;
|
||||
if val >= 10_000 {
|
||||
val /= 10_000;
|
||||
log += 4;
|
||||
}
|
||||
log + if val >= 1000 {
|
||||
3
|
||||
} else if val >= 100 {
|
||||
2
|
||||
} else if val >= 10 {
|
||||
1
|
||||
} else {
|
||||
0
|
||||
}
|
||||
}
|
||||
|
||||
// 0 < val < 10_000_000_000_000_000
|
||||
const fn less_than_16(mut val: u64) -> u32 {
|
||||
let mut log = 0;
|
||||
if val >= 100_000_000 {
|
||||
val /= 100_000_000;
|
||||
log += 8;
|
||||
}
|
||||
log + less_than_8(val as u32)
|
||||
}
|
||||
|
||||
// 0 < val <= u64::MAX
|
||||
pub const fn u64(mut val: u64) -> u32 {
|
||||
let mut log = 0;
|
||||
if val >= 10_000_000_000_000_000 {
|
||||
val /= 10_000_000_000_000_000;
|
||||
log += 16;
|
||||
}
|
||||
log + less_than_16(val)
|
||||
}
|
||||
|
||||
// 0 < val <= u128::MAX
|
||||
pub const fn u128(mut val: u128) -> u32 {
|
||||
let mut log = 0;
|
||||
if val >= 100_000_000_000_000_000_000_000_000_000_000 {
|
||||
val /= 100_000_000_000_000_000_000_000_000_000_000;
|
||||
log += 32;
|
||||
return log + less_than_8(val as u32);
|
||||
}
|
||||
if val >= 10_000_000_000_000_000 {
|
||||
val /= 10_000_000_000_000_000;
|
||||
log += 16;
|
||||
}
|
||||
log + less_than_16(val as u64)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct SqlNumericI128(pub i128);
|
||||
|
||||
impl ToSql for SqlNumericI128 {
|
||||
fn to_sql(
|
||||
&self,
|
||||
_: &postgres_types::Type,
|
||||
out: &mut BytesMut,
|
||||
) -> Result<IsNull, Box<dyn error::Error + 'static + Sync + Send>> {
|
||||
let abs_val = self.0.unsigned_abs();
|
||||
let decimals = if self.0 != 0 {
|
||||
int_log::u128(abs_val)
|
||||
} else {
|
||||
0
|
||||
};
|
||||
let first_group_weight = ((decimals as f64) / 4.0f64).floor() as i16;
|
||||
let num_groups = (first_group_weight + 1) as usize;
|
||||
|
||||
// Reserve bytes
|
||||
out.reserve(8 + num_groups * 2);
|
||||
|
||||
// Number of groups
|
||||
out.put_u16(num_groups as u16);
|
||||
// Weight of first group
|
||||
out.put_i16(first_group_weight);
|
||||
// Sign
|
||||
out.put_u16(if self.0 < 0 { 0x4000 } else { 0x0000 });
|
||||
// DScale
|
||||
out.put_u16(0);
|
||||
|
||||
let mut int_part = abs_val;
|
||||
|
||||
for weight in (0..=first_group_weight).rev() {
|
||||
let decimal_shift = 10000u128.pow(weight as u32);
|
||||
let v = (int_part / decimal_shift) & 0xFFFF;
|
||||
out.put_i16(v as i16);
|
||||
int_part -= v * decimal_shift;
|
||||
}
|
||||
|
||||
Ok(IsNull::No)
|
||||
}
|
||||
|
||||
fn accepts(ty: &Type) -> bool {
|
||||
matches!(*ty, Type::NUMERIC)
|
||||
}
|
||||
|
||||
postgres_types::to_sql_checked!();
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct SqlNumericU64(pub u64);
|
||||
|
||||
impl ToSql for SqlNumericU64 {
|
||||
fn to_sql(
|
||||
&self,
|
||||
_: &postgres_types::Type,
|
||||
out: &mut BytesMut,
|
||||
) -> Result<IsNull, Box<dyn error::Error + 'static + Sync + Send>> {
|
||||
let decimals = if self.0 != 0 { int_log::u64(self.0) } else { 0 };
|
||||
let first_group_weight = ((decimals as f64) / 4.0f64).floor() as i16;
|
||||
let num_groups = (first_group_weight + 1) as usize;
|
||||
|
||||
// Reserve bytes
|
||||
out.reserve(8 + num_groups * 2);
|
||||
|
||||
// Number of groups
|
||||
out.put_u16(num_groups as u16);
|
||||
// Weight of first group
|
||||
out.put_i16(first_group_weight);
|
||||
// Sign
|
||||
out.put_u16(0);
|
||||
// DScale
|
||||
out.put_u16(0);
|
||||
|
||||
let mut int_part = self.0;
|
||||
|
||||
for weight in (0..=first_group_weight).rev() {
|
||||
let decimal_shift = 10000u64.pow(weight as u32);
|
||||
let v = (int_part / decimal_shift) & 0xFFFF;
|
||||
out.put_i16(v as i16);
|
||||
int_part -= v * decimal_shift;
|
||||
}
|
||||
|
||||
Ok(IsNull::No)
|
||||
}
|
||||
|
||||
fn accepts(ty: &Type) -> bool {
|
||||
matches!(*ty, Type::NUMERIC)
|
||||
}
|
||||
|
||||
postgres_types::to_sql_checked!();
|
||||
}
|
|
@ -1,12 +0,0 @@
|
|||
use bytemuck::{Pod, Zeroable};
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
#[repr(packed)]
|
||||
pub struct SerumEventQueueHeader {
|
||||
pub _account_flags: u64, // Initialized, EventQueue
|
||||
pub head: u64,
|
||||
pub count: u64,
|
||||
pub seq_num: u64,
|
||||
}
|
||||
unsafe impl Zeroable for SerumEventQueueHeader {}
|
||||
unsafe impl Pod for SerumEventQueueHeader {}
|
|
@ -1,46 +0,0 @@
|
|||
[package]
|
||||
name = "service-mango-accounts"
|
||||
version = "0.1.0"
|
||||
authors = ["Christian Kamm <mail@ckamm.de>", "Maximilian Schneider <max@mango.markets>"]
|
||||
edition = "2018"
|
||||
license = "AGPL-3.0-or-later"
|
||||
|
||||
[dependencies]
|
||||
mango-feeds-lib = { path = "../lib" }
|
||||
solana-logger = "*"
|
||||
bs58 = "*"
|
||||
log = "*"
|
||||
anyhow = "*"
|
||||
toml = "*"
|
||||
|
||||
serde = "1.0"
|
||||
serde_derive = "1.0"
|
||||
serde_json = "1.0"
|
||||
futures = "0.3.17"
|
||||
futures-core = "0.3"
|
||||
futures-channel = "0.3"
|
||||
futures-util = "0.3"
|
||||
ws = "^0.9.2"
|
||||
async-channel = "1.6"
|
||||
async-trait = "0.1"
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio-tungstenite = "0.17"
|
||||
bytemuck = "1.7.2"
|
||||
jemallocator = "0.3.2"
|
||||
chrono = "0.4.23"
|
||||
solana-sdk = "~1.14.9"
|
||||
|
||||
tokio-postgres = { version = "0.7", features = ["with-chrono-0_4"] }
|
||||
tokio-postgres-rustls = "0.9.0"
|
||||
postgres-types = { version = "0.2", features = ["array-impls", "derive", "with-chrono-0_4"] }
|
||||
postgres-native-tls = "0.5"
|
||||
native-tls = "0.2"
|
||||
# postgres_query hasn't updated its crate in a while
|
||||
postgres_query = { git = "https://github.com/nolanderc/rust-postgres-query", rev = "b4422051c8a31fbba4a35f88004c1cefb1878dd5" }
|
||||
base64 = "0.21.0"
|
||||
|
||||
mango-v4 = { git = "https://github.com/blockworks-foundation/mango-v4", branch = "dev" }
|
||||
mango-v4-client = { git = "https://github.com/blockworks-foundation/mango-v4", branch = "dev" }
|
||||
serum_dex = { git = "https://github.com/jup-ag/openbook-program", branch = "feat/expose-things", features = ["no-entrypoint"] }
|
||||
anchor-lang = "0.25.0"
|
||||
anchor-client = "0.25.0"
|
|
@ -1,34 +0,0 @@
|
|||
[package]
|
||||
name = "service-mango-crank"
|
||||
version = "0.1.0"
|
||||
authors = ["Maximilian Schneider <max@mango.markets>"]
|
||||
license = "AGPL-3.0-or-later"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
mango-feeds-lib = { path = "../lib" }
|
||||
solana-client = "1"
|
||||
solana-logger = "1"
|
||||
solana-sdk = "1"
|
||||
bs58 = "*"
|
||||
log = "*"
|
||||
anyhow = "*"
|
||||
toml = "*"
|
||||
|
||||
serde = "1.0"
|
||||
serde_derive = "1.0"
|
||||
serde_json = "1.0"
|
||||
futures-channel = "0.3"
|
||||
futures-util = "0.3"
|
||||
ws = "^0.9.2"
|
||||
async-channel = "1.6"
|
||||
async-trait = "0.1"
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio-tungstenite = "0.17"
|
||||
bytemuck = "1.7.2"
|
||||
|
||||
mango-v4 = { git = "https://github.com/blockworks-foundation/mango-v4", branch = "dev" }
|
||||
mango-v4-client = { git = "https://github.com/blockworks-foundation/mango-v4", branch = "dev" }
|
||||
serum_dex = { git = "https://github.com/openbook-dex/program" }
|
||||
anchor-lang = "0.25.0"
|
||||
anchor-client = "0.25.0"
|
|
@ -1,45 +0,0 @@
|
|||
use log::*;
|
||||
use solana_client::nonblocking::rpc_client::RpcClient;
|
||||
use solana_sdk::{clock::DEFAULT_MS_PER_SLOT, commitment_config::CommitmentConfig, hash::Hash};
|
||||
use std::{
|
||||
sync::{Arc, RwLock},
|
||||
time::Duration,
|
||||
};
|
||||
use tokio::{spawn, time::sleep};
|
||||
|
||||
const RETRY_INTERVAL: Duration = Duration::from_millis(5 * DEFAULT_MS_PER_SLOT);
|
||||
|
||||
pub async fn poll_loop(blockhash: Arc<RwLock<Hash>>, client: Arc<RpcClient>) {
|
||||
let cfg = CommitmentConfig::processed();
|
||||
loop {
|
||||
let old_blockhash = *blockhash.read().unwrap();
|
||||
if let Ok((new_blockhash, _)) = client.get_latest_blockhash_with_commitment(cfg).await {
|
||||
if new_blockhash != old_blockhash {
|
||||
debug!("new blockhash ({:?})", blockhash);
|
||||
*blockhash.write().unwrap() = new_blockhash;
|
||||
}
|
||||
}
|
||||
|
||||
// Retry every few slots
|
||||
sleep(RETRY_INTERVAL).await;
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn init(client: Arc<RpcClient>) -> Arc<RwLock<Hash>> {
|
||||
// get the first blockhash
|
||||
let blockhash = Arc::new(RwLock::new(
|
||||
client
|
||||
.get_latest_blockhash()
|
||||
.await
|
||||
.expect("fetch initial blockhash"),
|
||||
));
|
||||
|
||||
// launch task
|
||||
let _join_hdl = {
|
||||
// create a thread-local reference to blockhash
|
||||
let blockhash_c = blockhash.clone();
|
||||
spawn(async move { poll_loop(blockhash_c, client).await })
|
||||
};
|
||||
|
||||
blockhash
|
||||
}
|
|
@ -1,181 +0,0 @@
|
|||
mod blockhash_poller;
|
||||
mod mango_v4_perp_crank_sink;
|
||||
mod openbook_crank_sink;
|
||||
mod transaction_builder;
|
||||
mod transaction_sender;
|
||||
|
||||
use anchor_client::{
|
||||
solana_sdk::{commitment_config::CommitmentConfig, signature::Keypair},
|
||||
Cluster,
|
||||
};
|
||||
use anchor_lang::prelude::Pubkey;
|
||||
use bytemuck::bytes_of;
|
||||
use log::*;
|
||||
use mango_v4_client::{Client, MangoGroupContext, TransactionBuilderConfig};
|
||||
use solana_client::nonblocking::rpc_client::RpcClient;
|
||||
use std::iter::FromIterator;
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
convert::TryFrom,
|
||||
fs::File,
|
||||
io::Read,
|
||||
str::FromStr,
|
||||
sync::{atomic::AtomicBool, Arc},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use mango_feeds_lib::EntityFilter::FilterByAccountIds;
|
||||
use mango_feeds_lib::FilterConfig;
|
||||
use mango_feeds_lib::{grpc_plugin_source, metrics, websocket_source, MetricsConfig, SourceConfig};
|
||||
use serde::Deserialize;
|
||||
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
pub struct Config {
|
||||
pub source: SourceConfig,
|
||||
pub metrics: MetricsConfig,
|
||||
pub bind_ws_addr: String,
|
||||
pub rpc_http_url: String,
|
||||
pub mango_group: String,
|
||||
pub keypair: Vec<u8>,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
solana_logger::setup_with_default("info");
|
||||
|
||||
let exit: Arc<AtomicBool> = Arc::new(AtomicBool::new(false));
|
||||
|
||||
let args: Vec<String> = std::env::args().collect();
|
||||
if args.len() < 2 {
|
||||
error!("Please enter a config file path argument.");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let config: Config = {
|
||||
let mut file = File::open(&args[1])?;
|
||||
let mut contents = String::new();
|
||||
file.read_to_string(&mut contents)?;
|
||||
toml::from_str(&contents).unwrap()
|
||||
};
|
||||
|
||||
let rpc_client = Arc::new(RpcClient::new(config.rpc_http_url.clone()));
|
||||
|
||||
let blockhash = blockhash_poller::init(rpc_client.clone()).await;
|
||||
|
||||
let metrics_tx = metrics::start(config.metrics, "crank".into());
|
||||
|
||||
let rpc_url = config.rpc_http_url;
|
||||
let ws_url = rpc_url.replace("https", "wss");
|
||||
let rpc_timeout = Duration::from_secs(10);
|
||||
let cluster = Cluster::Custom(rpc_url.clone(), ws_url.clone());
|
||||
let client = Client::new(
|
||||
cluster.clone(),
|
||||
CommitmentConfig::processed(),
|
||||
Arc::new(Keypair::new()),
|
||||
Some(rpc_timeout),
|
||||
TransactionBuilderConfig {
|
||||
prioritization_micro_lamports: None,
|
||||
},
|
||||
);
|
||||
let group_pk = Pubkey::from_str(&config.mango_group).unwrap();
|
||||
let group_context =
|
||||
Arc::new(MangoGroupContext::new_from_rpc(&client.rpc_async(), group_pk).await?);
|
||||
|
||||
let perp_queue_pks: Vec<_> = group_context
|
||||
.perp_markets
|
||||
.values()
|
||||
.map(|context| (context.address, context.market.event_queue))
|
||||
.collect();
|
||||
|
||||
// fetch all serum/openbook markets to find their event queues
|
||||
let serum_market_pks: Vec<_> = group_context
|
||||
.serum3_markets
|
||||
.values()
|
||||
.map(|context| context.market.serum_market_external)
|
||||
.collect();
|
||||
|
||||
let serum_market_ais = client
|
||||
.rpc_async()
|
||||
.get_multiple_accounts(serum_market_pks.as_slice())
|
||||
.await?;
|
||||
|
||||
let serum_market_ais: Vec<_> = serum_market_ais
|
||||
.iter()
|
||||
.filter_map(|maybe_ai| match maybe_ai {
|
||||
Some(ai) => Some(ai),
|
||||
None => None,
|
||||
})
|
||||
.collect();
|
||||
|
||||
let serum_queue_pks: Vec<_> = serum_market_ais
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|pair| {
|
||||
let market_state: serum_dex::state::MarketState = *bytemuck::from_bytes(
|
||||
&pair.1.data[5..5 + std::mem::size_of::<serum_dex::state::MarketState>()],
|
||||
);
|
||||
let event_q = market_state.event_q;
|
||||
(
|
||||
serum_market_pks[pair.0],
|
||||
Pubkey::try_from(bytes_of(&event_q)).unwrap(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let (account_write_queue_sender, slot_queue_sender, instruction_receiver) =
|
||||
transaction_builder::init(
|
||||
perp_queue_pks.clone(),
|
||||
serum_queue_pks.clone(),
|
||||
group_pk,
|
||||
metrics_tx.clone(),
|
||||
)
|
||||
.expect("init transaction builder");
|
||||
|
||||
transaction_sender::init(
|
||||
instruction_receiver,
|
||||
blockhash,
|
||||
rpc_client,
|
||||
Keypair::from_bytes(&config.keypair).expect("valid keyair in config"),
|
||||
);
|
||||
|
||||
info!(
|
||||
"connect: {}",
|
||||
config
|
||||
.source
|
||||
.grpc_sources
|
||||
.iter()
|
||||
.map(|c| c.connection_string.clone())
|
||||
.collect::<String>()
|
||||
);
|
||||
let use_geyser = true;
|
||||
let all_queue_pks: HashSet<Pubkey> = perp_queue_pks
|
||||
.iter()
|
||||
.chain(serum_queue_pks.iter())
|
||||
.map(|mkt| mkt.1)
|
||||
.collect();
|
||||
|
||||
let filter_config = FilterConfig {
|
||||
entity_filter: FilterByAccountIds(Vec::from_iter(all_queue_pks)),
|
||||
};
|
||||
if use_geyser {
|
||||
grpc_plugin_source::process_events(
|
||||
&config.source,
|
||||
&filter_config,
|
||||
account_write_queue_sender,
|
||||
slot_queue_sender,
|
||||
metrics_tx.clone(),
|
||||
exit.clone(),
|
||||
)
|
||||
.await;
|
||||
} else {
|
||||
websocket_source::process_events(
|
||||
&config.source,
|
||||
&filter_config,
|
||||
account_write_queue_sender,
|
||||
slot_queue_sender,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
|
@ -1,112 +0,0 @@
|
|||
use std::{
|
||||
borrow::BorrowMut,
|
||||
collections::{BTreeMap, BTreeSet},
|
||||
convert::TryFrom,
|
||||
};
|
||||
|
||||
use async_channel::Sender;
|
||||
use async_trait::async_trait;
|
||||
use log::*;
|
||||
use mango_feeds_lib::{account_write_filter::AccountWriteSink, chain_data::AccountData};
|
||||
use solana_sdk::{
|
||||
account::ReadableAccount,
|
||||
instruction::{AccountMeta, Instruction},
|
||||
pubkey::Pubkey,
|
||||
};
|
||||
|
||||
use bytemuck::cast_ref;
|
||||
|
||||
use anchor_lang::AccountDeserialize;
|
||||
|
||||
const MAX_BACKLOG: usize = 2;
|
||||
|
||||
pub struct MangoV4PerpCrankSink {
|
||||
pks: BTreeMap<Pubkey, Pubkey>,
|
||||
group_pk: Pubkey,
|
||||
instruction_sender: Sender<Vec<Instruction>>,
|
||||
}
|
||||
|
||||
impl MangoV4PerpCrankSink {
|
||||
pub fn new(
|
||||
pks: Vec<(Pubkey, Pubkey)>,
|
||||
group_pk: Pubkey,
|
||||
instruction_sender: Sender<Vec<Instruction>>,
|
||||
) -> Self {
|
||||
Self {
|
||||
pks: pks.iter().copied().collect(),
|
||||
group_pk,
|
||||
instruction_sender,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl AccountWriteSink for MangoV4PerpCrankSink {
|
||||
async fn process(&self, pk: &Pubkey, account: &AccountData) -> Result<(), String> {
|
||||
let account = &account.account;
|
||||
let event_queue: mango_v4::state::EventQueue =
|
||||
mango_v4::state::EventQueue::try_deserialize(account.data().borrow_mut()).unwrap();
|
||||
|
||||
// only crank if at least 1 fill or a sufficient events of other categories are buffered
|
||||
let contains_fill_events = event_queue
|
||||
.iter()
|
||||
.any(|e| e.event_type == mango_v4::state::EventType::Fill as u8);
|
||||
let has_backlog = event_queue.iter().count() > MAX_BACKLOG;
|
||||
if !contains_fill_events && !has_backlog {
|
||||
return Err("throttled".into());
|
||||
}
|
||||
|
||||
let mango_accounts: BTreeSet<_> = event_queue
|
||||
.iter()
|
||||
.take(10)
|
||||
.flat_map(|e| {
|
||||
match mango_v4::state::EventType::try_from(e.event_type).expect("mango v4 event") {
|
||||
mango_v4::state::EventType::Fill => {
|
||||
let fill: &mango_v4::state::FillEvent = cast_ref(e);
|
||||
vec![fill.maker, fill.taker]
|
||||
}
|
||||
mango_v4::state::EventType::Out => {
|
||||
let out: &mango_v4::state::OutEvent = cast_ref(e);
|
||||
vec![out.owner]
|
||||
}
|
||||
mango_v4::state::EventType::Liquidate => vec![],
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
let mkt_pk = self
|
||||
.pks
|
||||
.get(pk)
|
||||
.unwrap_or_else(|| panic!("{:?} is a known public key", pk));
|
||||
let mut ams: Vec<_> = anchor_lang::ToAccountMetas::to_account_metas(
|
||||
&mango_v4::accounts::PerpConsumeEvents {
|
||||
group: self.group_pk,
|
||||
perp_market: *mkt_pk,
|
||||
event_queue: *pk,
|
||||
},
|
||||
None,
|
||||
);
|
||||
ams.append(
|
||||
&mut mango_accounts
|
||||
.iter()
|
||||
.map(|pk| AccountMeta::new(*pk, false))
|
||||
.collect(),
|
||||
);
|
||||
|
||||
let ix = Instruction {
|
||||
program_id: mango_v4::id(),
|
||||
accounts: ams,
|
||||
data: anchor_lang::InstructionData::data(&mango_v4::instruction::PerpConsumeEvents {
|
||||
limit: 10,
|
||||
}),
|
||||
};
|
||||
|
||||
info!("evq={pk:?} count={} limit=10", event_queue.iter().count());
|
||||
|
||||
if let Err(e) = self.instruction_sender.send(vec![ix]).await {
|
||||
return Err(e.to_string());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
|
@ -1,109 +0,0 @@
|
|||
use std::{
|
||||
collections::{BTreeMap, BTreeSet},
|
||||
str::FromStr,
|
||||
};
|
||||
|
||||
use async_channel::Sender;
|
||||
use async_trait::async_trait;
|
||||
use log::*;
|
||||
use mango_feeds_lib::{
|
||||
account_write_filter::AccountWriteSink, chain_data::AccountData, serum::SerumEventQueueHeader,
|
||||
};
|
||||
use serum_dex::{instruction::MarketInstruction, state::EventView};
|
||||
use solana_sdk::{
|
||||
account::ReadableAccount,
|
||||
instruction::{AccountMeta, Instruction},
|
||||
pubkey::Pubkey,
|
||||
};
|
||||
|
||||
const MAX_BACKLOG: usize = 2;
|
||||
|
||||
pub struct OpenbookCrankSink {
|
||||
pks: BTreeMap<Pubkey, Pubkey>,
|
||||
instruction_sender: Sender<Vec<Instruction>>,
|
||||
}
|
||||
|
||||
impl OpenbookCrankSink {
|
||||
pub fn new(pks: Vec<(Pubkey, Pubkey)>, instruction_sender: Sender<Vec<Instruction>>) -> Self {
|
||||
Self {
|
||||
pks: pks.iter().copied().collect(),
|
||||
instruction_sender,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl AccountWriteSink for OpenbookCrankSink {
|
||||
async fn process(&self, pk: &Pubkey, account: &AccountData) -> Result<(), String> {
|
||||
let account = &account.account;
|
||||
|
||||
let inner_data = &account.data()[5..&account.data().len() - 7];
|
||||
let header_span = std::mem::size_of::<SerumEventQueueHeader>();
|
||||
let header: SerumEventQueueHeader = *bytemuck::from_bytes(&inner_data[..header_span]);
|
||||
let count = header.count;
|
||||
|
||||
let rest = &inner_data[header_span..];
|
||||
let event_size = std::mem::size_of::<serum_dex::state::Event>();
|
||||
let slop = rest.len() % event_size;
|
||||
let end = rest.len() - slop;
|
||||
let events = bytemuck::cast_slice::<u8, serum_dex::state::Event>(&rest[..end]);
|
||||
let seq_num = header.seq_num;
|
||||
|
||||
let events: Vec<_> = (0..count)
|
||||
.map(|i| {
|
||||
let offset = (seq_num - count + i) % events.len() as u64;
|
||||
let event: serum_dex::state::Event = events[offset as usize];
|
||||
event.as_view().unwrap()
|
||||
})
|
||||
.collect();
|
||||
|
||||
// only crank if at least 1 fill or a sufficient events of other categories are buffered
|
||||
let contains_fill_events = events
|
||||
.iter()
|
||||
.any(|e| matches!(e, serum_dex::state::EventView::Fill { .. }));
|
||||
|
||||
let has_backlog = events.len() > MAX_BACKLOG;
|
||||
if !contains_fill_events && !has_backlog {
|
||||
return Err("throttled".into());
|
||||
}
|
||||
|
||||
let oo_pks: BTreeSet<_> = events
|
||||
.iter()
|
||||
.map(|e| match e {
|
||||
EventView::Fill { owner, .. } | EventView::Out { owner, .. } => {
|
||||
bytemuck::cast_slice::<u64, Pubkey>(owner)[0]
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
let mut ams: Vec<_> = oo_pks
|
||||
.iter()
|
||||
.map(|pk| AccountMeta::new(*pk, false))
|
||||
.collect();
|
||||
|
||||
// pass two times evq_pk instead of deprecated fee receivers to reduce encoded tx size
|
||||
let mkt_pk = self
|
||||
.pks
|
||||
.get(pk)
|
||||
.unwrap_or_else(|| panic!("{:?} is a known public key", pk));
|
||||
ams.append(
|
||||
&mut [mkt_pk, pk, /*coin_pk*/ pk, /*pc_pk*/ pk]
|
||||
.iter()
|
||||
.map(|pk| AccountMeta::new(**pk, false))
|
||||
.collect(),
|
||||
);
|
||||
|
||||
let ix = Instruction {
|
||||
program_id: Pubkey::from_str("srmqPvymJeFKQ4zGQed1GFppgkRHL9kaELCbyksJtPX").unwrap(),
|
||||
accounts: ams,
|
||||
data: MarketInstruction::ConsumeEvents(count as u16).pack(),
|
||||
};
|
||||
|
||||
info!("evq={pk:?} count={count}");
|
||||
if let Err(e) = self.instruction_sender.send(vec![ix]).await {
|
||||
return Err(e.to_string());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
|
@ -1,56 +0,0 @@
|
|||
use mango_feeds_lib::{
|
||||
account_write_filter::{self, AccountWriteRoute},
|
||||
metrics::Metrics,
|
||||
AccountWrite, SlotUpdate,
|
||||
};
|
||||
|
||||
use solana_sdk::{instruction::Instruction, pubkey::Pubkey};
|
||||
use std::{sync::Arc, time::Duration};
|
||||
|
||||
use crate::{
|
||||
mango_v4_perp_crank_sink::MangoV4PerpCrankSink, openbook_crank_sink::OpenbookCrankSink,
|
||||
};
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub fn init(
|
||||
perp_queue_pks: Vec<(Pubkey, Pubkey)>,
|
||||
serum_queue_pks: Vec<(Pubkey, Pubkey)>,
|
||||
group_pk: Pubkey,
|
||||
metrics_sender: Metrics,
|
||||
) -> anyhow::Result<(
|
||||
async_channel::Sender<AccountWrite>,
|
||||
async_channel::Sender<SlotUpdate>,
|
||||
async_channel::Receiver<Vec<Instruction>>,
|
||||
)> {
|
||||
// Event queue updates can be consumed by client connections
|
||||
let (instruction_sender, instruction_receiver) = async_channel::unbounded::<Vec<Instruction>>();
|
||||
|
||||
let routes = vec![
|
||||
AccountWriteRoute {
|
||||
matched_pubkeys: serum_queue_pks.iter().map(|(_, evq_pk)| *evq_pk).collect(),
|
||||
sink: Arc::new(OpenbookCrankSink::new(
|
||||
serum_queue_pks,
|
||||
instruction_sender.clone(),
|
||||
)),
|
||||
timeout_interval: Duration::default(),
|
||||
},
|
||||
AccountWriteRoute {
|
||||
matched_pubkeys: perp_queue_pks.iter().map(|(_, evq_pk)| *evq_pk).collect(),
|
||||
sink: Arc::new(MangoV4PerpCrankSink::new(
|
||||
perp_queue_pks,
|
||||
group_pk,
|
||||
instruction_sender,
|
||||
)),
|
||||
timeout_interval: Duration::default(),
|
||||
},
|
||||
];
|
||||
|
||||
let (account_write_queue_sender, slot_queue_sender) =
|
||||
account_write_filter::init(routes, metrics_sender)?;
|
||||
|
||||
Ok((
|
||||
account_write_queue_sender,
|
||||
slot_queue_sender,
|
||||
instruction_receiver,
|
||||
))
|
||||
}
|
|
@ -1,47 +0,0 @@
|
|||
use log::*;
|
||||
use solana_client::{nonblocking::rpc_client::RpcClient, rpc_config::RpcSendTransactionConfig};
|
||||
use solana_sdk::{
|
||||
hash::Hash, instruction::Instruction, signature::Keypair, signature::Signer,
|
||||
transaction::Transaction,
|
||||
};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use tokio::spawn;
|
||||
|
||||
pub async fn send_loop(
|
||||
ixs_rx: async_channel::Receiver<Vec<Instruction>>,
|
||||
blockhash: Arc<RwLock<Hash>>,
|
||||
client: Arc<RpcClient>,
|
||||
keypair: Keypair,
|
||||
) {
|
||||
info!("signing with keypair pk={:?}", keypair.pubkey());
|
||||
let cfg = RpcSendTransactionConfig {
|
||||
skip_preflight: true,
|
||||
..RpcSendTransactionConfig::default()
|
||||
};
|
||||
loop {
|
||||
if let Ok(ixs) = ixs_rx.recv().await {
|
||||
// TODO add priority fee
|
||||
let tx = Transaction::new_signed_with_payer(
|
||||
&ixs,
|
||||
Some(&keypair.pubkey()),
|
||||
&[&keypair],
|
||||
*blockhash.read().unwrap(),
|
||||
);
|
||||
// TODO: collect metrics
|
||||
info!(
|
||||
"send tx={:?} ok={:?}",
|
||||
tx.signatures[0],
|
||||
client.send_transaction_with_config(&tx, cfg).await
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn init(
|
||||
ixs_rx: async_channel::Receiver<Vec<Instruction>>,
|
||||
blockhash: Arc<RwLock<Hash>>,
|
||||
client: Arc<RpcClient>,
|
||||
keypair: Keypair,
|
||||
) {
|
||||
spawn(async move { send_loop(ixs_rx, blockhash, client, keypair).await });
|
||||
}
|
|
@ -1,46 +0,0 @@
|
|||
[package]
|
||||
name = "service-mango-fills"
|
||||
version = "0.1.0"
|
||||
authors = ["Christian Kamm <mail@ckamm.de>", "Maximilian Schneider <max@mango.markets>"]
|
||||
edition = "2018"
|
||||
license = "AGPL-3.0-or-later"
|
||||
|
||||
[dependencies]
|
||||
mango-feeds-lib = { path = "../lib" }
|
||||
solana-logger = "*"
|
||||
bs58 = "*"
|
||||
log = "*"
|
||||
anyhow = "*"
|
||||
toml = "*"
|
||||
|
||||
serde = "1.0"
|
||||
serde_derive = "1.0"
|
||||
serde_json = "1.0"
|
||||
futures = "0.3.17"
|
||||
futures-core = "0.3"
|
||||
futures-channel = "0.3"
|
||||
futures-util = "0.3"
|
||||
ws = "^0.9.2"
|
||||
async-channel = "1.6"
|
||||
async-trait = "0.1"
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio-tungstenite = "0.17"
|
||||
bytemuck = "1.7.2"
|
||||
jemallocator = "0.3.2"
|
||||
chrono = "0.4.23"
|
||||
solana-sdk = "~1.14.9"
|
||||
|
||||
tokio-postgres = { version = "0.7", features = ["with-chrono-0_4"] }
|
||||
tokio-postgres-rustls = "0.9.0"
|
||||
postgres-types = { version = "0.2", features = ["array-impls", "derive", "with-chrono-0_4"] }
|
||||
postgres-native-tls = "0.5"
|
||||
native-tls = "0.2"
|
||||
# postgres_query hasn't updated its crate in a while
|
||||
postgres_query = { git = "https://github.com/nolanderc/rust-postgres-query", rev = "b4422051c8a31fbba4a35f88004c1cefb1878dd5" }
|
||||
base64 = "0.21.0"
|
||||
|
||||
mango-v4 = { git = "https://github.com/blockworks-foundation/mango-v4", branch = "dev" }
|
||||
mango-v4-client = { git = "https://github.com/blockworks-foundation/mango-v4", branch = "dev" }
|
||||
serum_dex = { git = "https://github.com/jup-ag/openbook-program", branch = "feat/expose-things", features = ["no-entrypoint"] }
|
||||
anchor-lang = "0.25.0"
|
||||
anchor-client = "0.25.0"
|
|
@ -1,111 +0,0 @@
|
|||
# service-mango-fills
|
||||
|
||||
This module parses event queues and exposes individual fills on a websocket.
|
||||
|
||||
Public API: `https://api.mngo.cloud/fills/v1/`
|
||||
|
||||
## API Reference
|
||||
|
||||
Get a list of markets
|
||||
|
||||
```
|
||||
{
|
||||
"command": "getMarkets"
|
||||
}
|
||||
```
|
||||
|
||||
```
|
||||
{
|
||||
"ESdnpnNLgTkBCZRuTJkZLi5wKEZ2z47SG3PJrhundSQ2": "SOL-PERP",
|
||||
"HwhVGkfsSQ9JSQeQYu2CbkRCLvsh3qRZxG6m4oMVwZpN": "BTC-PERP",
|
||||
"Fgh9JSZ2qfSjCw9RPJ85W2xbihsp2muLvfRztzoVR7f1": "ETH-PERP",
|
||||
}
|
||||
```
|
||||
|
||||
Subscribe to markets
|
||||
|
||||
```
|
||||
{
|
||||
"command": "subscribe"
|
||||
"marketIds": ["MARKET_PUBKEY"]
|
||||
}
|
||||
```
|
||||
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"message": "subscribed to market MARKET_PUBKEY"
|
||||
}
|
||||
```
|
||||
|
||||
Subscribe to account
|
||||
|
||||
```
|
||||
{
|
||||
"command": "subscribe"
|
||||
"account": ["MANGO_ACCOUNT_PUBKEY"]
|
||||
}
|
||||
```
|
||||
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"message": "subscribed to account MANGO_ACCOUNT_PUBKEY"
|
||||
}
|
||||
```
|
||||
|
||||
Fill Event
|
||||
|
||||
```
|
||||
{
|
||||
"event": {
|
||||
"eventType": "perp",
|
||||
"maker": "MAKER_MANGO_ACCOUNT_PUBKEY",
|
||||
"taker": "TAKER_MANGO_ACCOUNT_PUBKEY",
|
||||
"takerSide": "bid",
|
||||
"timestamp": "2023-04-06T13:00:00+00:00",
|
||||
"seqNum": 132420,
|
||||
"makerClientOrderId": 1680786677648,
|
||||
"takerClientOrderId": 1680786688080,
|
||||
"makerFee": -0.0003,
|
||||
"takerFee": 0.0006,
|
||||
"price": 20.72,
|
||||
"quantity": 0.45
|
||||
},
|
||||
"marketKey": "ESdnpnNLgTkBCZRuTJkZLi5wKEZ2z47SG3PJrhundSQ2",
|
||||
"marketName": "SOL-PERP",
|
||||
"status": "new",
|
||||
"slot": 186869253,
|
||||
"writeVersion": 662992260539
|
||||
}
|
||||
```
|
||||
|
||||
If the fill ocurred on a fork, an event will be sent with the 'status' field set to 'revoke'.
|
||||
|
||||
## Setup
|
||||
|
||||
## Local
|
||||
|
||||
1. Prepare the connector configuration file.
|
||||
|
||||
[Here is an example](service-mango-fills/conf/example-config.toml).
|
||||
|
||||
- `bind_ws_addr` is the listen port for the websocket clients
|
||||
- `rpc_ws_url` is unused and can stay empty.
|
||||
- `connection_string` for your `grpc_sources` must point to the gRPC server
|
||||
address configured for the plugin.
|
||||
- `rpc_http_url` must point to the JSON-RPC URL.
|
||||
- `program_id` must match what is configured for the gRPC plugin
|
||||
|
||||
2. Start the service binary.
|
||||
|
||||
Pass the path to the config file as the first argument. It logs to stdout. It
|
||||
should be restarted on exit.
|
||||
|
||||
3. Monitor the logs
|
||||
|
||||
`WARN` messages can be recovered from. `ERROR` messages need attention. The
|
||||
logs are very spammy changing the default log level is recommended when you
|
||||
dont want to analyze performance of the service.
|
||||
|
||||
## fly.io
|
|
@ -1,35 +0,0 @@
|
|||
bind_ws_addr = "0.0.0.0:8080"
|
||||
rpc_http_url = "http://mango.rpcpool.com/<token>"
|
||||
mango_group = "78b8f4cGCwmZ9ysPFMWLaLTkkaYnUjwMJYStWe5RTSSX"
|
||||
|
||||
[metrics]
|
||||
output_stdout = true
|
||||
output_http = true
|
||||
|
||||
# [postgres]
|
||||
# connection_string = "$PG_CONNECTION_STRING"
|
||||
# connection_count = 1
|
||||
# max_batch_size = 1
|
||||
# max_queue_size = 50000
|
||||
# retry_query_max_count = 10
|
||||
# retry_query_sleep_secs = 2
|
||||
# retry_connection_sleep_secs = 10
|
||||
# fatal_connection_timeout_secs = 30
|
||||
# allow_invalid_certs = true
|
||||
|
||||
# # [postgres.tls]
|
||||
# # ca_cert_path = "$PG_CA_CERT"
|
||||
# # client_key_path = "$PG_CLIENT_KEY"
|
||||
|
||||
[source]
|
||||
dedup_queue_size = 50000
|
||||
rpc_ws_url = "wss://mango.rpcpool.com/<token>"
|
||||
|
||||
[[source.grpc_sources]]
|
||||
name = "accountsdb-client"
|
||||
connection_string = "http://tyo64.rpcpool.com/"
|
||||
retry_connection_sleep_secs = 30
|
||||
|
||||
[source.snapshot]
|
||||
rpc_http_url = "http://mango.rpcpool.com/<token>"
|
||||
program_id = "4MangoMjqJ2firMokCjjGgoK8d4MXcrgL7XJaL3w6fVg"
|
|
@ -1,35 +0,0 @@
|
|||
bind_ws_addr = "[::]:8080"
|
||||
rpc_http_url = "$RPC_HTTP_URL"
|
||||
mango_group = "78b8f4cGCwmZ9ysPFMWLaLTkkaYnUjwMJYStWe5RTSSX"
|
||||
|
||||
[metrics]
|
||||
output_stdout = true
|
||||
output_http = true
|
||||
|
||||
[postgres]
|
||||
connection_string = "$PG_CONNECTION_STRING"
|
||||
connection_count = 1
|
||||
max_batch_size = 1
|
||||
max_queue_size = 50000
|
||||
retry_query_max_count = 10
|
||||
retry_query_sleep_secs = 2
|
||||
retry_connection_sleep_secs = 10
|
||||
fatal_connection_timeout_secs = 30
|
||||
allow_invalid_certs = true
|
||||
|
||||
[postgres.tls]
|
||||
ca_cert_path = "$PG_CA_CERT"
|
||||
client_key_path = "$PG_CLIENT_KEY"
|
||||
|
||||
[source]
|
||||
dedup_queue_size = 50000
|
||||
rpc_ws_url = "$RPC_WS_URL"
|
||||
|
||||
[[source.grpc_sources]]
|
||||
name = "accountsdb-client"
|
||||
connection_string = "$GEYSER_CONNECTION_STRING"
|
||||
retry_connection_sleep_secs = 30
|
||||
|
||||
[source.snapshot]
|
||||
rpc_http_url = "$RPC_HTTP_URL"
|
||||
program_id = "srmqPvymJeFKQ4zGQed1GFppgkRHL9kaELCbyksJtPX"
|
|
@ -1,624 +0,0 @@
|
|||
use log::*;
|
||||
use mango_feeds_lib::{
|
||||
chain_data::{AccountData, ChainData, ChainDataMetrics, SlotData},
|
||||
metrics::{MetricType, Metrics},
|
||||
serum::SerumEventQueueHeader,
|
||||
AccountWrite, MarketConfig, SlotUpdate,
|
||||
};
|
||||
use solana_sdk::{
|
||||
account::{ReadableAccount, WritableAccount},
|
||||
clock::Epoch,
|
||||
pubkey::Pubkey,
|
||||
};
|
||||
use std::{
|
||||
borrow::BorrowMut,
|
||||
cmp::max,
|
||||
collections::{HashMap, HashSet},
|
||||
iter::FromIterator,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::metrics::MetricU64;
|
||||
use anchor_lang::AccountDeserialize;
|
||||
use mango_v4::state::{
|
||||
AnyEvent, EventQueue, EventQueueHeader, EventType, FillEvent as PerpFillEvent,
|
||||
OutEvent as PerpOutEvent, QueueHeader, MAX_NUM_EVENTS,
|
||||
};
|
||||
use service_mango_fills::*;
|
||||
|
||||
// couldn't compile the correct struct size / math on m1, fixed sizes resolve this issue
|
||||
type EventQueueEvents = [AnyEvent; MAX_NUM_EVENTS as usize];
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn publish_changes_perp(
|
||||
slot: u64,
|
||||
write_version: u64,
|
||||
mkt: &(Pubkey, MarketConfig),
|
||||
header: &EventQueueHeader,
|
||||
events: &EventQueueEvents,
|
||||
prev_seq_num: u64,
|
||||
prev_head: usize,
|
||||
prev_events: &EventQueueEvents,
|
||||
fill_update_sender: &async_channel::Sender<FillEventFilterMessage>,
|
||||
metric_events_new: &mut MetricU64,
|
||||
metric_events_change: &mut MetricU64,
|
||||
metric_events_drop: &mut MetricU64,
|
||||
metric_head_update: &mut MetricU64,
|
||||
) {
|
||||
// seq_num = N means that events (N-QUEUE_LEN) until N-1 are available
|
||||
let start_seq_num = max(prev_seq_num, header.seq_num).saturating_sub(MAX_NUM_EVENTS as u64);
|
||||
let mut checkpoint = Vec::new();
|
||||
let mkt_pk_string = mkt.0.to_string();
|
||||
let evq_pk_string = mkt.1.event_queue.to_string();
|
||||
for seq_num in start_seq_num..header.seq_num {
|
||||
let idx = (seq_num % MAX_NUM_EVENTS as u64) as usize;
|
||||
|
||||
// there are three possible cases:
|
||||
// 1) the event is past the old seq num, hence guaranteed new event
|
||||
// 2) the event is not matching the old event queue
|
||||
// 3) all other events are matching the old event queue
|
||||
// the order of these checks is important so they are exhaustive
|
||||
if seq_num >= prev_seq_num {
|
||||
debug!(
|
||||
"found new event {} idx {} type {} slot {} write_version {}",
|
||||
mkt_pk_string, idx, events[idx].event_type as u32, slot, write_version
|
||||
);
|
||||
|
||||
metric_events_new.increment();
|
||||
|
||||
// new fills are published and recorded in checkpoint
|
||||
if events[idx].event_type == EventType::Fill as u8 {
|
||||
let fill: PerpFillEvent = bytemuck::cast(events[idx]);
|
||||
let fill = FillEvent::new_from_perp(fill, &mkt.1);
|
||||
|
||||
fill_update_sender
|
||||
.try_send(FillEventFilterMessage::Update(FillUpdate {
|
||||
slot,
|
||||
write_version,
|
||||
event: fill.clone(),
|
||||
status: FillUpdateStatus::New,
|
||||
market_key: mkt_pk_string.clone(),
|
||||
market_name: mkt.1.name.clone(),
|
||||
}))
|
||||
.unwrap(); // TODO: use anyhow to bubble up error
|
||||
checkpoint.push(fill);
|
||||
}
|
||||
} else if prev_events[idx].event_type != events[idx].event_type
|
||||
|| prev_events[idx].padding != events[idx].padding
|
||||
{
|
||||
debug!(
|
||||
"found changed event {} idx {} seq_num {} header seq num {} old seq num {}",
|
||||
mkt_pk_string, idx, seq_num, header.seq_num, prev_seq_num
|
||||
);
|
||||
|
||||
metric_events_change.increment();
|
||||
|
||||
// first revoke old event if a fill
|
||||
if prev_events[idx].event_type == EventType::Fill as u8 {
|
||||
let fill: PerpFillEvent = bytemuck::cast(prev_events[idx]);
|
||||
let fill = FillEvent::new_from_perp(fill, &mkt.1);
|
||||
fill_update_sender
|
||||
.try_send(FillEventFilterMessage::Update(FillUpdate {
|
||||
slot,
|
||||
write_version,
|
||||
event: fill,
|
||||
status: FillUpdateStatus::Revoke,
|
||||
market_key: mkt_pk_string.clone(),
|
||||
market_name: mkt.1.name.clone(),
|
||||
}))
|
||||
.unwrap(); // TODO: use anyhow to bubble up error
|
||||
}
|
||||
|
||||
// then publish new if its a fill and record in checkpoint
|
||||
if events[idx].event_type == EventType::Fill as u8 {
|
||||
let fill: PerpFillEvent = bytemuck::cast(events[idx]);
|
||||
let fill = FillEvent::new_from_perp(fill, &mkt.1);
|
||||
fill_update_sender
|
||||
.try_send(FillEventFilterMessage::Update(FillUpdate {
|
||||
slot,
|
||||
write_version,
|
||||
event: fill.clone(),
|
||||
status: FillUpdateStatus::New,
|
||||
market_key: mkt_pk_string.clone(),
|
||||
market_name: mkt.1.name.clone(),
|
||||
}))
|
||||
.unwrap(); // TODO: use anyhow to bubble up error
|
||||
checkpoint.push(fill);
|
||||
}
|
||||
} else {
|
||||
// every already published event is recorded in checkpoint if a fill
|
||||
if events[idx].event_type == EventType::Fill as u8 {
|
||||
let fill: PerpFillEvent = bytemuck::cast(events[idx]);
|
||||
let fill = FillEvent::new_from_perp(fill, &mkt.1);
|
||||
checkpoint.push(fill);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// in case queue size shrunk due to a fork we need revoke all previous fills
|
||||
for seq_num in header.seq_num..prev_seq_num {
|
||||
let idx = (seq_num % MAX_NUM_EVENTS as u64) as usize;
|
||||
debug!(
|
||||
"found dropped event {} idx {} seq_num {} header seq num {} old seq num {} slot {} write_version {}",
|
||||
mkt_pk_string, idx, seq_num, header.seq_num, prev_seq_num, slot, write_version
|
||||
);
|
||||
|
||||
metric_events_drop.increment();
|
||||
|
||||
if prev_events[idx].event_type == EventType::Fill as u8 {
|
||||
let fill: PerpFillEvent = bytemuck::cast(prev_events[idx]);
|
||||
let fill = FillEvent::new_from_perp(fill, &mkt.1);
|
||||
fill_update_sender
|
||||
.try_send(FillEventFilterMessage::Update(FillUpdate {
|
||||
slot,
|
||||
event: fill,
|
||||
write_version,
|
||||
status: FillUpdateStatus::Revoke,
|
||||
market_key: mkt_pk_string.clone(),
|
||||
market_name: mkt.1.name.clone(),
|
||||
}))
|
||||
.unwrap(); // TODO: use anyhow to bubble up error
|
||||
}
|
||||
}
|
||||
|
||||
let head = header.head();
|
||||
|
||||
let head_seq_num = if events[head - 1].event_type == EventType::Fill as u8 {
|
||||
let event: PerpFillEvent = bytemuck::cast(events[head - 1]);
|
||||
event.seq_num + 1
|
||||
} else if events[head - 1].event_type == EventType::Out as u8 {
|
||||
let event: PerpOutEvent = bytemuck::cast(events[head - 1]);
|
||||
event.seq_num + 1
|
||||
} else {
|
||||
0
|
||||
};
|
||||
|
||||
let prev_head_seq_num = if prev_events[prev_head - 1].event_type == EventType::Fill as u8 {
|
||||
let event: PerpFillEvent = bytemuck::cast(prev_events[prev_head - 1]);
|
||||
event.seq_num + 1
|
||||
} else if prev_events[prev_head - 1].event_type == EventType::Out as u8 {
|
||||
let event: PerpOutEvent = bytemuck::cast(prev_events[prev_head - 1]);
|
||||
event.seq_num + 1
|
||||
} else {
|
||||
0
|
||||
};
|
||||
|
||||
// publish a head update event if the head changed (events were consumed)
|
||||
if head != prev_head {
|
||||
metric_head_update.increment();
|
||||
|
||||
fill_update_sender
|
||||
.try_send(FillEventFilterMessage::HeadUpdate(HeadUpdate {
|
||||
head,
|
||||
prev_head,
|
||||
head_seq_num,
|
||||
prev_head_seq_num,
|
||||
status: FillUpdateStatus::New,
|
||||
market_key: mkt_pk_string.clone(),
|
||||
market_name: mkt.1.name.clone(),
|
||||
slot,
|
||||
write_version,
|
||||
}))
|
||||
.unwrap(); // TODO: use anyhow to bubble up error
|
||||
}
|
||||
|
||||
fill_update_sender
|
||||
.try_send(FillEventFilterMessage::Checkpoint(FillCheckpoint {
|
||||
slot,
|
||||
write_version,
|
||||
events: checkpoint,
|
||||
market: mkt_pk_string,
|
||||
queue: evq_pk_string,
|
||||
}))
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn publish_changes_serum(
|
||||
_slot: u64,
|
||||
_write_version: u64,
|
||||
_mkt: &(Pubkey, MarketConfig),
|
||||
_header: &SerumEventQueueHeader,
|
||||
_events: &[serum_dex::state::Event],
|
||||
_prev_seq_num: u64,
|
||||
_prev_events: &[serum_dex::state::Event],
|
||||
_fill_update_sender: &async_channel::Sender<FillEventFilterMessage>,
|
||||
_metric_events_new: &mut MetricU64,
|
||||
_metric_events_change: &mut MetricU64,
|
||||
_metric_events_drop: &mut MetricU64,
|
||||
) {
|
||||
// // seq_num = N means that events (N-QUEUE_LEN) until N-1 are available
|
||||
// let start_seq_num = max(prev_seq_num, header.seq_num)
|
||||
// .checked_sub(MAX_NUM_EVENTS as u64)
|
||||
// .unwrap_or(0);
|
||||
// let mut checkpoint = Vec::new();
|
||||
// let mkt_pk_string = mkt.0.to_string();
|
||||
// let evq_pk_string = mkt.1.event_queue.to_string();
|
||||
// let header_seq_num = header.seq_num;
|
||||
// debug!("start seq {} header seq {}", start_seq_num, header_seq_num);
|
||||
|
||||
// // Timestamp for spot events is time scraped
|
||||
// let timestamp = SystemTime::now()
|
||||
// .duration_since(SystemTime::UNIX_EPOCH)
|
||||
// .unwrap()
|
||||
// .as_secs();
|
||||
// for seq_num in start_seq_num..header_seq_num {
|
||||
// let idx = (seq_num % MAX_NUM_EVENTS as u64) as usize;
|
||||
// let event_view = events[idx].as_view().unwrap();
|
||||
// let old_event_view = prev_events[idx].as_view().unwrap();
|
||||
|
||||
// match event_view {
|
||||
// SpotEvent::Fill { .. } => {
|
||||
// // there are three possible cases:
|
||||
// // 1) the event is past the old seq num, hence guaranteed new event
|
||||
// // 2) the event is not matching the old event queue
|
||||
// // 3) all other events are matching the old event queue
|
||||
// // the order of these checks is important so they are exhaustive
|
||||
// let fill = FillEvent::new_from_spot(event_view, timestamp, seq_num, &mkt.1);
|
||||
// if seq_num >= prev_seq_num {
|
||||
// debug!("found new serum fill {} idx {}", mkt_pk_string, idx,);
|
||||
|
||||
// metric_events_new.increment();
|
||||
// fill_update_sender
|
||||
// .try_send(FillEventFilterMessage::Update(FillUpdate {
|
||||
// slot,
|
||||
// write_version,
|
||||
// event: fill.clone(),
|
||||
// status: FillUpdateStatus::New,
|
||||
// market_key: mkt_pk_string.clone(),
|
||||
// market_name: mkt.1.name.clone(),
|
||||
// }))
|
||||
// .unwrap(); // TODO: use anyhow to bubble up error
|
||||
// checkpoint.push(fill);
|
||||
// continue;
|
||||
// }
|
||||
|
||||
// match old_event_view {
|
||||
// SpotEvent::Fill {
|
||||
// client_order_id, ..
|
||||
// } => {
|
||||
// let client_order_id = match client_order_id {
|
||||
// Some(id) => id.into(),
|
||||
// None => 0u64,
|
||||
// };
|
||||
// if client_order_id != fill.client_order_id {
|
||||
// debug!(
|
||||
// "found changed id event {} idx {} seq_num {} header seq num {} old seq num {}",
|
||||
// mkt_pk_string, idx, seq_num, header_seq_num, prev_seq_num
|
||||
// );
|
||||
|
||||
// metric_events_change.increment();
|
||||
|
||||
// let old_fill = FillEvent::new_from_spot(
|
||||
// old_event_view,
|
||||
// timestamp,
|
||||
// seq_num,
|
||||
// &mkt.1,
|
||||
// );
|
||||
// // first revoke old event
|
||||
// fill_update_sender
|
||||
// .try_send(FillEventFilterMessage::Update(FillUpdate {
|
||||
// slot,
|
||||
// write_version,
|
||||
// event: old_fill,
|
||||
// status: FillUpdateStatus::Revoke,
|
||||
// market_key: mkt_pk_string.clone(),
|
||||
// market_name: mkt.1.name.clone(),
|
||||
// }))
|
||||
// .unwrap(); // TODO: use anyhow to bubble up error
|
||||
|
||||
// // then publish new
|
||||
// fill_update_sender
|
||||
// .try_send(FillEventFilterMessage::Update(FillUpdate {
|
||||
// slot,
|
||||
// write_version,
|
||||
// event: fill.clone(),
|
||||
// status: FillUpdateStatus::New,
|
||||
// market_key: mkt_pk_string.clone(),
|
||||
// market_name: mkt.1.name.clone(),
|
||||
// }))
|
||||
// .unwrap(); // TODO: use anyhow to bubble up error
|
||||
// }
|
||||
|
||||
// // record new event in checkpoint
|
||||
// checkpoint.push(fill);
|
||||
// }
|
||||
// SpotEvent::Out { .. } => {
|
||||
// debug!(
|
||||
// "found changed type event {} idx {} seq_num {} header seq num {} old seq num {}",
|
||||
// mkt_pk_string, idx, seq_num, header_seq_num, prev_seq_num
|
||||
// );
|
||||
|
||||
// metric_events_change.increment();
|
||||
|
||||
// // publish new fill and record in checkpoint
|
||||
// fill_update_sender
|
||||
// .try_send(FillEventFilterMessage::Update(FillUpdate {
|
||||
// slot,
|
||||
// write_version,
|
||||
// event: fill.clone(),
|
||||
// status: FillUpdateStatus::New,
|
||||
// market_key: mkt_pk_string.clone(),
|
||||
// market_name: mkt.1.name.clone(),
|
||||
// }))
|
||||
// .unwrap(); // TODO: use anyhow to bubble up error
|
||||
// checkpoint.push(fill);
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// _ => continue,
|
||||
// }
|
||||
// }
|
||||
|
||||
// // in case queue size shrunk due to a fork we need revoke all previous fills
|
||||
// for seq_num in header_seq_num..prev_seq_num {
|
||||
// let idx = (seq_num % MAX_NUM_EVENTS as u64) as usize;
|
||||
// let old_event_view = prev_events[idx].as_view().unwrap();
|
||||
// debug!(
|
||||
// "found dropped event {} idx {} seq_num {} header seq num {} old seq num {}",
|
||||
// mkt_pk_string, idx, seq_num, header_seq_num, prev_seq_num
|
||||
// );
|
||||
|
||||
// metric_events_drop.increment();
|
||||
|
||||
// match old_event_view {
|
||||
// SpotEvent::Fill { .. } => {
|
||||
// let old_fill = FillEvent::new_from_spot(old_event_view, timestamp, seq_num, &mkt.1);
|
||||
// fill_update_sender
|
||||
// .try_send(FillEventFilterMessage::Update(FillUpdate {
|
||||
// slot,
|
||||
// event: old_fill,
|
||||
// write_version,
|
||||
// status: FillUpdateStatus::Revoke,
|
||||
// market_key: mkt_pk_string.clone(),
|
||||
// market_name: mkt.1.name.clone(),
|
||||
// }))
|
||||
// .unwrap(); // TODO: use anyhow to bubble up error
|
||||
// }
|
||||
// SpotEvent::Out { .. } => continue,
|
||||
// }
|
||||
// }
|
||||
|
||||
// fill_update_sender
|
||||
// .try_send(FillEventFilterMessage::Checkpoint(FillCheckpoint {
|
||||
// slot,
|
||||
// write_version,
|
||||
// events: checkpoint,
|
||||
// market: mkt_pk_string,
|
||||
// queue: evq_pk_string,
|
||||
// }))
|
||||
// .unwrap()
|
||||
}
|
||||
|
||||
/// Wires up the fill-event filter: spawns a task that consumes account writes
/// and slot updates, diffs each market's event queue against a cached previous
/// snapshot, and emits `FillEventFilterMessage`s (updates and checkpoints).
///
/// Returns `(account_write_sender, slot_update_sender, fill_update_receiver)`:
/// the caller feeds chain data into the first two channels and consumes fill
/// messages from the third. The spawned task stops once `exit` is set.
pub async fn init(
    perp_market_configs: Vec<(Pubkey, MarketConfig)>,
    spot_market_configs: Vec<(Pubkey, MarketConfig)>,
    metrics_sender: Metrics,
    exit: Arc<AtomicBool>,
) -> anyhow::Result<(
    async_channel::Sender<AccountWrite>,
    async_channel::Sender<SlotUpdate>,
    async_channel::Receiver<FillEventFilterMessage>,
)> {
    let metrics_sender = metrics_sender;

    // Per-pipeline counters; the *_serum variants are passed to the (currently
    // stubbed) spot handler, the others to the perp handler.
    let mut metric_events_new =
        metrics_sender.register_u64("fills_feed_events_new".into(), MetricType::Counter);
    let mut metric_events_new_serum =
        metrics_sender.register_u64("fills_feed_events_new_serum".into(), MetricType::Counter);
    let mut metric_events_change =
        metrics_sender.register_u64("fills_feed_events_change".into(), MetricType::Counter);
    let mut metric_events_change_serum =
        metrics_sender.register_u64("fills_feed_events_change_serum".into(), MetricType::Counter);
    let mut metrics_events_drop =
        metrics_sender.register_u64("fills_feed_events_drop".into(), MetricType::Counter);
    let mut metrics_events_drop_serum =
        metrics_sender.register_u64("fills_feed_events_drop_serum".into(), MetricType::Counter);
    let mut metrics_head_update =
        metrics_sender.register_u64("fills_feed_head_update".into(), MetricType::Counter);

    // The actual message may want to also contain a retry count, if it self-reinserts on failure?
    let (account_write_queue_sender, account_write_queue_receiver) =
        async_channel::unbounded::<AccountWrite>();

    // Slot updates flowing from the outside into the single processing thread. From
    // there they'll flow into the postgres sending thread.
    let (slot_queue_sender, slot_queue_receiver) = async_channel::unbounded::<SlotUpdate>();

    // Fill updates can be consumed by client connections, they contain all fills for all markets
    let (fill_update_sender, fill_update_receiver) =
        async_channel::unbounded::<FillEventFilterMessage>();

    let account_write_queue_receiver_c = account_write_queue_receiver;

    // Per-queue caches of the previously seen state, keyed by the event queue
    // pubkey string; these are what publish_changes_* diff the new state against.
    let mut chain_cache = ChainData::new();
    let mut chain_data_metrics = ChainDataMetrics::new(&metrics_sender);
    let mut perp_events_cache: HashMap<String, EventQueueEvents> = HashMap::new();
    let mut serum_events_cache: HashMap<String, Vec<serum_dex::state::Event>> = HashMap::new();
    let mut seq_num_cache = HashMap::new();
    let mut head_cache = HashMap::new();
    // (slot, write_version) of the last processed account state per queue.
    let mut last_evq_versions = HashMap::<String, (u64, u64)>::new();

    let all_market_configs = [perp_market_configs.clone(), spot_market_configs.clone()].concat();
    let perp_queue_pks: Vec<Pubkey> = perp_market_configs
        .iter()
        .map(|x| x.1.event_queue)
        .collect();
    let spot_queue_pks: Vec<Pubkey> = spot_market_configs
        .iter()
        .map(|x| x.1.event_queue)
        .collect();
    // Used to quickly drop account writes that are not event queues we track.
    let all_queue_pks: HashSet<Pubkey> =
        HashSet::from_iter([perp_queue_pks, spot_queue_pks].concat());

    // update handling task, reads both slot and account updates
    tokio::spawn(async move {
        loop {
            if exit.load(Ordering::Relaxed) {
                warn!("shutting down fill_event_filter...");
                break;
            }
            // NOTE(review): both an Ok and an Err branch call recv() on each
            // channel here. Presumably the Err arms are only meant to fire once a
            // channel is closed, but with select! a non-matching completion
            // disables the branch and drops its value — worth confirming that a
            // message cannot be claimed (and lost) by the Err-pattern future.
            tokio::select! {
                Ok(account_write) = account_write_queue_receiver_c.recv() => {
                    if !all_queue_pks.contains(&account_write.pubkey) {
                        continue;
                    }

                    chain_cache.update_account(
                        account_write.pubkey,
                        AccountData {
                            slot: account_write.slot,
                            write_version: account_write.write_version,
                            account: WritableAccount::create(
                                account_write.lamports,
                                account_write.data.clone(),
                                account_write.owner,
                                account_write.executable,
                                account_write.rent_epoch as Epoch,
                            ),
                        },
                    );
                }
                Ok(slot_update) = slot_queue_receiver.recv() => {
                    chain_cache.update_slot(SlotData {
                        slot: slot_update.slot,
                        parent: slot_update.parent,
                        status: slot_update.status,
                        chain: 0,
                    });

                }
                Err(e) = slot_queue_receiver.recv() => {
                    warn!("slot update channel err {:?}", e);
                }
                Err(e) = account_write_queue_receiver_c.recv() => {
                    warn!("write update channel err {:?}", e);
                }
            }

            chain_data_metrics.report(&chain_cache);

            // After every update, re-scan all tracked markets for changed queues.
            for mkt in all_market_configs.iter() {
                let evq_pk = mkt.1.event_queue;
                let evq_pk_string = evq_pk.to_string();
                let last_evq_version = last_evq_versions
                    .get(&mkt.1.event_queue.to_string())
                    .unwrap_or(&(0, 0));

                match chain_cache.account(&evq_pk) {
                    Ok(account_info) => {
                        // only process if the account state changed
                        let evq_version = (account_info.slot, account_info.write_version);
                        if evq_version == *last_evq_version {
                            continue;
                        }
                        if evq_version.0 < last_evq_version.0 {
                            debug!("evq version slot was old");
                            continue;
                        }
                        if evq_version.0 == last_evq_version.0 && evq_version.1 < last_evq_version.1
                        {
                            info!("evq version slot was same and write version was old");
                            continue;
                        }
                        last_evq_versions.insert(evq_pk_string.clone(), evq_version);

                        let account = &account_info.account;
                        // Owned by the mango v4 program => perp queue; otherwise
                        // treated as a serum spot queue.
                        let is_perp = mango_v4::check_id(account.owner());
                        if is_perp {
                            let event_queue =
                                EventQueue::try_deserialize(account.data().borrow_mut()).unwrap();

                            // Only diff once we have a complete previous snapshot
                            // (seq_num, head and events); otherwise just prime the caches.
                            match (
                                seq_num_cache.get(&evq_pk_string),
                                head_cache.get(&evq_pk_string),
                            ) {
                                (Some(prev_seq_num), Some(prev_head)) => match perp_events_cache
                                    .get(&evq_pk_string)
                                {
                                    Some(prev_events) => publish_changes_perp(
                                        account_info.slot,
                                        account_info.write_version,
                                        mkt,
                                        &event_queue.header,
                                        &event_queue.buf,
                                        *prev_seq_num,
                                        *prev_head,
                                        prev_events,
                                        &fill_update_sender,
                                        &mut metric_events_new,
                                        &mut metric_events_change,
                                        &mut metrics_events_drop,
                                        &mut metrics_head_update,
                                    ),
                                    _ => {
                                        info!("perp_events_cache could not find {}", evq_pk_string)
                                    }
                                },
                                _ => info!("seq_num/head cache could not find {}", evq_pk_string),
                            }

                            seq_num_cache.insert(evq_pk_string.clone(), event_queue.header.seq_num);
                            head_cache.insert(evq_pk_string.clone(), event_queue.header.head());
                            perp_events_cache.insert(evq_pk_string.clone(), event_queue.buf);
                        } else {
                            // Strip 5 leading / 7 trailing bytes before casting —
                            // presumably serum's account padding; confirm against the
                            // serum-dex account layout.
                            let inner_data = &account.data()[5..&account.data().len() - 7];
                            let header_span = std::mem::size_of::<SerumEventQueueHeader>();
                            let header: SerumEventQueueHeader =
                                *bytemuck::from_bytes(&inner_data[..header_span]);
                            let seq_num = header.seq_num;
                            let count = header.count;
                            let rest = &inner_data[header_span..];
                            // Truncate to a whole number of Event structs so cast_slice
                            // cannot fail on a trailing partial record.
                            let slop = rest.len() % std::mem::size_of::<serum_dex::state::Event>();
                            let new_len = rest.len() - slop;
                            let events = &rest[..new_len];
                            debug!("evq {} header_span {} header_seq_num {} header_count {} inner_len {} events_len {} sizeof Event {}", evq_pk_string, header_span, seq_num, count, inner_data.len(), events.len(), std::mem::size_of::<serum_dex::state::Event>());
                            let events: &[serum_dex::state::Event] = bytemuck::cast_slice(events);

                            match seq_num_cache.get(&evq_pk_string) {
                                Some(prev_seq_num) => {
                                    match serum_events_cache.get(&evq_pk_string) {
                                        Some(prev_events) => publish_changes_serum(
                                            account_info.slot,
                                            account_info.write_version,
                                            mkt,
                                            &header,
                                            events,
                                            *prev_seq_num,
                                            prev_events,
                                            &fill_update_sender,
                                            &mut metric_events_new_serum,
                                            &mut metric_events_change_serum,
                                            &mut metrics_events_drop_serum,
                                        ),
                                        _ => {
                                            debug!(
                                                "serum_events_cache could not find {}",
                                                evq_pk_string
                                            )
                                        }
                                    }
                                }
                                _ => debug!("seq_num_cache could not find {}", evq_pk_string),
                            }

                            seq_num_cache.insert(evq_pk_string.clone(), seq_num);
                            head_cache.insert(evq_pk_string.clone(), header.head as usize);
                            serum_events_cache.insert(evq_pk_string.clone(), events.to_vec());
                        }
                    }
                    Err(_) => debug!("chain_cache could not find {}", mkt.1.event_queue),
                }
            }
        }
    });

    Ok((
        account_write_queue_sender,
        slot_queue_sender,
        fill_update_receiver,
    ))
}
|
|
@ -1,275 +0,0 @@
|
|||
use chrono::{TimeZone, Utc};
|
||||
use log::*;
|
||||
use mango_feeds_lib::{
|
||||
metrics::{MetricType, MetricU64, Metrics},
|
||||
*,
|
||||
};
|
||||
use native_tls::{Certificate, Identity, TlsConnector};
|
||||
use postgres_native_tls::MakeTlsConnector;
|
||||
use postgres_query::Caching;
|
||||
use service_mango_fills::*;
|
||||
use std::{
|
||||
env, fs,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
},
|
||||
time::Duration,
|
||||
};
|
||||
use tokio_postgres::Client;
|
||||
|
||||
/// Opens a (re)connecting postgres client factory.
///
/// Establishes an initial connection (failing the whole call if it cannot), then
/// spawns a task that pushes `Some(client)` into the returned channel whenever a
/// connection is live and `None` when it drops, reconnecting with a sleep of
/// `retry_connection_sleep_secs` between attempts until `exit` is set.
/// `metric_live` gauges currently-alive connections, `metric_retries` counts
/// reconnect cycles.
///
/// Config values starting with `$` are treated as names of environment variables
/// holding the actual value (base64-encoded for the TLS material).
async fn postgres_connection(
    config: &PostgresConfig,
    metric_retries: MetricU64,
    metric_live: MetricU64,
    exit: Arc<AtomicBool>,
) -> anyhow::Result<async_channel::Receiver<Option<tokio_postgres::Client>>> {
    let (tx, rx) = async_channel::unbounded();

    // openssl pkcs12 -export -in client.cer -inkey client-key.cer -out client.pks
    // base64 -i ca.cer -o ca.cer.b64 && base64 -i client.pks -o client.pks.b64
    // fly secrets set PG_CA_CERT=- < ./ca.cer.b64 -a mango-fills
    // fly secrets set PG_CLIENT_KEY=- < ./client.pks.b64 -a mango-fills
    let tls = match &config.tls {
        Some(tls) => {
            use base64::{engine::general_purpose, Engine as _};
            // NOTE(review): chars().next().unwrap() panics on an empty path string.
            let ca_cert = match &tls.ca_cert_path.chars().next().unwrap() {
                '$' => general_purpose::STANDARD
                    .decode(
                        env::var(&tls.ca_cert_path[1..])
                            .expect("reading client cert from env")
                            .into_bytes(),
                    )
                    .expect("decoding client cert"),
                _ => fs::read(&tls.ca_cert_path).expect("reading client cert from file"),
            };
            let client_key = match &tls.client_key_path.chars().next().unwrap() {
                '$' => general_purpose::STANDARD
                    .decode(
                        env::var(&tls.client_key_path[1..])
                            .expect("reading client key from env")
                            .into_bytes(),
                    )
                    .expect("decoding client key"),
                _ => fs::read(&tls.client_key_path).expect("reading client key from file"),
            };
            MakeTlsConnector::new(
                TlsConnector::builder()
                    .add_root_certificate(Certificate::from_pem(&ca_cert)?)
                    // client identity is a PKCS#12 bundle with a fixed passphrase
                    .identity(Identity::from_pkcs12(&client_key, "pass")?)
                    .danger_accept_invalid_certs(config.allow_invalid_certs)
                    .build()?,
            )
        }
        None => MakeTlsConnector::new(
            TlsConnector::builder()
                .danger_accept_invalid_certs(config.allow_invalid_certs)
                .build()?,
        ),
    };

    let config = config.clone();
    let connection_string = match &config.connection_string.chars().next().unwrap() {
        '$' => {
            env::var(&config.connection_string[1..]).expect("reading connection string from env")
        }
        _ => config.connection_string.clone(),
    };
    // First connection is made eagerly so init fails fast on bad config;
    // the reconnect loop below consumes it via initial.take().
    let mut initial = Some(tokio_postgres::connect(&connection_string, tls.clone()).await?);
    let mut metric_retries = metric_retries;
    let mut metric_live = metric_live;
    tokio::spawn(async move {
        loop {
            // don't acquire a new connection if we're shutting down
            if exit.load(Ordering::Relaxed) {
                warn!("shutting down fill_event_postgres_target...");
                break;
            }
            let (client, connection) = match initial.take() {
                Some(v) => v,
                None => {
                    let result = tokio_postgres::connect(&connection_string, tls.clone()).await;
                    match result {
                        Ok(v) => v,
                        Err(err) => {
                            warn!("could not connect to postgres: {:?}", err);
                            tokio::time::sleep(Duration::from_secs(
                                config.retry_connection_sleep_secs,
                            ))
                            .await;
                            continue;
                        }
                    }
                }
            };

            tx.send(Some(client)).await.expect("send success");
            metric_live.increment();

            // Drive the connection; this future resolves when the connection dies.
            let result = connection.await;

            metric_retries.increment();
            metric_live.decrement();

            // Signal consumers that the previous client is gone before retrying.
            tx.send(None).await.expect("send success");
            warn!("postgres connection error: {:?}", result);
            tokio::time::sleep(Duration::from_secs(config.retry_connection_sleep_secs)).await;
        }
    });

    Ok(rx)
}
|
||||
|
||||
/// Returns the most recent usable postgres client, refreshing `client` from `rx`
/// (fed by `postgres_connection`) when a reconnect happened.
///
/// Drains any pending `Some`/`None` notifications so the newest state wins, and
/// waits for a fresh client while disconnected. If no client arrives within
/// `fatal_connection_timeout_secs` the whole process exits(1) — this worker
/// cannot make progress without a database connection.
async fn update_postgres_client<'a>(
    client: &'a mut Option<postgres_query::Caching<tokio_postgres::Client>>,
    rx: &async_channel::Receiver<Option<tokio_postgres::Client>>,
    config: &PostgresConfig,
) -> &'a postgres_query::Caching<tokio_postgres::Client> {
    // get the most recent client, waiting if there's a disconnect
    while !rx.is_empty() || client.is_none() {
        tokio::select! {
            client_raw_opt = rx.recv() => {
                // None marks a dropped connection; Some(raw) is wrapped in the
                // statement-caching adapter.
                *client = client_raw_opt.expect("not closed").map(postgres_query::Caching::new);
            },
            _ = tokio::time::sleep(Duration::from_secs(config.fatal_connection_timeout_secs)) => {
                error!("waited too long for new postgres client");
                std::process::exit(1);
            },
        }
    }
    client.as_ref().expect("must contain value")
}
|
||||
|
||||
/// Persists one fill update: inserts the fill into
/// `transactions_v4.perp_fills_feed_events` for a `New` status, deletes the
/// matching `(market, seq_num)` row otherwise (i.e. for `Revoke`).
///
/// Inserts are idempotent via `ON CONFLICT ... DO NOTHING`, so retries of the
/// same batch are safe.
///
/// # Errors
/// Propagates any query execution error from postgres.
async fn process_update(client: &Caching<Client>, update: &FillUpdate) -> anyhow::Result<()> {
    let market = &update.market_key;
    let seq_num = update.event.seq_num as i64;
    // NOTE(review): timestamp_opt(..).unwrap() panics on an out-of-range
    // timestamp; presumably event timestamps are always valid — confirm.
    let fill_timestamp = Utc.timestamp_opt(update.event.timestamp as i64, 0).unwrap();
    let price = update.event.price;
    let quantity = update.event.quantity;
    let slot = update.slot as i64;
    let write_version = update.write_version as i64;

    if update.status == FillUpdateStatus::New {
        // insert new events
        let query = postgres_query::query!(
            "INSERT INTO transactions_v4.perp_fills_feed_events
            (market, seq_num, fill_timestamp, price,
            quantity, slot, write_version)
            VALUES
            ($market, $seq_num, $fill_timestamp, $price,
            $quantity, $slot, $write_version)
            ON CONFLICT (market, seq_num) DO NOTHING",
            market,
            seq_num,
            fill_timestamp,
            price,
            quantity,
            slot,
            write_version,
        );
        let _ = query.execute(&client).await?;
    } else {
        // delete revoked events
        let query = postgres_query::query!(
            "DELETE FROM transactions_v4.perp_fills_feed_events
            WHERE market=$market
            AND seq_num=$seq_num",
            market,
            seq_num,
        );
        let _ = query.execute(&client).await?;
    }

    Ok(())
}
|
||||
|
||||
/// Starts the postgres fill-update sink.
///
/// Spawns `connection_count` worker tasks, each with its own reconnecting
/// postgres connection, all pulling from a single bounded channel of
/// `FillUpdate`s. Updates are written in batches of up to `max_batch_size`;
/// failed writes are retried up to `retry_query_max_count` times, after which
/// the process exits(1).
///
/// Returns the sender the fill pipeline pushes updates into.
pub async fn init(
    config: &PostgresConfig,
    metrics_sender: Metrics,
    exit: Arc<AtomicBool>,
) -> anyhow::Result<async_channel::Sender<FillUpdate>> {
    // The actual message may want to also contain a retry count, if it self-reinserts on failure?
    let (fill_update_queue_sender, fill_update_queue_receiver) =
        async_channel::bounded::<FillUpdate>(config.max_queue_size);

    let metric_con_retries = metrics_sender.register_u64(
        "fills_postgres_connection_retries".into(),
        MetricType::Counter,
    );
    let metric_con_live =
        metrics_sender.register_u64("fills_postgres_connections_alive".into(), MetricType::Gauge);

    // postgres fill update sending worker threads
    for _ in 0..config.connection_count {
        let postgres_account_writes = postgres_connection(
            config,
            metric_con_retries.clone(),
            metric_con_live.clone(),
            exit.clone(),
        )
        .await?;
        let fill_update_queue_receiver_c = fill_update_queue_receiver.clone();
        let config = config.clone();
        let mut metric_retries =
            metrics_sender.register_u64("fills_postgres_retries".into(), MetricType::Counter);

        tokio::spawn(async move {
            let mut client_opt = None;
            loop {
                // Retrieve up to batch_size updates: block for the first one,
                // then opportunistically drain whatever else is already queued.
                let mut batch = Vec::new();
                batch.push(
                    fill_update_queue_receiver_c
                        .recv()
                        .await
                        .expect("sender must stay alive"),
                );
                while batch.len() < config.max_batch_size {
                    match fill_update_queue_receiver_c.try_recv() {
                        Ok(update) => batch.push(update),
                        Err(async_channel::TryRecvError::Empty) => break,
                        Err(async_channel::TryRecvError::Closed) => {
                            panic!("sender must stay alive")
                        }
                    };
                }

                info!(
                    "updates, batch {}, channel size {}",
                    batch.len(),
                    fill_update_queue_receiver_c.len(),
                );

                let mut error_count = 0;
                loop {
                    let client =
                        update_postgres_client(&mut client_opt, &postgres_account_writes, &config)
                            .await;
                    // Issue all writes of the batch concurrently on the one client.
                    let mut results = futures::future::join_all(
                        batch.iter().map(|update| process_update(client, update)),
                    )
                    .await;
                    // Keep only the failed updates for the retry round
                    // (results and batch are index-aligned).
                    let mut iter = results.iter();
                    batch.retain(|_| iter.next().unwrap().is_err());
                    if !batch.is_empty() {
                        metric_retries.add(batch.len() as u64);
                        error_count += 1;
                        if error_count - 1 < config.retry_query_max_count {
                            results.retain(|r| r.is_err());
                            warn!("failed to process fill update, retrying: {:?}", results);
                            tokio::time::sleep(Duration::from_secs(config.retry_query_sleep_secs))
                                .await;
                            continue;
                        } else {
                            error!("failed to process account write, exiting");
                            std::process::exit(1);
                        }
                    };
                    break;
                }
            }
        });
    }

    Ok(fill_update_queue_sender)
}
|
|
@ -1,338 +0,0 @@
|
|||
use std::convert::{identity, TryFrom};
|
||||
|
||||
use anchor_lang::prelude::Pubkey;
|
||||
use bytemuck::cast_slice;
|
||||
use chrono::{TimeZone, Utc};
|
||||
use mango_feeds_lib::{base_lots_to_ui_perp, price_lots_to_ui_perp, MarketConfig, OrderbookSide};
|
||||
use mango_v4::state::{FillEvent as PerpFillEvent, Side};
|
||||
use serde::{ser::SerializeStruct, Deserialize, Serialize, Serializer};
|
||||
use serum_dex::state::EventView as SpotEvent;
|
||||
|
||||
/// Lifecycle marker attached to a published fill update.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum FillUpdateStatus {
    /// A newly observed fill that consumers should add.
    New,
    /// A previously published fill that is no longer valid and should be
    /// removed (e.g. the event queue was rewritten after a fork).
    Revoke,
}
|
||||
|
||||
impl Serialize for FillUpdateStatus {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
match *self {
|
||||
FillUpdateStatus::New => {
|
||||
serializer.serialize_unit_variant("FillUpdateStatus", 0, "new")
|
||||
}
|
||||
FillUpdateStatus::Revoke => {
|
||||
serializer.serialize_unit_variant("FillUpdateStatus", 1, "revoke")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Market family a fill originates from.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum FillEventType {
    /// Serum spot market fill (built via `FillEvent::new_from_spot`).
    Spot,
    /// Mango v4 perp market fill (built via `FillEvent::new_from_perp`).
    Perp,
}
|
||||
|
||||
impl Serialize for FillEventType {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
match *self {
|
||||
FillEventType::Spot => serializer.serialize_unit_variant("FillEventType", 0, "spot"),
|
||||
FillEventType::Perp => serializer.serialize_unit_variant("FillEventType", 1, "perp"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Normalized fill shared by the perp and spot pipelines, constructed via
/// `FillEvent::new_from_perp` / `FillEvent::new_from_spot` and serialized to
/// camelCase JSON by the manual `Serialize` impl below.
#[derive(Clone, Debug)]
pub struct FillEvent {
    /// Whether this fill came from a spot (serum) or perp (mango v4) market.
    pub event_type: FillEventType,
    /// Maker account address, stringified.
    pub maker: String,
    /// Taker account address, stringified.
    pub taker: String,
    pub taker_side: OrderbookSide,
    pub timestamp: u64, // make all strings
    /// Event-queue sequence number; used as part of the (market, seq_num)
    /// identity when persisting/revoking fills.
    pub seq_num: u64,
    pub maker_client_order_id: u64,
    pub taker_client_order_id: u64,
    pub maker_fee: f32,
    pub taker_fee: f32,
    /// Price converted from lots to UI units (see `price_lots_to_ui_perp`).
    pub price: f64,
    /// Quantity converted from base lots to UI units (see `base_lots_to_ui_perp`).
    pub quantity: f64,
}
|
||||
|
||||
impl Serialize for FillEvent {
    // Manual impl so field names are emitted in camelCase and the timestamp is
    // rendered as an RFC 3339 string instead of a raw u64.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // 12 = number of serialize_field calls below; keep in sync when adding fields.
        let mut state = serializer.serialize_struct("FillEvent", 12)?;
        state.serialize_field("eventType", &self.event_type)?;
        state.serialize_field("maker", &self.maker)?;
        state.serialize_field("taker", &self.taker)?;
        state.serialize_field("takerSide", &self.taker_side)?;
        state.serialize_field(
            "timestamp",
            // NOTE(review): timestamp_opt(..).unwrap() panics if the stored
            // timestamp is out of chrono's range — presumably never produced
            // on-chain, but worth confirming.
            &Utc.timestamp_opt(self.timestamp as i64, 0)
                .unwrap()
                .to_rfc3339(),
        )?;
        state.serialize_field("seqNum", &self.seq_num)?;
        state.serialize_field("makerClientOrderId", &self.maker_client_order_id)?;
        state.serialize_field("takerClientOrderId", &self.taker_client_order_id)?; // make string
        state.serialize_field("makerFee", &self.maker_fee)?;
        state.serialize_field("takerFee", &self.taker_fee)?;
        state.serialize_field("price", &self.price)?;
        state.serialize_field("quantity", &self.quantity)?;
        state.end()
    }
}
|
||||
|
||||
impl FillEvent {
    /// Builds a `FillEvent` from a mango-v4 perp fill, converting lot-based
    /// price/quantity to UI units via the market's decimals and lot sizes.
    pub fn new_from_perp(event: PerpFillEvent, config: &MarketConfig) -> Self {
        let taker_side = match event.taker_side() {
            Side::Ask => OrderbookSide::Ask,
            Side::Bid => OrderbookSide::Bid,
        };
        let price = price_lots_to_ui_perp(
            event.price,
            config.base_decimals,
            config.quote_decimals,
            config.base_lot_size,
            config.quote_lot_size,
        );
        let quantity =
            base_lots_to_ui_perp(event.quantity, config.base_decimals, config.base_lot_size);
        FillEvent {
            event_type: FillEventType::Perp,
            maker: event.maker.to_string(),
            taker: event.taker.to_string(),
            taker_side,
            timestamp: event.timestamp,
            seq_num: event.seq_num,
            maker_client_order_id: event.maker_client_order_id,
            taker_client_order_id: event.taker_client_order_id,
            maker_fee: event.maker_fee,
            taker_fee: event.taker_fee,
            price,
            quantity,
        }
    }

    /// Builds a `FillEvent` from the maker/taker pair of serum `SpotEvent`s
    /// that make up one fill.
    ///
    /// Panics if either event is not `SpotEvent::Fill` or carries a side byte
    /// other than 0/1.
    pub fn new_from_spot(
        maker_event: SpotEvent,
        taker_event: SpotEvent,
        timestamp: u64,
        seq_num: u64,
        config: &MarketConfig,
    ) -> Self {
        match (maker_event, taker_event) {
            (
                SpotEvent::Fill {
                    side: maker_side,
                    client_order_id: maker_client_order_id,
                    native_qty_paid: maker_native_qty_paid,
                    native_fee_or_rebate: maker_native_fee_or_rebate,
                    native_qty_received: maker_native_qty_received,
                    owner: maker_owner,
                    ..
                },
                SpotEvent::Fill {
                    side: taker_side,
                    client_order_id: taker_client_order_id,
                    native_fee_or_rebate: taker_native_fee_or_rebate,
                    owner: taker_owner,
                    ..
                },
            ) => {
                // Serum encodes sides as a raw byte: 0 = bid, 1 = ask.
                let maker_side = match maker_side as u8 {
                    0 => OrderbookSide::Bid,
                    1 => OrderbookSide::Ask,
                    _ => panic!("invalid side"),
                };
                let taker_side = match taker_side as u8 {
                    0 => OrderbookSide::Bid,
                    1 => OrderbookSide::Ask,
                    _ => panic!("invalid side"),
                };
                // Absent client order ids are normalized to 0.
                let maker_client_order_id: u64 = match maker_client_order_id {
                    Some(id) => id.into(),
                    None => 0u64,
                };
                let taker_client_order_id: u64 = match taker_client_order_id {
                    Some(id) => id.into(),
                    None => 0u64,
                };

                // Native-unit -> UI-unit scale factors.
                let base_multiplier = 10u64.pow(config.base_decimals.into());
                let quote_multiplier = 10u64.pow(config.quote_decimals.into());

                // Recover UI price/quantity from the maker's native amounts.
                // Maker bid: maker paid quote, received base; the fee is added
                // back so the price reflects the pre-fee level.
                let (price, quantity) = match maker_side {
                    OrderbookSide::Bid => {
                        let price_before_fees = maker_native_qty_paid + maker_native_fee_or_rebate;

                        let top = price_before_fees * base_multiplier;
                        let bottom = quote_multiplier * maker_native_qty_received;
                        let price = top as f64 / bottom as f64;
                        let quantity = maker_native_qty_received as f64 / base_multiplier as f64;
                        (price, quantity)
                    }
                    OrderbookSide::Ask => {
                        // Maker ask: maker paid base, received quote; the
                        // rebate is removed to get the pre-fee quote amount.
                        let price_before_fees =
                            maker_native_qty_received - maker_native_fee_or_rebate;

                        let top = price_before_fees * base_multiplier;
                        let bottom = quote_multiplier * maker_native_qty_paid;
                        let price = top as f64 / bottom as f64;
                        let quantity = maker_native_qty_paid as f64 / base_multiplier as f64;
                        (price, quantity)
                    }
                };

                // Fees expressed in UI quote units.
                let maker_fee = maker_native_fee_or_rebate as f32 / quote_multiplier as f32;
                let taker_fee = taker_native_fee_or_rebate as f32 / quote_multiplier as f32;

                FillEvent {
                    event_type: FillEventType::Spot,
                    // Reinterpret the owner word array's bytes as a 32-byte
                    // pubkey (assumes the serum [u64; 4] layout — cast_slice).
                    maker: Pubkey::try_from(cast_slice(&identity(maker_owner) as &[_]))
                        .unwrap()
                        .to_string(),
                    taker: Pubkey::try_from(cast_slice(&identity(taker_owner) as &[_]))
                        .unwrap()
                        .to_string(),
                    taker_side,
                    timestamp,
                    seq_num,
                    maker_client_order_id,
                    taker_client_order_id,
                    taker_fee,
                    maker_fee,
                    price,
                    quantity,
                }
            }
            (_, _) => {
                panic!("Can't build FillEvent from SpotEvent::Out")
            }
        }
    }
}
|
||||
|
||||
/// A single fill pushed to websocket subscribers, tagged with the slot and
/// write version at which it was observed.
#[derive(Clone, Debug)]
pub struct FillUpdate {
    pub event: FillEvent,
    // Status of this update (FillUpdateStatus is declared elsewhere).
    pub status: FillUpdateStatus,
    // Market pubkey (base58) and human-readable name.
    pub market_key: String,
    pub market_name: String,
    pub slot: u64,
    pub write_version: u64,
}
|
||||
|
||||
impl Serialize for FillUpdate {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let mut state = serializer.serialize_struct("FillUpdate", 6)?;
|
||||
state.serialize_field("event", &self.event)?;
|
||||
state.serialize_field("marketKey", &self.market_key)?;
|
||||
state.serialize_field("marketName", &self.market_name)?;
|
||||
state.serialize_field("status", &self.status)?;
|
||||
state.serialize_field("slot", &self.slot)?;
|
||||
state.serialize_field("writeVersion", &self.write_version)?;
|
||||
|
||||
state.end()
|
||||
}
|
||||
}
|
||||
|
||||
/// Notifies subscribers that an event queue's head moved, carrying both the
/// new and previous head index/sequence number.
#[derive(Clone, Debug)]
pub struct HeadUpdate {
    pub head: usize,
    pub prev_head: usize,
    pub head_seq_num: u64,
    pub prev_head_seq_num: u64,
    pub status: FillUpdateStatus,
    // Market pubkey (base58) and human-readable name.
    pub market_key: String,
    pub market_name: String,
    pub slot: u64,
    pub write_version: u64,
}
|
||||
impl Serialize for HeadUpdate {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let mut state = serializer.serialize_struct("HeadUpdate", 6)?;
|
||||
state.serialize_field("head", &self.head)?;
|
||||
state.serialize_field("previousHead", &self.prev_head)?;
|
||||
state.serialize_field("headSeqNum", &self.head_seq_num)?;
|
||||
state.serialize_field("previousHeadSeqNum", &self.prev_head_seq_num)?;
|
||||
state.serialize_field("marketKey", &self.market_key)?;
|
||||
state.serialize_field("marketName", &self.market_name)?;
|
||||
state.serialize_field("status", &self.status)?;
|
||||
state.serialize_field("slot", &self.slot)?;
|
||||
state.serialize_field("writeVersion", &self.write_version)?;
|
||||
|
||||
state.end()
|
||||
}
|
||||
}
|
||||
|
||||
/// Snapshot of the currently-known fills for one market's event queue; sent
/// to a client right after it subscribes so it has a baseline.
#[derive(Clone, Debug)]
pub struct FillCheckpoint {
    // Market pubkey (base58) and its event queue pubkey.
    pub market: String,
    pub queue: String,
    pub events: Vec<FillEvent>,
    pub slot: u64,
    pub write_version: u64,
}
|
||||
|
||||
impl Serialize for FillCheckpoint {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let mut state = serializer.serialize_struct("FillCheckpoint", 3)?;
|
||||
state.serialize_field("events", &self.events)?;
|
||||
state.serialize_field("market", &self.market)?;
|
||||
state.serialize_field("queue", &self.queue)?;
|
||||
state.serialize_field("slot", &self.slot)?;
|
||||
state.serialize_field("write_version", &self.write_version)?;
|
||||
|
||||
state.end()
|
||||
}
|
||||
}
|
||||
|
||||
/// Messages emitted by the fill event filter towards the websocket layer.
pub enum FillEventFilterMessage {
    Update(FillUpdate),
    HeadUpdate(HeadUpdate),
    Checkpoint(FillCheckpoint),
}
|
||||
|
||||
/// Client-to-server websocket commands, dispatched on the JSON "command" tag.
#[derive(Clone, Debug, Deserialize)]
#[serde(tag = "command")]
pub enum Command {
    #[serde(rename = "subscribe")]
    Subscribe(SubscribeCommand),
    #[serde(rename = "unsubscribe")]
    Unsubscribe(UnsubscribeCommand),
    // Requests the market_id -> market_name map.
    #[serde(rename = "getMarkets")]
    GetMarkets,
}
|
||||
|
||||
/// Payload of the "subscribe" command. All filters are optional; the handler
/// treats a command with no market/account filter as a wildcard subscription.
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SubscribeCommand {
    // DEPRECATED single-market form; prefer `market_ids`.
    pub market_id: Option<String>,
    pub market_ids: Option<Vec<String>>,
    pub account_ids: Option<Vec<String>>,
    // Opt-in to HeadUpdate messages.
    pub head_updates: Option<bool>,
}
|
||||
|
||||
/// Payload of the "unsubscribe" command.
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct UnsubscribeCommand {
    pub market_id: String,
}
|
|
@ -1,642 +0,0 @@
|
|||
mod fill_event_filter;
|
||||
mod fill_event_postgres_target;
|
||||
|
||||
use anchor_client::{
|
||||
solana_sdk::{commitment_config::CommitmentConfig, signature::Keypair},
|
||||
Cluster,
|
||||
};
|
||||
use anchor_lang::prelude::Pubkey;
|
||||
use futures_channel::mpsc::{unbounded, UnboundedSender};
|
||||
use futures_util::{
|
||||
future::{self, Ready},
|
||||
pin_mut, SinkExt, StreamExt, TryStreamExt,
|
||||
};
|
||||
use log::*;
|
||||
use mango_feeds_lib::{
|
||||
grpc_plugin_source, metrics,
|
||||
metrics::{MetricType, MetricU64},
|
||||
websocket_source, EntityFilter, FilterConfig, MarketConfig, MetricsConfig, PostgresConfig,
|
||||
SourceConfig, StatusResponse,
|
||||
};
|
||||
use mango_v4_client::{Client, MangoGroupContext, TransactionBuilderConfig};
|
||||
use service_mango_fills::{Command, FillCheckpoint, FillEventFilterMessage, FillEventType};
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
env,
|
||||
fs::File,
|
||||
io::Read,
|
||||
net::SocketAddr,
|
||||
str::FromStr,
|
||||
sync::Arc,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Mutex,
|
||||
},
|
||||
time::Duration,
|
||||
};
|
||||
use tokio::{
|
||||
net::{TcpListener, TcpStream},
|
||||
pin, time,
|
||||
};
|
||||
use tokio_tungstenite::tungstenite::{protocol::Message, Error};
|
||||
|
||||
use serde::Deserialize;
|
||||
|
||||
type CheckpointMap = Arc<Mutex<HashMap<String, FillCheckpoint>>>;
|
||||
type PeerMap = Arc<Mutex<HashMap<SocketAddr, Peer>>>;
|
||||
|
||||
// jemalloc seems to be better at keeping the memory footprint reasonable over
|
||||
// longer periods of time
|
||||
#[global_allocator]
|
||||
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
|
||||
|
||||
/// Per-connection state: the outbound message channel plus the peer's
/// subscription filters.
#[derive(Clone, Debug)]
pub struct Peer {
    pub sender: UnboundedSender<Message>,
    // Market pubkeys (base58) the peer subscribed to.
    pub market_subscriptions: HashSet<String>,
    // Account pubkeys (base58) whose fills the peer wants.
    pub account_subscriptions: HashSet<String>,
    // Whether the peer opted into queue-head updates.
    pub head_updates: bool,
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn handle_connection_error(
|
||||
checkpoint_map: CheckpointMap,
|
||||
peer_map: PeerMap,
|
||||
market_ids: HashMap<String, String>,
|
||||
raw_stream: TcpStream,
|
||||
addr: SocketAddr,
|
||||
metrics_opened_connections: MetricU64,
|
||||
metrics_closed_connections: MetricU64,
|
||||
) {
|
||||
metrics_opened_connections.clone().increment();
|
||||
|
||||
let result = handle_connection(
|
||||
checkpoint_map,
|
||||
peer_map.clone(),
|
||||
market_ids,
|
||||
raw_stream,
|
||||
addr,
|
||||
)
|
||||
.await;
|
||||
if result.is_err() {
|
||||
error!("connection {} error {}", addr, result.unwrap_err());
|
||||
};
|
||||
|
||||
metrics_closed_connections.clone().increment();
|
||||
|
||||
peer_map.lock().unwrap().remove(&addr);
|
||||
}
|
||||
|
||||
/// Runs one websocket session: performs the handshake, registers the peer,
/// then concurrently processes inbound commands and forwards outbound
/// updates until either direction terminates.
async fn handle_connection(
    checkpoint_map: CheckpointMap,
    peer_map: PeerMap,
    market_ids: HashMap<String, String>,
    raw_stream: TcpStream,
    addr: SocketAddr,
) -> Result<(), Error> {
    info!("ws connected: {}", addr);
    let ws_stream = tokio_tungstenite::accept_async(raw_stream).await?;

    let (ws_tx, ws_rx) = ws_stream.split();

    // 1: publish channel in peer map
    let (chan_tx, chan_rx) = unbounded();
    {
        // Scoped so the map lock is released before any await below.
        peer_map.lock().unwrap().insert(
            addr,
            Peer {
                sender: chan_tx,
                market_subscriptions: HashSet::<String>::new(),
                account_subscriptions: HashSet::<String>::new(),
                head_updates: false,
            },
        );
    }

    // Inbound half: dispatch text commands, answer pings, ignore the rest.
    let receive_commands = ws_rx.try_for_each(|msg| match msg {
        Message::Text(_) => handle_commands(
            addr,
            msg,
            peer_map.clone(),
            checkpoint_map.clone(),
            market_ids.clone(),
        ),
        Message::Ping(_) => {
            let peers = peer_map.clone();
            let mut peers_lock = peers.lock().unwrap();
            let peer = peers_lock.get_mut(&addr).expect("peer should be in map");
            peer.sender
                .unbounded_send(Message::Pong(Vec::new()))
                .unwrap();
            future::ready(Ok(()))
        }
        _ => future::ready(Ok(())),
    });
    // Outbound half: drain the peer's channel into the websocket sink.
    let forward_updates = chan_rx.map(Ok).forward(ws_tx);

    pin_mut!(receive_commands, forward_updates);
    // Whichever half finishes first ends the session.
    future::select(receive_commands, forward_updates).await;

    peer_map.lock().unwrap().remove(&addr);
    info!("ws disconnected: {}", &addr);
    Ok(())
}
|
||||
|
||||
/// Parses a text message from a peer and executes the contained command,
/// replying through the peer's channel with `StatusResponse`s and — on a
/// fresh market subscription — the current `FillCheckpoint` for that market.
fn handle_commands(
    addr: SocketAddr,
    msg: Message,
    peer_map: PeerMap,
    checkpoint_map: CheckpointMap,
    market_ids: HashMap<String, String>,
) -> Ready<Result<(), Error>> {
    let msg_str = msg.into_text().unwrap();
    let command: Result<Command, serde_json::Error> = serde_json::from_str(&msg_str);
    let mut peers = peer_map.lock().unwrap();
    let peer = peers.get_mut(&addr).expect("peer should be in map");

    match command {
        Ok(Command::Subscribe(cmd)) => {
            // No explicit market/account filter => subscribe to all markets.
            let mut wildcard = true;
            // DEPRECATED
            if let Some(market_id) = cmd.market_id {
                wildcard = false;
                if market_ids.get(&market_id).is_none() {
                    let res = StatusResponse {
                        success: false,
                        message: "market not found",
                    };
                    peer.sender
                        .unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
                        .unwrap();
                    return future::ok(());
                }
                let subscribed = peer.market_subscriptions.insert(market_id.clone());

                let res = if subscribed {
                    StatusResponse {
                        success: true,
                        message: "subscribed",
                    }
                } else {
                    StatusResponse {
                        success: false,
                        message: "already subscribed",
                    }
                };
                peer.sender
                    .unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
                    .unwrap();

                // Send the current checkpoint so the client has a baseline.
                if subscribed {
                    let checkpoint_map = checkpoint_map.lock().unwrap();
                    let checkpoint = checkpoint_map.get(&market_id);
                    match checkpoint {
                        Some(checkpoint) => {
                            peer.sender
                                .unbounded_send(Message::Text(
                                    serde_json::to_string(&checkpoint).unwrap(),
                                ))
                                .unwrap();
                        }
                        None => info!(
                            "no checkpoint available on client subscription for market {}",
                            &market_id
                        ),
                    };
                }
            }
            if let Some(cmd_market_ids) = cmd.market_ids {
                wildcard = false;
                for market_id in cmd_market_ids {
                    // Any unknown market aborts the whole command; earlier
                    // markets in the list stay subscribed.
                    if market_ids.get(&market_id).is_none() {
                        let res = StatusResponse {
                            success: false,
                            message: &format!("market {} not found", &market_id),
                        };
                        peer.sender
                            .unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
                            .unwrap();
                        return future::ok(());
                    }
                    if peer.market_subscriptions.insert(market_id.clone()) {
                        let checkpoint_map = checkpoint_map.lock().unwrap();
                        let checkpoint = checkpoint_map.get(&market_id);
                        let res = StatusResponse {
                            success: true,
                            message: &format!("subscribed to market {}", &market_id),
                        };

                        peer.sender
                            .unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
                            .unwrap();
                        match checkpoint {
                            Some(checkpoint) => {
                                peer.sender
                                    .unbounded_send(Message::Text(
                                        serde_json::to_string(&checkpoint).unwrap(),
                                    ))
                                    .unwrap();
                            }
                            None => info!(
                                "no checkpoint available on client subscription for market {}",
                                &market_id
                            ),
                        };
                    }
                }
            }
            if let Some(account_ids) = cmd.account_ids {
                wildcard = false;
                for account_id in account_ids {
                    if peer.account_subscriptions.insert(account_id.clone()) {
                        let res = StatusResponse {
                            success: true,
                            message: &format!("subscribed to account {}", &account_id),
                        };

                        peer.sender
                            .unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
                            .unwrap();
                    }
                }
            }
            if wildcard {
                // Subscribe the peer to every known market (no checkpoints
                // are sent in the wildcard case).
                for (market_id, market_name) in market_ids {
                    if peer.market_subscriptions.insert(market_id.clone()) {
                        let res = StatusResponse {
                            success: true,
                            message: &format!("subscribed to market {}", &market_name),
                        };

                        peer.sender
                            .unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
                            .unwrap();
                    }
                }
            }
            if let Some(head_updates) = cmd.head_updates {
                peer.head_updates = head_updates;
            }
        }
        Ok(Command::Unsubscribe(cmd)) => {
            info!("unsubscribe {}", cmd.market_id);
            let unsubscribed = peer.market_subscriptions.remove(&cmd.market_id);
            let res = if unsubscribed {
                StatusResponse {
                    success: true,
                    message: "unsubscribed",
                }
            } else {
                StatusResponse {
                    success: false,
                    message: "not subscribed",
                }
            };
            peer.sender
                .unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
                .unwrap();
        }
        Ok(Command::GetMarkets) => {
            info!("getMarkets");
            // Reply with the full market_id -> market_name map as JSON.
            peer.sender
                .unbounded_send(Message::Text(serde_json::to_string(&market_ids).unwrap()))
                .unwrap();
        }
        Err(err) => {
            info!("error deserializing user input {:?}", err);
            let res = StatusResponse {
                success: false,
                message: "invalid input",
            };
            peer.sender
                .unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
                .unwrap();
        }
    };

    future::ok(())
}
|
||||
|
||||
/// Top-level service configuration, deserialized from the TOML file passed
/// as the first CLI argument.
#[derive(Clone, Debug, Deserialize)]
pub struct Config {
    pub source: SourceConfig,
    pub metrics: MetricsConfig,
    // Optional postgres sink (only perp fills are persisted; see main).
    pub postgres: Option<PostgresConfig>,
    // Listen address for the websocket server.
    pub bind_ws_addr: String,
    // May be "$ENV_VAR" to read the URL from the environment (see main).
    pub rpc_http_url: String,
    pub mango_group: String,
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let args: Vec<String> = std::env::args().collect();
|
||||
let exit: Arc<AtomicBool> = Arc::new(AtomicBool::new(false));
|
||||
|
||||
if args.len() < 2 {
|
||||
eprintln!("Please enter a config file path argument.");
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let config: Config = {
|
||||
let mut file = File::open(&args[1])?;
|
||||
let mut contents = String::new();
|
||||
file.read_to_string(&mut contents)?;
|
||||
toml::from_str(&contents).unwrap()
|
||||
};
|
||||
|
||||
solana_logger::setup_with_default("info");
|
||||
|
||||
let metrics_tx = metrics::start(config.metrics, "fills".into());
|
||||
|
||||
let metrics_opened_connections =
|
||||
metrics_tx.register_u64("fills_feed_opened_connections".into(), MetricType::Counter);
|
||||
|
||||
let metrics_closed_connections =
|
||||
metrics_tx.register_u64("fills_feed_closed_connections".into(), MetricType::Counter);
|
||||
|
||||
let rpc_url = match &config.rpc_http_url.chars().next().unwrap() {
|
||||
'$' => env::var(&config.rpc_http_url[1..]).expect("reading rpc http url from env"),
|
||||
_ => config.rpc_http_url.clone(),
|
||||
};
|
||||
let ws_url = rpc_url.replace("https", "wss");
|
||||
let rpc_timeout = Duration::from_secs(10);
|
||||
let cluster = Cluster::Custom(rpc_url.clone(), ws_url.clone());
|
||||
let client = Client::new(
|
||||
cluster.clone(),
|
||||
CommitmentConfig::processed(),
|
||||
Arc::new(Keypair::new()),
|
||||
Some(rpc_timeout),
|
||||
TransactionBuilderConfig {
|
||||
prioritization_micro_lamports: None,
|
||||
},
|
||||
);
|
||||
let group_context = Arc::new(
|
||||
MangoGroupContext::new_from_rpc(
|
||||
&client.rpc_async(),
|
||||
Pubkey::from_str(&config.mango_group).unwrap(),
|
||||
)
|
||||
.await?,
|
||||
);
|
||||
|
||||
// todo: reload markets at intervals
|
||||
let perp_market_configs: Vec<(Pubkey, MarketConfig)> = group_context
|
||||
.perp_markets
|
||||
.values()
|
||||
.map(|context| {
|
||||
let quote_decimals = match group_context.tokens.get(&context.market.settle_token_index)
|
||||
{
|
||||
Some(token) => token.decimals,
|
||||
None => panic!("token not found for market"), // todo: default to 6 for usdc?
|
||||
};
|
||||
(
|
||||
context.address,
|
||||
MarketConfig {
|
||||
name: context.market.name().to_owned(),
|
||||
bids: context.market.bids,
|
||||
asks: context.market.asks,
|
||||
event_queue: context.market.event_queue,
|
||||
oracle: context.market.oracle,
|
||||
base_decimals: context.market.base_decimals,
|
||||
quote_decimals,
|
||||
base_lot_size: context.market.base_lot_size,
|
||||
quote_lot_size: context.market.quote_lot_size,
|
||||
},
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let spot_market_configs: Vec<(Pubkey, MarketConfig)> = group_context
|
||||
.serum3_markets
|
||||
.values()
|
||||
.map(|context| {
|
||||
let base_decimals = match group_context.tokens.get(&context.market.base_token_index) {
|
||||
Some(token) => token.decimals,
|
||||
None => panic!("token not found for market"), // todo: default?
|
||||
};
|
||||
let quote_decimals = match group_context.tokens.get(&context.market.quote_token_index) {
|
||||
Some(token) => token.decimals,
|
||||
None => panic!("token not found for market"), // todo: default to 6 for usdc?
|
||||
};
|
||||
(
|
||||
context.market.serum_market_external,
|
||||
MarketConfig {
|
||||
name: context.market.name().to_owned(),
|
||||
bids: context.bids,
|
||||
asks: context.asks,
|
||||
event_queue: context.event_q,
|
||||
oracle: Pubkey::default(), // serum markets don't support oracle peg
|
||||
base_decimals,
|
||||
quote_decimals,
|
||||
base_lot_size: context.pc_lot_size as i64,
|
||||
quote_lot_size: context.coin_lot_size as i64,
|
||||
},
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let perp_queue_pks: Vec<(Pubkey, Pubkey)> = group_context
|
||||
.perp_markets
|
||||
.values()
|
||||
.map(|context| (context.address, context.market.event_queue))
|
||||
.collect();
|
||||
|
||||
let _a: Vec<(String, String)> = group_context
|
||||
.serum3_markets
|
||||
.values()
|
||||
.map(|context| {
|
||||
(
|
||||
context.market.serum_market_external.to_string(),
|
||||
context.market.name().to_owned(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
let b: Vec<(String, String)> = group_context
|
||||
.perp_markets
|
||||
.values()
|
||||
.map(|context| {
|
||||
(
|
||||
context.address.to_string(),
|
||||
context.market.name().to_owned(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
let market_pubkey_strings: HashMap<String, String> = [b].concat().into_iter().collect();
|
||||
|
||||
let postgres_update_sender = match config.postgres {
|
||||
Some(postgres_config) => Some(
|
||||
fill_event_postgres_target::init(&postgres_config, metrics_tx.clone(), exit.clone())
|
||||
.await?,
|
||||
),
|
||||
None => None,
|
||||
};
|
||||
|
||||
let (account_write_queue_sender, slot_queue_sender, fill_receiver) = fill_event_filter::init(
|
||||
perp_market_configs.clone(),
|
||||
spot_market_configs.clone(),
|
||||
metrics_tx.clone(),
|
||||
exit.clone(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let checkpoints = CheckpointMap::new(Mutex::new(HashMap::new()));
|
||||
let peers = PeerMap::new(Mutex::new(HashMap::new()));
|
||||
|
||||
let checkpoints_ref_thread = checkpoints.clone();
|
||||
let peers_ref_thread = peers.clone();
|
||||
let peers_ref_thread1 = peers.clone();
|
||||
|
||||
// filleventfilter websocket sink
|
||||
tokio::spawn(async move {
|
||||
pin!(fill_receiver);
|
||||
loop {
|
||||
let message = fill_receiver.recv().await.unwrap();
|
||||
match message {
|
||||
FillEventFilterMessage::Update(update) => {
|
||||
debug!(
|
||||
"ws update {} {:?} {:?} fill",
|
||||
update.market_name, update.status, update.event.event_type
|
||||
);
|
||||
let mut peer_copy = peers_ref_thread.lock().unwrap().clone();
|
||||
for (addr, peer) in peer_copy.iter_mut() {
|
||||
let json = serde_json::to_string(&update.clone()).unwrap();
|
||||
let is_subscribed = peer.market_subscriptions.contains(&update.market_key)
|
||||
|| peer.account_subscriptions.contains(&update.event.taker)
|
||||
|| peer.account_subscriptions.contains(&update.event.maker);
|
||||
// only send updates if the peer is subscribed
|
||||
if is_subscribed {
|
||||
let result = peer.sender.send(Message::Text(json)).await;
|
||||
if result.is_err() {
|
||||
error!(
|
||||
"ws update {} fill could not reach {}",
|
||||
update.market_name, addr
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
// send fills to db
|
||||
let update_c = update.clone();
|
||||
if let (Some(sender), FillEventType::Perp) =
|
||||
(postgres_update_sender.clone(), update_c.event.event_type)
|
||||
{
|
||||
sender.send(update_c).await.unwrap();
|
||||
}
|
||||
}
|
||||
FillEventFilterMessage::Checkpoint(checkpoint) => {
|
||||
checkpoints_ref_thread
|
||||
.lock()
|
||||
.unwrap()
|
||||
.insert(checkpoint.queue.clone(), checkpoint);
|
||||
}
|
||||
FillEventFilterMessage::HeadUpdate(update) => {
|
||||
debug!(
|
||||
"ws update {} {:?} {} {} head",
|
||||
update.market_name, update.status, update.head, update.prev_head
|
||||
);
|
||||
let mut peer_copy = peers_ref_thread.lock().unwrap().clone();
|
||||
for (addr, peer) in peer_copy.iter_mut() {
|
||||
let json = serde_json::to_string(&update.clone()).unwrap();
|
||||
let is_subscribed = peer.market_subscriptions.contains(&update.market_key);
|
||||
// only send updates if the peer is subscribed
|
||||
if peer.head_updates && is_subscribed {
|
||||
let result = peer.sender.send(Message::Text(json)).await;
|
||||
if result.is_err() {
|
||||
error!(
|
||||
"ws update {} head could not reach {}",
|
||||
update.market_name, addr
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// websocket listener
|
||||
info!("ws listen: {}", config.bind_ws_addr);
|
||||
let try_socket = TcpListener::bind(&config.bind_ws_addr).await;
|
||||
let listener = try_socket.expect("Failed to bind");
|
||||
{
|
||||
tokio::spawn(async move {
|
||||
// Let's spawn the handling of each connection in a separate task.
|
||||
while let Ok((stream, addr)) = listener.accept().await {
|
||||
tokio::spawn(handle_connection_error(
|
||||
checkpoints.clone(),
|
||||
peers.clone(),
|
||||
market_pubkey_strings.clone(),
|
||||
stream,
|
||||
addr,
|
||||
metrics_opened_connections.clone(),
|
||||
metrics_closed_connections.clone(),
|
||||
));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// keepalive
|
||||
{
|
||||
tokio::spawn(async move {
|
||||
let mut write_interval = time::interval(time::Duration::from_secs(30));
|
||||
|
||||
loop {
|
||||
write_interval.tick().await;
|
||||
let peers_copy = peers_ref_thread1.lock().unwrap().clone();
|
||||
for (addr, peer) in peers_copy.iter() {
|
||||
let pl = Vec::new();
|
||||
let result = peer.clone().sender.send(Message::Ping(pl)).await;
|
||||
if result.is_err() {
|
||||
error!("ws ping could not reach {}", addr);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// handle sigint
|
||||
{
|
||||
let exit = exit.clone();
|
||||
tokio::spawn(async move {
|
||||
tokio::signal::ctrl_c().await.unwrap();
|
||||
info!("Received SIGINT, shutting down...");
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
});
|
||||
}
|
||||
|
||||
info!(
|
||||
"rpc connect: {}",
|
||||
config
|
||||
.source
|
||||
.grpc_sources
|
||||
.iter()
|
||||
.map(|c| c.connection_string.clone())
|
||||
.collect::<String>()
|
||||
);
|
||||
let use_geyser = true;
|
||||
let all_queue_pks = [perp_queue_pks.clone()].concat();
|
||||
let relevant_pubkeys = all_queue_pks.iter().map(|m| m.1).collect();
|
||||
let filter_config = FilterConfig {
|
||||
entity_filter: EntityFilter::FilterByAccountIds(relevant_pubkeys),
|
||||
};
|
||||
if use_geyser {
|
||||
grpc_plugin_source::process_events(
|
||||
&config.source,
|
||||
&filter_config,
|
||||
account_write_queue_sender,
|
||||
slot_queue_sender,
|
||||
metrics_tx.clone(),
|
||||
exit.clone(),
|
||||
)
|
||||
.await;
|
||||
} else {
|
||||
websocket_source::process_events(
|
||||
&config.source,
|
||||
&filter_config,
|
||||
account_write_queue_sender,
|
||||
slot_queue_sender,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
|
@ -1,36 +0,0 @@
|
|||
[package]
|
||||
name = "service-mango-orderbook"
|
||||
version = "0.1.0"
|
||||
authors = ["Riordan Panayides <riordan@panayid.es>"]
|
||||
edition = "2021"
|
||||
license = "AGPL-3.0-or-later"
|
||||
|
||||
[dependencies]
|
||||
mango-feeds-lib = { path = "../lib" }
|
||||
solana-logger = "*"
|
||||
bs58 = "*"
|
||||
log = "*"
|
||||
anyhow = "*"
|
||||
toml = "*"
|
||||
|
||||
serde = "1.0"
|
||||
serde_derive = "1.0"
|
||||
serde_json = "1.0"
|
||||
futures-channel = "0.3"
|
||||
futures-util = "0.3"
|
||||
ws = "^0.9.2"
|
||||
async-channel = "1.6"
|
||||
async-trait = "0.1"
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio-tungstenite = "0.17"
|
||||
bytemuck = "1.7.2"
|
||||
itertools = "0.10.5"
|
||||
|
||||
solana-sdk = "~1.14.9"
|
||||
|
||||
mango-v4 = { git = "https://github.com/blockworks-foundation/mango-v4", branch = "dev" }
|
||||
mango-v4-client = { git = "https://github.com/blockworks-foundation/mango-v4", branch = "dev" }
|
||||
fixed = { git = "https://github.com/blockworks-foundation/mango-v4", branch = "dev", version = "1.11.0", features = ["serde", "borsh", "debug-assert-in-release"] }
|
||||
serum_dex = { git = "https://github.com/jup-ag/openbook-program", branch = "feat/expose-things", features = ["no-entrypoint"] }
|
||||
anchor-lang = "0.25.0"
|
||||
anchor-client = "0.25.0"
|
|
@ -1,193 +0,0 @@
|
|||
# service-mango-orderbook
|
||||
|
||||
This module parses bookside accounts and exposes L2 and L3 data and updates on a websocket
|
||||
|
||||
Public API: `https://api.mngo.cloud/orderbook/v1/`
|
||||
|
||||
## API Reference
|
||||
|
||||
Get a list of markets
|
||||
|
||||
```
|
||||
{
|
||||
"command": "getMarkets"
|
||||
}
|
||||
```
|
||||
|
||||
```
|
||||
{
|
||||
"ESdnpnNLgTkBCZRuTJkZLi5wKEZ2z47SG3PJrhundSQ2": "SOL-PERP",
|
||||
"HwhVGkfsSQ9JSQeQYu2CbkRCLvsh3qRZxG6m4oMVwZpN": "BTC-PERP",
|
||||
"Fgh9JSZ2qfSjCw9RPJ85W2xbihsp2muLvfRztzoVR7f1": "ETH-PERP",
|
||||
}
|
||||
```
|
||||
|
||||
### L2 Data
|
||||
|
||||
Subscribe to L2 updates
|
||||
|
||||
```
|
||||
{
|
||||
"command": "subscribe",
|
||||
"marketId": "MARKET_PUBKEY",
|
||||
"subscriptionType": "level"
|
||||
}
|
||||
```
|
||||
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"message": "subscribed to level updates for MARKET_PUBKEY"
|
||||
}
|
||||
```
|
||||
|
||||
L2 Checkpoint - Sent upon initial subscription
|
||||
|
||||
```
|
||||
{
|
||||
"market": "ESdnpnNLgTkBCZRuTJkZLi5wKEZ2z47SG3PJrhundSQ2",
|
||||
"bids": [
    [22.17, 8.86],
    [22.15, 88.59],
],
|
||||
"asks": [
|
||||
[22.19, 9.17],
|
||||
[22.21, 91.7],
|
||||
],
|
||||
"slot": 190826373,
|
||||
"write_version": 688377208758
|
||||
}
|
||||
```
|
||||
|
||||
L2 Update - Sent per side
|
||||
|
||||
```
|
||||
{
|
||||
"market": "ESdnpnNLgTkBCZRuTJkZLi5wKEZ2z47SG3PJrhundSQ2",
|
||||
"bids": [ // or "asks"
|
||||
[22.18, 6], // new level added
|
||||
[22.17, 1], // level changed
|
||||
[22.15, 0], // level removed
|
||||
],
|
||||
"slot": 190826375,
|
||||
"write_version": 688377208759
|
||||
}
|
||||
```
|
||||
### L3 Data
|
||||
|
||||
Subscribe to L3 updates
|
||||
:warning: If the subscribed market is a perp market, `ownerPubkey` corresponds to a `mangoAccount`, if the subscribed market is a spot market, `ownerPubkey` corresponds to an open orders account.
|
||||
|
||||
```
|
||||
{
|
||||
"command": "subscribe",
|
||||
"marketId": "MARKET_PUBKEY",
|
||||
"subscriptionType": "book"
|
||||
}
|
||||
```
|
||||
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"message": "subscribed to book updates for MARKET_PUBKEY"
|
||||
}
|
||||
```
|
||||
|
||||
L3 Checkpoint - Sent upon initial subscription
|
||||
|
||||
```
|
||||
{
|
||||
"market": "ESdnpnNLgTkBCZRuTJkZLi5wKEZ2z47SG3PJrhundSQ2",
|
||||
"bids": [
|
||||
{
|
||||
"price": 20.81,
|
||||
"quantity": 1.3,
|
||||
"ownerPubkey": "F1SZxEDxxCSLVjEBbMEjDYqajWRJQRCZBwPQnmcVvTLV"
|
||||
},
|
||||
{
|
||||
"price": 20.81,
|
||||
"quantity": 62.22,
|
||||
"ownerPubkey": "BGYWnqfaauCeebFQXEfYuDCktiVG8pqpprrsD4qfqL53"
|
||||
},
|
||||
{
|
||||
"price": 20.8,
|
||||
"quantity": 8,
|
||||
"ownerPubkey": "CtHuPg2ctVVV7nqmvVEcMtcWyJAgtZw9YcNHFQidjPgF"
|
||||
}
|
||||
],
|
||||
"asks": [
|
||||
{
|
||||
"price": 20.94,
|
||||
"quantity": 62.22,
|
||||
"ownerPubkey": "BGYWnqfaauCeebFQXEfYuDCktiVG8pqpprrsD4qfqL53"
|
||||
},
|
||||
{
|
||||
"price": 20.95,
|
||||
"quantity": 1.3,
|
||||
"ownerPubkey": "F1SZxEDxxCSLVjEBbMEjDYqajWRJQRCZBwPQnmcVvTLV"
|
||||
},
|
||||
{
|
||||
"price": 21.31,
|
||||
"quantity": 30,
|
||||
"ownerPubkey": "5gHsqmFsMaguM3HMyEmnME4NMQKj6NrJWUGv6VKnc2Hk"
|
||||
}
|
||||
],
|
||||
"slot": 190826373,
|
||||
"write_version": 688377208758
|
||||
}
|
||||
```
|
||||
|
||||
L3 Update - Sent per side
|
||||
|
||||
```
|
||||
{
|
||||
"market": "ESdnpnNLgTkBCZRuTJkZLi5wKEZ2z47SG3PJrhundSQ2",
|
||||
"side": "ask",
|
||||
"additions": [
|
||||
{
|
||||
"price": 20.92,
|
||||
"quantity": 61.93,
|
||||
"ownerPubkey": "BGYWnqfaauCeebFQXEfYuDCktiVG8pqpprrsD4qfqL53"
|
||||
}
|
||||
],
|
||||
"removals": [
|
||||
{
|
||||
"price": 20.92,
|
||||
"quantity": 61.910000000000004,
|
||||
"ownerPubkey": "BGYWnqfaauCeebFQXEfYuDCktiVG8pqpprrsD4qfqL53"
|
||||
}
|
||||
],
|
||||
"slot": 197077534,
|
||||
"write_version": 727782187614
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
## Setup
|
||||
|
||||
## Local
|
||||
|
||||
1. Prepare the connector configuration file.
|
||||
|
||||
[Here is an example](service-mango-orderbook/conf/example-config.toml).
|
||||
|
||||
- `bind_ws_addr` is the listen port for the websocket clients
|
||||
- `rpc_ws_url` is unused and can stay empty.
|
||||
- `connection_string` for your `grpc_sources` must point to the gRPC server
|
||||
address configured for the plugin.
|
||||
- `rpc_http_url` must point to the JSON-RPC URL.
|
||||
- `program_id` must match what is configured for the gRPC plugin
|
||||
|
||||
2. Start the service binary.
|
||||
|
||||
Pass the path to the config file as the first argument. It logs to stdout. It
|
||||
should be restarted on exit.
|
||||
|
||||
3. Monitor the logs
|
||||
|
||||
`WARN` messages can be recovered from. `ERROR` messages need attention. The
|
||||
logs are very spammy; raising the default log level is recommended unless you
want to analyze the performance of the service.
|
||||
|
||||
## fly.io
|
||||
|
|
@ -1,20 +0,0 @@
|
|||
bind_ws_addr = "0.0.0.0:8080"
|
||||
rpc_http_url = "http://mango.rpcpool.com/<token>"
|
||||
mango_group = "78b8f4cGCwmZ9ysPFMWLaLTkkaYnUjwMJYStWe5RTSSX"
|
||||
|
||||
[metrics]
|
||||
output_stdout = true
|
||||
output_http = true
|
||||
|
||||
[source]
|
||||
dedup_queue_size = 50000
|
||||
rpc_ws_url = "wss://mango.rpcpool.com/<token>"
|
||||
|
||||
[[source.grpc_sources]]
|
||||
name = "accountsdb-client"
|
||||
connection_string = "http://tyo64.rpcpool.com/"
|
||||
retry_connection_sleep_secs = 30
|
||||
|
||||
[source.snapshot]
|
||||
rpc_http_url = "http://mango.rpcpool.com/<token>"
|
||||
program_id = "4MangoMjqJ2firMokCjjGgoK8d4MXcrgL7XJaL3w6fVg"
|
|
@ -1,20 +0,0 @@
|
|||
bind_ws_addr = "[::]:8080"
|
||||
rpc_http_url = "$RPC_HTTP_URL"
|
||||
mango_group = "78b8f4cGCwmZ9ysPFMWLaLTkkaYnUjwMJYStWe5RTSSX"
|
||||
|
||||
[metrics]
|
||||
output_stdout = true
|
||||
output_http = true
|
||||
|
||||
[source]
|
||||
dedup_queue_size = 50000
|
||||
rpc_ws_url = "$RPC_WS_URL"
|
||||
|
||||
[[source.grpc_sources]]
|
||||
name = "accountsdb-client"
|
||||
connection_string = "$GEYSER_CONNECTION_STRING"
|
||||
retry_connection_sleep_secs = 30
|
||||
|
||||
[source.snapshot]
|
||||
rpc_http_url = "$RPC_HTTP_URL"
|
||||
program_id = "srmqPvymJeFKQ4zGQed1GFppgkRHL9kaELCbyksJtPX"
|
|
@ -1,122 +0,0 @@
|
|||
use mango_feeds_lib::OrderbookSide;
|
||||
use serde::{ser::SerializeStruct, Serialize, Serializer};
|
||||
|
||||
pub type OrderbookLevel = [f64; 2];
|
||||
pub type Orderbook = Vec<Order>;
|
||||
|
||||
#[derive(Clone, Debug, Serialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct Order {
|
||||
pub price: f64,
|
||||
pub quantity: f64,
|
||||
pub owner_pubkey: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct LevelUpdate {
|
||||
pub market: String,
|
||||
pub side: OrderbookSide,
|
||||
pub update: Vec<OrderbookLevel>,
|
||||
pub slot: u64,
|
||||
pub write_version: u64,
|
||||
}
|
||||
|
||||
impl Serialize for LevelUpdate {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let mut state = serializer.serialize_struct("LevelUpdate", 5)?;
|
||||
state.serialize_field("market", &self.market)?;
|
||||
state.serialize_field("side", &self.side)?;
|
||||
state.serialize_field("update", &self.update)?;
|
||||
state.serialize_field("slot", &self.slot)?;
|
||||
state.serialize_field("write_version", &self.write_version)?;
|
||||
|
||||
state.end()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct LevelCheckpoint {
|
||||
pub market: String,
|
||||
pub bids: Vec<OrderbookLevel>,
|
||||
pub asks: Vec<OrderbookLevel>,
|
||||
pub slot: u64,
|
||||
pub write_version: u64,
|
||||
}
|
||||
|
||||
impl Serialize for LevelCheckpoint {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let mut state = serializer.serialize_struct("LevelCheckpoint", 3)?;
|
||||
state.serialize_field("market", &self.market)?;
|
||||
state.serialize_field("bids", &self.bids)?;
|
||||
state.serialize_field("asks", &self.asks)?;
|
||||
state.serialize_field("slot", &self.slot)?;
|
||||
state.serialize_field("write_version", &self.write_version)?;
|
||||
|
||||
state.end()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct BookUpdate {
|
||||
pub market: String,
|
||||
pub side: OrderbookSide,
|
||||
pub additions: Vec<Order>,
|
||||
pub removals: Vec<Order>,
|
||||
pub slot: u64,
|
||||
pub write_version: u64,
|
||||
}
|
||||
|
||||
impl Serialize for BookUpdate {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let mut state = serializer.serialize_struct("BookUpdate", 6)?;
|
||||
state.serialize_field("market", &self.market)?;
|
||||
state.serialize_field("side", &self.side)?;
|
||||
state.serialize_field("additions", &self.additions)?;
|
||||
state.serialize_field("removals", &self.removals)?;
|
||||
state.serialize_field("slot", &self.slot)?;
|
||||
state.serialize_field("write_version", &self.write_version)?;
|
||||
|
||||
state.end()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct BookCheckpoint {
|
||||
pub market: String,
|
||||
pub bids: Vec<Order>,
|
||||
pub asks: Vec<Order>,
|
||||
pub slot: u64,
|
||||
pub write_version: u64,
|
||||
}
|
||||
|
||||
impl Serialize for BookCheckpoint {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let mut state = serializer.serialize_struct("LevelCheckpoint", 5)?;
|
||||
state.serialize_field("market", &self.market)?;
|
||||
state.serialize_field("bids", &self.bids)?;
|
||||
state.serialize_field("asks", &self.asks)?;
|
||||
state.serialize_field("slot", &self.slot)?;
|
||||
state.serialize_field("write_version", &self.write_version)?;
|
||||
|
||||
state.end()
|
||||
}
|
||||
}
|
||||
|
||||
pub enum OrderbookFilterMessage {
|
||||
LevelUpdate(LevelUpdate),
|
||||
LevelCheckpoint(LevelCheckpoint),
|
||||
BookUpdate(BookUpdate),
|
||||
BookCheckpoint(BookCheckpoint),
|
||||
}
|
|
@ -1,627 +0,0 @@
|
|||
mod orderbook_filter;
|
||||
|
||||
use anchor_client::{
|
||||
solana_sdk::{commitment_config::CommitmentConfig, signature::Keypair},
|
||||
Cluster,
|
||||
};
|
||||
use anchor_lang::prelude::Pubkey;
|
||||
use futures_channel::mpsc::{unbounded, UnboundedSender};
|
||||
use futures_util::{
|
||||
future::{self, Ready},
|
||||
pin_mut, SinkExt, StreamExt, TryStreamExt,
|
||||
};
|
||||
use itertools::Itertools;
|
||||
use log::*;
|
||||
use mango_v4_client::{Client, MangoGroupContext, TransactionBuilderConfig};
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
fmt,
|
||||
fs::File,
|
||||
io::Read,
|
||||
net::SocketAddr,
|
||||
str::FromStr,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc, Mutex,
|
||||
},
|
||||
time::Duration,
|
||||
};
|
||||
use tokio::{
|
||||
net::{TcpListener, TcpStream},
|
||||
pin, time,
|
||||
};
|
||||
use tokio_tungstenite::tungstenite::{protocol::Message, Error};
|
||||
|
||||
use mango_feeds_lib::EntityFilter::FilterByAccountIds;
|
||||
use mango_feeds_lib::{
|
||||
grpc_plugin_source, metrics, websocket_source, MarketConfig, MetricsConfig, SourceConfig,
|
||||
};
|
||||
use mango_feeds_lib::{
|
||||
metrics::{MetricType, MetricU64},
|
||||
FilterConfig, StatusResponse,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use service_mango_orderbook::{BookCheckpoint, LevelCheckpoint, OrderbookFilterMessage};
|
||||
|
||||
type LevelCheckpointMap = Arc<Mutex<HashMap<String, LevelCheckpoint>>>;
|
||||
type BookCheckpointMap = Arc<Mutex<HashMap<String, BookCheckpoint>>>;
|
||||
type PeerMap = Arc<Mutex<HashMap<SocketAddr, Peer>>>;
|
||||
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
#[serde(tag = "command")]
|
||||
pub enum Command {
|
||||
#[serde(rename = "subscribe")]
|
||||
Subscribe(SubscribeCommand),
|
||||
#[serde(rename = "unsubscribe")]
|
||||
Unsubscribe(UnsubscribeCommand),
|
||||
#[serde(rename = "getMarkets")]
|
||||
GetMarkets,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct SubscribeCommand {
|
||||
pub market_id: String,
|
||||
pub subscription_type: Option<SubscriptionType>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum SubscriptionType {
|
||||
#[serde(rename = "level")]
|
||||
Level,
|
||||
#[serde(rename = "book")]
|
||||
Book,
|
||||
}
|
||||
|
||||
impl fmt::Display for SubscriptionType {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self {
|
||||
SubscriptionType::Level => write!(f, "level"),
|
||||
SubscriptionType::Book => write!(f, "book"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UnsubscribeCommand {
|
||||
pub market_id: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Peer {
|
||||
pub sender: UnboundedSender<Message>,
|
||||
pub level_subscriptions: HashSet<String>,
|
||||
pub book_subscriptions: HashSet<String>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
pub struct Config {
|
||||
pub source: SourceConfig,
|
||||
pub metrics: MetricsConfig,
|
||||
pub bind_ws_addr: String,
|
||||
pub rpc_http_url: String,
|
||||
pub mango_group: String,
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn handle_connection_error(
|
||||
level_checkpoint_map: LevelCheckpointMap,
|
||||
book_checkpoint_map: BookCheckpointMap,
|
||||
peer_map: PeerMap,
|
||||
market_ids: HashMap<String, String>,
|
||||
raw_stream: TcpStream,
|
||||
addr: SocketAddr,
|
||||
metrics_opened_connections: MetricU64,
|
||||
metrics_closed_connections: MetricU64,
|
||||
) {
|
||||
metrics_opened_connections.clone().increment();
|
||||
|
||||
let result = handle_connection(
|
||||
level_checkpoint_map,
|
||||
book_checkpoint_map,
|
||||
peer_map.clone(),
|
||||
market_ids,
|
||||
raw_stream,
|
||||
addr,
|
||||
)
|
||||
.await;
|
||||
if result.is_err() {
|
||||
error!("connection {} error {}", addr, result.unwrap_err());
|
||||
};
|
||||
|
||||
metrics_closed_connections.clone().increment();
|
||||
|
||||
peer_map.lock().unwrap().remove(&addr);
|
||||
}
|
||||
|
||||
async fn handle_connection(
|
||||
level_checkpoint_map: LevelCheckpointMap,
|
||||
book_checkpoint_map: BookCheckpointMap,
|
||||
peer_map: PeerMap,
|
||||
market_ids: HashMap<String, String>,
|
||||
raw_stream: TcpStream,
|
||||
addr: SocketAddr,
|
||||
) -> Result<(), Error> {
|
||||
info!("ws connected: {}", addr);
|
||||
let ws_stream = tokio_tungstenite::accept_async(raw_stream).await?;
|
||||
let (ws_tx, ws_rx) = ws_stream.split();
|
||||
|
||||
// 1: publish channel in peer map
|
||||
let (chan_tx, chan_rx) = unbounded();
|
||||
{
|
||||
peer_map.lock().unwrap().insert(
|
||||
addr,
|
||||
Peer {
|
||||
sender: chan_tx,
|
||||
level_subscriptions: HashSet::<String>::new(),
|
||||
book_subscriptions: HashSet::<String>::new(),
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
let receive_commands = ws_rx.try_for_each(|msg| match msg {
|
||||
Message::Text(_) => handle_commands(
|
||||
addr,
|
||||
msg,
|
||||
peer_map.clone(),
|
||||
level_checkpoint_map.clone(),
|
||||
book_checkpoint_map.clone(),
|
||||
market_ids.clone(),
|
||||
),
|
||||
Message::Ping(_) => {
|
||||
let peers = peer_map.clone();
|
||||
let mut peers_lock = peers.lock().unwrap();
|
||||
let peer = peers_lock.get_mut(&addr).expect("peer should be in map");
|
||||
peer.sender
|
||||
.unbounded_send(Message::Pong(Vec::new()))
|
||||
.unwrap();
|
||||
future::ready(Ok(()))
|
||||
}
|
||||
_ => future::ready(Ok(())),
|
||||
});
|
||||
let forward_updates = chan_rx.map(Ok).forward(ws_tx);
|
||||
|
||||
pin_mut!(receive_commands, forward_updates);
|
||||
future::select(receive_commands, forward_updates).await;
|
||||
|
||||
peer_map.lock().unwrap().remove(&addr);
|
||||
info!("ws disconnected: {}", &addr);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn handle_commands(
|
||||
addr: SocketAddr,
|
||||
msg: Message,
|
||||
peer_map: PeerMap,
|
||||
level_checkpoint_map: LevelCheckpointMap,
|
||||
book_checkpoint_map: BookCheckpointMap,
|
||||
market_ids: HashMap<String, String>,
|
||||
) -> Ready<Result<(), Error>> {
|
||||
let msg_str = msg.into_text().unwrap();
|
||||
let command: Result<Command, serde_json::Error> = serde_json::from_str(&msg_str);
|
||||
let mut peers = peer_map.lock().unwrap();
|
||||
let peer = peers.get_mut(&addr).expect("peer should be in map");
|
||||
match command {
|
||||
Ok(Command::Subscribe(cmd)) => {
|
||||
let market_id = cmd.market_id;
|
||||
if market_ids.get(&market_id).is_none() {
|
||||
let res = StatusResponse {
|
||||
success: false,
|
||||
message: "market not found",
|
||||
};
|
||||
peer.sender
|
||||
.unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
|
||||
.unwrap();
|
||||
return future::ok(());
|
||||
}
|
||||
// default to level subscription
|
||||
let subscription_type = match cmd.subscription_type {
|
||||
Some(subscription) => subscription,
|
||||
None => SubscriptionType::Level,
|
||||
};
|
||||
|
||||
let subscribed = match subscription_type {
|
||||
SubscriptionType::Level => peer.level_subscriptions.insert(market_id.clone()),
|
||||
SubscriptionType::Book => peer.book_subscriptions.insert(market_id.clone()),
|
||||
};
|
||||
let message = format!(
|
||||
"subscribed to {} updates for {}",
|
||||
subscription_type, market_id
|
||||
);
|
||||
|
||||
let res = if subscribed {
|
||||
StatusResponse {
|
||||
success: true,
|
||||
message: &message,
|
||||
}
|
||||
} else {
|
||||
StatusResponse {
|
||||
success: false,
|
||||
message: "already subscribed",
|
||||
}
|
||||
};
|
||||
peer.sender
|
||||
.unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
|
||||
.unwrap();
|
||||
|
||||
if subscribed {
|
||||
match subscription_type {
|
||||
SubscriptionType::Level => {
|
||||
send_checkpoint(&level_checkpoint_map, &market_id, peer);
|
||||
}
|
||||
SubscriptionType::Book => {
|
||||
send_checkpoint(&book_checkpoint_map, &market_id, peer);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
Ok(Command::Unsubscribe(cmd)) => {
|
||||
info!("unsubscribe {}", cmd.market_id);
|
||||
// match
|
||||
let unsubscribed = peer.level_subscriptions.remove(&cmd.market_id);
|
||||
let res = if unsubscribed {
|
||||
StatusResponse {
|
||||
success: true,
|
||||
message: "unsubscribed",
|
||||
}
|
||||
} else {
|
||||
StatusResponse {
|
||||
success: false,
|
||||
message: "not subscribed",
|
||||
}
|
||||
};
|
||||
peer.sender
|
||||
.unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
|
||||
.unwrap();
|
||||
}
|
||||
Ok(Command::GetMarkets) => {
|
||||
info!("getMarkets");
|
||||
peer.sender
|
||||
.unbounded_send(Message::Text(serde_json::to_string(&market_ids).unwrap()))
|
||||
.unwrap();
|
||||
}
|
||||
Err(err) => {
|
||||
info!("error deserializing user input {:?}", err);
|
||||
let res = StatusResponse {
|
||||
success: false,
|
||||
message: "invalid input",
|
||||
};
|
||||
peer.sender
|
||||
.unbounded_send(Message::Text(serde_json::to_string(&res).unwrap()))
|
||||
.unwrap();
|
||||
}
|
||||
};
|
||||
|
||||
future::ok(())
|
||||
}
|
||||
|
||||
fn send_checkpoint<T>(checkpoint_map: &Mutex<HashMap<String, T>>, market_id: &str, peer: &Peer)
|
||||
where
|
||||
T: Serialize,
|
||||
{
|
||||
let checkpoint_map = checkpoint_map.lock().unwrap();
|
||||
let checkpoint = checkpoint_map.get(market_id);
|
||||
match checkpoint {
|
||||
Some(checkpoint) => {
|
||||
peer.sender
|
||||
.unbounded_send(Message::Text(serde_json::to_string(&checkpoint).unwrap()))
|
||||
.unwrap();
|
||||
}
|
||||
None => info!("no checkpoint available on client subscription"), // todo: what to do here?
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
solana_logger::setup_with_default("info");
|
||||
let exit: Arc<AtomicBool> = Arc::new(AtomicBool::new(false));
|
||||
|
||||
// load config
|
||||
let args: Vec<String> = std::env::args().collect();
|
||||
if args.len() < 2 {
|
||||
eprintln!("Please enter a config file path argument");
|
||||
return Ok(());
|
||||
}
|
||||
let config: Config = {
|
||||
let mut file = File::open(&args[1])?;
|
||||
let mut contents = String::new();
|
||||
file.read_to_string(&mut contents)?;
|
||||
toml::from_str(&contents).unwrap()
|
||||
};
|
||||
|
||||
// setup metrics
|
||||
let metrics_tx = metrics::start(config.metrics, "orderbook".into());
|
||||
let metrics_opened_connections =
|
||||
metrics_tx.register_u64("orderbook_opened_connections".into(), MetricType::Counter);
|
||||
let metrics_closed_connections =
|
||||
metrics_tx.register_u64("orderbook_closed_connections".into(), MetricType::Counter);
|
||||
|
||||
// load mango group and markets from rpc
|
||||
let rpc_url = config.rpc_http_url;
|
||||
let ws_url = rpc_url.replace("https", "wss");
|
||||
let rpc_timeout = Duration::from_secs(10);
|
||||
let cluster = Cluster::Custom(rpc_url.clone(), ws_url.clone());
|
||||
let client = Client::new(
|
||||
cluster.clone(),
|
||||
CommitmentConfig::processed(),
|
||||
Arc::new(Keypair::new()),
|
||||
Some(rpc_timeout),
|
||||
TransactionBuilderConfig {
|
||||
prioritization_micro_lamports: None,
|
||||
},
|
||||
);
|
||||
let group_context = Arc::new(
|
||||
MangoGroupContext::new_from_rpc(
|
||||
&client.rpc_async(),
|
||||
Pubkey::from_str(&config.mango_group).unwrap(),
|
||||
)
|
||||
.await?,
|
||||
);
|
||||
|
||||
// todo: reload markets at intervals
|
||||
let market_configs: Vec<(Pubkey, MarketConfig)> = group_context
|
||||
.perp_markets
|
||||
.values()
|
||||
.map(|context| {
|
||||
let quote_decimals = match group_context.tokens.get(&context.market.settle_token_index)
|
||||
{
|
||||
Some(token) => token.decimals,
|
||||
None => panic!("token not found for market"), // todo: default to 6 for usdc?
|
||||
};
|
||||
(
|
||||
context.address,
|
||||
MarketConfig {
|
||||
name: context.market.name().to_owned(),
|
||||
bids: context.market.bids,
|
||||
asks: context.market.asks,
|
||||
event_queue: context.market.event_queue,
|
||||
oracle: context.market.oracle,
|
||||
base_decimals: context.market.base_decimals,
|
||||
quote_decimals,
|
||||
base_lot_size: context.market.base_lot_size,
|
||||
quote_lot_size: context.market.quote_lot_size,
|
||||
},
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let serum_market_configs: Vec<(Pubkey, MarketConfig)> = group_context
|
||||
.serum3_markets
|
||||
.values()
|
||||
.map(|context| {
|
||||
let base_decimals = match group_context.tokens.get(&context.market.base_token_index) {
|
||||
Some(token) => token.decimals,
|
||||
None => panic!("token not found for market"), // todo: default?
|
||||
};
|
||||
let quote_decimals = match group_context.tokens.get(&context.market.quote_token_index) {
|
||||
Some(token) => token.decimals,
|
||||
None => panic!("token not found for market"), // todo: default to 6 for usdc?
|
||||
};
|
||||
(
|
||||
context.market.serum_market_external,
|
||||
MarketConfig {
|
||||
name: context.market.name().to_owned(),
|
||||
bids: context.bids,
|
||||
asks: context.asks,
|
||||
event_queue: context.event_q,
|
||||
oracle: Pubkey::default(), // serum markets dont support oracle peg
|
||||
base_decimals,
|
||||
quote_decimals,
|
||||
base_lot_size: context.coin_lot_size as i64,
|
||||
quote_lot_size: context.pc_lot_size as i64,
|
||||
},
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let market_pubkey_strings: HashMap<String, String> =
|
||||
[market_configs.clone(), serum_market_configs.clone()]
|
||||
.concat()
|
||||
.iter()
|
||||
.map(|market| (market.0.to_string(), market.1.name.clone()))
|
||||
.collect::<Vec<(String, String)>>()
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let (account_write_queue_sender, slot_queue_sender, orderbook_receiver) =
|
||||
orderbook_filter::init(
|
||||
market_configs.clone(),
|
||||
serum_market_configs.clone(),
|
||||
metrics_tx.clone(),
|
||||
exit.clone(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let level_checkpoints = LevelCheckpointMap::new(Mutex::new(HashMap::new()));
|
||||
let book_checkpoints = BookCheckpointMap::new(Mutex::new(HashMap::new()));
|
||||
let peers = PeerMap::new(Mutex::new(HashMap::new()));
|
||||
|
||||
// orderbook receiver
|
||||
{
|
||||
let level_checkpoints = level_checkpoints.clone();
|
||||
let book_checkpoints = book_checkpoints.clone();
|
||||
let peers = peers.clone();
|
||||
let exit = exit.clone();
|
||||
tokio::spawn(async move {
|
||||
pin!(orderbook_receiver);
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
warn!("shutting down orderbook receiver...");
|
||||
break;
|
||||
}
|
||||
|
||||
let message: OrderbookFilterMessage = orderbook_receiver.recv().await.unwrap();
|
||||
match message {
|
||||
OrderbookFilterMessage::LevelUpdate(update) => {
|
||||
debug!("ws level update {} {:?}", update.market, update.side);
|
||||
let mut peer_copy = peers.lock().unwrap().clone();
|
||||
for (addr, peer) in peer_copy.iter_mut() {
|
||||
let json = serde_json::to_string(&update).unwrap();
|
||||
|
||||
// only send updates if the peer is subscribed
|
||||
if peer.level_subscriptions.contains(&update.market) {
|
||||
let result = peer.sender.send(Message::Text(json)).await;
|
||||
if result.is_err() {
|
||||
error!(
|
||||
"ws level update {} {:?} could not reach {}",
|
||||
update.market, update.side, addr
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
OrderbookFilterMessage::LevelCheckpoint(checkpoint) => {
|
||||
debug!("ws level checkpoint {}", checkpoint.market);
|
||||
level_checkpoints
|
||||
.lock()
|
||||
.unwrap()
|
||||
.insert(checkpoint.market.clone(), checkpoint);
|
||||
}
|
||||
OrderbookFilterMessage::BookUpdate(update) => {
|
||||
debug!("ws book update {} {:?}", update.market, update.side);
|
||||
let mut peer_copy = peers.lock().unwrap().clone();
|
||||
for (addr, peer) in peer_copy.iter_mut() {
|
||||
let json = serde_json::to_string(&update).unwrap();
|
||||
|
||||
// only send updates if the peer is subscribed
|
||||
if peer.book_subscriptions.contains(&update.market) {
|
||||
let result = peer.sender.send(Message::Text(json)).await;
|
||||
if result.is_err() {
|
||||
error!(
|
||||
"ws book update {} {:?} could not reach {}",
|
||||
update.market, update.side, addr
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
OrderbookFilterMessage::BookCheckpoint(checkpoint) => {
|
||||
debug!("ws book checkpoint {}", checkpoint.market);
|
||||
book_checkpoints
|
||||
.lock()
|
||||
.unwrap()
|
||||
.insert(checkpoint.market.clone(), checkpoint);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// websocket server
|
||||
{
|
||||
info!("ws listen: {}", config.bind_ws_addr);
|
||||
let try_socket = TcpListener::bind(&config.bind_ws_addr).await;
|
||||
let listener = try_socket.expect("Failed to bind");
|
||||
let exit = exit.clone();
|
||||
let peers = peers.clone();
|
||||
tokio::spawn(async move {
|
||||
// Let's spawn the handling of each connection in a separate task.
|
||||
while let Ok((stream, addr)) = listener.accept().await {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
warn!("shutting down websocket server...");
|
||||
break;
|
||||
}
|
||||
tokio::spawn(handle_connection_error(
|
||||
level_checkpoints.clone(),
|
||||
book_checkpoints.clone(),
|
||||
peers.clone(),
|
||||
market_pubkey_strings.clone(),
|
||||
stream,
|
||||
addr,
|
||||
metrics_opened_connections.clone(),
|
||||
metrics_closed_connections.clone(),
|
||||
));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// keepalive
|
||||
{
|
||||
let exit = exit.clone();
|
||||
let peers = peers.clone();
|
||||
tokio::spawn(async move {
|
||||
let mut write_interval = time::interval(time::Duration::from_secs(30));
|
||||
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
warn!("shutting down keepalive...");
|
||||
break;
|
||||
}
|
||||
|
||||
write_interval.tick().await;
|
||||
let peers_copy = peers.lock().unwrap().clone();
|
||||
for (addr, peer) in peers_copy.iter() {
|
||||
let pl = Vec::new();
|
||||
let result = peer.clone().sender.send(Message::Ping(pl)).await;
|
||||
if result.is_err() {
|
||||
error!("ws ping could not reach {}", addr);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// handle sigint
|
||||
{
|
||||
let exit = exit.clone();
|
||||
tokio::spawn(async move {
|
||||
tokio::signal::ctrl_c().await.unwrap();
|
||||
info!("Received SIGINT, shutting down...");
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
});
|
||||
}
|
||||
|
||||
info!(
|
||||
"rpc connect: {}",
|
||||
config
|
||||
.source
|
||||
.grpc_sources
|
||||
.iter()
|
||||
.map(|c| c.connection_string.clone())
|
||||
.collect::<String>()
|
||||
);
|
||||
|
||||
let relevant_pubkeys = [market_configs.clone(), serum_market_configs.clone()]
|
||||
.concat()
|
||||
.iter()
|
||||
.flat_map(|m| [m.1.bids, m.1.asks])
|
||||
.collect_vec();
|
||||
let filter_config = FilterConfig {
|
||||
entity_filter: FilterByAccountIds(
|
||||
[
|
||||
relevant_pubkeys,
|
||||
market_configs
|
||||
.iter()
|
||||
.map(|(_, mkt)| mkt.oracle)
|
||||
.collect_vec(),
|
||||
]
|
||||
.concat()
|
||||
.to_vec(),
|
||||
),
|
||||
};
|
||||
let use_geyser = true;
|
||||
if use_geyser {
|
||||
grpc_plugin_source::process_events(
|
||||
&config.source,
|
||||
&filter_config,
|
||||
account_write_queue_sender,
|
||||
slot_queue_sender,
|
||||
metrics_tx.clone(),
|
||||
exit.clone(),
|
||||
)
|
||||
.await;
|
||||
} else {
|
||||
websocket_source::process_events(
|
||||
&config.source,
|
||||
&filter_config,
|
||||
account_write_queue_sender,
|
||||
slot_queue_sender,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
|
@ -1,531 +0,0 @@
|
|||
use anchor_lang::AccountDeserialize;
|
||||
use fixed::types::I80F48;
|
||||
use itertools::Itertools;
|
||||
use log::*;
|
||||
use mango_feeds_lib::metrics::MetricU64;
|
||||
use mango_feeds_lib::{
|
||||
base_lots_to_ui, base_lots_to_ui_perp, price_lots_to_ui, price_lots_to_ui_perp, MarketConfig,
|
||||
OrderbookSide,
|
||||
};
|
||||
use mango_feeds_lib::{
|
||||
chain_data::{AccountData, ChainData, ChainDataMetrics, SlotData},
|
||||
metrics::{MetricType, Metrics},
|
||||
AccountWrite, SlotUpdate,
|
||||
};
|
||||
use mango_v4::accounts_zerocopy::{AccountReader, KeyedAccountReader};
|
||||
use mango_v4::state::OracleConfigParams;
|
||||
use mango_v4::{
|
||||
serum3_cpi::OrderBookStateHeader,
|
||||
state::{self, BookSide, OrderTreeType},
|
||||
};
|
||||
use serum_dex::critbit::Slab;
|
||||
use service_mango_orderbook::{
|
||||
BookCheckpoint, BookUpdate, LevelCheckpoint, LevelUpdate, Order, OrderbookFilterMessage,
|
||||
OrderbookLevel,
|
||||
};
|
||||
use solana_sdk::account::AccountSharedData;
|
||||
use solana_sdk::{
|
||||
account::{ReadableAccount, WritableAccount},
|
||||
clock::Epoch,
|
||||
pubkey::Pubkey,
|
||||
};
|
||||
use std::borrow::BorrowMut;
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
mem::size_of,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
},
|
||||
time::{SystemTime, UNIX_EPOCH},
|
||||
};
|
||||
|
||||
struct KeyedSharedDataAccountReader {
|
||||
pub key: Pubkey,
|
||||
pub shared: AccountSharedData,
|
||||
}
|
||||
|
||||
impl AccountReader for KeyedSharedDataAccountReader {
|
||||
fn owner(&self) -> &Pubkey {
|
||||
ReadableAccount::owner(&self.shared)
|
||||
}
|
||||
|
||||
fn data(&self) -> &[u8] {
|
||||
ReadableAccount::data(&self.shared)
|
||||
}
|
||||
}
|
||||
|
||||
impl KeyedAccountReader for KeyedSharedDataAccountReader {
|
||||
fn key(&self) -> &Pubkey {
|
||||
&self.key
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[allow(clippy::ptr_arg)]
|
||||
fn publish_changes(
|
||||
slot: u64,
|
||||
write_version: u64,
|
||||
mkt: &(Pubkey, MarketConfig),
|
||||
side: OrderbookSide,
|
||||
current_orders: &Vec<Order>,
|
||||
previous_orders: &Vec<Order>,
|
||||
maybe_other_orders: Option<&Vec<Order>>,
|
||||
orderbook_update_sender: &async_channel::Sender<OrderbookFilterMessage>,
|
||||
metric_book_updates: &mut MetricU64,
|
||||
metric_level_updates: &mut MetricU64,
|
||||
) {
|
||||
let mut level_update: Vec<OrderbookLevel> = vec![];
|
||||
let mut book_additions: Vec<Order> = vec![];
|
||||
let mut book_removals: Vec<Order> = vec![];
|
||||
|
||||
let current_bookside: Vec<OrderbookLevel> = current_orders
|
||||
.iter()
|
||||
.group_by(|order| order.price)
|
||||
.into_iter()
|
||||
.map(|(price, group)| [price, group.map(|o| o.quantity).sum()])
|
||||
.collect();
|
||||
|
||||
let previous_bookside: Vec<OrderbookLevel> = previous_orders
|
||||
.iter()
|
||||
.group_by(|order| order.price)
|
||||
.into_iter()
|
||||
.map(|(price, group)| [price, group.map(|o| o.quantity).sum()])
|
||||
.collect();
|
||||
|
||||
// push diff for levels that are no longer present
|
||||
if current_bookside.len() != previous_bookside.len() {
|
||||
debug!(
|
||||
"L {}",
|
||||
current_bookside.len() as i64 - previous_bookside.len() as i64
|
||||
)
|
||||
}
|
||||
|
||||
for prev_order in previous_orders.iter() {
|
||||
let peer = current_orders.iter().find(|order| prev_order == *order);
|
||||
|
||||
match peer {
|
||||
None => {
|
||||
debug!("R {:?}", prev_order);
|
||||
book_removals.push(prev_order.clone());
|
||||
}
|
||||
_ => continue,
|
||||
}
|
||||
}
|
||||
|
||||
for previous_level in previous_bookside.iter() {
|
||||
let peer = current_bookside
|
||||
.iter()
|
||||
.find(|level| previous_level[0] == level[0]);
|
||||
|
||||
match peer {
|
||||
None => {
|
||||
debug!("R {} {}", previous_level[0], previous_level[1]);
|
||||
level_update.push([previous_level[0], 0f64]);
|
||||
}
|
||||
_ => continue,
|
||||
}
|
||||
}
|
||||
|
||||
// push diff where there's a new level or size has changed
|
||||
for current_level in ¤t_bookside {
|
||||
let peer = previous_bookside
|
||||
.iter()
|
||||
.find(|item| item[0] == current_level[0]);
|
||||
|
||||
match peer {
|
||||
Some(previous_level) => {
|
||||
if previous_level[1] == current_level[1] {
|
||||
continue;
|
||||
}
|
||||
debug!(
|
||||
"C {} {} -> {}",
|
||||
current_level[0], previous_level[1], current_level[1]
|
||||
);
|
||||
level_update.push(*current_level);
|
||||
}
|
||||
None => {
|
||||
debug!("A {} {}", current_level[0], current_level[1]);
|
||||
level_update.push(*current_level)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for current_order in current_orders {
|
||||
let peer = previous_orders.iter().find(|order| current_order == *order);
|
||||
|
||||
match peer {
|
||||
Some(_) => {
|
||||
continue;
|
||||
}
|
||||
None => {
|
||||
debug!("A {:?}", current_order);
|
||||
book_additions.push(current_order.clone())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
match maybe_other_orders {
|
||||
Some(other_orders) => {
|
||||
let (bids, asks) = match side {
|
||||
OrderbookSide::Bid => (current_orders, other_orders),
|
||||
OrderbookSide::Ask => (other_orders, current_orders),
|
||||
};
|
||||
orderbook_update_sender
|
||||
.try_send(OrderbookFilterMessage::BookCheckpoint(BookCheckpoint {
|
||||
slot,
|
||||
write_version,
|
||||
bids: bids.clone(),
|
||||
asks: asks.clone(),
|
||||
market: mkt.0.to_string(),
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let bid_levels = bids
|
||||
.iter()
|
||||
.group_by(|order| order.price)
|
||||
.into_iter()
|
||||
.map(|(price, group)| [price, group.map(|o| o.quantity).sum()])
|
||||
.collect();
|
||||
|
||||
let ask_levels = asks
|
||||
.iter()
|
||||
.group_by(|order| order.price)
|
||||
.into_iter()
|
||||
.map(|(price, group)| [price, group.map(|o| o.quantity).sum()])
|
||||
.collect();
|
||||
|
||||
orderbook_update_sender
|
||||
.try_send(OrderbookFilterMessage::LevelCheckpoint(LevelCheckpoint {
|
||||
slot,
|
||||
write_version,
|
||||
bids: bid_levels,
|
||||
asks: ask_levels,
|
||||
market: mkt.0.to_string(),
|
||||
}))
|
||||
.unwrap()
|
||||
}
|
||||
None => info!("other bookside not in cache"),
|
||||
}
|
||||
|
||||
if !level_update.is_empty() {
|
||||
orderbook_update_sender
|
||||
.try_send(OrderbookFilterMessage::LevelUpdate(LevelUpdate {
|
||||
market: mkt.0.to_string(),
|
||||
side: side.clone(),
|
||||
update: level_update,
|
||||
slot,
|
||||
write_version,
|
||||
}))
|
||||
.unwrap(); // TODO: use anyhow to bubble up error
|
||||
metric_level_updates.increment();
|
||||
}
|
||||
|
||||
if !book_additions.is_empty() && !book_removals.is_empty() {
|
||||
orderbook_update_sender
|
||||
.try_send(OrderbookFilterMessage::BookUpdate(BookUpdate {
|
||||
market: mkt.0.to_string(),
|
||||
side,
|
||||
additions: book_additions,
|
||||
removals: book_removals,
|
||||
slot,
|
||||
write_version,
|
||||
}))
|
||||
.unwrap();
|
||||
metric_book_updates.increment();
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets up the orderbook filter pipeline and spawns its processing task.
///
/// Returns three channel endpoints for the caller to wire up:
/// - a sender for raw `AccountWrite`s (from the geyser/snapshot source),
/// - a sender for `SlotUpdate`s,
/// - a receiver of `OrderbookFilterMessage`s (L2 level + L3 book updates)
///   produced whenever a tracked bookside or oracle account changes.
///
/// `market_configs` are perp markets (bookside deserialized as `BookSide`),
/// `serum_market_configs` are serum markets (bookside parsed as a `Slab`).
/// The spawned task runs until `exit` is set.
pub async fn init(
    market_configs: Vec<(Pubkey, MarketConfig)>,
    serum_market_configs: Vec<(Pubkey, MarketConfig)>,
    metrics_sender: Metrics,
    exit: Arc<AtomicBool>,
) -> anyhow::Result<(
    async_channel::Sender<AccountWrite>,
    async_channel::Sender<SlotUpdate>,
    async_channel::Receiver<OrderbookFilterMessage>,
)> {
    let mut metric_book_events_new =
        metrics_sender.register_u64("orderbook_book_updates".into(), MetricType::Counter);
    let mut metric_level_events_new =
        metrics_sender.register_u64("orderbook_level_updates".into(), MetricType::Counter);

    // The actual message may want to also contain a retry count, if it self-reinserts on failure?
    let (account_write_queue_sender, account_write_queue_receiver) =
        async_channel::unbounded::<AccountWrite>();

    // Slot updates flowing from the outside into the single processing thread. From
    // there they'll flow into the postgres sending thread.
    let (slot_queue_sender, slot_queue_receiver) = async_channel::unbounded::<SlotUpdate>();

    // Book updates can be consumed by client connections, they contain L2 and L3 updates for all markets
    let (book_update_sender, book_update_receiver) =
        async_channel::unbounded::<OrderbookFilterMessage>();

    let mut chain_cache = ChainData::new();
    let mut chain_data_metrics = ChainDataMetrics::new(&metrics_sender);
    // Per-bookside caches of the last published orders, keyed by the bookside
    // pubkey as a string; used by publish_changes to diff old vs. new state.
    let mut bookside_cache: HashMap<String, Vec<Order>> = HashMap::new();
    let mut serum_bookside_cache: HashMap<String, Vec<Order>> = HashMap::new();
    // Last processed (slot, write_version) per account, to skip stale updates.
    let mut last_write_versions = HashMap::<String, (u64, u64)>::new();

    // Only account writes for bids/asks (and, for perp markets, oracles) are
    // interesting; everything else is dropped before touching the cache.
    let mut relevant_pubkeys = [market_configs.clone(), serum_market_configs.clone()]
        .concat()
        .iter()
        .flat_map(|m| [m.1.bids, m.1.asks])
        .collect::<HashSet<Pubkey>>();

    relevant_pubkeys.extend(market_configs.iter().map(|(_, cfg)| cfg.oracle));

    info!("relevant_pubkeys {:?}", relevant_pubkeys);
    // Update handling task: consumes both slot and account updates, then
    // re-scans all configured markets after each event.
    tokio::spawn(async move {
        loop {
            if exit.load(Ordering::Relaxed) {
                warn!("shutting down orderbook_filter...");
                break;
            }
            tokio::select! {
                Ok(account_write) = account_write_queue_receiver.recv() => {
                    if !relevant_pubkeys.contains(&account_write.pubkey) {
                        continue;
                    }
                    chain_cache.update_account(
                        account_write.pubkey,
                        AccountData {
                            slot: account_write.slot,
                            write_version: account_write.write_version,
                            account: WritableAccount::create(
                                account_write.lamports,
                                account_write.data.clone(),
                                account_write.owner,
                                account_write.executable,
                                account_write.rent_epoch as Epoch,
                            ),
                        },
                    );
                }
                Ok(slot_update) = slot_queue_receiver.recv() => {
                    chain_cache.update_slot(SlotData {
                        slot: slot_update.slot,
                        parent: slot_update.parent,
                        status: slot_update.status,
                        chain: 0,
                    });

                }
            }

            chain_data_metrics.report(&chain_cache);

            // Perp markets: side 0 = bids, side 1 = asks.
            for mkt in market_configs.iter() {
                for side in 0..2 {
                    let mkt_pk = mkt.0;
                    let side_pk = if side == 0 { mkt.1.bids } else { mkt.1.asks };
                    let other_side_pk = if side == 0 { mkt.1.asks } else { mkt.1.bids };
                    let oracle_pk = mkt.1.oracle;
                    let last_side_write_version = last_write_versions
                        .get(&side_pk.to_string())
                        .unwrap_or(&(0, 0));
                    let last_oracle_write_version = last_write_versions
                        .get(&oracle_pk.to_string())
                        .unwrap_or(&(0, 0));

                    match (
                        chain_cache.account(&side_pk),
                        chain_cache.account(&oracle_pk),
                    ) {
                        (Ok(side_info), Ok(oracle_info)) => {
                            let side_pk_string = side_pk.to_string();
                            let oracle_pk_string = oracle_pk.to_string();

                            if !side_info
                                .is_newer_than(last_side_write_version.0, last_side_write_version.1)
                                && !oracle_info.is_newer_than(
                                    last_oracle_write_version.0,
                                    last_oracle_write_version.1,
                                )
                            {
                                // neither bookside nor oracle was updated
                                continue;
                            }
                            last_write_versions.insert(
                                side_pk_string.clone(),
                                (side_info.slot, side_info.write_version),
                            );
                            last_write_versions.insert(
                                oracle_pk_string.clone(),
                                (oracle_info.slot, oracle_info.write_version),
                            );

                            let keyed_account = KeyedSharedDataAccountReader {
                                key: oracle_pk,
                                shared: oracle_info.account.clone(),
                            };
                            let oracle_config = OracleConfigParams {
                                conf_filter: 100_000.0, // use a large value to never fail the confidence check
                                max_staleness_slots: None, // don't check oracle staleness to get an orderbook
                            };

                            if let Ok((oracle_price, _slot)) = state::oracle_price_and_slot(
                                &keyed_account,
                                &oracle_config.to_oracle_config(),
                                mkt.1.base_decimals,
                                None, // force this to always return a price no matter how stale
                            ) {
                                let account = &side_info.account;
                                let bookside: BookSide = BookSide::try_deserialize(
                                    solana_sdk::account::ReadableAccount::data(account)
                                        .borrow_mut(),
                                )
                                .unwrap();
                                // Derive the side from the on-chain order tree type
                                // rather than from the loop index.
                                let side = match bookside.nodes.order_tree_type() {
                                    OrderTreeType::Bids => OrderbookSide::Bid,
                                    OrderTreeType::Asks => OrderbookSide::Ask,
                                };
                                // Needed to filter expired/invalid orders below.
                                let time_now = SystemTime::now()
                                    .duration_since(UNIX_EPOCH)
                                    .unwrap()
                                    .as_secs();
                                // Oracle price in lot units, as required by iter_valid
                                // for oracle-pegged orders.
                                let oracle_price_lots = (oracle_price
                                    * I80F48::from_num(mkt.1.base_lot_size)
                                    / I80F48::from_num(mkt.1.quote_lot_size))
                                .to_num();
                                let bookside: Vec<Order> = bookside
                                    .iter_valid(time_now, oracle_price_lots)
                                    .map(|item| Order {
                                        price: price_lots_to_ui_perp(
                                            item.price_lots,
                                            mkt.1.base_decimals,
                                            mkt.1.quote_decimals,
                                            mkt.1.base_lot_size,
                                            mkt.1.quote_lot_size,
                                        ),
                                        quantity: base_lots_to_ui_perp(
                                            item.node.quantity,
                                            mkt.1.base_decimals,
                                            mkt.1.base_lot_size,
                                        ),
                                        owner_pubkey: item.node.owner.to_string(),
                                    })
                                    .collect();

                                let other_bookside = bookside_cache.get(&other_side_pk.to_string());

                                // Only publish a diff once a previous snapshot exists;
                                // the first sighting of a bookside just seeds the cache.
                                match bookside_cache.get(&side_pk_string) {
                                    Some(old_bookside) => publish_changes(
                                        side_info.slot,
                                        side_info.write_version,
                                        mkt,
                                        side,
                                        &bookside,
                                        old_bookside,
                                        other_bookside,
                                        &book_update_sender,
                                        &mut metric_book_events_new,
                                        &mut metric_level_events_new,
                                    ),
                                    _ => info!("bookside_cache could not find {}", side_pk_string),
                                }

                                bookside_cache.insert(side_pk_string.clone(), bookside.clone());
                            }
                        }
                        (side, oracle) => debug!(
                            "chain_cache could not find for mkt={} side={} oracle={}",
                            mkt_pk,
                            side.is_err(),
                            oracle.is_err()
                        ),
                    }
                }
            }

            // Serum markets: same diff-and-publish scheme, but no oracle is
            // involved and the bookside is a raw serum Slab.
            for mkt in serum_market_configs.iter() {
                for side in 0..2 {
                    let side_pk = if side == 0 { mkt.1.bids } else { mkt.1.asks };
                    let other_side_pk = if side == 0 { mkt.1.asks } else { mkt.1.bids };
                    let last_write_version = last_write_versions
                        .get(&side_pk.to_string())
                        .unwrap_or(&(0, 0));

                    match chain_cache.account(&side_pk) {
                        Ok(account_info) => {
                            let side_pk_string = side_pk.to_string();

                            let write_version = (account_info.slot, account_info.write_version);
                            // `<=` so we don't overwrite with old data received late
                            if write_version <= *last_write_version {
                                continue;
                            }
                            last_write_versions.insert(side_pk_string.clone(), write_version);
                            debug!("W {}", mkt.1.name);
                            let account = &mut account_info.account.clone();
                            let data = account.data_as_mut_slice();
                            let len = data.len();
                            // Strip serum's 5-byte "serum" prefix and 7-byte "padding"
                            // suffix around the account payload before slab parsing.
                            let inner = &mut data[5..len - 7];
                            let slab = Slab::new(&mut inner[size_of::<OrderBookStateHeader>()..]);

                            let bookside: Vec<Order> = slab
                                .iter(side == 0)
                                .map(|item| {
                                    let owner_bytes: [u8; 32] = bytemuck::cast(item.owner());
                                    Order {
                                        price: price_lots_to_ui(
                                            u64::from(item.price()) as i64,
                                            mkt.1.base_decimals,
                                            mkt.1.quote_decimals,
                                            mkt.1.base_lot_size,
                                            mkt.1.quote_lot_size,
                                        ),
                                        quantity: base_lots_to_ui(
                                            item.quantity() as i64,
                                            mkt.1.base_decimals,
                                            mkt.1.quote_decimals,
                                            mkt.1.base_lot_size,
                                            mkt.1.quote_lot_size,
                                        ),
                                        owner_pubkey: Pubkey::new_from_array(owner_bytes)
                                            .to_string(),
                                    }
                                })
                                .collect();

                            let other_bookside =
                                serum_bookside_cache.get(&other_side_pk.to_string());

                            match serum_bookside_cache.get(&side_pk_string) {
                                Some(old_bookside) => publish_changes(
                                    account_info.slot,
                                    account_info.write_version,
                                    mkt,
                                    if side == 0 {
                                        OrderbookSide::Bid
                                    } else {
                                        OrderbookSide::Ask
                                    },
                                    &bookside,
                                    old_bookside,
                                    other_bookside,
                                    &book_update_sender,
                                    &mut metric_book_events_new,
                                    &mut metric_level_events_new,
                                ),
                                _ => info!("bookside_cache could not find {}", side_pk_string),
                            }

                            serum_bookside_cache.insert(side_pk_string.clone(), bookside);
                        }
                        Err(_) => debug!("chain_cache could not find {}", side_pk),
                    }
                }
            }
        }
    });

    Ok((
        account_write_queue_sender,
        slot_queue_sender,
        book_update_receiver,
    ))
}
|
|
@ -1,28 +0,0 @@
|
|||
# Manifest for the standalone PnL service: tracks settleable perp PnL per
# mango account and serves rankings over JSON-RPC (see src/main.rs).
[package]
name = "service-mango-pnl"
version = "0.1.0"
authors = ["Christian Kamm <mail@ckamm.de>"]
edition = "2021"
license = "AGPL-3.0-or-later"

[dependencies]
mango-feeds-lib = { path = "../lib" }
# Solana crates use a tilde requirement: any 1.14.x with x >= 9.
solana-logger = "~1.14.9"
solana-sdk = "~1.14.9"
log = "0.4"
anyhow = "1.0"
toml = "0.5"
bytemuck = "1.7.2"
# HTTP JSON-RPC server used by start_jsonrpc_server.
jsonrpsee = { version = "0.9.0", features = ["http-server"] }

async-trait = "0.1"
fixed = { version = "1.9.0", features = ["serde"] }
bs58 = "0.3.1"
tokio = { version = "1", features = ["full"] }

serde = "1.0.130"
serde_derive = "1.0.130"
# Mango program + client are git dependencies tracking the (unpinned) dev branch.
mango-v4 = { git = "https://github.com/blockworks-foundation/mango-v4", branch = "dev" }
mango-v4-client = { git = "https://github.com/blockworks-foundation/mango-v4", branch = "dev" }
anchor-lang = "0.25.0"
anchor-client = "0.25.0"
|
|
@ -1,31 +0,0 @@
|
|||
# Example configuration for service-mango-pnl: local geyser gRPC source,
# no TLS, JSON-RPC bound to localhost.
[metrics]
output_stdout = true
output_http = true

[source]
dedup_queue_size = 50000
rpc_ws_url = ""

[[source.grpc_sources]]
name = "server"
connection_string = "http://[::1]:10000"
retry_connection_sleep_secs = 30

# Uncomment to enable TLS towards the geyser gRPC endpoint.
#[source.grpc_sources.tls]
#ca_cert_path = "ca.pem"
#client_cert_path = "client.pem"
#client_key_path = "client.pem"
#domain_name = "example.com"

[source.snapshot]
rpc_http_url = ""
program_id = "mv3ekLzLbnVPNxjSKvqBpU3ZeZXPQdEC3bp5MDEBG68"

[pnl]
update_interval_millis = 5000
mango_program = "mv3ekLzLbnVPNxjSKvqBpU3ZeZXPQdEC3bp5MDEBG68"
mango_group = "98pjRuQjK3qA6gXts96PqZT4Ze5QmnCmt3QYjhbUSPue"
# NOTE(review): mango_cache is not declared in the service's PnlConfig struct;
# presumably ignored on deserialization — confirm against src/main.rs.
mango_cache = "EBDRoayCDDUvDgCimta45ajQeXbexv7aKqJubruqpyvu"

[jsonrpc_server]
bind_address = "127.0.0.1:8889"
|
|
@ -1,31 +0,0 @@
|
|||
# Deployment configuration for service-mango-pnl. The "$…" values look like
# placeholders — presumably substituted from the environment at deploy time
# (TOML itself performs no variable expansion); confirm with the deploy tooling.
[metrics]
output_stdout = true
output_http = true

[source]
dedup_queue_size = 50000
rpc_ws_url = ""

[[source.grpc_sources]]
name = "accountsdb-client"
connection_string = "$GEYSER_CONNECTION_STRING"
retry_connection_sleep_secs = 30

# TLS towards the geyser gRPC endpoint; cert and key share one PEM file.
[source.grpc_sources.tls]
ca_cert_path = "$GEYSER_CA_CERT"
client_cert_path = "$GEYSER_CLIENT_CERT"
client_key_path = "$GEYSER_CLIENT_CERT"
domain_name = "$GEYSER_CERT_DOMAIN"

[source.snapshot]
rpc_http_url = "$RPC_HTTP_URL"
program_id = "mv3ekLzLbnVPNxjSKvqBpU3ZeZXPQdEC3bp5MDEBG68"

[pnl]
update_interval_millis = 5000
mango_program = "mv3ekLzLbnVPNxjSKvqBpU3ZeZXPQdEC3bp5MDEBG68"
mango_group = "98pjRuQjK3qA6gXts96PqZT4Ze5QmnCmt3QYjhbUSPue"
# NOTE(review): mango_cache is not declared in the service's PnlConfig struct;
# presumably ignored on deserialization — confirm against src/main.rs.
mango_cache = "EBDRoayCDDUvDgCimta45ajQeXbexv7aKqJubruqpyvu"

[jsonrpc_server]
bind_address = "0.0.0.0:2052"
|
|
@ -1,320 +0,0 @@
|
|||
use {
|
||||
log::*,
|
||||
mango_feeds_lib::chain_data::ChainData,
|
||||
mango_feeds_lib::*,
|
||||
serde_derive::{Deserialize, Serialize},
|
||||
solana_sdk::pubkey::Pubkey,
|
||||
std::str::FromStr,
|
||||
std::{
|
||||
fs::File,
|
||||
io::Read,
|
||||
mem::size_of,
|
||||
sync::{atomic::AtomicBool, Arc, RwLock},
|
||||
time::Duration,
|
||||
},
|
||||
};
|
||||
|
||||
use anchor_client::Cluster;
|
||||
use anchor_lang::Discriminator;
|
||||
use fixed::types::I80F48;
|
||||
use mango_feeds_lib::metrics::*;
|
||||
use mango_v4::state::{MangoAccount, MangoAccountValue, PerpMarketIndex};
|
||||
use mango_v4_client::{
|
||||
chain_data, health_cache, AccountFetcher, Client, MangoGroupContext, TransactionBuilderConfig,
|
||||
};
|
||||
use solana_sdk::commitment_config::CommitmentConfig;
|
||||
use solana_sdk::{account::ReadableAccount, signature::Keypair};
|
||||
/// Settings for the PnL computation loop, deserialized from the `[pnl]`
/// section of the TOML config file.
#[derive(Clone, Debug, Deserialize)]
pub struct PnlConfig {
    // How often the PnL snapshot is recomputed, in milliseconds.
    pub update_interval_millis: u64,
    // Mango program id (base58); parsed into a Pubkey in start_pnl_updater.
    pub mango_program: String,
    // Mango group account (base58); accounts of other groups are skipped.
    pub mango_group: String,
}
|
||||
|
||||
/// Settings for the JSON-RPC server, deserialized from the `[jsonrpc_server]`
/// section of the TOML config file.
#[derive(Clone, Debug, Deserialize)]
pub struct JsonRpcConfig {
    // Socket address (host:port) the HTTP JSON-RPC server binds to.
    pub bind_address: String,
}
|
||||
|
||||
/// Top-level service configuration, deserialized from the TOML file passed
/// as the first command-line argument (see `main`).
#[derive(Clone, Debug, Deserialize)]
pub struct Config {
    pub source: SourceConfig,
    // NOTE(review): the shipped configs nest the snapshot settings under
    // `[source.snapshot]`, while this field reads a top-level `snapshot_source`
    // table — confirm which table SourceConfig actually owns.
    pub snapshot_source: SnapshotSourceConfig,
    pub metrics: MetricsConfig,
    pub pnl: PnlConfig,
    pub jsonrpc_server: JsonRpcConfig,
}
|
||||
|
||||
/// Latest PnL snapshot: for each mango account, the settleable PnL per perp market.
type PnlData = Vec<(Pubkey, Vec<(PerpMarketIndex, I80F48)>)>;
|
||||
|
||||
async fn compute_pnl(
|
||||
context: Arc<MangoGroupContext>,
|
||||
account_fetcher: Arc<impl AccountFetcher>,
|
||||
account: &MangoAccountValue,
|
||||
) -> anyhow::Result<Vec<(PerpMarketIndex, I80F48)>> {
|
||||
let health_cache = health_cache::new(&context, account_fetcher.as_ref(), account).await?;
|
||||
let perp_settle_health = health_cache.perp_settle_health();
|
||||
|
||||
let pnls = account
|
||||
.active_perp_positions()
|
||||
.filter_map(|pp| {
|
||||
if pp.base_position_lots() != 0 {
|
||||
return None;
|
||||
}
|
||||
let pnl = pp.quote_position_native();
|
||||
let settleable_pnl = if pnl > 0 {
|
||||
pnl
|
||||
} else if pnl < 0 && perp_settle_health > 0 {
|
||||
pnl.max(-perp_settle_health)
|
||||
} else {
|
||||
return None;
|
||||
};
|
||||
Some((pp.market_index, I80F48::from_bits(settleable_pnl.to_bits())))
|
||||
})
|
||||
.collect::<Vec<(PerpMarketIndex, I80F48)>>();
|
||||
|
||||
Ok(pnls)
|
||||
}
|
||||
|
||||
// regularly updates pnl_data from chain_data
/// Spawns a background task that, every `config.update_interval_millis`,
/// recomputes settleable PnL for all mango accounts found in `chain_data`
/// and atomically replaces `pnl_data` with the new snapshot.
/// `metrics_pnls_tracked` is set to the number of accounts in the snapshot.
fn start_pnl_updater(
    config: PnlConfig,
    context: Arc<MangoGroupContext>,
    account_fetcher: Arc<impl AccountFetcher + 'static>,
    chain_data: Arc<RwLock<ChainData>>,
    pnl_data: Arc<RwLock<PnlData>>,
    metrics_pnls_tracked: MetricU64,
) {
    // Panics on an invalid config: these strings come straight from the
    // operator-provided TOML file, so failing fast at startup is intended.
    let program_pk = Pubkey::from_str(&config.mango_program).unwrap();
    let group_pk = Pubkey::from_str(&config.mango_group).unwrap();

    tokio::spawn(async move {
        loop {
            tokio::time::sleep(std::time::Duration::from_millis(
                config.update_interval_millis,
            ))
            .await;

            // Point-in-time copy of all cached accounts, so the chain data lock
            // is not held while PnL is computed below.
            let snapshot = chain_data.read().unwrap().accounts_snapshot();

            // get the group and cache now
            let group = snapshot.get(&group_pk);
            if group.is_none() {
                // Group account not received yet — try again next tick.
                continue;
            }

            let mut pnls = Vec::with_capacity(snapshot.len());
            for (pubkey, account) in snapshot.iter() {
                let owner = account.account.owner();
                let data = account.account.data();

                // Keep only mango accounts: matching size, anchor discriminator
                // and program owner. NOTE(review): MangoAccountValue parses a
                // dynamically-sized layout, while this requires the exact
                // size_of::<MangoAccount>() — confirm that is intended.
                if data.len() != size_of::<MangoAccount>()
                    || data[0..8] != MangoAccount::discriminator()
                    || owner != &program_pk
                {
                    continue;
                }

                let mango_account = MangoAccountValue::from_bytes(&data[8..]).unwrap();
                if mango_account.fixed.group != group_pk {
                    continue;
                }

                let pnl_vals =
                    compute_pnl(context.clone(), account_fetcher.clone(), &mango_account)
                        .await
                        .unwrap();

                // Alternatively, we could prepare the sorted and limited lists for each
                // market here. That would be faster and cause less contention on the pnl_data
                // lock, but it looks like it's very far from being an issue.
                pnls.push((*pubkey, pnl_vals));
            }

            *pnl_data.write().unwrap() = pnls;
            metrics_pnls_tracked
                .clone()
                .set(pnl_data.read().unwrap().len() as u64)
        }
    });
}
|
||||
|
||||
/// Request parameters of the `unsettledPnlRanked` JSON-RPC method.
#[derive(Serialize, Deserialize, Debug)]
struct UnsettledPnlRankedRequest {
    // Perp market index to rank by (widened to u16 before comparison).
    market_index: u8,
    // Maximum number of entries to return; requests with limit > 20 are rejected.
    limit: u8,
    // Sort direction; must be exactly "ASC" or "DESC".
    order: String,
}
|
||||
|
||||
/// One entry of the `unsettledPnlRanked` response.
#[derive(Serialize, Deserialize)]
struct PnlResponseItem {
    // Settleable PnL in the requested market, converted from I80F48 to f64.
    pnl: f64,
    // Mango account address (base58).
    pubkey: String,
}
|
||||
|
||||
use jsonrpsee::http_server::HttpServerHandle;
|
||||
|
||||
/// Builds and starts the HTTP JSON-RPC server exposing the single method
/// `unsettledPnlRanked`: returns up to `limit` accounts ranked by settleable
/// PnL in one perp market, reading from the shared `pnl_data` snapshot.
///
/// Returns the server handle; dropping the handle stops the server.
fn start_jsonrpc_server(
    config: JsonRpcConfig,
    pnl_data: Arc<RwLock<PnlData>>,
    metrics_reqs: MetricU64,
    metrics_invalid_reqs: MetricU64,
) -> anyhow::Result<HttpServerHandle> {
    use jsonrpsee::core::Error;
    use jsonrpsee::http_server::{HttpServerBuilder, RpcModule};
    use jsonrpsee::types::error::CallError;
    use std::net::SocketAddr;

    let server = HttpServerBuilder::default().build(config.bind_address.parse::<SocketAddr>()?)?;
    let mut module = RpcModule::new(());
    module.register_method("unsettledPnlRanked", move |params, _| {
        let req = params.parse::<UnsettledPnlRankedRequest>()?;
        metrics_reqs.clone().increment();
        // Helper producing an "invalid params" JSON-RPC error response.
        let invalid =
            |s: &'static str| Err(Error::Call(CallError::InvalidParams(anyhow::anyhow!(s))));
        let limit = req.limit as usize;
        if limit > 20 {
            metrics_invalid_reqs.clone().increment();
            return invalid("'limit' must be <= 20");
        }
        let market_index = req.market_index as u16;
        // if market_index >= MAX_PAIRS {
        //     metrics_invalid_reqs.clone().increment();
        //     return invalid("'market_index' must be < MAX_PAIRS");
        // }
        if req.order != "ASC" && req.order != "DESC" {
            metrics_invalid_reqs.clone().increment();
            return invalid("'order' must be ASC or DESC");
        }

        // write lock, because we sort in-place...
        // Accounts without a position in this market compare via Option (None),
        // so they sort to one end of the list.
        let mut pnls = pnl_data.write().unwrap();
        if req.order == "ASC" {
            pnls.sort_unstable_by(|a, b| {
                a.1.iter()
                    .find(|x| x.0 == market_index)
                    .cmp(&b.1.iter().find(|x| x.0 == market_index))
            });
        } else {
            pnls.sort_unstable_by(|a, b| {
                b.1.iter()
                    .find(|x| x.0 == market_index)
                    .cmp(&a.1.iter().find(|x| x.0 == market_index))
            });
        }
        let response = pnls
            .iter()
            .take(limit)
            .map(|p| PnlResponseItem {
                // NOTE(review): if fewer than `limit` accounts have a position in
                // this market, find() returns None here and this unwrap panics
                // inside the handler — confirm whether that is reachable.
                pnl: p
                    .1
                    .iter()
                    .find(|x| x.0 == market_index)
                    .unwrap()
                    .1
                    .to_num::<f64>(),
                pubkey: p.0.to_string(),
            })
            .collect::<Vec<_>>();

        Ok(response)
    })?;

    Ok(server.start(module)?)
}
|
||||
|
||||
/// Service entry point: loads the TOML config named by the first CLI argument,
/// connects to the mango group via RPC, starts the PnL updater and JSON-RPC
/// server, then feeds chain data from the geyser gRPC source until exit.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let exit: Arc<AtomicBool> = Arc::new(AtomicBool::new(false));

    let args: Vec<String> = std::env::args().collect();
    if args.len() < 2 {
        println!("requires a config file argument");
        return Ok(());
    }

    // Read and parse the TOML config; a malformed file panics at startup.
    let config: Config = {
        let mut file = File::open(&args[1])?;
        let mut contents = String::new();
        file.read_to_string(&mut contents)?;
        toml::from_str(&contents).unwrap()
    };

    solana_logger::setup_with_default("info");
    info!("startup");

    let rpc_url = config.snapshot_source.rpc_http_url;
    // Derive the websocket endpoint by scheme substitution; assumes the HTTP
    // URL uses "https" — a plain "http" URL would pass through unchanged.
    let ws_url = rpc_url.replace("https", "wss");
    let rpc_timeout = Duration::from_secs(10);
    let cluster = Cluster::Custom(rpc_url.clone(), ws_url.clone());
    let commitment = CommitmentConfig::processed();
    // Throwaway keypair: this client only reads, it never signs transactions.
    let client = Client::new(
        cluster.clone(),
        commitment,
        Arc::new(Keypair::new()),
        Some(rpc_timeout),
        TransactionBuilderConfig {
            prioritization_micro_lamports: None,
        },
    );
    let group_context = Arc::new(
        MangoGroupContext::new_from_rpc(
            &client.rpc_async(),
            Pubkey::from_str(&config.pnl.mango_group).unwrap(),
        )
        .await?,
    );
    // mango_v4_client's ChainData, consulted by account_fetcher below.
    let chain_data = Arc::new(RwLock::new(chain_data::ChainData::new()));
    let account_fetcher = Arc::new(chain_data::AccountFetcher {
        chain_data: chain_data.clone(),
        rpc: client.rpc_async(),
    });

    let metrics_tx = metrics::start(config.metrics, "pnl".into());

    let metrics_reqs =
        metrics_tx.register_u64("pnl_jsonrpc_reqs_total".into(), MetricType::Counter);
    let metrics_invalid_reqs =
        metrics_tx.register_u64("pnl_jsonrpc_reqs_invalid_total".into(), MetricType::Counter);
    let metrics_pnls_tracked = metrics_tx.register_u64("pnl_num_tracked".into(), MetricType::Gauge);

    // BUG: This shadows the previous chain_data and means this can't actually get data!
    // The rebinding below is mango_feeds_lib's ChainData (a different type) and is
    // the one fed by memory_target::init further down; account_fetcher still holds
    // the earlier mango_v4_client chain_data, which is never fed by this process.
    let chain_data = Arc::new(RwLock::new(ChainData::new()));
    let pnl_data = Arc::new(RwLock::new(PnlData::new()));

    start_pnl_updater(
        config.pnl.clone(),
        group_context.clone(),
        account_fetcher.clone(),
        chain_data.clone(),
        pnl_data.clone(),
        metrics_pnls_tracked,
    );

    // dropping the handle would exit the server
    let _http_server_handle = start_jsonrpc_server(
        config.jsonrpc_server.clone(),
        pnl_data,
        metrics_reqs,
        metrics_invalid_reqs,
    )?;

    // start filling chain_data from the grpc plugin source
    let (account_write_queue_sender, slot_queue_sender) = memory_target::init(chain_data).await?;
    // Subscribe to all accounts owned by the mango v4 program.
    let filter_config = FilterConfig {
        entity_filter: EntityFilter::filter_by_program_id(
            "4MangoMjqJ2firMokCjjGgoK8d4MXcrgL7XJaL3w6fVg",
        ),
    };
    // Runs until the source terminates; this is the service's main blocking call.
    grpc_plugin_source::process_events(
        &config.source,
        &filter_config,
        account_write_queue_sender,
        slot_queue_sender,
        metrics_tx.clone(),
        exit.clone(),
    )
    .await;

    Ok(())
}
|
Loading…
Reference in New Issue