Merge remote-tracking branch 'origin/main' into feature/util-histogram

GroovieGermanikus 2024-03-27 12:15:15 +01:00
commit 43365a3342
No known key found for this signature in database
GPG Key ID: 5B6EB831A5CD2015
80 changed files with 4038 additions and 1196 deletions


@ -0,0 +1,23 @@
name: Deploy benchrunner to Fly
on:
push:
tags:
- 'production/benchrunner-*'
- 'experimental/benchrunner-*'
env:
FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }}
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup Fly
uses: superfly/flyctl-actions/setup-flyctl@master
- name: Deploy solana-lite-rpc-benchrunner
run: flyctl deploy -c cd/solana-lite-rpc-benchrunner.toml --remote-only

Cargo.lock generated

@ -89,9 +89,9 @@ dependencies = [
[[package]]
name = "aho-corasick"
version = "1.1.2"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0"
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
dependencies = [
"memchr",
]
@ -433,18 +433,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
name = "async-trait"
version = "0.1.77"
version = "0.1.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9"
checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -520,9 +520,9 @@ dependencies = [
[[package]]
name = "backtrace"
version = "0.3.69"
version = "0.3.71"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837"
checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d"
dependencies = [
"addr2line",
"cc",
@ -573,14 +573,19 @@ dependencies = [
"anyhow",
"bincode",
"clap 4.5.3",
"csv",
"dashmap 5.5.3",
"dirs",
"futures",
"itertools 0.10.5",
"lazy_static",
"log",
"rand 0.8.5",
"rand_chacha 0.3.1",
"reqwest",
"serde",
"serde_json",
"solana-lite-rpc-util",
"solana-rpc-client",
"solana-rpc-client-api",
"solana-sdk",
@ -589,6 +594,7 @@ dependencies = [
"tokio",
"tracing",
"tracing-subscriber",
"url",
]
[[package]]
@ -608,9 +614,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
version = "2.4.2"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf"
checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1"
dependencies = [
"serde",
]
@ -755,9 +761,9 @@ dependencies = [
[[package]]
name = "brotli"
version = "3.4.0"
version = "3.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "516074a47ef4bce09577a3b379392300159ce5b1ba2e501ff1c819950066100f"
checksum = "d640d25bc63c50fb1f0b545ffd80207d2e10a4c965530809b40ba3386825c391"
dependencies = [
"alloc-no-stdlib",
"alloc-stdlib",
@ -813,7 +819,7 @@ checksum = "4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -824,9 +830,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "bytes"
version = "1.5.0"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223"
checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9"
[[package]]
name = "cap"
@ -946,7 +952,7 @@ dependencies = [
"heck 0.5.0",
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -1166,6 +1172,27 @@ dependencies = [
"subtle",
]
[[package]]
name = "csv"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe"
dependencies = [
"csv-core",
"itoa",
"ryu",
"serde",
]
[[package]]
name = "csv-core"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70"
dependencies = [
"memchr",
]
[[package]]
name = "ctr"
version = "0.8.0"
@ -1218,7 +1245,7 @@ dependencies = [
"proc-macro2",
"quote",
"strsim 0.10.0",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -1229,7 +1256,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f"
dependencies = [
"darling_core",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -1355,6 +1382,27 @@ dependencies = [
"subtle",
]
[[package]]
name = "dirs"
version = "5.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225"
dependencies = [
"dirs-sys",
]
[[package]]
name = "dirs-sys"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c"
dependencies = [
"libc",
"option-ext",
"redox_users",
"windows-sys 0.48.0",
]
[[package]]
name = "displaydoc"
version = "0.2.4"
@ -1363,7 +1411,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -1386,7 +1434,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -1474,7 +1522,7 @@ checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -1520,9 +1568,9 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7"
[[package]]
name = "fastrand"
version = "2.0.1"
version = "2.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5"
checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984"
[[package]]
name = "feature-probe"
@ -1638,7 +1686,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -1731,7 +1779,7 @@ dependencies = [
[[package]]
name = "geyser-grpc-connector"
version = "0.10.1+yellowstone.1.12"
source = "git+https://github.com/blockworks-foundation/geyser-grpc-connector.git?tag=v0.10.3+yellowstone.1.12+solana.1.17.15#7e2fd7f97d69335fcb14fafc67287759af268733"
source = "git+https://github.com/blockworks-foundation/geyser-grpc-connector.git?tag=v0.10.3+yellowstone.1.12+solana.1.17.15-hacked-windowsize3#ae56e0f5f894933bea046e8f220f74df3eab5355"
dependencies = [
"anyhow",
"async-stream",
@ -1744,6 +1792,7 @@ dependencies = [
"merge-streams",
"solana-sdk",
"tokio",
"tonic-health",
"tracing",
"url",
"yellowstone-grpc-client",
@ -1825,7 +1874,7 @@ dependencies = [
"futures-sink",
"futures-util",
"http",
"indexmap 2.2.5",
"indexmap 2.2.6",
"slab",
"tokio",
"tokio-util",
@ -2041,6 +2090,19 @@ dependencies = [
"tokio-io-timeout",
]
[[package]]
name = "hyper-tls"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905"
dependencies = [
"bytes",
"hyper",
"native-tls",
"tokio",
"tokio-native-tls",
]
[[package]]
name = "iana-time-zone"
version = "0.1.60"
@ -2108,9 +2170,9 @@ dependencies = [
[[package]]
name = "indexmap"
version = "2.2.5"
version = "2.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4"
checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26"
dependencies = [
"equivalent",
"hashbrown 0.14.3",
@ -2178,6 +2240,26 @@ version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c"
[[package]]
name = "jemalloc-sys"
version = "0.5.4+5.3.0-patched"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac6c1946e1cea1788cbfde01c993b52a10e2da07f4bac608228d1bed20bfebf2"
dependencies = [
"cc",
"libc",
]
[[package]]
name = "jemallocator"
version = "0.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0de374a9f8e63150e6f5e8a60cc14c668226d7a347d8aee1a45766e3c4dd3bc"
dependencies = [
"jemalloc-sys",
"libc",
]
[[package]]
name = "jobserver"
version = "0.1.28"
@ -2393,6 +2475,17 @@ version = "0.2.153"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
[[package]]
name = "libredox"
version = "0.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8"
dependencies = [
"bitflags 2.5.0",
"libc",
"redox_syscall",
]
[[package]]
name = "libsecp256k1"
version = "0.6.0"
@ -2481,6 +2574,7 @@ dependencies = [
"futures-util",
"hyper",
"itertools 0.10.5",
"jemallocator",
"jsonrpsee",
"lazy_static",
"log",
@ -2780,7 +2874,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -2870,7 +2964,7 @@ dependencies = [
"proc-macro-crate 1.3.1",
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -2882,7 +2976,7 @@ dependencies = [
"proc-macro-crate 3.1.0",
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -2927,7 +3021,7 @@ version = "0.10.64"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f"
dependencies = [
"bitflags 2.4.2",
"bitflags 2.5.0",
"cfg-if",
"foreign-types",
"libc",
@ -2944,7 +3038,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -2965,6 +3059,12 @@ dependencies = [
"vcpkg",
]
[[package]]
name = "option-ext"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"
[[package]]
name = "os_str_bytes"
version = "6.6.1"
@ -3055,7 +3155,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9"
dependencies = [
"fixedbitset",
"indexmap 2.2.5",
"indexmap 2.2.6",
]
[[package]]
@ -3093,7 +3193,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -3149,6 +3249,18 @@ version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0"
[[package]]
name = "postgres-derive"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83145eba741b050ef981a9a1838c843fa7665e154383325aa8b440ae703180a2"
dependencies = [
"heck 0.4.1",
"proc-macro2",
"quote",
"syn 2.0.55",
]
[[package]]
name = "postgres-native-tls"
version = "0.5.0"
@ -3189,7 +3301,10 @@ dependencies = [
"bytes",
"chrono",
"fallible-iterator",
"postgres-derive",
"postgres-protocol",
"serde",
"serde_json",
]
[[package]]
@ -3206,12 +3321,12 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
[[package]]
name = "prettyplease"
version = "0.2.16"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5"
checksum = "8d3928fb5db768cb86f891ff014f0144589297e3c6a1aba6ed7cecfdace270c7"
dependencies = [
"proc-macro2",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -3293,7 +3408,7 @@ dependencies = [
"prost",
"prost-types",
"regex",
"syn 2.0.52",
"syn 2.0.55",
"tempfile",
"which",
]
@ -3308,7 +3423,7 @@ dependencies = [
"itertools 0.11.0",
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -3352,7 +3467,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -3500,9 +3615,9 @@ checksum = "e6e97ca3dbabd81e6033cfe09f0cef37c89f34f2a9223cab7cf99305d46a9633"
[[package]]
name = "rayon"
version = "1.9.0"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd"
checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
dependencies = [
"either",
"rayon-core",
@ -3552,10 +3667,21 @@ dependencies = [
]
[[package]]
name = "regex"
version = "1.10.3"
name = "redox_users"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15"
checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4"
dependencies = [
"getrandom 0.2.12",
"libredox",
"thiserror",
]
[[package]]
name = "regex"
version = "1.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c"
dependencies = [
"aho-corasick",
"memchr",
@ -3597,9 +3723,9 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f"
[[package]]
name = "reqwest"
version = "0.11.26"
version = "0.11.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78bf93c4af7a8bb7d879d51cebe797356ff10ae8516ace542b5182d9dcac10b2"
checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62"
dependencies = [
"async-compression",
"base64 0.21.7",
@ -3612,10 +3738,12 @@ dependencies = [
"http-body",
"hyper",
"hyper-rustls",
"hyper-tls",
"ipnet",
"js-sys",
"log",
"mime",
"native-tls",
"once_cell",
"percent-encoding",
"pin-project-lite",
@ -3627,6 +3755,7 @@ dependencies = [
"sync_wrapper",
"system-configuration",
"tokio",
"tokio-native-tls",
"tokio-rustls",
"tokio-util",
"tower-service",
@ -3727,11 +3856,11 @@ dependencies = [
[[package]]
name = "rustix"
version = "0.38.31"
version = "0.38.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949"
checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89"
dependencies = [
"bitflags 2.4.2",
"bitflags 2.5.0",
"errno",
"libc",
"linux-raw-sys",
@ -3825,7 +3954,7 @@ checksum = "1db149f81d46d2deba7cd3c50772474707729550221e69588478ebf9ada425ae"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -3899,7 +4028,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -3944,7 +4073,7 @@ dependencies = [
"darling",
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -4074,9 +4203,9 @@ dependencies = [
[[package]]
name = "smallvec"
version = "1.13.1"
version = "1.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7"
checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
[[package]]
name = "socket2"
@ -4178,7 +4307,7 @@ dependencies = [
"dashmap 4.0.2",
"futures",
"futures-util",
"indexmap 2.2.5",
"indexmap 2.2.6",
"indicatif",
"log",
"quinn",
@ -4224,7 +4353,7 @@ dependencies = [
"bincode",
"crossbeam-channel",
"futures-util",
"indexmap 2.2.5",
"indexmap 2.2.6",
"log",
"rand 0.8.5",
"rayon",
@ -4275,7 +4404,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustc_version",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -4400,6 +4529,37 @@ dependencies = [
"tokio",
]
[[package]]
name = "solana-lite-rpc-benchrunner-service"
version = "0.2.4"
dependencies = [
"anyhow",
"async-trait",
"bench",
"chrono",
"clap 4.5.3",
"futures",
"futures-util",
"itertools 0.10.5",
"lazy_static",
"log",
"native-tls",
"postgres-native-tls",
"postgres-types",
"prometheus",
"serde",
"serde_json",
"solana-lite-rpc-util",
"solana-rpc-client",
"solana-rpc-client-api",
"solana-sdk",
"solana-transaction-status",
"tokio",
"tokio-postgres",
"tokio-util",
"tracing-subscriber",
]
[[package]]
name = "solana-lite-rpc-blockstore"
version = "0.2.4"
@ -4472,6 +4632,7 @@ dependencies = [
"thiserror",
"tokio",
"tokio-stream",
"tonic-health",
"tracing",
"yellowstone-grpc-client",
"yellowstone-grpc-proto",
@ -4492,7 +4653,9 @@ dependencies = [
"futures",
"itertools 0.10.5",
"log",
"prometheus",
"quinn",
"rand 0.8.5",
"rustls",
"serde",
"serde_json",
@ -4778,7 +4941,7 @@ dependencies = [
"ark-serialize",
"base64 0.21.7",
"bincode",
"bitflags 2.4.2",
"bitflags 2.5.0",
"blake3",
"borsh 0.10.3",
"borsh 0.9.3",
@ -4999,7 +5162,7 @@ dependencies = [
"assert_matches",
"base64 0.21.7",
"bincode",
"bitflags 2.4.2",
"bitflags 2.5.0",
"borsh 0.10.3",
"bs58",
"bytemuck",
@ -5054,7 +5217,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustversion",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -5074,7 +5237,7 @@ dependencies = [
"crossbeam-channel",
"futures-util",
"histogram",
"indexmap 2.2.5",
"indexmap 2.2.6",
"itertools 0.10.5",
"libc",
"log",
@ -5119,7 +5282,7 @@ dependencies = [
"async-trait",
"bincode",
"futures-util",
"indexmap 2.2.5",
"indexmap 2.2.6",
"indicatif",
"log",
"rayon",
@ -5317,7 +5480,7 @@ checksum = "07fd7858fc4ff8fb0e34090e41d7eb06a823e1057945c26d480bfc21d2338a93"
dependencies = [
"quote",
"spl-discriminator-syn",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -5329,7 +5492,7 @@ dependencies = [
"proc-macro2",
"quote",
"sha2 0.10.8",
"syn 2.0.52",
"syn 2.0.55",
"thiserror",
]
@ -5386,7 +5549,7 @@ dependencies = [
"proc-macro2",
"quote",
"sha2 0.10.8",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -5546,9 +5709,9 @@ dependencies = [
[[package]]
name = "syn"
version = "2.0.52"
version = "2.0.55"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07"
checksum = "002a1b3dbf967edfafc32655d0f377ab0bb7b994aa1d32c8cc7e9b8bf3ebb8f0"
dependencies = [
"proc-macro2",
"quote",
@ -5647,7 +5810,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -5762,7 +5925,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -5874,7 +6037,7 @@ version = "0.19.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
dependencies = [
"indexmap 2.2.5",
"indexmap 2.2.6",
"toml_datetime",
"winnow",
]
@ -5885,7 +6048,7 @@ version = "0.21.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1"
dependencies = [
"indexmap 2.2.5",
"indexmap 2.2.6",
"toml_datetime",
"winnow",
]
@ -5931,7 +6094,7 @@ dependencies = [
"proc-macro2",
"prost-build",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -5975,7 +6138,7 @@ checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140"
dependencies = [
"async-compression",
"base64 0.21.7",
"bitflags 2.4.2",
"bitflags 2.5.0",
"bytes",
"futures-core",
"futures-util",
@ -6029,7 +6192,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -6212,9 +6375,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
[[package]]
name = "uuid"
version = "1.7.0"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a"
checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0"
dependencies = [
"getrandom 0.2.12",
]
@ -6297,7 +6460,7 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
"wasm-bindgen-shared",
]
@ -6331,7 +6494,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@ -6655,7 +6818,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]
@ -6675,7 +6838,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.55",
]
[[package]]


@ -12,6 +12,7 @@ members = [
"blockstore",
"prioritization_fees",
"bench",
"benchrunner-service",
"address-lookup-tables",
"accounts",
"accounts-on-demand",
@ -40,7 +41,6 @@ solana-account-decoder = "~1.17.15"
solana-ledger = "~1.17.15"
solana-program = "~1.17.15"
solana-address-lookup-table-program = "~1.17.15"
itertools = "0.10.5"
rangetools = "0.1.4"
serde = { version = "1.0.160", features = ["derive"] }
@ -69,6 +69,7 @@ lazy_static = "1.4.0"
dotenv = "0.15.0"
async-channel = "1.8.0"
merge-streams = "0.1.2"
jemallocator = "0.5"
quinn = "0.10.2"
quinn-proto = "0.10.5"
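The jemallocator dependency added above (and, per the Cargo.lock changes, pulled into the lite-rpc crate) is conventionally activated by registering it as the global allocator in the binary crate. The actual registration site is not shown in this diff, so the following is only a minimal sketch of that convention:

#[global_allocator]
static GLOBAL: jemallocator::Jemalloc = jemallocator::Jemalloc;

fn main() {
    // every heap allocation below this point is served by jemalloc
    let buf: Vec<u8> = Vec::with_capacity(1024);
    drop(buf);
}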
@ -84,7 +85,9 @@ solana-lite-rpc-prioritization-fees = {path = "prioritization_fees", version="0.
solana-lite-rpc-address-lookup-tables = {path = "address-lookup-tables", version="0.2.4"}
solana-lite-rpc-accounts = {path = "accounts", version = "0.2.4"}
solana-lite-rpc-accounts-on-demand = {path = "accounts-on-demand", version = "0.2.4"}
bench = { path = "bench", version="0.2.4" }
async-trait = "0.1.68"
yellowstone-grpc-client = { version = "1.13.0+solana.1.17.15", git = "https://github.com/rpcpool/yellowstone-grpc.git", tag = "v1.12.0+solana.1.17.15" }
yellowstone-grpc-proto = { version = "1.12.0+solana.1.17.15", git = "https://github.com/rpcpool/yellowstone-grpc.git", tag = "v1.12.0+solana.1.17.15" }
yellowstone-grpc-client = { git = "https://github.com/rpcpool/yellowstone-grpc.git", tag = "v1.12.0+solana.1.17.15" }
yellowstone-grpc-proto = { git = "https://github.com/rpcpool/yellowstone-grpc.git", tag = "v1.12.0+solana.1.17.15" }
tonic-health = "0.10"


@ -14,11 +14,15 @@ FROM base as build
COPY --from=plan /app/recipe.json recipe.json
RUN cargo chef cook --release --recipe-path recipe.json
COPY . .
ENV RUSTFLAGS="--cfg tokio_unstable"
RUN cargo build --release --bin lite-rpc --bin solana-lite-rpc-quic-forward-proxy
FROM debian:bookworm-slim as run
RUN apt-get update && apt-get -y install ca-certificates libc6 libssl3 libssl-dev openssl
COPY --from=build /app/target/release/solana-lite-rpc-quic-forward-proxy /usr/local/bin/
COPY --from=build /app/target/release/lite-rpc /usr/local/bin/
COPY openssl-legacy.cnf /etc/ssl/openssl-legacy.cnf
CMD lite-rpc
ENV OPENSSL_CONF=/etc/ssl/openssl-legacy.cnf
CMD lite-rpc

Dockerfile-benchrunner Normal file

@ -0,0 +1,31 @@
# syntax = docker/dockerfile:1.2
FROM rust:1.75.0 as base
RUN cargo install cargo-chef@0.1.62 --locked
RUN rustup component add rustfmt
RUN apt-get update && apt-get install -y clang cmake ssh
WORKDIR /app
FROM base AS plan
COPY . .
WORKDIR /app
RUN cargo chef prepare --recipe-path recipe.json
FROM base as build
COPY --from=plan /app/recipe.json recipe.json
RUN cargo chef cook --release --recipe-path recipe.json
COPY . .
RUN cargo build --release --bin solana-lite-rpc-benchrunner-service
FROM debian:bookworm-slim as run
RUN apt-get update && apt-get -y install ca-certificates libc6 libssl3 libssl-dev openssl
COPY openssl-legacy.cnf /etc/ssl/openssl-legacy.cnf
COPY --from=build /app/target/release/solana-lite-rpc-benchrunner-service /usr/local/bin/
ENV OPENSSL_CONF=/etc/ssl/openssl-legacy.cnf
CMD solana-lite-rpc-benchrunner-service \
--bench-interval 600000 \
--tx-count 100 \
--prio-fees 100000 --prio-fees 1000 --prio-fees 0


@ -1,7 +1,8 @@
use std::sync::Arc;
use std::{collections::HashMap, sync::Arc, time::Duration};
use async_trait::async_trait;
use dashmap::DashSet;
use futures::lock::Mutex;
use itertools::Itertools;
use prometheus::{opts, register_int_gauge, IntGauge};
use solana_client::{
@ -9,7 +10,9 @@ use solana_client::{
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig},
rpc_filter::RpcFilterType,
};
use solana_lite_rpc_accounts::account_store_interface::AccountStorageInterface;
use solana_lite_rpc_accounts::account_store_interface::{
AccountLoadingError, AccountStorageInterface,
};
use solana_lite_rpc_cluster_endpoints::geyser_grpc_connector::GrpcSourceConfig;
use solana_lite_rpc_core::{
commitment_utils::Commitment,
@ -19,7 +22,7 @@ use solana_lite_rpc_core::{
},
};
use solana_sdk::{clock::Slot, pubkey::Pubkey};
use tokio::sync::{broadcast::Sender, RwLock};
use tokio::sync::{broadcast::Sender, Notify, RwLock};
use crate::subscription_manager::SubscriptionManger;
@ -31,12 +34,15 @@ lazy_static::lazy_static! {
register_int_gauge!(opts!("literpc_number_of_program_filters_on_demand", "Number of program filters on demand")).unwrap();
}
const RETRY_FETCHING_ACCOUNT: usize = 10;
pub struct AccountsOnDemand {
rpc_client: Arc<RpcClient>,
accounts_storage: Arc<dyn AccountStorageInterface>,
accounts_subscribed: Arc<DashSet<Pubkey>>,
program_filters: Arc<RwLock<AccountFilters>>,
subscription_manager: SubscriptionManger,
accounts_in_loading: Arc<Mutex<HashMap<Pubkey, Arc<Notify>>>>,
}
impl AccountsOnDemand {
@ -56,6 +62,7 @@ impl AccountsOnDemand {
accounts_storage,
account_notification_sender,
),
accounts_in_loading: Arc::new(Mutex::new(HashMap::new())),
}
}
@ -102,52 +109,99 @@ impl AccountStorageInterface for AccountsOnDemand {
.await
}
async fn get_account(&self, account_pk: Pubkey, commitment: Commitment) -> Option<AccountData> {
async fn get_account(
&self,
account_pk: Pubkey,
commitment: Commitment,
) -> Result<Option<AccountData>, AccountLoadingError> {
match self
.accounts_storage
.get_account(account_pk, commitment)
.await
.await?
{
Some(account_data) => Some(account_data),
Some(account_data) => Ok(Some(account_data)),
None => {
// account does not exist in account store
// first check if we have already subscribed to the required account
// This is to avoid resetting the geyser subscription because of accounts that do not exist.
if !self.accounts_subscribed.contains(&account_pk) {
// get account from rpc and create its subscription
self.accounts_subscribed.insert(account_pk);
self.refresh_subscription().await;
let account_response = self
.rpc_client
.get_account_with_commitment(
&account_pk,
commitment.into_commiment_config(),
let mut lk = self.accounts_in_loading.lock().await;
match lk.get(&account_pk).cloned() {
Some(loading_account) => {
drop(lk);
match tokio::time::timeout(
Duration::from_secs(10),
loading_account.notified(),
)
.await;
if let Ok(response) = account_response {
match response.value {
Some(account) => {
// update account in storage and return the account data
let account_data = AccountData {
pubkey: account_pk,
account: Arc::new(account),
updated_slot: response.context.slot,
};
.await
{
Ok(_) => {
self.accounts_storage
.update_account(account_data.clone(), commitment)
.await;
Some(account_data)
.get_account(account_pk, commitment)
.await
}
// account does not exist
None => None,
Err(_timeout) => Err(AccountLoadingError::OperationTimeOut),
}
}
None => {
// account is not loading
if self.accounts_subscribed.contains(&account_pk) {
// a load for this account was already attempted and it does not exist
Ok(None)
} else {
// update account loading map
// create a notify for accounts under loading
lk.insert(account_pk, Arc::new(Notify::new()));
self.accounts_subscribed.insert(account_pk);
drop(lk);
self.refresh_subscription().await;
let mut return_value = None;
for _ in 0..RETRY_FETCHING_ACCOUNT {
let account_response = self
.rpc_client
.get_account_with_commitment(
&account_pk,
commitment.into_commiment_config(),
)
.await;
match account_response {
Ok(response) => {
if let Some(account) = response.value {
// update account in storage and return the account data
let account_data = AccountData {
pubkey: account_pk,
account: Arc::new(account),
updated_slot: response.context.slot,
};
self.accounts_storage
.update_account(account_data.clone(), commitment)
.await;
return_value = Some(account_data);
break;
} else {
// account does not exist
break;
}
}
Err(e) => {
log::error!(
"Error fetching account {} {e:?}",
account_pk.to_string()
);
}
}
}
// update loading lock
{
let mut write_lock = self.accounts_in_loading.lock().await;
let notify = write_lock.remove(&account_pk);
drop(write_lock);
if let Some(notify) = notify {
notify.notify_waiters();
}
}
Ok(return_value)
}
} else {
// issue getting account, will then be updated by geyser
None
}
} else {
// we have already subscribed to the account and it does not exist
None
}
}
}
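The accounts_in_loading map introduced above deduplicates concurrent loads of one account: the first caller installs a Notify and performs the RPC fetch (with up to RETRY_FETCHING_ACCOUNT attempts), while concurrent callers wait on the Notify, bounded by a 10s timeout, and then re-read the store. A minimal standalone sketch of that pattern, with a hypothetical Loader type and tokio's Mutex in place of the futures one used above:

use std::{collections::HashMap, sync::Arc, time::Duration};
use tokio::sync::{Mutex, Notify};

struct Loader {
    in_flight: Mutex<HashMap<String, Arc<Notify>>>,
}

impl Loader {
    async fn load(&self, key: &str) -> Result<Option<String>, &'static str> {
        let mut lk = self.in_flight.lock().await;
        if let Some(pending) = lk.get(key).cloned() {
            // someone else is loading this key: wait, then re-read the store
            drop(lk);
            return match tokio::time::timeout(Duration::from_secs(10), pending.notified()).await {
                Ok(()) => Ok(Some(format!("re-read store for {key}"))),
                Err(_elapsed) => Err("operation timed out"),
            };
        }
        // we are the first: announce the in-flight load, then fetch
        let pending = Arc::new(Notify::new());
        lk.insert(key.to_string(), pending.clone());
        drop(lk);
        let fetched = Some(format!("fetched {key}")); // stand-in for the RPC round trips
        // remove the marker and wake everyone who queued behind us
        self.in_flight.lock().await.remove(key);
        pending.notify_waiters();
        Ok(fetched)
    }
}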


@ -1,33 +1,22 @@
use std::{
collections::{HashMap, HashSet},
sync::Arc,
time::Duration,
};
use std::{sync::Arc, time::Duration};
use futures::StreamExt;
use itertools::Itertools;
use merge_streams::MergeStreams;
use prometheus::{opts, register_int_gauge, IntGauge};
use solana_lite_rpc_accounts::account_store_interface::AccountStorageInterface;
use solana_lite_rpc_cluster_endpoints::geyser_grpc_connector::GrpcSourceConfig;
use solana_lite_rpc_cluster_endpoints::{
geyser_grpc_connector::GrpcSourceConfig,
grpc::grpc_accounts_streaming::start_account_streaming_tasks,
};
use solana_lite_rpc_core::{
commitment_utils::Commitment,
structures::{
account_data::{AccountData, AccountNotificationMessage, AccountStream},
account_filter::{AccountFilterType, AccountFilters, MemcmpFilterData},
account_data::{AccountNotificationMessage, AccountStream},
account_filter::AccountFilters,
},
AnyhowJoinHandle,
};
use solana_sdk::{account::Account, pubkey::Pubkey};
use tokio::sync::{
broadcast::{self, Sender},
watch, Notify,
};
use yellowstone_grpc_proto::geyser::{
subscribe_request_filter_accounts_filter::Filter,
subscribe_request_filter_accounts_filter_memcmp::Data, subscribe_update::UpdateOneof,
SubscribeRequest, SubscribeRequestFilterAccounts, SubscribeRequestFilterAccountsFilter,
SubscribeRequestFilterAccountsFilterMemcmp,
watch,
};
lazy_static::lazy_static! {
@ -89,212 +78,6 @@ impl SubscriptionManger {
}
}
pub fn start_account_streaming_task(
grpc_config: GrpcSourceConfig,
accounts_filters: AccountFilters,
account_stream_sx: broadcast::Sender<AccountNotificationMessage>,
has_started: Arc<Notify>,
) -> AnyhowJoinHandle {
tokio::spawn(async move {
'main_loop: loop {
let processed_commitment = yellowstone_grpc_proto::geyser::CommitmentLevel::Processed;
let mut subscribe_programs: HashMap<String, SubscribeRequestFilterAccounts> =
HashMap::new();
let mut accounts_to_subscribe = HashSet::new();
for (index, accounts_filter) in accounts_filters.iter().enumerate() {
if !accounts_filter.accounts.is_empty() {
accounts_filter.accounts.iter().for_each(|account| {
accounts_to_subscribe.insert(account.clone());
});
}
if let Some(program_id) = &accounts_filter.program_id {
let filters = if let Some(filters) = &accounts_filter.filters {
filters
.iter()
.map(|filter| match filter {
AccountFilterType::Datasize(size) => {
SubscribeRequestFilterAccountsFilter {
filter: Some(Filter::Datasize(*size)),
}
}
AccountFilterType::Memcmp(memcmp) => {
SubscribeRequestFilterAccountsFilter {
filter: Some(Filter::Memcmp(
SubscribeRequestFilterAccountsFilterMemcmp {
offset: memcmp.offset,
data: Some(match &memcmp.data {
MemcmpFilterData::Bytes(bytes) => {
Data::Bytes(bytes.clone())
}
MemcmpFilterData::Base58(data) => {
Data::Base58(data.clone())
}
MemcmpFilterData::Base64(data) => {
Data::Base64(data.clone())
}
}),
},
)),
}
}
AccountFilterType::TokenAccountState => {
SubscribeRequestFilterAccountsFilter {
filter: Some(Filter::TokenAccountState(false)),
}
}
})
.collect_vec()
} else {
vec![]
};
subscribe_programs.insert(
format!("program_accounts_on_demand_{}", index),
SubscribeRequestFilterAccounts {
account: vec![],
owner: vec![program_id.clone()],
filters,
},
);
}
}
let program_subscribe_request = SubscribeRequest {
accounts: subscribe_programs,
slots: Default::default(),
transactions: Default::default(),
blocks: Default::default(),
blocks_meta: Default::default(),
entry: Default::default(),
commitment: Some(processed_commitment.into()),
accounts_data_slice: Default::default(),
ping: None,
};
log::info!(
"Accounts on demand subscribing to {}",
grpc_config.grpc_addr
);
let Ok(mut client) = yellowstone_grpc_client::GeyserGrpcClient::connect(
grpc_config.grpc_addr.clone(),
grpc_config.grpc_x_token.clone(),
None,
) else {
// problem connecting to grpc, retry after a sec
tokio::time::sleep(Duration::from_secs(1)).await;
continue;
};
let Ok(account_stream) = client.subscribe_once2(program_subscribe_request).await else {
// problem subscribing to geyser stream, retry after a sec
tokio::time::sleep(Duration::from_secs(1)).await;
continue;
};
// each account subscription batch will require an individual stream
let mut subscriptions = vec![account_stream];
let mut index = 0;
for accounts_chunk in accounts_to_subscribe.iter().collect_vec().chunks(100) {
let mut accounts_subscription: HashMap<String, SubscribeRequestFilterAccounts> =
HashMap::new();
index += 1;
accounts_subscription.insert(
format!("account_sub_{}", index),
SubscribeRequestFilterAccounts {
account: accounts_chunk
.iter()
.map(|acc| (*acc).clone())
.collect_vec(),
owner: vec![],
filters: vec![],
},
);
let mut client = yellowstone_grpc_client::GeyserGrpcClient::connect(
grpc_config.grpc_addr.clone(),
grpc_config.grpc_x_token.clone(),
None,
)
.unwrap();
let account_request = SubscribeRequest {
accounts: accounts_subscription,
slots: Default::default(),
transactions: Default::default(),
blocks: Default::default(),
blocks_meta: Default::default(),
entry: Default::default(),
commitment: Some(processed_commitment.into()),
accounts_data_slice: Default::default(),
ping: None,
};
let account_stream = client.subscribe_once2(account_request).await.unwrap();
subscriptions.push(account_stream);
}
let mut merged_stream = subscriptions.merge();
while let Some(message) = merged_stream.next().await {
let message = match message {
Ok(message) => message,
Err(status) => {
log::error!("Account on demand grpc error : {}", status.message());
continue;
}
};
let Some(update) = message.update_oneof else {
continue;
};
has_started.notify_one();
match update {
UpdateOneof::Account(account) => {
if let Some(account_data) = account.account {
let account_pk_bytes: [u8; 32] = account_data
.pubkey
.try_into()
.expect("Pubkey should be 32 byte long");
let owner: [u8; 32] = account_data
.owner
.try_into()
.expect("owner pubkey should be deserializable");
let notification = AccountNotificationMessage {
data: AccountData {
pubkey: Pubkey::new_from_array(account_pk_bytes),
account: Arc::new(Account {
lamports: account_data.lamports,
data: account_data.data,
owner: Pubkey::new_from_array(owner),
executable: account_data.executable,
rent_epoch: account_data.rent_epoch,
}),
updated_slot: account.slot,
},
// TODO update with processed commitment / check above
commitment: Commitment::Processed,
};
if account_stream_sx.send(notification).is_err() {
// non recoverable, i.e the whole stream is being restarted
log::error!("Account stream broken, breaking from main loop");
break 'main_loop;
}
}
}
UpdateOneof::Ping(_) => {
log::trace!("GRPC Ping accounts stream");
}
_ => {
log::error!("GRPC accounts stream misconfigured");
}
};
}
}
Ok(())
})
}
pub fn create_grpc_account_streaming_tasks(
grpc_sources: Vec<GrpcSourceConfig>,
mut account_filter_watch: watch::Receiver<AccountFilters>,
@ -318,7 +101,7 @@ pub fn create_grpc_account_streaming_tasks(
let mut current_tasks = grpc_sources
.iter()
.map(|grpc_config| {
start_account_streaming_task(
start_account_streaming_tasks(
grpc_config.clone(),
accounts_filters.clone(),
account_sender.clone(),
@ -338,7 +121,7 @@ pub fn create_grpc_account_streaming_tasks(
let new_tasks = grpc_sources
.iter()
.map(|grpc_config| {
start_account_streaming_task(
start_account_streaming_tasks(
grpc_config.clone(),
accounts_filters.clone(),
account_sender.clone(),


@ -4,13 +4,13 @@ use anyhow::bail;
use itertools::Itertools;
use prometheus::{opts, register_int_gauge, IntGauge};
use solana_account_decoder::{UiAccount, UiDataSliceConfig};
use solana_lite_rpc_core::types::BlockInfoStream;
use solana_lite_rpc_core::{
commitment_utils::Commitment,
structures::{
account_data::{AccountData, AccountNotificationMessage, AccountStream},
account_filter::AccountFilters,
},
types::BlockStream,
AnyhowJoinHandle,
};
use solana_rpc_client::nonblocking::rpc_client::RpcClient;
@ -21,7 +21,7 @@ use solana_rpc_client_api::{
use solana_sdk::{commitment_config::CommitmentConfig, pubkey::Pubkey, slot_history::Slot};
use tokio::sync::broadcast::Sender;
use crate::account_store_interface::AccountStorageInterface;
use crate::account_store_interface::{AccountLoadingError, AccountStorageInterface};
lazy_static::lazy_static! {
static ref ACCOUNT_UPDATES: IntGauge =
@ -151,7 +151,7 @@ impl AccountService {
pub fn process_account_stream(
&self,
mut account_stream: AccountStream,
mut block_stream: BlockStream,
mut blockinfo_stream: BlockInfoStream,
) -> Vec<AnyhowJoinHandle> {
let this = self.clone();
let processed_task = tokio::spawn(async move {
@ -187,19 +187,19 @@ impl AccountService {
let this = self.clone();
let block_processing_task = tokio::spawn(async move {
loop {
match block_stream.recv().await {
Ok(block_notification) => {
if block_notification.commitment_config.is_processed() {
match blockinfo_stream.recv().await {
Ok(block_info) => {
if block_info.commitment_config.is_processed() {
// processed commitment is not processed in this loop
continue;
}
let commitment = Commitment::from(block_notification.commitment_config);
let commitment = Commitment::from(block_info.commitment_config);
let updated_accounts = this
.account_store
.process_slot_data(block_notification.slot, commitment)
.process_slot_data(block_info.slot, commitment)
.await;
if block_notification.commitment_config.is_finalized() {
if block_info.commitment_config.is_finalized() {
ACCOUNT_UPDATES_FINALIZED.add(updated_accounts.len() as i64)
} else {
ACCOUNT_UPDATES_CONFIRMED.add(updated_accounts.len() as i64);
@ -250,7 +250,7 @@ impl AccountService {
&self,
account: Pubkey,
config: Option<RpcAccountInfoConfig>,
) -> anyhow::Result<(Slot, Option<UiAccount>)> {
) -> Result<(Slot, Option<UiAccount>), AccountLoadingError> {
GET_ACCOUNT_CALLED.inc();
let commitment = config
.as_ref()
@ -259,7 +259,7 @@ impl AccountService {
let commitment = Commitment::from(commitment);
if let Some(account_data) = self.account_store.get_account(account, commitment).await {
if let Some(account_data) = self.account_store.get_account(account, commitment).await? {
// if minimum context slot is not satisfied return Null
let minimum_context_slot = config
.as_ref()
@ -273,10 +273,7 @@ impl AccountService {
Ok((account_data.updated_slot, None))
}
} else {
bail!(
"Account {} does not satisfy any configured filters",
account.to_string()
)
Err(AccountLoadingError::ConfigDoesnotContainRequiredFilters)
}
}


@ -5,6 +5,13 @@ use solana_rpc_client_api::filter::RpcFilterType;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::slot_history::Slot;
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum AccountLoadingError {
AccountNotFound,
ConfigDoesnotContainRequiredFilters,
OperationTimeOut,
}
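Since get_account (signature change just below) now returns Result<Option<AccountData>, AccountLoadingError>, callers can tell an absent account (Ok(None)) apart from a failed load (Err(..)). A hedged caller sketch, reusing the Pubkey and Commitment types imported elsewhere in this diff:

async fn lamports_of(
    store: &dyn AccountStorageInterface,
    pk: solana_sdk::pubkey::Pubkey,
) -> Result<Option<u64>, AccountLoadingError> {
    // `?` propagates AccountLoadingError; Ok(None) simply means "not found"
    match store.get_account(pk, Commitment::Confirmed).await? {
        Some(account_data) => Ok(Some(account_data.account.lamports)),
        None => Ok(None),
    }
}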
#[async_trait]
pub trait AccountStorageInterface: Send + Sync {
// Update account and return true if the account was successfully updated
@ -12,7 +19,11 @@ pub trait AccountStorageInterface: Send + Sync {
async fn initilize_or_update_account(&self, account_data: AccountData);
async fn get_account(&self, account_pk: Pubkey, commitment: Commitment) -> Option<AccountData>;
async fn get_account(
&self,
account_pk: Pubkey,
commitment: Commitment,
) -> Result<Option<AccountData>, AccountLoadingError>;
async fn get_program_accounts(
&self,


@ -1,6 +1,6 @@
use std::{collections::HashSet, sync::Arc};
use crate::account_store_interface::AccountStorageInterface;
use crate::account_store_interface::{AccountLoadingError, AccountStorageInterface};
use async_trait::async_trait;
use dashmap::{DashMap, DashSet};
use itertools::Itertools;
@ -313,11 +313,15 @@ impl AccountStorageInterface for InmemoryAccountStore {
}
}
async fn get_account(&self, account_pk: Pubkey, commitment: Commitment) -> Option<AccountData> {
async fn get_account(
&self,
account_pk: Pubkey,
commitment: Commitment,
) -> Result<Option<AccountData>, AccountLoadingError> {
if let Some(account_by_commitment) = self.account_store.get(&account_pk) {
account_by_commitment.get_account_data(commitment).clone()
Ok(account_by_commitment.get_account_data(commitment).clone())
} else {
None
Ok(None)
}
}
@ -331,7 +335,7 @@ impl AccountStorageInterface for InmemoryAccountStore {
let mut return_vec = vec![];
for program_account in program_accounts.iter() {
let account_data = self.get_account(*program_account, commitment).await;
if let Some(account_data) = account_data {
if let Ok(Some(account_data)) = account_data {
// recheck program owner and filters
if account_data.account.owner.eq(&program_pubkey) {
match &account_filters {
@ -483,28 +487,28 @@ mod tests {
assert_eq!(
store.get_account(pk1, Commitment::Processed).await,
Some(account_data_0.clone())
Ok(Some(account_data_0.clone()))
);
assert_eq!(
store.get_account(pk1, Commitment::Confirmed).await,
Some(account_data_0.clone())
Ok(Some(account_data_0.clone()))
);
assert_eq!(
store.get_account(pk1, Commitment::Finalized).await,
Some(account_data_0.clone())
Ok(Some(account_data_0.clone()))
);
assert_eq!(
store.get_account(pk2, Commitment::Processed).await,
Some(account_data_1.clone())
Ok(Some(account_data_1.clone()))
);
assert_eq!(
store.get_account(pk2, Commitment::Confirmed).await,
Some(account_data_1.clone())
Ok(Some(account_data_1.clone()))
);
assert_eq!(
store.get_account(pk2, Commitment::Finalized).await,
Some(account_data_1.clone())
Ok(Some(account_data_1.clone()))
);
let account_data_2 = create_random_account(&mut rng, 1, pk1, program);
@ -527,60 +531,60 @@ mod tests {
assert_eq!(
store.get_account(pk1, Commitment::Processed).await,
Some(account_data_5.clone())
Ok(Some(account_data_5.clone()))
);
assert_eq!(
store.get_account(pk1, Commitment::Confirmed).await,
Some(account_data_0.clone())
Ok(Some(account_data_0.clone()))
);
assert_eq!(
store.get_account(pk1, Commitment::Finalized).await,
Some(account_data_0.clone())
Ok(Some(account_data_0.clone()))
);
store.process_slot_data(1, Commitment::Confirmed).await;
assert_eq!(
store.get_account(pk1, Commitment::Processed).await,
Some(account_data_5.clone())
Ok(Some(account_data_5.clone()))
);
assert_eq!(
store.get_account(pk1, Commitment::Confirmed).await,
Some(account_data_2.clone())
Ok(Some(account_data_2.clone()))
);
assert_eq!(
store.get_account(pk1, Commitment::Finalized).await,
Some(account_data_0.clone())
Ok(Some(account_data_0.clone()))
);
store.process_slot_data(2, Commitment::Confirmed).await;
assert_eq!(
store.get_account(pk1, Commitment::Processed).await,
Some(account_data_5.clone())
Ok(Some(account_data_5.clone()))
);
assert_eq!(
store.get_account(pk1, Commitment::Confirmed).await,
Some(account_data_3.clone())
Ok(Some(account_data_3.clone()))
);
assert_eq!(
store.get_account(pk1, Commitment::Finalized).await,
Some(account_data_0.clone())
Ok(Some(account_data_0.clone()))
);
store.process_slot_data(1, Commitment::Finalized).await;
assert_eq!(
store.get_account(pk1, Commitment::Processed).await,
Some(account_data_5.clone())
Ok(Some(account_data_5.clone()))
);
assert_eq!(
store.get_account(pk1, Commitment::Confirmed).await,
Some(account_data_3.clone())
Ok(Some(account_data_3.clone()))
);
assert_eq!(
store.get_account(pk1, Commitment::Finalized).await,
Some(account_data_2.clone())
Ok(Some(account_data_2.clone()))
);
}
@ -690,7 +694,7 @@ mod tests {
assert_eq!(
store.get_account(pk1, Commitment::Finalized).await,
Some(last_account.clone()),
Ok(Some(last_account.clone())),
);
// check finalizing previous commitment does not affect
@ -698,7 +702,7 @@ mod tests {
assert_eq!(
store.get_account(pk1, Commitment::Finalized).await,
Some(last_account),
Ok(Some(last_account)),
);
}


@ -3,8 +3,20 @@ name = "bench"
version = "0.2.4"
edition = "2021"
[[bin]]
name = "bench"
path = "src/main.rs"
[[bin]]
# WIP
name = "benchnew"
path = "src/benchnew.rs"
[dependencies]
clap = { workspace = true }
csv = "1.2.1"
dirs = "5.0.0"
solana-lite-rpc-util = { workspace = true }
solana-sdk = { workspace = true }
solana-rpc-client = { workspace = true }
solana-transaction-status = { workspace = true }
@ -23,6 +35,9 @@ dashmap = { workspace = true }
bincode = { workspace = true }
itertools = "0.10.5"
spl-memo = "4.0.0"
url = "*"
reqwest = "0.11.26"
lazy_static = "1.4.0"
[dev-dependencies]
bincode = { workspace = true }

bench/src/bench1.rs Normal file

@ -0,0 +1,166 @@
use crate::{helpers::BenchHelper, metrics::Metric, metrics::TxMetricData};
use dashmap::DashMap;
use log::warn;
use solana_rpc_client::nonblocking::rpc_client::RpcClient;
use solana_sdk::hash::Hash;
use solana_sdk::signature::Keypair;
use solana_sdk::signature::Signature;
use solana_sdk::slot_history::Slot;
use std::sync::{
atomic::{AtomicU64, Ordering},
Arc,
};
use tokio::{
sync::{mpsc::UnboundedSender, RwLock},
time::{Duration, Instant},
};
#[derive(Clone, Debug, Copy)]
struct TxSendData {
sent_duration: Duration,
sent_instant: Instant,
sent_slot: Slot,
transaction_bytes: u64,
}
struct ApiCallerResult {
gross_send_time: Duration,
}
// called by benchrunner-service
#[allow(clippy::too_many_arguments)]
pub async fn bench(
rpc_client: Arc<RpcClient>,
tx_count: usize,
funded_payer: Keypair,
seed: u64,
block_hash: Arc<RwLock<Hash>>,
current_slot: Arc<AtomicU64>,
tx_metric_sx: UnboundedSender<TxMetricData>,
log_txs: bool,
transaction_size: TransactionSize,
cu_price_micro_lamports: u64,
) -> Metric {
let map_of_txs: Arc<DashMap<Signature, TxSendData>> = Arc::new(DashMap::new());
// transaction sender task
let api_caller_result = {
let map_of_txs = map_of_txs.clone();
let rpc_client = rpc_client.clone();
let current_slot = current_slot.clone();
tokio::spawn(async move {
let map_of_txs = map_of_txs.clone();
let n_chars = match transaction_size {
TransactionSize::Small => 10,
TransactionSize::Large => 232, // 565 is max but we need to lower that to not burn the CUs
};
let rand_strings = BenchHelper::generate_random_strings(tx_count, Some(seed), n_chars);
let bench_start_time = Instant::now();
for rand_string in &rand_strings {
let blockhash = { *block_hash.read().await };
let tx = match transaction_size {
TransactionSize::Small => BenchHelper::create_memo_tx_small(
rand_string,
&funded_payer,
blockhash,
cu_price_micro_lamports,
),
TransactionSize::Large => BenchHelper::create_memo_tx_large(
rand_string,
&funded_payer,
blockhash,
cu_price_micro_lamports,
),
};
let start_time = Instant::now();
match rpc_client.send_transaction(&tx).await {
Ok(signature) => {
map_of_txs.insert(
signature,
TxSendData {
sent_duration: start_time.elapsed(),
sent_instant: Instant::now(),
sent_slot: current_slot.load(std::sync::atomic::Ordering::Relaxed),
transaction_bytes: bincode::serialized_size(&tx).unwrap(),
},
);
}
Err(e) => {
warn!("tx send failed with error {}", e);
}
}
}
ApiCallerResult {
gross_send_time: bench_start_time.elapsed(),
}
})
};
let mut metric = Metric::default();
let confirmation_time = Instant::now();
let mut confirmed_count = 0;
while confirmation_time.elapsed() < Duration::from_secs(60)
&& !(map_of_txs.is_empty() && confirmed_count == tx_count)
{
let signatures = map_of_txs.iter().map(|x| *x.key()).collect::<Vec<_>>();
if signatures.is_empty() {
tokio::time::sleep(Duration::from_millis(1)).await;
continue;
}
if let Ok(res) = rpc_client.get_signature_statuses(&signatures).await {
for (i, signature) in signatures.iter().enumerate() {
let tx_status = &res.value[i];
if tx_status.is_some() {
let tx_data = map_of_txs.get(signature).unwrap();
let time_to_confirm = tx_data.sent_instant.elapsed();
let transaction_bytes = tx_data.transaction_bytes;
metric.add_successful_transaction(
tx_data.sent_duration,
time_to_confirm,
transaction_bytes,
);
if log_txs {
let _ = tx_metric_sx.send(TxMetricData {
signature: signature.to_string(),
sent_slot: tx_data.sent_slot,
confirmed_slot: current_slot.load(Ordering::Relaxed),
time_to_send_in_millis: tx_data.sent_duration.as_millis() as u64,
time_to_confirm_in_millis: time_to_confirm.as_millis() as u64,
});
}
drop(tx_data);
map_of_txs.remove(signature);
confirmed_count += 1;
}
}
}
}
for tx in map_of_txs.iter() {
metric.add_unsuccessful_transaction(tx.sent_duration, tx.transaction_bytes);
}
let api_caller_result = api_caller_result
.await
.expect("api caller task must succeed");
metric
.set_total_gross_send_time(api_caller_result.gross_send_time.as_micros() as f64 / 1_000.0);
metric.finalize();
metric
}
// see https://spl.solana.com/memo for sizing of transactions
// As of v1.5.1, an unsigned instruction can support single-byte UTF-8 of up to 566 bytes.
// An instruction with a simple memo of 32 bytes can support up to 12 signers.
#[derive(Debug, Clone, Copy)]
pub enum TransactionSize {
// 179 bytes, 5237 CUs
Small,
// 1186 bytes, 193175 CUs
Large,
}
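For orientation, a hedged sketch of how the benchrunner service might invoke bench() above. The URL handling, counts, and fee are illustrative placeholders, and the background tasks that keep the blockhash and slot fresh are elided:

use std::sync::{atomic::AtomicU64, Arc};
use solana_rpc_client::nonblocking::rpc_client::RpcClient;
use solana_sdk::signature::Keypair;
use tokio::sync::{mpsc, RwLock};

async fn run_one_bench(rpc_url: String, funded_payer: Keypair) -> anyhow::Result<()> {
    let rpc_client = Arc::new(RpcClient::new(rpc_url));
    // shared state; the real service refreshes these from background tasks
    let block_hash = Arc::new(RwLock::new(rpc_client.get_latest_blockhash().await?));
    let current_slot = Arc::new(AtomicU64::new(rpc_client.get_slot().await?));
    let (tx_metric_sx, mut tx_metric_rx) = mpsc::unbounded_channel();
    tokio::spawn(async move {
        // drain per-transaction metrics; a real runner would write them out (e.g. CSV)
        while tx_metric_rx.recv().await.is_some() {}
    });
    let _metric = bench(
        rpc_client,
        100,          // tx_count
        funded_payer,
        42,           // seed
        block_hash,
        current_slot,
        tx_metric_sx,
        true,         // log_txs
        TransactionSize::Small,
        1_000,        // cu_price_micro_lamports
    )
    .await;
    Ok(())
}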


@ -13,7 +13,12 @@ use solana_sdk::signature::{read_keypair_file, Keypair, Signer};
use crate::create_memo_tx_small;
// TC3 measure how much load the API endpoint can take
pub async fn api_load(payer_path: &Path, rpc_url: String, time_ms: u64) -> anyhow::Result<()> {
pub async fn api_load(
payer_path: &Path,
rpc_url: String,
test_duration_ms: u64,
cu_price_micro_lamports: u64,
) -> anyhow::Result<()> {
warn!("THIS IS WORK IN PROGRESS");
let rpc = Arc::new(RpcClient::new(rpc_url));
@ -29,7 +34,7 @@ pub async fn api_load(payer_path: &Path, rpc_url: String, time_ms: u64) -> anyho
let hash = rpc.get_latest_blockhash().await?;
let time = tokio::time::Instant::now();
while time.elapsed().as_millis() < time_ms.into() {
while time.elapsed().as_millis() < test_duration_ms.into() {
let rpc = rpc.clone();
let payer = payer.clone();
@ -40,7 +45,7 @@ pub async fn api_load(payer_path: &Path, rpc_url: String, time_ms: u64) -> anyho
tokio::spawn(async move {
let msg = msg.as_bytes();
let tx = create_memo_tx_small(msg, &payer, hash);
let tx = create_memo_tx_small(msg, &payer, hash, cu_price_micro_lamports);
match rpc.send_transaction(&tx).await {
Ok(_) => success.fetch_add(1, Ordering::Relaxed),
Err(_) => failed.fetch_add(1, Ordering::Relaxed),
@ -50,7 +55,7 @@ pub async fn api_load(payer_path: &Path, rpc_url: String, time_ms: u64) -> anyho
txs += 1;
}
let calls_per_second = txs as f64 / (time_ms as f64 * 1000.0);
let calls_per_second = txs as f64 / (test_duration_ms as f64 / 1000.0);
info!("calls_per_second: {}", calls_per_second);
info!("failed: {}", failed.load(Ordering::Relaxed));
info!("success: {}", success.load(Ordering::Relaxed));


@ -1,89 +1,129 @@
use crate::tx_size::TxSize;
use crate::{create_rng, generate_txs};
use anyhow::{bail, Error};
use futures::future::join_all;
use futures::TryFutureExt;
use itertools::Itertools;
use crate::{create_rng, generate_txs, BenchmarkTransactionParams};
use anyhow::Context;
use log::{debug, info, trace, warn};
use std::collections::{HashMap, HashSet};
use std::iter::zip;
use std::ops::Add;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
use crate::benches::rpc_interface::{
send_and_confirm_bulk_transactions, ConfirmationResponseFromRpc,
};
use solana_rpc_client::nonblocking::rpc_client::RpcClient;
use solana_rpc_client::rpc_client::SerializableTransaction;
use solana_rpc_client_api::client_error::ErrorKind;
use solana_sdk::signature::{read_keypair_file, Signature, Signer};
use solana_sdk::slot_history::Slot;
use solana_sdk::transaction::Transaction;
use solana_sdk::{commitment_config::CommitmentConfig, signature::Keypair};
use solana_transaction_status::TransactionConfirmationStatus;
use tokio::time::Instant;
use solana_sdk::signature::{read_keypair_file, Keypair, Signature, Signer};
#[derive(Debug, serde::Serialize)]
pub struct RpcStat {
confirmation_time: f32,
mode_slot: u64,
confirmed: u64,
unconfirmed: u64,
failed: u64,
tx_sent: u64,
tx_confirmed: u64,
// in ms
average_confirmation_time: f32,
// in slots
average_slot_confirmation_time: f32,
tx_send_errors: u64,
tx_unconfirmed: u64,
}
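RpcStat derives serde::Serialize (above), so per-run stats can be exported directly. A small hedged sketch via serde_json (already a bench dependency per the Cargo.lock diff), with made-up numbers:

let stat = RpcStat {
    tx_sent: 100,
    tx_send_errors: 1,
    tx_confirmed: 97,
    tx_unconfirmed: 2,
    average_confirmation_time: 812.5,
    average_slot_confirmation_time: 1.9,
};
println!("{}", serde_json::to_string_pretty(&stat).unwrap());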
/// TC2 send multiple runs of num_txns, measure the confirmation rate
/// TC2 send multiple runs of num_txs, measure the confirmation rate
pub async fn confirmation_rate(
payer_path: &Path,
rpc_url: String,
tx_size: TxSize,
txns_per_round: usize,
num_rounds: usize,
tx_params: BenchmarkTransactionParams,
max_timeout: Duration,
txs_per_run: usize,
num_of_runs: usize,
) -> anyhow::Result<()> {
warn!("THIS IS WORK IN PROGRESS");
assert!(num_of_runs > 0, "num_of_runs must be greater than 0");
let rpc = Arc::new(RpcClient::new(rpc_url));
info!("RPC: {}", rpc.as_ref().url());
let payer: Arc<Keypair> = Arc::new(read_keypair_file(payer_path).unwrap());
info!("Payer: {}", payer.pubkey().to_string());
let mut rpc_results = Vec::with_capacity(num_rounds);
let mut rpc_results = Vec::with_capacity(num_of_runs);
for _ in 0..num_rounds {
let stat: RpcStat = send_bulk_txs_and_wait(&rpc, &payer, txns_per_round, tx_size).await?;
rpc_results.push(stat);
for _ in 0..num_of_runs {
match send_bulk_txs_and_wait(&rpc, &payer, txs_per_run, &tx_params, max_timeout)
.await
.context("send bulk tx and wait")
{
Ok(stat) => {
rpc_results.push(stat);
}
Err(err) => {
warn!(
"Failed to send bulk txs and wait - no rpc stats available: {}",
err
);
}
}
}
info!("avg_rpc: {:?}", calc_stats_avg(&rpc_results));
if !rpc_results.is_empty() {
info!("avg_rpc: {:?}", calc_stats_avg(&rpc_results));
} else {
info!("avg_rpc: n/a");
}
Ok(())
}
pub async fn send_bulk_txs_and_wait(
rpc: &RpcClient,
payer: &Keypair,
num_txns: usize,
tx_size: TxSize,
num_txs: usize,
tx_params: &BenchmarkTransactionParams,
max_timeout: Duration,
) -> anyhow::Result<RpcStat> {
let hash = rpc.get_latest_blockhash().await?;
trace!("Get latest blockhash and generate transactions");
let hash = rpc.get_latest_blockhash().await.map_err(|err| {
log::error!("Error get latest blockhash : {err:?}");
err
})?;
let mut rng = create_rng(None);
let txs = generate_txs(num_txns, payer, hash, &mut rng, tx_size);
let started_at = tokio::time::Instant::now();
let txs = generate_txs(num_txs, payer, hash, &mut rng, tx_params);
trace!("Sending {} transactions in bulk ..", txs.len());
let tx_and_confirmations_from_rpc: Vec<(Signature, ConfirmationResponseFromRpc)> =
send_and_confirm_bulk_transactions(rpc, &txs).await?;
send_and_confirm_bulk_transactions(rpc, &txs, max_timeout)
.await
.context("send and confirm bulk tx")?;
trace!("Done sending {} transactions.", txs.len());
let elapsed_total = started_at.elapsed();
for (tx_sig, confirmation) in &tx_and_confirmations_from_rpc {
match confirmation {
ConfirmationResponseFromRpc::Success(slots_elapsed, level, elapsed) => {
let mut tx_sent = 0;
let mut tx_send_errors = 0;
let mut tx_confirmed = 0;
let mut tx_unconfirmed = 0;
let mut sum_confirmation_time = Duration::default();
let mut sum_slot_confirmation_time = 0;
for (tx_sig, confirmation_response) in tx_and_confirmations_from_rpc {
match confirmation_response {
ConfirmationResponseFromRpc::Success(
slot_sent,
slot_confirmed,
commitment_status,
confirmation_time,
) => {
debug!(
"Signature {} confirmed with level {:?} after {:.02}ms, {} slots",
tx_sig,
level,
elapsed.as_secs_f32() * 1000.0,
slots_elapsed
commitment_status,
confirmation_time.as_secs_f64() * 1000.0,
slot_confirmed - slot_sent
);
tx_sent += 1;
tx_confirmed += 1;
sum_confirmation_time = sum_confirmation_time.add(confirmation_time);
sum_slot_confirmation_time += slot_confirmed - slot_sent;
}
ConfirmationResponseFromRpc::SendError(error_kind) => {
debug!(
"Signature {} failed to get send via RPC: {:?}",
tx_sig, error_kind
);
tx_send_errors += 1;
}
ConfirmationResponseFromRpc::Timeout(elapsed) => {
debug!(
@ -91,60 +131,30 @@ pub async fn send_bulk_txs_and_wait(
tx_sig,
elapsed.as_secs_f32() * 1000.0
);
}
ConfirmationResponseFromRpc::SendError(_) => {
unreachable!()
tx_sent += 1;
tx_unconfirmed += 1;
}
}
}
let average_confirmation_time_ms = if tx_confirmed > 0 {
sum_confirmation_time.as_secs_f32() * 1000.0 / tx_confirmed as f32
} else {
0.0
};
let average_slot_confirmation_time = if tx_confirmed > 0 {
sum_slot_confirmation_time as f32 / tx_confirmed as f32
} else {
0.0
};
Ok(RpcStat {
tx_sent,
tx_send_errors,
tx_confirmed,
tx_unconfirmed,
average_confirmation_time: average_confirmation_time_ms,
average_slot_confirmation_time,
})
}
@ -152,235 +162,29 @@ fn calc_stats_avg(stats: &[RpcStat]) -> RpcStat {
let len = stats.len();
let mut avg = RpcStat {
tx_sent: 0,
tx_send_errors: 0,
tx_confirmed: 0,
tx_unconfirmed: 0,
average_confirmation_time: 0.0,
average_slot_confirmation_time: 0.0,
};
for stat in stats {
avg.tx_sent += stat.tx_sent;
avg.tx_send_errors += stat.tx_send_errors;
avg.tx_confirmed += stat.tx_confirmed;
avg.tx_unconfirmed += stat.tx_unconfirmed;
avg.average_confirmation_time += stat.average_confirmation_time;
avg.average_slot_confirmation_time += stat.average_slot_confirmation_time;
}
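// note: the counter fields use u64 integer division, so these averages are truncated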
avg.tx_sent /= len as u64;
avg.tx_send_errors /= len as u64;
avg.tx_confirmed /= len as u64;
avg.tx_unconfirmed /= len as u64;
avg.average_confirmation_time /= len as f32;
avg.average_slot_confirmation_time /= len as f32;
avg
}

View File

@ -1,43 +1,142 @@
use std::path::Path;
use std::time::Duration;
use crate::benches::rpc_interface::{
create_rpc_client, send_and_confirm_bulk_transactions, ConfirmationResponseFromRpc,
};
use crate::metrics::PingThing;
use crate::{create_memo_tx, create_rng, BenchmarkTransactionParams, Rng8};
use anyhow::anyhow;
use log::{debug, info, warn};
use solana_lite_rpc_util::obfuscate_rpcurl;
use solana_rpc_client::nonblocking::rpc_client::RpcClient;
use solana_sdk::signature::{read_keypair_file, Signature, Signer};
use solana_sdk::transaction::Transaction;
use solana_sdk::{commitment_config::CommitmentConfig, signature::Keypair};
use tokio::time::{sleep, Instant};
use url::Url;
#[derive(Clone, Copy, Debug, Default)]
pub struct Metric {
pub txs_sent: u64,
pub txs_confirmed: u64,
pub txs_un_confirmed: u64,
pub average_confirmation_time_ms: f64,
pub average_time_to_send_txs: f64,
}
#[derive(Clone)]
pub enum ConfirmationSlotResult {
Success(ConfirmationSlotSuccess),
}
#[derive(Clone)]
pub struct ConfirmationSlotSuccess {
pub slot_sent: u64,
pub slot_confirmed: u64,
pub confirmation_time: Duration,
}
#[allow(clippy::too_many_arguments)]
/// TC1 -- Send 2 txs to separate RPCs and compare confirmation slot.
/// The benchmark attempts to minimize the effect of real-world distance and synchronize the time that each transaction reaches the RPC.
/// This is achieved by delaying submission of the transaction to the "nearer" RPC.
/// Delay time is calculated as half of the difference in duration of [getHealth](https://solana.com/docs/rpc/http/gethealth) calls to both RPCs.
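/// Worked example: if getHealth to RPC A takes 120ms and to RPC B takes 40ms, sending to B is delayed by (120ms - 40ms) / 2 = 40ms.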
pub async fn confirmation_slot(
payer_path: &Path,
rpc_a_url: String,
rpc_b_url: String,
tx_params: BenchmarkTransactionParams,
max_timeout: Duration,
num_of_runs: usize,
_maybe_ping_thing: Option<PingThing>,
) -> anyhow::Result<()> {
info!(
"START BENCHMARK: confirmation_slot (prio_fees={})",
tx_params.cu_price_micro_lamports
);
warn!("THIS IS WORK IN PROGRESS");
info!("RPC A: {}", obfuscate_rpcurl(&rpc_a_url));
info!("RPC B: {}", obfuscate_rpcurl(&rpc_b_url));
let rpc_a_url =
Url::parse(&rpc_a_url).map_err(|e| anyhow!("Failed to parse RPC A URL: {}", e))?;
let rpc_b_url =
Url::parse(&rpc_b_url).map_err(|e| anyhow!("Failed to parse RPC B URL: {}", e))?;
let mut rng = create_rng(None);
let payer = read_keypair_file(payer_path).expect("payer file");
info!("Payer: {}", payer.pubkey().to_string());
// let mut ping_thing_tasks = vec![];
for _ in 0..num_of_runs {
let rpc_a = create_rpc_client(&rpc_a_url);
let rpc_b = create_rpc_client(&rpc_b_url);
// measure network time to reach the respective RPC endpoints,
// used to mitigate the difference in distance by delaying the txn sending
let time_a = rpc_roundtrip_duration(&rpc_a).await?.as_secs_f64();
let time_b = rpc_roundtrip_duration(&rpc_b).await?.as_secs_f64();
debug!("(A) rpc network latency: {}", time_a);
debug!("(B) rpc network latency: {}", time_b);
info!("rpc_slot: {}", rpc_slot?);
info!("lite_rpc_slot: {}", lite_rpc_slot?);
let rpc_a_tx = create_tx(&rpc_a, &payer, &mut rng, &tx_params).await?;
let rpc_b_tx = create_tx(&rpc_b, &payer, &mut rng, &tx_params).await?;
let one_way_delay = (time_a - time_b).abs() / 2.0;
let (a_delay, b_delay) = if time_a > time_b {
(0f64, one_way_delay)
} else {
(one_way_delay, 0f64)
};
debug!("A delay: {}s, B delay: {}s", a_delay, b_delay);
let a_task = tokio::spawn(async move {
sleep(Duration::from_secs_f64(a_delay)).await;
debug!("(A) sending tx {}", rpc_a_tx.signatures[0]);
send_and_confirm_transaction(&rpc_a, rpc_a_tx, max_timeout).await
});
let b_task = tokio::spawn(async move {
sleep(Duration::from_secs_f64(b_delay)).await;
debug!("(B) sending tx {}", rpc_b_tx.signatures[0]);
send_and_confirm_transaction(&rpc_b, rpc_b_tx, max_timeout).await
});
let (a, b) = tokio::join!(a_task, b_task);
// only continue if both paths succeed
let a_result: ConfirmationResponseFromRpc = a??;
let b_result: ConfirmationResponseFromRpc = b??;
if let (
ConfirmationResponseFromRpc::Success(a_slot_sent, a_slot_confirmed, _, _),
ConfirmationResponseFromRpc::Success(b_slot_sent, b_slot_confirmed, _, _),
) = (a_result, b_result)
{
info!(
"txn A landed after {} slots",
a_slot_confirmed - a_slot_sent
);
info!(
"txn B landed after {} slots",
b_slot_confirmed - b_slot_sent
);
}
// if let Some(ping_thing) = maybe_ping_thing.clone() {
// ping_thing_tasks.push(tokio::spawn(async move {
// submit_ping_thing_stats(&a_result, &ping_thing)
// .await
// .unwrap();
// submit_ping_thing_stats(&b_result, &ping_thing)
// .await
// .unwrap();
// }));
// };
}
// futures::future::join_all(ping_thing_tasks).await;
Ok(())
}
@ -46,20 +145,52 @@ async fn create_tx(
rpc: &RpcClient,
payer: &Keypair,
rng: &mut Rng8,
tx_params: &BenchmarkTransactionParams,
) -> anyhow::Result<Transaction> {
let (blockhash, _) = rpc
.get_latest_blockhash_with_commitment(CommitmentConfig::confirmed())
.await?;
Ok(create_memo_tx(payer, blockhash, rng, tx_params))
}
async fn send_and_confirm_transaction(
rpc: &RpcClient,
tx: Transaction,
max_timeout: Duration,
) -> anyhow::Result<ConfirmationResponseFromRpc> {
let result_vec: Vec<(Signature, ConfirmationResponseFromRpc)> =
send_and_confirm_bulk_transactions(rpc, &[tx], max_timeout).await?;
assert_eq!(result_vec.len(), 1, "expected 1 result");
let (_sig, confirmation_response) = result_vec.into_iter().next().unwrap();
Ok(confirmation_response)
}
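/// Measures a single getHealth round trip, used as a rough proxy for the network latency to the RPC endpoint.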
pub async fn rpc_roundtrip_duration(rpc: &RpcClient) -> anyhow::Result<Duration> {
let started_at = Instant::now();
rpc.get_health().await?;
let duration = started_at.elapsed();
Ok(duration)
}
// async fn submit_ping_thing_stats(
// confirmation_info: &ConfirmationSlotResult,
// ping_thing: &PingThing,
// ) -> anyhow::Result<()> {
// match confirmation_info.result {
// ConfirmationSlotResult::Timeout(_) => Ok(()),
// ConfirmationSlotResult::Success(slot_landed) => {
// ping_thing
// .submit_confirmed_stats(
// confirmation_info.confirmation_time,
// confirmation_info.signature,
// PingThingTxType::Memo,
// true,
// confirmation_info.slot_sent,
// slot_landed,
// )
// .await
// }
// }
// }

View File

@ -1,3 +1,4 @@
pub mod api_load;
pub mod confirmation_rate;
pub mod confirmation_slot;
pub mod rpc_interface;

View File

@ -0,0 +1,285 @@
use anyhow::{bail, Context, Error};
use futures::future::join_all;
use futures::TryFutureExt;
use itertools::Itertools;
use log::{debug, trace, warn};
use solana_rpc_client::nonblocking::rpc_client::RpcClient;
use solana_rpc_client::rpc_client::SerializableTransaction;
use solana_rpc_client_api::client_error::ErrorKind;
use solana_rpc_client_api::config::RpcSendTransactionConfig;
use solana_sdk::clock::Slot;
use solana_sdk::commitment_config::CommitmentConfig;
use solana_sdk::signature::Signature;
use solana_sdk::transaction::Transaction;
use solana_transaction_status::TransactionConfirmationStatus;
use std::collections::{HashMap, HashSet};
use std::iter::zip;
use std::sync::Arc;
use std::time::Duration;
use tokio::time::Instant;
use url::Url;
pub fn create_rpc_client(rpc_url: &Url) -> RpcClient {
RpcClient::new_with_commitment(rpc_url.to_string(), CommitmentConfig::confirmed())
}
#[derive(Clone)]
pub enum ConfirmationResponseFromRpc {
// RPC error on send_transaction
SendError(Arc<ErrorKind>),
// (slot when sent at confirmed commitment, slot when confirmed, commitment status, confirmation duration)
// transaction_confirmation_status is "confirmed" or "finalized"
Success(Slot, Slot, TransactionConfirmationStatus, Duration),
// timeout waiting for confirmation status
Timeout(Duration),
}
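/// Sends all transactions with RPC retries disabled, then polls get_signature_statuses
/// in chunks of up to 256 signatures until every transaction reaches "confirmed" or
/// max_timeout elapses; transactions still pending at that point are reported as Timeout.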
pub async fn send_and_confirm_bulk_transactions(
rpc_client: &RpcClient,
txs: &[Transaction],
max_timeout: Duration,
) -> anyhow::Result<Vec<(Signature, ConfirmationResponseFromRpc)>> {
trace!("Polling for next slot ..");
let send_slot = poll_next_slot_start(rpc_client)
.await
.context("poll for next start slot")?;
trace!("Send slot: {}", send_slot);
let send_config = RpcSendTransactionConfig {
skip_preflight: true,
preflight_commitment: None,
encoding: None,
max_retries: None,
min_context_slot: None,
};
let started_at = Instant::now();
trace!(
"Sending {} transactions via RPC (retries=off) ..",
txs.len()
);
let batch_sigs_or_fails = join_all(txs.iter().map(|tx| {
rpc_client
.send_transaction_with_config(tx, send_config)
.map_err(|e| e.kind)
}))
.await;
let after_send_slot = rpc_client
.get_slot_with_commitment(CommitmentConfig::confirmed())
.await
.context("get slot afterwards")?;
if after_send_slot - send_slot > 0 {
warn!(
"Slot advanced during sending transactions: {} -> {}",
send_slot, after_send_slot
);
} else {
debug!(
"Slot did not advance during sending transactions: {} -> {}",
send_slot, after_send_slot
);
}
let num_sent_ok = batch_sigs_or_fails
.iter()
.filter(|sig_or_fail| sig_or_fail.is_ok())
.count();
let num_sent_failed = batch_sigs_or_fails
.iter()
.filter(|sig_or_fail| sig_or_fail.is_err())
.count();
for (i, tx_sig) in txs.iter().enumerate() {
let tx_sent = batch_sigs_or_fails[i].is_ok();
if tx_sent {
trace!("- tx_sent {}", tx_sig.get_signature());
} else {
trace!("- tx_fail {}", tx_sig.get_signature());
}
}
debug!(
"{} transactions sent successfully in {:.02}ms",
num_sent_ok,
started_at.elapsed().as_secs_f32() * 1000.0
);
debug!(
"{} transactions failed to send in {:.02}ms",
num_sent_failed,
started_at.elapsed().as_secs_f32() * 1000.0
);
if num_sent_failed > 0 {
warn!(
"Some transactions failed to send: {} out of {}",
num_sent_failed,
txs.len()
);
bail!("Failed to send all transactions");
}
let mut pending_status_set: HashSet<Signature> = HashSet::new();
batch_sigs_or_fails
.iter()
.filter(|sig_or_fail| sig_or_fail.is_ok())
.for_each(|sig_or_fail| {
pending_status_set.insert(sig_or_fail.as_ref().unwrap().to_owned());
});
let mut result_status_map: HashMap<Signature, ConfirmationResponseFromRpc> = HashMap::new();
// items get moved from pending_status_set to result_status_map
let started_at = Instant::now();
let timeout_at = started_at + max_timeout;
'polling_loop: for iteration in 1.. {
let iteration_ends_at = started_at + Duration::from_millis(iteration * 400);
assert_eq!(
pending_status_set.len() + result_status_map.len(),
num_sent_ok,
"Items must move between pending+result"
);
let tx_batch = pending_status_set.iter().cloned().collect_vec();
debug!(
"Request status for batch of remaining {} transactions in iteration {}",
tx_batch.len(),
iteration
);
let status_started_at = Instant::now();
let mut batch_status = Vec::new();
// "Too many inputs provided; max 256"
for chunk in tx_batch.chunks(256) {
// fail hard if not possible to poll status
let chunk_responses = rpc_client
.get_signature_statuses(chunk)
.await
.expect("get signature statuses");
batch_status.extend(chunk_responses.value);
}
if status_started_at.elapsed() > Duration::from_millis(500) {
warn!(
"SLOW get_signature_statuses for {} transactions took {:?}",
tx_batch.len(),
status_started_at.elapsed()
);
}
let elapsed = started_at.elapsed();
for (tx_sig, status_response) in zip(tx_batch, batch_status) {
match status_response {
Some(tx_status) => {
trace!(
"Some signature status {:?} received for {} after {:.02}ms",
tx_status.confirmation_status,
tx_sig,
elapsed.as_secs_f32() * 1000.0
);
if !tx_status.satisfies_commitment(CommitmentConfig::confirmed()) {
continue 'polling_loop;
}
// status is confirmed or finalized
pending_status_set.remove(&tx_sig);
let prev_value = result_status_map.insert(
tx_sig,
ConfirmationResponseFromRpc::Success(
send_slot,
tx_status.slot,
tx_status.confirmation_status(),
elapsed,
),
);
assert!(prev_value.is_none(), "Must not override existing value");
}
None => {
// None: not yet processed by the cluster
trace!(
"No signature status was received for {} after {:.02}ms - continue waiting",
tx_sig,
elapsed.as_secs_f32() * 1000.0
);
}
}
}
if pending_status_set.is_empty() {
debug!(
"All transactions confirmed after {} iterations / {:?}",
iteration,
started_at.elapsed()
);
break 'polling_loop;
}
if Instant::now() > timeout_at {
warn!(
"Timeout waiting for transactions to confirm after {} iterations",
iteration
);
break 'polling_loop;
}
// sample roughly once per slot (~400ms iteration interval)
tokio::time::sleep_until(iteration_ends_at).await;
} // -- END polling loop
let total_time_elapsed_polling = started_at.elapsed();
// all transactions which remain in pending list are considered timed out
for tx_sig in pending_status_set.clone() {
pending_status_set.remove(&tx_sig);
result_status_map.insert(
tx_sig,
ConfirmationResponseFromRpc::Timeout(total_time_elapsed_polling),
);
}
let result_as_vec = batch_sigs_or_fails
.into_iter()
.enumerate()
.map(|(i, sig_or_fail)| match sig_or_fail {
Ok(tx_sig) => {
let confirmation = result_status_map
.get(&tx_sig)
.expect("consistent map with all tx")
.clone()
.to_owned();
(tx_sig, confirmation)
}
Err(send_error) => {
let tx_sig = txs[i].get_signature();
let confirmation = ConfirmationResponseFromRpc::SendError(Arc::new(send_error));
(*tx_sig, confirmation)
}
})
.collect_vec();
Ok(result_as_vec)
}
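/// Polls the current slot (confirmed commitment) roughly every 30ms and returns as soon
/// as the slot advances by exactly one, i.e. close to the start of a fresh slot.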
pub async fn poll_next_slot_start(rpc_client: &RpcClient) -> Result<Slot, Error> {
let started_at = Instant::now();
let mut last_slot: Option<Slot> = None;
let mut i = 1;
// try to catch slot start
let send_slot = loop {
if i > 500 {
bail!("Timeout waiting for slot change");
}
let iteration_ends_at = started_at + Duration::from_millis(i * 30);
let slot = rpc_client
.get_slot_with_commitment(CommitmentConfig::confirmed())
.await?;
trace!("polling slot {}", slot);
if let Some(last_slot) = last_slot {
if last_slot + 1 == slot {
break slot;
}
}
last_slot = Some(slot);
tokio::time::sleep_until(iteration_ends_at).await;
i += 1;
};
Ok(send_slot)
}

bench/src/benchnew.rs Normal file
View File

@ -0,0 +1,156 @@
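// Example invocation (hypothetical binary name and paths; flag names follow the clap derive below):
//   benchnew confirmation-rate --payer-path ~/.config/solana/id.json \
//     --rpc-url http://127.0.0.1:8899 --size-tx small --txs-per-run 10 --num-of-runs 3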
use std::path::PathBuf;
use std::time::Duration;
use bench::{
benches::{
api_load::api_load, confirmation_rate::confirmation_rate,
confirmation_slot::confirmation_slot,
},
metrics::{PingThing, PingThingCluster},
tx_size::TxSize,
BenchmarkTransactionParams,
};
use clap::{Parser, Subcommand};
#[derive(Parser, Debug)]
#[clap(version, about)]
struct Arguments {
#[clap(subcommand)]
subcommand: SubCommand,
}
#[derive(Subcommand, Debug)]
enum SubCommand {
ApiLoad {
#[clap(short, long)]
payer_path: PathBuf,
#[clap(short, long)]
rpc_url: String,
#[clap(short, long)]
test_duration_ms: u64,
/// The CU price in micro lamports
#[clap(short, long, default_value_t = 3)]
#[arg(short = 'f')]
cu_price: u64,
},
ConfirmationRate {
#[clap(short, long)]
payer_path: PathBuf,
#[clap(short, long)]
rpc_url: String,
#[clap(short, long)]
size_tx: TxSize,
/// Maximum confirmation time in milliseconds. After this, the txn is considered unconfirmed
#[clap(short, long, default_value_t = 15_000)]
max_timeout_ms: u64,
#[clap(short, long)]
txs_per_run: usize,
#[clap(short, long)]
num_of_runs: usize,
/// The CU price in micro lamports
#[clap(short, long, default_value_t = 300)]
#[arg(short = 'f')]
cu_price: u64,
},
/// Compares the confirmation slot of txs sent to 2 different RPCs
ConfirmationSlot {
#[clap(short, long)]
payer_path: PathBuf,
/// URL of the 1st RPC
#[clap(short, long)]
#[arg(short = 'a')]
rpc_a: String,
/// URL of the 2nd RPC
#[clap(short, long)]
#[arg(short = 'b')]
rpc_b: String,
#[clap(short, long)]
size_tx: TxSize,
/// Maximum confirmation time in milliseconds. After this, the txn is considered unconfirmed
#[clap(short, long, default_value_t = 15_000)]
max_timeout_ms: u64,
#[clap(short, long)]
num_of_runs: usize,
/// The CU price in micro lamports
#[clap(short, long, default_value_t = 300)]
#[arg(short = 'f')]
cu_price: u64,
#[clap(long)]
ping_thing_token: Option<String>,
},
}
pub fn initialize_logger() {
tracing_subscriber::fmt()
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
.with_thread_ids(true)
.with_line_number(true)
.init();
}
#[tokio::main(flavor = "multi_thread", worker_threads = 16)]
async fn main() {
let args = Arguments::parse();
initialize_logger();
match args.subcommand {
SubCommand::ApiLoad {
payer_path,
rpc_url,
test_duration_ms,
cu_price,
} => {
api_load(&payer_path, rpc_url, test_duration_ms, cu_price)
.await
.unwrap();
}
SubCommand::ConfirmationRate {
payer_path,
rpc_url,
size_tx,
max_timeout_ms,
txs_per_run,
num_of_runs,
cu_price,
} => confirmation_rate(
&payer_path,
rpc_url,
BenchmarkTransactionParams {
tx_size: size_tx,
cu_price_micro_lamports: cu_price,
},
Duration::from_millis(max_timeout_ms),
txs_per_run,
num_of_runs,
)
.await
.unwrap(),
SubCommand::ConfirmationSlot {
payer_path,
rpc_a,
rpc_b,
size_tx,
max_timeout_ms,
num_of_runs,
cu_price,
ping_thing_token,
} => confirmation_slot(
&payer_path,
rpc_a,
rpc_b,
BenchmarkTransactionParams {
tx_size: size_tx,
cu_price_micro_lamports: cu_price,
},
Duration::from_millis(max_timeout_ms),
num_of_runs,
ping_thing_token.map(|t| PingThing {
cluster: PingThingCluster::Mainnet,
va_api_key: t,
}),
)
.await
.unwrap(),
}
}

bench/src/helpers.rs Normal file
View File

@ -0,0 +1,208 @@
use anyhow::Context;
use itertools::Itertools;
use lazy_static::lazy_static;
use rand::{distributions::Alphanumeric, prelude::Distribution, SeedableRng};
use solana_rpc_client::nonblocking::rpc_client::RpcClient;
use solana_sdk::compute_budget;
use solana_sdk::instruction::AccountMeta;
use solana_sdk::{
commitment_config::CommitmentConfig,
hash::Hash,
instruction::Instruction,
message::Message,
pubkey::Pubkey,
signature::{Keypair, Signature},
signer::Signer,
system_instruction,
transaction::Transaction,
};
use std::path::PathBuf;
use std::{str::FromStr, time::Duration};
use tokio::time::Instant;
const MEMO_PROGRAM_ID: &str = "MemoSq4gqABAXKb96qnH8TysNcWxMyWCqXgDLGmfcHr";
const WAIT_LIMIT_IN_SECONDS: u64 = 60;
lazy_static! {
static ref USER_KEYPAIR: PathBuf = {
dirs::home_dir()
.unwrap()
.join(".config")
.join("solana")
.join("id.json")
};
}
pub struct BenchHelper;
impl BenchHelper {
pub async fn get_payer() -> anyhow::Result<Keypair> {
let payer = tokio::fs::read_to_string(USER_KEYPAIR.as_path())
.await
.context("Error reading payer file")?;
let payer: Vec<u8> = serde_json::from_str(&payer)?;
let payer = Keypair::from_bytes(&payer)?;
Ok(payer)
}
pub async fn wait_till_signature_status(
rpc_client: &RpcClient,
sig: &Signature,
commitment_config: CommitmentConfig,
) -> anyhow::Result<()> {
let instant = Instant::now();
loop {
if instant.elapsed() > Duration::from_secs(WAIT_LIMIT_IN_SECONDS) {
return Err(anyhow::Error::msg("Timedout waiting"));
}
if let Some(err) = rpc_client
.get_signature_status_with_commitment(sig, commitment_config)
.await?
{
err?;
return Ok(());
}
}
}
pub fn create_transaction(funded_payer: &Keypair, blockhash: Hash) -> Transaction {
let to_pubkey = Pubkey::new_unique();
// transfer instruction
let instruction =
system_instruction::transfer(&funded_payer.pubkey(), &to_pubkey, 1_000_000);
let message = Message::new(&[instruction], Some(&funded_payer.pubkey()));
Transaction::new(&[funded_payer], message, blockhash)
}
pub fn generate_random_strings(
num_of_txs: usize,
random_seed: Option<u64>,
n_chars: usize,
) -> Vec<Vec<u8>> {
let seed = random_seed.map_or(0, |x| x);
let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(seed);
(0..num_of_txs)
.map(|_| Alphanumeric.sample_iter(&mut rng).take(n_chars).collect())
.collect()
}
#[inline]
pub fn generate_txs(
num_of_txs: usize,
funded_payer: &Keypair,
blockhash: Hash,
random_seed: Option<u64>,
cu_price_micro_lamports: u64,
) -> Vec<Transaction> {
let seed = random_seed.map_or(0, |x| x);
let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(seed);
(0..num_of_txs)
.map(|_| {
let random_bytes: Vec<u8> = Alphanumeric.sample_iter(&mut rng).take(10).collect();
Self::create_memo_tx_small(
&random_bytes,
funded_payer,
blockhash,
cu_price_micro_lamports,
)
})
.collect()
}
// note: there is another version of this
pub fn create_memo_tx_small(
msg: &[u8],
payer: &Keypair,
blockhash: Hash,
cu_price_micro_lamports: u64,
) -> Transaction {
let memo = Pubkey::from_str(MEMO_PROGRAM_ID).unwrap();
let instruction = Instruction::new_with_bytes(memo, msg, vec![]);
let cu_request: Instruction =
compute_budget::ComputeBudgetInstruction::set_compute_unit_limit(14000);
let instructions = if cu_price_micro_lamports > 0 {
let cu_budget_ix: Instruction =
compute_budget::ComputeBudgetInstruction::set_compute_unit_price(
cu_price_micro_lamports,
);
vec![cu_request, cu_budget_ix, instruction]
} else {
vec![cu_request, instruction]
};
let message = Message::new(&instructions, Some(&payer.pubkey()));
Transaction::new(&[payer], message, blockhash)
}
pub fn create_memo_tx_large(
msg: &[u8],
payer: &Keypair,
blockhash: Hash,
cu_price_micro_lamports: u64,
) -> Transaction {
let accounts = (0..8).map(|_| Keypair::new()).collect_vec();
let memo = Pubkey::from_str(MEMO_PROGRAM_ID).unwrap();
let instruction = Instruction::new_with_bytes(
memo,
msg,
accounts
.iter()
.map(|keypair| AccountMeta::new_readonly(keypair.pubkey(), true))
.collect_vec(),
);
let instructions = if cu_price_micro_lamports > 0 {
let cu_budget_ix: Instruction =
compute_budget::ComputeBudgetInstruction::set_compute_unit_price(
cu_price_micro_lamports,
);
vec![cu_budget_ix, instruction]
} else {
vec![instruction]
};
let message = Message::new(&instructions, Some(&payer.pubkey()));
let mut signers = vec![payer];
signers.extend(accounts.iter());
Transaction::new(&signers, message, blockhash)
}
}
#[test]
fn transaction_size_small() {
let blockhash = Hash::default();
let payer_keypair = Keypair::from_base58_string(
"rKiJ7H5UUp3JR18kNyTF1XPuwPKHEM7gMLWHZPWP5djrW1vSjfwjhvJrevxF9MPmUmN9gJMLHZdLMgc9ao78eKr",
);
let seed = 42;
let random_strings = BenchHelper::generate_random_strings(1, Some(seed), 10);
let rand_string = random_strings.first().unwrap();
let tx = BenchHelper::create_memo_tx_small(rand_string, &payer_keypair, blockhash, 300);
assert_eq!(bincode::serialized_size(&tx).unwrap(), 231);
}
#[test]
fn transaction_size_large() {
let blockhash = Hash::default();
let payer_keypair = Keypair::from_base58_string(
"rKiJ7H5UUp3JR18kNyTF1XPuwPKHEM7gMLWHZPWP5djrW1vSjfwjhvJrevxF9MPmUmN9gJMLHZdLMgc9ao78eKr",
);
let seed = 42;
let random_strings = BenchHelper::generate_random_strings(1, Some(seed), 232);
let rand_string = random_strings.first().unwrap();
let tx = BenchHelper::create_memo_tx_large(rand_string, &payer_keypair, blockhash, 300);
assert_eq!(bincode::serialized_size(&tx).unwrap(), 1222);
}

View File

@ -1,4 +1,5 @@
use anyhow::bail;
use clap::Parser;
use futures::future::join_all;
use itertools::Itertools;
use log::{debug, warn};
@ -19,12 +20,44 @@ use solana_sdk::{
use solana_transaction_status::TransactionStatus;
use std::{str::FromStr, time::Duration};
use tokio::time::Instant;
use tx_size::TxSize;
pub mod bench1;
pub mod benches;
pub mod helpers;
pub mod metrics;
pub mod service_adapter;
pub mod tx_size;
//TODO: use CLAP
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
pub struct Args {
/// Number of tx(s) sent in each run
#[arg(short = 'n', long, default_value_t = 5_000)]
pub tx_count: usize,
/// Number of bench runs
#[arg(short = 'r', long, default_value_t = 1)]
pub runs: usize,
/// Interval between each bench run (ms)
#[arg(short = 'i', long, default_value_t = 1000)]
pub run_interval_ms: u64,
/// Metrics output file name
#[arg(short = 'm', long, default_value_t = String::from("metrics.csv"))]
pub metrics_file_name: String,
/// Lite Rpc Address
#[arg(short = 'l', long, default_value_t = String::from("http://127.0.0.1:8890"))]
pub lite_rpc_addr: String,
#[arg(short = 't', long, default_value_t = String::from("transactions.csv"))]
pub transaction_save_file: String,
// choose between small (179 bytes) and large (1186 bytes) transactions
#[arg(short = 'L', long, default_value_t = false)]
pub large_transactions: bool,
}
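/// Transaction shape shared by the newer benchmarks: memo payload size plus the
/// compute-unit price (priority fee) in micro-lamports.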
pub struct BenchmarkTransactionParams {
pub tx_size: TxSize,
pub cu_price_micro_lamports: u64,
}
const MEMO_PROGRAM_ID: &str = "MemoSq4gqABAXKb96qnH8TysNcWxMyWCqXgDLGmfcHr";
const WAIT_LIMIT_IN_SECONDS: u64 = 60;
@ -162,10 +195,10 @@ pub fn generate_txs(
payer: &Keypair,
blockhash: Hash,
rng: &mut Rng8,
size: tx_size::TxSize,
tx_params: &BenchmarkTransactionParams,
) -> Vec<Transaction> {
(0..num_of_txs)
.map(|_| create_memo_tx(payer, blockhash, rng, size))
.map(|_| create_memo_tx(payer, blockhash, rng, tx_params))
.collect()
}
@ -173,37 +206,58 @@ pub fn create_memo_tx(
payer: &Keypair,
blockhash: Hash,
rng: &mut Rng8,
size: tx_size::TxSize,
tx_params: &BenchmarkTransactionParams,
) -> Transaction {
let rand_str = generate_random_string(rng, tx_params.tx_size.memo_size());
match tx_params.tx_size {
tx_size::TxSize::Small => create_memo_tx_small(
&rand_str,
payer,
blockhash,
tx_params.cu_price_micro_lamports,
),
tx_size::TxSize::Large => create_memo_tx_large(
&rand_str,
payer,
blockhash,
tx_params.cu_price_micro_lamports,
),
}
}
pub fn create_memo_tx_small(
msg: &[u8],
payer: &Keypair,
blockhash: Hash,
cu_price_micro_lamports: u64,
) -> Transaction {
let memo = Pubkey::from_str(MEMO_PROGRAM_ID).unwrap();
let cu_budget_ix: Instruction =
ComputeBudgetInstruction::set_compute_unit_price(cu_price_micro_lamports);
// Program consumed: 12775 of 13700 compute units
let cu_limit_ix: Instruction = ComputeBudgetInstruction::set_compute_unit_limit(14000);
let instruction = Instruction::new_with_bytes(memo, msg, vec![]);
let message = Message::new(
&[cu_budget_ix, cu_limit_ix, instruction],
Some(&payer.pubkey()),
);
Transaction::new(&[payer], message, blockhash)
}
pub fn create_memo_tx_large(
msg: &[u8],
payer: &Keypair,
blockhash: Hash,
cu_price_micro_lamports: u64,
) -> Transaction {
let accounts = (0..8).map(|_| Keypair::new()).collect_vec();
let memo = Pubkey::from_str(MEMO_PROGRAM_ID).unwrap();
let cu_budget_ix: Instruction =
ComputeBudgetInstruction::set_compute_unit_price(cu_price_micro_lamports);
let cu_limit_ix: Instruction = ComputeBudgetInstruction::set_compute_unit_limit(14000);
let instruction = Instruction::new_with_bytes(
memo,
@ -213,7 +267,10 @@ pub fn create_memo_tx_large(msg: &[u8], payer: &Keypair, blockhash: Hash) -> Tra
.map(|keypair| AccountMeta::new_readonly(keypair.pubkey(), true))
.collect_vec(),
);
let message = Message::new(
&[cu_budget_ix, cu_limit_ix, instruction],
Some(&payer.pubkey()),
);
let mut signers = vec![payer];
signers.extend(accounts.iter());
@ -229,8 +286,9 @@ fn transaction_size_small() {
);
let mut rng = create_rng(Some(42));
let rand_string = generate_random_string(&mut rng, 10);
let priority_fee = 100;
let tx = create_memo_tx_small(&rand_string, &payer_keypair, blockhash, priority_fee);
assert_eq!(bincode::serialized_size(&tx).unwrap(), 231);
}
@ -242,7 +300,8 @@ fn transaction_size_large() {
);
let mut rng = create_rng(Some(42));
let rand_string = generate_random_string(&mut rng, 240);
let priority_fee = 100;
let tx = create_memo_tx_large(&rand_string, &payer_keypair, blockhash, priority_fee);
assert_eq!(bincode::serialized_size(&tx).unwrap(), 1238);
}

View File

@ -1,95 +1,145 @@
use bench::{
bench1,
helpers::BenchHelper,
metrics::{AvgMetric, Metric, TxMetricData},
Args,
};
use clap::Parser;
use futures::future::join_all;
use log::{error, info};
use solana_rpc_client::nonblocking::rpc_client::RpcClient;
use bench::bench1::TransactionSize;
use solana_sdk::{
commitment_config::CommitmentConfig, hash::Hash, signature::Keypair, signer::Signer,
};
use std::sync::{atomic::AtomicU64, Arc};
use tokio::{sync::RwLock, time::Duration};
#[tokio::main(flavor = "multi_thread", worker_threads = 16)]
async fn main() {
tracing_subscriber::fmt::init();
let Args {
tx_count,
runs,
run_interval_ms,
metrics_file_name,
lite_rpc_addr,
transaction_save_file,
large_transactions,
} = Args::parse();
let cu_price_micro_lamports = 300;
let mut run_interval_ms = tokio::time::interval(Duration::from_millis(run_interval_ms));
let transaction_size = if large_transactions {
TransactionSize::Large
} else {
TransactionSize::Small
};
info!("Connecting to LiteRPC using {lite_rpc_addr}");
let mut avg_metric = AvgMetric::default();
let mut tasks = vec![];
let funded_payer = BenchHelper::get_payer().await.unwrap();
info!("Payer: {}", funded_payer.pubkey());
let rpc_client = Arc::new(RpcClient::new_with_commitment(
lite_rpc_addr.clone(),
CommitmentConfig::confirmed(),
));
let bh = rpc_client.get_latest_blockhash().await.unwrap();
let slot = rpc_client.get_slot().await.unwrap();
let block_hash: Arc<RwLock<Hash>> = Arc::new(RwLock::new(bh));
let current_slot = Arc::new(AtomicU64::new(slot));
{
// block hash updater task
let block_hash = block_hash.clone();
let rpc_client = rpc_client.clone();
let current_slot = current_slot.clone();
tokio::spawn(async move {
loop {
let bh = rpc_client.get_latest_blockhash().await;
match bh {
Ok(bh) => {
let mut lock = block_hash.write().await;
*lock = bh;
}
Err(e) => println!("blockhash update error {}", e),
}
let slot = rpc_client.get_slot().await;
match slot {
Ok(slot) => {
current_slot.store(slot, std::sync::atomic::Ordering::Relaxed);
}
Err(e) => println!("slot {}", e),
}
tokio::time::sleep(Duration::from_millis(100)).await;
}
})
};
// transaction logger
let (tx_log_sx, mut tx_log_rx) = tokio::sync::mpsc::unbounded_channel::<TxMetricData>();
let log_transactions = !transaction_save_file.is_empty();
if log_transactions {
tokio::spawn(async move {
let mut tx_writer = csv::Writer::from_path(transaction_save_file).unwrap();
while let Some(x) = tx_log_rx.recv().await {
tx_writer.serialize(x).unwrap();
}
});
}
for seed in 0..runs {
let funded_payer = Keypair::from_bytes(funded_payer.to_bytes().as_slice()).unwrap();
tasks.push(tokio::spawn(bench1::bench(
rpc_client.clone(),
tx_count,
funded_payer,
seed as u64,
block_hash.clone(),
current_slot.clone(),
tx_log_sx.clone(),
log_transactions,
transaction_size,
cu_price_micro_lamports,
)));
// wait for an interval
run_interval_ms.tick().await;
}
let join_res = join_all(tasks).await;
let mut run_num = 1;
let mut csv_writer = csv::Writer::from_path(metrics_file_name).unwrap();
for res in join_res {
match res {
Ok(metric) => {
info!("Run {run_num}: Sent and Confirmed {tx_count} tx(s) in {metric:?} with",);
// update avg metric
avg_metric += &metric;
csv_writer.serialize(metric).unwrap();
}
Err(_) => {
error!("join error for run {}", run_num);
}
}
run_num += 1;
}
let avg_metric = Metric::from(avg_metric);
info!("Avg Metric {avg_metric:?}",);
csv_writer.serialize(avg_metric).unwrap();
csv_writer.flush().unwrap();
}

View File

@ -1,9 +1,13 @@
use std::{
fmt::{self, Display},
ops::{AddAssign, DivAssign},
time::Duration,
};
use reqwest::StatusCode;
use serde::{Deserialize, Serialize};
use solana_sdk::{signature::Signature, slot_history::Slot};
use tracing::debug;
#[derive(Clone, Copy, Debug, Default, serde::Serialize)]
pub struct Metric {
@ -143,3 +147,158 @@ pub struct TxMetricData {
pub time_to_send_in_millis: u64,
pub time_to_confirm_in_millis: u64,
}
#[derive(Clone, Debug)]
pub enum PingThingCluster {
Mainnet,
Testnet,
Devnet,
}
impl PingThingCluster {
pub fn from_arg(cluster: String) -> Self {
match cluster.to_lowercase().as_str() {
"mainnet" => PingThingCluster::Mainnet,
"testnet" => PingThingCluster::Testnet,
"devnet" => PingThingCluster::Devnet,
_ => panic!("incorrect cluster name"),
}
}
}
impl PingThingCluster {
pub fn to_url_part(&self) -> String {
match self {
PingThingCluster::Mainnet => "mainnet",
PingThingCluster::Testnet => "testnet",
PingThingCluster::Devnet => "devnet",
}
.to_string()
}
}
#[derive(Clone, Debug)]
pub enum PingThingTxType {
Transfer,
Memo,
}
impl Display for PingThingTxType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
PingThingTxType::Transfer => write!(f, "transfer"),
PingThingTxType::Memo => write!(f, "memo"),
}
}
}
#[derive(Clone)]
pub struct PingThing {
pub cluster: PingThingCluster,
pub va_api_key: String,
}
/// request format see https://github.com/Block-Logic/ping-thing-client/blob/4c008c741164702a639c282f1503a237f7d95e64/ping-thing-client.mjs#L160
#[derive(Debug, Serialize, Deserialize)]
struct PingThingData {
pub time: u128,
pub signature: String, // Tx sig
pub transaction_type: String, // 'transfer',
pub success: bool, // txSuccess
pub application: String, // e.g. 'web3'
pub commitment_level: String, // e.g. 'confirmed'
pub slot_sent: Slot,
pub slot_landed: Slot,
}
impl PingThing {
pub async fn submit_confirmed_stats(
&self,
tx_elapsed: Duration,
tx_sig: Signature,
tx_type: PingThingTxType,
tx_success: bool,
slot_sent: Slot,
slot_landed: Slot,
) -> anyhow::Result<()> {
submit_stats_to_ping_thing(
self.cluster.clone(),
self.va_api_key.clone(),
tx_elapsed,
tx_sig,
tx_type,
tx_success,
slot_sent,
slot_landed,
)
.await
}
}
/// submits to https://www.validators.app/ping-thing?network=mainnet
/// Assumes that the txn was sent on Mainnet and had the "confirmed" commitment level
#[allow(clippy::too_many_arguments)]
async fn submit_stats_to_ping_thing(
cluster: PingThingCluster,
va_api_key: String,
tx_elapsed: Duration,
tx_sig: Signature,
tx_type: PingThingTxType,
tx_success: bool,
slot_sent: Slot,
slot_landed: Slot,
) -> anyhow::Result<()> {
let submit_data_request = PingThingData {
time: tx_elapsed.as_millis(),
signature: tx_sig.to_string(),
transaction_type: tx_type.to_string(),
success: tx_success,
application: "LiteRPC.bench".to_string(),
commitment_level: "confirmed".to_string(),
slot_sent,
slot_landed,
};
let client = reqwest::Client::new();
// cluster: 'mainnet'
let response = client
.post(format!(
"https://www.validators.app/api/v1/ping-thing/{}",
cluster.to_url_part()
))
.header("Content-Type", "application/json")
.header("Token", va_api_key)
.json(&submit_data_request)
.send()
.await?
.error_for_status()?;
assert_eq!(response.status(), StatusCode::CREATED);
debug!("Sent data for tx {} to ping-thing server", tx_sig);
Ok(())
}
#[ignore]
#[tokio::test]
async fn test_ping_thing() {
let token = "".to_string();
assert!(!token.is_empty(), "Empty token for ping thing test");
let ping_thing = PingThing {
cluster: PingThingCluster::Mainnet,
va_api_key: token,
};
ping_thing
.submit_confirmed_stats(
Duration::from_secs(2),
Signature::new_unique(),
PingThingTxType::Transfer,
true,
123,
124,
)
.await
.unwrap();
}

View File

@ -0,0 +1,102 @@
// adapter code for all from benchrunner-service
use crate::bench1;
use crate::bench1::TransactionSize;
use crate::metrics::{Metric, TxMetricData};
use crate::tx_size::TxSize;
use log::debug;
use solana_rpc_client::nonblocking::rpc_client::RpcClient;
use solana_sdk::commitment_config::CommitmentConfig;
use solana_sdk::hash::Hash;
use solana_sdk::signature::Keypair;
use solana_sdk::signer::Signer;
use std::fmt::Display;
use std::sync::atomic::AtomicU64;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::RwLock;
use tokio::time::Instant;
#[derive(Debug, Clone)]
pub struct BenchConfig {
pub tx_count: usize,
pub cu_price_micro_lamports: u64,
}
impl Display for BenchConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}", self)
}
}
pub async fn bench_servicerunner(
bench_config: &BenchConfig,
rpc_addr: String,
funded_payer: Keypair,
size_tx: TxSize,
) -> Metric {
let started_at = Instant::now();
let transaction_size = match size_tx {
TxSize::Small => TransactionSize::Small,
TxSize::Large => TransactionSize::Large,
};
debug!("Payer: {}", funded_payer.pubkey());
let rpc_client = Arc::new(RpcClient::new_with_commitment(
rpc_addr.clone(),
CommitmentConfig::confirmed(),
));
let bh = rpc_client.get_latest_blockhash().await.unwrap();
let slot = rpc_client.get_slot().await.unwrap();
let block_hash: Arc<RwLock<Hash>> = Arc::new(RwLock::new(bh));
let current_slot = Arc::new(AtomicU64::new(slot));
{
// block hash updater task
let block_hash = block_hash.clone();
let rpc_client = rpc_client.clone();
let current_slot = current_slot.clone();
tokio::spawn(async move {
loop {
let bh = rpc_client.get_latest_blockhash().await;
match bh {
Ok(bh) => {
let mut lock = block_hash.write().await;
*lock = bh;
}
Err(e) => println!("blockhash update error {}", e),
}
let slot = rpc_client.get_slot().await;
match slot {
Ok(slot) => {
current_slot.store(slot, std::sync::atomic::Ordering::Relaxed);
}
Err(e) => println!("slot {}", e),
}
tokio::time::sleep(Duration::from_millis(100)).await;
}
})
};
{
// TODO what todo
// not used unless log_txs is set to true
let (tx_log_sx_null, _tx_log_rx) = tokio::sync::mpsc::unbounded_channel::<TxMetricData>();
bench1::bench(
rpc_client.clone(),
bench_config.tx_count,
funded_payer,
started_at.elapsed().as_micros() as u64,
block_hash.clone(),
current_slot.clone(),
tx_log_sx_null,
false, // log_transactions
transaction_size,
bench_config.cu_price_micro_lamports,
)
.await
}
}

View File

@ -1,4 +1,5 @@
use serde::Deserialize;
use std::fmt::Display;
// see https://spl.solana.com/memo for sizing of transactions
// As of v1.5.1, an unsigned instruction can support single-byte UTF-8 of up to 566 bytes.
@ -11,6 +12,15 @@ pub enum TxSize {
Large,
}
impl Display for TxSize {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
TxSize::Small => write!(f, "small"),
TxSize::Large => write!(f, "large"),
}
}
}
impl TxSize {
pub fn size(&self) -> usize {
match self {

bench/transactions.csv Normal file
View File

View File

@ -0,0 +1,39 @@
[package]
name = "solana-lite-rpc-benchrunner-service"
version = "0.2.4"
edition = "2021"
description = "Service for running recurring benchmarks"
rust-version = "1.73.0"
repository = "https://github.com/blockworks-foundation/lite-rpc"
license = "AGPL"
[dependencies]
solana-lite-rpc-util = { workspace = true }
bench = { workspace = true }
solana-sdk = { workspace = true }
solana-rpc-client = { workspace = true }
solana-transaction-status = { workspace = true }
solana-rpc-client-api = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
futures = { workspace = true }
futures-util = { workspace = true }
anyhow = { workspace = true }
log = { workspace = true }
clap = { workspace = true }
tracing-subscriber = { workspace = true }
prometheus = { workspace = true }
lazy_static = { workspace = true }
async-trait = { workspace = true }
tokio = { version = "1.28.2", features = ["full", "fs"]}
tokio-util = "0.7"
chrono = { workspace = true }
itertools = { workspace = true }
native-tls = { workspace = true }
postgres-native-tls = { workspace = true }
postgres-types = { version = "0.2.6", features = ["derive", "with-serde_json-1"] }
tokio-postgres = { version = "0.7.8", features = ["with-chrono-0_4"] }

View File

@ -0,0 +1,35 @@
# Setup
### Hardware
Hardware: recommend 1024MB RAM, 2 vCPUs, small disk
### Environment Variables
| Environment Variable | Purpose | Required? | Default Value |
|----------------------|-------------------------------------------------------|---------------|---------------|
| `PG_ENABLED` | Enable writing to PostgreSQL | No | false |
| `PG_CONFIG` | PostgreSQL connection string | if PG_ENABLED | |
| `TENANT1_ID` | Technical ID for the tenant | Yes | |
| `TENANT1_RPC_ADDR` | RPC address for the target RPC node | Yes | |
| `TENANT2_..` | more tenants can be added using TENANT2, TENANT3, ... | | |
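A minimal environment for two tenants might look like this (placeholder values). Note that the service additionally reads the payer keypair from `FUNDED_PAYER_KEYPAIR58`:
```bash
export TENANT1_ID="solana-rpc"
export TENANT1_RPC_ADDR="http://localhost:8899"
export TENANT2_ID="lite-rpc"
export TENANT2_RPC_ADDR="http://localhost:8890"
export FUNDED_PAYER_KEYPAIR58="<base58-encoded keypair>"
export PG_ENABLED=false
```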
### Command-line Arguments
```
Options:
-b, --bench-interval <BENCH_INTERVAL>
interval in milliseconds to run the benchmark [default: 60000]
-n, --tx-count <TX_COUNT>
[default: 10]
-s, --size-tx <SIZE_TX>
[default: small] [possible values: small, large]
-p, --prio-fees <PRIO_FEES>
[default: 0]
```
```bash
solana-lite-rpc-benchrunner-service \
--bench-interval 600000 \
--tx-count 100 \
--prio-fees 0 --prio-fees 1000 --prio-fees 100000
```

View File

@ -0,0 +1,80 @@
use itertools::Itertools;
use solana_sdk::signature::Keypair;
#[derive(Debug, Clone)]
pub struct TenantConfig {
// technical identifier for the tenant, e.g. "solana-rpc"
pub tenant_id: String,
pub rpc_addr: String,
}
// recommend to use one payer keypair for all targets and fund that keypair with enough SOL
pub fn get_funded_payer_from_env() -> Keypair {
let keypair58_string: String = std::env::var("FUNDED_PAYER_KEYPAIR58")
.expect("need funded payer keypair on env (variable FUNDED_PAYER_KEYPAIR58)");
Keypair::from_base58_string(&keypair58_string)
}
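/// Groups env vars by their TENANT<n> prefix and builds one TenantConfig per tenant
/// from the corresponding TENANT<n>_ID and TENANT<n>_RPC_ADDR pair.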
pub fn read_tenant_configs(env_vars: Vec<(String, String)>) -> Vec<TenantConfig> {
let map = env_vars
.iter()
.filter(|(k, _)| k.starts_with("TENANT"))
.into_group_map_by(|(k, _v)| {
let tenant_counter = k
.split('_')
.next()
.expect("tenant prefix must be split by underscore (e.g. TENANT99_SOMETHING")
.replace("TENANT", "");
tenant_counter
.parse::<u32>()
.expect("tenant counter must be a number (e.g. TENANT99)")
});
let values = map
.iter()
.sorted()
.map(|(tc, v)| TenantConfig {
tenant_id: v
.iter()
.find(|(v, _)| *v == format!("TENANT{}_ID", tc))
.iter()
.exactly_one()
.expect("need ID")
.1
.to_string(),
rpc_addr: v
.iter()
.find(|(v, _)| *v == format!("TENANT{}_RPC_ADDR", tc))
.iter()
.exactly_one()
.expect("need RPC_ADDR")
.1
.to_string(),
})
.collect::<Vec<TenantConfig>>();
values
}
#[test]
fn test_env_vars() {
let env_vars = vec![
(String::from("TENANT1_ID"), String::from("solana-rpc")),
(
String::from("TENANT1_RPC_ADDR"),
String::from("http://localhost:8899"),
),
(String::from("TENANT2_ID"), String::from("lite-rpc")),
(
String::from("TENANT2_RPC_ADDR"),
String::from("http://localhost:8890"),
),
];
let tenant_configs = read_tenant_configs(env_vars);
assert_eq!(tenant_configs.len(), 2);
assert_eq!(tenant_configs[0].tenant_id, "solana-rpc");
assert_eq!(tenant_configs[0].rpc_addr, "http://localhost:8899");
assert_eq!(tenant_configs[1].tenant_id, "lite-rpc");
assert_eq!(tenant_configs[1].rpc_addr, "http://localhost:8890");
}

View File

@ -0,0 +1,16 @@
use bench::tx_size::TxSize;
use clap::Parser;
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
pub struct Args {
/// interval in milliseconds to run the benchmark
#[arg(short = 'b', long, default_value_t = 60_000)]
pub bench_interval: u64,
#[arg(short = 'n', long, default_value_t = 10)]
pub tx_count: usize,
#[clap(short, long, default_value_t = TxSize::Small)]
pub size_tx: TxSize,
#[clap(short, long, default_values_t = [0])]
pub prio_fees: Vec<u64>,
}

View File

@ -0,0 +1,149 @@
mod args;
mod cli;
mod postgres;
mod prometheus;
use crate::args::{get_funded_payer_from_env, read_tenant_configs};
use crate::cli::Args;
use crate::postgres::metrics_dbstore::{
save_metrics_to_postgres, upsert_benchrun_status, BenchRunStatus,
};
use crate::postgres::postgres_session::PostgresSessionConfig;
use crate::postgres::postgres_session_cache::PostgresSessionCache;
use crate::prometheus::metrics_prometheus::publish_metrics_on_prometheus;
use crate::prometheus::prometheus_sync::PrometheusSync;
use bench::service_adapter::BenchConfig;
use clap::Parser;
use futures_util::future::join_all;
use itertools::Itertools;
use log::{debug, error, info};
use std::net::SocketAddr;
use std::str::FromStr;
use std::time::{Duration, SystemTime};
#[tokio::main]
async fn main() {
tracing_subscriber::fmt::init();
let Args {
bench_interval,
tx_count,
size_tx,
prio_fees,
} = Args::parse();
let postgres_config = PostgresSessionConfig::new_from_env().unwrap();
let bench_interval = Duration::from_millis(bench_interval);
let funded_payer = get_funded_payer_from_env();
let tenant_configs = read_tenant_configs(std::env::vars().collect::<Vec<(String, String)>>());
info!("Use postgres config: {:?}", postgres_config.is_some());
info!("Use prio fees: [{}]", prio_fees.iter().join(","));
info!("Start running benchmarks every {:?}", bench_interval);
info!(
"Found tenants: {}",
tenant_configs.iter().map(|tc| &tc.tenant_id).join(", ")
);
if tenant_configs.is_empty() {
error!("No tenants found (missing env vars) - exit");
return;
}
let _prometheus_task = PrometheusSync::sync(SocketAddr::from_str("[::]:9091").unwrap());
let mut jh_tenant_task = Vec::new();
// let postgres_session = Arc::new(PostgresSession::new(postgres_config.unwrap()).await);
let postgres_session = match postgres_config {
None => None,
Some(x) => {
let session_cache = PostgresSessionCache::new(x)
.await
.expect("PostgreSQL session cache");
Some(session_cache)
}
};
let bench_configs = prio_fees
.iter()
.map(|prio_fees| BenchConfig {
tx_count,
cu_price_micro_lamports: *prio_fees,
})
.collect_vec();
for tenant_config in &tenant_configs {
let funded_payer = funded_payer.insecure_clone();
let tenant_id = tenant_config.tenant_id.clone();
let postgres_session = postgres_session.clone();
let tenant_config = tenant_config.clone();
let bench_configs = bench_configs.clone();
let jh_runner = tokio::spawn(async move {
let mut interval = tokio::time::interval(bench_interval);
for run_count in 1.. {
let bench_config = bench_configs[run_count % bench_configs.len()].clone();
debug!(
"Invoke bench execution (#{}) on tenant <{}> using {}",
run_count, tenant_id, bench_config
);
let benchrun_at = SystemTime::now();
if let Some(postgres_session) = postgres_session.as_ref() {
let _dbstatus = upsert_benchrun_status(
postgres_session,
&tenant_config,
&bench_config,
benchrun_at,
BenchRunStatus::STARTED,
)
.await;
}
let metric = bench::service_adapter::bench_servicerunner(
&bench_config,
tenant_config.rpc_addr.clone(),
funded_payer.insecure_clone(),
size_tx,
)
.await;
if let Some(postgres_session) = postgres_session.as_ref() {
let _dbstatus = save_metrics_to_postgres(
postgres_session,
&tenant_config,
&bench_config,
&metric,
benchrun_at,
)
.await;
}
publish_metrics_on_prometheus(&tenant_config, &bench_config, &metric).await;
if let Some(postgres_session) = postgres_session.as_ref() {
let _dbstatus = upsert_benchrun_status(
postgres_session,
&tenant_config,
&bench_config,
benchrun_at,
BenchRunStatus::FINISHED,
)
.await;
}
debug!(
"Bench execution (#{}) done in {:?}",
run_count,
benchrun_at.elapsed().unwrap()
);
interval.tick().await;
}
});
jh_tenant_task.push(jh_runner);
} // -- END tenant loop
join_all(jh_tenant_task).await;
}
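One subtlety worth spelling out: run_count starts at 1, so the round-robin over bench_configs begins with the second configured prio fee, not the first. A standalone sketch of the rotation (fee values arbitrary):

// Illustration of bench_configs[run_count % bench_configs.len()] (sketch only).
let prio_fees: Vec<u64> = vec![0, 1000, 100_000];
for run_count in 1usize..=6 {
    let idx = run_count % prio_fees.len();
    println!("run #{run_count} -> cu_price_micro_lamports={}", prio_fees[idx]);
}
// run #1 -> 1000, run #2 -> 100000, run #3 -> 0, run #4 -> 1000, ...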

View File

@ -0,0 +1,27 @@
use std::time::SystemTime;
#[derive(Debug)]
pub struct PostgresConfirmationSlot {
pub signature: String,
pub bench_datetime: SystemTime,
pub slot_sent: u64,
pub slot_confirmed: u64,
pub endpoint: String,
pub confirmed: bool,
pub confirmation_time_ms: f32,
}
// impl PostgresConfirmationSlot {
// pub fn to_values() -> &[&(dyn ToSql + Sync)] {
// let values: &[&(dyn ToSql + Sync)] = &[
// &self.signature,
// &self.bench_datetime,
// &(self.slot_sent as i64),
// &(self.slot_confirmed as i64),
// &self.endpoint,
// &self.confirmed,
// &self.confirmation_time_ms,
// ];
// values
// }
// }
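The commented-out to_values above cannot compile as written: the u64 casts create temporaries, so the function would return a slice of references into locals. A compiling sketch, assuming the same column order, returns owned boxed parameters instead:

use postgres_types::ToSql;

impl PostgresConfirmationSlot {
    pub fn to_values(&self) -> Vec<Box<dyn ToSql + Sync>> {
        vec![
            Box::new(self.signature.clone()),
            Box::new(self.bench_datetime),
            Box::new(self.slot_sent as i64),
            Box::new(self.slot_confirmed as i64),
            Box::new(self.endpoint.clone()),
            Box::new(self.confirmed),
            Box::new(self.confirmation_time_ms),
        ]
    }
}

Callers can then bind a row with values.iter().map(|v| v.as_ref() as &(dyn ToSql + Sync)).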

View File

@ -0,0 +1,104 @@
use crate::args::TenantConfig;
use crate::postgres::postgres_session_cache::PostgresSessionCache;
use bench::metrics::Metric;
use bench::service_adapter::BenchConfig;
use log::warn;
use postgres_types::ToSql;
use std::time::SystemTime;
#[allow(clippy::upper_case_acronyms)]
pub enum BenchRunStatus {
STARTED,
FINISHED,
}
impl BenchRunStatus {
pub fn to_db_string(&self) -> &str {
match self {
BenchRunStatus::STARTED => "STARTED",
BenchRunStatus::FINISHED => "FINISHED",
}
}
}
pub async fn upsert_benchrun_status(
postgres_session: &PostgresSessionCache,
tenant_config: &TenantConfig,
_bench_config: &BenchConfig,
benchrun_at: SystemTime,
status: BenchRunStatus,
) -> anyhow::Result<()> {
let values: &[&(dyn ToSql + Sync)] = &[
&tenant_config.tenant_id,
&benchrun_at,
&status.to_db_string(),
];
let write_result = postgres_session
.get_session()
.await?
.execute(
r#"
INSERT INTO benchrunner.bench_runs (
tenant,
ts,
status
)
VALUES ($1, $2, $3)
ON CONFLICT (tenant, ts) DO UPDATE SET status = $3
"#,
values,
)
.await;
if let Err(err) = write_result {
warn!("Failed to upsert status (err {:?}) - continue", err);
}
Ok(())
}
pub async fn save_metrics_to_postgres(
postgres_session: &PostgresSessionCache,
tenant_config: &TenantConfig,
bench_config: &BenchConfig,
metric: &Metric,
benchrun_at: SystemTime,
) -> anyhow::Result<()> {
let metricjson = serde_json::to_value(metric).unwrap();
let values: &[&(dyn ToSql + Sync)] = &[
&tenant_config.tenant_id,
&benchrun_at,
&(bench_config.cu_price_micro_lamports as i64),
&(metric.txs_sent as i64),
&(metric.txs_confirmed as i64),
&(metric.txs_un_confirmed as i64),
&(metric.average_confirmation_time_ms as f32),
&metricjson,
];
let write_result = postgres_session
.get_session()
.await?
.execute(
r#"
INSERT INTO
benchrunner.bench_metrics (
tenant,
ts,
prio_fees,
txs_sent,
txs_confirmed, txs_un_confirmed,
average_confirmation_time_ms,
metric_json
)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
"#,
values,
)
.await;
if let Err(err) = write_result {
warn!("Failed to insert metrics (err {:?}) - continue", err);
}
Ok(())
}
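The matching migrations are not part of this diff. A hypothetical schema inferred from the two statements above (column types follow the bound Rust values, and the ON CONFLICT clause requires a unique key on (tenant, ts)):

// Hypothetical DDL sketch, inferred from the INSERTs above; not taken from this repo.
let ddl = r#"
    CREATE TABLE IF NOT EXISTS benchrunner.bench_runs (
        tenant text NOT NULL,
        ts timestamp NOT NULL,
        status text NOT NULL,
        PRIMARY KEY (tenant, ts)
    );
    CREATE TABLE IF NOT EXISTS benchrunner.bench_metrics (
        tenant text NOT NULL,
        ts timestamp NOT NULL,
        prio_fees int8 NOT NULL,
        txs_sent int8 NOT NULL,
        txs_confirmed int8 NOT NULL,
        txs_un_confirmed int8 NOT NULL,
        average_confirmation_time_ms float4 NOT NULL,
        metric_json jsonb NOT NULL
    );
"#;
postgres_session.get_session().await?.execute_multiple(ddl).await?;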

View File

@ -0,0 +1,4 @@
pub mod confirmation_slot;
pub mod metrics_dbstore;
pub mod postgres_session;
pub mod postgres_session_cache;

View File

@ -0,0 +1,214 @@
#![allow(dead_code)]
use std::env;
use std::sync::Arc;
use anyhow::Context;
use native_tls::{Certificate, Identity, TlsConnector};
use postgres_native_tls::MakeTlsConnector;
use solana_lite_rpc_util::encoding::BinaryEncoding;
use tokio_postgres::{
config::SslMode, tls::MakeTlsConnect, types::ToSql, Client, Error, NoTls, Row, Socket,
};
#[derive(serde::Deserialize, Debug, Clone)]
pub struct PostgresSessionConfig {
pub pg_config: String,
pub ssl: Option<PostgresSessionSslConfig>,
}
#[derive(serde::Deserialize, Debug, Clone)]
pub struct PostgresSessionSslConfig {
pub ca_pem_b64: String,
pub client_pks_b64: String,
pub client_pks_pass: String,
}
impl PostgresSessionConfig {
pub fn new_from_env() -> anyhow::Result<Option<Self>> {
// pg is disabled unless PG_ENABLED is set to "true"
let Ok(enable_pg) = env::var("PG_ENABLED") else {
return Ok(None);
};
if enable_pg != "true" {
return Ok(None);
}
let env_pg_config = env::var("PG_CONFIG").context("PG_CONFIG not found")?;
let ssl_config = if env_pg_config
.parse::<tokio_postgres::Config>()?
.get_ssl_mode()
.eq(&SslMode::Disable)
{
None
} else {
let env_ca_pem_b64 = env::var("CA_PEM_B64").context("CA_PEM_B64 not found")?;
let env_client_pks_b64 =
env::var("CLIENT_PKS_B64").context("CLIENT_PKS_B64 not found")?;
let env_client_pks_pass =
env::var("CLIENT_PKS_PASS").context("CLIENT_PKS_PASS not found")?;
Some(PostgresSessionSslConfig {
ca_pem_b64: env_ca_pem_b64,
client_pks_b64: env_client_pks_b64,
client_pks_pass: env_client_pks_pass,
})
};
Ok(Some(Self {
pg_config: env_pg_config,
ssl: ssl_config,
}))
}
}
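For reference, a sketch of the environment this function reads in the TLS case (all values illustrative; with sslmode=disable only PG_ENABLED and PG_CONFIG are needed):

std::env::set_var("PG_ENABLED", "true");
std::env::set_var("PG_CONFIG", "host=db.example.org user=benchrunner sslmode=require");
std::env::set_var("CA_PEM_B64", "...");     // base64 of the CA certificate (PEM)
std::env::set_var("CLIENT_PKS_B64", "..."); // base64 of the PKCS#12 client bundle
std::env::set_var("CLIENT_PKS_PASS", "...");
let config = PostgresSessionConfig::new_from_env().unwrap().expect("pg enabled");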
#[derive(Clone)]
pub struct PostgresSession {
client: Arc<Client>,
}
impl PostgresSession {
pub async fn new_from_env() -> anyhow::Result<Self> {
let pg_session_config = PostgresSessionConfig::new_from_env()
.expect("failed to start Postgres Client")
.expect("Postgres not enabled (use PG_ENABLED)");
PostgresSession::new(pg_session_config).await
}
pub async fn new(
PostgresSessionConfig { pg_config, ssl }: PostgresSessionConfig,
) -> anyhow::Result<Self> {
let pg_config = pg_config.parse::<tokio_postgres::Config>()?;
let client = if let SslMode::Disable = pg_config.get_ssl_mode() {
Self::spawn_connection(pg_config, NoTls).await?
} else {
let PostgresSessionSslConfig {
ca_pem_b64,
client_pks_b64,
client_pks_pass,
} = ssl.as_ref().unwrap();
let ca_pem = BinaryEncoding::Base64
.decode(ca_pem_b64)
.context("ca pem decode")?;
let client_pks = BinaryEncoding::Base64
.decode(client_pks_b64)
.context("client pks decode")?;
let connector = TlsConnector::builder()
.add_root_certificate(Certificate::from_pem(&ca_pem)?)
.identity(Identity::from_pkcs12(&client_pks, client_pks_pass).context("Identity")?)
.danger_accept_invalid_hostnames(true)
.danger_accept_invalid_certs(true)
.build()?;
Self::spawn_connection(pg_config, MakeTlsConnector::new(connector)).await?
};
Ok(Self {
client: Arc::new(client),
})
}
async fn spawn_connection<T>(
pg_config: tokio_postgres::Config,
connector: T,
) -> anyhow::Result<Client>
where
T: MakeTlsConnect<Socket> + Send + 'static,
<T as MakeTlsConnect<Socket>>::Stream: Send,
{
let (client, connection) = pg_config
.connect(connector)
.await
.context("Connecting to Postgres failed")?;
tokio::spawn(async move {
log::info!("Connecting to Postgres");
if let Err(err) = connection.await {
log::error!("Connection to Postgres broke: {err:?}");
return;
}
log::debug!("Postgres thread shutting down");
});
Ok(client)
}
pub fn is_closed(&self) -> bool {
self.client.is_closed()
}
pub async fn execute(
&self,
statement: &str,
params: &[&(dyn ToSql + Sync)],
) -> Result<u64, tokio_postgres::error::Error> {
self.client.execute(statement, params).await
}
// execute statements separated by semicolons
pub async fn execute_multiple(&self, statement: &str) -> Result<(), Error> {
self.client.batch_execute(statement).await
}
pub async fn execute_prepared_batch(
&self,
statement: &str,
params: &Vec<Vec<&(dyn ToSql + Sync)>>,
) -> Result<u64, Error> {
let prepared_stmt = self.client.prepare(statement).await?;
let mut total_inserted = 0;
for row in params {
let result = self.client.execute(&prepared_stmt, row).await;
total_inserted += result?;
}
Ok(total_inserted)
}
pub async fn execute_prepared(
&self,
statement: &str,
params: &[&(dyn ToSql + Sync)],
) -> Result<u64, tokio_postgres::error::Error> {
let prepared_stmt = self.client.prepare(statement).await?;
self.client.execute(&prepared_stmt, params).await
}
pub async fn execute_and_return(
&self,
statement: &str,
params: &[&(dyn ToSql + Sync)],
) -> Result<Option<Row>, Error> {
self.client.query_opt(statement, params).await
}
pub async fn query_opt(
&self,
statement: &str,
params: &[&(dyn ToSql + Sync)],
) -> Result<Option<Row>, Error> {
self.client.query_opt(statement, params).await
}
pub async fn query_one(
&self,
statement: &str,
params: &[&(dyn ToSql + Sync)],
) -> Result<Row, Error> {
self.client.query_one(statement, params).await
}
pub async fn query_list(
&self,
statement: &str,
params: &[&(dyn ToSql + Sync)],
) -> Result<Vec<Row>, Error> {
self.client.query(statement, params).await
}
}
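A usage sketch for execute_prepared_batch against a hypothetical demo table: each inner Vec is one row's parameter list, bound in turn against a single prepared statement.

use tokio_postgres::types::ToSql;

let rows: Vec<Vec<&(dyn ToSql + Sync)>> = vec![
    vec![&"tenant-a", &1i64],
    vec![&"tenant-b", &2i64],
];
let inserted = session
    .execute_prepared_batch("INSERT INTO demo (tenant, n) VALUES ($1, $2)", &rows)
    .await?;
assert_eq!(inserted, 2);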

View File

@ -0,0 +1,33 @@
use crate::postgres::postgres_session::{PostgresSession, PostgresSessionConfig};
use log::info;
use std::sync::Arc;
use tokio::sync::RwLock;
#[derive(Clone)]
pub struct PostgresSessionCache {
session: Arc<RwLock<PostgresSession>>,
config: Arc<PostgresSessionConfig>,
}
impl PostgresSessionCache {
pub async fn new(config: PostgresSessionConfig) -> anyhow::Result<Self> {
let session = PostgresSession::new(config.clone()).await?;
Ok(Self {
session: Arc::new(RwLock::new(session)),
config: Arc::new(config),
})
}
pub async fn get_session(&self) -> anyhow::Result<PostgresSession> {
let session = self.session.read().await;
if session.is_closed() {
info!("PostgreSQL session closed - reconnecting");
drop(session);
let session = PostgresSession::new(self.config.as_ref().clone()).await?;
*self.session.write().await = session.clone();
Ok(session)
} else {
Ok(session.clone())
}
}
}
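A minimal usage sketch: clones of the cache share one session behind the RwLock, and get_session reconnects transparently once the client reports closed. Note that two tasks observing a closed session concurrently may both reconnect; the last write wins, which is harmless here.

let cache = PostgresSessionCache::new(config).await?;
let session = cache.get_session().await?;
session.execute("SELECT 1", &[]).await?;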

View File

@ -0,0 +1,36 @@
use bench::metrics::Metric;
use crate::args::TenantConfig;
use bench::service_adapter::BenchConfig;
use prometheus::{opts, register_gauge_vec, register_int_gauge_vec, GaugeVec, IntGaugeVec};
// https://github.com/blockworks-foundation/lite-rpc/blob/production/bench/src/metrics.rs
lazy_static::lazy_static! {
static ref PROM_TXS_SENT: IntGaugeVec = register_int_gauge_vec!(opts!("literpc_benchrunner_txs_sent", "Total number of transactions sent"), &["tenant"]).unwrap();
static ref PROM_TXS_CONFIRMED: IntGaugeVec = register_int_gauge_vec!(opts!("literpc_benchrunner_txs_confirmed", "Number of transactions confirmed"), &["tenant"]).unwrap();
static ref PROM_TXS_UN_CONFIRMED: IntGaugeVec = register_int_gauge_vec!(opts!("literpc_benchrunner_txs_un_confirmed", "Number of transactions not confirmed"), &["tenant"]).unwrap();
static ref PROM_AVG_CONFIRM: GaugeVec = register_gauge_vec!(opts!("literpc_benchrunner_avg_confirmation_time", "Confirmation time(ms)"), &["tenant"]).unwrap();
// static ref RPC_RESPONDING: Gauge = register_gauge!(opts!("literpc_benchrunner_send_tps", "Transactions")).unwrap();
// TODO add more
}
pub async fn publish_metrics_on_prometheus(
tenant_config: &TenantConfig,
_bench_config: &BenchConfig,
metric: &Metric,
) {
let dimensions: &[&str] = &[&tenant_config.tenant_id];
PROM_TXS_SENT
.with_label_values(dimensions)
.set(metric.txs_sent as i64);
PROM_TXS_CONFIRMED
.with_label_values(dimensions)
.set(metric.txs_confirmed as i64);
PROM_TXS_UN_CONFIRMED
.with_label_values(dimensions)
.set(metric.txs_un_confirmed as i64);
PROM_AVG_CONFIRM
.with_label_values(dimensions)
.set(metric.average_confirmation_time_ms);
}
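For orientation, a scrape of port 9091 after this function has run would contain lines of roughly this shape (tenant label and values illustrative):

literpc_benchrunner_txs_sent{tenant="tenant1"} 1000
literpc_benchrunner_txs_confirmed{tenant="tenant1"} 987
literpc_benchrunner_txs_un_confirmed{tenant="tenant1"} 13
literpc_benchrunner_avg_confirmation_time{tenant="tenant1"} 2314.5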

View File

@ -0,0 +1,2 @@
pub mod metrics_prometheus;
pub mod prometheus_sync;

View File

@ -0,0 +1,58 @@
use std::time::Duration;
use log::error;
use prometheus::{Encoder, TextEncoder};
use tokio::{
io::AsyncWriteExt,
net::{TcpListener, TcpStream, ToSocketAddrs},
};
pub struct PrometheusSync;
impl PrometheusSync {
fn create_response(payload: &str) -> String {
format!(
"HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n{}",
payload.len(),
payload
)
}
async fn handle_stream(stream: &mut TcpStream) -> anyhow::Result<()> {
let mut metrics_buffer = Vec::new();
let encoder = TextEncoder::new();
let metric_families = prometheus::gather();
encoder
.encode(&metric_families, &mut metrics_buffer)
.unwrap();
let metrics_buffer = String::from_utf8(metrics_buffer).unwrap();
let response = Self::create_response(&metrics_buffer);
stream.writable().await?;
stream.write_all(response.as_bytes()).await?;
stream.flush().await?;
Ok(())
}
pub fn sync(
addr: impl ToSocketAddrs + Send + 'static,
) -> tokio::task::JoinHandle<anyhow::Result<()>> {
tokio::spawn(async move {
let listener = TcpListener::bind(addr).await?;
loop {
let Ok((mut stream, _addr)) = listener.accept().await else {
error!("Error accepting prometheus stream");
tokio::time::sleep(Duration::from_millis(1)).await;
continue;
};
let _ = Self::handle_stream(&mut stream).await;
}
})
}
}
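The responder frames its reply purely by Content-Length and sends no Content-Type header, which the Prometheus scraper accepts in practice. Ignoring that the helper is private, the framing for a two-byte payload would be:

assert_eq!(
    PrometheusSync::create_response("ok"),
    "HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nok"
);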

View File

@ -70,7 +70,7 @@ async fn storage_test() {
let (slot_notifier, _jh_multiplex_slotstream) =
create_grpc_multiplex_processed_slots_subscription(grpc_sources.clone());
let (blocks_notifier, _jh_multiplex_blockstream) =
let (blocks_notifier, _blockmeta_output_stream, _jh_multiplex_blockstream) =
create_grpc_multiplex_blocks_subscription(grpc_sources);
let (epoch_cache, _) = EpochCache::bootstrap_epoch(&rpc_client).await.unwrap();

cd/lite-rpc-accounts.toml (new file, 22 lines)

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,14 @@
app = "solana-lite-rpc-benchrunner"
kill_signal = "SIGINT"
kill_timeout = 5
primary_region = "ams"
[build]
dockerfile = "../Dockerfile-benchrunner"
[env]
RUST_LOG = "info"
[metrics]
path = "/metrics"
port = 9091

View File

@ -9,8 +9,7 @@ license = "AGPL"
[dependencies]
#geyser-grpc-connector = { path = "../../geyser-grpc-connector" }
geyser-grpc-connector = { tag = "v0.10.3+yellowstone.1.12+solana.1.17.15", git = "https://github.com/blockworks-foundation/geyser-grpc-connector.git" }
#geyser-grpc-connector = { tag = "v1.17.15", git = "https://github.com/blockworks-foundation/geyser-grpc-connector.git" }
geyser-grpc-connector = { tag = "v0.10.3+yellowstone.1.12+solana.1.17.15-hacked-windowsize3", git = "https://github.com/blockworks-foundation/geyser-grpc-connector.git" }
solana-sdk = { workspace = true }
solana-rpc-client-api = { workspace = true }
@ -47,4 +46,5 @@ yellowstone-grpc-client = { workspace = true }
yellowstone-grpc-proto = { workspace = true }
itertools = {workspace = true}
prometheus = { workspace = true }
lazy_static = { workspace = true }
tonic-health = { workspace = true }

View File

@ -1,11 +1,12 @@
use solana_lite_rpc_core::{
structures::account_data::AccountStream,
types::{BlockStream, ClusterInfoStream, SlotStream, VoteAccountStream},
types::{BlockInfoStream, BlockStream, ClusterInfoStream, SlotStream, VoteAccountStream},
};
/// subscribers to broadcast channels should assume that channels are not getting closed unless the system is shutting down
pub struct EndpointStreaming {
pub blocks_notifier: BlockStream,
pub blockinfo_notifier: BlockInfoStream,
pub slot_notifier: SlotStream,
pub vote_account_notifier: VoteAccountStream,
pub cluster_info_notifier: ClusterInfoStream,

View File

@ -11,13 +11,13 @@ use itertools::Itertools;
use solana_lite_rpc_core::{
commitment_utils::Commitment,
structures::{
account_data::{AccountData, AccountNotificationMessage, AccountStream},
account_data::{AccountData, AccountNotificationMessage},
account_filter::{AccountFilterType, AccountFilters, MemcmpFilterData},
},
AnyhowJoinHandle,
};
use solana_sdk::{account::Account, pubkey::Pubkey};
use tokio::sync::broadcast;
use tokio::sync::Notify;
use yellowstone_grpc_proto::geyser::{
subscribe_request_filter_accounts_filter::Filter,
subscribe_request_filter_accounts_filter_memcmp::Data, subscribe_update::UpdateOneof,
@ -25,10 +25,13 @@ use yellowstone_grpc_proto::geyser::{
SubscribeRequestFilterAccountsFilterMemcmp,
};
use crate::grpc::grpc_utils::connect_with_timeout_hacked;
pub fn start_account_streaming_tasks(
grpc_config: GrpcSourceConfig,
accounts_filters: AccountFilters,
account_stream_sx: tokio::sync::mpsc::UnboundedSender<AccountNotificationMessage>,
account_stream_sx: tokio::sync::broadcast::Sender<AccountNotificationMessage>,
has_started: Arc<Notify>,
) -> AnyhowJoinHandle {
tokio::spawn(async move {
'main_loop: loop {
@ -108,12 +111,11 @@ pub fn start_account_streaming_tasks(
ping: None,
};
let mut client = yellowstone_grpc_client::GeyserGrpcClient::connect(
let mut client = connect_with_timeout_hacked(
grpc_config.grpc_addr.clone(),
grpc_config.grpc_x_token.clone(),
None,
)
.unwrap();
.await?;
let account_stream = client.subscribe_once2(program_subscription).await.unwrap();
// each account subscription batch will require individual stream
@ -134,12 +136,11 @@ pub fn start_account_streaming_tasks(
filters: vec![],
},
);
let mut client = yellowstone_grpc_client::GeyserGrpcClient::connect(
let mut client = connect_with_timeout_hacked(
grpc_config.grpc_addr.clone(),
grpc_config.grpc_x_token.clone(),
None,
)
.unwrap();
.await?;
let account_request = SubscribeRequest {
accounts: accounts_subscription,
@ -159,11 +160,17 @@ pub fn start_account_streaming_tasks(
let mut merged_stream = subscriptions.merge();
while let Some(message) = merged_stream.next().await {
let message = message.unwrap();
let Ok(message) = message else {
// channel broken resubscribe
break;
};
let Some(update) = message.update_oneof else {
continue;
};
has_started.notify_one();
match update {
UpdateOneof::Account(account) => {
if let Some(account_data) = account.account {
@ -215,46 +222,50 @@ pub fn start_account_streaming_tasks(
pub fn create_grpc_account_streaming(
grpc_sources: Vec<GrpcSourceConfig>,
accounts_filters: AccountFilters,
) -> (AnyhowJoinHandle, AccountStream) {
let (account_sender, accounts_stream) = broadcast::channel::<AccountNotificationMessage>(1024);
account_stream_sx: tokio::sync::broadcast::Sender<AccountNotificationMessage>,
notify_abort: Arc<Notify>,
) -> AnyhowJoinHandle {
let jh: AnyhowJoinHandle = tokio::spawn(async move {
loop {
let (accounts_sx, mut accounts_rx) = tokio::sync::mpsc::unbounded_channel();
let jhs = grpc_sources
.iter()
.map(|grpc_config| {
start_account_streaming_tasks(
grpc_config.clone(),
accounts_filters.clone(),
accounts_sx.clone(),
account_stream_sx.clone(),
Arc::new(Notify::new()),
)
})
.collect_vec();
drop(accounts_sx);
let mut rx = account_stream_sx.subscribe();
loop {
match tokio::time::timeout(Duration::from_secs(60), accounts_rx.recv()).await {
Ok(Some(data)) => {
let _ = account_sender.send(data);
}
Ok(None) => {
log::error!("All grpc accounts channels close; restarting subscription");
break;
}
Err(_elapsed) => {
log::error!("No accounts data for a minute; restarting subscription");
tokio::select! {
data = tokio::time::timeout(Duration::from_secs(60), rx.recv()) => {
match data {
Ok(Ok(_)) => {
// do nothing / notification channel is working fine
}
Ok(Err(e)) => {
log::error!("Grpc stream failed by error : {e:?}");
break;
}
Err(_elapsed) => {
log::error!("No accounts data for a minute; restarting subscription");
break;
}
}
},
_ = notify_abort.notified() => {
log::debug!("Account stream aborted");
break;
}
}
}
for jh in jhs {
// abort previous handles
jh.abort();
}
jhs.iter().for_each(|x| x.abort());
}
});
(jh, accounts_stream)
jh
}

View File

@ -0,0 +1,38 @@
use bytes::Bytes;
use std::time::Duration;
use tonic::metadata::{errors::InvalidMetadataValue, AsciiMetadataValue};
use tonic::service::Interceptor;
use tonic::transport::ClientTlsConfig;
use tonic_health::pb::health_client::HealthClient;
use yellowstone_grpc_client::{GeyserGrpcClient, InterceptorXToken};
use yellowstone_grpc_proto::geyser::geyser_client::GeyserClient;
use yellowstone_grpc_proto::tonic;
pub async fn connect_with_timeout_hacked<E, T>(
endpoint: E,
x_token: Option<T>,
) -> anyhow::Result<GeyserGrpcClient<impl Interceptor>>
where
E: Into<Bytes>,
T: TryInto<AsciiMetadataValue, Error = InvalidMetadataValue>,
{
let endpoint = tonic::transport::Endpoint::from_shared(endpoint)?
.buffer_size(Some(65536))
.initial_connection_window_size(4194304)
.initial_stream_window_size(4194304)
.connect_timeout(Duration::from_secs(10))
.timeout(Duration::from_secs(10))
// .http2_adaptive_window()
.tls_config(ClientTlsConfig::new())?;
let x_token: Option<AsciiMetadataValue> = x_token.map(|v| v.try_into()).transpose()?;
let interceptor = InterceptorXToken { x_token };
let channel = endpoint.connect_lazy();
let client = GeyserGrpcClient::new(
HealthClient::with_interceptor(channel.clone(), interceptor.clone()),
GeyserClient::with_interceptor(channel, interceptor)
.max_decoding_message_size(GeyserGrpcClient::max_decoding_message_size()),
);
Ok(client)
}
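Usage sketch (endpoint illustrative): because the channel is built with connect_lazy(), this returns Ok immediately, and a bad endpoint only surfaces as an error on the first request or subscription.

let _client = connect_with_timeout_hacked(
    "http://127.0.0.1:10000".to_string(),
    None::<String>,
)
.await?;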

View File

@ -1 +1,2 @@
pub mod gprc_accounts_streaming;
pub mod grpc_accounts_streaming;
pub mod grpc_utils;

View File

@ -1,7 +1,5 @@
use crate::grpc_subscription::from_grpc_block_update;
use anyhow::{bail, Context};
use geyser_grpc_connector::grpc_subscription_autoreconnect_tasks::create_geyser_autoconnection_task_with_mpsc;
use geyser_grpc_connector::grpcmultiplex_fastestwins::FromYellowstoneExtractor;
use geyser_grpc_connector::{GeyserFilter, GrpcSourceConfig, Message};
use log::{debug, info, trace, warn};
use solana_lite_rpc_core::structures::produced_block::ProducedBlock;
@ -11,6 +9,7 @@ use solana_sdk::clock::Slot;
use solana_sdk::commitment_config::CommitmentConfig;
use solana_lite_rpc_core::solana_utils::hash_from_str;
use solana_lite_rpc_core::structures::block_info::BlockInfo;
use std::collections::{BTreeSet, HashMap, HashSet};
use std::time::Duration;
use tokio::sync::broadcast::Receiver;
@ -20,6 +19,8 @@ use tracing::debug_span;
use yellowstone_grpc_proto::geyser::subscribe_update::UpdateOneof;
use yellowstone_grpc_proto::geyser::SubscribeUpdate;
use crate::grpc_subscription::from_grpc_block_update;
/// connect to all sources provided using transparent autoconnection task
/// shutdown handling:
/// - the task will shut down if the receiver side of block_sender gets closed
@ -108,9 +109,9 @@ fn create_grpc_multiplex_processed_block_task(
}
// backpressure: the mpsc sender will block grpc stream until capacity is available
fn create_grpc_multiplex_block_meta_task(
fn create_grpc_multiplex_block_info_task(
grpc_sources: &Vec<GrpcSourceConfig>,
block_meta_sender: tokio::sync::mpsc::Sender<BlockMeta>,
block_info_sender: tokio::sync::mpsc::Sender<BlockInfo>,
commitment_config: CommitmentConfig,
) -> Vec<AbortHandle> {
let (autoconnect_tx, mut blocks_rx) = tokio::sync::mpsc::channel(10);
@ -133,14 +134,24 @@ fn create_grpc_multiplex_block_meta_task(
let proposed_slot = block_meta.slot;
if proposed_slot > tip {
tip = proposed_slot;
let block_meta = BlockMeta {
let block_meta = BlockInfo {
slot: proposed_slot,
block_height: block_meta
.block_height
.expect("block_height from geyser block meta")
.block_height,
blockhash: hash_from_str(&block_meta.blockhash)
.expect("valid blockhash"),
commitment_config,
block_time: block_meta
.block_time
.expect("block_time from geyser block meta")
.timestamp
as u64,
};
let send_started_at = Instant::now();
let send_result = block_meta_sender
let send_result = block_info_sender
.send(block_meta)
.await
.context("Send block to channel");
@ -187,7 +198,11 @@ fn create_grpc_multiplex_block_meta_task(
/// the channel must never be closed
pub fn create_grpc_multiplex_blocks_subscription(
grpc_sources: Vec<GrpcSourceConfig>,
) -> (Receiver<ProducedBlock>, AnyhowJoinHandle) {
) -> (
Receiver<ProducedBlock>,
Receiver<BlockInfo>,
AnyhowJoinHandle,
) {
info!("Setup grpc multiplexed blocks connection...");
if grpc_sources.is_empty() {
info!("- no grpc connection configured");
@ -197,9 +212,13 @@ pub fn create_grpc_multiplex_blocks_subscription(
}
// return value is the broadcast receiver
// must NEVER be closed form inside this method
// must NEVER be closed from inside this method
let (producedblock_sender, blocks_output_stream) =
tokio::sync::broadcast::channel::<ProducedBlock>(32);
// provide information about finalized blocks as quickly as possible
// note that the produced-block stream will most likely lag behind
let (blockinfo_sender, blockinfo_output_stream) =
tokio::sync::broadcast::channel::<BlockInfo>(32);
let mut reconnect_attempts = 0;
@ -209,10 +228,12 @@ pub fn create_grpc_multiplex_blocks_subscription(
// channels must NEVER GET CLOSED (unless full restart of multiplexer)
let (processed_block_sender, mut processed_block_reciever) =
tokio::sync::mpsc::channel::<ProducedBlock>(10); // experimental
let (block_meta_sender_confirmed, mut block_meta_reciever_confirmed) =
tokio::sync::mpsc::channel::<BlockMeta>(500);
let (block_meta_sender_finalized, mut block_meta_reciever_finalized) =
tokio::sync::mpsc::channel::<BlockMeta>(500);
let (block_info_sender_processed, mut block_info_reciever_processed) =
tokio::sync::mpsc::channel::<BlockInfo>(500);
let (block_info_sender_confirmed, mut block_info_reciever_confirmed) =
tokio::sync::mpsc::channel::<BlockInfo>(500);
let (block_info_sender_finalized, mut block_info_reciever_finalized) =
tokio::sync::mpsc::channel::<BlockInfo>(500);
let processed_block_sender = processed_block_sender.clone();
reconnect_attempts += 1;
@ -233,15 +254,22 @@ pub fn create_grpc_multiplex_blocks_subscription(
task_list.extend(processed_blocks_tasks);
// TODO apply same pattern as in create_grpc_multiplex_processed_block_task
let jh_meta_task_confirmed = create_grpc_multiplex_block_meta_task(
let jh_meta_task_processed = create_grpc_multiplex_block_info_task(
&grpc_sources,
block_meta_sender_confirmed.clone(),
block_info_sender_processed.clone(),
CommitmentConfig::processed(),
);
task_list.extend(jh_meta_task_processed);
let jh_meta_task_confirmed = create_grpc_multiplex_block_info_task(
&grpc_sources,
block_info_sender_confirmed.clone(),
CommitmentConfig::confirmed(),
);
task_list.extend(jh_meta_task_confirmed);
let jh_meta_task_finalized = create_grpc_multiplex_block_meta_task(
let jh_meta_task_finalized = create_grpc_multiplex_block_info_task(
&grpc_sources,
block_meta_sender_finalized.clone(),
block_info_sender_finalized.clone(),
CommitmentConfig::finalized(),
);
task_list.extend(jh_meta_task_finalized);
@ -258,15 +286,16 @@ pub fn create_grpc_multiplex_blocks_subscription(
let mut cleanup_without_confirmed_recv_blocks_meta: u8 = 0;
let mut cleanup_without_finalized_recv_blocks_meta: u8 = 0;
let mut confirmed_block_not_yet_processed = HashSet::<solana_sdk::hash::Hash>::new();
let mut finalized_block_not_yet_processed = HashSet::<solana_sdk::hash::Hash>::new();
// start logging errors when we receive the first finalized block
let mut startup_completed = false;
const MAX_ALLOWED_CLEANUP_WITHOUT_RECV: u8 = 12; // 12*5 = 60s without receiving data
'recv_loop: loop {
debug!("processed_block_sender: {}, block_meta_sender_confirmed: {}, block_meta_sender_finalized: {}",
debug!("channel capacities: processed_block_sender={}, block_info_sender_confirmed={}, block_info_sender_finalized={}",
processed_block_sender.capacity(),
block_meta_sender_confirmed.capacity(),
block_meta_sender_finalized.capacity()
block_info_sender_confirmed.capacity(),
block_info_sender_finalized.capacity()
);
tokio::select! {
processed_block = processed_block_reciever.recv() => {
@ -275,6 +304,11 @@ pub fn create_grpc_multiplex_blocks_subscription(
let processed_block = processed_block.expect("processed block from stream");
trace!("got processed block {} with blockhash {}",
processed_block.slot, processed_block.blockhash.clone());
if processed_block.commitment_config.is_finalized() {
last_finalized_slot = last_finalized_slot.max(processed_block.slot);
}
if let Err(e) = producedblock_sender.send(processed_block.clone()) {
warn!("produced block channel has no receivers {e:?}");
}
@ -283,15 +317,36 @@ pub fn create_grpc_multiplex_blocks_subscription(
warn!("produced block channel has no receivers while trying to send confirmed block {e:?}");
}
}
if finalized_block_not_yet_processed.remove(&processed_block.blockhash) {
if let Err(e) = producedblock_sender.send(processed_block.to_finalized_block()) {
warn!("produced block channel has no receivers while trying to send confirmed block {e:?}");
}
}
recent_processed_blocks.insert(processed_block.blockhash, processed_block);
},
meta_confirmed = block_meta_reciever_confirmed.recv() => {
blockinfo_processed = block_info_reciever_processed.recv() => {
let blockinfo_processed = blockinfo_processed.expect("processed block info from stream");
let blockhash = blockinfo_processed.blockhash;
trace!("got processed blockinfo {} with blockhash {}",
blockinfo_processed.slot, blockhash);
if let Err(e) = blockinfo_sender.send(blockinfo_processed) {
warn!("Processed blockinfo channel has no receivers {e:?}");
}
},
blockinfo_confirmed = block_info_reciever_confirmed.recv() => {
cleanup_without_confirmed_recv_blocks_meta = 0;
let meta_confirmed = meta_confirmed.expect("confirmed block meta from stream");
let blockhash = meta_confirmed.blockhash;
let blockinfo_confirmed = blockinfo_confirmed.expect("confirmed block info from stream");
let blockhash = blockinfo_confirmed.blockhash;
trace!("got confirmed blockinfo {} with blockhash {}",
blockinfo_confirmed.slot, blockhash);
if let Err(e) = blockinfo_sender.send(blockinfo_confirmed) {
warn!("Confirmed blockinfo channel has no receivers {e:?}");
}
if let Some(cached_processed_block) = recent_processed_blocks.get(&blockhash) {
let confirmed_block = cached_processed_block.to_confirmed_block();
debug!("got confirmed blockmeta {} with blockhash {}",
debug!("got confirmed blockinfo {} with blockhash {}",
confirmed_block.slot, confirmed_block.blockhash.clone());
if let Err(e) = producedblock_sender.send(confirmed_block) {
warn!("confirmed block channel has no receivers {e:?}");
@ -302,23 +357,30 @@ pub fn create_grpc_multiplex_blocks_subscription(
confirmed_block_not_yet_processed.len(), recent_processed_blocks.len());
}
},
meta_finalized = block_meta_reciever_finalized.recv() => {
blockinfo_finalized = block_info_reciever_finalized.recv() => {
cleanup_without_finalized_recv_blocks_meta = 0;
let meta_finalized = meta_finalized.expect("finalized block meta from stream");
// let _span = debug_span!("sequence_block_meta_finalized", ?meta_finalized.slot).entered();
let blockhash = meta_finalized.blockhash;
let blockinfo_finalized = blockinfo_finalized.expect("finalized block info from stream");
last_finalized_slot = last_finalized_slot.max(blockinfo_finalized.slot);
let blockhash = blockinfo_finalized.blockhash;
trace!("got finalized blockinfo {} with blockhash {}",
blockinfo_finalized.slot, blockhash);
if let Err(e) = blockinfo_sender.send(blockinfo_finalized) {
warn!("Finalized blockinfo channel has no receivers {e:?}");
}
if let Some(cached_processed_block) = recent_processed_blocks.remove(&blockhash) {
let finalized_block = cached_processed_block.to_finalized_block();
last_finalized_slot = finalized_block.slot;
startup_completed = true;
debug!("got finalized blockmeta {} with blockhash {}",
debug!("got finalized blockinfo {} with blockhash {}",
finalized_block.slot, finalized_block.blockhash.clone());
if let Err(e) = producedblock_sender.send(finalized_block) {
warn!("Finalized block channel has no receivers {e:?}");
}
} else if startup_completed {
// this warning is ok for the first few blocks after lite-rpc starts
log::warn!("finalized block meta received for blockhash {} which was never seen or already emitted", blockhash);
log::warn!("finalized blockinfo received for blockhash {} which was never seen or already emitted", blockhash);
finalized_block_not_yet_processed.insert(blockhash);
}
},
_ = cleanup_tick.tick() => {
@ -326,10 +388,10 @@ pub fn create_grpc_multiplex_blocks_subscription(
if cleanup_without_recv_full_blocks > MAX_ALLOWED_CLEANUP_WITHOUT_RECV ||
cleanup_without_confirmed_recv_blocks_meta > MAX_ALLOWED_CLEANUP_WITHOUT_RECV ||
cleanup_without_finalized_recv_blocks_meta > MAX_ALLOWED_CLEANUP_WITHOUT_RECV {
log::error!("block or block meta geyser stream stopped - restarting multiplexer ({}-{}-{})",
log::error!("block or block info geyser stream stopped - restarting multiplexer ({}-{}-{})",
cleanup_without_recv_full_blocks, cleanup_without_confirmed_recv_blocks_meta, cleanup_without_finalized_recv_blocks_meta,);
// throttle a bit
sleep(Duration::from_millis(1500)).await;
sleep(Duration::from_millis(200)).await;
break 'recv_loop;
}
cleanup_without_recv_full_blocks += 1;
@ -350,7 +412,11 @@ pub fn create_grpc_multiplex_blocks_subscription(
} // -- END reconnect loop
});
(blocks_output_stream, jh_block_emitter_task)
(
blocks_output_stream,
blockinfo_output_stream,
jh_block_emitter_task,
)
}
pub fn create_grpc_multiplex_processed_slots_subscription(
@ -435,30 +501,6 @@ pub fn create_grpc_multiplex_processed_slots_subscription(
(multiplexed_messages_rx, jh_multiplex_task)
}
#[allow(dead_code)]
struct BlockMeta {
pub slot: Slot,
pub blockhash: solana_sdk::hash::Hash,
}
struct BlockMetaExtractor(CommitmentConfig);
impl FromYellowstoneExtractor for BlockMetaExtractor {
type Target = BlockMeta;
fn map_yellowstone_update(&self, update: SubscribeUpdate) -> Option<(u64, BlockMeta)> {
match update.update_oneof {
Some(UpdateOneof::BlockMeta(block_meta)) => Some((
block_meta.slot,
BlockMeta {
slot: block_meta.slot,
blockhash: hash_from_str(&block_meta.blockhash).unwrap(),
},
)),
_ => None,
}
}
}
fn map_slot_from_yellowstone_update(update: SubscribeUpdate) -> Option<Slot> {
match update.update_oneof {
Some(UpdateOneof::Slot(update_slot_message)) => Some(update_slot_message.slot),

View File

@ -1,12 +1,16 @@
use crate::endpoint_stremers::EndpointStreaming;
use crate::grpc::gprc_accounts_streaming::create_grpc_account_streaming;
use crate::grpc::grpc_accounts_streaming::create_grpc_account_streaming;
use crate::grpc::grpc_utils::connect_with_timeout_hacked;
use crate::grpc_multiplex::{
create_grpc_multiplex_blocks_subscription, create_grpc_multiplex_processed_slots_subscription,
};
use anyhow::Context;
use futures::StreamExt;
use geyser_grpc_connector::GrpcSourceConfig;
use itertools::Itertools;
use log::trace;
use solana_client::nonblocking::rpc_client::RpcClient;
use solana_lite_rpc_core::structures::account_data::AccountNotificationMessage;
use solana_lite_rpc_core::structures::account_filter::AccountFilters;
use solana_lite_rpc_core::{
structures::produced_block::{ProducedBlock, TransactionInfo},
@ -30,8 +34,15 @@ use solana_sdk::{
};
use solana_transaction_status::{Reward, RewardType};
use std::cell::OnceCell;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::Notify;
use tracing::trace_span;
use yellowstone_grpc_client::GeyserGrpcClient;
use yellowstone_grpc_proto::geyser::subscribe_update::UpdateOneof;
use yellowstone_grpc_proto::geyser::{
CommitmentLevel, SubscribeRequestFilterBlocks, SubscribeRequestFilterSlots, SubscribeUpdateSlot,
};
use crate::rpc_polling::vote_accounts_and_cluster_info_polling::{
poll_cluster_info, poll_vote_accounts,
@ -259,6 +270,138 @@ fn map_compute_budget_instructions(message: &VersionedMessage) -> (Option<u32>,
(cu_requested, prioritization_fees)
}
// not called
pub fn create_block_processing_task(
grpc_addr: String,
grpc_x_token: Option<String>,
block_sx: async_channel::Sender<SubscribeUpdateBlock>,
commitment_level: CommitmentLevel,
) -> AnyhowJoinHandle {
tokio::spawn(async move {
loop {
let mut blocks_subs = HashMap::new();
blocks_subs.insert(
"block_client".to_string(),
SubscribeRequestFilterBlocks {
account_include: Default::default(),
include_transactions: Some(true),
include_accounts: Some(false),
include_entries: Some(false),
},
);
// connect to grpc
let mut client =
connect_with_timeout_hacked(grpc_addr.clone(), grpc_x_token.clone()).await?;
let mut stream = client
.subscribe_once(
HashMap::new(),
Default::default(),
HashMap::new(),
Default::default(),
blocks_subs,
Default::default(),
Some(commitment_level),
Default::default(),
None,
)
.await?;
while let Some(message) = stream.next().await {
let message = message?;
let Some(update) = message.update_oneof else {
continue;
};
match update {
UpdateOneof::Block(block) => {
log::trace!(
"received block, hash: {} slot: {}",
block.blockhash,
block.slot
);
block_sx
.send(block)
.await
.context("Problem sending on block channel")?;
}
UpdateOneof::Ping(_) => {
log::trace!("GRPC Ping");
}
_ => {
log::trace!("unknown GRPC notification");
}
};
}
log::error!("Grpc block subscription broken (resubscribing)");
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
}
})
}
// not used
pub fn create_slot_stream_task(
grpc_addr: String,
grpc_x_token: Option<String>,
slot_sx: async_channel::Sender<SubscribeUpdateSlot>,
commitment_level: CommitmentLevel,
) -> AnyhowJoinHandle {
tokio::spawn(async move {
loop {
let mut slots = HashMap::new();
slots.insert(
"client_slot".to_string(),
SubscribeRequestFilterSlots {
filter_by_commitment: Some(true),
},
);
// connect to grpc
let mut client =
GeyserGrpcClient::connect(grpc_addr.clone(), grpc_x_token.clone(), None)?;
let mut stream = client
.subscribe_once(
slots,
Default::default(),
HashMap::new(),
Default::default(),
HashMap::new(),
Default::default(),
Some(commitment_level),
Default::default(),
None,
)
.await?;
while let Some(message) = stream.next().await {
let message = message?;
let Some(update) = message.update_oneof else {
continue;
};
match update {
UpdateOneof::Slot(slot) => {
slot_sx
.send(slot)
.await
.context("Problem sending on block channel")?;
}
UpdateOneof::Ping(_) => {
log::trace!("GRPC Ping");
}
_ => {
log::trace!("unknown GRPC notification");
}
};
}
log::error!("Grpc block subscription broken (resubscribing)");
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
}
})
}
pub fn create_grpc_subscription(
rpc_client: Arc<RpcClient>,
grpc_sources: Vec<GrpcSourceConfig>,
@ -271,22 +414,28 @@ pub fn create_grpc_subscription(
let (slot_multiplex_channel, jh_multiplex_slotstream) =
create_grpc_multiplex_processed_slots_subscription(grpc_sources.clone());
let (block_multiplex_channel, jh_multiplex_blockstream) =
let (block_multiplex_channel, blockmeta_channel, jh_multiplex_blockstream) =
create_grpc_multiplex_blocks_subscription(grpc_sources.clone());
let cluster_info_polling = poll_cluster_info(rpc_client.clone(), cluster_info_sx);
let vote_accounts_polling = poll_vote_accounts(rpc_client.clone(), va_sx);
// accounts
if !accounts_filter.is_empty() {
let (account_jh, processed_account_stream) =
create_grpc_account_streaming(grpc_sources, accounts_filter);
let (account_sender, accounts_stream) =
tokio::sync::broadcast::channel::<AccountNotificationMessage>(1024);
let account_jh = create_grpc_account_streaming(
grpc_sources,
accounts_filter,
account_sender,
Arc::new(Notify::new()),
);
let streamers = EndpointStreaming {
blocks_notifier: block_multiplex_channel,
blockinfo_notifier: blockmeta_channel,
slot_notifier: slot_multiplex_channel,
cluster_info_notifier,
vote_account_notifier,
processed_account_stream: Some(processed_account_stream),
processed_account_stream: Some(accounts_stream),
};
let endpoint_tasks = vec![
@ -300,6 +449,7 @@ pub fn create_grpc_subscription(
} else {
let streamers = EndpointStreaming {
blocks_notifier: block_multiplex_channel,
blockinfo_notifier: blockmeta_channel,
slot_notifier: slot_multiplex_channel,
cluster_info_notifier,
vote_account_notifier,

View File

@ -16,6 +16,7 @@ pub fn create_json_rpc_polling_subscription(
) -> anyhow::Result<(EndpointStreaming, Vec<AnyhowJoinHandle>)> {
let (slot_sx, slot_notifier) = tokio::sync::broadcast::channel(16);
let (block_sx, blocks_notifier) = tokio::sync::broadcast::channel(16);
let (blockinfo_sx, blockinfo_notifier) = tokio::sync::broadcast::channel(16);
let (cluster_info_sx, cluster_info_notifier) = tokio::sync::broadcast::channel(16);
let (va_sx, vote_account_notifier) = tokio::sync::broadcast::channel(16);
// accounts are not supported with rpc polling
@ -26,6 +27,7 @@ pub fn create_json_rpc_polling_subscription(
let mut block_polling_tasks = poll_block(
rpc_client.clone(),
block_sx,
blockinfo_sx,
slot_notifier.resubscribe(),
num_parallel_tasks,
);
@ -39,6 +41,7 @@ pub fn create_json_rpc_polling_subscription(
let streamers = EndpointStreaming {
blocks_notifier,
blockinfo_notifier,
slot_notifier,
cluster_info_notifier,
vote_account_notifier,

View File

@ -1,6 +1,7 @@
use anyhow::{bail, Context};
use solana_client::nonblocking::rpc_client::RpcClient;
use solana_lite_rpc_core::solana_utils::hash_from_str;
use solana_lite_rpc_core::structures::block_info::BlockInfo;
use solana_lite_rpc_core::structures::produced_block::{ProducedBlockInner, TransactionInfo};
use solana_lite_rpc_core::{
structures::{
@ -54,6 +55,7 @@ pub async fn process_block(
pub fn poll_block(
rpc_client: Arc<RpcClient>,
block_notification_sender: Sender<ProducedBlock>,
blockinfo_notification_sender: Sender<BlockInfo>,
slot_notification: Receiver<SlotNotification>,
num_parallel_tasks: usize,
) -> Vec<AnyhowJoinHandle> {
@ -66,6 +68,7 @@ pub fn poll_block(
for _i in 0..num_parallel_tasks {
let block_notification_sender = block_notification_sender.clone();
let blockinfo_notification_sender = blockinfo_notification_sender.clone();
let rpc_client = rpc_client.clone();
let block_schedule_queue_rx = block_schedule_queue_rx.clone();
let slot_retry_queue_sx = slot_retry_queue_sx.clone();
@ -79,9 +82,13 @@ pub fn poll_block(
process_block(rpc_client.as_ref(), slot, commitment_config).await;
match processed_block {
Some(processed_block) => {
let block_info = map_block_info(&processed_block);
block_notification_sender
.send(processed_block)
.context("Processed block should be sent")?;
blockinfo_notification_sender
.send(block_info)
.context("Processed block info should be sent")?;
// schedule to get finalized commitment
if commitment_config.commitment != CommitmentLevel::Finalized {
let retry_at = tokio::time::Instant::now()
@ -332,6 +339,16 @@ pub fn from_ui_block(
ProducedBlock::new(inner, commitment_config)
}
fn map_block_info(produced_block: &ProducedBlock) -> BlockInfo {
BlockInfo {
slot: produced_block.slot,
block_height: produced_block.block_height,
blockhash: produced_block.blockhash,
commitment_config: produced_block.commitment_config,
block_time: produced_block.block_time,
}
}
#[inline]
fn calc_prioritization_fees(units: u32, additional_fee: u32) -> u64 {
(units as u64 * 1000) / additional_fee as u64
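// Worked example (values illustrative): units = 200_000 CU with additional_fee = 50
// gives (200_000 * 1000) / 50 = 4_000_000. Integer division truncates the result,
// and additional_fee == 0 would panic with a division by zero, so callers are
// expected to guard against that case.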

View File

@ -35,3 +35,7 @@ chrono = { workspace = true }
rustls = { workspace = true }
async-trait = { workspace = true }
itertools = { workspace = true }
prometheus = { workspace = true }
[dev-dependencies]
rand = "0.8.5"

View File

@ -1,6 +1,8 @@
use base64::Engine;
use serde::{Deserialize, Serialize};
// TODO moved to util
#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum BinaryEncoding {

View File

@ -40,7 +40,7 @@ pub async fn get_current_confirmed_slot(data_cache: &DataCache) -> u64 {
let commitment = CommitmentConfig::confirmed();
let BlockInformation { slot, .. } = data_cache
.block_information_store
.get_latest_block(commitment)
.get_latest_block_information(commitment)
.await;
slot
}

View File

@ -7,6 +7,7 @@ use std::sync::atomic::AtomicU64;
use std::sync::Arc;
use tokio::sync::RwLock;
use crate::structures::block_info::BlockInfo;
use crate::structures::produced_block::ProducedBlock;
use solana_sdk::hash::Hash;
@ -33,6 +34,17 @@ impl BlockInformation {
block_time: block.block_time,
}
}
pub fn from_block_info(block_info: &BlockInfo) -> Self {
BlockInformation {
slot: block_info.slot,
block_height: block_info.block_height,
last_valid_blockheight: block_info.block_height + MAX_RECENT_BLOCKHASHES as u64,
cleanup_slot: block_info.block_height + 1000,
blockhash: block_info.blockhash,
commitment_config: block_info.commitment_config,
block_time: block_info.block_time,
}
}
}
/// - Block Information Store
@ -89,7 +101,7 @@ impl BlockInformationStore {
.blockhash
}
pub async fn get_latest_block_info(
pub async fn get_latest_block_information(
&self,
commitment_config: CommitmentConfig,
) -> BlockInformation {
@ -99,13 +111,6 @@ impl BlockInformationStore {
.clone()
}
pub async fn get_latest_block(&self, commitment_config: CommitmentConfig) -> BlockInformation {
self.get_latest_block_arc(commitment_config)
.read()
.await
.clone()
}
pub async fn add_block(&self, block_info: BlockInformation) -> bool {
// save slot copy to avoid borrow issues
let slot = block_info.slot;
@ -121,10 +126,18 @@ impl BlockInformationStore {
std::sync::atomic::Ordering::Relaxed,
);
}
// check if the block has already been added with higher commitment level
match self.blocks.get_mut(&block_info.blockhash) {
Some(mut prev_block_info) => {
let should_update = match prev_block_info.commitment_config.commitment {
// update latest block
{
let latest_block = self.get_latest_block_arc(commitment_config);
if slot > latest_block.read().await.slot {
*latest_block.write().await = block_info.clone();
}
}
match self.blocks.entry(block_info.blockhash) {
dashmap::mapref::entry::Entry::Occupied(entry) => {
let should_update = match entry.get().commitment_config.commitment {
CommitmentLevel::Finalized => false, // should never update blocks of finalized commitment
CommitmentLevel::Confirmed => {
commitment_config == CommitmentConfig::finalized()
@ -134,27 +147,21 @@ impl BlockInformationStore {
|| commitment_config == CommitmentConfig::finalized()
}
};
if !should_update {
return false;
if should_update {
entry.replace_entry(block_info);
}
*prev_block_info = block_info.clone();
should_update
}
None => {
self.blocks.insert(block_info.blockhash, block_info.clone());
dashmap::mapref::entry::Entry::Vacant(entry) => {
entry.insert(block_info);
true
}
}
// update latest block
let latest_block = self.get_latest_block_arc(commitment_config);
if slot > latest_block.read().await.slot {
*latest_block.write().await = block_info;
}
true
}
pub async fn clean(&self) {
let finalized_block_information = self
.get_latest_block_info(CommitmentConfig::finalized())
.get_latest_block_information(CommitmentConfig::finalized())
.await;
let before_length = self.blocks.len();
self.blocks
@ -175,7 +182,7 @@ impl BlockInformationStore {
blockhash: &Hash,
commitment_config: CommitmentConfig,
) -> (bool, Slot) {
let latest_block = self.get_latest_block(commitment_config).await;
let latest_block = self.get_latest_block_information(commitment_config).await;
match self.blocks.get(blockhash) {
Some(block_information) => (
latest_block.block_height <= block_information.last_valid_blockheight,

View File

@ -45,7 +45,7 @@ impl DataCache {
pub async fn clean(&self, ttl_duration: std::time::Duration) {
let block_info = self
.block_information_store
.get_latest_block_info(CommitmentConfig::finalized())
.get_latest_block_information(CommitmentConfig::finalized())
.await;
self.block_information_store.clean().await;
self.txs.clean(block_info.block_height);
@ -67,7 +67,7 @@ impl DataCache {
pub async fn get_current_epoch(&self, commitment: CommitmentConfig) -> Epoch {
let BlockInformation { slot, .. } = self
.block_information_store
.get_latest_block(commitment)
.get_latest_block_information(commitment)
.await;
self.epoch_data.get_epoch_at_slot(slot)
}

View File

@ -0,0 +1,11 @@
use solana_sdk::commitment_config::CommitmentConfig;
use solana_sdk::hash::Hash;
#[derive(Clone, Debug)]
pub struct BlockInfo {
pub slot: u64,
pub block_height: u64,
pub blockhash: Hash,
pub commitment_config: CommitmentConfig,
pub block_time: u64,
}

View File

@ -60,7 +60,7 @@ impl CalculatedSchedule {
None => {
let BlockInformation { slot, .. } = data_cache
.block_information_store
.get_latest_block(commitment)
.get_latest_block_information(commitment)
.await;
slot
}

View File

@ -2,6 +2,7 @@
pub mod account_data;
pub mod account_filter;
pub mod block_info;
pub mod epoch;
pub mod identity_stakes;
pub mod leader_data;

View File

@ -1,64 +1,63 @@
use std::{
collections::{BTreeMap, VecDeque},
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
collections::{BTreeMap, HashSet, VecDeque},
sync::Arc,
};
use dashmap::DashSet;
use solana_sdk::signature::Signature;
use tokio::sync::Mutex;
use super::transaction_sent_info::SentTransactionInfo;
#[derive(Default)]
struct PrioFeeHeapData {
map: BTreeMap<u64, VecDeque<SentTransactionInfo>>,
signatures: HashSet<Signature>,
}
#[derive(Default, Clone)]
pub struct PrioritizationFeesHeap {
signatures: DashSet<Signature>,
map: Arc<Mutex<BTreeMap<u64, VecDeque<SentTransactionInfo>>>>,
min_prioritization_fees: Arc<AtomicU64>,
map: Arc<Mutex<PrioFeeHeapData>>,
max_number_of_transactions: usize,
}
impl PrioritizationFeesHeap {
pub fn new(max_number_of_transactions: usize) -> Self {
Self {
signatures: DashSet::new(),
map: Arc::new(Mutex::new(BTreeMap::new())),
min_prioritization_fees: Arc::new(AtomicU64::new(0)),
map: Arc::new(Mutex::new(PrioFeeHeapData::default())),
max_number_of_transactions,
}
}
pub async fn pop(&self) -> Option<SentTransactionInfo> {
let mut write_lock = self.map.lock().await;
if let Some(mut entry) = write_lock.last_entry() {
if let Some(element) = entry.get_mut().pop_front() {
if entry.get().is_empty() {
entry.remove();
}
self.signatures.remove(&element.signature);
return Some(element);
if let Some(mut entry) = write_lock.map.last_entry() {
let element = entry.get_mut().pop_front().unwrap();
if entry.get().is_empty() {
entry.remove();
}
write_lock.signatures.remove(&element.signature);
return Some(element);
}
None
}
pub async fn insert(&self, tx: SentTransactionInfo) {
if self.signatures.len() >= self.max_number_of_transactions {
// check if prioritization is more than prioritization in the map
if tx.prioritization_fee <= self.min_prioritization_fees.load(Ordering::Relaxed) {
return;
}
}
if self.signatures.contains(&tx.signature) {
let mut write_lock = self.map.lock().await;
if write_lock.signatures.contains(&tx.signature) {
// signature already in the list
return;
}
self.signatures.insert(tx.signature);
let mut write_lock = self.map.lock().await;
match write_lock.get_mut(&tx.prioritization_fee) {
if write_lock.signatures.len() >= self.max_number_of_transactions {
// check if prioritization is more than prioritization in the map
if tx.prioritization_fee <= *write_lock.map.first_entry().unwrap().key() {
return;
}
}
write_lock.signatures.insert(tx.signature);
match write_lock.map.get_mut(&tx.prioritization_fee) {
Some(value) => {
value.push_back(tx);
}
@ -66,19 +65,18 @@ impl PrioritizationFeesHeap {
let mut vec_d = VecDeque::new();
let prioritization_fee = tx.prioritization_fee;
vec_d.push_back(tx);
write_lock.insert(prioritization_fee, vec_d);
write_lock.map.insert(prioritization_fee, vec_d);
}
}
if self.signatures.len() > self.max_number_of_transactions {
match write_lock.first_entry() {
if write_lock.signatures.len() > self.max_number_of_transactions {
match write_lock.map.first_entry() {
Some(mut first_entry) => {
let tx_info = first_entry.get_mut().pop_front();
if let Some(tx_info) = tx_info {
self.signatures.remove(&tx_info.signature);
}
let tx_info = first_entry.get_mut().pop_front().unwrap();
if first_entry.get().is_empty() {
first_entry.remove_entry();
first_entry.remove();
}
write_lock.signatures.remove(&tx_info.signature);
}
None => {
panic!("Should not happen");
@ -89,69 +87,141 @@ impl PrioritizationFeesHeap {
pub async fn remove_expired_transactions(&self, current_blockheight: u64) {
let mut write_lock = self.map.lock().await;
for (_, entry) in write_lock.iter_mut() {
let mut cells_to_remove = vec![];
let mut signatures_to_remove = vec![];
for (p, entry) in write_lock.map.iter_mut() {
entry.retain(|x| {
let retain = x.last_valid_block_height > current_blockheight;
if !retain {
self.signatures.remove(&x.signature);
signatures_to_remove.push(x.signature);
}
retain
});
if entry.is_empty() {
cells_to_remove.push(*p);
}
}
for p in cells_to_remove {
write_lock.map.remove(&p);
}
for sig in signatures_to_remove {
write_lock.signatures.remove(&sig);
}
}
pub async fn size(&self) -> usize {
self.map.lock().await.signatures.len()
}
pub async fn clear(&self) -> usize {
let mut lk = self.map.lock().await;
lk.map.clear();
let size = lk.signatures.len();
lk.signatures.clear();
size
}
}
#[tokio::test]
pub async fn test_prioritization_heap() {
let p_heap = PrioritizationFeesHeap::new(4);
let tx_creator = |signature, prioritization_fee| SentTransactionInfo {
signature,
slot: 0,
transaction: vec![],
last_valid_block_height: 0,
prioritization_fee,
#[cfg(test)]
mod tests {
use solana_sdk::signature::Signature;
use std::time::Duration;
use crate::structures::{
prioritization_fee_heap::PrioritizationFeesHeap, transaction_sent_info::SentTransactionInfo,
};
let tx_0 = tx_creator(Signature::new_unique(), 0);
let tx_1 = tx_creator(Signature::new_unique(), 10);
let tx_2 = tx_creator(Signature::new_unique(), 100);
let tx_3 = tx_creator(Signature::new_unique(), 0);
p_heap.insert(tx_0.clone()).await;
p_heap.insert(tx_0.clone()).await;
p_heap.insert(tx_0.clone()).await;
p_heap.insert(tx_0.clone()).await;
p_heap.insert(tx_0.clone()).await;
p_heap.insert(tx_1.clone()).await;
p_heap.insert(tx_2.clone()).await;
p_heap.insert(tx_2.clone()).await;
p_heap.insert(tx_2.clone()).await;
p_heap.insert(tx_2.clone()).await;
p_heap.insert(tx_3.clone()).await;
#[tokio::test]
pub async fn test_prioritization_heap() {
let p_heap = PrioritizationFeesHeap::new(4);
let tx_creator = |signature, prioritization_fee| SentTransactionInfo {
signature,
slot: 0,
transaction: vec![],
last_valid_block_height: 0,
prioritization_fee,
};
assert_eq!(p_heap.pop().await, Some(tx_2));
assert_eq!(p_heap.pop().await, Some(tx_1));
assert_eq!(p_heap.pop().await, Some(tx_0));
assert_eq!(p_heap.pop().await, Some(tx_3));
assert_eq!(p_heap.pop().await, None);
let tx_0 = tx_creator(Signature::new_unique(), 0);
let tx_1 = tx_creator(Signature::new_unique(), 10);
let tx_2 = tx_creator(Signature::new_unique(), 100);
let tx_3 = tx_creator(Signature::new_unique(), 0);
p_heap.insert(tx_0.clone()).await;
p_heap.insert(tx_0.clone()).await;
p_heap.insert(tx_0.clone()).await;
p_heap.insert(tx_0.clone()).await;
p_heap.insert(tx_0.clone()).await;
p_heap.insert(tx_1.clone()).await;
p_heap.insert(tx_2.clone()).await;
p_heap.insert(tx_2.clone()).await;
p_heap.insert(tx_2.clone()).await;
p_heap.insert(tx_2.clone()).await;
p_heap.insert(tx_3.clone()).await;
let tx_0 = tx_creator(Signature::new_unique(), 0);
let tx_1 = tx_creator(Signature::new_unique(), 10);
let tx_2 = tx_creator(Signature::new_unique(), 100);
let tx_3 = tx_creator(Signature::new_unique(), 0);
let tx_4 = tx_creator(Signature::new_unique(), 0);
let tx_5 = tx_creator(Signature::new_unique(), 1000);
let tx_6 = tx_creator(Signature::new_unique(), 10);
p_heap.insert(tx_0.clone()).await;
p_heap.insert(tx_1.clone()).await;
p_heap.insert(tx_2.clone()).await;
p_heap.insert(tx_3.clone()).await;
p_heap.insert(tx_4.clone()).await;
p_heap.insert(tx_5.clone()).await;
p_heap.insert(tx_6.clone()).await;
assert_eq!(p_heap.pop().await, Some(tx_2));
assert_eq!(p_heap.pop().await, Some(tx_1));
assert_eq!(p_heap.pop().await, Some(tx_0));
assert_eq!(p_heap.pop().await, Some(tx_3));
assert_eq!(p_heap.pop().await, None);
assert_eq!(p_heap.pop().await, Some(tx_5));
assert_eq!(p_heap.pop().await, Some(tx_2));
assert_eq!(p_heap.pop().await, Some(tx_1));
assert_eq!(p_heap.pop().await, Some(tx_6));
assert_eq!(p_heap.pop().await, None);
let tx_0 = tx_creator(Signature::new_unique(), 0);
let tx_1 = tx_creator(Signature::new_unique(), 10);
let tx_2 = tx_creator(Signature::new_unique(), 100);
let tx_3 = tx_creator(Signature::new_unique(), 0);
let tx_4 = tx_creator(Signature::new_unique(), 0);
let tx_5 = tx_creator(Signature::new_unique(), 1000);
let tx_6 = tx_creator(Signature::new_unique(), 10);
p_heap.insert(tx_0.clone()).await;
p_heap.insert(tx_1.clone()).await;
p_heap.insert(tx_2.clone()).await;
p_heap.insert(tx_3.clone()).await;
p_heap.insert(tx_4.clone()).await;
p_heap.insert(tx_5.clone()).await;
p_heap.insert(tx_6.clone()).await;
assert_eq!(p_heap.pop().await, Some(tx_5));
assert_eq!(p_heap.pop().await, Some(tx_2));
assert_eq!(p_heap.pop().await, Some(tx_1));
assert_eq!(p_heap.pop().await, Some(tx_6));
assert_eq!(p_heap.pop().await, None);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
pub async fn test_prioritization_bench() {
let p_heap = PrioritizationFeesHeap::new(4096);
let jh = {
let p_heap = p_heap.clone();
tokio::spawn(async move {
let instant = tokio::time::Instant::now();
let mut height = 0;
while instant.elapsed() < Duration::from_secs(45) {
let burst_count = rand::random::<u64>() % 128 + 1;
for _c in 0..burst_count {
let prioritization_fee = rand::random::<u64>() % 100000;
let info = SentTransactionInfo {
signature: Signature::new_unique(),
slot: height + 1,
transaction: vec![],
last_valid_block_height: height + 10,
prioritization_fee,
};
p_heap.insert(info).await;
}
tokio::time::sleep(Duration::from_millis(1)).await;
p_heap.remove_expired_transactions(height).await;
height += 1;
}
})
};
let mut pop_count = 0;
while !jh.is_finished() {
if p_heap.pop().await.is_some() {
pop_count += 1;
}
}
println!("pop_count : {pop_count}");
}
}

View File

@ -3,13 +3,22 @@ use std::sync::Arc;
use solana_rpc_client_api::response::{RpcContactInfo, RpcVoteAccountStatus};
use tokio::sync::broadcast::Receiver;
use crate::structures::block_info::BlockInfo;
use crate::{
structures::{produced_block::ProducedBlock, slot_notification::SlotNotification},
traits::subscription_sink::SubscriptionSink,
};
// full blocks, commitment level: processed, confirmed, finalized
// note: there is no guarantee about the order
// note: there is no guarantee about the order wrt commitment level
// note: there is no guarantee about the order wrt block vs block meta
pub type BlockStream = Receiver<ProducedBlock>;
// block info (slot, blockhash, etc), commitment level: processed, confirmed, finalized
// note: there is no guarantee about the order wrt commitment level
pub type BlockInfoStream = Receiver<BlockInfo>;
pub type SlotStream = Receiver<SlotNotification>;
pub type VoteAccountStream = Receiver<RpcVoteAccountStatus>;
pub type ClusterInfoStream = Receiver<Vec<RpcContactInfo>>;
pub type SubscptionHanderSink = Arc<dyn SubscriptionSink>;
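Since all of these are plain tokio broadcast receivers, a consumer has to tolerate lag and must not assume ordering. A minimal consumer sketch (the function is illustrative, not part of this commit):
use solana_lite_rpc_core::{structures::block_info::BlockInfo, types::BlockInfoStream};
use tokio::sync::broadcast::error::RecvError;
// illustrative consumer: upsert by slot, never assume commitment order
async fn consume_block_info(mut stream: BlockInfoStream) {
    loop {
        match stream.recv().await {
            Ok(block_info) => {
                // index by block_info.slot; the same slot may arrive once
                // per commitment level, in any order
                let _ = block_info.slot;
            }
            Err(RecvError::Lagged(n)) => {
                log::warn!("lagged {} block infos - continue", n);
            }
            Err(RecvError::Closed) => break,
        }
    }
}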

View File

@ -50,6 +50,7 @@ cap = { version = "0.1.2", features = ["stats"] }
tower = "0.4.13"
hyper = { version = "0.14", features = ["server", "http1", "http2"] }
tower-http = { version = "0.4.0", features = ["full"] }
jemallocator = { workspace = true }
solana-lite-rpc-core = { workspace = true }
solana-lite-rpc-util = { workspace = true }

View File

@ -149,7 +149,7 @@ impl LiteRpcServer for LiteBridge {
let BlockInformation { slot, .. } = self
.data_cache
.block_information_store
.get_latest_block(commitment_config)
.get_latest_block_information(commitment_config)
.await;
Ok(slot)
}
@ -161,7 +161,7 @@ impl LiteRpcServer for LiteBridge {
let block_info = self
.data_cache
.block_information_store
.get_latest_block(commitment_config)
.get_latest_block_information(commitment_config)
.await;
Ok(block_info.block_height)
}
@ -189,7 +189,7 @@ impl LiteRpcServer for LiteBridge {
RPC_GET_LATEST_BLOCKHASH.inc();
let commitment_config = config
.map(|config| config.commitment.unwrap_or_default())
.map(|config| config.commitment.unwrap_or(CommitmentConfig::confirmed()))
.unwrap_or_default();
let BlockInformation {
@ -200,7 +200,7 @@ impl LiteRpcServer for LiteBridge {
} = self
.data_cache
.block_information_store
.get_latest_block(commitment_config)
.get_latest_block_information(commitment_config)
.await;
log::trace!("glb {blockhash} {slot} {block_height}");
@ -252,7 +252,7 @@ impl LiteRpcServer for LiteBridge {
let block_info = self
.data_cache
.block_information_store
.get_latest_block_info(commitment_config)
.get_latest_block_information(commitment_config)
.await;
//TODO manage transaction_count of epoch info. Currently None.
@ -294,7 +294,7 @@ impl LiteRpcServer for LiteBridge {
slot: self
.data_cache
.block_information_store
.get_latest_block_info(CommitmentConfig::finalized())
.get_latest_block_information(CommitmentConfig::finalized())
.await
.slot,
api_version: None,
@ -424,6 +424,7 @@ impl LiteRpcServer for LiteBridge {
.await;
Ok(schedule)
}
async fn get_slot_leaders(&self, start_slot: u64, limit: u64) -> RpcResult<Vec<Pubkey>> {
let epock_schedule = self.data_cache.epoch_data.get_epoch_schedule();
@ -518,10 +519,19 @@ impl LiteRpcServer for LiteBridge {
return Err(jsonrpsee::types::error::ErrorCode::InvalidParams.into());
};
if let Some(account_service) = &self.accounts_service {
let commitment = config
.as_ref()
.and_then(|x| x.commitment)
.unwrap_or_default();
let current_block_info = self
.data_cache
.block_information_store
.get_latest_block_information(commitment)
.await;
match account_service.get_account(pubkey, config).await {
Ok((slot, ui_account)) => Ok(RpcResponse {
Ok((_, ui_account)) => Ok(RpcResponse {
context: RpcResponseContext {
slot,
slot: current_block_info.slot,
api_version: None,
},
value: ui_account,
@ -555,16 +565,12 @@ impl LiteRpcServer for LiteBridge {
if let Some(account_service) = &self.accounts_service {
let mut ui_accounts = vec![];
let mut max_slot = 0;
for pubkey in pubkeys {
match account_service
.get_account(pubkey.unwrap(), config.clone())
.await
{
Ok((slot, ui_account)) => {
if slot > max_slot {
max_slot = slot;
}
Ok((_, ui_account)) => {
ui_accounts.push(ui_account);
}
Err(_) => {
@ -572,10 +578,19 @@ impl LiteRpcServer for LiteBridge {
}
}
}
let commitment = config
.as_ref()
.and_then(|x| x.commitment)
.unwrap_or_default();
let current_block_info = self
.data_cache
.block_information_store
.get_latest_block_information(commitment)
.await;
assert_eq!(ui_accounts.len(), pubkey_strs.len());
Ok(RpcResponse {
context: RpcResponseContext {
slot: max_slot,
slot: current_block_info.slot,
api_version: None,
},
value: ui_accounts,
@ -599,16 +614,26 @@ impl LiteRpcServer for LiteBridge {
.map(|value| value.with_context.unwrap_or_default())
.unwrap_or_default();
let commitment: CommitmentConfig = config
.as_ref()
.and_then(|x| x.account_config.commitment)
.unwrap_or_default();
let current_block_info = self
.data_cache
.block_information_store
.get_latest_block_information(commitment)
.await;
if let Some(account_service) = &self.accounts_service {
match account_service
.get_program_accounts(program_id, config)
.await
{
Ok((slot, ui_account)) => {
Ok((_, ui_account)) => {
if with_context {
Ok(OptionalContext::Context(RpcResponse {
context: RpcResponseContext {
slot,
slot: current_block_info.slot,
api_version: None,
},
value: ui_account,
@ -645,11 +670,22 @@ impl LiteRpcServer for LiteBridge {
commitment: x.commitment,
min_context_slot: x.min_context_slot,
});
let commitment = config
.as_ref()
.and_then(|x| x.commitment)
.unwrap_or_default();
let current_block_info = self
.data_cache
.block_information_store
.get_latest_block_information(commitment)
.await;
if let Some(account_service) = &self.accounts_service {
match account_service.get_account(pubkey, config).await {
Ok((slot, ui_account)) => Ok(RpcResponse {
Ok((_, ui_account)) => Ok(RpcResponse {
context: RpcResponseContext {
slot,
slot: current_block_info.slot,
api_version: None,
},
value: ui_account.map(|x| x.lamports).unwrap_or_default(),

View File

@ -1,10 +1,10 @@
use std::borrow::Cow;
use std::env;
use std::fmt::{Debug, Display, Formatter};
use std::net::SocketAddr;
use std::str::FromStr;
use std::{env, time::Duration};
use crate::postgres_logger;
use crate::postgres_logger::{self, PostgresSessionConfig};
use crate::{
DEFAULT_FANOUT_SIZE, DEFAULT_GRPC_ADDR, DEFAULT_RETRY_TIMEOUT, DEFAULT_RPC_ADDR,
DEFAULT_WS_ADDR, MAX_RETRIES,
@ -12,6 +12,7 @@ use crate::{
use anyhow::Context;
use clap::Parser;
use dotenv::dotenv;
use solana_lite_rpc_services::quic_connection_utils::QuicConnectionParameters;
use solana_rpc_client_api::client_error::reqwest::Url;
#[derive(Parser, Debug, Clone)]
@ -90,6 +91,9 @@ pub struct Config {
#[serde(default)]
pub enable_accounts_on_demand_accounts_service: Option<bool>,
#[serde(default)]
pub quic_connection_parameters: Option<QuicConnectionParameters>,
}
impl Config {
@ -207,9 +211,6 @@ impl Config {
.map(|x| x.parse().ok())
.unwrap_or(config.max_number_of_connection);
config.postgres =
postgres_logger::PostgresSessionConfig::new_from_env()?.or(config.postgres);
config.enable_address_lookup_tables = env::var("ENABLE_ADDRESS_LOOKUP_TABLES")
.map(|value| value.parse::<bool>().unwrap())
.ok()
@ -225,6 +226,11 @@ impl Config {
.map(|value| value.parse::<bool>().unwrap())
.ok()
.or(config.enable_accounts_on_demand_accounts_service);
config.postgres = PostgresSessionConfig::new_from_env()?.or(config.postgres);
config.quic_connection_parameters = config
.quic_connection_parameters
.or(quic_params_from_environment());
Ok(config)
}
@ -349,3 +355,46 @@ fn obfuscate_token(token: &Option<String>) -> String {
}
}
}
fn quic_params_from_environment() -> Option<QuicConnectionParameters> {
let mut quic_connection_parameters = QuicConnectionParameters::default();
quic_connection_parameters.connection_timeout = env::var("QUIC_CONNECTION_TIMEOUT_MILLIS")
.map(|millis| Duration::from_millis(millis.parse().unwrap()))
.unwrap_or(quic_connection_parameters.connection_timeout);
quic_connection_parameters.unistream_timeout = env::var("QUIC_UNISTREAM_TIMEOUT_MILLIS")
.map(|millis| Duration::from_millis(millis.parse().unwrap()))
.unwrap_or(quic_connection_parameters.unistream_timeout);
quic_connection_parameters.write_timeout = env::var("QUIC_WRITE_TIMEOUT_MILLIS")
.map(|millis| Duration::from_millis(millis.parse().unwrap()))
.unwrap_or(quic_connection_parameters.write_timeout);
quic_connection_parameters.finalize_timeout = env::var("QUIC_FINALIZE_TIMEOUT_MILLIS")
.map(|millis| Duration::from_millis(millis.parse().unwrap()))
.unwrap_or(quic_connection_parameters.finalize_timeout);
quic_connection_parameters.connection_retry_count = env::var("QUIC_CONNECTION_RETRY_COUNT")
.map(|millis| millis.parse().unwrap())
.unwrap_or(quic_connection_parameters.connection_retry_count);
quic_connection_parameters.max_number_of_connections =
env::var("QUIC_MAX_NUMBER_OF_CONNECTIONS")
.map(|millis| millis.parse().unwrap())
.unwrap_or(quic_connection_parameters.max_number_of_connections);
quic_connection_parameters.number_of_transactions_per_unistream =
env::var("QUIC_NUMBER_OF_TRANSACTIONS_PER_TASK")
.map(|millis| millis.parse().unwrap())
.unwrap_or(quic_connection_parameters.number_of_transactions_per_unistream);
quic_connection_parameters.unistreams_to_create_new_connection_in_percentage =
env::var("QUIC_PERCENTAGE_TO_CREATE_NEW_CONNECTION")
.map(|millis| millis.parse().unwrap())
.unwrap_or(
quic_connection_parameters.unistreams_to_create_new_connection_in_percentage,
);
Some(quic_connection_parameters)
}
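Note that every override above panics via parse().unwrap() on a malformed value; a non-panicking variant could look like this (hypothetical helper, not part of this commit):
use std::{env, time::Duration};
// hypothetical: fall back to the given default instead of panicking
fn env_duration_ms(key: &str, default: Duration) -> Duration {
    env::var(key)
        .ok()
        .and_then(|millis| millis.parse::<u64>().ok())
        .map(Duration::from_millis)
        .unwrap_or(default)
}
// usage sketch:
// quic.connection_timeout =
//     env_duration_ms("QUIC_CONNECTION_TIMEOUT_MILLIS", quic.connection_timeout);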

View File

@ -10,7 +10,7 @@ use lite_rpc::cli::Config;
use lite_rpc::postgres_logger::PostgresLogger;
use lite_rpc::service_spawner::ServiceSpawner;
use lite_rpc::start_server::start_servers;
use lite_rpc::{DEFAULT_MAX_NUMBER_OF_TXS_IN_QUEUE, MAX_NB_OF_CONNECTIONS_WITH_LEADERS};
use lite_rpc::DEFAULT_MAX_NUMBER_OF_TXS_IN_QUEUE;
use log::{debug, info};
use solana_lite_rpc_accounts::account_service::AccountService;
use solana_lite_rpc_accounts::account_store_interface::AccountStorageInterface;
@ -19,6 +19,10 @@ use solana_lite_rpc_accounts_on_demand::accounts_on_demand::AccountsOnDemand;
use solana_lite_rpc_address_lookup_tables::address_lookup_table_store::AddressLookupTableStore;
use solana_lite_rpc_blockstore::history::History;
use solana_lite_rpc_cluster_endpoints::endpoint_stremers::EndpointStreaming;
use solana_lite_rpc_cluster_endpoints::geyser_grpc_connector::{
GrpcConnectionTimeouts, GrpcSourceConfig,
};
use solana_lite_rpc_cluster_endpoints::grpc_inspect::{
debugtask_blockstream_confirmation_sequence, debugtask_blockstream_slot_progression,
};
@ -38,24 +42,21 @@ use solana_lite_rpc_core::structures::account_filter::AccountFilters;
use solana_lite_rpc_core::structures::leaderschedule::CalculatedSchedule;
use solana_lite_rpc_core::structures::{
epoch::EpochCache, identity_stakes::IdentityStakes, notifications::NotificationSender,
produced_block::ProducedBlock,
};
use solana_lite_rpc_core::traits::address_lookup_table_interface::AddressLookupTableInterface;
use solana_lite_rpc_core::types::BlockStream;
use solana_lite_rpc_core::types::{BlockInfoStream, BlockStream};
use solana_lite_rpc_core::AnyhowJoinHandle;
use solana_lite_rpc_prioritization_fees::account_prio_service::AccountPrioService;
use solana_lite_rpc_services::data_caching_service::DataCachingService;
use solana_lite_rpc_services::quic_connection_utils::QuicConnectionParameters;
use solana_lite_rpc_services::tpu_utils::tpu_connection_path::TpuConnectionPath;
use solana_lite_rpc_services::tpu_utils::tpu_service::{TpuService, TpuServiceConfig};
use solana_lite_rpc_services::transaction_replayer::TransactionReplayer;
use solana_lite_rpc_services::tx_sender::TxSender;
use lite_rpc::postgres_logger;
use solana_lite_rpc_cluster_endpoints::geyser_grpc_connector::{
GrpcConnectionTimeouts, GrpcSourceConfig,
};
use solana_lite_rpc_core::structures::block_info::BlockInfo;
use solana_lite_rpc_prioritization_fees::start_block_priofees_task;
use solana_lite_rpc_util::obfuscate_rpcurl;
use solana_rpc_client::nonblocking::rpc_client::RpcClient;
use solana_sdk::commitment_config::CommitmentConfig;
use solana_sdk::signature::Keypair;
@ -70,27 +71,32 @@ use tokio::time::{timeout, Instant};
use tracing_subscriber::fmt::format::FmtSpan;
use tracing_subscriber::EnvFilter;
async fn get_latest_block(
mut block_stream: BlockStream,
// jemalloc seems to be better at keeping the memory footprint reasonable over
// longer periods of time
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
async fn get_latest_block_info(
mut blockinfo_stream: BlockInfoStream,
commitment_config: CommitmentConfig,
) -> ProducedBlock {
) -> BlockInfo {
let started = Instant::now();
loop {
match timeout(Duration::from_millis(500), block_stream.recv()).await {
Ok(Ok(block)) => {
if block.commitment_config == commitment_config {
return block;
match timeout(Duration::from_millis(500), blockinfo_stream.recv()).await {
Ok(Ok(block_info)) => {
if block_info.commitment_config == commitment_config {
return block_info;
}
}
Err(_elapsed) => {
debug!(
"waiting for latest block ({}) ... {:.02}ms",
"waiting for latest block info ({}) ... {:.02}ms",
commitment_config.commitment,
started.elapsed().as_secs_f32() * 1000.0
);
}
Ok(Err(_error)) => {
panic!("Did not recv blocks");
panic!("Did not recv block info");
}
}
}
@ -136,6 +142,7 @@ pub async fn start_lite_rpc(args: Config, rpc_client: Arc<RpcClient>) -> anyhow:
address_lookup_tables_binary,
account_filters,
enable_accounts_on_demand_accounts_service,
quic_connection_parameters,
..
} = args;
@ -195,6 +202,7 @@ pub async fn start_lite_rpc(args: Config, rpc_client: Arc<RpcClient>) -> anyhow:
let EndpointStreaming {
// note: blocks_notifier will be dropped at some point
blocks_notifier,
blockinfo_notifier,
cluster_info_notifier,
slot_notifier,
vote_account_notifier,
@ -229,8 +237,10 @@ pub async fn start_lite_rpc(args: Config, rpc_client: Arc<RpcClient>) -> anyhow:
let account_service = AccountService::new(account_storage, account_notification_sender);
account_service
.process_account_stream(account_stream.resubscribe(), blocks_notifier.resubscribe());
account_service.process_account_stream(
account_stream.resubscribe(),
blockinfo_notifier.resubscribe(),
);
account_service
.populate_from_rpc(
@ -244,21 +254,24 @@ pub async fn start_lite_rpc(args: Config, rpc_client: Arc<RpcClient>) -> anyhow:
None
};
info!("Waiting for first finalized block...");
let finalized_block =
get_latest_block(blocks_notifier.resubscribe(), CommitmentConfig::finalized()).await;
info!("Got finalized block: {:?}", finalized_block.slot);
info!("Waiting for first finalized block info...");
let finalized_block_info = get_latest_block_info(
blockinfo_notifier.resubscribe(),
CommitmentConfig::finalized(),
)
.await;
info!("Got finalized block info: {:?}", finalized_block_info.slot);
let (epoch_data, _current_epoch_info) = EpochCache::bootstrap_epoch(&rpc_client).await?;
let block_information_store =
BlockInformationStore::new(BlockInformation::from_block(&finalized_block));
BlockInformationStore::new(BlockInformation::from_block_info(&finalized_block_info));
let data_cache = DataCache {
block_information_store,
cluster_info: ClusterInfo::default(),
identity_stakes: IdentityStakes::new(validator_identity.pubkey()),
slot_cache: SlotCache::new(finalized_block.slot),
slot_cache: SlotCache::new(finalized_block_info.slot),
tx_subs: SubscriptionStore::default(),
txs: TxStore {
store: Arc::new(DashMap::new()),
@ -275,6 +288,7 @@ pub async fn start_lite_rpc(args: Config, rpc_client: Arc<RpcClient>) -> anyhow:
// to avoid lagging we resubscribe to the block notification
let data_caching_service = data_cache_service.listen(
blocks_notifier.resubscribe(),
blockinfo_notifier.resubscribe(),
slot_notifier.resubscribe(),
cluster_info_notifier,
vote_account_notifier,
@ -320,17 +334,7 @@ pub async fn start_lite_rpc(args: Config, rpc_client: Arc<RpcClient>) -> anyhow:
let tpu_config = TpuServiceConfig {
fanout_slots: fanout_size,
maximum_transaction_in_queue: 20000,
quic_connection_params: QuicConnectionParameters {
connection_timeout: Duration::from_secs(1),
connection_retry_count: 10,
finalize_timeout: Duration::from_millis(1000),
max_number_of_connections: args
.max_number_of_connection
.unwrap_or(MAX_NB_OF_CONNECTIONS_WITH_LEADERS),
unistream_timeout: Duration::from_millis(500),
write_timeout: Duration::from_secs(1),
number_of_transactions_per_unistream: 1,
},
quic_connection_params: quic_connection_parameters.unwrap_or_default(),
tpu_connection_path,
};
@ -387,6 +391,7 @@ pub async fn start_lite_rpc(args: Config, rpc_client: Arc<RpcClient>) -> anyhow:
pubsub_service,
lite_rpc_ws_addr,
lite_rpc_http_addr,
None,
));
drop(slot_notifier);
@ -436,7 +441,7 @@ fn setup_grpc_stream_debugging(blocks_notifier: &BlockStream) {
debugtask_blockstream_confirmation_sequence(blocks_notifier.resubscribe());
}
#[tokio::main(flavor = "multi_thread", worker_threads = 16)]
#[tokio::main()]
pub async fn main() -> anyhow::Result<()> {
setup_tracing_subscriber();
@ -496,14 +501,6 @@ fn parse_host_port(host_port: &str) -> Result<SocketAddr, String> {
}
}
// http://mango.rpcpool.com/c232ab232ba2323
fn obfuscate_rpcurl(rpc_addr: &str) -> String {
if rpc_addr.contains("rpcpool.com") {
return rpc_addr.replacen(char::is_numeric, "X", 99);
}
rpc_addr.to_string()
}
fn setup_tracing_subscriber() {
let enable_instrument_tracing = std::env::var("ENABLE_INSTRUMENT_TRACING")
.unwrap_or("false".to_string())

View File

@ -65,7 +65,7 @@ impl PostgresSession {
.context("Connecting to Postgres failed")?;
tokio::spawn(async move {
log::info!("Connecting to Postgres");
log::debug!("Connecting to Postgres");
if let Err(err) = connection.await {
log::error!("Connection to Postgres broke {err:?}");

View File

@ -1,3 +1,4 @@
use solana_lite_rpc_core::types::BlockInfoStream;
use solana_lite_rpc_core::{
stores::data_cache::DataCache,
structures::notifications::NotificationSender,
@ -14,6 +15,7 @@ use solana_lite_rpc_services::{
tx_sender::TxSender,
};
use std::time::Duration;
pub struct ServiceSpawner {
pub prometheus_addr: String,
pub data_cache: DataCache,
@ -38,9 +40,11 @@ impl ServiceSpawner {
}
}
pub async fn spawn_data_caching_service(
// TODO remove
pub async fn _spawn_data_caching_service(
&self,
block_notifier: BlockStream,
blockinfo_notifier: BlockInfoStream,
slot_notification: SlotStream,
cluster_info_notification: ClusterInfoStream,
va_notification: VoteAccountStream,
@ -52,6 +56,7 @@ impl ServiceSpawner {
data_service.listen(
block_notifier,
blockinfo_notifier,
slot_notification,
cluster_info_notification,
va_notification,

View File

@ -9,14 +9,35 @@ use solana_lite_rpc_core::AnyhowJoinHandle;
use std::time::Duration;
use tower_http::cors::{Any, CorsLayer};
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct ServerConfiguration {
pub max_request_body_size: u32,
pub max_response_body_size: u32,
pub max_connection: u32,
}
impl Default for ServerConfiguration {
fn default() -> Self {
Self {
max_request_body_size: 50 * (1 << 10), // 50 KiB
max_response_body_size: 500_000 * (1 << 10), // ~500 MB response size
max_connection: 1000000,
}
}
}
pub async fn start_servers(
rpc: LiteBridge,
pubsub: LitePubSubBridge,
ws_addr: String,
http_addr: String,
server_configuration: Option<ServerConfiguration>,
) -> anyhow::Result<()> {
let rpc = rpc.into_rpc();
let pubsub = pubsub.into_rpc();
let server_configuration = server_configuration.unwrap_or_default();
let ws_server_handle = ServerBuilder::default()
.ws_only()
@ -36,6 +57,9 @@ pub async fn start_servers(
let http_server_handle = ServerBuilder::default()
.set_middleware(middleware)
.max_connections(server_configuration.max_connection)
.max_request_body_size(server_configuration.max_request_body_size)
.max_response_body_size(server_configuration.max_response_body_size)
.http_only()
.build(http_addr.clone())
.await?

View File

@ -0,0 +1,26 @@
CREATE SCHEMA benchrunner;
CREATE TABLE benchrunner.bench_metrics (
tenant text NOT NULL,
ts timestamp NOT NULL,
prio_fees int8 NOT NULL,
txs_sent int8 NOT NULL,
txs_confirmed int8 NOT NULL,
txs_un_confirmed int8 NOT NULL,
average_confirmation_time_ms real NOT NULL,
metric_json jsonb NOT NULL,
PRIMARY KEY (tenant, ts)
);
CREATE TABLE benchrunner.bench_runs (
tenant text NOT NULL,
ts timestamp NOT NULL,
status text NOT NULL,
PRIMARY KEY (tenant, ts)
);
GRANT USAGE ON SCHEMA benchrunner TO r_benchrunner;
GRANT SELECT, INSERT, UPDATE ON ALL TABLES IN SCHEMA benchrunner TO r_benchrunner;
GRANT USAGE ON SCHEMA benchrunner TO ro_benchrunner;
GRANT SELECT ON ALL TABLES IN SCHEMA benchrunner TO ro_benchrunner;

View File

@ -21,3 +21,11 @@ CREATE ROLE ro_literpc;
GRANT ro_literpc TO literpc_app;
GRANT CONNECT ON DATABASE literpc_integrationtest TO ro_literpc; -- TODO adjust database name
-- required for benchrunner-service
CREATE ROLE r_benchrunner;
CREATE ROLE ro_benchrunner;
GRANT ro_benchrunner TO r_benchrunner;
GRANT r_benchrunner TO literpc_app;
GRANT ro_benchrunner TO literpc_app;

19
openssl-legacy.cnf Normal file
View File

@ -0,0 +1,19 @@
# Extend the default debian openssl config
# see https://gist.github.com/tothi/392dbb008ae0b60d25cfa4447bc21121
# fixes "Global default library context, Algorithm (RC2-40-CBC : 0)"
.include = /etc/ssl/openssl.cnf
openssl_conf = openssl_init
[openssl_init]
providers = provider_sect
[provider_sect]
default = default_sect
legacy = legacy_sect
[default_sect]
activate = 1
[legacy_sect]
activate = 1

View File

@ -60,6 +60,7 @@ const QUIC_CONNECTION_PARAMS: QuicConnectionParameters = QuicConnectionParameter
unistream_timeout: Duration::from_secs(2),
write_timeout: Duration::from_secs(2),
number_of_transactions_per_unistream: 10,
unistreams_to_create_new_connection_in_percentage: 10,
};
#[test]

View File

@ -7,12 +7,14 @@ use prometheus::{opts, register_int_counter, register_int_gauge, IntCounter};
use solana_lite_rpc_core::stores::{
block_information_store::BlockInformation, data_cache::DataCache,
};
use solana_lite_rpc_core::structures::block_info::BlockInfo;
use solana_lite_rpc_core::types::{BlockStream, ClusterInfoStream, SlotStream, VoteAccountStream};
use solana_lite_rpc_core::AnyhowJoinHandle;
use solana_sdk::clock::MAX_RECENT_BLOCKHASHES;
use solana_sdk::commitment_config::CommitmentLevel;
use solana_transaction_status::{TransactionConfirmationStatus, TransactionStatus};
use tokio::sync::broadcast::error::RecvError;
use tokio::sync::broadcast::Receiver;
lazy_static::lazy_static! {
static ref NB_CLUSTER_NODES: GenericGauge<prometheus::core::AtomicI64> =
@ -43,13 +45,15 @@ impl DataCachingService {
pub fn listen(
self,
block_notifier: BlockStream,
blockinfo_notifier: Receiver<BlockInfo>,
slot_notification: SlotStream,
cluster_info_notification: ClusterInfoStream,
va_notification: VoteAccountStream,
) -> Vec<AnyhowJoinHandle> {
// clone the ledger to move into the processor task
let data_cache = self.data_cache.clone();
// process all the data into the ledger
let block_information_store_block = data_cache.block_information_store.clone();
let block_information_store_block_info = data_cache.block_information_store.clone();
let block_cache_jh = tokio::spawn(async move {
let mut block_notifier = block_notifier;
loop {
@ -64,8 +68,8 @@ impl DataCachingService {
}
};
data_cache
.block_information_store
// note: most likely the block has already been added via the blockinfo_notifier stream
block_information_store_block
.add_block(BlockInformation::from_block(&block))
.await;
@ -76,9 +80,8 @@ impl DataCachingService {
};
for tx in &block.transactions {
let block_info = data_cache
.block_information_store
.get_block_info(&tx.recent_blockhash);
let block_info =
block_information_store_block.get_block_info(&tx.recent_blockhash);
let last_valid_blockheight = if let Some(block_info) = block_info {
block_info.last_valid_blockheight
} else {
@ -118,6 +121,26 @@ impl DataCachingService {
}
});
let blockinfo_cache_jh = tokio::spawn(async move {
let mut blockinfo_notifier = blockinfo_notifier;
loop {
let block_info = match blockinfo_notifier.recv().await {
Ok(block_info) => block_info,
Err(RecvError::Lagged(blockinfo_lagged)) => {
warn!("Lagged {} block info - continue", blockinfo_lagged);
continue;
}
Err(RecvError::Closed) => {
bail!("BlockInfo stream has been closed - abort");
}
};
block_information_store_block_info
.add_block(BlockInformation::from_block_info(&block_info))
.await;
}
});
let data_cache = self.data_cache.clone();
let slot_cache_jh = tokio::spawn(async move {
let mut slot_notification = slot_notification;
@ -174,6 +197,7 @@ impl DataCachingService {
vec![
slot_cache_jh,
block_cache_jh,
blockinfo_cache_jh,
cluster_info_jh,
identity_stakes_jh,
cleaning_service,

View File

@ -110,8 +110,14 @@ impl QuicConnection {
}
None => {
NB_QUIC_CONNECTION_REQUESTED.inc();
// so that only one instance is connecting
let mut lk = self.connection.write().await;
if lk.is_some() {
// connection has recently been established, just use it
return (*lk).clone();
}
let connection = self.connect(false).await;
*self.connection.write().await = connection.clone();
*lk = connection.clone();
self.has_connected_once.store(true, Ordering::Relaxed);
connection
}
@ -211,7 +217,7 @@ pub struct QuicConnectionPool {
// counting semaphore is ideal way to manage backpressure on the connection
// because a connection can create only N unistream connections
transactions_in_sending_semaphore: Vec<Arc<Semaphore>>,
permit_threshold: usize,
threshold_to_create_new_connection: usize,
}
pub struct PooledConnection {
@ -250,8 +256,11 @@ impl QuicConnectionPool {
});
v
},
permit_threshold: max_number_of_unistream_connection
.saturating_mul(90)
threshold_to_create_new_connection: max_number_of_unistream_connection
.saturating_mul(std::cmp::min(
connection_parameters.unistreams_to_create_new_connection_in_percentage,
100,
) as usize)
.saturating_div(100),
}
}
@ -263,7 +272,7 @@ impl QuicConnectionPool {
if !connection.has_connected_atleast_once()
|| (connection.is_connected().await
&& sem.available_permits() > self.permit_threshold)
&& sem.available_permits() > self.threshold_to_create_new_connection)
{
// use this connection if it has not yet connected even once, or if it is still open
if let Ok(permit) = sem.clone().try_acquire_owned() {
@ -286,9 +295,6 @@ impl QuicConnectionPool {
let (permit, index) = self.get_permit_and_index().await?;
// establish a connection if the connection has not yet been used
let connection = self.connections[index].clone();
if !connection.has_connected_atleast_once() {
connection.get_connection().await;
}
Ok(PooledConnection { connection, permit })
}

View File

@ -1,9 +1,12 @@
use log::trace;
use prometheus::{core::GenericGauge, opts, register_int_gauge};
use prometheus::{
core::GenericGauge, histogram_opts, opts, register_histogram, register_int_gauge, Histogram,
};
use quinn::{
ClientConfig, Connection, ConnectionError, Endpoint, EndpointConfig, IdleTimeout, SendStream,
TokioRuntime, TransportConfig, VarInt,
};
use serde::{Deserialize, Serialize};
use solana_lite_rpc_core::network_utils::apply_gso_workaround;
use solana_sdk::pubkey::Pubkey;
use std::{
@ -17,6 +20,19 @@ use std::{
use tokio::time::timeout;
lazy_static::lazy_static! {
static ref NB_QUIC_0RTT_ATTEMPTED: GenericGauge<prometheus::core::AtomicI64> =
register_int_gauge!(opts!("literpc_quic_0RTT_attempted", "Number of times 0RTT attempted")).unwrap();
static ref NB_QUIC_CONN_ATTEMPTED: GenericGauge<prometheus::core::AtomicI64> =
register_int_gauge!(opts!("literpc_quic_connection_attempted", "Number of times conn attempted")).unwrap();
static ref NB_QUIC_0RTT_SUCCESSFUL: GenericGauge<prometheus::core::AtomicI64> =
register_int_gauge!(opts!("literpc_quic_0RTT_successful", "Number of times 0RTT successful")).unwrap();
static ref NB_QUIC_0RTT_FALLBACK_SUCCESSFUL: GenericGauge<prometheus::core::AtomicI64> =
register_int_gauge!(opts!("literpc_quic_0RTT_fallback_successful", "Number of times 0RTT successfully fallback to connection")).unwrap();
static ref NB_QUIC_0RTT_FALLBACK_UNSUCCESSFUL: GenericGauge<prometheus::core::AtomicI64> =
register_int_gauge!(opts!("literpc_quic_0RTT_fallback_unsuccessful", "Number of times 0RTT unsuccessfully fallback to connection")).unwrap();
static ref NB_QUIC_CONN_SUCCESSFUL: GenericGauge<prometheus::core::AtomicI64> =
register_int_gauge!(opts!("literpc_quic_connection_successful", "Number of times conn successful")).unwrap();
static ref NB_QUIC_0RTT_TIMEOUT: GenericGauge<prometheus::core::AtomicI64> =
register_int_gauge!(opts!("literpc_quic_0RTT_timedout", "Number of times 0RTT timedout")).unwrap();
static ref NB_QUIC_CONNECTION_TIMEOUT: GenericGauge<prometheus::core::AtomicI64> =
@ -31,6 +47,26 @@ lazy_static::lazy_static! {
register_int_gauge!(opts!("literpc_quic_finish_timedout", "Number of times finish timedout")).unwrap();
static ref NB_QUIC_FINISH_ERRORED: GenericGauge<prometheus::core::AtomicI64> =
register_int_gauge!(opts!("literpc_quic_finish_errored", "Number of times finish errored")).unwrap();
static ref NB_QUIC_CONNECTIONS: GenericGauge<prometheus::core::AtomicI64> =
register_int_gauge!(opts!("literpc_nb_active_quic_connections", "Number of quic connections open")).unwrap();
static ref TIME_OF_CONNECT: Histogram = register_histogram!(histogram_opts!(
"literpc_quic_connection_timer_histogram",
"Time to connect to the TPU port",
))
.unwrap();
static ref TIME_TO_WRITE: Histogram = register_histogram!(histogram_opts!(
"literpc_quic_write_timer_histogram",
"Time to write on the TPU port",
))
.unwrap();
static ref TIME_TO_FINISH: Histogram = register_histogram!(histogram_opts!(
"literpc_quic_finish_timer_histogram",
"Time to finish on the TPU port",
))
.unwrap();
}
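The histograms above use the prometheus crate's default buckets; histogram_opts! also accepts explicit buckets when finer resolution is wanted (name and values below are illustrative, not part of this commit):
lazy_static::lazy_static! {
    static ref TIME_OF_CONNECT_FINE: Histogram = register_histogram!(histogram_opts!(
        "literpc_quic_connection_timer_histogram_fine",
        "Time to connect to the TPU port (custom buckets)",
        vec![0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0]
    ))
    .unwrap();
}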
const ALPN_TPU_PROTOCOL_ID: &[u8] = b"solana-tpu";
@ -40,7 +76,7 @@ pub enum QuicConnectionError {
ConnectionError { retry: bool },
}
#[derive(Clone, Copy)]
#[derive(Clone, Copy, Serialize, Deserialize, Debug)]
pub struct QuicConnectionParameters {
pub connection_timeout: Duration,
pub unistream_timeout: Duration,
@ -49,6 +85,22 @@ pub struct QuicConnectionParameters {
pub connection_retry_count: usize,
pub max_number_of_connections: usize,
pub number_of_transactions_per_unistream: usize,
pub unistreams_to_create_new_connection_in_percentage: u8,
}
impl Default for QuicConnectionParameters {
fn default() -> Self {
Self {
connection_timeout: Duration::from_millis(10000),
unistream_timeout: Duration::from_millis(10000),
write_timeout: Duration::from_millis(10000),
finalize_timeout: Duration::from_millis(10000),
connection_retry_count: 20,
max_number_of_connections: 8,
number_of_transactions_per_unistream: 1,
unistreams_to_create_new_connection_in_percentage: 10,
}
}
}
pub struct QuicConnectionUtils {}
@ -107,10 +159,15 @@ impl QuicConnectionUtils {
addr: SocketAddr,
connection_timeout: Duration,
) -> anyhow::Result<Connection> {
let timer = TIME_OF_CONNECT.start_timer();
let connecting = endpoint.connect(addr, "connect")?;
match timeout(connection_timeout, connecting).await {
Ok(res) => match res {
Ok(connection) => Ok(connection),
Ok(connection) => {
timer.observe_duration();
NB_QUIC_CONN_SUCCESSFUL.inc();
Ok(connection)
}
Err(e) => {
NB_QUIC_CONNECTION_ERRORED.inc();
Err(e.into())
@ -133,6 +190,7 @@ impl QuicConnectionUtils {
let connection = match connecting.into_0rtt() {
Ok((connection, zero_rtt)) => {
if (timeout(connection_timeout, zero_rtt).await).is_ok() {
NB_QUIC_0RTT_SUCCESSFUL.inc();
connection
} else {
NB_QUIC_0RTT_TIMEOUT.inc();
@ -142,7 +200,9 @@ impl QuicConnectionUtils {
Err(connecting) => {
if let Ok(connecting_result) = timeout(connection_timeout, connecting).await {
if connecting_result.is_err() {
NB_QUIC_CONNECTION_ERRORED.inc();
NB_QUIC_0RTT_FALLBACK_UNSUCCESSFUL.inc();
} else {
NB_QUIC_0RTT_FALLBACK_SUCCESSFUL.inc();
}
connecting_result?
} else {
@ -166,12 +226,15 @@ impl QuicConnectionUtils {
) -> Option<Connection> {
for _ in 0..connection_retry_count {
let conn = if already_connected {
NB_QUIC_0RTT_ATTEMPTED.inc();
Self::make_connection_0rtt(endpoint.clone(), addr, connection_timeout).await
} else {
NB_QUIC_CONN_ATTEMPTED.inc();
Self::make_connection(endpoint.clone(), addr, connection_timeout).await
};
match conn {
Ok(conn) => {
NB_QUIC_CONNECTIONS.inc();
return Some(conn);
}
Err(e) => {
@ -191,6 +254,7 @@ impl QuicConnectionUtils {
identity: Pubkey,
connection_params: QuicConnectionParameters,
) -> Result<(), QuicConnectionError> {
let timer = TIME_TO_WRITE.start_timer();
let write_timeout_res = timeout(
connection_params.write_timeout,
send_stream.write_all(tx.as_slice()),
@ -206,6 +270,8 @@ impl QuicConnectionUtils {
);
NB_QUIC_WRITEALL_ERRORED.inc();
return Err(QuicConnectionError::ConnectionError { retry: true });
} else {
timer.observe_duration();
}
}
Err(_) => {
@ -215,6 +281,7 @@ impl QuicConnectionUtils {
}
}
let timer: prometheus::HistogramTimer = TIME_TO_FINISH.start_timer();
let finish_timeout_res =
timeout(connection_params.finalize_timeout, send_stream.finish()).await;
match finish_timeout_res {
@ -227,6 +294,8 @@ impl QuicConnectionUtils {
);
NB_QUIC_FINISH_ERRORED.inc();
return Err(QuicConnectionError::ConnectionError { retry: false });
} else {
timer.observe_duration();
}
}
Err(_) => {

View File

@ -33,8 +33,6 @@ use crate::{
};
lazy_static::lazy_static! {
static ref NB_QUIC_CONNECTIONS: GenericGauge<prometheus::core::AtomicI64> =
register_int_gauge!(opts!("literpc_nb_active_quic_connections", "Number of quic connections open")).unwrap();
static ref NB_QUIC_ACTIVE_CONNECTIONS: GenericGauge<prometheus::core::AtomicI64> =
register_int_gauge!(opts!("literpc_nb_active_connections", "Number quic tasks that are running")).unwrap();
static ref NB_CONNECTIONS_TO_KEEP: GenericGauge<prometheus::core::AtomicI64> =
@ -46,6 +44,9 @@ lazy_static::lazy_static! {
"Time to send transaction batch",
))
.unwrap();
static ref TRANSACTIONS_IN_HEAP: GenericGauge<prometheus::core::AtomicI64> =
register_int_gauge!(opts!("literpc_transactions_in_priority_heap", "Number of transactions in priority heap")).unwrap();
}
#[derive(Clone)]
@ -84,19 +85,41 @@ impl ActiveConnection {
addr: SocketAddr,
identity_stakes: IdentityStakesData,
) {
let priorization_heap = PrioritizationFeesHeap::new(2048);
let fill_notify = Arc::new(Notify::new());
let identity = self.identity;
NB_QUIC_ACTIVE_CONNECTIONS.inc();
let max_number_of_connections = self.connection_parameters.max_number_of_connections;
let max_uni_stream_connections = compute_max_allowed_uni_streams(
identity_stakes.peer_type,
identity_stakes.stakes,
identity_stakes.total_stakes,
);
let exit_signal = self.exit_signal.clone();
let connection_pool = QuicConnectionPool::new(
identity,
self.endpoints.clone(),
addr,
self.connection_parameters,
exit_signal.clone(),
max_number_of_connections,
max_uni_stream_connections,
);
let priorization_heap = PrioritizationFeesHeap::new(2 * max_uni_stream_connections);
let heap_filler_task = {
let priorization_heap = priorization_heap.clone();
let data_cache = self.data_cache.clone();
let fill_notify = fill_notify.clone();
let exit_signal = exit_signal.clone();
tokio::spawn(async move {
let mut current_blockheight =
data_cache.block_information_store.get_last_blockheight();
loop {
while !exit_signal.load(Ordering::Relaxed) {
let tx = transaction_reciever.recv().await;
match tx {
Ok(transaction_sent_info) => {
@ -108,6 +131,8 @@ impl ActiveConnection {
}
priorization_heap.insert(transaction_sent_info).await;
TRANSACTIONS_IN_HEAP.inc();
fill_notify.notify_one();
// give a little more priority to reading the transaction sender with this wait
let last_blockheight =
@ -134,25 +159,15 @@ impl ActiveConnection {
})
};
NB_QUIC_ACTIVE_CONNECTIONS.inc();
let max_number_of_connections = self.connection_parameters.max_number_of_connections;
let max_uni_stream_connections = compute_max_allowed_uni_streams(
identity_stakes.peer_type,
identity_stakes.stakes,
identity_stakes.total_stakes,
);
let exit_signal = self.exit_signal.clone();
let connection_pool = QuicConnectionPool::new(
identity,
self.endpoints.clone(),
addr,
self.connection_parameters,
exit_signal.clone(),
max_number_of_connections,
max_uni_stream_connections,
);
// create at least one connection before waiting for transactions
if let Ok(PooledConnection { connection, permit }) =
connection_pool.get_pooled_connection().await
{
tokio::task::spawn(async move {
let _permit = permit;
connection.get_connection().await;
});
}
'main_loop: loop {
// exit signal set
@ -173,6 +188,7 @@ impl ActiveConnection {
// wait to get notification from fill event
break;
};
TRANSACTIONS_IN_HEAP.dec();
// check if transaction is already confirmed
if self.data_cache.txs.is_transaction_confirmed(&tx.signature) {
@ -193,28 +209,32 @@ impl ActiveConnection {
tokio::spawn(async move {
// permit will be used to send all the transaction and then destroyed
let _permit = permit;
let timer = TT_SENT_TIMER.start_timer();
NB_QUIC_TASKS.inc();
connection.send_transaction(tx.transaction).await;
timer.observe_duration();
NB_QUIC_TASKS.dec();
});
}
},
_ = exit_notifier.notified() => {
// notified to exit
break;
break 'main_loop;
}
}
}
heap_filler_task.abort();
NB_QUIC_CONNECTIONS.dec();
let elements_removed = priorization_heap.clear().await;
TRANSACTIONS_IN_HEAP.sub(elements_removed as i64);
NB_QUIC_ACTIVE_CONNECTIONS.dec();
}
pub fn start_listening(
&self,
transaction_reciever: Receiver<SentTransactionInfo>,
exit_notifier: Arc<tokio::sync::Notify>,
exit_notifier: Arc<Notify>,
identity_stakes: IdentityStakesData,
) {
let addr = self.tpu_address;
@ -226,14 +246,14 @@ impl ActiveConnection {
}
}
struct ActiveConnectionWithExitChannel {
struct ActiveConnectionWithExitNotifier {
pub active_connection: ActiveConnection,
pub exit_notifier: Arc<tokio::sync::Notify>,
pub exit_notifier: Arc<Notify>,
}
pub struct TpuConnectionManager {
endpoints: RotatingQueue<Endpoint>,
identity_to_active_connection: Arc<DashMap<Pubkey, Arc<ActiveConnectionWithExitChannel>>>,
identity_to_active_connection: Arc<DashMap<Pubkey, Arc<ActiveConnectionWithExitNotifier>>>,
}
impl TpuConnectionManager {
@ -271,7 +291,7 @@ impl TpuConnectionManager {
connection_parameters,
);
// using a Notify instead of a oneshot channel, because a oneshot receiver cannot be reused
let exit_notifier = Arc::new(tokio::sync::Notify::new());
let exit_notifier = Arc::new(Notify::new());
let broadcast_receiver = broadcast_sender.subscribe();
active_connection.start_listening(
@ -281,7 +301,7 @@ impl TpuConnectionManager {
);
self.identity_to_active_connection.insert(
*identity,
Arc::new(ActiveConnectionWithExitChannel {
Arc::new(ActiveConnectionWithExitNotifier {
active_connection,
exit_notifier,
}),
@ -290,22 +310,19 @@ impl TpuConnectionManager {
}
// remove connections which are no longer needed
let collect_current_active_connections = self
.identity_to_active_connection
.iter()
.map(|x| (*x.key(), x.value().clone()))
.collect::<Vec<_>>();
for (identity, value) in collect_current_active_connections.iter() {
if !connections_to_keep.contains_key(identity) {
trace!("removing a connection for {}", identity);
self.identity_to_active_connection.retain(|key, value| {
if !connections_to_keep.contains_key(key) {
trace!("removing a connection for {}", key.to_string());
// ignore error for exit channel
value
.active_connection
.exit_signal
.store(true, Ordering::Relaxed);
value.exit_notifier.notify_one();
self.identity_to_active_connection.remove(identity);
false
} else {
true
}
}
});
}
}

View File

@ -9,6 +9,7 @@ use crate::{
tx_sender::TxSender,
};
use anyhow::bail;
use prometheus::{histogram_opts, register_histogram, Histogram};
use solana_lite_rpc_core::{
solana_utils::SerializableTransaction, structures::transaction_sent_info::SentTransactionInfo,
types::SlotStream,
@ -28,6 +29,14 @@ use tokio::{
time::Instant,
};
lazy_static::lazy_static! {
static ref PRIORITY_FEES_HISTOGRAM: Histogram = register_histogram!(histogram_opts!(
"literpc_txs_priority_fee",
"Priority fees of transactions sent by lite-rpc",
))
.unwrap();
}
#[derive(Clone)]
pub struct TransactionServiceBuilder {
tx_sender: TxSender,
@ -157,6 +166,8 @@ impl TransactionService {
prioritization_fee
};
PRIORITY_FEES_HISTOGRAM.observe(prioritization_fee as f64);
let max_replay = max_retries.map_or(self.max_retries, |x| x as usize);
let transaction_info = SentTransactionInfo {
signature,
@ -192,3 +203,5 @@ impl TransactionService {
Ok(signature.to_string())
}
}
mod test {}

52
util/src/encoding.rs Normal file
View File

@ -0,0 +1,52 @@
use base64::Engine;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum BinaryEncoding {
#[default]
Base58,
Base64,
}
#[derive(thiserror::Error, Debug)]
pub enum BinaryCodecError {
#[error("Base58DecodeError {0}")]
Base58DecodeError(#[from] bs58::decode::Error),
#[error("Base58EncodeError {0}")]
Base58EncodeError(#[from] bs58::encode::Error),
#[error("Base64DecodeError {0}")]
Base64DecodeError(#[from] base64::DecodeError),
}
impl BinaryEncoding {
pub fn decode<D: AsRef<[u8]>>(&self, to_decode: D) -> Result<Vec<u8>, BinaryCodecError> {
match self {
Self::Base58 => Ok(bs58::decode(to_decode).into_vec()?),
Self::Base64 => Ok(base64::engine::general_purpose::STANDARD.decode(to_decode)?),
}
}
pub fn encode<E: AsRef<[u8]>>(&self, to_encode: E) -> String {
match self {
Self::Base58 => bs58::encode(to_encode).into_string(),
Self::Base64 => base64::engine::general_purpose::STANDARD.encode(to_encode),
}
}
pub fn serialize<E: Serialize>(&self, to_serialize: &E) -> anyhow::Result<String> {
let bytes = bincode::serialize(to_serialize)?;
Ok(self.encode(bytes))
}
pub fn deserialize<E: for<'a> Deserialize<'a>>(
&self,
to_deserialize: &String,
) -> anyhow::Result<E> {
let bytes = self.decode(to_deserialize)?;
Ok(bincode::deserialize(&bytes)?)
}
}
pub const BASE64: BinaryEncoding = BinaryEncoding::Base64;
pub const BASE58: BinaryEncoding = BinaryEncoding::Base58;
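Round-trip usage of the shortcut constants (illustrative):
let payload = vec![1u8, 2, 3];
let encoded = BASE64.encode(&payload);
assert_eq!(BASE64.decode(&encoded).unwrap(), payload);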

View File

@ -1,2 +1,13 @@
pub mod encoding;
pub mod histogram_nbuckets;
pub mod histogram_percentiles;
pub mod secrets;
pub mod statistics;
// http://mango.rpcpool.com/c232ab232ba2323
pub fn obfuscate_rpcurl(rpc_addr: &str) -> String {
if rpc_addr.contains("rpcpool.com") {
return rpc_addr.replacen(char::is_numeric, "X", 99);
}
rpc_addr.to_string()
}

20
util/src/secrets.rs Normal file
View File

@ -0,0 +1,20 @@
#![allow(dead_code)]
pub fn obfuscate_rpcurl(rpc_addr: &str) -> String {
if rpc_addr.contains("rpcpool.com") {
return rpc_addr.replacen(char::is_numeric, "X", 99);
}
rpc_addr.to_string()
}
pub fn obfuscate_token(token: &Option<String>) -> String {
match token {
None => "n/a".to_string(),
Some(token) => {
let mut token = token.clone();
token.truncate(5);
token += "...";
token
}
}
}
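Illustrative inputs and outputs for the two helpers:
// obfuscate_rpcurl("http://mango.rpcpool.com/c232ab232ba2323")
//   -> "http://mango.rpcpool.com/cXXXabXXXbaXXXX"
// obfuscate_token(&Some("secret-token-123".to_string())) -> "secre..."
// obfuscate_token(&None) -> "n/a"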

40
util/src/statistics.rs Normal file
View File

@ -0,0 +1,40 @@
pub fn mean(data: &[f32]) -> Option<f32> {
let sum = data.iter().sum::<f32>();
let count = data.len();
match count {
positive if positive > 0 => Some(sum / count as f32),
_ => None,
}
}
pub fn std_deviation(data: &[f32]) -> Option<f32> {
match (mean(data), data.len()) {
(Some(data_mean), count) if count > 0 => {
let variance = data
.iter()
.map(|value| {
let diff = data_mean - *value;
diff * diff
})
.sum::<f32>()
/ count as f32;
Some(variance.sqrt())
}
_ => None,
}
}
#[test]
fn test_mean() {
let data = [1.0, 2.0, 3.0, 4.0, 5.0];
assert_eq!(mean(&data), Some(3.0));
}
#[test]
fn test_std_deviation() {
let data = [1.0, 3.0, 5.0];
assert_eq!(std_deviation(&data), Some(1.6329932));
}