Autobahn: initial public repository commit

Code imported from private repository with contributions from:

- Christian Kamm <mail@ckamm.de>
- Serge Farny <serge.farny@gmail.com>
- Microwavedcola1 <microwavedcola@gmail.com>
- Maximilian Schneider <mail@maximilianschneider.net>
- GroovieGermanikus <groovie@mango.markets>
- Godmode Galactus <godmodegalactus@gmail.com>
- Adrian Brzeziński <a.brzezinski94@gmail.com>
- Riordanp <riordan@panayid.es>

commit ad94eca4bb

File diff suppressed because it is too large

@ -0,0 +1,34 @@
[workspace]
members = [
    "bin/*",
    "programs/*",
    "lib/*",
]
resolver = "2"

[workspace.dependencies]
tokio = { version = "1", features = ["full"] }
anyhow = "1.0"
thiserror = "1.0.63"
solana-sdk = { version = "1.17", default-features = false }
solana-client = { version = "1.17" }
solana-rpc-client = { version = "1.17" }
solana-rpc-client-api = { version = "1.17" }
mango-feeds-connector = { git = "https://github.com/blockworks-foundation/mango-feeds.git", tag = "connector-v0.4.8" }
yellowstone-grpc-client = { version = "1.15.0", git = "https://github.com/blockworks-foundation/yellowstone-grpc.git", tag = "v1.15.0+solana.1.17" }
yellowstone-grpc-proto = { version = "1.14.0", git = "https://github.com/blockworks-foundation/yellowstone-grpc.git", tag = "v1.15.0+solana.1.17" }
reqwest = { version = "0.11.27", features = ["json"] }
whirlpools-client = { git = "https://github.com/blockworks-foundation/whirlpools-client/", features = ["no-entrypoint"] }
openbook-v2 = { git = "https://github.com/openbook-dex/openbook-v2", tag = "v0.2.7", features = ["no-entrypoint", "client"] }
raydium-cp-swap = { git = "https://github.com/raydium-io/raydium-cp-swap/", features = ["no-entrypoint", "client"] }
stable-swap = { version = "1.8.1", features = ["no-entrypoint", "client"] }
stable-swap-client = { version = "1.8.1" }
stable-swap-math = { version = "1.8.1" }
uint = { version = "0.9.1" }

[profile.release]
overflow-checks = true

[patch.crates-io]
# for gzip encoded responses
jsonrpc-core-client = { git = "https://github.com/ckamm/jsonrpc.git", branch = "ckamm/http-with-gzip" }

@ -0,0 +1,82 @@

# Creating a Dex Adapter

## Architecture Summary

The router calls initialize on every DEX adapter to gather a list of edges and account/program subscriptions for live refresh.

A DEX adapter must also provide a map from account key to edge(s), so that the router knows which quotes to refresh when accounts are modified on chain. A sketch of this handshake follows below.

> Edge = one way (pool/market) to exchange an input token for an output token. For example, a single SOL/USDC pool yields two edges: SOL→USDC and USDC→SOL.
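
To make the initialize handshake concrete, here is a minimal, hypothetical sketch of the data an adapter could hand back. The names and types are illustrative assumptions, not the actual API; see the traits below and the `lib` implementations for the real definitions.

```
use solana_program::pubkey::Pubkey;
use std::{collections::HashMap, sync::Arc};

/// Illustrative stand-in for the edge identifier trait described below.
pub trait DexEdgeIdentifier {}

/// Hypothetical result of an adapter's initialize call: what to subscribe
/// to, and which edges to reprice when a subscribed account changes.
pub struct InitializeResult {
    /// Accounts/programs the router should subscribe to for live refresh.
    pub accounts_to_subscribe: Vec<Pubkey>,
    /// Account key -> edges whose quotes must be refreshed on a change.
    pub edges_per_account: HashMap<Pubkey, Vec<Arc<dyn DexEdgeIdentifier>>>,
}
```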

## Implementation

Please create a new library inside the `lib` folder named like this: `dex-<your-name>`

You will need to implement three traits:

- DexInterface
- DexEdgeIdentifier
- DexEdge

Looking at an existing implementation (raydium, for example) is a recommended starting point.

### DexInterface

Trait used to:

- Load pools / list accounts for which a change should trigger a repricing
- Compute quotes
- Generate swap instructions (IX)

### DexEdgeIdentifier

Static data used to identify an edge (pool/market + direction):

- `key()`
- `input_mint()`
- `output_mint()`
- `accounts_needed()`: counts all accounts needed to make a swap, excluding:
  - the user wallet address
  - the user output ATA

Long-lived object.

**Warning**: key + input_mint should be unique.

### DexEdge

Account data needed to compute a quote for a given edge.

Short-lived object, loaded by Autobahn when updating prices and finding the best path.
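
To make the shape of these types concrete, here is a minimal, hypothetical sketch. The authoritative trait definitions live in the repository; the signatures shown here are assumptions for illustration only.

```
use solana_program::pubkey::Pubkey;

/// Hypothetical sketch of the identifier described above: static,
/// long-lived data for one pool/market plus direction.
pub trait DexEdgeIdentifier {
    /// Pool/market account address; key + input_mint must be unique.
    fn key(&self) -> Pubkey;
    fn input_mint(&self) -> Pubkey;
    fn output_mint(&self) -> Pubkey;
    /// Accounts a swap needs, excluding the user wallet address and
    /// the user output ATA.
    fn accounts_needed(&self) -> usize;
}

/// Hypothetical marker for the short-lived object holding the account
/// data snapshot used to compute a quote for this edge.
pub trait DexEdge {}
```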

## Testing and validating

Also see [Testing.MD](Testing.MD)

Please create a test directory in your dex adapter lib with at least one end-to-end test.

A three-step design is preferred:

1. Capture all accounts needed
2. Generate at least one swap for every edge
3. Simulate execution

You can basically copy and paste `test_raydium.rs` for a simple example of steps 1 and 2.
For step 3, you need to update `test_swap_from_dump.rs` in the simulator test program (again, take inspiration from the raydium implementation: `test_quote_match_swap_for_raydium`).

Tests should be run like this:

```
DUMP_MAINNET_DATA=1 RUST_LOG=info RPC_HTTP_URL="..." cargo test --package dex-<your-name>
cargo test-sbf --package simulator -- --nocapture cases::test_swap_from_dump::test_quote_match_swap_for_<your-name>
```

Note that the test runner automatically captures accounts that were not specified in the dex adapter's subscription_mode fn. This is based on observation of the generated instructions and ensures that all accounts used, even ones not needed to calculate a quote, are included in the snapshot.
This can cause data hazards because the snapshot is not generated at a consistent block height; if that happens, it's recommended to update the subscription mode fn.

@ -0,0 +1,661 @@

GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007

Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.

Preamble

The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.

The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.

When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.

A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.

The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.

An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.

The precise terms and conditions for copying, distribution and
modification follow.

TERMS AND CONDITIONS

0. Definitions.

"This License" refers to version 3 of the GNU Affero General Public License.

"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.

To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

A "covered work" means either the unmodified Program or a work based
on the Program.

To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

1. Source Code.

The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.

A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

The Corresponding Source for a work in source code form is that
same work.

2. Basic Permissions.

All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.

3. Protecting Users' Legal Rights From Anti-Circumvention Law.

No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

4. Conveying Verbatim Copies.

You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

5. Conveying Modified Source Versions.

You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.

b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".

c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.

d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.

A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.

6. Conveying Non-Source Forms.

You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.

b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.

c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.

d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.

e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.

A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.

A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.

"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.

If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).

The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.

Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.

7. Additional Terms.

"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.

When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.

Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or

b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or

c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or

d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or

e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or

f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.

All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.

If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.

Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.

8. Termination.

You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).

However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.

Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.

Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.

9. Acceptance Not Required for Having Copies.

You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.

10. Automatic Licensing of Downstream Recipients.

Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.

An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.

You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.

11. Patents.

A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".

A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.

Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.

In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.

If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.

If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.

A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.

Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.

12. No Surrender of Others' Freedom.

If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.

13. Remote Network Interaction; Use with the GNU General Public License.

Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.

Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.

14. Revised Versions of this License.

The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.

If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.

15. Disclaimer of Warranty.

THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

16. Limitation of Liability.

IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

17. Interpretation of Sections 15 and 16.

If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.

You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.

@ -0,0 +1,82 @@

# Fill.city Autobahn

![logo](./brand/autobahn-logo-mark.svg)

Autobahn is the open source aggregator for swaps on Solana.
This public good protocol enables developers to contribute their own DEX adapters.
Take back control: access to orderflow from routers on Solana should not be centralized.

The graph search is optimized for reliability of trade execution.
Reliability is preferred over marginal price to improve user experience.
Full test coverage through daily verification of all routed pools ensures correctness.

A hosted version is available.
Reach out to partnerships@mango.markets to get an access token.
Self-hosting requires custom validator patches to enable low-latency account subscriptions.

## Using the router (as a client)

The API is essentially the same as Jupiter's, served from:
`https://autobahn.mngo.cloud/<TOKEN>/`

### quote (GET)

Supported parameters:

- inputMint
- outputMint
- amount
- slippageBps
- maxAccounts
- onlyDirectRoutes
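
For example, a hypothetical quote request for 1 SOL into USDC with 50 bps slippage (the mints shown are the well-known mainnet SOL and USDC mints; amounts are assumed to be in native base units, as in Jupiter's API):

```
curl -s "https://autobahn.mngo.cloud/<TOKEN>/quote?inputMint=So11111111111111111111111111111111111111112&outputMint=EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v&amount=1000000000&slippageBps=50&onlyDirectRoutes=false"
```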

### swap & swap-instructions (POST)

Supported parameters:

- userPublicKey
- wrapAndUnwrapSol
- autoCreateOutAta
- quoteResponse
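
A hypothetical swap-instructions call, passing a prior quote response back in; the body shape is assumed from the Jupiter-style API noted above, so verify it against your deployment:

```
curl -s -X POST "https://autobahn.mngo.cloud/<TOKEN>/swap-instructions" \
  -H "Content-Type: application/json" \
  -d '{
        "userPublicKey": "<YOUR_WALLET_PUBKEY>",
        "wrapAndUnwrapSol": true,
        "autoCreateOutAta": true,
        "quoteResponse": <RESPONSE_FROM_QUOTE_CALL>
      }'
```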

## Running the router

See the example configuration file [example-config.toml](bin/autobahn-router/example-config.toml) to create your own setup.

Run like this:

```
RUST_LOG=info router my_config.toml
```

## Creating a new DEX Adapter

Adding a new DEX adapter is welcome; pull requests are appreciated!

See [CreatingAnAdapter.MD](CreatingAnAdapter.MD) for details.

## Integration testing

It's possible to dump data from mainnet and then use it in tests:

- To assert that quoting is correct (same result as a simulated swap)
- To check router path-finding performance

See [Testing.MD](Testing.MD) for details.

There's a script for daily smoke tests:

```
RPC_HTTP_URL=... ./scripts/smoke-test.sh
```

## Tokio-Console

Build the router with the `tokio-console` feature and `RUSTFLAGS="--cfg tokio_unstable"`, like this:

```
RUSTFLAGS="--cfg tokio_unstable" cargo build --bin router --release --features tokio-console
```

And use the `tokio-console` crate to display running tasks.

## License

Autobahn is published under the GNU Affero General Public License v3.0.
In case you are interested in an alternative license, please reach out to partnerships@mango.markets

@ -0,0 +1,85 @@

# Swap vs quote consistency tests

### Increasing max_map_count

The testing code uses an accounts db that needs many threads and opens many files, so vm.max_map_count and fs.nr_open have to be increased.
Without this configuration change you will get an out-of-memory exception.

```
# set in file /etc/sysctl.conf / only needs to be done once per machine
vm.max_map_count=1000000
fs.nr_open = 1000000
```

To reload the config:

```
sudo sysctl -p
```

### Capture for swap tests

```
export RPC_HTTP_URL="http://fcs-ams1._peer.internal:18899"
```

```
DUMP_MAINNET_DATA=1 RUST_LOG=info cargo test --package dex-orca -- --nocapture

DUMP_MAINNET_DATA=1 RUST_LOG=info cargo test --package dex-saber -- --nocapture

DUMP_MAINNET_DATA=1 RUST_LOG=info cargo test --package dex-raydium-cp -- --nocapture

DUMP_MAINNET_DATA=1 RUST_LOG=info cargo test --package dex-raydium -- --nocapture

DUMP_MAINNET_DATA=1 RUST_LOG=info cargo test --package dex-openbook-v2 -- --nocapture

DUMP_MAINNET_DATA=1 RUST_LOG=info cargo test --package dex-infinity -- --nocapture
```

---

### Run for every dex

```
cargo test-sbf --package simulator -- --nocapture
```

### Run for each dex one by one

```
cargo test-sbf --package simulator -- --nocapture cases::test_swap_from_dump::test_quote_match_swap_for_saber
cargo test-sbf --package simulator -- --nocapture cases::test_swap_from_dump::test_quote_match_swap_for_orca
cargo test-sbf --package simulator -- --nocapture cases::test_swap_from_dump::test_quote_match_swap_for_cropper
cargo test-sbf --package simulator -- --nocapture cases::test_swap_from_dump::test_quote_match_swap_for_raydium_cp
cargo test-sbf --package simulator -- --nocapture cases::test_swap_from_dump::test_quote_match_swap_for_raydium
cargo test-sbf --package simulator -- --nocapture cases::test_swap_from_dump::test_quote_match_swap_for_openbook_v2
cargo test-sbf --package simulator -- --nocapture cases::test_swap_from_dump::test_quote_match_swap_for_infinity
```

---

# Run router

```
cargo build --bin router && RUST_LOG=info ./target/debug/router config/small.toml
```

---

# Perf test

### Step 1: dump data

```
RUST_LOG=info RPC_HTTP_URL="http://fcs-ams1._peer.internal:18899" cargo test --package router --release -- dump_all_dex_data --nocapture
```

### Step 2: run perf test

```
RUST_LOG=info cargo test --package router --release -- path_finding_perf_test --nocapture

RUST_LOG=info cargo test --package router --release -- path_warmup_perf_test --nocapture
```

@ -0,0 +1,113 @@

[package]
name = "autobahn-router"
version = "0.0.1"
edition = "2021"
build = "build.rs"

[[bin]]
name = "autobahn-router"
path = "src/main.rs"

[features]
capture-accounts = []

[lib]

[dependencies]
anchor-lang = "0.29.0"
anchor-client = "0.29.0"
anchor-spl = "0.29.0"
anyhow = { workspace = true }
thiserror = { workspace = true }
arrayref = "0.3.6"
opool = "0.1.1"
async-channel = "1.6"
async-stream = "0.2"
async-trait = "0.1"
atty = "0.2"
bs58 = "0.3.1"
bytemuck = "^1.7.2"
bytes = "1.0"
chrono = "0.4"
clap = { version = "3.1.8", features = ["derive", "env"] }
dotenv = "0.15.0"
fixed = { git = "https://github.com/blockworks-foundation/fixed.git", branch = "v1.11.0-borsh0_10-mango" }
futures = "0.3.17"
futures-core = "0.3"
futures-util = "0.3"
itertools = "0.12"
jemallocator = "0.5"
jsonrpc-core = "18.0.0"
jsonrpc-core-client = { version = "18.0.0", features = ["ws", "http", "tls"] }
# mango-v4 = { path = "../../programs/mango-v4", features = ["client"] }
# mango-v4-client = { path = "../../lib/client" }
mango-feeds-connector = { workspace = true }
once_cell = "1.12.0"
ordered-float = "4.2.0"
priority-queue = "2.0.2"
# pyth-sdk-solana = "0.10"
rand = "0.7"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
# serum_dex = { workspace = true, features = ["no-entrypoint", "program"] }
sha2 = "0.10.6"
shellexpand = "2.1.0"
# solana-address-lookup-table-program = "1.17"
solana-account-decoder = "1.17"
solana-client = { workspace = true }
solana-logger = "1.17"
solana-program = "1.17"
solana-program-test = "1.17"
solana-sdk = { workspace = true }
solana-transaction-status = { version = "1.17" }
tokio = { workspace = true }
tokio-stream = { version = "0.1" }
tokio-tungstenite = "0.21"
toml = "0.5"
tracing = { version = "0.1", features = ["log"] }
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
regex = "1.9.5"
lazy_static = "1.5.0"
hdrhistogram = "7.5.4"
indexmap = "2.0.0"
router-lib = { path = "../../lib/router-lib/", version = "0.0.1" }
dex-orca = { path = "../../lib/dex-orca/", version = "0.0.1" }
dex-raydium-cp = { path = "../../lib/dex-raydium-cp/", version = "0.0.1" }
dex-raydium = { path = "../../lib/dex-raydium/", version = "0.0.1" }
dex-saber = { path = "../../lib/dex-saber/", version = "0.0.1" }
dex-infinity = { path = "../../lib/dex-infinity/", version = "0.0.1" }
dex-openbook-v2 = { path = "../../lib/dex-openbook-v2/", version = "0.0.1" }
router-config-lib = { path = "../../lib/router-config-lib" }
router-feed-lib = { path = "../../lib/router-feed-lib" }
spl-associated-token-account = { version = "1.0.5", features = ["no-entrypoint"] }

# grpc
yellowstone-grpc-client = { workspace = true }
yellowstone-grpc-proto = { workspace = true }
tonic = { version = "0.10.2", features = ["gzip"] }

# compressed snapshots
lz4 = "1.24.0"

# http server
axum = { version = "0.7.2", features = ["macros"] }
tower-http = { version = "0.5.2", features = ["cors"] }

# internal
autobahn-executor = { path = "../../programs/autobahn-executor" }
base64 = "0.21.7"
bincode = "1.3.3"

# metrics
prometheus = "0.13.4"
warp = "0.3.5"

[dev-dependencies]
test-case = "*"
router-test-lib = { path = "../../lib/router-test-lib" }

[build-dependencies]
anyhow = { workspace = true }
vergen-gitcl = { version = "1.0.0-beta.2", features = [] }

@ -0,0 +1,25 @@

# Base image with the tooling shared by all build stages
FROM rust:1.76.0 AS base
RUN cargo install cargo-chef@0.1.62 --locked
RUN rustup component add rustfmt
RUN apt-get update && apt-get install -y clang cmake ssh
WORKDIR /app

# Compute the dependency recipe for cargo-chef layer caching
FROM base AS plan
COPY . .
WORKDIR /app
RUN cargo chef prepare --recipe-path recipe.json

# Build dependencies from the recipe, then the router itself
FROM base AS build
COPY --from=plan /app/recipe.json recipe.json
RUN cargo chef cook --release --recipe-path recipe.json
COPY . .
RUN cargo build --release --bin autobahn-router

# Minimal runtime image
FROM debian:bookworm-slim AS run
RUN apt-get update && apt-get -y install ca-certificates libc6 libssl3 libssl-dev openssl

COPY --from=build /app/target/release/autobahn-router /usr/local/bin/
COPY --from=build /app/bin/autobahn-router/template-config.toml /usr/local/bin/template-config.toml

RUN adduser --system --group --no-create-home mangouser
USER mangouser

@ -0,0 +1,14 @@

use anyhow::Result;
use vergen_gitcl::{Emitter, GitclBuilder};

// Emit the git commit sha and commit date as VERGEN_* env vars at build time.
pub fn main() -> Result<()> {
    Emitter::default()
        .add_instructions(
            &GitclBuilder::default()
                .commit_date(true)
                .sha(true)
                .dirty(false)
                .build()?,
        )?
        .emit()
}
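
For reference, a sketch of how the values emitted above can be consumed at compile time elsewhere in the crate. The env var names are vergen's documented output for sha and commit date; this helper is an illustrative assumption, not code from this repository.

```
// Sketch: vergen-gitcl emits these via cargo:rustc-env during the build,
// so they are available to env!() at compile time.
pub fn build_version() -> String {
    format!(
        "commit {} ({})",
        env!("VERGEN_GIT_SHA"),
        env!("VERGEN_GIT_COMMIT_DATE")
    )
}
```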
@@ -0,0 +1,91 @@
[infinity]
enabled = true

[orca]
enabled = true
mints = [
    "So11111111111111111111111111111111111111112", # SOL
    "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", # USDC
    "J1toso1uCk3RLmjorhTtrVwY9HJ7X8V9yYac6Y7kGCPn", # JitoSOL
    "Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB", # USDT
    "JUPyiwrYJFskUPiHa7hkeR8VUtAeFoSYbKedZNsDvCN", # JUP
    "5oVNBeEEQvYi1cX3ir8Dx5n1P7pdxydbGF2X4TxVusJm", # INF
    "27G8MtK7VtTcCHkpASjSDdkWWYfoqT6ggEuKidVJidD4", # JLP
]
take_all_mints = false
add_mango_tokens = false

[cropper]
enabled = true
mints = [
    "So11111111111111111111111111111111111111112", # SOL
    "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", # USDC
    "Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB", # USDT
]
take_all_mints = false
add_mango_tokens = false

[saber]
enabled = true
mints = [
    "So11111111111111111111111111111111111111112", # SOL
    "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", # USDC
    "Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB", # USDT
]
take_all_mints = false
add_mango_tokens = false

[openbook_v2]
enabled = true
mints = [
    "So11111111111111111111111111111111111111112", # SOL
    "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", # USDC
    "Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB", # USDT
]
take_all_mints = false
add_mango_tokens = false

[raydium_cp]
enabled = false
mints = []
take_all_mints = false
add_mango_tokens = false

[raydium]
enabled = false
mints = []
take_all_mints = false
add_mango_tokens = false

[routing]
path_cache_validity_ms = 30000
path_warming_interval_secs = 15
path_warming_for_mints = [
    "So11111111111111111111111111111111111111112",
    "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v",
    "Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB",
]
path_warming_mode = "ConfiguredMints"
lookup_tables = ["87TgskchTNEv1uXkGQk1U4zt65tjqbfGAZWNMGAcRRPx"]

[server]
address = "127.0.0.1:8888"

[metrics]
output_http = true
prometheus_address = "0.0.0.0:9091"
output_stdout = true

[[sources]]
dedup_queue_size = 50000
rpc_http_url = "https://mango.rpcpool.com/<TOKEN>"

[[sources.grpc_sources]]
name = "accountsdb-client"
connection_string = "https://mango.rpcpool.com/"
token = ""
retry_connection_sleep_secs = 30

[price_feed]
birdeye_token = "<TOKEN>"
refresh_interval_secs = 1800 # every 30 min
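As a rough illustration of how a section of this template maps onto Rust types, here is a minimal, self-contained sketch that deserializes only the [routing] table. It assumes the serde, toml, and anyhow crates; the authoritative definitions live in router_config_lib, so the struct names below are hypothetical:

use serde::Deserialize;

// Hypothetical mirror of the [routing] section above.
#[derive(Debug, Deserialize)]
struct RoutingSection {
    path_cache_validity_ms: u64,
    path_warming_interval_secs: u64,
    path_warming_for_mints: Vec<String>,
    path_warming_mode: String,
    lookup_tables: Vec<String>,
}

#[derive(Debug, Deserialize)]
struct PartialConfig {
    routing: RoutingSection,
}

fn main() -> anyhow::Result<()> {
    let raw = std::fs::read_to_string("template-config.toml")?;
    // Unknown tables ([orca], [metrics], ...) are ignored by serde by default.
    let cfg: PartialConfig = toml::from_str(&raw)?;
    println!("{:#?}", cfg.routing);
    Ok(())
}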
@@ -0,0 +1,59 @@
use autobahn_router::source::grpc_plugin_source::feed_data_geyser;
use router_config_lib::{AccountDataSourceConfig, GrpcSourceConfig};
use solana_program::pubkey::Pubkey;
use std::collections::HashSet;
use std::env;
use std::str::FromStr;
use tracing::log::info;

#[tokio::main]
pub async fn main() {
    router_feed_lib::utils::tracing_subscriber_init();

    let grpc_addr = env::var("GRPC_ADDR").expect("need grpc url");
    let grpc_config = GrpcSourceConfig {
        name: "mysource1".to_string(),
        connection_string: grpc_addr.clone(),
        token: None,
        retry_connection_sleep_secs: 3,
        tls: None,
    };

    let rpc_http_addr = env::var("RPC_HTTP_ADDR").expect("need rpc http url");
    let snapshot_config = AccountDataSourceConfig {
        region: None,
        use_quic: None,
        quic_address: None,
        rpc_http_url: rpc_http_addr.clone(),
        rpc_support_compression: Some(false), /* no compression */
        re_snapshot_interval_secs: None,
        grpc_sources: vec![],
        dedup_queue_size: 0,
    };

    // Raydium
    let raydium_program_id =
        Pubkey::from_str("675kPX9MHTjS2zt1qfr1NYHuzeLXfQM9H24wFSUt1Mp8").unwrap();

    let account_sub = HashSet::new();
    let token_account_sub = HashSet::new();
    let program_sub = HashSet::from([raydium_program_id]);

    let (channel_sender, _dummy_rx) = async_channel::unbounded();

    info!("starting grpc_plugin_source...");
    // blocking
    feed_data_geyser(
        &grpc_config,
        None,
        snapshot_config,
        &account_sub,
        &program_sub,
        &token_account_sub,
        channel_sender,
    )
    .await
    .unwrap();

    info!("DONE.");
}
@@ -0,0 +1,102 @@
use solana_program::pubkey::Pubkey;
use solana_sdk::address_lookup_table::AddressLookupTableAccount;

/// Greedily selects address lookup tables: pick the table covering the most
/// transaction addresses, then recurse (up to four levels) on the addresses
/// that remain uncovered.
pub fn get_best_alt(
    all_alt: &Vec<AddressLookupTableAccount>,
    tx_addresses: &Vec<Pubkey>,
) -> anyhow::Result<Vec<AddressLookupTableAccount>> {
    get_best_alt_internal(all_alt, tx_addresses, 0)
}

pub fn get_best_alt_internal(
    all_alt: &Vec<AddressLookupTableAccount>,
    tx_addresses: &Vec<Pubkey>,
    level: u8,
) -> anyhow::Result<Vec<AddressLookupTableAccount>> {
    let mut sorted_all_alt = all_alt
        .iter()
        .map(|alt| {
            (
                alt,
                tx_addresses
                    .iter()
                    .filter(|tx_address| alt.addresses.contains(tx_address))
                    .count(),
            )
        })
        .collect::<Vec<_>>();

    sorted_all_alt.sort_by_key(|alt| std::cmp::Reverse(alt.1));

    if sorted_all_alt.is_empty() || sorted_all_alt[0].1 <= 1 {
        // Only use a LUT if it replaces 2 or more addresses
        return Ok(vec![]);
    }

    let result = sorted_all_alt[0..1]
        .iter()
        .map(|x| x.0.clone())
        .collect::<Vec<_>>();

    if level < 3 {
        sorted_all_alt.remove(0);
        let all_alt = sorted_all_alt.into_iter().map(|x| x.0.clone()).collect();
        let tx_addresses = tx_addresses
            .iter()
            .filter(|x| !result[0].addresses.contains(x))
            .copied()
            .collect();

        let next = get_best_alt_internal(&all_alt, &tx_addresses, level + 1)?;
        let result = result.into_iter().chain(next).collect();
        return Ok(result);
    }

    Ok(result)
}

#[cfg(test)]
mod tests {
    use super::*;
    use itertools::Itertools;

    #[test]
    fn should_find_best_alt() {
        let addr = (1..10i32).map(|_| Pubkey::new_unique()).collect::<Vec<_>>();
        let alt0 = make_alt(&[&addr[0], &addr[1]]);
        let alt1 = make_alt(&[&addr[2]]);
        let alt2 = make_alt(&[]);
        let alt3 = make_alt(&[&addr[3], &addr[4], &addr[5]]);
        let alts = vec![alt0, alt1, alt2, alt3];

        assert_alt_are(&addr[0..3], &alts, &[alts[0].clone()]);
        assert_alt_are(&addr[2..3], &alts, &[]);
        assert_alt_are(&addr[3..7], &alts, &[alts[3].clone()]);
        assert_alt_are(&addr[7..9], &alts, &[]);
        assert_alt_are(&addr[0..8], &alts, &[alts[0].clone(), alts[3].clone()]);
    }

    fn assert_alt_are(
        tx_addresses: &[Pubkey],
        all_alts: &Vec<AddressLookupTableAccount>,
        expected_alts: &[AddressLookupTableAccount],
    ) {
        let result = get_best_alt(&all_alts, &tx_addresses.iter().copied().collect()).unwrap();

        assert_eq!(
            result.iter().map(|x| x.key.to_string()).sorted().join("; "),
            expected_alts
                .iter()
                .map(|x| x.key.to_string())
                .sorted()
                .join("; "),
        );
    }

    fn make_alt(addresses: &[&Pubkey]) -> AddressLookupTableAccount {
        AddressLookupTableAccount {
            key: Pubkey::new_unique(),
            addresses: addresses.iter().map(|x| **x).collect(),
        }
    }
}
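A small standalone demonstration of the selection heuristic, in addition to the tests above (a sketch that assumes it sits next to get_best_alt in the same module):

// Demo: one LUT covering four of six transaction addresses gets picked;
// a LUT covering only one address would be skipped (it replaces <= 1 address).
fn demo() -> anyhow::Result<()> {
    let tx_addresses: Vec<Pubkey> = (0..6).map(|_| Pubkey::new_unique()).collect();
    let lut = AddressLookupTableAccount {
        key: Pubkey::new_unique(),
        addresses: tx_addresses[0..4].to_vec(),
    };
    let picked = get_best_alt(&vec![lut], &tx_addresses)?;
    assert_eq!(picked.len(), 1);
    Ok(())
}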
@@ -0,0 +1 @@
pub mod alt_optimizer;
@@ -0,0 +1,77 @@
use once_cell::sync::OnceCell;
use solana_program::pubkey::Pubkey;
use std::collections::HashSet;

/// Used in routing.
/// We want to be sure that every account used for pricing is inside the
/// "accepted filter" of the gRPC subscriptions.
static GLOBAL_ACCOUNTS_FILTERS: OnceCell<HashSet<Pubkey>> = OnceCell::new();

pub fn set_global_filters(filters: &HashSet<Pubkey>) {
    GLOBAL_ACCOUNTS_FILTERS.try_insert(filters.clone()).unwrap();
}

pub fn is_in_global_filters(address: &Pubkey) -> bool {
    GLOBAL_ACCOUNTS_FILTERS.get().unwrap().contains(address)
}

/// Best-effort mint-to-symbol mapping for debug output; unknown mints fall
/// back to their base58 address.
pub fn name(mint: &Pubkey) -> String {
    let m = mint.to_string();

    match m.as_str() {
        "So11111111111111111111111111111111111111112" => "SOL".to_string(),
        "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v" => "USDC".to_string(),
        "Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB" => "USDT".to_string(),
        "USDH1SM1ojwWUga67PGrgFWUHibbjqMvuMaDkRJTgkX" => "USDH".to_string(),
        "J1toso1uCk3RLmjorhTtrVwY9HJ7X8V9yYac6Y7kGCPn" => "JitoSOL".to_string(),
        "jupSoLaHXQiZZTSfEWMTRRgpnyFm8f6sZdosWBjx93v" => "JupSol".to_string(),
        "mSoLzYCxHdYgdzU16g5QSh3i5K3z3KZK7ytfqcJm7So" => "mSol".to_string(),
        "JUPyiwrYJFskUPiHa7hkeR8VUtAeFoSYbKedZNsDvCN" => "JUP".to_string(),
        "5oVNBeEEQvYi1cX3ir8Dx5n1P7pdxydbGF2X4TxVusJm" => "INF".to_string(),
        "27G8MtK7VtTcCHkpASjSDdkWWYfoqT6ggEuKidVJidD4" => "JLP".to_string(),
        "MangoCzJ36AjZyKwVj3VnYU4GTonjfVEnJmvvWaxLac" => "MNGO".to_string(),
        "hntyVP6YFm1Hg25TN9WGLqM12b8TQmcknKrdu1oxWux" => "HNT".to_string(),
        "KMNo3nJsBXfcpJTVhZcXLW7RmTwTt4GVFE7suUBo9sS" => "KMNO".to_string(),
        "DriFtupJYLTosbwoN8koMbEYSx54aFAVLddWsbksjwg7" => "DRIFT".to_string(),
        "7GCihgDB8fe6KNjn2MYtkzZcRjQy3t9GHdC8uHYmW2hr" => "POPCAT".to_string(),
        "4k3Dyjzvzp8eMZWUXbBCjEvwSkkk59S5iCNLY3QrkX6R" => "RAY".to_string(),
        "3jsFX1tx2Z8ewmamiwSU851GzyzM2DJMq7KWW5DM8Py3" => "CHAI".to_string(),
        "rndrizKT3MK1iimdxRdWabcF7Zg7AR5T4nud4EkHBof" => "RENDER".to_string(),
        "nosXBVoaCTtYdLvKY6Csb4AC8JCdQKKAaWYtx2ZMoo7" => "NOS".to_string(),
        "METAewgxyPbgwsseH8T16a39CQ5VyVxZi9zXiDPY18m" => "MPLX".to_string(),
        "DezXAZ8z7PnrnRJjz3wXBoRgixCa6xjnB7YaB1pPB263" => "BONK".to_string(),
        "6CNHDCzD5RkvBWxxyokQQNQPjFWgoHF94D7BmC73X6ZK" => "GECKO".to_string(),
        "LMDAmLNduiDmSiMxgae1gW7ubArfEGdAfTpKohqE5gn" => "LMDA".to_string(),
        "NeonTjSjsuo3rexg9o6vHuMXw62f9V7zvmu8M8Zut44" => "Neon".to_string(),
        "SHDWyBxihqiCj6YekG2GUr7wqKLeLAMK1gHZck9pL6y" => "Shadow".to_string(),
        "ukHH6c7mMyiWCf1b9pnWe25TSpkDDt3H5pQZgZ74J82" => "BOME".to_string(),
        "3S8qX1MsMqRbiwKg2cQyx7nis1oHMgaCuc9c4VfvVdPN" => "MOTHER".to_string(),
        _ => m,
    }
}
@@ -0,0 +1,97 @@
use std::collections::HashMap;
use std::sync::Arc;

use itertools::Itertools;
use router_lib::dex::{DexInterface, DexSubscriptionMode};
use router_lib::mango::mango_fetcher::MangoMetadata;
use tracing::info;

use crate::edge::Edge;
use crate::edge_updater::Dex;
use crate::utils;

#[macro_export]
macro_rules! build_dex {
    ($dex_builder:expr, $metadata:expr, $enabled:expr, $add_mango_tokens:expr, $take_all_mints:expr, $mints:expr) => {
        if $enabled {
            let dex = $dex_builder;
            let result = crate::dex::generic::build_dex_internal(
                dex,
                $metadata,
                $enabled,
                $add_mango_tokens,
                $take_all_mints,
                $mints,
            )
            .await?;
            Some(result)
        } else {
            None
        }
    };
}

pub(crate) use build_dex;

pub async fn build_dex_internal(
    dex: Arc<dyn DexInterface>,
    mango_metadata: &Option<MangoMetadata>,
    enabled: bool,
    add_mango_tokens: bool,
    take_all_mints: bool,
    mints: &Vec<String>,
) -> anyhow::Result<Dex> {
    let mints = utils::get_configured_mints(&mango_metadata, enabled, add_mango_tokens, mints)?;

    let edges_per_pk_src = dex.edges_per_pk();
    let mut edges_per_pk = HashMap::new();

    for (key, edges) in edges_per_pk_src {
        let edges = edges
            .into_iter()
            .filter(|x| {
                take_all_mints
                    || (mints.contains(&x.input_mint()) && mints.contains(&x.output_mint()))
            })
            .map(|x| {
                Arc::new(Edge {
                    input_mint: x.input_mint(),
                    output_mint: x.output_mint(),
                    accounts_needed: x.accounts_needed(),
                    dex: dex.clone(),
                    id: x,
                    state: Default::default(),
                })
            })
            .collect_vec();
        if !edges.is_empty() {
            edges_per_pk.insert(key, edges);
        }
    }

    let subscription_mode = match dex.subscription_mode() {
        DexSubscriptionMode::Disabled => DexSubscriptionMode::Disabled,
        DexSubscriptionMode::Accounts(a) => {
            if take_all_mints {
                DexSubscriptionMode::Accounts(a)
            } else {
                DexSubscriptionMode::Accounts(edges_per_pk.keys().cloned().collect())
            }
        }
        DexSubscriptionMode::Programs(p) => DexSubscriptionMode::Programs(p),
        DexSubscriptionMode::Mixed(m) => DexSubscriptionMode::Mixed(m),
    };

    info!("Dex {} will subscribe to {}", dex.name(), subscription_mode);

    Ok(Dex {
        name: dex.name(),
        edges_per_pk,
        subscription_mode: if enabled {
            subscription_mode
        } else {
            DexSubscriptionMode::Disabled
        },
    })
}
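A hypothetical call site for the macro above (a sketch only; the real wiring happens during router startup, and the config field names are assumptions based on the template config):

// Inside an async startup function with `rpc`, `mango_metadata` and `config`
// in scope (all assumed; OrcaDex implements DexInterface::initialize):
//
// let orca = build_dex!(
//     dex_orca::OrcaDex::initialize(&mut rpc, std::collections::HashMap::new()).await?,
//     &mango_metadata,
//     config.orca.enabled,
//     config.orca.add_mango_tokens,
//     config.orca.take_all_mints,
//     &config.orca.mints
// );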
@@ -0,0 +1,2 @@
mod generic_adapter;
pub use generic_adapter::*;
@@ -0,0 +1 @@
pub mod generic;
@@ -0,0 +1,341 @@
use crate::debug_tools;
use crate::prelude::*;
use crate::token_cache::TokenCache;
use ordered_float::Pow;
use router_lib::dex::{
    AccountProviderView, DexEdge, DexEdgeIdentifier, DexInterface, Quote, SwapInstruction,
};
use router_lib::price_feeds::price_cache::PriceCache;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::cmp::min;
use std::fmt::Formatter;
use std::time::Duration;

#[derive(Clone, Debug, Default, serde_derive::Serialize, serde_derive::Deserialize)]
pub struct EdgeState {
    /// List of (input, price, ln-price) tuples, sorted by input ascending
    // TODO: it may be much better to store this centrally, so it's cheap to take a snapshot
    pub cached_prices: Vec<(u64, f64, f64)>,
    is_valid: bool,
    pub last_update: u64,
    pub last_update_slot: u64,

    /// How many times did we cool down this edge?
    pub cooldown_event: u64,
    /// When will the edge become available again?
    pub cooldown_until: Option<u64>,
}

pub struct Edge {
    pub input_mint: Pubkey,
    pub output_mint: Pubkey,
    pub dex: Arc<dyn DexInterface>,
    pub id: Arc<dyn DexEdgeIdentifier>,

    /// Number of accounts required to traverse this edge, not including
    /// the source token account, signer, token program, ata program, system program
    // TODO: This should maybe just be a Vec<Pubkey>, so multiple same-type edges need fewer?
    // and to help with selecting address lookup tables? but then it depends on what tick-arrays
    // are needed (so on the particular quote() result)
    pub accounts_needed: usize,

    pub state: RwLock<EdgeState>,
    // TODO: address lookup table, deboosted
}

impl std::fmt::Debug for Edge {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{} => {} ({})",
            debug_tools::name(&self.input_mint),
            debug_tools::name(&self.output_mint),
            self.dex.name()
        )
    }
}

impl Serialize for Edge {
    fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        todo!()
    }
}

impl<'de> Deserialize<'de> for Edge {
    fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        todo!()
    }
}

impl Edge {
    pub fn key(&self) -> Pubkey {
        self.id.key()
    }

    pub fn unique_id(&self) -> (Pubkey, Pubkey) {
        (self.id.key(), self.id.input_mint())
    }

    pub fn desc(&self) -> String {
        self.id.desc()
    }

    pub fn kind(&self) -> String {
        self.dex.name()
    }

    pub fn build_swap_ix(
        &self,
        chain_data: &AccountProviderView,
        wallet_pk: &Pubkey,
        amount_in: u64,
        out_amount: u64,
        max_slippage_bps: i32,
    ) -> anyhow::Result<SwapInstruction> {
        self.dex.build_swap_ix(
            &self.id,
            chain_data,
            wallet_pk,
            amount_in,
            out_amount,
            max_slippage_bps,
        )
    }

    pub fn prepare(&self, chain_data: &AccountProviderView) -> anyhow::Result<Arc<dyn DexEdge>> {
        let edge = self.dex.load(&self.id, chain_data)?;
        Ok(edge)
    }

    pub fn quote(
        &self,
        prepared_quote: &Arc<dyn DexEdge>,
        chain_data: &AccountProviderView,
        in_amount: u64,
    ) -> anyhow::Result<Quote> {
        self.dex
            .quote(&self.id, &prepared_quote, chain_data, in_amount)
    }

    pub fn supports_exact_out(&self) -> bool {
        self.dex.supports_exact_out(&self.id)
    }

    pub fn quote_exact_out(
        &self,
        prepared_quote: &Arc<dyn DexEdge>,
        chain_data: &AccountProviderView,
        out_amount: u64,
    ) -> anyhow::Result<Quote> {
        self.dex
            .quote_exact_out(&self.id, &prepared_quote, chain_data, out_amount)
    }

    pub fn update_internal(
        &self,
        chain_data: &AccountProviderView,
        decimals: u8,
        price: f64,
        path_warming_amounts: &Vec<u64>,
    ) {
        let multiplier = 10u64.pow(decimals as u32) as f64;
        let amounts = path_warming_amounts
            .iter()
            .map(|amount| {
                let quantity_ui = *amount as f64 / price;
                let quantity_native = quantity_ui * multiplier;
                quantity_native.ceil() as u64
            })
            .collect_vec();

        debug!(input_mint = %self.input_mint, pool = %self.key(), multiplier = multiplier, price = price, amounts = amounts.iter().join(";"), "price_data");

        let overflow = amounts.iter().any(|x| *x == u64::MAX);
        if overflow {
            if self.state.read().unwrap().is_valid {
                debug!("amount error, disabling edge {}", self.desc());
            }

            let mut state = self.state.write().unwrap();
            state.last_update = millis_since_epoch();
            state.last_update_slot = chain_data.newest_processed_slot();
            state.cached_prices.clear();
            state.is_valid = false;
            return;
        }

        let prepared_quote = self.prepare(chain_data);

        // do the calculation for all in amounts
        let quote_results_in = amounts
            .iter()
            .map(|&amount| match &prepared_quote {
                Ok(p) => (amount, self.quote(&p, chain_data, amount)),
                Err(e) => (
                    amount,
                    anyhow::Result::<Quote>::Err(anyhow::format_err!("{}", e)),
                ),
            })
            .collect_vec();

        if let Some((_, err)) = quote_results_in.iter().find(|v| v.1.is_err()) {
            if self.state.read().unwrap().is_valid {
                warn!("quote error, disabling edge: {} {err:?}", self.desc());
            } else {
                debug!("quote error: {} {err:?}", self.desc());
            }
        }

        let mut state = self.state.write().unwrap();
        state.last_update = millis_since_epoch();
        state.last_update_slot = chain_data.newest_processed_slot();
        state.cached_prices.clear();
        state.is_valid = true;

        if let Some(timestamp) = state.cooldown_until {
            if timestamp < state.last_update {
                state.cooldown_until = None;
            }
        };

        let mut has_at_least_one_non_zero = false;
        for quote_result in quote_results_in {
            if let (in_amount, Ok(quote)) = quote_result {
                // quote.in_amount may differ from in_amount if the edge refuses to swap enough;
                // we then want the "actual price" for the expected in_amount, not for quote.in_amount
                let price = quote.out_amount as f64 / in_amount as f64;
                if price.is_nan() {
                    state.is_valid = false;
                    continue;
                }
                if price > 0.0000001 {
                    has_at_least_one_non_zero = true;
                }
                // TODO: output == 0?!
                state.cached_prices.push((in_amount, price, f64::ln(price)));
            } else {
                // TODO: should a single quote failure really invalidate the whole edge?
                state.is_valid = false;
            };
        }

        if !has_at_least_one_non_zero {
            state.is_valid = false;
        }
    }

    pub fn update(
        &self,
        chain_data: &AccountProviderView,
        token_cache: &TokenCache,
        price_cache: &PriceCache,
        path_warming_amounts: &Vec<u64>,
    ) {
        trace!(edge = self.desc(), "updating");

        let Ok(decimals) = token_cache.token(self.input_mint).map(|x| x.decimals) else {
            let mut state = self.state.write().unwrap();
            trace!("no decimals for {}", self.input_mint);
            state.is_valid = false;
            return;
        };
        let Some(price) = price_cache.price_ui(self.input_mint) else {
            let mut state = self.state.write().unwrap();
            state.is_valid = false;
            trace!("no price for {}", self.input_mint);
            return;
        };

        self.update_internal(chain_data, decimals, price, path_warming_amounts);
    }
}

impl EdgeState {
    /// Returns the price (in native/native) and ln(price) most applicable for the in amount.
    /// Returns None if the edge is invalid.
    pub fn cached_price_for(&self, in_amount: u64) -> Option<(f64, f64)> {
        if !self.is_valid() || self.cached_prices.is_empty() {
            return None;
        }

        let cached_price = self
            .cached_prices
            .iter()
            .find(|(cached_in_amount, _, _)| *cached_in_amount >= in_amount)
            .unwrap_or(self.cached_prices.last().unwrap());
        Some((cached_price.1, cached_price.2))
    }

    pub fn cached_price_exact_out_for(&self, out_amount: u64) -> Option<(f64, f64)> {
        if !self.is_valid_out() {
            return None;
        }

        let out_amount_f = out_amount as f64;
        let cached_price = self
            .cached_prices
            .iter()
            .find(|(cached_in_amount, p, _)| (*cached_in_amount as f64) * p >= out_amount_f)
            .unwrap_or(self.cached_prices.last().unwrap());

        // inverse price for exact out
        let price = 1.0 / cached_price.1;
        Some((price, f64::ln(price)))
    }

    pub fn is_valid(&self) -> bool {
        if !self.is_valid {
            return false;
        }

        if self.cooldown_until.is_some() {
            // Do not check the time here!
            // We reset "cooldown until" on the first account update coming after the cooldown,
            // so if it has not been reset yet, nothing has changed and there is
            // no reason for the edge to be working again.
            return false;
        }

        true
    }

    pub fn is_valid_out(&self) -> bool {
        if !self.is_valid {
            return false;
        }

        if self.cooldown_until.is_some() {
            // Do not check the time here!
            // We reset "cooldown until" on the first account update coming after the cooldown,
            // so if it has not been reset yet, nothing has changed and there is
            // no reason for the edge to be working again.
            return false;
        }

        true
    }

    pub fn reset_cooldown(&mut self) {
        self.cooldown_event += 0;
        self.cooldown_until = None;
    }

    pub fn add_cooldown(&mut self, duration: &Duration) {
        self.cooldown_event += 1;

        let counter = min(self.cooldown_event, 5) as f64;
        let exp_factor = 1.2.pow(counter);
        let factor = (counter * exp_factor).round() as u64;
        let until = millis_since_epoch() + (duration.as_millis() as u64 * factor);

        self.cooldown_until = match self.cooldown_until {
            None => Some(until),
            Some(current) => Some(current.max(until)),
        };
    }
}
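To make the backoff in add_cooldown above concrete, a self-contained sketch that prints the multiplier schedule (assuming a 1s base duration; the counter is capped at 5, so the backoff stops growing after the fifth event):

// factor = round(counter * 1.2^counter), counter = min(cooldown_event, 5)
fn main() {
    let base_ms = 1_000u64;
    for event in 1u64..=6 {
        let counter = event.min(5) as f64;
        let factor = (counter * 1.2f64.powf(counter)).round() as u64;
        // events 1..=5 give factors 1, 3, 5, 8, 12; further events stay at 12
        println!("cooldown_event={event}: wait {} ms", base_ms * factor);
    }
}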
@@ -0,0 +1,457 @@
use crate::edge::Edge;
use crate::metrics;
use crate::token_cache::TokenCache;
use crate::util::tokio_spawn;
use anchor_spl::token::spl_token;
use itertools::Itertools;
use router_config_lib::Config;
use router_feed_lib::get_program_account::FeedMetadata;
use router_lib::dex::{AccountProviderView, DexSubscriptionMode};
use router_lib::price_feeds::price_cache::PriceCache;
use router_lib::price_feeds::price_feed::PriceUpdate;
use solana_program::pubkey::Pubkey;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::broadcast;
use tokio::sync::broadcast::error::RecvError;
use tokio::task::JoinHandle;
use tracing::{debug, error, info, trace, warn};

#[derive(Clone)]
pub struct Dex {
    pub name: String,
    /// references edges by the subscribed pks so they can be updated on account change
    pub edges_per_pk: HashMap<Pubkey, Vec<Arc<Edge>>>,
    /// in case the program has too many accounts it could overload the rpc subscription;
    /// it can be easier to subscribe to the program id directly
    pub subscription_mode: DexSubscriptionMode,
}

impl Dex {
    pub fn _desc(&self) -> String {
        match &self.subscription_mode {
            DexSubscriptionMode::Disabled => {
                format!("Dex {} mode=Disabled", self.name)
            }
            DexSubscriptionMode::Accounts(subscribed_pks) => {
                format!("Dex {} mode=gMa #pks={}", self.name, subscribed_pks.len())
            }
            DexSubscriptionMode::Programs(subscribed_prgs) => format!(
                "Dex {} mode=gPa program_ids={:?}",
                self.name, subscribed_prgs
            ),
            DexSubscriptionMode::Mixed(m) => format!(
                "Dex {} mode=mix #pks={} program_ids={:?}, tokens_for_owners={:?}",
                self.name,
                m.accounts.len(),
                m.programs,
                m.token_accounts_for_owner
            ),
        }
    }

    pub fn edges(&self) -> Vec<Arc<Edge>> {
        let edges: Vec<Arc<Edge>> = self
            .edges_per_pk
            .clone()
            .into_iter()
            .flat_map(|x| x.1)
            .sorted_by_key(|x| x.unique_id())
            .unique_by(|x| x.unique_id())
            .collect();
        edges
    }
}

#[derive(Default)]
struct EdgeUpdaterState {
    pub is_ready: bool,
    pub latest_slot_pending: u64,
    pub latest_slot_processed: u64,
    pub slot_excessive_lagging_since: Option<Instant>,
    pub dirty_prices: bool,
    pub dirty_pools: HashSet<Pubkey>,
    pub dirty_programs: HashSet<Pubkey>,
    pub dirty_token_accounts_for_owners: bool,
}

struct EdgeUpdater {
    dex: Dex,
    chain_data: AccountProviderView,
    token_cache: TokenCache,
    price_cache: PriceCache,
    ready_sender: async_channel::Sender<()>,
    register_mint_sender: async_channel::Sender<Pubkey>,
    state: EdgeUpdaterState,
    config: Config,
    path_warming_amounts: Vec<u64>,
}

pub fn spawn_updater_job(
    dex: &Dex,
    config: &Config,
    chain_data: AccountProviderView,
    token_cache: TokenCache,
    price_cache: PriceCache,
    path_warming_amounts: Vec<u64>,
    register_mint_sender: async_channel::Sender<Pubkey>,
    ready_sender: async_channel::Sender<()>,
    mut slot_updates: broadcast::Receiver<u64>,
    mut account_updates: broadcast::Receiver<(Pubkey, u64)>,
    mut metadata_updates: broadcast::Receiver<FeedMetadata>,
    mut price_updates: broadcast::Receiver<PriceUpdate>,
    mut exit: broadcast::Receiver<()>,
) -> Option<JoinHandle<()>> {
    let dex = dex.clone();
    let config = config.clone();
    let edges = dex.edges();

    match &dex.subscription_mode {
        DexSubscriptionMode::Accounts(x) => info!(
            dex_name = dex.name,
            accounts_count = x.len(),
            "subscribing to accounts"
        ),
        DexSubscriptionMode::Programs(x) => info!(
            dex_name = dex.name,
            programs = x.iter().map(|p| p.to_string()).join(", "),
            "subscribing to programs"
        ),
        DexSubscriptionMode::Disabled => {
            debug!(dex_name = dex.name, "disabled");
            let _ = ready_sender.try_send(());
            return None;
        }
        DexSubscriptionMode::Mixed(m) => info!(
            dex_name = dex.name,
            accounts_count = m.accounts.len(),
            programs = m.programs.iter().map(|p| p.to_string()).join(", "),
            token_accounts_for_owner = m
                .token_accounts_for_owner
                .iter()
                .map(|p| p.to_string())
                .join(", "),
            "subscribing to mixed mode"
        ),
    };

    let snapshot_timeout = Instant::now() + Duration::from_secs(60 * 5);
    let listener_job = tokio_spawn(format!("edge_updater_{}", dex.name).as_str(), async move {
        let mut updater = EdgeUpdater {
            dex,
            chain_data,
            token_cache,
            price_cache,
            register_mint_sender,
            ready_sender,
            config,
            state: EdgeUpdaterState::default(),
            path_warming_amounts,
        };

        let mut refresh_all_interval = tokio::time::interval(Duration::from_secs(1));
        let mut refresh_one_interval = tokio::time::interval(Duration::from_millis(10));
        refresh_all_interval.tick().await;
        refresh_one_interval.tick().await;

        'drain_loop: loop {
            tokio::select! {
                _ = exit.recv() => {
                    info!("shutting down {} update task", updater.dex.name);
                    break;
                }
                slot = slot_updates.recv() => {
                    updater.detect_and_handle_slot_lag(slot);
                }
                res = metadata_updates.recv() => {
                    updater.on_metadata_update(res);
                }
                res = account_updates.recv() => {
                    if !updater.invalidate_one(res) {
                        break 'drain_loop;
                    }

                    let mut batchsize: u32 = 0;
                    let started_at = Instant::now();
                    'batch_loop: while let Ok(res) = account_updates.try_recv() {
                        batchsize += 1;
                        if !updater.invalidate_one(Ok(res)) {
                            break 'drain_loop;
                        }

                        // budget for the microbatch
                        if batchsize > 10 || started_at.elapsed() > Duration::from_micros(500) {
                            break 'batch_loop;
                        }
                    }
                },
                Ok(_) = price_updates.recv() => {
                    updater.state.dirty_prices = true;
                },
                _ = refresh_all_interval.tick() => {
                    updater.refresh_all(&edges);

                    if !updater.state.is_ready && snapshot_timeout < Instant::now() {
                        error!("Failed to init '{}' before timeout", updater.dex.name);
                        break;
                    }
                }
                _ = refresh_one_interval.tick() => {
                    updater.refresh_some();
                }
            }
        }

        error!("Edge updater {} job exited..", updater.dex.name);
        // send this to unblock the code in front of the exit handler
        let _ = updater.ready_sender.try_send(());
    });

    Some(listener_job)
}

impl EdgeUpdater {
    fn detect_and_handle_slot_lag(&mut self, slot: Result<u64, RecvError>) {
        let state = &mut self.state;
        if state.latest_slot_processed == 0 {
            return;
        }
        if let Ok(slot) = slot {
            let lag = slot as i64 - state.latest_slot_processed as i64;
            if lag <= 0 {
                return;
            }
            debug!(
                state.latest_slot_processed,
                state.latest_slot_pending, slot, lag, self.dex.name, "metrics"
            );

            metrics::GRPC_TO_EDGE_SLOT_LAG
                .with_label_values(&[&self.dex.name])
                .set(lag);

            let max_lag = self.config.routing.slot_excessive_lag.unwrap_or(300);
            let max_lag_duration = Duration::from_secs(
                self.config
                    .routing
                    .slot_excessive_lag_max_duration_secs
                    .unwrap_or(60),
            );

            if lag as u64 >= max_lag {
                match state.slot_excessive_lagging_since {
                    None => state.slot_excessive_lagging_since = Some(Instant::now()),
                    Some(since) => {
                        if since.elapsed() > max_lag_duration {
                            panic!(
                                "Lagging by {} slots for more than {}s, exiting..",
                                lag,
                                max_lag_duration.as_secs()
                            );
                        }
                    }
                }
            }
        }
    }

    // called once after startup, when all subscribed data has arrived
    #[tracing::instrument(skip_all, level = "trace")]
    fn on_ready(&self) {
        let mut mints = HashSet::new();
        for edge in self.dex.edges() {
            mints.insert(edge.input_mint);
            mints.insert(edge.output_mint);
        }

        info!(
            "Received all accounts needed for {} [mints count={}]",
            self.dex.name,
            mints.len()
        );

        for mint in mints {
            match self.register_mint_sender.try_send(mint) {
                Ok(_) => {}
                Err(_) => warn!("Failed to register mint '{}' for price update", mint),
            }
        }

        let _ = self.ready_sender.try_send(());
    }

    fn on_metadata_update(&mut self, res: Result<FeedMetadata, RecvError>) {
        let state = &mut self.state;
        match res {
            Ok(v) => match v {
                FeedMetadata::InvalidAccount(key) => {
                    state.dirty_pools.insert(key);
                    self.check_readiness();
                }
                FeedMetadata::SnapshotStart(_) => {}
                FeedMetadata::SnapshotEnd(x) => {
                    if let Some(x) = x {
                        if x == spl_token::ID {
                            // TODO: handle multiple owners
                            state.dirty_token_accounts_for_owners = true;
                        } else {
                            state.dirty_programs.insert(x);
                        }
                        self.check_readiness();
                    }
                }
            },
            Err(e) => {
                warn!(
                    "Error on metadata update channel in {} update task {:?}",
                    self.dex.name, e
                );
            }
        }
    }

    fn invalidate_one(&mut self, res: Result<(Pubkey, u64), RecvError>) -> bool {
        let state = &mut self.state;
        let (pk, slot) = match res {
            Ok(v) => v,
            Err(broadcast::error::RecvError::Closed) => {
                error!("account update channel closed unexpectedly");
                return false;
            }
            Err(broadcast::error::RecvError::Lagged(n)) => {
                warn!(
                    "lagged {n} on account update channel in {} update task",
                    self.dex.name
                );
                return true;
            }
        };

        state.dirty_pools.insert(pk);
        state.latest_slot_pending = slot;

        self.check_readiness();

        true
    }

    fn check_readiness(&mut self) {
        let state = &mut self.state;

        if state.is_ready {
            return;
        }

        match &self.dex.subscription_mode {
            DexSubscriptionMode::Accounts(accounts) => {
                state.is_ready = state.dirty_pools.is_superset(accounts);
            }
            DexSubscriptionMode::Disabled => {}
            DexSubscriptionMode::Programs(programs) => {
                state.is_ready = state.dirty_programs.is_superset(programs);
            }
            DexSubscriptionMode::Mixed(m) => {
                state.is_ready = state.dirty_pools.is_superset(&m.accounts)
                    && state.dirty_programs.is_superset(&m.programs)
                    && (state.dirty_token_accounts_for_owners
                        || m.token_accounts_for_owner.is_empty());
            }
        }

        if state.is_ready {
            self.on_ready();
        }
    }

    fn refresh_some(&mut self) {
        let state = &mut self.state;
        if state.dirty_pools.is_empty() || !state.is_ready {
            return;
        }

        let started_at = Instant::now();
        let mut refreshed_edges = HashSet::new();

        for pk in state.dirty_pools.iter() {
            let Some(edges_for_pk) = self.dex.edges_per_pk.get(pk) else {
                // TODO: is that a new market/pool?
                // We could keep two lists
                // - edges_per_pk
                // - ignored_pk
                // to check whether a pk (coming from a gPa subscription) is new
                // and react accordingly (add it to either of the two)
                continue;
            };

            trace!(
                "- Updating {} slot={}",
                pk,
                self.chain_data.account(pk).unwrap().slot
            );

            for edge in edges_for_pk {
                if !refreshed_edges.insert(edge.unique_id()) {
                    continue;
                }

                edge.update(
                    &self.chain_data,
                    &self.token_cache,
                    &self.price_cache,
                    &self.path_warming_amounts,
                );
            }
        }

        state.latest_slot_processed = state.latest_slot_pending;
        state.dirty_pools.clear();

        if started_at.elapsed() > Duration::from_millis(100) {
            info!(
                "{} - refresh {} - took - {:?}",
                self.dex.name,
                refreshed_edges.len(),
                started_at.elapsed()
            )
        }
    }

    // refreshes every edge; runs on the 1s tick whenever prices became dirty
    #[tracing::instrument(skip_all, level = "trace")]
    fn refresh_all(&mut self, edges: &Vec<Arc<Edge>>) {
        let state = &mut self.state;
        if !state.dirty_prices || !state.is_ready {
            return;
        }

        let started_at = Instant::now();

        for edge in edges {
            edge.update(
                &self.chain_data,
                &self.token_cache,
                &self.price_cache,
                &self.path_warming_amounts,
            );
        }

        state.latest_slot_processed = state.latest_slot_pending;
        state.dirty_prices = false;
        state.dirty_pools.clear();

        if started_at.elapsed() > Duration::from_millis(100) {
            info!(
                "{} - refresh_all - took - {:?}",
                self.dex.name,
                started_at.elapsed()
            )
        }
    }
}
@@ -0,0 +1,122 @@
use crate::debug_tools;
use router_config_lib::HotMintsConfig;
use solana_program::pubkey::Pubkey;
use std::collections::{HashSet, VecDeque};
use std::str::FromStr;
use tracing::info;

/// Keeps the `max_count` most recently seen mints, plus a configured set of
/// always-hot mints.
pub struct HotMintsCache {
    max_count: usize,
    always_hot: HashSet<Pubkey>,
    latest_unordered: HashSet<Pubkey>,
    latest_ordered: VecDeque<Pubkey>,
}

impl HotMintsCache {
    pub fn new(config: &Option<HotMintsConfig>) -> Self {
        let config = config.clone().unwrap_or(HotMintsConfig {
            always_hot_mints: vec![
                "So11111111111111111111111111111111111111112".to_string(),
                "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v".to_string(),
                "Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB".to_string(),
            ],
            keep_latest_count: 100,
        });

        HotMintsCache {
            max_count: config.keep_latest_count,
            always_hot: config
                .always_hot_mints
                .iter()
                .map(|x| Pubkey::from_str(x).unwrap())
                .collect(),
            latest_unordered: Default::default(),
            latest_ordered: Default::default(),
        }
    }

    pub fn add(&mut self, pubkey: Pubkey) {
        if self.always_hot.contains(&pubkey) {
            return;
        }

        if self.latest_unordered.contains(&pubkey) {
            // already hot: move it back to the front
            let position = self
                .latest_ordered
                .iter()
                .position(|x| *x == pubkey)
                .unwrap();
            self.latest_ordered.remove(position);
        } else if self.latest_unordered.len() >= self.max_count {
            // full: evict the least recently seen mint
            let oldest = self.latest_ordered.pop_back().unwrap();
            self.latest_unordered.remove(&oldest);
            info!("Removing {} from hot mints", debug_tools::name(&oldest));
        }

        if self.latest_unordered.insert(pubkey) {
            info!("Adding {} to hot mints", debug_tools::name(&pubkey));
        }
        self.latest_ordered.push_front(pubkey);
    }

    pub fn get(&self) -> HashSet<Pubkey> {
        self.latest_unordered
            .union(&self.always_hot)
            .copied()
            .collect()
    }
}

#[cfg(test)]
mod tests {
    use crate::hot_mints::HotMintsCache;
    use itertools::Itertools;
    use router_config_lib::HotMintsConfig;
    use solana_program::pubkey::Pubkey;
    use std::collections::HashSet;
    use std::str::FromStr;

    #[test]
    pub fn should_keep_hottest_in_list() {
        let jito = Pubkey::from_str("J1toso1uCk3RLmjorhTtrVwY9HJ7X8V9yYac6Y7kGCPn").unwrap();
        let tokens = (0..10).map(|_| Pubkey::new_unique()).collect_vec();

        let mut cache = HotMintsCache::new(&Some(HotMintsConfig {
            always_hot_mints: vec![jito.to_string()],
            keep_latest_count: 3,
        }));

        assert_eq!(cache.get().len(), 1);
        assert_eq!(cache.get(), HashSet::from([jito]));

        cache.add(tokens[0]);
        cache.add(tokens[1]);
        cache.add(tokens[2]);
        cache.add(tokens[1]);
        cache.add(tokens[1]);
        cache.add(tokens[2]);

        assert_eq!(cache.get().len(), 4);
        assert_eq!(
            cache.get(),
            HashSet::from([jito, tokens[0], tokens[1], tokens[2]])
        );

        cache.add(tokens[3]);

        assert_eq!(cache.get().len(), 4);
        assert_eq!(
            cache.get(),
            HashSet::from([jito, tokens[1], tokens[2], tokens[3]])
        );

        cache.add(jito);

        assert_eq!(cache.get().len(), 4);
        assert_eq!(
            cache.get(),
            HashSet::from([jito, tokens[1], tokens[2], tokens[3]])
        );
    }
}
@@ -0,0 +1,563 @@
use crate::routing_types::{Route, RouteStep};
use crate::swap::Swap;
use anchor_lang::Id;
use anchor_spl::associated_token::get_associated_token_address;
use anchor_spl::token::Token;
use autobahn_executor::swap_ix::generate_swap_ix_data;
use router_lib::dex::{AccountProviderView, SwapInstruction, SwapMode};
use solana_program::instruction::Instruction;
use solana_program::pubkey::Pubkey;
use std::str::FromStr;

const CU_PER_HOP_DEFAULT: u32 = 75_000;
const CU_BASE: u32 = 50_000;

pub trait SwapStepInstructionBuilder {
    // TODO: handle multiple hops from the same edge?
    fn build_ix(
        &self,
        wallet_pk: &Pubkey,
        step: &RouteStep,
        max_slippage_bps: i32,
        swap_mode: SwapMode,
        other_amount: u64,
    ) -> anyhow::Result<SwapInstruction>;
}

pub trait SwapInstructionsBuilder {
    fn build_ixs(
        &self,
        wallet_pk: &Pubkey,
        route: &Route,
        wrap_and_unwrap_sol: bool,
        auto_create_out: bool,
        max_slippage_bps: i32,
        other_amount_threshold: u64,
        swap_mode: SwapMode,
    ) -> anyhow::Result<Swap>;
}

pub struct SwapStepInstructionBuilderImpl {
    pub chain_data: AccountProviderView,
}

impl SwapStepInstructionBuilder for SwapStepInstructionBuilderImpl {
    fn build_ix(
        &self,
        wallet_pk: &Pubkey,
        step: &RouteStep,
        max_slippage_bps: i32,
        swap_mode: SwapMode,
        other_amount: u64,
    ) -> anyhow::Result<SwapInstruction> {
        let in_amount = match swap_mode {
            SwapMode::ExactIn => step.in_amount,
            SwapMode::ExactOut => other_amount,
        };

        step.edge.build_swap_ix(
            &self.chain_data,
            wallet_pk,
            in_amount,
            step.out_amount,
            max_slippage_bps,
        )
    }
}

pub struct SwapInstructionsBuilderImpl<T: SwapStepInstructionBuilder> {
    ix_builder: T,
    router_version: u8,
}

impl<T: SwapStepInstructionBuilder> SwapInstructionsBuilderImpl<T> {
    pub fn new(ix_builder: T, router_version: u8) -> SwapInstructionsBuilderImpl<T> {
        Self {
            ix_builder,
            router_version,
        }
    }
}

impl<T: SwapStepInstructionBuilder> SwapInstructionsBuilder for SwapInstructionsBuilderImpl<T> {
    fn build_ixs(
        &self,
        wallet_pk: &Pubkey,
        route: &Route,
        auto_wrap_sol: bool,
        auto_create_out: bool,
        max_slippage_bps: i32,
        other_amount_threshold: u64,
        swap_mode: SwapMode,
    ) -> anyhow::Result<Swap> {
        if route.steps.is_empty() {
            anyhow::bail!("Can't generate instructions for an empty route");
        }

        let mut setup_instructions = vec![];
        let mut cleanup_instructions = vec![];

        let exec_program_id: Pubkey =
            Pubkey::from_str("EXECM4wjzdCnrtQjHx5hy1r5k31tdvWBPYbqsjSoPfAh").unwrap();
        let sol_mint: Pubkey =
            Pubkey::from_str("So11111111111111111111111111111111111111112").unwrap();

        if auto_wrap_sol && route.input_mint == sol_mint {
            Self::create_ata(wallet_pk, &mut setup_instructions, &sol_mint);
            let wsol_account = get_associated_token_address(wallet_pk, &sol_mint);

            let in_amount = match swap_mode {
                SwapMode::ExactIn => route.in_amount,
                SwapMode::ExactOut => other_amount_threshold,
            };

            setup_instructions.push(solana_program::system_instruction::transfer(
                wallet_pk,
                &wsol_account,
                in_amount,
            ));
            setup_instructions.push(anchor_spl::token::spl_token::instruction::sync_native(
                &Token::id(),
                &wsol_account,
            )?);

            Self::close_wsol_ata(wallet_pk, &mut cleanup_instructions, &wsol_account)?;
        }

        // We don't really care about the Orca/Raydium/Openbook min out amount
        // since we check it at the end of execution anyway,
        // and a tight per-hop limit would prevent using the "overquote" heuristic
        let max_slippage_for_hop_bps = max_slippage_bps * 2;

        let swap_instructions = route
            .steps
            .iter()
            .map(|x| {
                self.ix_builder.build_ix(
                    wallet_pk,
                    x,
                    max_slippage_for_hop_bps,
                    swap_mode,
                    other_amount_threshold,
                )
            })
            .collect::<anyhow::Result<Vec<SwapInstruction>>>()?;

        let mut cu_estimate = CU_BASE;

        for step in &swap_instructions {
            if auto_create_out || (step.out_mint == sol_mint && auto_wrap_sol) {
                Self::create_ata(wallet_pk, &mut setup_instructions, &step.out_mint);
                cu_estimate += 5000;
            }

            if step.out_mint == sol_mint && auto_wrap_sol {
                let wsol_account = get_associated_token_address(wallet_pk, &sol_mint);
                Self::close_wsol_ata(wallet_pk, &mut cleanup_instructions, &wsol_account)?;
                cu_estimate += 5000;
            }

            cu_estimate += step.cu_estimate.unwrap_or(CU_PER_HOP_DEFAULT);
        }

        let (instructions, in_out): (Vec<_>, Vec<_>) = swap_instructions
            .into_iter()
            .map(|x| (x.instruction, (x.in_amount_offset, x.out_pubkey)))
            .unzip();
        let (in_amount_offsets, out_account_pubkeys): (Vec<_>, Vec<_>) = in_out.into_iter().unzip();

        let min_out_amount = match swap_mode {
            SwapMode::ExactIn => other_amount_threshold,
            SwapMode::ExactOut => route.out_amount,
        };

        let swap_instruction = generate_swap_ix_data(
            min_out_amount,
            instructions.as_slice(),
            in_amount_offsets.as_slice(),
            get_associated_token_address(wallet_pk, &route.input_mint),
            out_account_pubkeys.as_slice(),
            exec_program_id,
            self.router_version,
        );

        Ok(Swap {
            setup_instructions,
            swap_instruction,
            cleanup_instructions,
            cu_estimate,
        })
    }
}

impl<T: SwapStepInstructionBuilder> SwapInstructionsBuilderImpl<T> {
    fn close_wsol_ata(
        wallet_pk: &Pubkey,
        cleanup_instructions: &mut Vec<Instruction>,
        wsol_account: &Pubkey,
    ) -> anyhow::Result<()> {
        cleanup_instructions.push(anchor_spl::token::spl_token::instruction::close_account(
            &Token::id(),
            wsol_account,
            wallet_pk,
            wallet_pk,
            &[wallet_pk],
        )?);
        Ok(())
    }

    fn create_ata(wallet_pk: &Pubkey, setup_instructions: &mut Vec<Instruction>, mint: &Pubkey) {
        setup_instructions.push(
            spl_associated_token_account::instruction::create_associated_token_account_idempotent(
                wallet_pk,
                wallet_pk,
                mint,
                &Token::id(),
            ),
        );
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::edge::Edge;
    use crate::test_utils::*;
    use router_feed_lib::router_rpc_client::RouterRpcClient;
    use router_lib::dex::{
        AccountProviderView, DexEdge, DexEdgeIdentifier, DexInterface, DexSubscriptionMode, Quote,
    };
    use std::any::Any;
    use std::collections::{HashMap, HashSet};
    use std::sync::Arc;
    use test_case::test_case;

    struct MockSwapStepInstructionBuilder {}
    struct MockDex {}
    struct MockId {}

    impl DexEdgeIdentifier for MockId {
        fn key(&self) -> Pubkey {
            todo!()
        }

        fn desc(&self) -> String {
            todo!()
        }

        fn input_mint(&self) -> Pubkey {
            todo!()
        }

        fn output_mint(&self) -> Pubkey {
            todo!()
        }

        fn accounts_needed(&self) -> usize {
            todo!()
        }

        fn as_any(&self) -> &dyn Any {
            todo!()
        }
    }

    #[async_trait::async_trait]
    impl DexInterface for MockDex {
        async fn initialize(
            _rpc: &mut RouterRpcClient,
            _options: HashMap<String, String>,
        ) -> anyhow::Result<Arc<dyn DexInterface>>
        where
            Self: Sized,
        {
            todo!()
        }

        fn name(&self) -> String {
            todo!()
        }

        fn subscription_mode(&self) -> DexSubscriptionMode {
            todo!()
        }

        fn edges_per_pk(&self) -> HashMap<Pubkey, Vec<Arc<dyn DexEdgeIdentifier>>> {
            todo!()
        }

        fn program_ids(&self) -> HashSet<Pubkey> {
            todo!()
        }

        fn load(
            &self,
            _id: &Arc<dyn DexEdgeIdentifier>,
            _chain_data: &AccountProviderView,
        ) -> anyhow::Result<Arc<dyn DexEdge>> {
            todo!()
        }

        fn quote(
            &self,
            _id: &Arc<dyn DexEdgeIdentifier>,
            _edge: &Arc<dyn DexEdge>,
            _chain_data: &AccountProviderView,
            _in_amount: u64,
        ) -> anyhow::Result<Quote> {
            todo!()
        }

        fn build_swap_ix(
            &self,
            _id: &Arc<dyn DexEdgeIdentifier>,
            _chain_data: &AccountProviderView,
            _wallet_pk: &Pubkey,
            _in_amount: u64,
            _out_amount: u64,
            _max_slippage_bps: i32,
        ) -> anyhow::Result<SwapInstruction> {
            todo!()
        }

        fn supports_exact_out(&self, _id: &Arc<dyn DexEdgeIdentifier>) -> bool {
            todo!()
        }

        fn quote_exact_out(
            &self,
            _id: &Arc<dyn DexEdgeIdentifier>,
            _edge: &Arc<dyn DexEdge>,
            _chain_data: &AccountProviderView,
            _out_amount: u64,
        ) -> anyhow::Result<Quote> {
            todo!()
        }
    }

    impl SwapStepInstructionBuilder for MockSwapStepInstructionBuilder {
        fn build_ix(
            &self,
            _wallet_pk: &Pubkey,
            step: &RouteStep,
            _max_slippage_bps: i32,
            _swap_mode: SwapMode,
            _other_amount: u64,
        ) -> anyhow::Result<SwapInstruction> {
            Ok(SwapInstruction {
                instruction: Instruction {
                    program_id: Default::default(),
                    accounts: vec![],
                    data: vec![],
                },
                out_pubkey: Default::default(),
                out_mint: step.edge.output_mint,
                in_amount_offset: 0,
                cu_estimate: None,
            })
        }
    }

    #[test]
    fn should_fail_if_there_is_no_step() {
        let builder = SwapInstructionsBuilderImpl::new(MockSwapStepInstructionBuilder {}, 0);
        let wallet = 0.to_pubkey();

        let ixs = builder.build_ixs(
            &wallet,
            &Route {
                input_mint: 1.to_pubkey(),
                output_mint: 2.to_pubkey(),
                in_amount: 1000,
                out_amount: 2000,
                price_impact_bps: 0,
                steps: vec![],
                slot: 0,
                accounts: None,
            },
            false,
            false,
            0,
            0,
            SwapMode::ExactIn,
        );

        assert!(ixs.is_err());
    }

    #[test]
    fn should_fail_if_there_is_no_step_exact_out() {
        let builder = SwapInstructionsBuilderImpl::new(MockSwapStepInstructionBuilder {}, 0);
        let wallet = 0.to_pubkey();

        let ixs = builder.build_ixs(
            &wallet,
            &Route {
                input_mint: 1.to_pubkey(),
                output_mint: 2.to_pubkey(),
                in_amount: 1000,
                out_amount: 2000,
                price_impact_bps: 0,
                steps: vec![],
                slot: 0,
                accounts: None,
            },
            false,
            false,
            0,
            0,
            SwapMode::ExactOut,
        );

        assert!(ixs.is_err());
    }

    #[test_case(true, false, false, 3, 1 ; "when in is SOL")]
    #[test_case(false, true, false, 1, 1 ; "when out is SOL")]
    #[test_case(false, false, false, 0, 0 ; "when none is SOL")]
    #[test_case(true, false, true, 3, 1 ; "when in is SOL exact out")]
    #[test_case(false, true, true, 1, 1 ; "when out is SOL exact out")]
    #[test_case(false, false, true, 0, 0 ; "when none is SOL exact out")]
    fn should_add_wrapping_unwrapping_ix(
        in_mint_is_sol: bool,
        out_mint_is_sol: bool,
        is_exactout: bool,
        expected_setup_len: usize,
        expected_cleanup_len: usize,
    ) {
        let builder = SwapInstructionsBuilderImpl::new(MockSwapStepInstructionBuilder {}, 0);
        let wallet = 0.to_pubkey();
        let sol = Pubkey::from_str("So11111111111111111111111111111111111111112").unwrap();

        let in_mint = if in_mint_is_sol { sol } else { 1.to_pubkey() };
        let out_mint = if out_mint_is_sol { sol } else { 2.to_pubkey() };

        let swap_mode = if is_exactout {
            SwapMode::ExactOut
        } else {
            SwapMode::ExactIn
        };

        let ixs = builder
            .build_ixs(
                &wallet,
                &Route {
                    input_mint: in_mint,
                    output_mint: out_mint,
                    in_amount: 1000,
                    out_amount: 2000,
                    price_impact_bps: 0,
                    slot: 0,
                    accounts: None,
                    steps: vec![RouteStep {
                        edge: Arc::new(Edge {
                            input_mint: in_mint,
                            output_mint: out_mint,
                            dex: Arc::new(MockDex {}),
                            id: Arc::new(MockId {}),
                            accounts_needed: 1,
                            state: Default::default(),
                        }),
                        in_amount: 1000,
                        out_amount: 2000,
                        fee_amount: 0,
                        fee_mint: Default::default(),
                    }],
                },
                true,
                false,
                0,
                0,
                swap_mode,
            )
            .unwrap();

        assert_eq!(ixs.setup_instructions.len(), expected_setup_len);
        assert_eq!(ixs.cleanup_instructions.len(), expected_cleanup_len);
    }

    #[test]
    fn should_build_ixs() {
        let builder = SwapInstructionsBuilderImpl::new(MockSwapStepInstructionBuilder {}, 0);
        let wallet = 0.to_pubkey();

        let ixs = builder
            .build_ixs(
                &wallet,
                &Route {
                    input_mint: 1.to_pubkey(),
                    output_mint: 2.to_pubkey(),
                    in_amount: 1000,
                    out_amount: 2000,
                    price_impact_bps: 0,
                    slot: 0,
                    accounts: None,
                    steps: vec![RouteStep {
                        edge: Arc::new(Edge {
                            input_mint: 1.to_pubkey(),
                            output_mint: 2.to_pubkey(),
                            accounts_needed: 1,
                            dex: Arc::new(MockDex {}),
                            id: Arc::new(MockId {}),
                            state: Default::default(),
                        }),
                        in_amount: 1000,
                        out_amount: 2000,
                        fee_amount: 0,
                        fee_mint: Default::default(),
                    }],
                },
                false,
                false,
                0,
                0,
                SwapMode::ExactIn,
            )
            .unwrap();

        assert_eq!(0, ixs.setup_instructions.len());
        assert_eq!(0, ixs.cleanup_instructions.len());
    }

    #[test]
    fn should_build_ixs_exact_out() {
        let builder = SwapInstructionsBuilderImpl::new(MockSwapStepInstructionBuilder {}, 0);
        let wallet = 0.to_pubkey();

        let ixs = builder
            .build_ixs(
                &wallet,
                &Route {
                    input_mint: 1.to_pubkey(),
                    output_mint: 2.to_pubkey(),
                    in_amount: 1000,
                    out_amount: 2000,
                    price_impact_bps: 0,
                    slot: 0,
                    accounts: None,
                    steps: vec![RouteStep {
                        edge: Arc::new(Edge {
                            input_mint: 1.to_pubkey(),
                            output_mint: 2.to_pubkey(),
                            accounts_needed: 1,
                            dex: Arc::new(MockDex {}),
                            id: Arc::new(MockId {}),
                            state: Default::default(),
                        }),
                        in_amount: 1000,
                        out_amount: 2000,
                        fee_amount: 0,
                        fee_mint: Default::default(),
                    }],
                },
                false,
                false,
                0,
                0,
                SwapMode::ExactOut,
            )
            .unwrap();

        assert_eq!(0, ixs.setup_instructions.len());
        assert_eq!(0, ixs.cleanup_instructions.len());
    }
}
@@ -0,0 +1,2 @@
pub mod metrics;
pub mod source;
@@ -0,0 +1,675 @@
use crate::edge_updater::{spawn_updater_job, Dex};
use crate::ix_builder::{SwapInstructionsBuilderImpl, SwapStepInstructionBuilderImpl};
use crate::path_warmer::spawn_path_warmer_job;
use itertools::chain;
use mango_feeds_connector::chain_data::ChainData;
use mango_feeds_connector::SlotUpdate;
use prelude::*;
use router_lib::price_feeds::price_cache::PriceCache;
use router_lib::price_feeds::price_feed::PriceFeed;
use solana_client::nonblocking::rpc_client::RpcClient;
use solana_client::rpc_client::RpcClient as BlockingRpcClient;
use solana_sdk::commitment_config::CommitmentConfig;
use source::geyser;
use std::env;
use std::process::exit;
use std::sync::RwLockWriteGuard;
use std::time::{Duration, Instant};
use tokio::sync::broadcast;
use tokio::task::JoinHandle;

use crate::hot_mints::HotMintsCache;
use crate::prometheus_sync::PrometheusSync;
use crate::routing::Routing;
use crate::server::alt_provider::RpcAltProvider;
use crate::server::hash_provider::RpcHashProvider;
use crate::server::http_server::HttpServer;
use crate::server::live_account_provider::LiveAccountProvider;
use crate::server::route_provider::RoutingRouteProvider;
use crate::source::mint_accounts_source::{request_mint_metadata, Token};
use crate::token_cache::{Decimals, TokenCache};
use crate::tx_watcher::spawn_tx_watcher_jobs;
use crate::util::tokio_spawn;
use dex_orca::OrcaDex;
use router_config_lib::{string_or_env, AccountDataSourceConfig, Config};
use router_feed_lib::account_write::{AccountOrSnapshotUpdate, AccountWrite};
use router_feed_lib::get_program_account::FeedMetadata;
use router_feed_lib::router_rpc_client::RouterRpcClient;
use router_feed_lib::router_rpc_wrapper::RouterRpcWrapper;
use router_lib::chain_data::ChainDataArcRw;
use router_lib::dex::{
    AccountProviderView, ChainDataAccountProvider, DexInterface, DexSubscriptionMode,
};
use router_lib::mango;
use router_lib::price_feeds::composite::CompositePriceFeed;

mod alt;
mod debug_tools;
mod dex;
pub mod edge;
mod edge_updater;
mod hot_mints;
pub mod ix_builder;
mod metrics;
mod mock;
mod path_warmer;
pub mod prelude;
mod prometheus_sync;
pub mod routing;
pub mod routing_objectpool;
pub mod routing_types;
pub mod server;
mod slot_watcher;
mod source;
mod swap;
mod syscallstubs;
mod test_utils;
mod tests;
mod token_cache;
mod tx_watcher;
mod util;
mod utils;

// jemalloc seems to be better at keeping the memory footprint reasonable over
// longer periods of time
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;

#[repr(u8)]
enum RouterVersion {
    // Initial = 0,
    OverestimateAmount = 1,
    // Max 15, so it fits into the upper 4 bits of a u8
}

#[tokio::main(flavor = "multi_thread", worker_threads = 16)]
async fn main() -> anyhow::Result<()> {
    router_feed_lib::utils::tracing_subscriber_init();
    syscallstubs::deactivate_program_logs();
    util::print_git_version();
    util::configure_panic_hook();

    let args: Vec<String> = std::env::args().collect();
    if args.len() < 2 {
        eprintln!("Please enter a config file path argument.");
        return Ok(());
    }

    let config = Config::load(&args[1])?;
    let router_version = RouterVersion::OverestimateAmount;

    let hot_mints = Arc::new(RwLock::new(HotMintsCache::new(&config.hot_mints)));

    let mango_data = match mango::mango_fetcher::fetch_mango_data().await {
        Err(e) => {
            error!("Failed to fetch mango metadata: {}", e);
            None
        }
        Ok(metadata) => Some(metadata),
    };

    let region = env::var("FLY_REGION").unwrap_or("undefined".to_string());
    let region_source_config = config
        .sources
        .clone()
        .into_iter()
        .find(|x| *x.region.as_ref().unwrap_or(&"".to_string()) == region);
    let default_source_config = config
        .sources
        .clone()
        .into_iter()
        .find(|x| x.region.is_none());
    let source_config = region_source_config
        .or(default_source_config)
        .unwrap_or_else(|| panic!("did not find a source config for region {}", region));

    let rpc = build_rpc(&source_config);

    // handle sigint
    let exit_flag: Arc<atomic::AtomicBool> = Arc::new(atomic::AtomicBool::new(false));
    let (exit_sender, _) = broadcast::channel(1);
    {
        let exit_flag = exit_flag.clone();
        let exit_sender = exit_sender.clone();
        tokio::spawn(async move {
            tokio::signal::ctrl_c().await.unwrap();
            info!("Received SIGINT, shutting down...");
            exit_flag.store(true, atomic::Ordering::Relaxed);
            exit_sender.send(()).unwrap();
        });
    }

    let (account_write_sender, account_write_receiver) =
        async_channel::unbounded::<AccountOrSnapshotUpdate>();
    let (metadata_write_sender, metadata_write_receiver) =
        async_channel::unbounded::<FeedMetadata>();
    let (slot_sender, slot_receiver) = async_channel::unbounded::<SlotUpdate>();
    let (account_update_sender, _) = broadcast::channel(524288); // TODO this is huge, but init snapshot will completely spam this

    let chain_data = Arc::new(RwLock::new(ChainData::new()));
    start_chaindata_updating(
        chain_data.clone(),
        account_write_receiver,
        slot_receiver,
        account_update_sender.clone(),
        exit_sender.subscribe(),
    );

    let (metadata_update_sender, _) = broadcast::channel(500);
    let metadata_update_sender_clone = metadata_update_sender.clone();
    let metadata_job = tokio_spawn("metadata_relayer", async move {
        loop {
            let msg = metadata_write_receiver.recv().await;
            match msg {
                Ok(msg) => {
                    if metadata_update_sender_clone.send(msg).is_err() {
                        error!("Failed to write metadata update");
                        break;
                    }
                }
                Err(_) => {
                    error!("Failed to receive metadata update");
                    break;
                }
            }
        }
    });

    if source_config.grpc_sources.len() > 1 {
        error!("only one grpc source is supported ATM");
        exit(-1);
    }

    info!(
        "grpc sources: {}",
        source_config
            .grpc_sources
            .iter()
            .map(|c| c.connection_string.clone())
            .collect::<String>()
    );

    if config.metrics.output_http {
        let prom_bind_addr = config
            .metrics
            .prometheus_address
            .clone()
            .expect("prometheus_address must be set");
        let _prometheus = PrometheusSync::sync(prom_bind_addr);
    }
    if config.metrics.output_stdout {
        warn!("metrics output to stdout is not supported yet");
    }
    let (mut price_feed, price_feed_job) = build_price_feed(&config, &exit_sender);

    let (price_cache, price_cache_job) =
        PriceCache::new(exit_sender.subscribe(), price_feed.receiver());

    let path_warming_amounts = config
        .routing
        .path_warming_amounts
        .clone()
        .unwrap_or(vec![100, 1000]);

    let mut orca_config = HashMap::new();
    orca_config.insert(
        "program_id".to_string(),
        "whirLbMiicVdio4qvUfM5KAg6Ct8VwpYzGff3uctyCc".to_string(),
    );
    orca_config.insert("program_name".to_string(), "Orca".to_string());
    let mut cropper = HashMap::new();
    cropper.insert(
        "program_id".to_string(),
        "H8W3ctz92svYg6mkn1UtGfu2aQr2fnUFHM1RhScEtQDt".to_string(),
    );
    cropper.insert("program_name".to_string(), "Cropper".to_string());

    let mut router_rpc = RouterRpcClient {
        rpc: Box::new(RouterRpcWrapper {
            rpc: build_rpc(&source_config),
        }),
    };

    let dexs: Vec<Dex> = [
        dex::generic::build_dex!(
            OrcaDex::initialize(&mut router_rpc, orca_config).await?,
            &mango_data,
            config.orca.enabled,
            config.orca.add_mango_tokens,
            config.orca.take_all_mints,
            &config.orca.mints
        ),
        dex::generic::build_dex!(
            OrcaDex::initialize(&mut router_rpc, cropper).await?,
            &mango_data,
            config.cropper.enabled,
            config.cropper.add_mango_tokens,
            config.cropper.take_all_mints,
            &config.cropper.mints
        ),
        dex::generic::build_dex!(
            dex_saber::SaberDex::initialize(&mut router_rpc, HashMap::new()).await?,
            &mango_data,
            config.saber.enabled,
            config.saber.add_mango_tokens,
            config.saber.take_all_mints,
            &config.saber.mints
        ),
        dex::generic::build_dex!(
            dex_raydium_cp::RaydiumCpDex::initialize(&mut router_rpc, HashMap::new()).await?,
            &mango_data,
            config.raydium_cp.enabled,
            config.raydium_cp.add_mango_tokens,
            config.raydium_cp.take_all_mints,
            &config.raydium_cp.mints
        ),
        dex::generic::build_dex!(
            dex_raydium::RaydiumDex::initialize(&mut router_rpc, HashMap::new()).await?,
            &mango_data,
            config.raydium.enabled,
            config.raydium.add_mango_tokens,
            config.raydium.take_all_mints,
            &config.raydium.mints
        ),
        dex::generic::build_dex!(
            dex_openbook_v2::OpenbookV2Dex::initialize(&mut router_rpc, HashMap::new()).await?,
            &mango_data,
            config.openbook_v2.enabled,
            config.openbook_v2.add_mango_tokens,
            config.openbook_v2.take_all_mints,
            &config.openbook_v2.mints
        ),
        dex::generic::build_dex!(
            dex_infinity::InfinityDex::initialize(&mut router_rpc, HashMap::new()).await?,
            &mango_data,
            config.infinity.enabled,
            false,
            true,
            &vec![]
        ),
    ]
    .into_iter()
    .flatten()
    .collect();

    let edges = dexs.iter().flat_map(|x| x.edges()).collect_vec();

    // these are around 380k mints
    let mints: HashSet<Pubkey> = chain!(
        edges.iter().map(|x| x.input_mint),
        edges.iter().map(|x| x.output_mint)
    )
    .collect();
    info!("Using {} mints", mints.len());

    let token_cache = {
        let mint_metadata = request_mint_metadata(&source_config.rpc_http_url, &mints).await;
        let mut data: HashMap<Pubkey, token_cache::Decimals> = HashMap::new();
        for (mint_pubkey, Token { mint, decimals }) in mint_metadata {
            assert_eq!(mint_pubkey, mint);
            data.insert(mint_pubkey, decimals as Decimals);
        }
        TokenCache::new(data)
    };

    let (slot_job, rpc_slot_sender) = slot_watcher::spawn_slot_watcher_job(&source_config);
    let ready_channels = dexs
        .iter()
        .map(|_| async_channel::bounded::<()>(1))
        .collect_vec();

    let chain_data_wrapper =
        Arc::new(ChainDataAccountProvider::new(chain_data.clone())) as AccountProviderView;

    let update_jobs = dexs
        .iter()
        .enumerate()
        .filter_map(|(i, dex)| {
            spawn_updater_job(
                dex,
                &config,
                chain_data_wrapper.clone(),
                token_cache.clone(),
                price_cache.clone(),
                path_warming_amounts.clone(),
                price_feed.register_mint_sender(),
                ready_channels[i].0.clone(),
                rpc_slot_sender.subscribe(),
                account_update_sender.subscribe(),
                metadata_update_sender.subscribe(),
                price_feed.receiver(),
                exit_sender.subscribe(),
            )
        })
        .collect_vec();

    let routing = Arc::new(Routing::new(
        &config,
        path_warming_amounts.clone(),
        edges.clone(),
    ));
    let route_provider = Arc::new(RoutingRouteProvider {
        chain_data: chain_data_wrapper.clone(),
        routing,
        hot_mints: hot_mints.clone(),
        prices: price_cache.clone(),
        tokens: token_cache.clone(),
        config: config.safety_checks.clone().unwrap_or(Default::default()),
    });

    let hash_provider = Arc::new(RpcHashProvider {
        rpc_client: rpc,
        last_update: Default::default(),
    });

    let alt_provider = Arc::new(RpcAltProvider {
        rpc_client: build_rpc(&source_config),
        cache: Default::default(),
    });

    let live_account_provider = Arc::new(LiveAccountProvider {
        rpc_client: build_blocking_rpc(&source_config),
    });

    let ix_builder = Arc::new(SwapInstructionsBuilderImpl::new(
        SwapStepInstructionBuilderImpl {
            chain_data: chain_data_wrapper.clone(),
        },
        router_version as u8,
    ));

    let server_job = HttpServer::start(
        route_provider.clone(),
        hash_provider,
        alt_provider,
        live_account_provider,
        ix_builder,
        config.clone(),
        exit_sender.subscribe(),
    )
    .await?;

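    // Account keys gathered from all dex adapters; used below to filter incoming
    // account writes down to the accounts the router actually cares about.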
    let filters = dexs
        .iter()
        .flat_map(|x| x.edges_per_pk.keys())
        .copied()
        .chain(
            dexs.iter()
                .filter_map(|x| match x.subscription_mode.clone() {
                    DexSubscriptionMode::Accounts(a) => Some(a),
                    DexSubscriptionMode::Mixed(m) => Some(m.accounts),
                    _ => None,
                })
                .flatten(),
        )
        .collect::<HashSet<_>>();

    debug_tools::set_global_filters(&filters);

    info!(
        "Will only react to account writes for {} account(s)",
        filters.len()
    );

    let subscribed_accounts = dexs
        .iter()
        .flat_map(|x| match &x.subscription_mode {
            DexSubscriptionMode::Accounts(x) => x.clone().into_iter(),
            DexSubscriptionMode::Programs(_) => HashSet::new().into_iter(),
            DexSubscriptionMode::Mixed(m) => m.accounts.clone().into_iter(),
            DexSubscriptionMode::Disabled => HashSet::new().into_iter(),
        })
        .collect();

    let subscribed_programs = dexs
        .iter()
        .flat_map(|x| match &x.subscription_mode {
            DexSubscriptionMode::Disabled => HashSet::new().into_iter(),
            DexSubscriptionMode::Accounts(_) => HashSet::new().into_iter(),
            DexSubscriptionMode::Programs(x) => x.clone().into_iter(),
            DexSubscriptionMode::Mixed(m) => m.programs.clone().into_iter(),
        })
        .collect();
    let subscribed_token_accounts = dexs
        .iter()
        .flat_map(|x| match &x.subscription_mode {
            DexSubscriptionMode::Disabled => HashSet::new().into_iter(),
            DexSubscriptionMode::Accounts(_) => HashSet::new().into_iter(),
            DexSubscriptionMode::Programs(_) => HashSet::new().into_iter(),
            DexSubscriptionMode::Mixed(m) => m.token_accounts_for_owner.clone().into_iter(),
        })
        .collect();

    let ef = exit_sender.subscribe();
    let sc = source_config.clone();
    let account_update_job = tokio_spawn("geyser", async move {
        if sc.use_quic.unwrap_or(false) {
            error!("not supported yet");
        } else {
            geyser::spawn_geyser_source(
                &sc,
                ef,
                account_write_sender,
                metadata_write_sender,
                slot_sender,
                &subscribed_accounts,
                &subscribed_programs,
                &subscribed_token_accounts,
                &filters,
            )
            .await;
        }
    });

    let ef = exit_flag.clone();
    let (tx_sender_job, tx_watcher_job) =
        spawn_tx_watcher_jobs(&config.routing, &source_config, &dexs, &exit_sender, ef);

    let mango_watcher_job = mango::mango_fetcher::spawn_mango_watcher(&mango_data, &config);
    let path_warmer_job = spawn_path_warmer_job(
        &config.routing,
        hot_mints.clone(),
        mango_data.clone(),
        route_provider.clone(),
        token_cache,
        price_cache,
        path_warming_amounts,
        exit_flag.clone(),
    );

    let (ready_sender, ready_receiver) = async_channel::bounded::<()>(1);
    let _ready_watcher_job = tokio::spawn(async move {
        for (_, ready) in ready_channels {
            ready.recv().await.unwrap()
        }

        ready_sender.send(()).await.unwrap();
    });

    let mut jobs: futures::stream::FuturesUnordered<_> = vec![
        server_job.join_handle,
        price_feed_job,
        price_cache_job,
        metadata_job,
        slot_job,
        tx_sender_job,
        tx_watcher_job,
        account_update_job,
    ]
    .into_iter()
    .chain(update_jobs.into_iter())
    .chain(mango_watcher_job.into_iter())
    .chain(path_warmer_job.into_iter())
    .collect();

    loop {
        tokio::select!(
            _ = jobs.next() => {
                error!("A critical job exited, aborting run..");
                exit(-1);
            },
            Ok(_) = ready_receiver.recv() => {
                info!("autobahn-router setup complete");
            },
        );
    }

    // unreachable
}

fn build_price_feed(
    config: &Config,
    exit_sender: &broadcast::Sender<()>,
) -> (Box<dyn PriceFeed>, JoinHandle<()>) {
    let x = CompositePriceFeed::start(config.price_feed.clone(), exit_sender.subscribe());
    (Box::new(x.0) as Box<dyn PriceFeed>, x.1)
}

fn build_rpc(source_config: &AccountDataSourceConfig) -> RpcClient {
    RpcClient::new_with_timeouts_and_commitment(
        string_or_env(source_config.rpc_http_url.clone()),
        Duration::from_secs(60), // request timeout
        CommitmentConfig::confirmed(),
        Duration::from_secs(60), // confirmation timeout
    )
}

fn build_blocking_rpc(source_config: &AccountDataSourceConfig) -> BlockingRpcClient {
    BlockingRpcClient::new_with_timeouts_and_commitment(
        string_or_env(source_config.rpc_http_url.clone()),
        Duration::from_secs(60), // request timeout
        CommitmentConfig::confirmed(),
        Duration::from_secs(60), // confirmation timeout
    )
}

fn start_chaindata_updating(
    chain_data: ChainDataArcRw,
    account_writes: async_channel::Receiver<AccountOrSnapshotUpdate>,
    slot_updates: async_channel::Receiver<SlotUpdate>,
    account_update_sender: broadcast::Sender<(Pubkey, u64)>,
    mut exit: broadcast::Receiver<()>,
) -> JoinHandle<()> {
    use mango_feeds_connector::chain_data::SlotData;

    tokio_spawn("chain_data", async move {
        let mut most_recent_seen_slot = 0;

        loop {
            tokio::select! {
                _ = exit.recv() => {
                    info!("shutting down chaindata update task");
                    break;
                }
                res = account_writes.recv() => {
                    let Ok(update) = res
                    else {
                        warn!("account write channel err {res:?}");
                        continue;
                    };

                    let mut writer = chain_data.write().unwrap();
                    handle_updated_account(&mut most_recent_seen_slot, &mut writer, update, &account_update_sender);

                    let mut batchsize: u32 = 0;
                    let started_at = Instant::now();
                    'batch_loop: while let Ok(update) = account_writes.try_recv() {
                        batchsize += 1;

                        handle_updated_account(&mut most_recent_seen_slot, &mut writer, update, &account_update_sender);

                        // budget for microbatch
                        if batchsize > 10 || started_at.elapsed() > Duration::from_micros(500) {
                            break 'batch_loop;
                        }
                    }
                }
                res = slot_updates.recv() => {
                    let Ok(slot_update) = res
                    else {
                        warn!("slot channel err {res:?}");
                        continue;
                    };

                    debug!("chain_data updater got slot: {} ({:?}) -- channel sizes: {} {}", slot_update.slot, slot_update.status,
                        slot_updates.len(), account_writes.len());

                    chain_data.write().unwrap().update_slot(SlotData {
                        slot: slot_update.slot,
                        parent: slot_update.parent,
                        status: slot_update.status,
                        chain: 0,
                    });

                    // TODO: slot updates can significantly affect state, do we need to track what needs to be updated
                    // when switching to a different fork?
                }
                // TODO: update Clock Sysvar
            }
        }
    })
}

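/// Applies a single account write, or every write in a snapshot, to the chain
/// data cache and notifies listeners via `account_update_sender` which
/// (pubkey, slot) changed.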
fn handle_updated_account(
    most_recent_seen_slot: &mut u64,
    chain_data: &mut RwLockWriteGuard<ChainData>,
    update: AccountOrSnapshotUpdate,
    account_update_sender: &broadcast::Sender<(Pubkey, u64)>,
) {
    use mango_feeds_connector::chain_data::AccountData;
    use solana_sdk::account::WritableAccount;
    use solana_sdk::clock::Epoch;

    fn one_update(
        most_recent_seen_slot: &mut u64,
        chain_data: &mut RwLockWriteGuard<ChainData>,
        account_update_sender: &broadcast::Sender<(Pubkey, u64)>,
        account_write: AccountWrite,
    ) {
        chain_data.update_account(
            account_write.pubkey,
            AccountData {
                slot: account_write.slot,
                write_version: account_write.write_version,
                account: WritableAccount::create(
                    account_write.lamports,
                    account_write.data,
                    account_write.owner,
                    account_write.executable,
                    account_write.rent_epoch as Epoch,
                ),
            },
        );

        if *most_recent_seen_slot != account_write.slot {
            debug!(
                "new slot seen: {} // chain_data.newest_processed_slot: {}",
                account_write.slot,
                chain_data.newest_processed_slot()
            );
            *most_recent_seen_slot = account_write.slot;
        }

        // ignore failing sends when there are no receivers
        let _err = account_update_sender.send((account_write.pubkey, account_write.slot));
    }

    match update {
        AccountOrSnapshotUpdate::AccountUpdate(account_write) => one_update(
            most_recent_seen_slot,
            chain_data,
            account_update_sender,
            account_write,
        ),
        AccountOrSnapshotUpdate::SnapshotUpdate(snapshot) => {
            for account_write in snapshot {
                one_update(
                    most_recent_seen_slot,
                    chain_data,
                    account_update_sender,
                    account_write,
                )
            }
        }
    }
}

@@ -0,0 +1,62 @@
use prometheus::core::GenericGauge;
use prometheus::{
    histogram_opts, opts, register_gauge_vec, register_histogram_vec, register_int_counter,
    register_int_counter_vec, register_int_gauge, register_int_gauge_vec, GaugeVec, HistogramVec,
    IntCounter, IntCounterVec, IntGauge, IntGaugeVec,
};

lazy_static::lazy_static! {
    pub static ref GRPC_ACCOUNT_WRITES: IntCounter =
        register_int_counter!("grpc_account_writes", "Number of account updates via Geyser gRPC").unwrap();
    pub static ref GRPC_ACCOUNT_WRITE_QUEUE: IntGauge =
        register_int_gauge!("grpc_account_write_queue", "Items in account write queue via Geyser gRPC").unwrap();
    pub static ref GRPC_DEDUP_QUEUE: GenericGauge<prometheus::core::AtomicI64> =
        register_int_gauge!("grpc_dedup_queue", "Items in dedup queue via Geyser gRPC").unwrap();
    pub static ref GRPC_SLOT_UPDATE_QUEUE: GenericGauge<prometheus::core::AtomicI64> =
        register_int_gauge!("grpc_slot_update_queue", "Items in slot update queue via Geyser gRPC").unwrap();
    pub static ref GRPC_SLOT_UPDATES: IntCounter =
        register_int_counter!("grpc_slot_updates", "Number of slot updates via Geyser gRPC").unwrap();
    pub static ref ACCOUNT_SNAPSHOTS: IntCounterVec =
        register_int_counter_vec!(opts!("router_account_snapshots", "Number of account snapshots"), &["snapshot_type"]).unwrap();
    pub static ref GRPC_SNAPSHOT_ACCOUNT_WRITES: IntCounter =
        register_int_counter!("router_snapshot_account_writes", "Number of account writes from snapshot").unwrap();
    pub static ref GRPC_SOURCE_CONNECTION_RETRIES: IntCounterVec =
        register_int_counter_vec!(opts!("grpc_source_connection_retries", "gRPC source connection retries"), &["source_name"]).unwrap();
    pub static ref GRPC_NO_MESSAGE_FOR_DURATION_MS: IntGauge =
        register_int_gauge!("grpc_no_update_for_duration_ms", "Did not get any message from Geyser gRPC for this duration").unwrap();
    pub static ref GRPC_TO_EDGE_SLOT_LAG: IntGaugeVec =
        register_int_gauge_vec!(opts!("router_grpc_to_edge_slot_lag", "RPC Slot vs last slot used to update edges"), &["dex_name"]).unwrap();

    pub static ref HTTP_REQUEST_TIMING: HistogramVec =
        register_histogram_vec!(
            histogram_opts!("router_http_request_timing", "Endpoint timing in seconds",
            // buckets (in seconds)
            vec![
                2e-6, 5e-6, 10e-6, 15e-6, 20e-6, 25e-6, 30e-6, 50e-6, 100e-6, 200e-6, 500e-6,
                2e-3, 5e-3, 10e-3, 25e-3, 50e-3, 100e-3, 200e-3, 500e-3,
                1.0, 2.0
            ]),
            &["endpoint", "client"]).unwrap();
    pub static ref HTTP_REQUESTS_TOTAL: IntCounterVec =
        register_int_counter_vec!(opts!("router_http_requests_total", "Number of total endpoint requests"), &["endpoint", "client"]).unwrap();
    pub static ref HTTP_REQUESTS_FAILED: IntCounterVec =
        register_int_counter_vec!(opts!("router_http_requests_failed", "Number of failed endpoint requests"), &["endpoint", "client"]).unwrap();

    pub static ref PATH_DISCOVERY_CACHE_HITS: IntCounter =
        register_int_counter!("router_path_discovery_cache_hits", "Cache hits in path discovery").unwrap();
    pub static ref PATH_DISCOVERY_CACHE_MISSES: IntCounter =
        register_int_counter!("router_path_discovery_cache_misses", "Cache misses in path discovery").unwrap();

    pub static ref OBJECTPOOL_BEST_BY_NODE_NEW_ALLOCATIONS: IntCounter =
        register_int_counter!("router_objectpool_best_by_node_allocations", "Number of new allocations in object pool best_by_node").unwrap();
    pub static ref OBJECTPOOL_BEST_BY_NODE_REUSES: IntCounter =
        register_int_counter!("router_objectpool_best_by_node_reuses", "Number of reuses in object pool best_by_node").unwrap();
    pub static ref OBJECTPOOL_BEST_PATHS_BY_NODE_NEW_ALLOCATIONS: IntCounter =
        register_int_counter!("router_objectpool_best_paths_by_node_allocations", "Number of new allocations in object pool best_paths_by_node").unwrap();
    pub static ref OBJECTPOOL_BEST_PATHS_BY_NODE_REUSES: IntCounter =
        register_int_counter!("router_objectpool_best_paths_by_node_reuses", "Number of reuses in object pool best_paths_by_node").unwrap();

    pub static ref REPRICING_DIFF_BPS: GaugeVec =
        register_gauge_vec!(opts!("router_repricing_diff_bps", "Router chaindata/live repricing diff (bps)"), &["pair"]).unwrap();
}

@@ -0,0 +1,142 @@
pub mod test {
    use router_feed_lib::router_rpc_client::RouterRpcClient;
    use router_lib::dex::{
        AccountProviderView, DexEdge, DexEdgeIdentifier, DexInterface, DexSubscriptionMode, Quote,
        SwapInstruction,
    };
    use solana_program::pubkey::Pubkey;
    use std::any::Any;
    use std::collections::{HashMap, HashSet};
    use std::sync::Arc;

    pub(crate) struct MockDexIdentifier {
        pub key: Pubkey,
        pub input_mint: Pubkey,
        pub output_mint: Pubkey,
        pub price: f64,
    }

    impl DexEdgeIdentifier for MockDexIdentifier {
        fn key(&self) -> Pubkey {
            self.key
        }

        fn desc(&self) -> String {
            format!("{} - {}", self.input_mint, self.output_mint)
        }

        fn input_mint(&self) -> Pubkey {
            self.input_mint
        }

        fn output_mint(&self) -> Pubkey {
            self.output_mint
        }

        fn accounts_needed(&self) -> usize {
            0
        }

        fn as_any(&self) -> &dyn Any {
            self
        }
    }

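    /// Mock dex for tests: quoting applies the fixed `price` from
    /// `MockDexIdentifier` linearly in both directions; everything the tests
    /// do not exercise is left as `todo!()`.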
    pub struct MockDexInterface {}

    pub struct MockEdge {}
    impl DexEdge for MockEdge {
        fn as_any(&self) -> &dyn Any {
            self
        }
    }

    #[async_trait::async_trait]
    impl DexInterface for MockDexInterface {
        async fn initialize(
            _rpc: &mut RouterRpcClient,
            _options: HashMap<String, String>,
        ) -> anyhow::Result<Arc<dyn DexInterface>>
        where
            Self: Sized,
        {
            todo!()
        }

        fn name(&self) -> String {
            todo!()
        }

        fn subscription_mode(&self) -> DexSubscriptionMode {
            todo!()
        }

        fn edges_per_pk(&self) -> HashMap<Pubkey, Vec<Arc<dyn DexEdgeIdentifier>>> {
            todo!()
        }

        fn program_ids(&self) -> HashSet<Pubkey> {
            todo!()
        }

        fn load(
            &self,
            _id: &Arc<dyn DexEdgeIdentifier>,
            _chain_data: &AccountProviderView,
        ) -> anyhow::Result<Arc<dyn DexEdge>> {
            Ok(Arc::new(MockEdge {}) as Arc<dyn DexEdge>)
        }

        fn quote(
            &self,
            id: &Arc<dyn DexEdgeIdentifier>,
            _edge: &Arc<dyn DexEdge>,
            _chain_data: &AccountProviderView,
            in_amount: u64,
        ) -> anyhow::Result<Quote> {
            let id = id.as_any().downcast_ref::<MockDexIdentifier>().unwrap();
            let out_amount = (id.price * in_amount as f64).round() as u64;

            Ok(Quote {
                in_amount,
                out_amount,
                fee_amount: 0,
                fee_mint: id.input_mint,
            })
        }

        fn build_swap_ix(
            &self,
            _id: &Arc<dyn DexEdgeIdentifier>,
            _chain_data: &AccountProviderView,
            _wallet_pk: &Pubkey,
            _in_amount: u64,
            _out_amount: u64,
            _max_slippage_bps: i32,
        ) -> anyhow::Result<SwapInstruction> {
            todo!()
        }

        fn supports_exact_out(&self, _id: &Arc<dyn DexEdgeIdentifier>) -> bool {
            true
        }

        fn quote_exact_out(
            &self,
            id: &Arc<dyn DexEdgeIdentifier>,
            _edge: &Arc<dyn DexEdge>,
            _chain_data: &AccountProviderView,
            out_amount: u64,
        ) -> anyhow::Result<Quote> {
            let id = id.as_any().downcast_ref::<MockDexIdentifier>().unwrap();
            let in_amount = (out_amount as f64 / id.price).round() as u64;

            Ok(Quote {
                in_amount,
                out_amount,
                fee_amount: 0,
                fee_mint: id.input_mint,
            })
        }
    }
}

@@ -0,0 +1,188 @@
use itertools::Itertools;
use router_lib::dex::SwapMode;
use solana_program::pubkey::Pubkey;
use std::collections::HashSet;
use std::str::FromStr;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};

use tokio::task::JoinHandle;
use tracing::log::trace;
use tracing::{debug, info, warn};

use crate::debug_tools;
use crate::hot_mints::HotMintsCache;
use crate::server::route_provider::RouteProvider;
use crate::token_cache::TokenCache;
use router_config_lib::{PathWarmingMode, RoutingConfig};
use router_lib::mango::mango_fetcher::MangoMetadata;
use router_lib::price_feeds::price_cache::PriceCache;

pub fn spawn_path_warmer_job<T>(
    config: &RoutingConfig,
    hot_mints_cache: Arc<RwLock<HotMintsCache>>,
    mango_metadata: Option<MangoMetadata>,
    route_provider: Arc<T>,
    token_cache: TokenCache,
    price_cache: PriceCache,
    path_warming_amounts: Vec<u64>,
    exit_flag: Arc<AtomicBool>,
) -> Option<JoinHandle<()>>
where
    T: RouteProvider + Send + Sync + 'static,
{
    let mode = config
        .path_warming_mode
        .clone()
        .unwrap_or(PathWarmingMode::ConfiguredMints);
    let configured_mints = config
        .path_warming_for_mints
        .clone()
        .unwrap_or(vec![])
        .iter()
        .map(|x| Pubkey::from_str(x).expect("Invalid mint in path warming config"))
        .collect_vec();

    match mode {
        PathWarmingMode::None => return None,
        PathWarmingMode::ConfiguredMints => {
            if configured_mints.is_empty() {
                warn!("No configured tokens => no path warming");
                return None;
            }
        }
        PathWarmingMode::MangoMints => {
            if mango_metadata.is_none() {
                warn!("Mango tokens unavailable => no path warming");
                return None;
            }
        }
        PathWarmingMode::HotMints => {}
        PathWarmingMode::All => {}
    };

    let sol_mint = Pubkey::from_str("So11111111111111111111111111111111111111112").unwrap();
    let config = config.clone();
    let start = Instant::now();
    let job = tokio::spawn(async move {
        let mut interval = tokio::time::interval(Duration::from_secs(
            config.path_warming_interval_secs.unwrap_or(10),
        ));
        let config_max_accounts = config
            .path_warming_max_accounts
            .unwrap_or(vec![10_usize, 15, 20, 25, 30, 40]);
        interval.tick().await;

        loop {
            interval.tick().await;
            if start.elapsed() < Duration::from_secs(60) {
                // do not start right away as not everything is ready yet
                continue;
            }

            let mut all_mints = token_cache.tokens();
            all_mints.insert(sol_mint);

            let hot_mints = hot_mints_cache.read().unwrap().get();
            let mints = match generate_mints(
                &mode,
                &configured_mints,
                &hot_mints,
                &all_mints,
                &mango_metadata,
            ) {
                Some(value) => value,
                None => break,
            };

            debug!("Running a path warmup loop for {} mints", mints.len());
            let mut counter = 0;
            let mut skipped = 0;
            let time = Instant::now();

            // prune edges for exact in
            route_provider.prepare_pruned_edges_and_cleanup_cache(&hot_mints, SwapMode::ExactIn);

            // prune edges for exact out
            route_provider.prepare_pruned_edges_and_cleanup_cache(&hot_mints, SwapMode::ExactOut);

            for from_mint in &mints {
                if exit_flag.load(Ordering::Relaxed) {
                    tracing::log::warn!("shutting down path warmer job...");
                    return;
                }

                let Some(price_ui) = price_cache.price_ui(*from_mint) else {
                    skipped += 1;
                    continue;
                };
                if price_ui <= 0.000001 {
                    skipped += 1;
                    continue;
                }
                let Ok(token) = token_cache.token(*from_mint) else {
                    skipped += 1;
                    continue;
                };

                let decimals = token.decimals;
                let multiplier = 10u64.pow(decimals as u32) as f64;

                trace!("Warming up {}", debug_tools::name(&from_mint));

                for amount_ui in &path_warming_amounts {
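                    // convert the warming amount (denominated in the price
                    // feed's reference currency, presumably USD) into the
                    // token's native units via its cached price and decimals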
                    let amount_native =
                        ((*amount_ui as f64 / price_ui) * multiplier).round() as u64;

                    for max_accounts in &config_max_accounts {
                        match route_provider.prepare_cache_for_input_mint(
                            *from_mint,
                            amount_native,
                            *max_accounts,
                            |input, output| mints.contains(input) || mints.contains(output),
                        ) {
                            Ok(_) => {}
                            Err(e) => warn!("Error warming up path: {}", e),
                        };
                    }
                }

                counter += 1;

                if counter % 100 == 0 {
                    debug!(
                        "Done for {}/{} mints (skipped {})",
                        counter,
                        mints.len(),
                        skipped
                    );
                }
            }

            info!(
                "Path warmup done in {:?} for {} mints",
                time.elapsed(),
                mints.len()
            );
        }
    });

    Some(job)
}

fn generate_mints(
    mode: &PathWarmingMode,
    configured_mints: &Vec<Pubkey>,
    hot_mints: &HashSet<Pubkey>,
    all_mints: &HashSet<Pubkey>,
    mango_metadata: &Option<MangoMetadata>,
) -> Option<HashSet<Pubkey>> {
    Some(match mode {
        PathWarmingMode::None => return None,
        PathWarmingMode::ConfiguredMints => configured_mints.clone().into_iter().collect(),
        PathWarmingMode::HotMints => hot_mints.clone().into_iter().collect(),
        PathWarmingMode::MangoMints => mango_metadata.as_ref().unwrap().mints.clone(),
        PathWarmingMode::All => all_mints.clone(),
    })
}

@@ -0,0 +1,16 @@
pub use anyhow::{anyhow, bail, Context};
pub use futures::{stream, StreamExt, TryStreamExt};
pub use itertools::Itertools;
pub use solana_sdk::pubkey::Pubkey;
pub use tokio::sync::broadcast;
pub use tracing::{debug, error, info, trace, warn};

pub use std::collections::HashMap;
pub use std::collections::HashSet;
pub use std::str::FromStr;
pub use std::sync::atomic;
pub use std::time;
pub use std::{cell::RefCell, sync::Arc, sync::RwLock};

pub use crate::edge::Edge;
pub use crate::util::millis_since_epoch;

@@ -0,0 +1,57 @@
use std::time::Duration;

use prometheus::{Encoder, TextEncoder};
use tokio::task::JoinHandle;
use tokio::{
    io::AsyncWriteExt,
    net::{TcpListener, TcpStream, ToSocketAddrs},
};
use tracing::error;

pub struct PrometheusSync;

impl PrometheusSync {
    fn create_response(payload: &str) -> String {
        format!(
            "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n{}",
            payload.len(),
            payload
        )
    }

    async fn handle_stream(stream: &mut TcpStream) -> anyhow::Result<()> {
        let mut metrics_buffer = Vec::new();
        let encoder = TextEncoder::new();

        let metric_families = prometheus::gather();
        encoder
            .encode(&metric_families, &mut metrics_buffer)
            .unwrap();

        let metrics_buffer = String::from_utf8(metrics_buffer).unwrap();
        let response = Self::create_response(&metrics_buffer);

        stream.writable().await?;
        stream.write_all(response.as_bytes()).await?;

        stream.flush().await?;

        Ok(())
    }

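    /// Spawn a minimal TCP listener that answers every connection with the
    /// Prometheus text exposition of all globally registered metrics.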
    pub fn sync(addr: impl ToSocketAddrs + Send + 'static) -> JoinHandle<anyhow::Result<()>> {
        tokio::spawn(async move {
            let listener = TcpListener::bind(addr).await?;

            loop {
                let Ok((mut stream, _addr)) = listener.accept().await else {
                    error!("Error accepting prometheus stream");
                    tokio::time::sleep(Duration::from_millis(1)).await;
                    continue;
                };

                let _ = Self::handle_stream(&mut stream).await;
            }
        })
    }
}

File diff suppressed because it is too large

@@ -0,0 +1,198 @@
use crate::metrics;
use crate::routing_types::*;
use opool::{Pool, PoolAllocator, RefGuard};
use ordered_float::NotNan;
use router_lib::dex::SwapMode;
use tracing::{debug, trace};

pub struct RoutingObjectPools {
    best_by_node_pool: Pool<BestByNodeAllocator, Vec<BestVec3>>,
    best_paths_by_node_pool:
        Pool<BestPathsByNodeAllocator, MintVec<Vec<(NotNan<f64>, Vec<EdgeWithNodes>)>>>,
    best_paths_by_node_pool_exact_out:
        Pool<BestPathsByNodeAllocator, MintVec<Vec<(NotNan<f64>, Vec<EdgeWithNodes>)>>>,
    out_edges_per_node: usize,
    n_paths: usize,
}

const POOL_SIZE_BEST_BY_NODE: usize = 5;
const POOL_SIZE_BEST_PATHS_BY_NODE: usize = 5;

impl RoutingObjectPools {
    pub fn new(out_edges_per_node: usize, n_paths: usize) -> Self {
        debug!(
            "Init objectpool(size {}) for best_by_node with out_edges_per_node={}",
            POOL_SIZE_BEST_BY_NODE, out_edges_per_node
        );
        let pool_best_by_node = Pool::new_prefilled(
            POOL_SIZE_BEST_BY_NODE,
            BestByNodeAllocator { out_edges_per_node },
        );

        debug!("Init objectpool(size {}) for best_paths_by_node with out_edges_per_node={}, n_paths={}",
            POOL_SIZE_BEST_PATHS_BY_NODE, out_edges_per_node, n_paths);
        let pool_best_paths = Pool::new_prefilled(
            POOL_SIZE_BEST_PATHS_BY_NODE,
            BestPathsByNodeAllocator {
                out_edges_per_node,
                n_paths,
                swap_mode: SwapMode::ExactIn,
            },
        );

        debug!("Init objectpool(size {}) for best_paths_by_node_exact_out with out_edges_per_node={}, n_paths={}",
            POOL_SIZE_BEST_PATHS_BY_NODE, out_edges_per_node, n_paths);
        let pool_best_paths_exactout = Pool::new_prefilled(
            POOL_SIZE_BEST_PATHS_BY_NODE,
            BestPathsByNodeAllocator {
                out_edges_per_node,
                n_paths,
                swap_mode: SwapMode::ExactOut,
            },
        );

        Self {
            best_by_node_pool: pool_best_by_node,
            best_paths_by_node_pool: pool_best_paths,
            best_paths_by_node_pool_exact_out: pool_best_paths_exactout,
            out_edges_per_node,
            n_paths,
        }
    }

    /// get object from pool or create new one
    pub(crate) fn get_best_by_node(
        &self,
        expected_out_edges_per_node: usize,
    ) -> RefGuard<BestByNodeAllocator, Vec<BestVec3>> {
        assert_eq!(
            expected_out_edges_per_node, self.out_edges_per_node,
            "requested data shape does not fit the pooled vecvec"
        );
        self.best_by_node_pool.get()
    }

    /// get object from pool or create new one
    pub(crate) fn get_best_paths_by_node(
        &self,
        expected_out_edges_per_node: usize,
        expected_n_paths: usize,
    ) -> RefGuard<BestPathsByNodeAllocator, MintVec<Vec<(NotNan<f64>, Vec<EdgeWithNodes>)>>> {
        assert_eq!(
            expected_out_edges_per_node, self.out_edges_per_node,
            "requested data shape does not fit the pooled one"
        );
        assert_eq!(
            expected_n_paths, self.n_paths,
            "requested data shape does not fit the pooled one"
        );
        self.best_paths_by_node_pool.get()
    }

    pub(crate) fn get_best_paths_by_node_exact_out(
        &self,
        expected_out_edges_per_node: usize,
        expected_n_paths: usize,
    ) -> RefGuard<BestPathsByNodeAllocator, MintVec<Vec<(NotNan<f64>, Vec<EdgeWithNodes>)>>> {
        assert_eq!(
            expected_out_edges_per_node, self.out_edges_per_node,
            "requested data shape does not fit the pooled one"
        );
        assert_eq!(
            expected_n_paths, self.n_paths,
            "requested data shape does not fit the pooled one"
        );
        self.best_paths_by_node_pool_exact_out.get()
    }
}

pub struct BestByNodeAllocator {
    out_edges_per_node: usize,
}

impl PoolAllocator<Vec<BestVec3>> for BestByNodeAllocator {
    fn reset(&self, obj: &mut Vec<BestVec3>) {
        trace!("RESET/REUSE pooled object best_by_node");
        metrics::OBJECTPOOL_BEST_BY_NODE_REUSES.inc();
        for best_vec in obj.iter_mut() {
            best_vec.fill(0.0);
        }
    }

    /// 75MB for out_edges_per_node=393709
    #[inline]
    fn allocate(&self) -> Vec<BestVec3> {
        trace!("ALLOC bestvec object best_by_node");
        metrics::OBJECTPOOL_BEST_BY_NODE_NEW_ALLOCATIONS.inc();
        // Best amount received for token/account_size
        // 3 = number of paths kept
        // 8 = (64/8) (max accounts/bucket_size)
        vec![[0f64; 3]; 8 * self.out_edges_per_node]
    }
}

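/// Allocator for the per-mint best-path tables. Path scores are reset to
/// negative infinity for ExactIn (where a larger output is better) and to
/// positive infinity for ExactOut (where a smaller input is better), so any
/// real path improves on the sentinel.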
pub struct BestPathsByNodeAllocator {
    out_edges_per_node: usize,
    n_paths: usize,
    swap_mode: SwapMode,
}

impl PoolAllocator<MintVec<Vec<(NotNan<f64>, Vec<EdgeWithNodes>)>>> for BestPathsByNodeAllocator {
    fn reset(&self, obj: &mut MintVec<Vec<(NotNan<f64>, Vec<EdgeWithNodes>)>>) {
        trace!("RESET/REUSE pooled object best_paths_by_node");
        metrics::OBJECTPOOL_BEST_PATHS_BY_NODE_REUSES.inc();

        obj.iter_mut().for_each(|path| {
            assert_eq!(path.len(), self.n_paths);
        });

        let inf_value = match &self.swap_mode {
            SwapMode::ExactIn => f64::NEG_INFINITY,
            SwapMode::ExactOut => f64::INFINITY,
        };

        obj.iter_mut().flatten().for_each(|(ref mut top, edges)| {
            *top = NotNan::new(inf_value).unwrap();
            edges.clear();
        });
    }

    /// 72MB for out_edges_per_node=393709, n_paths=5
    #[inline]
    fn allocate(&self) -> MintVec<Vec<(NotNan<f64>, Vec<EdgeWithNodes>)>> {
        trace!("ALLOC vecvecpathedge object best_paths_by_node");
        metrics::OBJECTPOOL_BEST_PATHS_BY_NODE_NEW_ALLOCATIONS.inc();

        let inf_value = match &self.swap_mode {
            SwapMode::ExactIn => f64::NEG_INFINITY,
            SwapMode::ExactOut => f64::INFINITY,
        };

        MintVec::new_from_prototype(
            self.out_edges_per_node,
            vec![(NotNan::new(inf_value).unwrap(), vec![]); self.n_paths],
        )
    }
}

#[cfg(test)]
pub(crate) fn alloc_best_by_node_for_test(out_edges_per_node: usize) -> Vec<BestVec3> {
    vec![[0f64; 3]; 8 * out_edges_per_node]
}

#[cfg(test)]
pub(crate) fn alloc_best_paths_by_node_for_test(
    out_edges_per_node: usize,
    n_paths: usize,
    swap_mode: SwapMode,
) -> MintVec<Vec<(NotNan<f64>, Vec<EdgeWithNodes>)>> {
    let inf_value = match &swap_mode {
        SwapMode::ExactIn => f64::NEG_INFINITY,
        SwapMode::ExactOut => f64::INFINITY,
    };

    MintVec::new_from_prototype(
        out_edges_per_node,
        vec![(NotNan::new(inf_value).unwrap(), vec![]); n_paths],
    )
}

@@ -0,0 +1,186 @@
use crate::edge::Edge;
use mango_feeds_connector::chain_data::AccountData;
use solana_program::pubkey::Pubkey;
use std::collections::HashMap;
use std::fmt::{Debug, Display};
use std::ops::{Deref, DerefMut, Index, IndexMut};
use std::sync::Arc;
use std::vec::IntoIter;
use tracing::log::trace;

/// Types

#[derive(Clone)]
pub struct RouteStep {
    pub edge: Arc<Edge>,
    pub in_amount: u64,
    pub out_amount: u64,
    pub fee_amount: u64,
    pub fee_mint: Pubkey,
}

// no clone
pub struct Route {
    pub input_mint: Pubkey,
    pub output_mint: Pubkey,
    pub in_amount: u64,
    pub out_amount: u64,
    pub price_impact_bps: u64,
    // TODO: allow for multiple paths
    pub steps: Vec<RouteStep>,
    pub slot: u64,
    pub accounts: Option<HashMap<Pubkey, AccountData>>,
}

#[derive(Clone)]
pub(crate) struct EdgeWithNodes {
    pub(crate) source_node: MintNodeIndex,
    pub(crate) target_node: MintNodeIndex,
    pub(crate) edge: EdgeIndex,
}

#[derive(Clone)]
pub(crate) struct EdgeInfo {
    pub(crate) price: f64,
    pub(crate) accounts: usize,
}

// very special type
pub(crate) type BestVec3 = [f64; 3];

/// Mint index

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct MintNodeIndex {
    idx: u32,
}

impl MintNodeIndex {
    // keep private
    fn idx(&self) -> usize {
        self.idx as usize
    }
    pub fn idx_raw(&self) -> u32 {
        self.idx
    }
}

impl From<usize> for MintNodeIndex {
    fn from(idx: usize) -> Self {
        MintNodeIndex { idx: idx as u32 }
    }
}

impl Display for MintNodeIndex {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}/mi", self.idx)
    }
}

/// Mint Vec

// indexed vector of mints
// look, ma! no Clone
pub struct MintVec<T>
where
    T: Clone,
{
    // Vec's little brother which cannot grow
    array: Box<[T]>,
}

impl<T: Clone> From<Vec<T>> for MintVec<T> {
    fn from(initial: Vec<T>) -> Self {
        MintVec {
            array: initial.into_boxed_slice(),
        }
    }
}

impl<T: Clone> MintVec<T> {
    // clone each element from the prototype
    pub fn new_from_prototype(size: usize, prototype: T) -> Self {
        trace!("init MintVec of size {}", size);
        MintVec {
            array: vec![prototype.clone(); size].into_boxed_slice(),
        }
    }

    pub fn new_from_constructor(size: usize, constructor: impl Fn() -> T) -> Self {
        trace!("init MintVec of size {}", size);
        MintVec {
            array: vec![constructor(); size].into_boxed_slice(),
        }
    }

    // copy from another MintVec without memory allocation
    pub fn try_clone_from(&mut self, other: &Self) -> bool {
        if self.array.len() != other.array.len() {
            return false;
        }
        self.array.clone_from_slice(&other.array);
        true
    }
}

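// MintVec can only be indexed with a MintNodeIndex, which makes it impossible
// to accidentally mix up mint indices and edge indices at the call sites.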
impl<T: Clone> Index<MintNodeIndex> for MintVec<T> {
    type Output = T;

    fn index(&self, index: MintNodeIndex) -> &Self::Output {
        &self.array[index.idx()]
    }
}

impl<T: Clone> IndexMut<MintNodeIndex> for MintVec<T> {
    fn index_mut(&mut self, index: MintNodeIndex) -> &mut Self::Output {
        &mut self.array[index.idx()]
    }
}

impl<T: Clone> Deref for MintVec<T> {
    type Target = [T];

    fn deref(&self) -> &Self::Target {
        &self.array
    }
}

impl<T: Clone> DerefMut for MintVec<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.array
    }
}

impl<T: Clone> IntoIterator for MintVec<T> {
    type Item = T;
    type IntoIter = IntoIter<T>;

    fn into_iter(self) -> Self::IntoIter {
        self.array.into_vec().into_iter()
    }
}

/// Edge index

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct EdgeIndex {
    idx: u32,
}

impl EdgeIndex {
    pub fn idx(&self) -> usize {
        self.idx as usize
    }
}

impl From<usize> for EdgeIndex {
    fn from(idx: usize) -> Self {
        EdgeIndex { idx: idx as u32 }
    }
}

impl Display for EdgeIndex {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}/ei", self.idx)
    }
}

@@ -0,0 +1,53 @@
use async_trait::async_trait;
use solana_client::nonblocking::rpc_client::RpcClient;
use solana_program::address_lookup_table::state::AddressLookupTable;
use solana_program::address_lookup_table::AddressLookupTableAccount;
use solana_program::pubkey::Pubkey;
use std::collections::HashMap;
use std::sync::RwLock;
use std::time::{Duration, Instant};

#[async_trait]
pub trait AltProvider {
    async fn get_alt(&self, address: Pubkey) -> anyhow::Result<AddressLookupTableAccount>;
}

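/// Resolves address lookup tables via RPC, caching results (including failed
/// lookups) for five minutes.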
pub struct RpcAltProvider {
    pub rpc_client: RpcClient,
    pub cache: RwLock<HashMap<Pubkey, (Instant, Option<AddressLookupTableAccount>)>>,
}

#[async_trait]
impl AltProvider for RpcAltProvider {
    async fn get_alt(&self, address: Pubkey) -> anyhow::Result<AddressLookupTableAccount> {
        {
            let locked = self.cache.read().unwrap();
            if let Some((update, hash)) = locked.get(&address) {
                if Instant::now().duration_since(*update) < Duration::from_secs(60 * 5) {
                    if let Some(acc) = hash.clone() {
                        return Ok(acc);
                    } else {
                        anyhow::bail!("address not found");
                    }
                }
            }
        }

        let Ok(alt_data) = self.rpc_client.get_account(&address).await else {
            let mut locked = self.cache.write().unwrap();
            locked.insert(address, (Instant::now(), None));
            anyhow::bail!("failed to load ALT");
        };

        let account = AddressLookupTableAccount {
            key: address,
            addresses: AddressLookupTable::deserialize(alt_data.data.as_slice())
                .unwrap()
                .addresses
                .to_vec(),
        };
        let mut locked = self.cache.write().unwrap();
        locked.insert(address, (Instant::now(), Some(account.clone())));
        Ok(account)
    }
}

@@ -0,0 +1,54 @@
use axum::http::StatusCode;
use axum::response::{IntoResponse, Response};
use serde_derive::Serialize;

// see https://github.com/tokio-rs/axum/blob/main/examples/error-handling/src/main.rs
// and https://github.com/tokio-rs/axum/blob/main/examples/anyhow-error-response/src/main.rs

pub enum AppError {
    Anyhow(anyhow::Error),
}

struct AppJson<T>(T);

impl<T> IntoResponse for AppJson<T>
where
    axum::Json<T>: IntoResponse,
{
    fn into_response(self) -> Response {
        axum::Json(self.0).into_response()
    }
}

// Tell axum how to convert `AppError` into a response.
impl IntoResponse for AppError {
    fn into_response(self) -> Response {
        #[derive(Serialize)]
        struct ErrorResponse {
            message: String,
        }

        let anyhow_message = match self {
            AppError::Anyhow(err) => err.to_string(),
        };

        (
            StatusCode::INTERNAL_SERVER_ERROR,
            AppJson(ErrorResponse {
                message: anyhow_message,
            }),
        )
            .into_response()
    }
}

// This enables using `?` on functions that return `Result<_, anyhow::Error>` to turn them into
// `Result<_, AppError>`. That way you don't need to do that manually.
impl<E> From<E> for AppError
where
    E: Into<anyhow::Error>,
{
    fn from(err: E) -> Self {
        AppError::Anyhow(err.into())
    }
}

@@ -0,0 +1,34 @@
use async_trait::async_trait;
use solana_client::nonblocking::rpc_client::RpcClient;
use solana_program::hash::Hash;
use std::sync::RwLock;
use std::time::{Duration, Instant};

#[async_trait]
pub trait HashProvider {
    async fn get_latest_hash(&self) -> anyhow::Result<Hash>;
}

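/// Serves the latest blockhash from RPC, cached for 500ms to keep request
/// volume down.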
pub struct RpcHashProvider {
    pub rpc_client: RpcClient,
    pub last_update: RwLock<Option<(Instant, Hash)>>,
}

#[async_trait]
impl HashProvider for RpcHashProvider {
    async fn get_latest_hash(&self) -> anyhow::Result<Hash> {
        {
            let locked = self.last_update.read().unwrap();
            if let Some((update, hash)) = *locked {
                if Instant::now().duration_since(update) < Duration::from_millis(500) {
                    return Ok(hash);
                }
            }
        }

        let hash = self.rpc_client.get_latest_blockhash().await?;
        let mut locked = self.last_update.write().unwrap();
        *locked = Some((Instant::now(), hash));
        Ok(hash)
    }
}

@@ -0,0 +1,639 @@
use crate::prelude::*;
use crate::server::errors::*;
use crate::server::route_provider::RouteProvider;
use axum::extract::Query;
use axum::response::Html;
use axum::{extract::Form, http::header::HeaderMap, routing, Json, Router};
use router_lib::model::quote_request::QuoteRequest;
use router_lib::model::quote_response::{QuoteAccount, QuoteResponse};
use router_lib::model::swap_request::{SwapForm, SwapRequest};
use router_lib::model::swap_response::{InstructionResponse, SwapIxResponse, SwapResponse};
use serde_json::Value;
use solana_program::address_lookup_table::AddressLookupTableAccount;
use solana_program::message::VersionedMessage;
use solana_sdk::account::ReadableAccount;
use solana_sdk::compute_budget::ComputeBudgetInstruction;
use solana_sdk::signature::NullSigner;
use solana_sdk::transaction::VersionedTransaction;
use std::time::Instant;
use tokio::task::JoinHandle;
use tower_http::cors::{AllowHeaders, AllowMethods, Any, CorsLayer};

use crate::alt::alt_optimizer;
use crate::ix_builder::SwapInstructionsBuilder;
use crate::routing_types::Route;
use crate::server::alt_provider::AltProvider;
use crate::server::hash_provider::HashProvider;
use crate::{debug_tools, metrics};
use router_config_lib::Config;
use router_lib::dex::{AccountProvider, AccountProviderView, SwapMode};
use router_lib::model::quote_response::{RoutePlan, SwapInfo};

// make sure the transaction can be executed
const MAX_ACCOUNTS_PER_TX: usize = 64;
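// 1232 bytes is Solana's maximum serialized transaction (packet) size:
// the IPv6 minimum MTU of 1280 bytes minus 48 bytes of headers.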
const MAX_TX_SIZE: usize = 1232;

pub struct HttpServer {
    pub join_handle: JoinHandle<()>,
}

impl HttpServer {
    pub async fn start<
        TRouteProvider: RouteProvider + Send + Sync + 'static,
        THashProvider: HashProvider + Send + Sync + 'static,
        TAltProvider: AltProvider + Send + Sync + 'static,
        TAccountProvider: AccountProvider + Send + Sync + 'static,
        TIxBuilder: SwapInstructionsBuilder + Send + Sync + 'static,
    >(
        route_provider: Arc<TRouteProvider>,
        hash_provider: Arc<THashProvider>,
        alt_provider: Arc<TAltProvider>,
        live_account_provider: Arc<TAccountProvider>,
        ix_builder: Arc<TIxBuilder>,
        config: Config,
        exit: tokio::sync::broadcast::Receiver<()>,
    ) -> anyhow::Result<HttpServer> {
        let join_handle = HttpServer::new_server(
            route_provider,
            hash_provider,
            alt_provider,
            live_account_provider,
            ix_builder,
            config,
            exit,
        )
        .await?;

        Ok(HttpServer { join_handle })
    }
}

impl HttpServer {
    async fn new_server<
        TRouteProvider: RouteProvider + Send + Sync + 'static,
        THashProvider: HashProvider + Send + Sync + 'static,
        TAltProvider: AltProvider + Send + Sync + 'static,
        TAccountProvider: AccountProvider + Send + Sync + 'static,
        TIxBuilder: SwapInstructionsBuilder + Send + Sync + 'static,
    >(
        route_provider: Arc<TRouteProvider>,
        hash_provider: Arc<THashProvider>,
        alt_provider: Arc<TAltProvider>,
        live_account_provider: Arc<TAccountProvider>,
        ix_builder: Arc<TIxBuilder>,
        config: Config,
        exit: tokio::sync::broadcast::Receiver<()>,
    ) -> anyhow::Result<JoinHandle<()>> {
        let addr = &config.server.address;
        let alt = config.routing.lookup_tables.clone();
        let should_reprice = config
            .debug_config
            .as_ref()
            .map(|x| x.reprice_using_live_rpc)
            .unwrap_or(false);
        let reprice_frequency = if should_reprice {
            config
                .debug_config
                .as_ref()
                .map(|x| x.reprice_probability)
                .unwrap_or(1.0)
        } else {
            0.0
        };

        let app = Self::setup_router(
            alt,
            route_provider,
            hash_provider,
            alt_provider,
            live_account_provider,
            ix_builder,
            reprice_frequency,
        )?;
        let listener = tokio::net::TcpListener::bind(addr).await?;
        let handle = axum::serve(listener, app).with_graceful_shutdown(Self::shutdown_signal(exit));

        info!("HTTP Server started at {}", addr);

        let join_handle = tokio::spawn(async move {
            handle.await.expect("HTTP Server failed");
        });

        Ok(join_handle)
    }

    async fn shutdown_signal(mut exit: tokio::sync::broadcast::Receiver<()>) {
        exit.recv()
            .await
            .expect("listening to exit broadcast failed");
        warn!("shutting down http server...");
    }

    async fn quote_handler<
        TRouteProvider: RouteProvider + Send + Sync + 'static,
        THashProvider: HashProvider + Send + Sync + 'static,
        TAltProvider: AltProvider + Send + Sync + 'static,
        TAccountProvider: AccountProvider + Send + Sync + 'static,
        TIxBuilder: SwapInstructionsBuilder + Send + Sync + 'static,
    >(
        address_lookup_table_addresses: Vec<String>,
        route_provider: Arc<TRouteProvider>,
        hash_provider: Arc<THashProvider>,
|
||||
alt_provider: Arc<TAltProvider>,
|
||||
live_account_provider: Arc<TAccountProvider>,
|
||||
ix_builder: Arc<TIxBuilder>,
|
||||
reprice_probability: f64,
|
||||
Form(input): Form<QuoteRequest>,
|
||||
) -> Result<Json<Value>, AppError> {
|
||||
let started_at = Instant::now();
|
||||
let input_mint = Pubkey::from_str(&input.input_mint)?;
|
||||
let output_mint = Pubkey::from_str(&input.output_mint)?;
|
||||
let swap_mode = input.swap_mode.or(input.mode).unwrap_or_default();
|
||||
let mut max_accounts = input.max_accounts.unwrap_or(64) as usize;
|
||||
|
||||
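        // Find a route that actually fits into a single transaction: build a candidate,
        // compile it into a throwaway transaction (random payer, zero threshold), and if
        // it exceeds the account or byte limits, tighten max_accounts by 5 and retry
        // until it fits or max_accounts drops below 10.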
        let route = loop {
            let route_candidate = route_provider.best_quote(
                input_mint,
                output_mint,
                input.amount,
                max_accounts,
                swap_mode,
            )?;

            let (bytes, accounts_count) = Self::build_swap_tx(
                address_lookup_table_addresses.clone(),
                hash_provider.clone(),
                alt_provider.clone(),
                ix_builder.clone(),
                &route_candidate,
                Pubkey::new_unique().to_string(),
                true,
                true,
                0,
                "0".to_string(),
                swap_mode,
            )
            .await?;

            let tx_size = bytes.len();
            if accounts_count <= MAX_ACCOUNTS_PER_TX && tx_size < MAX_TX_SIZE {
                break Ok(route_candidate);
            } else if max_accounts >= 10 {
                warn!("TX too big ({tx_size} bytes, {accounts_count} accounts), retrying with fewer accounts; max_accounts was {max_accounts}..");
                max_accounts -= 5;
            } else {
                break Err(anyhow::format_err!(
                    "TX too big ({tx_size} bytes, {accounts_count} accounts), aborting"
                ));
            }
        };

        let route: Route = route?;

        Self::log_repriced_amount(live_account_provider, reprice_probability, &route);
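        // Worked example (ExactIn): out_amount = 1_000_000 and slippage_bps = 50 gives
        // floor(1_000_000 * (10_000 - 50) / 10_000) = 995_000, the minimum output the
        // caller will accept; for ExactOut the bound goes the other way and caps the input.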
        let other_amount_threshold = if swap_mode == SwapMode::ExactOut {
            (route.in_amount as f64 * (10_000f64 + input.slippage_bps as f64) / 10_000f64).floor()
                as u64
        } else {
            ((route.out_amount as f64 * (10_000f64 - input.slippage_bps as f64)) / 10_000f64)
                .floor() as u64
        };

        let route_plan = route
            .steps
            .iter()
            .map(|step| RoutePlan {
                percent: 100,
                swap_info: Some(SwapInfo {
                    amm_key: step.edge.key().to_string(),
                    label: Some(step.edge.dex.name().to_string()),
                    input_mint: step.edge.input_mint.to_string(),
                    output_mint: step.edge.output_mint.to_string(),
                    in_amount: step.in_amount.to_string(),
                    out_amount: step.out_amount.to_string(),
                    fee_amount: step.fee_amount.to_string(),
                    fee_mint: step.fee_mint.to_string(),
                }),
            })
            .collect_vec();

        let accounts = match route.accounts {
            None => None,
            Some(a) => Some(
                a.iter()
                    .map(|x| QuoteAccount {
                        address: x.0.to_string(),
                        slot: x.1.slot,
                        data: x.1.account.data().iter().copied().collect::<Vec<u8>>(),
                    })
                    .collect(),
            ),
        };

        let context_slot = route.slot;
        let json_response = serde_json::json!(QuoteResponse {
            input_mint: input_mint.to_string(),
            in_amount: Some(route.in_amount.to_string()),
            output_mint: output_mint.to_string(),
            out_amount: route.out_amount.to_string(),
            other_amount_threshold: other_amount_threshold.to_string(),
            swap_mode: swap_mode.to_string(),
            slippage_bps: input.slippage_bps as i32,
            platform_fee: None, // TODO
            price_impact_pct: (route.price_impact_bps as f64 / 100.0).to_string(),
            route_plan,
            accounts,
            context_slot,
            time_taken: started_at.elapsed().as_secs_f64(),
        });

        Ok(Json(json_response))
    }

    async fn swap_handler<
        TRouteProvider: RouteProvider + Send + Sync + 'static,
        THashProvider: HashProvider + Send + Sync + 'static,
        TAltProvider: AltProvider + Send + Sync + 'static,
        TAccountProvider: AccountProvider + Send + Sync + 'static,
        TIxBuilder: SwapInstructionsBuilder + Send + Sync + 'static,
    >(
        address_lookup_table_addresses: Vec<String>,
        route_provider: Arc<TRouteProvider>,
        hash_provider: Arc<THashProvider>,
        alt_provider: Arc<TAltProvider>,
        live_account_provider: Arc<TAccountProvider>,
        ix_builder: Arc<TIxBuilder>,
        reprice_probability: f64,
        Query(_query): Query<SwapForm>,
        Json(input): Json<SwapRequest>,
    ) -> Result<Json<Value>, AppError> {
        let route = route_provider.try_from(&input.quote_response)?;

        Self::log_repriced_amount(live_account_provider, reprice_probability, &route);

        let swap_mode: SwapMode = SwapMode::from_str(&input.quote_response.swap_mode)
            .map_err(|_| anyhow::Error::msg("Invalid SwapMode"))?;

        let (bytes, _) = Self::build_swap_tx(
            address_lookup_table_addresses,
            hash_provider,
            alt_provider,
            ix_builder,
            &route,
            input.user_public_key,
            input.wrap_and_unwrap_sol,
            input.auto_create_out_ata,
            input.quote_response.slippage_bps,
            input.quote_response.other_amount_threshold,
            swap_mode,
        )
        .await?;

        let json_response = serde_json::json!(SwapResponse {
            swap_transaction: bytes,
            last_valid_block_height: input.quote_response.context_slot,
            priorization_fee_lamports: 100_000,
        });

        Ok(Json(json_response))
    }

    fn log_repriced_amount<TAccountProvider: AccountProvider + Send + Sync + 'static>(
        live_account_provider: Arc<TAccountProvider>,
        reprice_probability: f64,
        route: &Route,
    ) {
        let should_reprice = rand::random::<f64>() < reprice_probability;
        if !should_reprice {
            return;
        }

        let repriced_out_amount = reprice(&route, live_account_provider);
        match repriced_out_amount {
            Ok(repriced_out) => {
                let diff = ((repriced_out as f64 / route.out_amount as f64) - 1.0) * 10000.0;
                let pair = format!(
                    "{}-{}",
                    debug_tools::name(&route.input_mint),
                    debug_tools::name(&route.output_mint)
                );
                metrics::REPRICING_DIFF_BPS
                    .with_label_values(&[&pair])
                    .set(diff);

                info!(
                    "Router quote: {}, Rpc quote: {}, Diff: {:.1}bps",
                    route.out_amount, repriced_out, diff
                );
            }
            Err(e) => {
                warn!("Repricing failed: {:?}", e)
            }
        }
    }

    async fn build_swap_tx<
        THashProvider: HashProvider + Send + Sync + 'static,
        TAltProvider: AltProvider + Send + Sync + 'static,
        TIxBuilder: SwapInstructionsBuilder + Send + Sync + 'static,
    >(
        address_lookup_table_addresses: Vec<String>,
        hash_provider: Arc<THashProvider>,
        alt_provider: Arc<TAltProvider>,
        ix_builder: Arc<TIxBuilder>,
        route_plan: &Route,
        wallet_pk: String,
        wrap_unwrap_sol: bool,
        auto_create_out_ata: bool,
        slippage_bps: i32,
        other_amount_threshold: String,
        swap_mode: SwapMode,
    ) -> Result<(Vec<u8>, usize), AppError> {
        let wallet_pk = Pubkey::from_str(&wallet_pk)?;

        let ixs = ix_builder.build_ixs(
            &wallet_pk,
            route_plan,
            wrap_unwrap_sol,
            auto_create_out_ata,
            slippage_bps,
            other_amount_threshold.parse()?,
            swap_mode,
        )?;
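        // set_compute_unit_price is denominated in micro-lamports per compute unit, so
        // 10_000 here means 0.01 lamports of priority fee per CU; the CU limit is taken
        // from the instruction builder's estimate for this route.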
        let compute_budget_ixs = vec![
            ComputeBudgetInstruction::set_compute_unit_price(10_000), // ~0.01 lamport / CU
            ComputeBudgetInstruction::set_compute_unit_limit(ixs.cu_estimate),
        ];

        let transaction_addresses = ixs.accounts().into_iter().collect();
        let instructions = ixs
            .setup_instructions
            .into_iter()
            .chain(compute_budget_ixs.into_iter())
            .chain(vec![ixs.swap_instruction].into_iter())
            .chain(ixs.cleanup_instructions.into_iter())
            .collect_vec();

        let all_alts = Self::load_all_alts(address_lookup_table_addresses, alt_provider).await;
        let alts = alt_optimizer::get_best_alt(&all_alts, &transaction_addresses)?;
        let accounts = transaction_addresses.iter().unique().count()
            + alts.iter().map(|x| x.key).unique().count();

        let v0_message = solana_sdk::message::v0::Message::try_compile(
            &wallet_pk,
            instructions.as_slice(),
            alts.as_slice(),
            hash_provider.get_latest_hash().await?,
        )?;

        let message = VersionedMessage::V0(v0_message);
        let tx = VersionedTransaction::try_new(message, &[&NullSigner::new(&wallet_pk)])?;
        let bytes = bincode::serialize(&tx)?;

        Ok((bytes, accounts))
    }

    async fn swap_ix_handler<
        TRouteProvider: RouteProvider + Send + Sync + 'static,
        TAltProvider: AltProvider + Send + Sync + 'static,
        TIxBuilder: SwapInstructionsBuilder + Send + Sync + 'static,
    >(
        address_lookup_table_addresses: Vec<String>,
        route_provider: Arc<TRouteProvider>,
        alt_provider: Arc<TAltProvider>,
        ix_builder: Arc<TIxBuilder>,
        Query(_query): Query<SwapForm>,
        Json(input): Json<SwapRequest>,
    ) -> Result<Json<Value>, AppError> {
        let wallet_pk = Pubkey::from_str(&input.user_public_key)?;

        let route_plan = route_provider.try_from(&input.quote_response)?;
        let swap_mode: SwapMode = SwapMode::from_str(&input.quote_response.swap_mode)
            .map_err(|_| anyhow::Error::msg("Invalid SwapMode"))?;

        let ixs = ix_builder.build_ixs(
            &wallet_pk,
            &route_plan,
            input.wrap_and_unwrap_sol,
            input.auto_create_out_ata,
            input.quote_response.slippage_bps,
            input.quote_response.other_amount_threshold.parse()?,
            swap_mode,
        )?;

        let transaction_addresses = ixs.accounts().into_iter().collect();
        let all_alts = Self::load_all_alts(address_lookup_table_addresses, alt_provider).await;
        let alts = alt_optimizer::get_best_alt(&all_alts, &transaction_addresses)?;

        let swap_ix = InstructionResponse::from_ix(ixs.swap_instruction)?;
        let setup_ixs: anyhow::Result<Vec<_>> = ixs
            .setup_instructions
            .into_iter()
            .map(|x| InstructionResponse::from_ix(x))
            .collect();
        let cleanup_ixs: anyhow::Result<Vec<_>> = ixs
            .cleanup_instructions
            .into_iter()
            .map(|x| InstructionResponse::from_ix(x))
            .collect();

        let compute_budget_ixs = vec![
            InstructionResponse::from_ix(ComputeBudgetInstruction::set_compute_unit_price(10_000))?, // ~0.01 lamport / CU
            InstructionResponse::from_ix(ComputeBudgetInstruction::set_compute_unit_limit(
                ixs.cu_estimate,
            ))?,
        ];

        let json_response = serde_json::json!(SwapIxResponse {
            token_ledger_instruction: None,
            compute_budget_instructions: Some(compute_budget_ixs),
            setup_instructions: Some(setup_ixs?),
            swap_instruction: swap_ix,
            cleanup_instructions: Some(cleanup_ixs?),
            address_lookup_table_addresses: Some(alts.iter().map(|x| x.key.to_string()).collect()),
        });

        Ok(Json(json_response))
    }

    async fn handler() -> Html<&'static str> {
        Html("マンゴールーター")
    }

    fn extract_client_key(headers: &HeaderMap) -> &str {
        if let Some(client_key) = headers.get("x-client-key") {
            client_key.to_str().unwrap_or("invalid")
        } else {
            "unknown"
        }
    }

    fn setup_router<
        TRouteProvider: RouteProvider + Send + Sync + 'static,
        THashProvider: HashProvider + Send + Sync + 'static,
        TAltProvider: AltProvider + Send + Sync + 'static,
        TAccountProvider: AccountProvider + Send + Sync + 'static,
        TIxBuilder: SwapInstructionsBuilder + Send + Sync + 'static,
    >(
        address_lookup_tables: Vec<String>,
        route_provider: Arc<TRouteProvider>,
        hash_provider: Arc<THashProvider>,
        alt_provider: Arc<TAltProvider>,
        live_account_provider: Arc<TAccountProvider>,
        ix_builder: Arc<TIxBuilder>,
        reprice_probability: f64,
    ) -> anyhow::Result<Router<()>> {
        metrics::HTTP_REQUESTS_FAILED.reset();

        let mut router = Router::new();
        let cors = CorsLayer::new()
            .allow_methods(AllowMethods::any())
            .allow_headers(AllowHeaders::any())
            .allow_origin(Any);

        router = router.route("/", routing::get(Self::handler));

        let alt = address_lookup_tables.clone();
        let rp = route_provider.clone();
        let hp = hash_provider.clone();
        let altp = alt_provider.clone();
        let lap = live_account_provider.clone();
        let ixb = ix_builder.clone();
        router = router.route(
            "/quote",
            routing::get(move |headers, form| async move {
                let client_key = Self::extract_client_key(&headers);
                let timer = metrics::HTTP_REQUEST_TIMING
                    .with_label_values(&["quote", client_key])
                    .start_timer();

                let response =
                    Self::quote_handler(alt, rp, hp, altp, lap, ixb, reprice_probability, form)
                        .await;

                match response {
                    Ok(_) => {
                        timer.observe_duration();
                        metrics::HTTP_REQUESTS_TOTAL
                            .with_label_values(&["quote", client_key])
                            .inc();
                    }
                    Err(_) => {
                        metrics::HTTP_REQUESTS_FAILED
                            .with_label_values(&["quote", client_key])
                            .inc();
                    }
                }
                response
            }),
        );

        let alt = address_lookup_tables.clone();
        let rp = route_provider.clone();
        let hp = hash_provider.clone();
        let altp = alt_provider.clone();
        let lap = live_account_provider.clone();
        let ixb = ix_builder.clone();
        router = router.route(
            "/swap",
            routing::post(move |headers, query, form| async move {
                let client_key = Self::extract_client_key(&headers);
                let timer = metrics::HTTP_REQUEST_TIMING
                    .with_label_values(&["swap", client_key])
                    .start_timer();

                let response = Self::swap_handler(
                    alt,
                    rp,
                    hp,
                    altp,
                    lap,
                    ixb,
                    reprice_probability,
                    query,
                    form,
                )
                .await;

                match response {
                    Ok(_) => {
                        timer.observe_duration();
                        metrics::HTTP_REQUESTS_TOTAL
                            .with_label_values(&["swap", client_key])
                            .inc();
                    }
                    Err(_) => {
                        metrics::HTTP_REQUESTS_FAILED
                            .with_label_values(&["swap", client_key])
                            .inc();
                    }
                }
                response
            }),
        );

        let alt = address_lookup_tables.clone();
        let rp = route_provider.clone();
        let altp = alt_provider.clone();
        let ixb = ix_builder.clone();
        router = router.route(
            "/swap-instructions",
            routing::post(move |headers, query, form| async move {
                let client_key = Self::extract_client_key(&headers);
                let timer = metrics::HTTP_REQUEST_TIMING
                    .with_label_values(&["swap-ix", client_key])
                    .start_timer();

                let response = Self::swap_ix_handler(alt, rp, altp, ixb, query, form).await;

                match response {
                    Ok(_) => {
                        timer.observe_duration();
                        metrics::HTTP_REQUESTS_TOTAL
                            .with_label_values(&["swap-ix", client_key])
                            .inc();
                    }
                    Err(_) => {
                        metrics::HTTP_REQUESTS_FAILED
                            .with_label_values(&["swap-ix", client_key])
                            .inc();
                    }
                }
                response
            }),
        );

        router = router.layer(cors);
        Ok(router)
    }

    async fn load_all_alts<TAltProvider: AltProvider + Send + Sync + 'static>(
        address_lookup_table_addresses: Vec<String>,
        alt_provider: Arc<TAltProvider>,
    ) -> Vec<AddressLookupTableAccount> {
        let mut all_alts = vec![];
        for alt in address_lookup_table_addresses {
            match alt_provider.get_alt(Pubkey::from_str(&alt).unwrap()).await {
                Ok(alt) => all_alts.push(alt),
                Err(_) => {}
            }
        }
        all_alts
    }
}
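// Re-quote a route against live account data by walking its steps in order and feeding
// each step's output amount into the next step; used only for the sampled repricing
// diagnostics in log_repriced_amount above.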
fn reprice<TAccountProvider: AccountProvider + Send + Sync + 'static>(
    route: &Route,
    account_provider: Arc<TAccountProvider>,
) -> anyhow::Result<u64> {
    let account_provider = account_provider.clone() as AccountProviderView;
    let mut amount = route.in_amount;
    for step in &route.steps {
        let prepared_quote = step.edge.prepare(&account_provider)?;
        let quote = step.edge.quote(&prepared_quote, &account_provider, amount);
        amount = quote?.out_amount;
    }
    Ok(amount)
}
@ -0,0 +1,31 @@
use mango_feeds_connector::chain_data::AccountData;
use router_lib::dex::AccountProvider;
use solana_client::rpc_client::RpcClient;
use solana_program::pubkey::Pubkey;
use solana_sdk::account::AccountSharedData;
use solana_sdk::commitment_config::CommitmentConfig;

pub struct LiveAccountProvider {
    pub rpc_client: RpcClient,
}

impl AccountProvider for LiveAccountProvider {
    fn account(&self, address: &Pubkey) -> anyhow::Result<AccountData> {
        let response = self
            .rpc_client
            .get_account_with_commitment(address, CommitmentConfig::processed())?;
        let account = response
            .value
            .ok_or(anyhow::format_err!("failed to retrieve account"))?;

        Ok(AccountData {
            slot: response.context.slot,
            write_version: 0,
            account: AccountSharedData::from(account),
        })
    }

    fn newest_processed_slot(&self) -> u64 {
        panic!("not implemented")
    }
}
@ -0,0 +1,6 @@
pub mod alt_provider;
mod errors;
pub mod hash_provider;
pub mod http_server;
pub mod live_account_provider;
pub mod route_provider;
@ -0,0 +1,207 @@
use solana_program::pubkey::Pubkey;

use router_lib::model::quote_response::QuoteResponse;

use crate::debug_tools;
use crate::hot_mints::HotMintsCache;
use crate::prelude::*;
use crate::routing::Routing;
use crate::routing_types::{Route, RouteStep};
use crate::token_cache::TokenCache;
use router_config_lib::SafetyCheckConfig;
use router_lib::dex::{AccountProviderView, SwapMode};
use router_lib::price_feeds::price_cache::PriceCache;

pub trait RouteProvider {
    fn prepare_pruned_edges_and_cleanup_cache(
        &self,
        hot_mints: &HashSet<Pubkey>,
        swap_mode: SwapMode,
    );

    fn prepare_cache_for_input_mint<F>(
        &self,
        from_mint: Pubkey,
        amount_native: u64,
        max_accounts: usize,
        filter: F,
    ) -> anyhow::Result<()>
    where
        F: Fn(&Pubkey, &Pubkey) -> bool;

    fn best_quote(
        &self,
        from_mint: Pubkey,
        to_mint: Pubkey,
        amount_native: u64,
        max_accounts: usize,
        swap_mode: SwapMode,
    ) -> anyhow::Result<Route>;

    fn try_from(&self, quote_response: &QuoteResponse) -> anyhow::Result<Route>;
}

pub struct RoutingRouteProvider {
    pub chain_data: AccountProviderView,
    pub routing: Arc<Routing>,
    pub prices: PriceCache,
    pub tokens: TokenCache,
    pub config: SafetyCheckConfig,
    pub hot_mints: Arc<RwLock<HotMintsCache>>,
}

impl RouteProvider for RoutingRouteProvider {
    fn prepare_pruned_edges_and_cleanup_cache(
        &self,
        hot_mints: &HashSet<Pubkey>,
        swap_mode: SwapMode,
    ) {
        self.routing
            .prepare_pruned_edges_and_cleanup_cache(hot_mints, swap_mode)
    }

    fn prepare_cache_for_input_mint<F>(
        &self,
        from_mint: Pubkey,
        amount_native: u64,
        max_accounts: usize,
        filter: F,
    ) -> anyhow::Result<()>
    where
        F: Fn(&Pubkey, &Pubkey) -> bool,
    {
        self.routing
            .prepare_cache_for_input_mint(&from_mint, amount_native, max_accounts, filter)
    }

    // called per request
    #[tracing::instrument(skip_all, level = "trace")]
    fn best_quote(
        &self,
        from_mint: Pubkey,
        to_mint: Pubkey,
        amount_native: u64,
        max_accounts: usize,
        swap_mode: SwapMode,
    ) -> anyhow::Result<Route> {
        let hot_mints = {
            let mut hot_mints_guard = self.hot_mints.write().unwrap();
            hot_mints_guard.add(from_mint);
            hot_mints_guard.add(to_mint);
            hot_mints_guard.get()
        };

        let route = self.routing.find_best_route(
            &self.chain_data,
            &from_mint,
            &to_mint,
            amount_native,
            max_accounts,
            false,
            &hot_mints,
            None,
            swap_mode,
        )?;

        if !self.config.check_quote_out_amount_deviation {
            return Ok(route);
        }

        let in_token = self.tokens.token(from_mint)?;
        let out_token = self.tokens.token(to_mint)?;
        let in_multiplier = 10u64.pow(in_token.decimals as u32) as f64;
        let out_multiplier = 10u64.pow(out_token.decimals as u32) as f64;

        let in_price_ui = self.prices.price_ui(from_mint);
        let out_price_ui = self.prices.price_ui(to_mint);

        if in_price_ui.is_none() || out_price_ui.is_none() {
            error!("Refusing to quote - missing $ price, can't apply safety check");
            anyhow::bail!("Refusing to quote - missing $ price, can't apply safety check");
        }

        let out_amount_native = route.out_amount;

        let in_amount_usd = in_price_ui.unwrap_or(0.0) * amount_native as f64 / in_multiplier;
        let out_amount_usd =
            out_price_ui.unwrap_or(0.0) * out_amount_native as f64 / out_multiplier;
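        // Illustrative numbers (hypothetical config): with min_quote_out_to_in_amount_ratio
        // set to 0.9, a quote that turns $100 of input into less than $90 of output is
        // rejected as a likely mispricing instead of being returned to the caller.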
        if out_amount_usd < self.config.min_quote_out_to_in_amount_ratio * in_amount_usd {
            error!(
                from = debug_tools::name(&from_mint),
                to = debug_tools::name(&to_mint),
                in_amount_usd,
                out_amount_usd,
                amount_native,
                out_amount_native,
                in_price_ui,
                out_price_ui,
                route = route.steps.iter().map(|x| x.edge.desc()).join(" -> "),
                "Very bad route - refusing it",
            );
            anyhow::bail!(
                "Very bad route - refusing it: in_amount={}$, out_amount={}$ ({} of {} => {} of {})\r\n{}\r\nin_price={:?}, out_price={:?}",
                in_amount_usd,
                out_amount_usd,
                amount_native,
                debug_tools::name(&from_mint),
                out_amount_native,
                debug_tools::name(&to_mint),
                route.steps.iter().map(|x| x.edge.desc()).join(" -> "),
                in_price_ui,
                out_price_ui,
            );
        }

        info!(
            from = debug_tools::name(&from_mint),
            to = debug_tools::name(&to_mint),
            in_amount_usd,
            out_amount_usd,
            "Good route",
        );

        Ok(route)
    }
    fn try_from(&self, quote_response: &QuoteResponse) -> anyhow::Result<Route> {
        let input_mint = Pubkey::from_str(&quote_response.input_mint)?;
        let output_mint = Pubkey::from_str(&quote_response.output_mint)?;
        let in_amount = quote_response.in_amount.clone().unwrap().parse()?; // TODO Remove opt ? Handle exact out ?
        let out_amount = quote_response.out_amount.parse()?;
        let price_impact_pct: f64 = quote_response.price_impact_pct.parse()?;
        let price_impact_bps = (price_impact_pct * 100.0).round() as u64;
        let slot = quote_response.context_slot;

        let steps: anyhow::Result<Vec<_>> = quote_response
            .route_plan
            .clone()
            .into_iter()
            .map(|x| -> anyhow::Result<RouteStep> {
                let step = x.swap_info.unwrap(); // TODO
                Ok(RouteStep {
                    edge: self.routing.find_edge(
                        step.input_mint.parse()?,
                        step.output_mint.parse()?,
                        step.amm_key.parse()?,
                    )?,
                    in_amount: step.in_amount.parse()?,
                    out_amount: step.out_amount.parse()?,
                    fee_amount: step.fee_amount.parse()?,
                    fee_mint: step.fee_mint.parse()?,
                })
            })
            .collect();

        Ok(Route {
            input_mint,
            output_mint,
            in_amount,
            out_amount,
            price_impact_bps,
            slot,
            steps: steps?,
            accounts: None, // TODO FAS
        })
    }
}
@ -0,0 +1,34 @@
use router_config_lib::{string_or_env, AccountDataSourceConfig};
use solana_client::nonblocking::rpc_client::RpcClient;
use solana_sdk::commitment_config::CommitmentConfig;
use std::time;
use std::time::Duration;
use tokio::sync::broadcast;
use tokio::sync::broadcast::Sender;
use tokio::task::JoinHandle;

pub fn spawn_slot_watcher_job(config: &AccountDataSourceConfig) -> (JoinHandle<()>, Sender<u64>) {
    let (rpc_slot_sender, _) = broadcast::channel::<u64>(2048);
    let sender = rpc_slot_sender.clone();

    let processed_rpc = RpcClient::new_with_timeouts_and_commitment(
        string_or_env(config.rpc_http_url.clone()),
        time::Duration::from_secs(60), // request timeout
        CommitmentConfig::processed(),
        time::Duration::from_secs(60), // confirmation timeout
    );
    let slot_job = tokio::spawn(async move {
        let mut interval = tokio::time::interval(Duration::from_secs(5));
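        // the first tick of a tokio interval completes immediately, so consume it up
        // front; the loop below then polls the current slot once per 5s period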
        interval.tick().await;
        loop {
            interval.tick().await;
            let slot = processed_rpc.get_slot().await;
            if let Ok(slot) = slot {
                // ignore error for now
                let _err = sender.send(slot);
            }
        }
    });

    (slot_job, rpc_slot_sender)
}
@ -0,0 +1,35 @@
use std::collections::HashSet;

use mango_feeds_connector::SlotUpdate;
use solana_program::pubkey::Pubkey;

use router_config_lib::AccountDataSourceConfig;
use router_feed_lib::account_write::AccountOrSnapshotUpdate;
use router_feed_lib::get_program_account::FeedMetadata;

use crate::source::grpc_plugin_source;

pub async fn spawn_geyser_source(
    config: &AccountDataSourceConfig,
    exit_receiver: tokio::sync::broadcast::Receiver<()>,
    account_write_sender: async_channel::Sender<AccountOrSnapshotUpdate>,
    metadata_write_sender: async_channel::Sender<FeedMetadata>,
    slot_sender: async_channel::Sender<SlotUpdate>,
    subscribed_accounts: &HashSet<Pubkey>,
    subscribed_programs: &HashSet<Pubkey>,
    subscribed_token_accounts: &HashSet<Pubkey>,
    filters: &HashSet<Pubkey>,
) {
    grpc_plugin_source::process_events(
        config.clone(),
        subscribed_accounts.clone(),
        subscribed_programs.clone(),
        subscribed_token_accounts.clone(),
        filters.clone(),
        account_write_sender,
        Some(metadata_write_sender),
        slot_sender,
        exit_receiver,
    )
    .await;
}
@ -0,0 +1,787 @@
use futures::stream::once;
use itertools::Itertools;
use jsonrpc_core::futures::StreamExt;

use solana_sdk::pubkey::Pubkey;

use tokio_stream::StreamMap;
use yellowstone_grpc_proto::tonic::{
    metadata::MetadataValue,
    transport::{Channel, ClientTlsConfig},
    Request,
};

use anchor_spl::token::spl_token;
use async_channel::{Receiver, Sender};
use std::collections::HashSet;
use std::sync::Arc;
use std::time::Instant;
use std::{collections::HashMap, env, time::Duration};
use tracing::*;

use yellowstone_grpc_proto::prelude::{
    geyser_client::GeyserClient, subscribe_update, CommitmentLevel, SubscribeRequest,
    SubscribeRequestFilterAccounts, SubscribeRequestFilterSlots,
};

use crate::metrics;
use mango_feeds_connector::{chain_data::SlotStatus, SlotUpdate};
use router_config_lib::{AccountDataSourceConfig, GrpcSourceConfig};
use router_feed_lib::account_write::{AccountOrSnapshotUpdate, AccountWrite};
use router_feed_lib::get_program_account::{
    get_snapshot_gma, get_snapshot_gpa, get_snapshot_gta, CustomSnapshotProgramAccounts,
    FeedMetadata,
};
use router_feed_lib::utils::make_tls_config;
use solana_program::clock::Slot;
use tokio::sync::Semaphore;
use yellowstone_grpc_proto::geyser::subscribe_request_filter_accounts_filter::Filter;
use yellowstone_grpc_proto::geyser::{
    subscribe_request_filter_accounts_filter_memcmp, SubscribeRequestFilterAccountsFilter,
    SubscribeRequestFilterAccountsFilterMemcmp, SubscribeUpdateAccountInfo, SubscribeUpdateSlot,
};
use yellowstone_grpc_proto::tonic::codec::CompressionEncoding;

const MAX_GRPC_ACCOUNT_SUBSCRIPTIONS: usize = 100;
const MAX_GMA_ACCOUNTS: usize = 100;

// limit number of concurrent gMA/gPA requests
const MAX_PARALLEL_HEAVY_RPC_REQUESTS: usize = 4;

// GRPC network tuning
// see https://github.com/hyperium/tonic/blob/v0.10.2/tonic/src/transport/channel/mod.rs
const GRPC_CLIENT_BUFFER_SIZE: usize = 65536; // default: 1024
// see https://github.com/hyperium/hyper/blob/v0.14.28/src/proto/h2/client.rs#L45
const GRPC_CONN_WINDOW: u32 = 5242880; // 5MB
const GRPC_STREAM_WINDOW: u32 = 4194304; // default: 2MB
#[allow(clippy::large_enum_variant)]
pub enum SourceMessage {
    GrpcAccountUpdate(Slot, SubscribeUpdateAccountInfo),
    GrpcSlotUpdate(SubscribeUpdateSlot),
    Snapshot(CustomSnapshotProgramAccounts),
}

pub async fn feed_data_geyser(
    grpc_config: &GrpcSourceConfig,
    tls_config: Option<ClientTlsConfig>,
    snapshot_config: AccountDataSourceConfig,
    subscribed_accounts: &HashSet<Pubkey>,
    subscribed_programs: &HashSet<Pubkey>,
    subscribed_token_accounts: &HashSet<Pubkey>,
    sender: async_channel::Sender<SourceMessage>,
) -> anyhow::Result<()> {
    let use_compression = snapshot_config.rpc_support_compression.unwrap_or(false);
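    // Connection strings and tokens support "$NAME" indirection: a leading '$' means the
    // value is read from the NAME environment variable (e.g. "$GRPC_URL" as a hypothetical
    // variable name) instead of being stored verbatim in the config file.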
    let grpc_connection_string = match &grpc_config.connection_string.chars().next().unwrap() {
        '$' => env::var(&grpc_config.connection_string[1..])
            .expect("reading connection string from env"),
        _ => grpc_config.connection_string.clone(),
    };
    let snapshot_rpc_http_url = match &snapshot_config.rpc_http_url.chars().next().unwrap() {
        '$' => env::var(&snapshot_config.rpc_http_url[1..])
            .expect("reading connection string from env"),
        _ => snapshot_config.rpc_http_url.clone(),
    };
    info!("connecting to grpc source {}", grpc_connection_string);
    let endpoint = Channel::from_shared(grpc_connection_string)?;
    // TODO add grpc compression option
    let channel = if let Some(tls) = tls_config {
        endpoint.tls_config(tls)?
    } else {
        endpoint
    }
    .tcp_nodelay(true)
    .http2_adaptive_window(true)
    .buffer_size(GRPC_CLIENT_BUFFER_SIZE)
    .initial_connection_window_size(GRPC_CONN_WINDOW)
    .initial_stream_window_size(GRPC_STREAM_WINDOW)
    .connect()
    .await?;
    let token: Option<MetadataValue<_>> = match &grpc_config.token {
        Some(token) => {
            if token.is_empty() {
                None
            } else {
                match token.chars().next().unwrap() {
                    '$' => Some(
                        env::var(&token[1..])
                            .expect("reading token from env")
                            .parse()?,
                    ),
                    _ => Some(token.clone().parse()?),
                }
            }
        }
        None => None,
    };
    let mut client = GeyserClient::with_interceptor(channel, move |mut req: Request<()>| {
        if let Some(token) = &token {
            req.metadata_mut().insert("x-token", token.clone());
        }
        Ok(req)
    })
    .accept_compressed(CompressionEncoding::Gzip);

    let mut accounts_filter: HashSet<Pubkey> = HashSet::new();
    let mut accounts = HashMap::new();
    let mut slots = HashMap::new();
    let blocks = HashMap::new();
    let transactions = HashMap::new();
    let blocks_meta = HashMap::new();

    for program_id in subscribed_programs {
        accounts.insert(
            format!("client_owner_{program_id}").to_owned(),
            SubscribeRequestFilterAccounts {
                account: vec![],
                owner: vec![program_id.to_string()],
                filters: vec![],
            },
        );
    }
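    // SPL Token accounts are exactly 165 bytes, and the owner pubkey occupies bytes
    // 32..64 of the account data (right after the mint), so the two filters below
    // together select "all token accounts owned by owner_id".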
    for owner_id in subscribed_token_accounts {
        accounts.insert(
            format!("client_token_{owner_id}").to_owned(),
            SubscribeRequestFilterAccounts {
                account: vec![],
                owner: vec!["TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA".to_string()],
                filters: vec![
                    SubscribeRequestFilterAccountsFilter {
                        filter: Some(Filter::Datasize(165)),
                    },
                    SubscribeRequestFilterAccountsFilter {
                        filter: Some(Filter::Memcmp(SubscribeRequestFilterAccountsFilterMemcmp {
                            offset: 32,
                            data: Some(
                                subscribe_request_filter_accounts_filter_memcmp::Data::Bytes(
                                    owner_id.to_bytes().into_iter().collect(),
                                ),
                            ),
                        })),
                    },
                ],
            },
        );
    }

    if !subscribed_accounts.is_empty() {
        accounts.insert(
            "client_accounts".to_owned(),
            SubscribeRequestFilterAccounts {
                account: subscribed_accounts.iter().map(Pubkey::to_string).collect(),
                owner: vec![],
                filters: vec![],
            },
        );
        accounts_filter.extend(subscribed_accounts);
    }

    slots.insert(
        "client_slots".to_owned(),
        SubscribeRequestFilterSlots {
            filter_by_commitment: None,
        },
    );

    // could use "merge_streams", see geyser-grpc-connector
    let mut subscriptions = StreamMap::new();

    {
        let request = SubscribeRequest {
            blocks,
            blocks_meta,
            commitment: None,
            slots,
            transactions,
            accounts_data_slice: vec![],
            ping: None,
            ..Default::default()
        };
        let response = client.subscribe(once(async move { request })).await?;
        subscriptions.insert(usize::MAX, response.into_inner());
    }

    // account subscriptions may contain at most 100 accounts at a time
    let account_chunks = accounts
        .into_iter()
        .chunks(MAX_GRPC_ACCOUNT_SUBSCRIPTIONS)
        .into_iter()
        .map(|chunk| chunk.collect::<HashMap<String, SubscribeRequestFilterAccounts>>())
        .collect_vec();
    for (i, accounts) in account_chunks.into_iter().enumerate() {
        let request = SubscribeRequest {
            accounts,
            commitment: Some(CommitmentLevel::Processed as i32),
            accounts_data_slice: vec![],
            ping: None,
            ..Default::default()
        };
        let response = client.subscribe(once(async move { request })).await?;
        subscriptions.insert(i, response.into_inner());
    }

    // We can't get a snapshot immediately since the finalized snapshot would be for a
    // slot in the past and we'd be missing intermediate updates.
    //
    // Delay the request until the first slot we received all writes for becomes rooted
    // to avoid that problem - partially. The rooted slot will still be larger than the
    // finalized slot, so add a number of slots as a buffer.
    //
    // If that buffer isn't sufficient, there'll be a retry.

    // The first slot that we will receive _all_ account writes for
    let mut first_full_slot: u64 = u64::MAX;

    // If a snapshot should be performed when ready.
    let mut snapshot_needed = true;

    // The highest "rooted" slot that has been seen.
    let mut max_rooted_slot = 0;

    // Data for slots will arrive out of order. This value defines how many
    // slots after a slot was marked "rooted" we assume it'll not receive
    // any more account write information.
    //
    // This is important for the write_version mapping (to know when slots can
    // be dropped).
    let max_out_of_order_slots = 40;

    // Number of slots that we expect "finalized" commitment to lag
    // behind "rooted". This matters for getProgramAccounts based snapshots,
    // which will have "finalized" commitment.
    let mut rooted_to_finalized_slots = 30;

    let (snapshot_gma_sender, mut snapshot_gma_receiver) = tokio::sync::mpsc::unbounded_channel();
    // TODO log buffer size

    // The plugin sends a ping every 5s or so
    let fatal_idle_timeout = Duration::from_secs(15);
    let mut re_snapshot_interval = tokio::time::interval(Duration::from_secs(
        snapshot_config
            .re_snapshot_interval_secs
            .unwrap_or(60 * 60 * 12),
    ));
    re_snapshot_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
    re_snapshot_interval.tick().await;

    // Highest slot that an account write came in for.
    let mut newest_write_slot: u64 = 0;

    #[derive(Clone, Debug)]
    struct WriteVersion {
        // Write version seen on-chain
        global: u64,
        // FIXME clarify, rename
        // The per-pubkey per-slot write version
        per_slot_write_version: u32,
    }

    // map slot -> (pubkey -> WriteVersion)
    //
    // Since the write_version is a private identifier per node, it can't be used
    // to deduplicate events from multiple nodes. Here we rewrite it such that each
    // pubkey and each slot has a consecutive numbering of writes starting at 1.
    //
    // That number will be consistent for each node.
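    // Example: if one node reports global write_versions 1005, 1017, 1020 for a pubkey
    // within a slot, they are rewritten to 1, 2, 3 - and every other node derives the
    // same 1, 2, 3 for those writes, whatever its private write_version counters were.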
    let mut slot_pubkey_writes = HashMap::<u64, HashMap<[u8; 32], WriteVersion>>::new();

    let mut last_message_received_at = Instant::now();

    loop {
        tokio::select! {
            update = subscriptions.next() => {
                let Some(data) = update
                else {
                    anyhow::bail!("geyser plugin has closed the stream");
                };
                use subscribe_update::UpdateOneof;
                let update = data.1?;
                // use account and slot updates to trigger snapshot loading
                match &update.update_oneof {
                    Some(UpdateOneof::Slot(slot_update)) => {
                        trace!("received slot update for slot {}", slot_update.slot);
                        let status = slot_update.status;

                        debug!(
                            "slot_update: {} ({})",
                            slot_update.slot,
                            slot_update.status
                        );

                        if status == CommitmentLevel::Finalized as i32 {
                            if first_full_slot == u64::MAX {
                                // TODO: is this equivalent to before? what was highest_write_slot?
                                first_full_slot = slot_update.slot + 1;
                            }
                            // TODO rename rooted to finalized
                            if slot_update.slot > max_rooted_slot {
                                max_rooted_slot = slot_update.slot;

                                // drop data for slots that are well beyond rooted
                                slot_pubkey_writes.retain(|&k, _| k >= max_rooted_slot - max_out_of_order_slots);
                            }

                            let waiting_for_snapshot_slot = max_rooted_slot <= first_full_slot + rooted_to_finalized_slots;

                            if waiting_for_snapshot_slot {
                                debug!("waiting for snapshot slot: rooted={}, first_full={}, slot={}", max_rooted_slot, first_full_slot, slot_update.slot);
                            }

                            if snapshot_needed && !waiting_for_snapshot_slot {
                                snapshot_needed = false;

                                debug!("snapshot slot reached - setting up snapshot tasks");

                                let permits_parallel_rpc_requests = Arc::new(Semaphore::new(MAX_PARALLEL_HEAVY_RPC_REQUESTS));

                                info!("Requesting snapshot from gMA for {} filter accounts", accounts_filter.len());
                                for pubkey_chunk in accounts_filter.iter().chunks(MAX_GMA_ACCOUNTS).into_iter() {
                                    let rpc_http_url = snapshot_rpc_http_url.clone();
                                    let account_ids = pubkey_chunk.cloned().collect_vec();
                                    let sender = snapshot_gma_sender.clone();
                                    let permits = permits_parallel_rpc_requests.clone();
                                    tokio::spawn(async move {
                                        let _permit = permits.acquire().await.unwrap();
                                        let snapshot = get_snapshot_gma(&rpc_http_url, &account_ids).await;
                                        match sender.send(snapshot) {
                                            Ok(_) => {}
                                            Err(_) => {
                                                warn!("Could not send snapshot, grpc has probably reconnected");
                                            }
                                        }
                                    });
                                }

                                info!("Requesting snapshot from gPA for {} program filter accounts", subscribed_programs.len());
                                for program_id in subscribed_programs {
                                    let rpc_http_url = snapshot_rpc_http_url.clone();
                                    let program_id = *program_id;
                                    let sender = snapshot_gma_sender.clone();
                                    let permits = permits_parallel_rpc_requests.clone();
                                    tokio::spawn(async move {
                                        let _permit = permits.acquire().await.unwrap();
                                        let snapshot = get_snapshot_gpa(&rpc_http_url, &program_id, use_compression).await;
                                        match sender.send(snapshot) {
                                            Ok(_) => {}
                                            Err(_) => {
                                                warn!("Could not send snapshot, grpc has probably reconnected");
                                            }
                                        }
                                    });
                                }

                                info!("Requesting snapshot from gTA for {} owners filter accounts", subscribed_token_accounts.len());
                                for owner_id in subscribed_token_accounts {
                                    let rpc_http_url = snapshot_rpc_http_url.clone();
                                    let owner_id = owner_id.clone();
                                    let sender = snapshot_gma_sender.clone();
                                    let permits = permits_parallel_rpc_requests.clone();
                                    tokio::spawn(async move {
                                        let _permit = permits.acquire().await.unwrap();
                                        let snapshot = get_snapshot_gta(&rpc_http_url, &owner_id).await;
                                        match sender.send(snapshot) {
                                            Ok(_) => {}
                                            Err(_) => {
                                                warn!("Could not send snapshot, grpc has probably reconnected");
                                            }
                                        }
                                    });
                                }
                            }
                        }
                    },
                    Some(UpdateOneof::Account(info)) => {
                        let slot = info.slot;
                        trace!("received account update for slot {}", slot);
                        if slot < first_full_slot {
                            // Don't try to process data for slots where we may have missed writes:
                            // We could not map the write_version correctly for them.
                            continue;
                        }

                        if slot > newest_write_slot {
                            newest_write_slot = slot;
                            debug!(
                                "newest_write_slot: {}",
                                newest_write_slot
                            );
                        } else if max_rooted_slot > 0 && info.slot < max_rooted_slot - max_out_of_order_slots {
                            anyhow::bail!("received write {} slots back from max rooted slot {}", max_rooted_slot - slot, max_rooted_slot);
                        }

                        let pubkey_writes = slot_pubkey_writes.entry(slot).or_default();
                        let mut info = info.account.clone().unwrap();

                        let pubkey_bytes = Pubkey::try_from(info.pubkey).unwrap().to_bytes();
                        let write_version_mapping = pubkey_writes.entry(pubkey_bytes).or_insert(WriteVersion {
                            global: info.write_version,
                            per_slot_write_version: 1, // write version 0 is reserved for snapshots
                        });

                        // We assume we will receive write versions for each pubkey in sequence.
                        // If this is not the case, logic here does not work correctly because
                        // a later write could arrive first.
                        if info.write_version < write_version_mapping.global {
                            anyhow::bail!("unexpected write version: got {}, expected >= {}", info.write_version, write_version_mapping.global);
                        }

                        // Rewrite the update to use the local write version and bump it
                        info.write_version = write_version_mapping.per_slot_write_version as u64;
                        write_version_mapping.per_slot_write_version += 1;
                    },
                    Some(UpdateOneof::Ping(_)) => {
                        trace!("received grpc ping");
                    },
                    Some(_) => {
                        // ignore all other grpc update types
                    },
                    None => {
                        unreachable!();
                    }
                }

                let elapsed = last_message_received_at.elapsed().as_millis();
                metrics::GRPC_NO_MESSAGE_FOR_DURATION_MS.set(elapsed as i64);
                last_message_received_at = Instant::now();

                // send the incremental updates to the channel
                match update.update_oneof {
                    Some(UpdateOneof::Account(account_update)) => {
                        let info = account_update.account.unwrap();
                        sender.send(SourceMessage::GrpcAccountUpdate(account_update.slot as Slot, info)).await.expect("send success");
                    }
                    Some(UpdateOneof::Slot(slot_update)) => {
                        sender.send(SourceMessage::GrpcSlotUpdate(slot_update)).await.expect("send success");
                    }
                    _ => {}
                }
            },
            snapshot_message = snapshot_gma_receiver.recv() => {
                let Some(snapshot_result) = snapshot_message
                else {
                    anyhow::bail!("snapshot channel closed");
                };
                let snapshot = snapshot_result?;
                debug!("snapshot (program={}, m_accounts={}) is for slot {}, first full slot was {}",
                    snapshot.program_id.map(|x| x.to_string()).unwrap_or("none".to_string()),
                    snapshot.accounts.len(),
                    snapshot.slot,
                    first_full_slot);

                if snapshot.slot < first_full_slot {
                    warn!(
                        "snapshot is too old: has slot {}, expected {} minimum - request another one but also use this snapshot",
                        snapshot.slot,
                        first_full_slot
                    );
                    // try again in another 25 slots
                    snapshot_needed = true;
                    rooted_to_finalized_slots += 25;
                }

                // New - Don't care if the snapshot is old, we want startup to work anyway.
                // If an edge is not working properly, it will be disabled when swapping on it.
                sender
                    .send(SourceMessage::Snapshot(snapshot))
                    .await
                    .expect("send success");
            },
            _ = tokio::time::sleep(fatal_idle_timeout) => {
                anyhow::bail!("geyser plugin hasn't sent a message in too long");
            }
            _ = re_snapshot_interval.tick() => {
                info!("Re-snapshot hack");
                snapshot_needed = true;
            }
        }
    }
}
pub async fn process_events(
    config: AccountDataSourceConfig,
    subscription_accounts: HashSet<Pubkey>,
    subscription_programs: HashSet<Pubkey>,
    subscription_token_accounts: HashSet<Pubkey>,
    filters: HashSet<Pubkey>,
    account_write_queue_sender: async_channel::Sender<AccountOrSnapshotUpdate>,
    metadata_write_queue_sender: Option<async_channel::Sender<FeedMetadata>>,
    slot_queue_sender: async_channel::Sender<SlotUpdate>,
    mut exit: tokio::sync::broadcast::Receiver<()>,
) {
    // Subscribe to geyser
    let (msg_sender, msg_receiver) =
        async_channel::bounded::<SourceMessage>(config.dedup_queue_size);
    let mut source_jobs = vec![];

    // note: caller in main.rs ensures this
    assert_eq!(
        config.grpc_sources.len(),
        1,
        "only one grpc source supported"
    );
    for grpc_source in config.grpc_sources.clone() {
        let msg_sender = msg_sender.clone();
        let sub_accounts = subscription_accounts.clone();
        let sub_programs = subscription_programs.clone();
        let sub_token_accounts = subscription_token_accounts.clone();

        // Make TLS config if configured
        let tls_config = grpc_source.tls.as_ref().map(make_tls_config).or_else(|| {
            if grpc_source.connection_string.starts_with("https") {
                Some(ClientTlsConfig::new())
            } else {
                None
            }
        });

        let cfg = config.clone();

        source_jobs.push(tokio::spawn(async move {
            let mut error_count = 0;
            let mut last_error = Instant::now();

            // Continuously reconnect on failure
            loop {
                let out = feed_data_geyser(
                    &grpc_source,
                    tls_config.clone(),
                    cfg.clone(),
                    &sub_accounts,
                    &sub_programs,
                    &sub_token_accounts,
                    msg_sender.clone(),
                );
                if last_error.elapsed() > Duration::from_secs(60 * 10) {
                    error_count = 0;
                } else if error_count > 10 {
                    error!("error during communication with the geyser plugin - retried too many times, exiting..");
                    break;
                }

                match out.await {
                    // happy case!
                    Err(err) => {
                        warn!(
                            "error during communication with the geyser plugin - retrying: {:?}",
                            err
                        );
                        last_error = Instant::now();
                        error_count += 1;
                    }
                    // this should never happen
                    Ok(_) => {
                        error!("feed_data must return an error, not OK - continue");
                        last_error = Instant::now();
                        error_count += 1;
                    }
                }

                metrics::GRPC_SOURCE_CONNECTION_RETRIES
                    .with_label_values(&[&grpc_source.name])
                    .inc();

                tokio::time::sleep(std::time::Duration::from_secs(
                    grpc_source.retry_connection_sleep_secs,
                ))
                .await;
            }
        }));
    }

    // slot -> (pubkey -> write_version)
    //
    // To avoid unnecessarily forwarding duplicate events downstream, we track the latest
    // write_version for each (slot, pubkey). If an already-seen write_version comes in,
    // it can be safely discarded.
    let mut latest_write = HashMap::<Slot, HashMap<Pubkey, u64>>::new();

    // Number of slots to retain in latest_write
    let latest_write_retention = 50;

    let mut source_jobs: futures::stream::FuturesUnordered<_> = source_jobs.into_iter().collect();

    loop {
        tokio::select! {
            _ = source_jobs.next() => {
                warn!("shutting down grpc_plugin_source because subtask failed...");
                break;
            },
            _ = exit.recv() => {
                warn!("shutting down grpc_plugin_source...");
                break;
            }
            msg = msg_receiver.recv() => {
                match msg {
                    Ok(msg) => {
                        process_account_updated_from_sources(
                            &account_write_queue_sender,
                            &slot_queue_sender,
                            &msg_receiver,
                            msg,
                            &mut latest_write,
                            latest_write_retention,
                            &metadata_write_queue_sender,
                            &filters,
                        ).await;
                    }
                    Err(e) => {
                        warn!("failed to process grpc event: {:?}", e);
                        break;
                    }
                };
            },
        };
    }

    // close all channels to notify downstream CSPs of error
    account_write_queue_sender.close();
    metadata_write_queue_sender.map(|s| s.close());
    slot_queue_sender.close();
}
// consume channel with snapshot and update data
async fn process_account_updated_from_sources(
    account_write_queue_sender: &Sender<AccountOrSnapshotUpdate>,
    slot_queue_sender: &Sender<SlotUpdate>,
    msg_receiver: &Receiver<SourceMessage>,
    msg: SourceMessage,
    latest_write: &mut HashMap<Slot, HashMap<Pubkey, u64>>,
    // in slots
    latest_write_retention: u64,
    // metric_account_writes: &mut MetricU64,
    // metric_account_queue: &mut MetricU64,
    // metric_dedup_queue: &mut MetricU64,
    // metric_slot_queue: &mut MetricU64,
    // metric_slot_updates: &mut MetricU64,
    // metric_snapshots: &mut MetricU64,
    // metric_snapshot_account_writes: &mut MetricU64,
    metadata_write_queue_sender: &Option<Sender<FeedMetadata>>,
    filters: &HashSet<Pubkey>,
) {
    let metadata_sender = |msg| {
        if let Some(sender) = &metadata_write_queue_sender {
            sender.send_blocking(msg)
        } else {
            Ok(())
        }
    };

    metrics::GRPC_DEDUP_QUEUE.set(msg_receiver.len() as i64);
    match msg {
        SourceMessage::GrpcAccountUpdate(slot, update) => {
            assert!(update.pubkey.len() == 32);
            assert!(update.owner.len() == 32);

            metrics::GRPC_ACCOUNT_WRITES.inc();
            metrics::GRPC_ACCOUNT_WRITE_QUEUE.set(account_write_queue_sender.len() as i64);

            // Skip writes that a different server has already sent
            let pubkey_writes = latest_write.entry(slot).or_default();
            let pubkey = Pubkey::try_from(update.pubkey.clone()).unwrap();
            if !filters.contains(&pubkey) {
                return;
            }
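            // Keep only the highest write_version seen per (slot, pubkey): e.g. if
            // version 7 arrives and version 5 shows up later for the same account in
            // the same slot (duplicate stream or reconnect), the stale one is dropped.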
let writes = pubkey_writes.entry(pubkey).or_insert(0);
|
||||
if update.write_version <= *writes {
|
||||
return;
|
||||
}
|
||||
*writes = update.write_version;
|
||||
latest_write.retain(|&k, _| k >= slot - latest_write_retention);
|
||||
|
||||
let owner = Pubkey::try_from(update.owner.clone()).unwrap();
|
||||
|
||||
account_write_queue_sender
|
||||
.send(AccountOrSnapshotUpdate::AccountUpdate(AccountWrite {
|
||||
pubkey,
|
||||
slot,
|
||||
write_version: update.write_version,
|
||||
lamports: update.lamports,
|
||||
owner,
|
||||
executable: update.executable,
|
||||
rent_epoch: update.rent_epoch,
|
||||
data: update.data,
|
||||
}))
|
||||
.await
|
||||
.expect("send success");
|
||||
}
|
||||
SourceMessage::GrpcSlotUpdate(update) => {
|
||||
metrics::GRPC_SLOT_UPDATES.inc();
|
||||
metrics::GRPC_SLOT_UPDATE_QUEUE.set(slot_queue_sender.len() as i64);
|
||||
|
||||
let status = CommitmentLevel::try_from(update.status).map(|v| match v {
|
||||
CommitmentLevel::Processed => SlotStatus::Processed,
|
||||
CommitmentLevel::Confirmed => SlotStatus::Confirmed,
|
||||
CommitmentLevel::Finalized => SlotStatus::Rooted,
|
||||
});
|
||||
if status.is_err() {
|
||||
error!("unexpected slot status: {}", update.status);
|
||||
return;
|
||||
}
|
||||
let slot_update = SlotUpdate {
|
||||
slot: update.slot,
|
||||
parent: update.parent,
|
||||
status: status.expect("qed"),
|
||||
};
|
||||
|
||||
slot_queue_sender
|
||||
.send(slot_update)
|
||||
.await
|
||||
.expect("send success");
|
||||
}
|
||||
SourceMessage::Snapshot(update) => {
|
||||
let label = if let Some(prg) = update.program_id {
|
||||
if prg == spl_token::ID {
|
||||
"gpa(tokens)"
|
||||
} else {
|
||||
"gpa"
|
||||
}
|
||||
} else {
|
||||
"gma"
|
||||
};
|
||||
metrics::ACCOUNT_SNAPSHOTS
|
||||
.with_label_values(&[&label])
|
||||
.inc();
|
||||
debug!(
|
||||
"processing snapshot for program_id {} -> size={} & missing size={}...",
|
||||
update
|
||||
.program_id
|
||||
.map(|x| x.to_string())
|
||||
.unwrap_or("".to_string()),
|
||||
update.accounts.len(),
|
||||
update.missing_accounts.len()
|
||||
);
|
||||
if let Err(e) = metadata_sender(FeedMetadata::SnapshotStart(update.program_id)) {
|
||||
warn!("failed to send feed matadata event: {}", e);
|
||||
}
|
||||
|
||||
let mut updated_accounts = vec![];
|
||||
for account in update.accounts {
|
||||
metrics::GRPC_SNAPSHOT_ACCOUNT_WRITES.inc();
|
||||
metrics::GRPC_ACCOUNT_WRITE_QUEUE.set(account_write_queue_sender.len() as i64);
|
||||
|
||||
if !filters.contains(&account.pubkey) {
|
||||
continue;
|
||||
}
|
||||
|
||||
updated_accounts.push(account);
|
||||
}
|
||||
account_write_queue_sender
|
||||
.send(AccountOrSnapshotUpdate::SnapshotUpdate(updated_accounts))
|
||||
.await
|
||||
.expect("send success");
|
||||
|
||||
for account in update.missing_accounts {
|
||||
if let Err(e) = metadata_sender(FeedMetadata::InvalidAccount(account)) {
|
||||
warn!("failed to send feed matadata event: {}", e);
|
||||
}
|
||||
}
|
||||
debug!("processing snapshot done");
|
||||
if let Err(e) = metadata_sender(FeedMetadata::SnapshotEnd(update.program_id)) {
|
||||
warn!("failed to send feed matadata event: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,113 @@
|
|||
use anchor_lang::AccountDeserialize;
|
||||
use anchor_spl::token::Mint;
|
||||
use futures_util::future::join_all;
|
||||
use itertools::Itertools;
|
||||
use jsonrpc_core_client::transports::http;
|
||||
use router_feed_lib::solana_rpc_minimal::rpc_accounts_scan::RpcAccountsScanClient;
|
||||
use solana_account_decoder::UiAccountEncoding;
|
||||
use solana_client::rpc_config::RpcAccountInfoConfig;
|
||||
use solana_program::pubkey::Pubkey;
|
||||
use solana_sdk::account::Account;
|
||||
use solana_sdk::commitment_config::CommitmentConfig;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
use tokio::sync::Semaphore;
|
||||
use tracing::{info, trace};
|
||||
|
||||
const MAX_GMA_ACCOUNTS: usize = 100;
|
||||
// 4: 388028 mints -> 61 sec
|
||||
// 16: 388028 mints -> 35 sec
|
||||
const MAX_PARALLEL_HEAVY_RPC_REQUESTS: usize = 16;
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
pub struct Token {
|
||||
pub mint: Pubkey,
|
||||
pub decimals: u8,
|
||||
}
|
||||
|
||||
pub async fn request_mint_metadata(
|
||||
rpc_http_url: &str,
|
||||
mint_account_ids: &HashSet<Pubkey>,
|
||||
) -> HashMap<Pubkey, Token> {
|
||||
info!(
|
||||
"Requesting data for mint accounts via chunked gMA for {} pubkey ..",
|
||||
mint_account_ids.len()
|
||||
);
|
||||
let started_at = Instant::now();
|
||||
|
||||
let permits_parallel_rpc_requests = Arc::new(Semaphore::new(MAX_PARALLEL_HEAVY_RPC_REQUESTS));
|
||||
let rpc_client = http::connect::<RpcAccountsScanClient>(rpc_http_url)
|
||||
.await
|
||||
.unwrap();
|
||||
let rpc_client = Arc::new(rpc_client);
|
||||
let account_info_config = RpcAccountInfoConfig {
|
||||
encoding: Some(UiAccountEncoding::Binary),
|
||||
commitment: Some(CommitmentConfig::finalized()),
|
||||
data_slice: None,
|
||||
min_context_slot: None,
|
||||
};
|
||||
|
||||
let mut threads = Vec::new();
|
||||
let count = Arc::new(AtomicU64::new(0));
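// Fan out one chunked gMA request per task; the semaphore bounds concurrency so the RPC node isn't overwhelmed.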
|
||||
for pubkey_chunk in mint_account_ids.iter().chunks(MAX_GMA_ACCOUNTS).into_iter() {
|
||||
let pubkey_chunk = pubkey_chunk.into_iter().cloned().collect_vec();
|
||||
let count = count.clone();
|
||||
let rpc_client = rpc_client.clone();
|
||||
let account_ids = pubkey_chunk.iter().map(|x| x.to_string()).collect_vec();
|
||||
let account_info_config = account_info_config.clone();
|
||||
let permits = permits_parallel_rpc_requests.clone();
|
||||
let jh_thread = tokio::spawn(async move {
|
||||
let _permit = permits.acquire().await.unwrap();
|
||||
let accounts = rpc_client
|
||||
.get_multiple_accounts(account_ids.clone(), Some(account_info_config))
|
||||
.await
|
||||
.unwrap()
|
||||
.value;
|
||||
let accounts = pubkey_chunk.iter().cloned().zip(accounts).collect_vec();
|
||||
|
||||
let mut mint_accounts: HashMap<Pubkey, Token> = HashMap::with_capacity(accounts.len());
|
||||
for (account_pk, ui_account) in accounts {
|
||||
if let Some(ui_account) = ui_account {
|
||||
let mut account: Account = ui_account.decode().unwrap();
|
||||
let data = account.data.as_mut_slice();
|
||||
let mint_account = Mint::try_deserialize(&mut &*data).unwrap();
|
||||
trace!(
|
||||
"Mint Account {}: decimals={}",
|
||||
account_pk.to_string(),
|
||||
mint_account.decimals
|
||||
);
|
||||
mint_accounts.insert(
|
||||
account_pk,
|
||||
Token {
|
||||
mint: account_pk,
|
||||
decimals: mint_account.decimals,
|
||||
},
|
||||
);
|
||||
count.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
mint_accounts
|
||||
});
|
||||
threads.push(jh_thread);
|
||||
} // -- chunks
|
||||
|
||||
let mut merged: HashMap<Pubkey, Token> = HashMap::with_capacity(mint_account_ids.len());
|
||||
let maps = join_all(threads).await;
|
||||
for map in maps {
|
||||
let map = map.expect("thread must succeed");
|
||||
merged.extend(map);
|
||||
}
|
||||
|
||||
assert_eq!(merged.len() as u64, count.load(Ordering::Relaxed));
|
||||
|
||||
info!(
|
||||
"Received {} mint accounts via gMA in {:?}ms",
|
||||
count.load(Ordering::Relaxed),
|
||||
started_at.elapsed().as_secs_f64() * 1000.0
|
||||
);
|
||||
|
||||
merged
|
||||
}
|
|
@ -0,0 +1,3 @@
|
|||
pub mod geyser;
|
||||
pub mod grpc_plugin_source;
|
||||
pub mod mint_accounts_source;
|
|
@ -0,0 +1,29 @@
|
|||
use solana_program::instruction::Instruction;
|
||||
use solana_program::pubkey::Pubkey;
|
||||
use std::collections::HashSet;
|
||||
|
||||
pub struct Swap {
|
||||
pub setup_instructions: Vec<Instruction>,
|
||||
pub swap_instruction: Instruction,
|
||||
pub cleanup_instructions: Vec<Instruction>,
|
||||
pub cu_estimate: u32,
|
||||
}
|
||||
|
||||
impl Swap {
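/// Union of every account key referenced by the setup, swap and cleanup instructions.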
|
||||
pub fn accounts(&self) -> HashSet<Pubkey> {
|
||||
let mut transaction_addresses = HashSet::new();
|
||||
|
||||
for ix in self
|
||||
.setup_instructions
|
||||
.iter()
|
||||
.chain(self.cleanup_instructions.iter())
|
||||
.chain(std::iter::once(&self.swap_instruction))
|
||||
{
|
||||
for acc in &ix.accounts {
|
||||
transaction_addresses.insert(acc.pubkey);
|
||||
}
|
||||
}
|
||||
|
||||
transaction_addresses
|
||||
}
|
||||
}
|
|
@ -0,0 +1,18 @@
|
|||
use solana_program::program_stubs::{set_syscall_stubs, SyscallStubs};
|
||||
|
||||
// We don't want program output to spam our logs
|
||||
struct NoLogSyscallStubs;
|
||||
impl SyscallStubs for NoLogSyscallStubs {
|
||||
fn sol_log(&self, _message: &str) {
|
||||
// do nothing
|
||||
// TODO: optionally print it?
|
||||
}
|
||||
|
||||
fn sol_log_data(&self, _fields: &[&[u8]]) {
|
||||
// do nothing
|
||||
}
|
||||
}
|
||||
|
||||
pub fn deactivate_program_logs() {
|
||||
set_syscall_stubs(Box::new(NoLogSyscallStubs {}));
|
||||
}
|
|
@ -0,0 +1,11 @@
|
|||
use solana_program::pubkey::Pubkey;
|
||||
|
||||
pub trait ToPubkey {
|
||||
fn to_pubkey(&self) -> Pubkey;
|
||||
}
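// Expand a single byte into a pubkey of 32 repeated bytes (useful as a deterministic test key).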
|
||||
|
||||
impl ToPubkey for u8 {
|
||||
fn to_pubkey(&self) -> Pubkey {
|
||||
Pubkey::new_from_array([*self; 32])
|
||||
}
|
||||
}
|
|
@ -0,0 +1,48 @@
|
|||
use itertools::Itertools;
|
||||
use router_feed_lib::router_rpc_client::RouterRpcClient;
|
||||
use router_lib::dex::{DexEdgeIdentifier, DexInterface};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub async fn get_all_dex(
|
||||
mut rpc_client: &mut RouterRpcClient,
|
||||
) -> anyhow::Result<Vec<Arc<dyn DexInterface>>> {
|
||||
let orca_config = HashMap::from([
|
||||
(
|
||||
"program_id".to_string(),
|
||||
"whirLbMiicVdio4qvUfM5KAg6Ct8VwpYzGff3uctyCc".to_string(),
|
||||
),
|
||||
("program_name".to_string(), "Orca".to_string()),
|
||||
]);
|
||||
let cropper_config = HashMap::from([
|
||||
(
|
||||
"program_id".to_string(),
|
||||
"H8W3ctz92svYg6mkn1UtGfu2aQr2fnUFHM1RhScEtQDt".to_string(),
|
||||
),
|
||||
("program_name".to_string(), "Cropper".to_string()),
|
||||
]);
|
||||
|
||||
let dexs = [
|
||||
dex_orca::OrcaDex::initialize(&mut rpc_client, orca_config).await?,
|
||||
dex_orca::OrcaDex::initialize(&mut rpc_client, cropper_config).await?,
|
||||
dex_saber::SaberDex::initialize(&mut rpc_client, HashMap::new()).await?,
|
||||
dex_raydium_cp::RaydiumCpDex::initialize(&mut rpc_client, HashMap::new()).await?,
|
||||
dex_raydium::RaydiumDex::initialize(&mut rpc_client, HashMap::new()).await?,
|
||||
dex_openbook_v2::OpenbookV2Dex::initialize(&mut rpc_client, HashMap::new()).await?,
|
||||
dex_infinity::InfinityDex::initialize(&mut rpc_client, HashMap::new()).await?,
|
||||
];
|
||||
|
||||
Ok(dexs.into_iter().collect())
|
||||
}
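// All edge identifiers for a dex, deduplicated by (pool key, input mint) and sorted for deterministic ordering.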
|
||||
|
||||
pub fn get_edges_identifiers(dex: &Arc<dyn DexInterface>) -> Vec<Arc<dyn DexEdgeIdentifier>> {
|
||||
let edges_identifiers = dex
|
||||
.edges_per_pk()
|
||||
.into_iter()
|
||||
.flat_map(|x| x.1)
|
||||
.unique_by(|x| (x.key(), x.input_mint()))
|
||||
.sorted_by_key(|x| (x.key(), x.input_mint()))
|
||||
.collect_vec();
|
||||
edges_identifiers
|
||||
}
|
|
@ -0,0 +1,68 @@
|
|||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::syscallstubs;
|
||||
use crate::tests::dex_test_utils;
|
||||
use itertools::Itertools;
|
||||
use router_feed_lib::router_rpc_client::RouterRpcClientTrait;
|
||||
use router_lib::price_feeds::fillcity::FillCityPriceFeed;
|
||||
use router_lib::test_tools::rpc;
|
||||
use solana_client::client_error::reqwest::Client;
|
||||
use std::collections::HashSet;
|
||||
use std::env;
|
||||
use std::time::Duration;
|
||||
|
||||
#[tokio::test]
|
||||
async fn dump_all_dex_data() -> anyhow::Result<()> {
|
||||
if env::var("CI").is_ok() {
|
||||
println!("skipping test while running continuous integration");
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
router_feed_lib::utils::tracing_subscriber_init();
|
||||
syscallstubs::deactivate_program_logs();
|
||||
|
||||
let rpc_url = env::var("RPC_HTTP_URL")?;
|
||||
let (mut rpc_client, _chain_data) = rpc::rpc_dumper_client(rpc_url, "all.lz4");
|
||||
|
||||
let dexs = dex_test_utils::get_all_dex(&mut rpc_client).await?;
|
||||
|
||||
for dex in &dexs {
|
||||
rpc::load_subscriptions(&mut rpc_client, dex.clone()).await?;
|
||||
}
|
||||
|
||||
let mut mints = HashSet::new();
|
||||
for dex in &dexs {
|
||||
let edges_identifiers = dex_test_utils::get_edges_identifiers(&dex);
|
||||
|
||||
for id in edges_identifiers {
|
||||
mints.insert(id.input_mint());
|
||||
mints.insert(id.output_mint());
|
||||
}
|
||||
}
|
||||
|
||||
println!("Adding some {} accounts", mints.len());
|
||||
rpc_client.get_multiple_accounts(&mints).await?;
|
||||
|
||||
let client = Client::new();
|
||||
let mints = mints.into_iter().collect_vec();
|
||||
let mut prices = vec![];
|
||||
|
||||
// let mut prices = router_test_lib::serialize::deserialize_from_file::<Vec<PriceUpdate>>(&"all-prices.lz4".to_string())?;
|
||||
// let mints = mints.iter().filter(|x| prices.iter().any(|y| y.mint == **x))
|
||||
// .copied()
|
||||
// .collect_vec();
|
||||
// println!("Missing prices for {} mints", mints.len());
|
||||
|
||||
for chunk in mints.chunks(150) {
|
||||
let res = FillCityPriceFeed::get_prices(&client, &chunk.iter().copied().collect_vec())
|
||||
.await?;
|
||||
|
||||
prices.extend(res);
|
||||
tokio::time::sleep(Duration::from_millis(700)).await;
|
||||
}
|
||||
|
||||
router_test_lib::serialize::serialize_to_file(&prices, &"all-prices.lz4".to_string());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
|
@ -0,0 +1,6 @@
|
|||
#[cfg(test)]
|
||||
pub mod dex_test_utils;
|
||||
|
||||
pub mod dump_all_dex;
|
||||
pub mod performance_tests;
|
||||
pub mod warmup_performance_tests;
|
|
@ -0,0 +1,214 @@
|
|||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::edge::Edge;
|
||||
use crate::routing::Routing;
|
||||
use crate::tests::dex_test_utils;
|
||||
use crate::{debug_tools, syscallstubs};
|
||||
use anchor_spl::token::spl_token::state::Mint;
|
||||
use itertools::{iproduct, Itertools};
|
||||
use router_config_lib::Config;
|
||||
use router_lib::dex::{AccountProviderView, ChainDataAccountProvider, SwapMode};
|
||||
use router_lib::price_feeds::price_feed::PriceUpdate;
|
||||
use router_lib::test_tools::rpc;
|
||||
use solana_program::program_pack::Pack;
|
||||
use solana_program::pubkey::Pubkey;
|
||||
use solana_sdk::account::ReadableAccount;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::env;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
use tracing::{info, warn};
|
||||
|
||||
#[tokio::test]
|
||||
async fn path_finding_perf_test() -> anyhow::Result<()> {
|
||||
if env::var("CI").is_ok() {
|
||||
println!("skipping test while running continuous integration");
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
router_feed_lib::utils::tracing_subscriber_init();
|
||||
syscallstubs::deactivate_program_logs();
|
||||
|
||||
let usdc = Pubkey::from_str("EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v").unwrap();
|
||||
let sol = Pubkey::from_str("So11111111111111111111111111111111111111112").unwrap();
|
||||
let _jupsol = Pubkey::from_str("jupSoLaHXQiZZTSfEWMTRRgpnyFm8f6sZdosWBjx93v").unwrap();
|
||||
let _vsol = Pubkey::from_str("vSoLxydx6akxyMD9XEcPvGYNGq6Nn66oqVb3UkGkei7").unwrap();
|
||||
let mnde = Pubkey::from_str("MNDEFzGvMt87ueuHvVU9VcTqsAP5b3fTGPsHuuPA5ey").unwrap();
|
||||
|
||||
let (mut rpc_client, chain_data) = rpc::rpc_replayer_client("all.lz4");
|
||||
let chain_data = Arc::new(ChainDataAccountProvider::new(chain_data)) as AccountProviderView;
|
||||
let dex_sources = dex_test_utils::get_all_dex(&mut rpc_client).await?;
|
||||
let mut dexs = vec![];
|
||||
for dex in dex_sources {
|
||||
dexs.push(
|
||||
crate::dex::generic::build_dex_internal(dex, &None, true, false, true, &vec![])
|
||||
.await?,
|
||||
);
|
||||
}
|
||||
let edges = dexs.iter().flat_map(|x| x.edges()).collect_vec();
|
||||
|
||||
let mut config = Config::default();
|
||||
config.routing.path_cache_validity_ms = 0;
|
||||
config.routing.max_path_length = Some(3);
|
||||
config.routing.retain_path_count = Some(5);
|
||||
config.routing.max_edge_per_pair = Some(8);
|
||||
config.routing.max_edge_per_cold_pair = Some(3);
|
||||
let pwa = vec![100, 1_000, 10_000];
|
||||
|
||||
let prices = router_test_lib::serialize::deserialize_from_file::<Vec<PriceUpdate>>(
|
||||
&"all-prices.lz4".to_string(),
|
||||
)?
|
||||
.into_iter()
|
||||
.map(|x| (x.mint, x.price))
|
||||
.collect::<HashMap<Pubkey, f64>>();
|
||||
|
||||
for edge in &edges {
|
||||
let decimals = {
|
||||
let Ok(mint_account) = chain_data.account(&edge.input_mint) else {
|
||||
warn!("Missing mint {}", edge.input_mint);
|
||||
continue;
|
||||
};
|
||||
let mint = Mint::unpack(mint_account.account.data())?;
|
||||
mint.decimals
|
||||
};
|
||||
edge.update_internal(
|
||||
&chain_data,
|
||||
decimals,
|
||||
*prices.get(&edge.input_mint).unwrap_or(&0.0),
|
||||
&pwa,
|
||||
);
|
||||
}
|
||||
|
||||
let _mints = edges.iter().map(|x| x.input_mint).collect::<HashSet<_>>();
|
||||
let available_mints = get_reachable_mints(usdc, sol, &edges);
|
||||
|
||||
let hot_mints = available_mints
|
||||
.iter()
|
||||
.take(100)
|
||||
.copied()
|
||||
.chain(vec![
|
||||
usdc,
|
||||
mnde,
|
||||
Pubkey::from_str("So11111111111111111111111111111111111111112").unwrap(),
|
||||
])
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
let routing = Routing::new(&config, pwa, edges);
|
||||
routing.prepare_pruned_edges_and_cleanup_cache(&hot_mints, SwapMode::ExactIn);
|
||||
|
||||
let input_mints = [usdc, sol];
|
||||
let sorted_mints = available_mints.iter().sorted();
|
||||
// let sorted_mints = &[jupsol, mnde, vsol];
|
||||
|
||||
let pairs = iproduct!(input_mints.iter(), sorted_mints.into_iter())
|
||||
.filter(|x| *x.0 != *x.1)
|
||||
.map(|x| (*x.0, *x.1))
|
||||
.collect::<Vec<(Pubkey, Pubkey)>>();
|
||||
|
||||
let mut failures = HashSet::new();
|
||||
|
||||
for p in pairs {
|
||||
for max_account in [30, 40, 64] {
|
||||
for amounts in [50_000_000, 300_000_000, 2_000_000_000] {
|
||||
let start = Instant::now();
|
||||
let path = routing.find_best_route(
|
||||
&chain_data,
|
||||
&p.0,
|
||||
&p.1,
|
||||
amounts,
|
||||
max_account,
|
||||
false,
|
||||
&hot_mints,
|
||||
None,
|
||||
SwapMode::ExactIn,
|
||||
);
|
||||
|
||||
match path {
|
||||
Ok(path) => {
|
||||
let in_amount_dollars =
|
||||
get_price(&chain_data, &prices, &p.0, path.in_amount)?;
|
||||
let out_amount_dollars =
|
||||
get_price(&chain_data, &prices, &p.1, path.out_amount)?;
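// Routes losing more than 30% of input value are logged as warnings; that usually points at a bad quote or stale pool data.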
|
||||
|
||||
if (out_amount_dollars as f64) < (in_amount_dollars as f64) * 0.7 {
|
||||
warn!(
|
||||
"{} -> {} in {}ms ({} hop(s)) [{}$ -> {}$]",
|
||||
debug_tools::name(&p.0),
|
||||
debug_tools::name(&p.1),
|
||||
start.elapsed().as_micros() as f64 / 1000.0,
|
||||
path.steps.len(),
|
||||
in_amount_dollars,
|
||||
out_amount_dollars,
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"{} -> {} in {}ms ({} hop(s)) [{}$ -> {}$]",
|
||||
debug_tools::name(&p.0),
|
||||
debug_tools::name(&p.1),
|
||||
start.elapsed().as_micros() as f64 / 1000.0,
|
||||
path.steps.len(),
|
||||
in_amount_dollars,
|
||||
out_amount_dollars,
|
||||
);
|
||||
}
|
||||
// println!("price_impact (bps): {}", path.price_impact_bps);
|
||||
}
|
||||
Err(_err) => {
|
||||
failures.insert(p);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (f, t) in failures {
|
||||
warn!("Quote failed for {} -> {}", f, t)
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_price(
|
||||
chain_data: &AccountProviderView,
|
||||
prices: &HashMap<Pubkey, f64>,
|
||||
key: &Pubkey,
|
||||
amount: u64,
|
||||
) -> anyhow::Result<u64> {
|
||||
let decimals = {
|
||||
let mint_account = chain_data.account(key)?;
|
||||
let mint = Mint::unpack(mint_account.account.data())?;
|
||||
mint.decimals
|
||||
};
|
||||
|
||||
let p = *prices.get(key).unwrap_or(&0.0);
|
||||
let d = 10_u64.pow(decimals as u32) as f64;
|
||||
let amount_ui = (p * amount as f64).div_euclid(d);
|
||||
|
||||
Ok(amount_ui.floor() as u64)
|
||||
}
|
||||
|
||||
fn get_reachable_mints(usdc: Pubkey, sol: Pubkey, edges: &Vec<Arc<Edge>>) -> HashSet<Pubkey> {
|
||||
let mut available_mints = HashSet::from([usdc, sol]);
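// Fixed-point sweep: keep adding output mints reachable through valid edges until no new mint appears.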
|
||||
loop {
|
||||
let mut any_new = false;
|
||||
|
||||
for edge in edges {
|
||||
if !edge.state.read().unwrap().is_valid() {
|
||||
continue;
|
||||
}
|
||||
|
||||
if available_mints.contains(&edge.input_mint) {
|
||||
if available_mints.insert(edge.output_mint) {
|
||||
any_new = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !any_new {
|
||||
break;
|
||||
}
|
||||
}
|
||||
available_mints
|
||||
}
|
||||
}
|
|
@ -0,0 +1,131 @@
|
|||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::routing::Routing;
|
||||
use crate::syscallstubs;
|
||||
use crate::tests::dex_test_utils;
|
||||
use itertools::Itertools;
|
||||
use rand::random;
|
||||
use router_config_lib::Config;
|
||||
use router_lib::dex::{AccountProviderView, ChainDataAccountProvider, SwapMode};
|
||||
use router_lib::test_tools::rpc;
|
||||
use solana_program::pubkey::Pubkey;
|
||||
use std::collections::HashSet;
|
||||
use std::env;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
#[tokio::test]
|
||||
async fn path_warmup_perf_test() -> anyhow::Result<()> {
|
||||
if env::var("CI").is_ok() {
|
||||
println!("skipping test while running continuous integration");
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
syscallstubs::deactivate_program_logs();
|
||||
|
||||
let (mut rpc_client, chain_data) = rpc::rpc_replayer_client("all.lz4");
|
||||
let chain_data = Arc::new(ChainDataAccountProvider::new(chain_data)) as AccountProviderView;
|
||||
let dex_sources = dex_test_utils::get_all_dex(&mut rpc_client).await?;
|
||||
let mut dexs = vec![];
|
||||
for dex in dex_sources {
|
||||
dexs.push(
|
||||
crate::dex::generic::build_dex_internal(dex, &None, true, false, true, &vec![])
|
||||
.await?,
|
||||
);
|
||||
}
|
||||
let edges = dexs.iter().flat_map(|x| x.edges()).collect_vec();
|
||||
let pwa = vec![100];
|
||||
// let pwa = vec![100, 1_000, 10_000];
|
||||
|
||||
for edge in &edges {
|
||||
edge.update_internal(
|
||||
&chain_data,
|
||||
6, // TODO FAS Real decimals
|
||||
1.0 + random::<f64>(), // TODO FAS Real price
|
||||
&pwa,
|
||||
);
|
||||
}
|
||||
|
||||
let mut config = Config::default();
|
||||
config.routing.path_cache_validity_ms = 500 * 60 * 1_000; // yuck: effectively never expires during this test
|
||||
config.routing.max_path_length = Some(3);
|
||||
config.routing.retain_path_count = Some(4);
|
||||
config.routing.max_edge_per_pair = Some(4);
|
||||
let routing = Routing::new(&config, pwa.clone(), edges.clone());
|
||||
let mints = edges.iter().map(|x| x.input_mint).collect::<HashSet<_>>();
|
||||
let configured_mints = mints
|
||||
.iter()
|
||||
.take(100)
|
||||
.copied()
|
||||
.chain(vec![
|
||||
Pubkey::from_str("EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v").unwrap(),
|
||||
Pubkey::from_str("So11111111111111111111111111111111111111112").unwrap(),
|
||||
])
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
let start = Instant::now();
|
||||
let mut counter = 0;
|
||||
|
||||
routing.prepare_pruned_edges_and_cleanup_cache(&configured_mints, SwapMode::ExactIn);
|
||||
|
||||
println!("number of mints: {}", mints.len());
|
||||
println!("number of configured_mints: {}", configured_mints.len());
|
||||
println!("number of edges: {}", edges.len());
|
||||
|
||||
for m in &configured_mints {
|
||||
for amount_ui in &pwa {
|
||||
for max_accounts in [40] {
|
||||
// for max_accounts in [10, 15, 20, 25, 30, 40] {
|
||||
let _ = routing.prepare_cache_for_input_mint(
|
||||
m,
|
||||
*amount_ui,
|
||||
max_accounts,
|
||||
|i, o| configured_mints.contains(i) || configured_mints.contains(o),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
counter += 1;
|
||||
if counter % 100 == 0 {
|
||||
println!("-> {} in {:?}", counter, start.elapsed())
|
||||
}
|
||||
}
|
||||
|
||||
println!(
|
||||
"duration: {}ms",
|
||||
start.elapsed().as_micros() as f64 / 1000.0
|
||||
);
|
||||
|
||||
for _i in 0..3 {
|
||||
bench_one_path_resolve(&chain_data, &routing);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn bench_one_path_resolve(chain_data: &AccountProviderView, routing: &Routing) {
|
||||
let start = Instant::now();
|
||||
let path = routing
|
||||
.find_best_route(
|
||||
&chain_data,
|
||||
&Pubkey::from_str("EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v").unwrap(),
|
||||
&Pubkey::from_str("J1toso1uCk3RLmjorhTtrVwY9HJ7X8V9yYac6Y7kGCPn").unwrap(),
|
||||
50_000_000,
|
||||
40,
|
||||
false,
|
||||
&HashSet::new(),
|
||||
None,
|
||||
SwapMode::ExactIn,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
println!(
|
||||
"duration: {}ms",
|
||||
start.elapsed().as_micros() as f64 / 1000.0
|
||||
);
|
||||
println!("out_amount: {}", path.out_amount);
|
||||
println!("price_impact (bps): {}", path.price_impact_bps);
|
||||
println!("steps count: {}", path.steps.len());
|
||||
}
|
||||
}
|
|
@ -0,0 +1,44 @@
|
|||
use solana_program::pubkey::Pubkey;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
use tracing::warn;
|
||||
|
||||
pub type Decimals = u8;
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
pub struct Token {
|
||||
pub mint: Pubkey,
|
||||
pub decimals: Decimals,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct TokenCache {
|
||||
tokens: Arc<HashMap<Pubkey, Decimals>>,
|
||||
}
|
||||
|
||||
impl TokenCache {
|
||||
pub fn new(data: HashMap<Pubkey, Decimals>) -> Self {
|
||||
Self {
|
||||
tokens: Arc::new(data),
|
||||
}
|
||||
}
|
||||
|
||||
// use Result over Option to stay compatible with callers expecting anyhow::Result
|
||||
pub fn token(&self, mint: Pubkey) -> anyhow::Result<Token> {
|
||||
self.tokens
|
||||
.get(&mint)
|
||||
.map(|&decimals| Token { mint, decimals })
|
||||
.ok_or_else(|| {
|
||||
// this should never happen
|
||||
warn!("Token not found in cache: {}", mint);
|
||||
anyhow::anyhow!("Token not found in cache")
|
||||
})
|
||||
}
|
||||
|
||||
pub fn tokens(&self) -> HashSet<Pubkey> {
|
||||
self.tokens
|
||||
.keys()
|
||||
.copied()
|
||||
.collect::<HashSet<Pubkey>>()
|
||||
}
|
||||
}
|
|
@ -0,0 +1,123 @@
|
|||
use crate::edge::Edge;
|
||||
use crate::edge_updater::Dex;
|
||||
use async_channel::Receiver;
|
||||
use router_config_lib::{AccountDataSourceConfig, RoutingConfig};
|
||||
use router_feed_lib::grpc_tx_watcher;
|
||||
use router_feed_lib::grpc_tx_watcher::ExecTx;
|
||||
use solana_program::pubkey::Pubkey;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::task::JoinHandle;
|
||||
use tracing::{info, warn};
|
||||
|
||||
pub fn spawn_tx_watcher_jobs(
|
||||
routing_config: &RoutingConfig,
|
||||
source_config: &AccountDataSourceConfig,
|
||||
dexs: &[Dex],
|
||||
exit_sender: &tokio::sync::broadcast::Sender<()>,
|
||||
exit_flag: Arc<AtomicBool>,
|
||||
) -> (JoinHandle<()>, JoinHandle<()>) {
|
||||
let (tx_sender, tx_receiver) = async_channel::unbounded::<ExecTx>();
|
||||
let _ef = exit_flag.clone();
|
||||
let source_config = source_config.clone();
|
||||
let routing_config = routing_config.clone();
|
||||
let dexs = dexs.to_vec();
|
||||
|
||||
let exit_receiver = exit_sender.subscribe();
|
||||
let tx_sender_job = tokio::spawn(async move {
|
||||
grpc_tx_watcher::process_tx_events(&source_config, tx_sender, exit_receiver).await;
|
||||
});
|
||||
|
||||
let exit_receiver = exit_sender.subscribe();
|
||||
let tx_watcher_job = tokio::spawn(async move {
|
||||
watch_tx_events(routing_config, tx_receiver, &dexs, exit_receiver).await;
|
||||
});
|
||||
|
||||
(tx_sender_job, tx_watcher_job)
|
||||
}
|
||||
|
||||
pub async fn watch_tx_events(
|
||||
config: RoutingConfig,
|
||||
tx_receiver: Receiver<ExecTx>,
|
||||
dexs: &Vec<Dex>,
|
||||
mut exit_receiver: tokio::sync::broadcast::Receiver<()>,
|
||||
) {
|
||||
let cooldown_duration_multihop =
|
||||
Duration::from_secs(config.cooldown_duration_multihop_secs.unwrap_or(15));
|
||||
let cooldown_duration_singlehop =
|
||||
Duration::from_secs(config.cooldown_duration_singlehop_secs.unwrap_or(45));
|
||||
|
||||
let edges_per_pk: HashMap<Pubkey, Vec<Arc<Edge>>> = dexs
|
||||
.iter()
|
||||
.flat_map(|dex| dex.edges_per_pk.clone())
|
||||
.collect();
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = exit_receiver.recv() => {
|
||||
warn!("shutting down watch_tx_events...");
|
||||
break;
|
||||
},
|
||||
msg = tx_receiver.recv() => {
|
||||
match msg {
|
||||
Ok(tx) => {
|
||||
handle_tx(tx, &edges_per_pk, &cooldown_duration_multihop, &cooldown_duration_singlehop).await;
|
||||
}
|
||||
Err(_) => {
|
||||
warn!("shutting down watch_tx_events...");
|
||||
break;
|
||||
}
|
||||
};
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_tx(
|
||||
tx: ExecTx,
|
||||
edges_per_pk: &HashMap<Pubkey, Vec<Arc<Edge>>>,
|
||||
cooldown_multi: &Duration,
|
||||
cooldown_single: &Duration,
|
||||
) {
|
||||
// This is very dirty
|
||||
// 1/ use accounts to try to find edges
|
||||
// 2/ in a multi-hop swap, we don't know which hop is broken,
|
||||
// so cool down everything, but for a shorter time than for a single hop
|
||||
|
||||
let instruction_data = tx.data.as_slice();
|
||||
let (_, instruction_data) = autobahn_executor::utils::read_u64(instruction_data);
|
||||
let (number_of_ix, _instruction_data) = autobahn_executor::utils::read_u8(instruction_data);
|
||||
let cooldown_duration = if number_of_ix > 1 {
|
||||
cooldown_multi
|
||||
} else {
|
||||
cooldown_single
|
||||
};
|
||||
|
||||
let mut impacted_edges = HashSet::new();
|
||||
for account in &tx.accounts {
|
||||
let Some(edges) = edges_per_pk.get(account) else {
|
||||
continue;
|
||||
};
|
||||
|
||||
for edge in edges {
|
||||
if impacted_edges.insert(edge.desc()) {
|
||||
if tx.is_success {
|
||||
let mut writer = edge.state.write().unwrap();
|
||||
writer.reset_cooldown();
|
||||
info!("resetting edge {}", edge.desc());
|
||||
} else {
|
||||
let mut writer = edge.state.write().unwrap();
|
||||
writer.add_cooldown(&cooldown_duration);
|
||||
info!("cooling down edge {}", edge.desc());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if impacted_edges.is_empty() {
|
||||
warn!("didn't find edge");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,73 @@
|
|||
use std::future::Future;
|
||||
use std::process;
|
||||
use tokio::task::JoinHandle;
|
||||
|
||||
use crate::prelude::*;
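// With the tokio_unstable cfg, tasks get a name (visible in tokio-console); otherwise fall back to an anonymous spawn.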
|
||||
|
||||
#[inline(always)]
|
||||
#[allow(unused_variables)]
|
||||
pub fn tokio_spawn<T>(name: &str, future: T) -> JoinHandle<T::Output>
|
||||
where
|
||||
T: Future + Send + 'static,
|
||||
T::Output: Send + 'static,
|
||||
{
|
||||
#[cfg(not(tokio_unstable))]
|
||||
{
|
||||
tokio::spawn(future)
|
||||
}
|
||||
|
||||
#[cfg(tokio_unstable)]
|
||||
{
|
||||
tokio::task::Builder::new()
|
||||
.name(name)
|
||||
.spawn(future)
|
||||
.expect("always Ok")
|
||||
}
|
||||
}
|
||||
|
||||
/// Panics if the local time is < unix epoch start
|
||||
pub fn millis_since_epoch() -> u64 {
|
||||
std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_millis() as u64
|
||||
}
|
||||
|
||||
pub fn print_git_version() {
|
||||
match option_env!("GITHUB_SHA") {
|
||||
Some(sha) => {
|
||||
info!("version is {}[github]", sha,);
|
||||
}
|
||||
None => {
|
||||
info!(
|
||||
"version is {}[{}{}]",
|
||||
env!("VERGEN_GIT_SHA"),
|
||||
env!("VERGEN_GIT_COMMIT_DATE"),
|
||||
if env!("VERGEN_GIT_DIRTY") == "true" {
|
||||
"-dirty"
|
||||
} else {
|
||||
""
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn configure_panic_hook() {
|
||||
let default_panic = std::panic::take_hook();
|
||||
std::panic::set_hook(Box::new(move |panic_info| {
|
||||
default_panic(panic_info);
|
||||
error!("{}", panic_info);
|
||||
eprintln!("{}", panic_info);
|
||||
if let Some(location) = panic_info.location() {
|
||||
error!(
|
||||
"panic occurred in file '{}' at line {}",
|
||||
location.file(),
|
||||
location.line(),
|
||||
);
|
||||
} else {
|
||||
error!("panic occurred but can't get location information...");
|
||||
}
|
||||
process::exit(12);
|
||||
}));
|
||||
}
|
|
@ -0,0 +1,58 @@
|
|||
#![allow(dead_code)]
|
||||
|
||||
use anyhow::Context;
|
||||
use itertools::Itertools;
|
||||
use router_lib::mango::mango_fetcher::MangoMetadata;
|
||||
use solana_program::pubkey::Pubkey;
|
||||
use std::collections::HashSet;
|
||||
use std::str::FromStr;
|
||||
|
||||
pub fn get_configured_mints(
|
||||
mango_metadata: &Option<MangoMetadata>,
|
||||
enabled: bool,
|
||||
add_mango_tokens: bool,
|
||||
configured_mints: &Vec<String>,
|
||||
) -> anyhow::Result<HashSet<Pubkey>> {
|
||||
if !enabled {
|
||||
return Ok(HashSet::new());
|
||||
}
|
||||
|
||||
let mut mints = configured_mints
|
||||
.iter()
|
||||
.map(|s| Pubkey::from_str(s).context(format!("mint {s}")))
|
||||
.collect::<anyhow::Result<Vec<Pubkey>>>()?;
|
||||
|
||||
if add_mango_tokens {
|
||||
match mango_metadata.as_ref() {
|
||||
None => anyhow::bail!("Failed to init dex - missing mango metadata"),
|
||||
Some(m) => mints.extend(m.mints.clone()),
|
||||
};
|
||||
}
|
||||
|
||||
let mints = mints
|
||||
.into_iter()
|
||||
.collect::<HashSet<Pubkey>>();
|
||||
|
||||
Ok(mints)
|
||||
}
|
||||
|
||||
// not used ATM
|
||||
pub(crate) fn filter_pools_and_mints<T, F>(
|
||||
pools: Vec<(Pubkey, T)>,
|
||||
mints: &HashSet<Pubkey>,
|
||||
take_all_mints: bool,
|
||||
mints_getter: F,
|
||||
) -> Vec<(Pubkey, T)>
|
||||
where
|
||||
F: Fn(&T) -> (Pubkey, Pubkey),
|
||||
{
|
||||
pools
|
||||
.into_iter()
|
||||
.filter(|(_pool_pk, pool)| {
|
||||
let keys = mints_getter(&pool);
|
||||
take_all_mints || (mints.contains(&keys.0) && mints.contains(&keys.1))
|
||||
})
|
||||
.collect_vec()
|
||||
}
|
|
@ -0,0 +1,117 @@
|
|||
[infinity]
|
||||
enabled = true
|
||||
|
||||
[orca]
|
||||
enabled = true
|
||||
mints = []
|
||||
take_all_mints = true
|
||||
add_mango_tokens = false
|
||||
|
||||
[cropper]
|
||||
enabled = true
|
||||
mints = []
|
||||
take_all_mints = true
|
||||
add_mango_tokens = false
|
||||
|
||||
[openbook_v2]
|
||||
enabled = true
|
||||
mints = []
|
||||
take_all_mints = true
|
||||
add_mango_tokens = false
|
||||
|
||||
[raydium]
|
||||
enabled = true
|
||||
mints = []
|
||||
take_all_mints = false
|
||||
add_mango_tokens = true
|
||||
|
||||
[raydium_cp]
|
||||
enabled = true
|
||||
mints = []
|
||||
take_all_mints = true
|
||||
add_mango_tokens = false
|
||||
|
||||
[saber]
|
||||
enabled = true
|
||||
mints = []
|
||||
take_all_mints = true
|
||||
add_mango_tokens = false
|
||||
|
||||
|
||||
[routing]
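# Path warming periodically pre-computes routes for the configured mints and amounts so live quote requests hit a warm cache.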
|
||||
path_cache_validity_ms = 60000
|
||||
path_warming_mode = "ConfiguredMints"
|
||||
#path_warming_mode = "HotMints"
|
||||
path_warming_amounts = [100, 1000, 10_000]
|
||||
path_warming_for_mints = [
|
||||
"So11111111111111111111111111111111111111112", # SOL
|
||||
"EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", # USDC
|
||||
# "Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB", # USDT
|
||||
# "27G8MtK7VtTcCHkpASjSDdkWWYfoqT6ggEuKidVJidD4" # JLP
|
||||
]
|
||||
path_warming_interval_secs = 5
|
||||
path_warming_max_accounts = [20, 25, 30, 35, 40, 64]
|
||||
lookup_tables = ["87TgskchTNEv1uXkGQk1U4zt65tjqbfGAZWNMGAcRRPx",
|
||||
"AgCBUZ6UMWqPLftTxeAqpQxtrfiCyL2HgRfmmM6QTfCj",
|
||||
"A1v3qxN7HbUvtyPnnaoCrKonXjkFLaDHXk3S6R2QfEaw",
|
||||
"4C3USSPE5P4gejsvLXQNCr85v1N9NFPzLUmG88tdpuzr",
|
||||
"FgsBrNm3uTN5cDmsYikcC3R6bAekYBJ7n3CQhxzF9AjH",
|
||||
]
|
||||
cooldown_duration_multihop_secs = 30
|
||||
cooldown_duration_singlehop_secs = 60
|
||||
max_path_length = 3
|
||||
retain_path_count = 5
|
||||
max_edge_per_pair = 5
|
||||
max_edge_per_cold_pair = 2
|
||||
|
||||
[server]
|
||||
address = "[::]:8888"
|
||||
|
||||
[metrics]
|
||||
output_http = true
|
||||
prometheus_address = "[::]:9091"
|
||||
output_stdout = false
|
||||
|
||||
[[sources]]
|
||||
dedup_queue_size = 50000
|
||||
rpc_http_url = "$RPC_HTTP_URL"
|
||||
rpc_support_compression = true
|
||||
re_snapshot_interval_secs = 1200
|
||||
|
||||
[[sources.grpc_sources]]
|
||||
name = "router-other"
|
||||
connection_string = "$RPC_HTTP_URL_WITHOUT_TOKEN"
|
||||
token = "$RPC_TOKEN"
|
||||
retry_connection_sleep_secs = 30
|
||||
|
||||
[[sources]]
|
||||
region = "ams"
|
||||
dedup_queue_size = 50000
|
||||
rpc_http_url = "$AMS_RPC_HTTP_URL"
|
||||
rpc_support_compression = true
|
||||
re_snapshot_interval_secs = 1200
|
||||
|
||||
[[sources.grpc_sources]]
|
||||
name = "router-ams"
|
||||
connection_string = "$AMS_RPC_HTTP_URL_WITHOUT_TOKEN"
|
||||
token = "$AMS_RPC_TOKEN"
|
||||
retry_connection_sleep_secs = 30
|
||||
|
||||
[price_feed]
|
||||
birdeye_token = "$BIRDEYE_TOKEN"
|
||||
refresh_interval_secs = 1200 # every 20 min
|
||||
|
||||
[safety_checks]
|
||||
check_quote_out_amount_deviation = true
|
||||
min_quote_out_to_in_amount_ratio = 0.65
|
||||
|
||||
[hot_mints]
|
||||
always_hot_mints = [
|
||||
"So11111111111111111111111111111111111111112", # SOL
|
||||
"EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", # USDC
|
||||
]
|
||||
keep_latest_count = 50
|
||||
|
||||
[debug_config]
|
||||
reprice_using_live_rpc = true
|
||||
reprice_probability = 0.05
|
|
@ -0,0 +1,29 @@
|
|||
[package]
|
||||
name = "cli"
|
||||
version = "0.0.1"
|
||||
edition = "2021"
|
||||
|
||||
[[bin]]
|
||||
name = "cli"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
clap = { version = "4.5.4", features = ["derive"] }
|
||||
tokio = { workspace = true }
|
||||
anyhow = { workspace = true }
|
||||
solana-sdk = { workspace = true }
|
||||
solana-client = { workspace = true }
|
||||
autobahn-executor = { path = "../../programs/autobahn-executor", version = "0.1.0" }
|
||||
router-lib = { path = "../../lib/router-lib/", version = "0.0.1" }
|
||||
router-config-lib = { path = "../../lib/router-config-lib/", version = "0.0.1" }
|
||||
serde = "1.0"
|
||||
serde_derive = "1.0"
|
||||
serde_json = "1.0"
|
||||
bytemuck = "1.16.1"
|
||||
|
||||
reqwest = { workspace = true, features = ["json"] }
|
||||
tracing = { version = "0.1", features = ["log"] }
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
atty = "0.2.14"
|
||||
bincode = "1.3.3"
|
||||
base64 = "0.22.1"
|
|
@ -0,0 +1,90 @@
|
|||
use clap::{Args, Parser, Subcommand};
|
||||
|
||||
#[derive(Parser, Debug, Clone)]
|
||||
#[clap()]
|
||||
pub struct Cli {
|
||||
#[clap(subcommand)]
|
||||
pub command: Command,
|
||||
}
|
||||
|
||||
#[derive(Args, Debug, Clone)]
|
||||
pub struct Rpc {
|
||||
#[clap(short, long, default_value = "m")]
|
||||
pub url: String,
|
||||
|
||||
#[clap(short, long, default_value = "")]
|
||||
pub fee_payer: String,
|
||||
}
|
||||
|
||||
#[derive(Args, Debug, Clone)]
|
||||
pub struct Quote {
|
||||
#[clap(long)]
|
||||
pub input_mint: String,
|
||||
|
||||
#[clap(long)]
|
||||
pub output_mint: String,
|
||||
|
||||
#[clap(short, long)]
|
||||
pub amount: u64,
|
||||
|
||||
#[clap(short, long, default_value = "50")]
|
||||
pub slippage_bps: u64,
|
||||
|
||||
#[clap(short, long, default_value = "http://localhost:8888")]
|
||||
pub router: String,
|
||||
|
||||
// can be either ExactIn or ExactOut
|
||||
#[clap(short, long, default_value = "ExactIn")]
|
||||
pub swap_mode: String,
|
||||
}
|
||||
|
||||
#[derive(Args, Debug, Clone)]
|
||||
pub struct Swap {
|
||||
#[clap(short, long)]
|
||||
pub owner: String,
|
||||
|
||||
#[clap(long)]
|
||||
pub input_mint: String,
|
||||
|
||||
#[clap(long)]
|
||||
pub output_mint: String,
|
||||
|
||||
#[clap(short, long)]
|
||||
pub amount: u64,
|
||||
|
||||
#[clap(short, long, default_value = "50")]
|
||||
pub slippage_bps: u64,
|
||||
|
||||
#[clap(short, long, default_value = "http://localhost:8888")]
|
||||
pub router: String,
|
||||
|
||||
#[clap(flatten)]
|
||||
pub rpc: Rpc,
|
||||
|
||||
// can be either ExactIn or ExactOut
|
||||
#[clap(short, long, default_value = "ExactIn")]
|
||||
pub swap_mode: String,
|
||||
}
|
||||
|
||||
#[derive(Args, Debug, Clone)]
|
||||
pub struct DownloadTestPrograms {
|
||||
#[clap(short, long)]
|
||||
pub config: String,
|
||||
|
||||
#[clap(flatten)]
|
||||
pub rpc: Rpc,
|
||||
}
|
||||
|
||||
#[derive(Args, Debug, Clone)]
|
||||
pub struct DecodeLog {
|
||||
#[clap(short, long)]
|
||||
pub data: String,
|
||||
}
|
||||
|
||||
#[derive(Subcommand, Debug, Clone)]
|
||||
pub enum Command {
|
||||
Swap(Swap),
|
||||
Quote(Quote),
|
||||
DownloadTestPrograms(DownloadTestPrograms),
|
||||
DecodeLog(DecodeLog),
|
||||
}
|
|
@ -0,0 +1,175 @@
|
|||
use crate::cli_args::{Cli, Command};
|
||||
use crate::util::{string_or_env, tracing_subscriber_init};
|
||||
use autobahn_executor::logs::*;
|
||||
use base64::prelude::BASE64_STANDARD;
|
||||
use base64::Engine;
|
||||
use bincode::serialize;
|
||||
use clap::Parser;
|
||||
use router_config_lib::Config;
|
||||
use router_lib::dex::SwapMode;
|
||||
use router_lib::router_client::RouterClient;
|
||||
use solana_client::nonblocking::rpc_client::RpcClient;
|
||||
use solana_client::rpc_client::RpcClient as BlockingRpcClient;
|
||||
use solana_client::rpc_config::{RpcSendTransactionConfig, RpcSimulateTransactionConfig};
|
||||
use solana_sdk::address_lookup_table::state::AddressLookupTable;
|
||||
use solana_sdk::address_lookup_table::AddressLookupTableAccount;
|
||||
use solana_sdk::commitment_config::CommitmentConfig;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::Keypair;
|
||||
use solana_sdk::signer::EncodableKey;
|
||||
use std::str::FromStr;
|
||||
|
||||
mod cli_args;
|
||||
mod util;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), anyhow::Error> {
|
||||
tracing_subscriber_init();
|
||||
let cli = Cli::parse();
|
||||
|
||||
match cli.command {
|
||||
Command::Swap(swap) => {
|
||||
let wallet = Keypair::read_from_file(swap.owner).expect("couldn't read keypair");
|
||||
let rpc_client = RpcClient::new(string_or_env(swap.rpc.url.clone()));
|
||||
let blocking_rpc_client = BlockingRpcClient::new(string_or_env(swap.rpc.url));
|
||||
let swap_client = RouterClient {
|
||||
http_client: reqwest::Client::builder().build()?,
|
||||
router_url: string_or_env(swap.router.to_string()),
|
||||
};
|
||||
|
||||
let Ok(swap_mode) = SwapMode::from_str(&swap.swap_mode) else {
|
||||
anyhow::bail!("Swap mode should be either ExactIn(default) or ExactOut");
|
||||
};
|
||||
|
||||
let quote = swap_client
|
||||
.quote(
|
||||
Pubkey::from_str(&swap.input_mint).unwrap(),
|
||||
Pubkey::from_str(&swap.output_mint).unwrap(),
|
||||
swap.amount,
|
||||
swap.slippage_bps,
|
||||
false,
|
||||
40,
|
||||
swap_mode,
|
||||
)
|
||||
.await?;
|
||||
|
||||
println!("quote: {:?}", quote);
|
||||
|
||||
let (latest_blockhash, _) = rpc_client
|
||||
.get_latest_blockhash_with_commitment(CommitmentConfig::finalized())
|
||||
.await?;
|
||||
|
||||
let load_alt = |alt_addr| {
|
||||
let alt_data = blocking_rpc_client.get_account(&alt_addr);
|
||||
|
||||
match alt_data {
|
||||
Ok(alt_data) => Some(AddressLookupTableAccount {
|
||||
key: alt_addr,
|
||||
addresses: AddressLookupTable::deserialize(alt_data.data.as_slice())
|
||||
.unwrap()
|
||||
.addresses
|
||||
.to_vec(),
|
||||
}),
|
||||
Err(_) => None,
|
||||
}
|
||||
};
|
||||
|
||||
let tx = swap_client
|
||||
.swap(load_alt, quote, &wallet, latest_blockhash)
|
||||
.await?;
|
||||
|
||||
let sim = rpc_client
|
||||
.simulate_transaction_with_config(
|
||||
&tx,
|
||||
RpcSimulateTransactionConfig {
|
||||
commitment: Some(CommitmentConfig::processed()),
|
||||
sig_verify: false,
|
||||
replace_recent_blockhash: true,
|
||||
encoding: None,
|
||||
accounts: None,
|
||||
min_context_slot: None,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
println!("sim swap: err={:?}", sim.value.err);
|
||||
if let Some(logs) = sim.value.logs {
|
||||
for log in logs.iter() {
|
||||
println!("{}", log);
|
||||
}
|
||||
}
|
||||
|
||||
let binary = serialize(&tx)?;
|
||||
let base = BASE64_STANDARD.encode(binary);
|
||||
println!(
|
||||
"inspect: got to https://explorer.solana.com/tx/inspector and paste {}",
|
||||
base
|
||||
);
|
||||
|
||||
let sig = rpc_client
|
||||
.send_and_confirm_transaction_with_spinner_and_config(
|
||||
&tx,
|
||||
CommitmentConfig::processed(),
|
||||
RpcSendTransactionConfig {
|
||||
skip_preflight: true,
|
||||
..RpcSendTransactionConfig::default()
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
println!("swap success: {}", sig);
|
||||
}
|
||||
Command::Quote(quote) => {
|
||||
let Ok(swap_mode) = SwapMode::from_str("e.swap_mode) else {
|
||||
anyhow::bail!("Swap mode should be either ExactIn(default) or ExactOut");
|
||||
};
|
||||
|
||||
let swap_client = RouterClient {
|
||||
http_client: reqwest::Client::builder().build()?,
|
||||
router_url: string_or_env(quote.router.to_string()),
|
||||
};
|
||||
|
||||
let quote = swap_client
|
||||
.quote(
|
||||
Pubkey::from_str("e.input_mint).unwrap(),
|
||||
Pubkey::from_str("e.output_mint).unwrap(),
|
||||
quote.amount,
|
||||
quote.slippage_bps,
|
||||
false,
|
||||
40,
|
||||
swap_mode,
|
||||
)
|
||||
.await?;
|
||||
|
||||
println!("quote: {:?}", quote);
|
||||
}
|
||||
Command::DownloadTestPrograms(download) => {
|
||||
let _config = Config::load(&download.config)?;
|
||||
}
|
||||
Command::DecodeLog(log) => {
|
||||
let decoded = BASE64_STANDARD.decode(log.data)?;
|
||||
let discriminant: &[u8; 8] = &decoded[..8].try_into().unwrap();
|
||||
match discriminant {
|
||||
&SWAP_EVENT_DISCRIMINANT => {
|
||||
let event = bytemuck::from_bytes::<SwapEvent>(&decoded[8..]);
|
||||
println!("SwapEvent - input_amount: {}, input_mint: {:?}, output_amount: {}, output_mint: {:?}", event.input_amount, event.input_mint, event.output_amount, event.output_mint);
|
||||
}
|
||||
&PLATFORM_FEE_LOG_DISCRIMINANT => {
|
||||
let event = bytemuck::from_bytes::<PlatformFeeLog>(&decoded[8..]);
|
||||
println!("PlatformFeeLog - user: {:?}, platform_token_account: {:?}, platform_fee: {}", event.user, event.platform_token_account, event.platform_fee);
|
||||
}
|
||||
&REFERRER_FEE_LOG_DISCRIMINANT => {
|
||||
let event = bytemuck::from_bytes::<ReferrerFeeLog>(&decoded[8..]);
|
||||
println!("ReferrerFeeLog - referree: {:?}, referer_token_account: {:?}, referrer_fee: {}", event.referee, event.referer_token_account, event.referrer_fee);
|
||||
}
|
||||
&REFERRER_WITHDRAW_LOG_DISCRIMINANT => {
|
||||
let event = bytemuck::from_bytes::<ReferrerWithdrawLog>(&decoded[8..]);
|
||||
println!("ReferrerWithdrawLog - referer: {:?}, referer_token_account: {:?}, amount: {}", event.referer, event.referer_token_account, event.amount);
|
||||
}
|
||||
_ => panic!("Unknown log discriminant"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
use std::env;
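// A leading '$' means "read the value from the environment", e.g. "$RPC_HTTP_URL".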
|
||||
|
||||
pub fn string_or_env(value_or_env: String) -> String {
|
||||
let value = match value_or_env.chars().next() {
|
||||
Some('$') => env::var(&value_or_env[1..]).expect("reading from env"),
|
||||
_ => value_or_env,
|
||||
};
|
||||
|
||||
value
|
||||
}
|
||||
|
||||
pub fn tracing_subscriber_init() {
|
||||
let format = tracing_subscriber::fmt::format().with_ansi(atty::is(atty::Stream::Stdout));
|
||||
|
||||
tracing_subscriber::fmt()
|
||||
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||
.event_format(format)
|
||||
.init();
|
||||
}
|
|
@ -0,0 +1,63 @@
|
|||
[package]
|
||||
name = "comparer"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
anyhow = { workspace = true }
|
||||
async-channel = "1.6"
|
||||
bs58 = "0.3.1"
|
||||
bytemuck = "^1.7.2"
|
||||
bytes = "1.0"
|
||||
chrono = "0.4"
|
||||
futures = "0.3.17"
|
||||
futures-core = "0.3"
|
||||
futures-util = "0.3"
|
||||
itertools = "0.12"
|
||||
jemallocator = "0.5"
|
||||
jsonrpc-core = "18.0.0"
|
||||
jsonrpc-core-client = { version = "18.0.0", features = ["ws", "http", "tls"] }
|
||||
serde = "1.0"
|
||||
serde_derive = "1.0"
|
||||
serde_json = "1.0"
|
||||
sha2 = "0.10.6"
|
||||
shellexpand = "2.1.0"
|
||||
solana-account-decoder = "1.17"
|
||||
solana-client = { workspace = true }
|
||||
solana-logger = "1.17"
|
||||
solana-program = "1.17"
|
||||
solana-program-test = "1.17"
|
||||
solana-sdk = { workspace = true }
|
||||
solana-transaction-status = { version = "1.17" }
|
||||
spl-token = { version = "^3.0.0", features = ["no-entrypoint"] }
|
||||
tokio = { workspace = true }
|
||||
tokio-stream = { version = "0.1"}
|
||||
tokio-tungstenite = "0.21"
|
||||
toml = "0.5"
|
||||
tracing = { version = "0.1", features = ["log"] }
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
regex = "1.9.5"
|
||||
indexmap = "2.0.0"
|
||||
spl-associated-token-account = "1.0.5"
|
||||
yellowstone-grpc-proto = { workspace = true }
|
||||
lz4 = "1.24.0"
|
||||
autobahn-executor = { path = "../../programs/autobahn-executor" }
|
||||
router-feed-lib = { path = "../../lib/router-feed-lib" }
|
||||
router-config-lib = { path = "../../lib/router-config-lib" }
|
||||
router-lib = { path = "../../lib/router-lib/", version = "0.0.1" }
|
||||
base64 = "0.21.7"
|
||||
bincode = "1.3.3"
|
||||
services-mango-lib = { git = "https://github.com/blockworks-foundation/mango-v4.git" }
|
||||
tokio-postgres = { version = "0.7", features = ["with-chrono-0_4"] }
|
||||
tokio-postgres-rustls = "0.9.0"
|
||||
postgres_query = { git = "https://github.com/nolanderc/rust-postgres-query", rev = "b4422051c8a31fbba4a35f88004c1cefb1878dd5" }
|
||||
postgres-types = { version = "0.2", features = ["array-impls", "derive", "with-chrono-0_4"] }
|
||||
postgres-native-tls = "0.5"
|
||||
rand = "0.7.3"
|
||||
|
||||
[dev-dependencies]
|
||||
test-case = "*"
|
||||
|
||||
[build-dependencies]
|
||||
anyhow = { workspace = true }
|
||||
vergen-gitcl = { version = "1.0.0-beta.2", features = [] }
|
|
@ -0,0 +1,25 @@
|
|||
FROM rust:1.76.0 AS base
|
||||
RUN cargo install cargo-chef@0.1.62 --locked
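# cargo-chef caches the dependency build in its own Docker layer, so source-only changes rebuild quickly.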
|
||||
RUN rustup component add rustfmt
|
||||
RUN apt-get update && apt-get install -y clang cmake ssh
|
||||
WORKDIR /app
|
||||
|
||||
FROM base AS plan
|
||||
COPY . .
|
||||
WORKDIR /app
|
||||
RUN cargo chef prepare --recipe-path recipe.json
|
||||
|
||||
FROM base AS build
|
||||
COPY --from=plan /app/recipe.json recipe.json
|
||||
RUN cargo chef cook --release --recipe-path recipe.json
|
||||
COPY . .
|
||||
RUN cargo build --release --bin comparer
|
||||
|
||||
FROM debian:bookworm-slim AS run
|
||||
RUN apt-get update && apt-get -y install ca-certificates libc6 libssl3 libssl-dev openssl
|
||||
|
||||
COPY --from=build /app/target/release/comparer /usr/local/bin/
|
||||
COPY --from=build /app/bin/comparer/template-config.toml /usr/local/bin/template-config.toml
|
||||
|
||||
RUN adduser --system --group --no-create-home mangouser
|
||||
USER mangouser
|
|
@ -0,0 +1,26 @@
|
|||
CREATE SCHEMA IF NOT EXISTS router AUTHORIZATION CURRENT_ROLE;
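-- One row per quote comparison between the router and Jupiter for a given (pair, amount, max-accounts) test case.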
|
||||
|
||||
CREATE TABLE IF NOT EXISTS router.comparison
|
||||
(
|
||||
timestamp TIMESTAMP WITH TIME ZONE NOT NULL,
|
||||
input_mint VARCHAR(64) NOT NULL,
|
||||
output_mint VARCHAR(64) NOT NULL,
|
||||
input_amount bigint NOT NULL,
|
||||
router_quote_output_amount bigint NOT NULL,
|
||||
jupiter_quote_output_amount bigint NOT NULL,
|
||||
router_simulation_success BOOLEAN NOT NULL,
|
||||
jupiter_simulation_success BOOLEAN NOT NULL,
|
||||
max_accounts bigint NOT NULL,
|
||||
router_accounts bigint NOT NULL,
|
||||
jupiter_accounts bigint NOT NULL,
|
||||
input_amount_in_dollars double precision NOT NULL,
|
||||
router_output_amount_in_dollars double precision NOT NULL,
|
||||
jupiter_output_amount_in_dollars double precision NOT NULL
|
||||
);
|
||||
|
||||
grant select, insert on router.comparison to router_indexer;
|
||||
|
||||
ALTER TABLE router.comparison ADD router_actual_output_amount bigint;
|
||||
ALTER TABLE router.comparison ADD jupiter_actual_output_amount bigint;
|
||||
ALTER TABLE router.comparison ADD router_error TEXT;
|
||||
ALTER TABLE router.comparison ADD jupiter_error TEXT;
|
|
@ -0,0 +1,500 @@
|
|||
use crate::config::Config;
|
||||
use crate::persister::PersistableState;
|
||||
use async_channel::Sender;
|
||||
use itertools::{iproduct, Itertools};
|
||||
use rand::seq::SliceRandom;
|
||||
use router_config_lib::PriceFeedConfig;
|
||||
use router_lib::dex::SwapMode;
|
||||
use router_lib::mango::mango_fetcher::fetch_mango_data;
|
||||
use router_lib::model::quote_response::QuoteResponse;
|
||||
use router_lib::price_feeds::composite::CompositePriceFeed;
|
||||
use router_lib::price_feeds::price_cache::PriceCache;
|
||||
use router_lib::price_feeds::price_feed::PriceFeed;
|
||||
use router_lib::router_client::RouterClient;
|
||||
use solana_client::client_error::reqwest;
|
||||
use solana_client::nonblocking::rpc_client::RpcClient;
|
||||
use solana_client::rpc_client::RpcClient as BlockingRpcClient;
|
||||
use solana_client::rpc_config::{
|
||||
RpcSimulateTransactionAccountsConfig, RpcSimulateTransactionConfig,
|
||||
};
|
||||
use solana_program::address_lookup_table::state::AddressLookupTable;
|
||||
use solana_program::address_lookup_table::AddressLookupTableAccount;
|
||||
use solana_program::program_pack::Pack;
|
||||
use solana_program::pubkey::Pubkey;
|
||||
use solana_sdk::account::{Account, ReadableAccount};
|
||||
use solana_sdk::commitment_config::CommitmentConfig;
|
||||
use solana_sdk::transaction::VersionedTransaction;
|
||||
use spl_associated_token_account::get_associated_token_address;
|
||||
use spl_token::state::Mint;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::broadcast::Receiver;
|
||||
use tokio::time::{sleep, Duration};
|
||||
use tracing::{error, info, warn};
|
||||
|
||||
pub(crate) async fn run(
|
||||
config: &Config,
|
||||
sender: Sender<PersistableState>,
|
||||
mut exit: Receiver<()>,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut mints = get_mints(config).await?.into_iter().collect_vec();
|
||||
mints.shuffle(&mut rand::thread_rng());
|
||||
|
||||
info!(count = mints.len(), "Running with mints");
|
||||
|
||||
let (mut price_feed, _pf_job) = CompositePriceFeed::start(
|
||||
PriceFeedConfig {
|
||||
birdeye_token: config.birdeye_token.to_string(),
|
||||
birdeye_single_mode: None,
|
||||
refresh_interval_secs: 25,
|
||||
},
|
||||
exit.resubscribe(),
|
||||
);
|
||||
let (price_cache, _pc_job) = PriceCache::new(exit.resubscribe(), price_feed.receiver());
|
||||
for m in &mints {
|
||||
price_feed.register_mint_sender().send(*m).await?;
|
||||
}
|
||||
|
||||
let rpc_client =
|
||||
RpcClient::new_with_commitment(config.rpc_http_url.clone(), CommitmentConfig::confirmed());
|
||||
let mints_accounts = rpc_client
|
||||
.get_multiple_accounts(mints.iter().copied().collect_vec().as_slice())
|
||||
.await?
|
||||
.into_iter()
|
||||
.zip(&mints)
|
||||
.filter_map(|(account, key)| {
|
||||
if let Some(acc) = account {
|
||||
let mint_acc = Mint::unpack(acc.data()).ok();
|
||||
mint_acc.map(|ma| (*key, ma))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect::<HashMap<Pubkey, Mint>>();
|
||||
|
||||
let mut interval = tokio::time::interval(Duration::from_secs(config.execution_interval_sec));
|
||||
|
||||
let usdc = Bot::usdc();
|
||||
let sol = Bot::sol();
|
||||
|
||||
let router_bot = build_bot(config, config.router.clone())?;
|
||||
let jupiter_bot = build_bot(config, config.jupiter.clone())?;
|
||||
|
||||
let other_tokens = vec![usdc, sol];
|
||||
|
||||
let amounts = &config.amounts;
|
||||
let max_accounts = [30, 40, 60_usize];
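// Cartesian product: every (base token, mint, notional amount, max accounts) combination becomes one test case.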
|
||||
|
||||
let test_cases = iproduct!(other_tokens, mints, amounts, max_accounts).collect_vec();
|
||||
info!(count = test_cases.len(), "Test cases");
|
||||
|
||||
let mut test_cases_index = 0;
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = exit.recv() => {
|
||||
warn!("shutting down persister...");
|
||||
break;
|
||||
},
|
||||
_ = interval.tick() => {
|
||||
if test_cases_index >= test_cases.len() {
|
||||
test_cases_index = 0;
|
||||
}
|
||||
|
||||
let test_case = &test_cases[test_cases_index];
|
||||
|
||||
let from_token = test_case.0;
|
||||
let to_token = test_case.1;
|
||||
|
||||
if from_token == to_token {
|
||||
test_cases_index += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
let amount_dollar = *test_case.2;
|
||||
let max_account = test_case.3;
|
||||
|
||||
let Some(price_ui) = price_cache.price_ui(from_token) else {
|
||||
test_cases_index += 1;
|
||||
continue;
|
||||
};
|
||||
let Some(decimals) = mints_accounts.get(&from_token) else {
|
||||
test_cases_index += 1;
|
||||
continue;
|
||||
};
|
||||
let Some(out_price_ui) = price_cache.price_ui(to_token) else {
|
||||
test_cases_index += 1;
|
||||
continue;
|
||||
};
|
||||
let Some(out_decimals) = mints_accounts.get(&to_token) else {
|
||||
test_cases_index += 1;
|
||||
continue;
|
||||
};
|
||||
|
||||
                let multiplier = 10_u32.pow(decimals.decimals as u32) as f64;
                let amount_native =
                    ((amount_dollar as f64 / price_ui) * multiplier).round() as u64;

                let out_multiplier = 10_u32.pow(out_decimals.decimals as u32) as f64;
                let out_fx_dollar = out_price_ui / out_multiplier;

                info!(%from_token, %to_token, amount_dollar, amount_native, price_ui, out_price_ui, max_account, "Running test on");

                let sndr = sender.clone();
                let r_bot = router_bot.clone();
                let j_bot = jupiter_bot.clone();

                tokio::spawn(async move {
                    match simulate(&from_token, &to_token, amount_native, amount_dollar as f64, out_fx_dollar, r_bot, j_bot, max_account).await {
                        Ok(state) => { sndr.send(state).await.expect("sending state must succeed"); }
                        Err(e) => { error!("failed to simulate: {:?}", e) }
                    }
                });

                test_cases_index += 1;
            }
        }
    }

    Ok(())
}

fn build_bot(config: &Config, url: String) -> anyhow::Result<Arc<Bot>> {
    let outgoing_rpc_client = RpcClient::new_with_commitment(
        config.outgoing_rpc_http_url.clone(),
        CommitmentConfig::processed(),
    );
    let blocking_rpc_client = BlockingRpcClient::new_with_commitment(
        config.rpc_http_url.clone(),
        CommitmentConfig::processed(),
    );
    let swap_client = RouterClient {
        http_client: reqwest::Client::builder().build()?,
        router_url: url,
    };

    let bot = Bot {
        wallet: Pubkey::from_str(config.wallet_pubkey.as_str()).unwrap(),
        outgoing_rpc_client,
        blocking_rpc_client,
        swap_client,
    };

    Ok(Arc::new(bot))
}

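// Quotes the same swap through both the autobahn-router and Jupiter, simulates
// both transactions, and bundles the results into one comparable state record.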
#[allow(clippy::too_many_arguments)]
async fn simulate(
    from: &Pubkey,
    to: &Pubkey,
    amount: u64,
    amount_dollars: f64,
    out_fx_dollars: f64,
    router_bot: Arc<Bot>,
    jupiter_bot: Arc<Bot>,
    max_accounts: usize,
) -> anyhow::Result<PersistableState> {
    let bot = router_bot.clone();
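    // Resolve an address lookup table account into the list of addresses it
    // holds; returns None if the account cannot be fetched.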
    let load_alt = |alt_addr| {
        let alt_data = bot.blocking_rpc_client.get_account(&alt_addr);

        match alt_data {
            Ok(alt_data) => Some(AddressLookupTableAccount {
                key: alt_addr,
                addresses: AddressLookupTable::deserialize(alt_data.data.as_slice())
                    .unwrap()
                    .addresses
                    .to_vec(),
            }),
            Err(_) => None,
        }
    };

    let from = *from;
    let to = *to;

    // quote jup + autobahn-router
    let bot = router_bot.clone();
    let router = tokio::spawn(async move {
        bot.swap_client
            .quote(from, to, amount, 50, false, max_accounts, SwapMode::ExactIn)
            .await
    });
    let bot = jupiter_bot.clone();
    let jupiter = tokio::spawn(async move {
        bot.swap_client
            .quote(from, to, amount, 50, false, max_accounts, SwapMode::ExactIn)
            .await
    });

    let router = router.await?;
    let jupiter = jupiter.await?;
    let router_route = build_route(&router);
    let jupiter_route = build_route(&jupiter);

    // wait
    sleep(Duration::from_secs(5)).await;

    // simulate
    let router_result = simulate_swap(router_bot, load_alt, router, "autobahn").await?;
    let jupiter_result = simulate_swap(jupiter_bot, load_alt, jupiter, "jupiter").await?;

    Ok(PersistableState {
        input_mint: from,
        output_mint: to,
        input_amount: amount,
        input_amount_in_dollars: amount_dollars,
        max_accounts,
        router_quote_output_amount: router_result.0,
        jupiter_quote_output_amount: jupiter_result.0,
        router_simulation_is_success: router_result.1,
        jupiter_simulation_is_success: jupiter_result.1,
        router_accounts: router_result.2,
        jupiter_accounts: jupiter_result.2,
        router_output_amount_in_dollars: router_result.0 as f64 * out_fx_dollars,
        jupiter_output_amount_in_dollars: jupiter_result.0 as f64 * out_fx_dollars,
        router_route,
        jupiter_route,
        router_actual_output_amount: router_result.3,
        jupiter_actual_output_amount: jupiter_result.3,
        router_error: router_result.4,
        jupiter_error: jupiter_result.4,
    })
}

fn build_route(quote: &anyhow::Result<QuoteResponse>) -> String {
    match quote {
        Ok(quote) => {
            let steps = quote
                .route_plan
                .iter()
                .filter_map(|x| x.swap_info.clone().map(|step| step.label))
                .collect::<Vec<Option<String>>>();
            let steps = steps
                .into_iter()
                .map(|x| x.unwrap_or("?".to_string()))
                .collect::<HashSet<_>>();
            steps.iter().join(", ")
        }
        Err(_) => "".to_string(),
    }
}

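// Returns (quoted_out_amount, simulation_success, account_count,
// actual_out_amount_from_simulation, error_message).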
async fn simulate_swap<F>(
    bot: Arc<Bot>,
    alt: F,
    quote: anyhow::Result<QuoteResponse>,
    name: &str,
) -> anyhow::Result<(u64, bool, usize, u64, String)>
where
    F: Fn(Pubkey) -> Option<AddressLookupTableAccount>,
{
    let Ok(quote) = quote else {
        let err = quote.unwrap_err();
        warn!("Quote failed for {} with {:?}", name, err);
        return Ok((0, false, 0, 0, parse_error(err.to_string())));
    };

    let latest_blockhash = bot
        .outgoing_rpc_client
        .get_latest_blockhash_with_commitment(CommitmentConfig::finalized())
        .await?
        .0;
    let out_amount = u64::from_str(quote.out_amount.as_str())?;

    let tx = bot
        .swap_client
        .simulate_swap(alt, quote.clone(), &bot.wallet, latest_blockhash, true)
        .await;

    let Ok(tx) = tx else {
        warn!("Failed to build TX for {}: {:?}", name, tx.unwrap_err());
        return Ok((out_amount, false, 0, 0, "failed to build TX".to_string()));
    };

    let accounts = count_account(&tx);
    let out_token_account = get_ata(
        bot.wallet,
        Pubkey::from_str(quote.output_mint.as_str()).unwrap(),
    );

    let initial_balance = bot
        .blocking_rpc_client
        .get_account(&out_token_account)
        .ok()
        .and_then(|x| get_balance(x).ok());

    let simulation_result = bot.blocking_rpc_client.simulate_transaction_with_config(
        &tx,
        RpcSimulateTransactionConfig {
            sig_verify: false,
            replace_recent_blockhash: false,
            commitment: Some(CommitmentConfig::processed()),
            encoding: None,
            accounts: Some(RpcSimulateTransactionAccountsConfig {
                encoding: None,
                addresses: vec![out_token_account.to_string()],
            }),
            min_context_slot: None,
        },
    );
    let Ok(simulation_result) = simulation_result else {
        let bytes = bincode::serialize(&tx).unwrap();
        let lut_used = tx
            .message
            .address_table_lookups()
            .map(|x| x.len())
            .unwrap_or(0);

        warn!(
            "Failed to simulate TX for {}: (size={}, nb_lut={}) {:?}",
            name,
            bytes.len(),
            lut_used,
            simulation_result.unwrap_err()
        );
        return Ok((
            out_amount,
            false,
            accounts,
            0,
            "failed to simulate TX".to_string(),
        ));
    };

    if let Some(err) = simulation_result.value.err {
        warn!("Tx failed for {}: {:?}", name, err);

        let mut is_slippage_error = false;
        let mut is_cu_error = false;

        if let Some(logs) = simulation_result.value.logs {
            for l in &logs {
                warn!(" - {}", l);
                if l.contains("AmountOutBelowMinimum") {
                    is_slippage_error = true;
                }
                if l.contains("Max slippage reached") {
                    is_slippage_error = true;
                }
                if l.contains("exceeded CUs meter at BPF") {
                    is_cu_error = true;
                }
            }
        }

        let err_str = if is_slippage_error {
            "Failed to execute TX : Max Slippage Reached"
        } else if is_cu_error {
            "Failed to execute TX : Exceeded CUs meter"
        } else {
            "Failed to execute TX"
        };

        return Ok((out_amount, false, accounts, 0, err_str.to_string()));
    }

    let Some(after_accounts) = simulation_result.value.accounts else {
        warn!("Tx success for {}: but missing accounts", name);
        return Ok((
            out_amount,
            false,
            accounts,
            0,
            "Missing simulation accounts".to_string(),
        ));
    };

    let Some(after_account) = after_accounts.into_iter().next().flatten() else {
        warn!("Tx success for {}: but missing account", name);
        return Ok((
            out_amount,
            false,
            accounts,
            0,
            "Missing simulation account".to_string(),
        ));
    };

    let Some(after_account) = after_account.decode::<Account>() else {
        warn!("Tx success for {}: but failed to decode account", name);
        return Ok((
            out_amount,
            false,
            accounts,
            0,
            "Failed to decode account".to_string(),
        ));
    };

    let final_balance = get_balance(after_account);
    // saturating_sub: the ATA may not have existed before the simulation, and
    // overflow-checks are enabled workspace-wide.
    let actual_amount = final_balance
        .unwrap_or(0)
        .saturating_sub(initial_balance.unwrap_or(0));

    info!("Tx success for {} with {} accounts", name, accounts);
    Ok((out_amount, true, accounts, actual_amount, "".to_string()))
}

fn parse_error(err: String) -> String {
    if err.contains("no path between") {
        "no path found".to_string()
    } else if err.contains("bad route") {
        "bad route".to_string()
    } else {
        err
    }
}

fn get_ata(wallet: Pubkey, mint: Pubkey) -> Pubkey {
    get_associated_token_address(&wallet, &mint)
}

fn get_balance(account: Account) -> anyhow::Result<u64> {
    Ok(spl_token::state::Account::unpack(account.data.as_slice())?.amount)
}

fn count_account(tx: &VersionedTransaction) -> usize {
    // Static keys plus the addresses resolved through lookup tables
    // (counting only the number of tables would undercount).
    tx.message.static_account_keys().len()
        + tx.message
            .address_table_lookups()
            .map(|luts| {
                luts.iter()
                    .map(|l| l.writable_indexes.len() + l.readonly_indexes.len())
                    .sum::<usize>()
            })
            .unwrap_or(0)
}

async fn get_mints(config: &Config) -> anyhow::Result<HashSet<Pubkey>> {
    let configured_mints: HashSet<_> = config
        .mints
        .iter()
        .map(|x| Pubkey::from_str(x).unwrap())
        .collect();

    let mints = if config.use_mango_tokens {
        let mango_mints = fetch_mango_data().await?;
        configured_mints
            .into_iter()
            .chain(mango_mints.mints.into_iter())
            .collect::<HashSet<_>>()
    } else {
        configured_mints
    };

    Ok(mints)
}

struct Bot {
    outgoing_rpc_client: RpcClient,
    swap_client: RouterClient,
    wallet: Pubkey,
    blocking_rpc_client: BlockingRpcClient,
}

impl Bot {
    pub fn sol() -> Pubkey {
        Pubkey::from_str("So11111111111111111111111111111111111111112").unwrap()
    }

    pub fn usdc() -> Pubkey {
        Pubkey::from_str("EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v").unwrap()
    }
}
@ -0,0 +1,32 @@
use services_mango_lib::env_helper::string_or_env as serde_string_or_env;
use services_mango_lib::postgres_configuration::PostgresConfiguration;

#[derive(Clone, Debug, Default, serde_derive::Deserialize)]
pub struct Config {
    #[serde(deserialize_with = "serde_string_or_env")]
    pub rpc_http_url: String,

    #[serde(deserialize_with = "serde_string_or_env")]
    pub outgoing_rpc_http_url: String,

    pub postgres: PostgresConfiguration,
    pub persist: bool,

    pub mints: Vec<String>,
    pub use_mango_tokens: bool,
    pub amounts: Vec<u64>,

    #[serde(deserialize_with = "serde_string_or_env")]
    pub wallet_pubkey: String,

    pub execution_interval_sec: u64,

    #[serde(deserialize_with = "serde_string_or_env")]
    pub router: String,

    #[serde(deserialize_with = "serde_string_or_env")]
    pub jupiter: String,

    #[serde(deserialize_with = "serde_string_or_env")]
    pub birdeye_token: String,
}
@ -0,0 +1,69 @@
use crate::persister::PersistableState;
use futures_util::StreamExt;
use router_feed_lib::utils;
use std::fs::File;
use std::io::Read;
use std::sync::{atomic, Arc};
use tokio::sync::broadcast;
use tracing::{error, info};

mod bot;
mod config;
mod persister;

#[tokio::main(worker_threads = 10)]
async fn main() -> anyhow::Result<()> {
    utils::tracing_subscriber_init();
    let args: Vec<String> = std::env::args().collect();
    if args.len() < 2 {
        eprintln!("Please enter a config file path argument.");
        return Ok(());
    }

    let config: config::Config = {
        let mut file = File::open(&args[1])?;
        let mut contents = String::new();
        file.read_to_string(&mut contents)?;
        toml::from_str(&contents).unwrap()
    };

    let exit_flag: Arc<atomic::AtomicBool> = Arc::new(atomic::AtomicBool::new(false));
    let (exit_sender, _) = broadcast::channel(1);
    {
        let exit_flag = exit_flag.clone();
        let exit_sender = exit_sender.clone();
        tokio::spawn(async move {
            tokio::signal::ctrl_c().await.unwrap();
            info!("Received SIGINT, shutting down...");
            exit_flag.store(true, atomic::Ordering::Relaxed);
            exit_sender.send(()).unwrap();
        });
    }

    let (persistable_sender, persistable_receiver) = async_channel::unbounded::<PersistableState>();

    let ef = exit_sender.subscribe();
    let cf = config.clone();

    let bot_job = tokio::spawn(async move {
        match bot::run(&cf, persistable_sender, ef).await {
            Ok(_) => {}
            Err(e) => {
                error!("Bot job failed with {:?}", e);
            }
        };
    });

    let ef = exit_flag.clone();
    let persister_job = tokio::spawn(async move {
        persister::persist_tx_state(&config, &config.postgres, persistable_receiver, ef).await;
    });

    let mut jobs: futures::stream::FuturesUnordered<_> =
        vec![bot_job, persister_job].into_iter().collect();

    jobs.next().await;
    error!("A critical job exited, aborting run..");

    Ok(())
}
@ -0,0 +1,166 @@
use crate::config::Config;
use async_channel::Receiver;
use services_mango_lib::postgres_configuration::PostgresConfiguration;
use services_mango_lib::postgres_connection;
use solana_program::pubkey::Pubkey;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;
use tokio::time::Instant;
use tokio_postgres::Client;
use tracing::{error, info, warn};

#[derive(Clone, Debug)]
pub struct PersistableState {
    pub input_mint: Pubkey,
    pub output_mint: Pubkey,
    pub input_amount: u64,
    pub max_accounts: usize,
    pub input_amount_in_dollars: f64,
    pub router_quote_output_amount: u64,
    pub jupiter_quote_output_amount: u64,
    pub router_simulation_is_success: bool,
    pub jupiter_simulation_is_success: bool,
    pub router_accounts: usize,
    pub jupiter_accounts: usize,
    pub router_output_amount_in_dollars: f64,
    pub jupiter_output_amount_in_dollars: f64,
    pub router_route: String,
    pub jupiter_route: String,
    pub router_actual_output_amount: u64,
    pub jupiter_actual_output_amount: u64,
    pub router_error: String,
    pub jupiter_error: String,
}

pub(crate) async fn persist_tx_state(
    config: &Config,
    postgres_config: &PostgresConfiguration,
    receiver: Receiver<PersistableState>,
    exit_flag: Arc<AtomicBool>,
) {
    let mut last_error = Instant::now();
    let mut error_connecting: usize = 0;
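    // Connection failures landing within ERROR_COUNT_SPAN of the previous one
    // count as sequential; give up after MAX_SEQUENTIAL_ERROR of them.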
    const ERROR_COUNT_SPAN: Duration = Duration::from_secs(10);
    const MAX_SEQUENTIAL_ERROR: usize = 10;
    while error_connecting < MAX_SEQUENTIAL_ERROR {
        let connection = if config.persist {
            match postgres_connection::connect(postgres_config).await {
                Ok(c) => Some(c),
                Err(e) => {
                    error!("failed to connect to SQL server {e:?}...");
                    if last_error.elapsed() < ERROR_COUNT_SPAN {
                        error_connecting += 1;
                        last_error = Instant::now();
                    }
                    continue;
                }
            }
        } else {
            None
        };

        let mut scheduled_exit = false;
        loop {
            if exit_flag.load(Ordering::Relaxed) {
                scheduled_exit = true;
                warn!("shutting down persister...");
                break;
            }

            let Ok(tx) = receiver.recv().await else {
                scheduled_exit = true;
                warn!("shutting down persister...");
                break;
            };

            info!(
                %tx.input_mint,
                %tx.output_mint,
                tx.input_amount,
                tx.input_amount_in_dollars,
                tx.max_accounts,
                tx.jupiter_quote_output_amount,
                tx.jupiter_simulation_is_success,
                tx.router_quote_output_amount,
                tx.router_simulation_is_success,
                tx.router_accounts,
                tx.jupiter_accounts,
                tx.router_output_amount_in_dollars,
                tx.jupiter_output_amount_in_dollars,
                tx.router_route,
                tx.jupiter_route,
                tx.router_actual_output_amount,
                tx.jupiter_actual_output_amount,
                tx.router_error,
                tx.jupiter_error,
                "State"
            );

            if config.persist {
                match persist(tx, &connection.as_ref().unwrap().0).await {
                    Ok(_) => {}
                    Err(e) => {
                        warn!("persist failed with error => {:?}", e);
                        break;
                    }
                }
            }
        }

        if !scheduled_exit && last_error.elapsed() < ERROR_COUNT_SPAN {
            error_connecting += 1;
            last_error = Instant::now();
        }
    }
}

async fn persist(state: PersistableState, client: &Client) -> anyhow::Result<()> {
    let input_amount = state.input_amount as i64;
    let input_amount_in_dollars = state.input_amount_in_dollars;
    let input_mint = state.input_mint.to_string();
    let output_mint = state.output_mint.to_string();
    let router_is_success = state.router_simulation_is_success;
    let jupiter_is_success = state.jupiter_simulation_is_success;
    let router_quote = state.router_quote_output_amount as i64;
    let jupiter_quote = state.jupiter_quote_output_amount as i64;
    let router_actual_output_amount = state.router_actual_output_amount as i64;
    let jupiter_actual_output_amount = state.jupiter_actual_output_amount as i64;
    let max_accounts = state.max_accounts as i64;
    let router_accounts = state.router_accounts as i64;
    let jupiter_accounts = state.jupiter_accounts as i64;
    let router_output_amount_in_dollars = state.router_output_amount_in_dollars;
    let jupiter_output_amount_in_dollars = state.jupiter_output_amount_in_dollars;
    let router_error = state.router_error;
    let jupiter_error = state.jupiter_error;
    let timestamp = chrono::Utc::now();

    let query = postgres_query::query!(
        "INSERT INTO router.comparison \
        (input_mint, output_mint, input_amount, input_amount_in_dollars, max_accounts, router_quote_output_amount, jupiter_quote_output_amount, router_simulation_success, jupiter_simulation_success, router_accounts, jupiter_accounts, router_output_amount_in_dollars, jupiter_output_amount_in_dollars, router_route, jupiter_route, router_actual_output_amount, jupiter_actual_output_amount, router_error, jupiter_error, timestamp) \
        VALUES ($input_mint, $output_mint, $input_amount, $input_amount_in_dollars, $max_accounts, $router_quote, $jupiter_quote, $router_is_success, $jupiter_is_success, $router_accounts, $jupiter_accounts, $router_output_amount_in_dollars, $jupiter_output_amount_in_dollars, $router_route, $jupiter_route, $router_actual_output_amount, $jupiter_actual_output_amount, $router_error, $jupiter_error, $timestamp)",
        input_mint,
        output_mint,
        input_amount,
        input_amount_in_dollars,
        max_accounts,
        router_quote,
        jupiter_quote,
        router_is_success,
        jupiter_is_success,
        router_accounts,
        jupiter_accounts,
        router_output_amount_in_dollars,
        jupiter_output_amount_in_dollars,
        router_route = state.router_route,
        jupiter_route = state.jupiter_route,
        router_actual_output_amount,
        jupiter_actual_output_amount,
        router_error,
        jupiter_error,
        timestamp,
    );

    query.execute(client).await?;
    Ok(())
}
@ -0,0 +1,42 @@
router = "$ROUTER_URL"
jupiter = "https://quote-api.jup.ag/v6"
rpc_http_url = "$RPC_URL"
outgoing_rpc_http_url = "$RPC_URL"
wallet_pubkey = "BbHG9GvPActFGogv3iNrpDAj4qpXr8t3jF16uGxXcKci"
execution_interval_sec = 2
amounts = [100, 5000]
birdeye_token = "$BIRDEYE_TOKEN"

mints = [
    "So11111111111111111111111111111111111111112", # SOL
    "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", # USDC
    "J1toso1uCk3RLmjorhTtrVwY9HJ7X8V9yYac6Y7kGCPn", # JitoSOL
    "jupSoLaHXQiZZTSfEWMTRRgpnyFm8f6sZdosWBjx93v", # JupSol
    "Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB", # USDT
    "5oVNBeEEQvYi1cX3ir8Dx5n1P7pdxydbGF2X4TxVusJm", # INF
    "MangoCzJ36AjZyKwVj3VnYU4GTonjfVEnJmvvWaxLac", # MNGO
    "DezXAZ8z7PnrnRJjz3wXBoRgixCa6xjnB7YaB1pPB263", # BONK
    "EKpQGSJtjMFqKZ9KQanSqYXRcF8fBopzLHYxdM65zcjm", # WIF
    "USDH1SM1ojwWUga67PGrgFWUHibbjqMvuMaDkRJTgkX", # USDH
    "MangmsBgFqJhW4cLUR9LxfVgMboY1xAoP8UUBiWwwuY", # MangoSol
    "DriFtupJYLTosbwoN8koMbEYSx54aFAVLddWsbksjwg7", # Drift
    "7GCihgDB8fe6KNjn2MYtkzZcRjQy3t9GHdC8uHYmW2hr", # Poccat
    "mSoLzYCxHdYgdzU16g5QSh3i5K3z3KZK7ytfqcJm7So", # mSOL
    "JUPyiwrYJFskUPiHa7hkeR8VUtAeFoSYbKedZNsDvCN", # JUP
    "27G8MtK7VtTcCHkpASjSDdkWWYfoqT6ggEuKidVJidD4", # JLP
    "KMNo3nJsBXfcpJTVhZcXLW7RmTwTt4GVFE7suUBo9sS", # KMNO
    "4DBgRWs63QzBwj71Qxiiz49DUZJi2QgUha7cbadepump", # DERP
    "3S8qX1MsMqRbiwKg2cQyx7nis1oHMgaCuc9c4VfvVdPN", # MOTHER
    "ukHH6c7mMyiWCf1b9pnWe25TSpkDDt3H5pQZgZ74J82", # BOME
]
use_mango_tokens = true
persist = true

[postgres]
connection_string = "$PG_CONNECTION_STRING"
max_retry_count = 2
allow_invalid_certs = true

[postgres.tls]
ca_cert_path = "$PG_CA_CERT"
client_key_path = "$PG_CLIENT_KEY"
@ -0,0 +1,61 @@
[package]
name = "indexer"
version = "0.1.0"
edition = "2021"

[dependencies]
anyhow = { workspace = true }
async-channel = "1.6"
bs58 = "0.3.1"
bytemuck = "^1.7.2"
bytes = "1.0"
chrono = "0.4"
futures = "0.3.17"
futures-core = "0.3"
futures-util = "0.3"
itertools = "0.12"
jemallocator = "0.5"
jsonrpc-core = "18.0.0"
jsonrpc-core-client = { version = "18.0.0", features = ["ws", "http", "tls"] }
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
sha2 = "0.10.6"
shellexpand = "2.1.0"
solana-account-decoder = "1.17"
solana-client = { workspace = true }
solana-logger = "1.17"
solana-program = "1.17"
solana-program-test = "1.17"
solana-sdk = { workspace = true }
solana-transaction-status = { version = "1.17" }
tokio = { workspace = true }
tokio-stream = { version = "0.1" }
tokio-tungstenite = "0.21"
toml = "0.5"
tracing = { version = "0.1", features = ["log"] }
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
regex = "1.9.5"
indexmap = "2.0.0"
router-lib = { path = "../../lib/router-lib/", version = "0.0.1" }
spl-associated-token-account = "1.0.5"
yellowstone-grpc-proto = { workspace = true }
lz4 = "1.24.0"
autobahn-executor = { path = "../../programs/autobahn-executor" }
router-feed-lib = { path = "../../lib/router-feed-lib" }
router-config-lib = { path = "../../lib/router-config-lib" }
base64 = "0.21.7"
bincode = "1.3.3"
services-mango-lib = { git = "https://github.com/blockworks-foundation/mango-v4.git" }
tokio-postgres = { version = "0.7", features = ["with-chrono-0_4"] }
tokio-postgres-rustls = "0.9.0"
postgres_query = { git = "https://github.com/nolanderc/rust-postgres-query", rev = "b4422051c8a31fbba4a35f88004c1cefb1878dd5" }
postgres-types = { version = "0.2", features = ["array-impls", "derive", "with-chrono-0_4"] }
postgres-native-tls = "0.5"

[dev-dependencies]
test-case = "*"

[build-dependencies]
anyhow = { workspace = true }
vergen-gitcl = { version = "1.0.0-beta.2", features = [] }
@ -0,0 +1,25 @@
FROM rust:1.76.0 AS base
RUN cargo install cargo-chef@0.1.62 --locked
RUN rustup component add rustfmt
RUN apt-get update && apt-get install -y clang cmake ssh
WORKDIR /app

FROM base AS plan
COPY . .
WORKDIR /app
RUN cargo chef prepare --recipe-path recipe.json

FROM base AS build
COPY --from=plan /app/recipe.json recipe.json
RUN cargo chef cook --release --recipe-path recipe.json
COPY . .
RUN cargo build --release --bin indexer

FROM debian:bookworm-slim AS run
RUN apt-get update && apt-get -y install ca-certificates libc6 libssl3 libssl-dev openssl

COPY --from=build /app/target/release/indexer /usr/local/bin/
COPY --from=build /app/bin/indexer/template-config.toml /usr/local/bin/template-config.toml

RUN adduser --system --group --no-create-home mangouser
USER mangouser
@ -0,0 +1,8 @@
CREATE SCHEMA IF NOT EXISTS router AUTHORIZATION CURRENT_ROLE;
CREATE TABLE IF NOT EXISTS router.tx_history
(
    signature      VARCHAR(88) NOT NULL,
    timestamp      TIMESTAMP WITH TIME ZONE NOT NULL,
    is_success     BOOLEAN NOT NULL,
    router_version INT NOT NULL
);
@ -0,0 +1,14 @@
use router_config_lib::AccountDataSourceConfig;
use services_mango_lib::postgres_configuration::PostgresConfiguration;

#[derive(Clone, Debug, Default, serde_derive::Deserialize)]
pub struct Config {
    pub source: AccountDataSourceConfig,
    pub metrics: MetricsConfig,
    pub postgres: PostgresConfiguration,
}

#[derive(Clone, Debug, Default, serde_derive::Deserialize)]
pub struct MetricsConfig {
    pub enabled: bool,
}
@ -0,0 +1,72 @@
use crate::persister::PersistableTx;
use futures_util::StreamExt;
use router_feed_lib::grpc_tx_watcher::ExecTx;
use router_feed_lib::{grpc_tx_watcher, utils};
use std::fs::File;
use std::io::Read;
use std::sync::{atomic, Arc};
use tokio::sync::broadcast;
use tracing::{error, info};

mod config;
mod persister;
mod tx_watcher;

#[tokio::main(worker_threads = 10)]
async fn main() -> anyhow::Result<()> {
    utils::tracing_subscriber_init();
    let args: Vec<String> = std::env::args().collect();
    if args.len() < 2 {
        eprintln!("Please enter a config file path argument.");
        return Ok(());
    }

    let config: config::Config = {
        let mut file = File::open(&args[1])?;
        let mut contents = String::new();
        file.read_to_string(&mut contents)?;
        toml::from_str(&contents).unwrap()
    };

    let exit_flag: Arc<atomic::AtomicBool> = Arc::new(atomic::AtomicBool::new(false));
    let (exit_sender, _) = broadcast::channel(1);
    {
        let exit_flag = exit_flag.clone();
        let exit_sender = exit_sender.clone();
        tokio::spawn(async move {
            tokio::signal::ctrl_c().await.unwrap();
            info!("Received SIGINT, shutting down...");
            exit_flag.store(true, atomic::Ordering::Relaxed);
            exit_sender.send(()).unwrap();
        });
    }

    let (tx_sender, tx_receiver) = async_channel::unbounded::<ExecTx>();
    let (persistable_sender, persistable_receiver) = async_channel::unbounded::<PersistableTx>();

    let ef = exit_sender.subscribe();
    let tx_sender_job = tokio::spawn(async move {
        grpc_tx_watcher::process_tx_events(&config.source, tx_sender, ef).await;
    });

    let ef = exit_sender.subscribe();
    let watcher_job = tokio::spawn(async move {
        tx_watcher::watch_tx_events(tx_receiver, persistable_sender, ef).await;
    });

    let ef = exit_flag.clone();
    let persister_job = tokio::spawn(async move {
        persister::persist_tx_state(&config.metrics, &config.postgres, persistable_receiver, ef)
            .await;
    });

    let mut jobs: futures::stream::FuturesUnordered<_> =
        vec![tx_sender_job, watcher_job, persister_job]
            .into_iter()
            .collect();

    jobs.next().await;
    error!("A critical job exited, aborting run..");

    Ok(())
}
@ -0,0 +1,92 @@
use crate::config::MetricsConfig;
use async_channel::Receiver;
use services_mango_lib::postgres_configuration::PostgresConfiguration;
use services_mango_lib::postgres_connection;
use solana_sdk::signature::Signature;
use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio_postgres::Client;
use tracing::{error, info, warn};

#[derive(Clone, Debug)]
pub struct PersistableTx {
    pub sig: Signature,
    pub is_success: bool,
    pub router_version: u8,
}

pub(crate) async fn persist_tx_state(
    config: &MetricsConfig,
    postgres_config: &PostgresConfiguration,
    receiver: Receiver<PersistableTx>,
    exit_flag: Arc<AtomicBool>,
) {
    let connection = if config.enabled {
        let Ok(c) = postgres_connection::connect(postgres_config).await else {
            error!("failed to connect to SQL server...");
            return;
        };
        Some(c)
    } else {
        None
    };

    let mut inserted = HashMap::new();

    loop {
        if exit_flag.load(Ordering::Relaxed) {
            warn!("shutting down persist_tx_state...");
            break;
        }

        let Ok(tx) = receiver.recv().await else {
            warn!("shutting down persist_tx_state...");
            break;
        };

        info!(
            sig = tx.sig.to_string(),
            tx.is_success, tx.router_version, "TX"
        );

        if config.enabled {
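            // Skip signatures we already persisted; the dedup map is pruned
            // below once it grows past 1000 entries.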
            if inserted.insert(tx.sig, Instant::now()).is_some() {
                continue;
            }

            match persist(tx, &connection.as_ref().unwrap().0).await {
                Ok(_) => {}
                Err(e) => {
                    warn!("persist failed with error => {:?}", e);
                }
            }

            if inserted.len() > 1000 {
                inserted.retain(|_, x| x.elapsed() < Duration::from_secs(3600));
            }
        }
    }
}

async fn persist(tx: PersistableTx, client: &Client) -> anyhow::Result<()> {
    // TODO FAS - Batch insert, handle errors, etc...
    let signature = tx.sig.to_string();
    let is_success = tx.is_success;
    let router_version = tx.router_version as i32;
    let timestamp = chrono::Utc::now();

    let query = postgres_query::query!(
        "INSERT INTO router.tx_history \
        (signature, is_success, router_version, timestamp) \
        VALUES($signature, $is_success, $router_version, $timestamp)",
        signature,
        is_success,
        router_version,
        timestamp,
    );

    query.execute(client).await?;
    Ok(())
}
@ -0,0 +1,61 @@
use crate::persister::PersistableTx;
use async_channel::{Receiver, Sender};
use router_feed_lib::grpc_tx_watcher::ExecTx;
use tracing::{info, warn};

pub async fn watch_tx_events(
    receiver: Receiver<ExecTx>,
    sender: Sender<PersistableTx>,
    mut exit_flag: tokio::sync::broadcast::Receiver<()>,
) {
    info!("Starting to watch TX");

    loop {
        tokio::select! {
            _ = exit_flag.recv() => {
                warn!("shutting down watch_tx_events...");
                break;
            },
            msg = receiver.recv() => {
                match msg {
                    Err(_e) => {
                        warn!("shutting down watch_tx_events...");
                        break;
                    },
                    Ok(msg) => {
                        handle_tx(&sender, msg).await;
                    }
                }
            }
        }
    }
}

async fn handle_tx(sender: &Sender<PersistableTx>, msg: ExecTx) {
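    // The first data byte packs two values, as the decoding below implies:
    // low nibble = instruction discriminator, high nibble = router version
    // (i.e. byte = (router_version << 4) | instruction).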
    let ix_discriminator = msg.data[0] & 15;
    let router_version = msg.data[0] >> 4;
    let is_success = msg.is_success;

    if ix_discriminator != autobahn_executor::Instructions::ExecuteSwapV3 as u8
        && ix_discriminator != autobahn_executor::Instructions::ExecuteSwapV2 as u8
    {
        return;
    }

    info!(router_version, is_success, "Swap TX Received");

    let is_insufficient_funds = msg.logs.iter().any(|x| x.contains("insufficient funds"));
    if is_insufficient_funds {
        info!("ignoring tx {} - insufficient funds", msg.signature);
        return;
    }

    sender
        .send(crate::persister::PersistableTx {
            sig: msg.signature,
            is_success,
            router_version,
        })
        .await
        .expect("sending must succeed");
}
@ -0,0 +1,21 @@
[metrics]
enabled = true

[source]
rpc_http_url = "not used"
dedup_queue_size = 50000

[[source.grpc_sources]]
name = "router-indexer"
connection_string = "$RPC_HTTP_URL_WITHOUT_TOKEN"
token = "$RPC_TOKEN"
retry_connection_sleep_secs = 30

[postgres]
connection_string = "$PG_CONNECTION_STRING"
max_retry_count = 2
allow_invalid_certs = true

[postgres.tls]
ca_cert_path = "$PG_CA_CERT"
client_key_path = "$PG_CLIENT_KEY"
@ -0,0 +1,28 @@
[package]
name = "randbot"
version = "0.0.1"
edition = "2021"

[[bin]]
name = "randbot"
path = "src/main.rs"

[dependencies]
tokio = { workspace = true }
anyhow = { workspace = true }
solana-sdk = { workspace = true }
solana-client = { workspace = true }
router-lib = { path = "../../lib/router-lib/", version = "0.0.1" }
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
spl-associated-token-account = "1.0.5"
shellexpand = "3.1.0"
solana-account-decoder = "1.17"

reqwest = { workspace = true, features = ["json"] }
tracing = { version = "0.1", features = ["log"] }
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
atty = "0.2.14"
toml = "0.5.11"
rand = "0.7.3"
@ -0,0 +1,25 @@
FROM rust:1.76.0 AS base
RUN cargo install cargo-chef@0.1.62 --locked
RUN rustup component add rustfmt
RUN apt-get update && apt-get install -y clang cmake ssh
WORKDIR /app

FROM base AS plan
COPY . .
WORKDIR /app
RUN cargo chef prepare --recipe-path recipe.json

FROM base AS build
COPY --from=plan /app/recipe.json recipe.json
RUN cargo chef cook --release --recipe-path recipe.json
COPY . .
RUN cargo build --release --bin randbot

FROM debian:bookworm-slim AS run
RUN apt-get update && apt-get -y install ca-certificates libc6 libssl3 libssl-dev openssl

COPY --from=build /app/target/release/randbot /usr/local/bin/
COPY --from=build /app/bin/randbot/template-config.toml /usr/local/bin/template-config.toml

RUN adduser --system --group --no-create-home mangouser
USER mangouser
@ -0,0 +1,11 @@
RandBot
=======

Randomly trades to stress test the router, while trying to keep the wallet in SOL/USDC only.

On each iteration, it swaps N USDC to A, then A to B, then B back to USDC; a minimal sketch follows below.
- N is randomly selected
- A and B are randomly selected

On startup, if there is existing exposure, it swaps everything to USDC (keeping some SOL for gas).
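A minimal sketch of one iteration (the `swap` helper, `mints` set and `usdc` constant are the ones defined in `src/main.rs`):

```rust
let amount = *config.amounts.choose(&mut rand::thread_rng()).unwrap(); // N
let mut tokens = mints.iter().filter(|x| **x != usdc).collect::<Vec<_>>();
tokens.shuffle(&mut rand::thread_rng()); // A = tokens[0], B = tokens[1]
let amount = bot.swap(&usdc, tokens[0], amount).await?; // USDC -> A
let amount = bot.swap(tokens[0], tokens[1], amount).await?; // A -> B
bot.swap(tokens[1], &usdc, amount).await?; // B -> USDC
```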
@ -0,0 +1,19 @@
router = "http://127.0.0.1:8888"
rpc_http_url = "$RPC_URL"
outgoing_rpc_http_url = "$RPC_URL"
owner = "$KEYPAIR_PATH"
execution_interval_sec = 60

mints = [
    "So11111111111111111111111111111111111111112", # SOL
    "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", # USDC
    "J1toso1uCk3RLmjorhTtrVwY9HJ7X8V9yYac6Y7kGCPn", # JitoSOL
    "Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB", # USDT
    "JUPyiwrYJFskUPiHa7hkeR8VUtAeFoSYbKedZNsDvCN", # JUP
    "5oVNBeEEQvYi1cX3ir8Dx5n1P7pdxydbGF2X4TxVusJm", # INF
]
use_mango_tokens = false

# Random amount will be selected from this list (in USDC native)
amounts = [1000, 500000, 2000000]
@ -0,0 +1,42 @@
use serde::{Deserialize, Deserializer};
use std::env;

#[derive(Clone, Debug, serde_derive::Deserialize)]
pub struct Config {
    #[serde(deserialize_with = "serde_string_or_env")]
    pub router: String,

    #[serde(deserialize_with = "serde_string_or_env")]
    pub owner: String,

    #[serde(deserialize_with = "serde_string_or_env")]
    pub rpc_http_url: String,

    #[serde(deserialize_with = "serde_string_or_env")]
    pub outgoing_rpc_http_url: String,

    pub mints: Vec<String>,
    pub use_mango_tokens: bool,

    pub amounts: Vec<u64>,
    pub execution_interval_sec: u64,
}

/// Get the string's content, or the content of an env variable if the string starts with `$`.
///
/// Example:
/// - "abc" -> "abc"
/// - "$something" -> read the env variable named `something` and return its content
///
/// *WARNING*: May kill the program if the environment variable it asks for does not exist
pub fn serde_string_or_env<'de, D>(deserializer: D) -> Result<String, D::Error>
where
    D: Deserializer<'de>,
{
    let value_or_env = String::deserialize(deserializer)?;
    let value = match value_or_env.strip_prefix('$') {
        Some(env_name) => env::var(env_name).expect("reading from env"),
        None => value_or_env,
    };
    Ok(value)
}
@ -0,0 +1,403 @@
use crate::util::{keypair_from_cli, tracing_subscriber_init};
use rand::seq::SliceRandom;
use router_lib::dex::SwapMode;
use router_lib::mango::mango_fetcher::fetch_mango_data;
use router_lib::router_client::RouterClient;
use solana_client::nonblocking::rpc_client::RpcClient;
use solana_client::rpc_client::RpcClient as BlockingRpcClient;
use solana_client::rpc_config::{RpcSendTransactionConfig, RpcSimulateTransactionConfig};
use solana_sdk::account::ReadableAccount;
use solana_sdk::address_lookup_table::state::AddressLookupTable;
use solana_sdk::address_lookup_table::AddressLookupTableAccount;
use solana_sdk::commitment_config::{CommitmentConfig, CommitmentLevel};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Keypair;
use solana_sdk::signature::Signer;
use spl_associated_token_account::get_associated_token_address;
use std::cmp::min;
use std::collections::HashSet;
use std::fs::File;
use std::io::Read;
use std::str::FromStr;
use tokio::time::{sleep, Duration};
use tracing::{debug, error, info, warn};

mod config;
mod util;

struct Bot {
    rpc_client: RpcClient,
    outgoing_rpc_client: RpcClient,
    swap_client: RouterClient,
    wallet: Keypair,
    min_sol: u64,
    blocking_rpc_client: BlockingRpcClient,
}

impl Bot {
    pub fn sol() -> Pubkey {
        Pubkey::from_str("So11111111111111111111111111111111111111112").unwrap()
    }

    pub fn usdc() -> Pubkey {
        Pubkey::from_str("EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v").unwrap()
    }

    pub async fn swap(&self, from: &Pubkey, to: &Pubkey, mut amount: u64) -> anyhow::Result<u64> {
        let max_attempt = 4;
        for i in 1..max_attempt + 1 {
            if *from == Bot::sol() {
                let balance = self.balance(from).await?;
                if balance <= self.min_sol {
                    return Ok(0);
                }

                let max_amount = balance - self.min_sol;
                amount = min(max_amount, amount);
            } else {
                let balance_from = self.balance(from).await?;
                amount = min(balance_from, amount);
            }

            info!("swap {} {} => {}", amount, from, to);
            let balance_before = self.balance(to).await?;

            match self.swap_internal(from, to, amount).await {
                Ok(_) => {}
                Err(e) => {
                    if i == max_attempt {
                        anyhow::bail!("failed to swap: {}", e);
                    }

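                    // Backoff grows with the attempt number: 3*i*1.2^i seconds,
                    // i.e. roughly 4s, 9s, 16s, 25s for attempts 1..=4.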
                    let duration_secs = (i as f64 * 3.0 * 1.2_f64.powi(i)).ceil() as u64;
                    warn!(
                        "swap failed with error: {} (sleeping for {} before retrying)",
                        e, duration_secs
                    );
                    sleep(Duration::from_secs(duration_secs)).await;
                    continue;
                }
            };

            info!("swap confirmed");

            let balance_after = self.balance(to).await?;

            // if mint is sol, can actually decrease because of fees and ATA creation
            if balance_after < balance_before && *to == Bot::sol() {
                return Ok(0);
            } else {
                return Ok(balance_after - balance_before);
            }
        }

        anyhow::bail!("Failed to swap")
    }

    async fn swap_internal(&self, from: &Pubkey, to: &Pubkey, amount: u64) -> anyhow::Result<()> {
        let quote = self
            .swap_client
            .quote(*from, *to, amount, 50, false, 28, SwapMode::ExactIn)
            .await?;

        info!(
            "quote {} {} => {} {}",
            quote.in_amount.clone().expect("in amount"),
            from,
            quote.out_amount,
            to
        );
        debug!("{:?}", quote.clone());

        let load_alt = |alt_addr| {
            let alt_data = self.blocking_rpc_client.get_account(&alt_addr);

            match alt_data {
                Ok(alt_data) => Some(AddressLookupTableAccount {
                    key: alt_addr,
                    addresses: AddressLookupTable::deserialize(alt_data.data.as_slice())
                        .unwrap()
                        .addresses
                        .to_vec(),
                }),
                Err(_) => None,
            }
        };

        let quote_slot = quote.context_slot;
        let (latest_blockhash, _) = self
            .outgoing_rpc_client
            .get_latest_blockhash_with_commitment(CommitmentConfig::finalized())
            .await?;
        let latest_slot = self
            .outgoing_rpc_client
            .get_slot_with_commitment(CommitmentConfig::processed())
            .await?;
        let tx = self
            .swap_client
            .swap(load_alt, quote.clone(), &self.wallet, latest_blockhash)
            .await?;

        info!(
            "swap sig: {} / quote slot: {} / latest_slot: {}",
            tx.signatures[0], quote_slot, latest_slot
        );

        if let Some(router_accounts) = quote.accounts {
            let simulation_result = self
                .outgoing_rpc_client
                .simulate_transaction_with_config(
                    &tx,
                    RpcSimulateTransactionConfig {
                        sig_verify: false,
                        replace_recent_blockhash: false,
                        commitment: Some(CommitmentConfig::processed()),
                        encoding: None,
                        accounts: None,
                        min_context_slot: None,
                    },
                )
                .await;
            match simulation_result {
                Ok(s) => {
                    if s.value.err.is_some() {
                        warn!("Simulation failed! {:?}", s.value.err.unwrap());

                        let addresses = router_accounts
                            .iter()
                            .map(|x| Pubkey::from_str(x.address.as_str()).unwrap())
                            .collect::<Vec<_>>();
                        let rpc_accounts = self
                            .outgoing_rpc_client
                            .get_multiple_accounts_with_commitment(
                                &addresses,
                                CommitmentConfig::processed(),
                            )
                            .await?;

                        warn!(
                            "- Has rpc_accounts ?: {} (slot={})",
                            rpc_accounts.value.len(),
                            rpc_accounts.context.slot
                        );
                        warn!("- Has router_accounts ?: {}", router_accounts.len());

                        for (rpc_account, router_account) in
                            rpc_accounts.value.iter().zip(router_accounts.iter())
                        {
                            let Some(rpc_account) = rpc_account else {
                                warn!(" - empty for {}", router_account.address);
                                continue;
                            };

                            if rpc_account.data() != router_account.data.as_slice() {
                                warn!(
                                    "- Difference for account: {}, slot={}",
                                    router_account.address, router_account.slot
                                );
                            }
                        }
                    }
                }
                Err(e) => {
                    error!("Simulation error! {:?}", e);
                }
            }
        }

        self.outgoing_rpc_client
            .send_and_confirm_transaction_with_spinner_and_config(
                &tx,
                CommitmentConfig::confirmed(),
                RpcSendTransactionConfig {
                    skip_preflight: true,
                    preflight_commitment: Some(CommitmentLevel::Confirmed),
                    ..RpcSendTransactionConfig::default()
                },
            )
            .await?;

        Ok(())
    }

    pub async fn balance(&self, mint: &Pubkey) -> anyhow::Result<u64> {
        let max_attempt = 4;
        for i in 1..max_attempt + 1 {
            match self.balance_internal(mint).await {
                Ok(res) => return Ok(res),
                Err(e) => {
                    if i == max_attempt {
                        break;
                    }

                    let duration_secs = (i as f64 * 3.0 * 1.2_f64.powi(i)).ceil() as u64;
                    warn!(
                        "failed to retrieve balance: {} (sleeping for {} before retrying)",
                        e, duration_secs
                    );
                    sleep(Duration::from_secs(duration_secs)).await;
                }
            }
        }

        anyhow::bail!("failed to retrieve balance (RPC issue?)");
    }

    pub async fn balance_internal(&self, mint: &Pubkey) -> anyhow::Result<u64> {
        if *mint == Bot::sol() {
            let balance = self.rpc_client.get_balance(&self.wallet.pubkey()).await?;
            debug!("balance of SOL is {}", balance);
            return Ok(balance);
        }

        let ata = get_associated_token_address(&self.wallet.pubkey(), mint);
        let account_res = self.rpc_client.get_account(&ata).await;

        match account_res {
            Ok(account) => {
                // SPL token account layout: the amount is the u64 at offset 64
                let balance_data = &account.data[64..(64 + 8)];
                let balance = u64::from_le_bytes(balance_data.try_into().unwrap());

                debug!("balance of {} is {}", mint, balance);

                Ok(balance)
            }
            Err(e) => {
                error!("failed to retrieve balance of {} ({})", mint, e);
                Ok(0)
            }
        }
    }
}

#[tokio::main]
async fn main() -> Result<(), anyhow::Error> {
    tracing_subscriber_init();
    let args: Vec<String> = std::env::args().collect();
    if args.len() < 2 {
        eprintln!("Please enter a config file path argument.");
        return Ok(());
    }

    let config: config::Config = {
        let mut file = File::open(&args[1])?;
        let mut contents = String::new();
        file.read_to_string(&mut contents)?;
        toml::from_str(&contents).unwrap()
    };

    let mints: HashSet<_> = config
        .mints
        .iter()
        .map(|x| Pubkey::from_str(x).unwrap())
        .collect();
    let mints = if config.use_mango_tokens {
        let mango_mints = fetch_mango_data().await?;
        mints
            .into_iter()
            .chain(mango_mints.mints.into_iter())
            .collect::<HashSet<_>>()
    } else {
        mints
    };

    info!("Using {} mints", mints.len());

    let wallet = keypair_from_cli(config.owner.as_str());
    let rpc_client =
        RpcClient::new_with_commitment(config.rpc_http_url.clone(), CommitmentConfig::confirmed());
    let outgoing_rpc_client = RpcClient::new_with_commitment(
        config.outgoing_rpc_http_url.clone(),
        CommitmentConfig::confirmed(),
    );
    let blocking_rpc_client =
        BlockingRpcClient::new_with_commitment(config.rpc_http_url, CommitmentConfig::confirmed());
    let swap_client = RouterClient {
        http_client: reqwest::Client::builder().build()?,
        router_url: config.router,
    };
    let mut interval = tokio::time::interval(Duration::from_secs(config.execution_interval_sec));

    let min_sol = 100_000_000; // 0.1 SOL
    let bot = Bot {
        wallet,
        rpc_client,
        outgoing_rpc_client,
        blocking_rpc_client,
        swap_client,
        min_sol,
    };

    let usdc = Bot::usdc();
    let sol = Bot::sol();

    // Step 1 - Move all to USDC (except min SOL for gas)
    info!("#1 --- Startup ---");
    for mint in &mints {
        let balance = bot.balance(mint).await.expect("failed to get balance");

        if balance > 100 {
            // anything at or below 100 native units is treated as dust
            if *mint == sol {
                info!(
                    "Startup balance: {} for {}; will keep at least {}",
                    balance, mint, bot.min_sol
                );
            } else {
                info!("Startup balance: {} for {}", balance, mint);
            }

            if *mint == usdc {
                continue;
            }

            match bot.swap(mint, &usdc, balance).await {
                Ok(_) => {}
                Err(e) => {
                    error!("Rebalancing swap failed: {:?}", e)
                }
            }
        }
    }

    // Step 2 - Random swaps
    // - USDC => A
    // - A => B
    // - B => USDC
    loop {
        // refill SOL if needed
        let mut sol_balance = bot.balance(&sol).await?;
        while sol_balance < bot.min_sol {
            info!("## --- Refill - USDC => SOL ---");
            bot.swap(&usdc, &sol, 1_000_000) // $1
                .await
                .expect("Refill swap failed");
            sol_balance = bot.balance(&sol).await?;
        }

        info!("#2 --- USDC => X => Y => USDC ---");
        let amount = *config.amounts.choose(&mut rand::thread_rng()).unwrap();
        let mut tokens = mints.iter().filter(|x| **x != usdc).collect::<Vec<_>>();
        tokens.shuffle(&mut rand::thread_rng());

        let amount = bot
            .swap(&usdc, tokens[0], amount)
            .await
            .expect("First swap failed");

        info!("---");
        let amount = bot
            .swap(tokens[0], tokens[1], amount)
            .await
            .expect("Second swap failed");

        info!("---");
        let _amount = bot
            .swap(tokens[1], &usdc, amount)
            .await
            .expect("Third swap failed");

        interval.tick().await;
    }
}
@ -0,0 +1,24 @@
use solana_sdk::signature::Keypair;
use solana_sdk::signer::keypair;
use std::str::FromStr;

pub fn tracing_subscriber_init() {
    let format = tracing_subscriber::fmt::format().with_ansi(atty::is(atty::Stream::Stdout));

    tracing_subscriber::fmt()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .event_format(format)
        .init();
}

pub fn keypair_from_cli(keypair: &str) -> Keypair {
    let maybe_keypair = keypair::read_keypair(&mut keypair.as_bytes());
    match maybe_keypair {
        Ok(keypair) => keypair,
        Err(_) => {
            let path = std::path::PathBuf::from_str(&shellexpand::tilde(keypair)).unwrap();
            keypair::read_keypair_file(path)
                .unwrap_or_else(|_| panic!("Failed to read keypair from {}", keypair))
        }
    }
}
@ -0,0 +1,12 @@
router = "http://router-1.internal:8888"
rpc_http_url = "$RPC_URL"
outgoing_rpc_http_url = "$RPC_URL"
owner = "$KEYPAIR_PATH"
execution_interval_sec = 120

mints = []
use_mango_tokens = true

# Random amount will be selected from this list (in USDC native)
amounts = [2000000, 30000000]
@ -0,0 +1,12 @@
<svg width="188" height="188" viewBox="0 0 188 188" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_425_201)">
<circle cx="94" cy="94" r="94" fill="#D6DF20"/>
<path d="M93.956 38.499L30.375 134.499H85.9175L93.956 38.499Z" fill="#060606"/>
<path d="M102.083 134.499H157.625L93.956 38.499L102.083 134.499Z" fill="#060606"/>
</g>
<defs>
<clipPath id="clip0_425_201">
<rect width="188" height="188" fill="white"/>
</clipPath>
</defs>
</svg>
After Width: | Height: | Size: 462 B |
@ -0,0 +1,24 @@
# fly.toml app configuration file generated for router-comparer on 2024-07-26T09:28:17+02:00
#
# See https://fly.io/docs/reference/configuration/ for information about how to use this file.
#

app = 'router-comparer'
primary_region = 'ams'
kill_signal = 'SIGTERM'
kill_timeout = '30s'

[experimental]
cmd = ['comparer', '/usr/local/bin/template-config.toml']

[build]
dockerfile = 'bin/comparer/Dockerfile'

[[restart]]
policy = 'always'
retries = 10

[[vm]]
memory = '512mb'
cpu_kind = 'shared'
cpus = 1
@ -0,0 +1,14 @@
app = "router-indexer"
primary_region = "nrt"
kill_signal = "SIGTERM"
kill_timeout = "30s"

[build]
dockerfile = 'bin/indexer/Dockerfile'

[experimental]
cmd = ["indexer", "/usr/local/bin/template-config.toml"]

[[restart]]
policy = "always"
retries = 10
@ -0,0 +1,14 @@
app = "router-randbot"
primary_region = "nrt"
kill_signal = "SIGTERM"
kill_timeout = "30s"

[build]
dockerfile = 'bin/randbot/Dockerfile'

[experimental]
cmd = ["randbot", "/usr/local/bin/template-config.toml"]

[[restart]]
policy = "always"
retries = 10
@ -0,0 +1,22 @@
app = "router-1"
primary_region = "ams"
kill_signal = "SIGTERM"
kill_timeout = "30s"

[build]
dockerfile = 'bin/autobahn-router/Dockerfile'

[experimental]
cmd = ["autobahn-router", "/usr/local/bin/template-config.toml"]

[[vm]]
size = "shared-cpu-4x"
memory = "8gb"

[[restart]]
policy = "always"
retries = 10

[metrics]
port = 9091
path = "/metrics"
@ -0,0 +1,15 @@
build:
    cargo build
    cargo build-sbf

lint:
    cargo clippy --no-deps --tests --features test-bpf

test-all:
    cargo build-sbf
    cargo test-sbf
    cargo test

test TEST_NAME:
    cargo build-sbf
    cargo test-sbf -- {{ TEST_NAME }}
@ -0,0 +1,42 @@
[package]
name = "dex-infinity"
version = "0.0.1"
edition = "2021"

[lib]
doctest = false

[dependencies]
router-lib = { path = "../router-lib", version = "0.0.1" }
router-feed-lib = { path = "../router-feed-lib", version = "0.1" }
solana-account-decoder = "1.17"
solana-client = { workspace = true }
solana-sdk = { workspace = true }
solana-logger = "1.17"
solana-program = "1.17"
solana-program-test = "1.17"
anchor-lang = "0.29.0"
anchor-client = "0.29.0"
anchor-spl = "0.29.0"
anyhow = "1.0.86"
itertools = "0.10.5"
async-trait = "0.1.79"
chrono = "0.4.38"
sha2 = "0.10.8"
tracing = "0.1.40"
spl-associated-token-account = "1.0.5"

# infinity
solana-readonly-account = { version = "1.1.0", features = ["solana-sdk"] }
solana-sdk-macro = "1.17"
jupiter-amm-interface = { git = "https://github.com/jup-ag/jupiter-amm-interface.git", version = "0.3.3" }
sanctum-lst-list = { git = "https://github.com/igneous-labs/sanctum-lst-list.git", branch = "master" }
sanctum-token-ratio = { git = "https://github.com/igneous-labs/sanctum-solana-utils.git", rev = "f6c40a0", version = "0.2.0" }
s-controller-lib = { git = "https://github.com/igneous-labs/S", branch = "master" }
s-jup-interface = { git = "https://github.com/igneous-labs/S", branch = "master" }
s-sol-val-calc-prog-aggregate = { git = "https://github.com/igneous-labs/S", branch = "master" }
flat_fee_interface = { git = "https://github.com/igneous-labs/S", branch = "master" }
serde = { version = "1.0.203", features = ["derive"] }

[dev-dependencies]
router-test-lib = { path = "../router-test-lib", version = "0.1" }
Binary file not shown.
@ -0,0 +1,49 @@
use std::any::Any;

use s_jup_interface::SPoolJup;
use solana_sdk::pubkey::Pubkey;

use router_lib::dex::{DexEdge, DexEdgeIdentifier};

pub struct InfinityEdge {
    pub data: SPoolJup,
}

pub struct InfinityEdgeIdentifier {
    pub input_mint: Pubkey,
    pub output_mint: Pubkey,
    pub is_output_lp: bool,
    pub accounts_needed: usize,
}

impl DexEdge for InfinityEdge {
    fn as_any(&self) -> &dyn Any {
        self
    }
}

impl DexEdgeIdentifier for InfinityEdgeIdentifier {
    fn key(&self) -> Pubkey {
        self.input_mint
    }

    fn desc(&self) -> String {
        format!("Infinity_{}", self.input_mint)
    }

    fn input_mint(&self) -> Pubkey {
        self.input_mint
    }

    fn output_mint(&self) -> Pubkey {
        self.output_mint
    }

    fn accounts_needed(&self) -> usize {
        self.accounts_needed
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}

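A quick sketch of how these identifiers behave, illustrative only: the mints below are random placeholders rather than real LSTs, and `edge_identifier_demo` is not part of the repo.

```rust
use router_lib::dex::DexEdgeIdentifier;
use solana_sdk::pubkey::Pubkey;

use crate::edge::InfinityEdgeIdentifier;

fn edge_identifier_demo() {
    let lst_mint = Pubkey::new_unique(); // placeholder for a real LST mint
    let lp_mint = Pubkey::new_unique(); // placeholder for the INF LP mint
    let id = InfinityEdgeIdentifier {
        input_mint: lst_mint,
        output_mint: lp_mint,
        is_output_lp: false, // the LST -> LP direction, mirroring initialize()
        accounts_needed: 12, // 10 + the calculator's account metas in the real code
    };
    // The router keys and labels edges off the input mint:
    assert_eq!(id.key(), lst_mint);
    assert_eq!(id.desc(), format!("Infinity_{}", lst_mint));
}
```
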
@ -0,0 +1,275 @@
use std::collections::HashMap;
use std::collections::HashSet;
use std::sync::Arc;

use jupiter_amm_interface::{Amm, QuoteParams, SwapMode};
use s_jup_interface::{SPoolInitAccounts, SPoolInitKeys, SPoolJup};
use s_sol_val_calc_prog_aggregate::{LstSolValCalc, MutableLstSolValCalc};
use sanctum_lst_list::{
    inf_s_program, lido_program, marinade_program, sanctum_spl_multi_stake_pool_program,
    sanctum_spl_stake_pool_program, spl_stake_pool_program, SanctumLstList,
};
use solana_sdk::account::Account;
use solana_sdk::pubkey::Pubkey;
use solana_sdk_macro::pubkey;

use router_feed_lib::router_rpc_client::{RouterRpcClient, RouterRpcClientTrait};
use router_lib::dex::{
    AccountProviderView, DexEdge, DexEdgeIdentifier, DexInterface, DexSubscriptionMode, Quote,
    SwapInstruction,
};

use crate::edge::{InfinityEdge, InfinityEdgeIdentifier};
use crate::ix_builder;

pub const INF_LP_PK: Pubkey = pubkey!("5oVNBeEEQvYi1cX3ir8Dx5n1P7pdxydbGF2X4TxVusJm");

pub struct InfinityDex {
    pub edges: HashMap<Pubkey, Vec<Arc<dyn DexEdgeIdentifier>>>,
    pub subscribed_pks: HashSet<Pubkey>,
    pub programs: Vec<(Pubkey, String)>,
}

#[async_trait::async_trait]
impl DexInterface for InfinityDex {
    async fn initialize(
        rpc: &mut RouterRpcClient,
        _options: HashMap<String, String>,
    ) -> anyhow::Result<Arc<dyn DexInterface>>
    where
        Self: Sized,
    {
        let program_id = s_controller_lib::program::ID;
        let SanctumLstList { sanctum_lst_list } = SanctumLstList::load();

        let SPoolInitKeys {
            lst_state_list,
            pool_state,
        } = SPoolJup::init_keys(program_id);
        let lst_state_list_account = rpc.get_account(&lst_state_list).await.unwrap();
        let pool_state_account = rpc.get_account(&pool_state).await.unwrap();

        let amm: s_jup_interface::SPool<Account, Account> = SPoolJup::from_init_accounts(
            program_id,
            SPoolInitAccounts {
                lst_state_list: lst_state_list_account.clone(),
                pool_state: pool_state_account.clone(),
            },
            &sanctum_lst_list,
        )?;

        let subscribed_pks =
            HashSet::<Pubkey>::from_iter(amm.get_accounts_to_update_full().iter().copied());

        let mut edges_per_pk: HashMap<Pubkey, Vec<Arc<dyn DexEdgeIdentifier>>> = HashMap::new();

        for lst_data in amm.lst_data_list.iter().flatten() {
            let lst_mint = lst_data.sol_val_calc.lst_mint();
            let account_metas = lst_data.sol_val_calc.ix_accounts();
            let num_accounts_for_tx = account_metas.len();
            for pk in lst_data.sol_val_calc.get_accounts_to_update() {
                let edges = vec![
                    Arc::new(InfinityEdgeIdentifier {
                        input_mint: INF_LP_PK,
                        output_mint: lst_mint,
                        accounts_needed: 10 + num_accounts_for_tx,
                        is_output_lp: true,
                    }) as Arc<dyn DexEdgeIdentifier>,
                    Arc::new(InfinityEdgeIdentifier {
                        input_mint: lst_mint,
                        output_mint: INF_LP_PK,
                        accounts_needed: 10 + num_accounts_for_tx,
                        is_output_lp: false,
                    }),
                ];

                if let Some(edges_per_pk) = edges_per_pk.get_mut(&pk) {
                    edges_per_pk.extend(edges.iter().cloned());
                } else {
                    edges_per_pk.insert(pk, edges);
                }
            }
        }

        let programs = amm.program_dependencies();

        // TODO: why are more accounts subscribed than present in the update map?

        let dex = InfinityDex {
            edges: edges_per_pk,
            subscribed_pks,
            programs,
        };

        Ok(Arc::new(dex))
    }

    fn program_ids(&self) -> HashSet<Pubkey> {
        [
            s_controller_lib::program::ID,
            sanctum_spl_multi_stake_pool_program::ID,
            sanctum_spl_stake_pool_program::ID,
            lido_program::ID,
            marinade_program::ID,
            inf_s_program::ID,
            flat_fee_interface::ID,
            spl_stake_pool_program::ID,
        ]
        .into_iter()
        .chain(self.programs.iter().map(|x| x.0))
        .collect()
    }

    fn name(&self) -> String {
        "Infinity".to_string()
    }

    fn subscription_mode(&self) -> DexSubscriptionMode {
        DexSubscriptionMode::Accounts(
            self.edges
                .keys()
                .cloned()
                .chain(self.subscribed_pks.iter().cloned())
                .collect(),
        )
    }

    fn edges_per_pk(&self) -> HashMap<Pubkey, Vec<Arc<dyn DexEdgeIdentifier>>> {
        self.edges.clone()
    }

    fn load(
        &self,
        _id: &Arc<dyn DexEdgeIdentifier>,
        chain_data: &AccountProviderView,
    ) -> anyhow::Result<Arc<dyn DexEdge>> {
        let program_id = s_controller_lib::program::ID;
        let SanctumLstList { sanctum_lst_list } = SanctumLstList::load();

        let SPoolInitKeys {
            lst_state_list,
            pool_state,
        } = SPoolJup::init_keys(program_id);

        let lst_state_list_account = &chain_data.account(&lst_state_list).unwrap().account;
        let pool_state_account = &chain_data.account(&pool_state).unwrap().account;
        let mut amm: s_jup_interface::SPool<Account, Account> = SPoolJup::from_init_accounts(
            program_id,
            SPoolInitAccounts {
                lst_state_list: lst_state_list_account.clone().into(),
                pool_state: pool_state_account.clone().into(),
            },
            &sanctum_lst_list,
        )?;

        let mut update: HashMap<Pubkey, Account> = HashMap::new();

        for pk in amm.get_accounts_to_update_full().iter() {
            if let Ok(acc) = chain_data.account(pk) {
                update.insert(*pk, acc.account.clone().into());
            }
        }
        amm.update_full(&update)?;

        Ok(Arc::new(InfinityEdge { data: amm }))
    }

    fn quote(
        &self,
        id: &Arc<dyn DexEdgeIdentifier>,
        edge: &Arc<dyn DexEdge>,
        _chain_data: &AccountProviderView,
        in_amount: u64,
    ) -> anyhow::Result<Quote> {
        let id = id
            .as_any()
            .downcast_ref::<InfinityEdgeIdentifier>()
            .unwrap();
        let edge = edge.as_any().downcast_ref::<InfinityEdge>().unwrap();

        let (input_mint, output_mint) = (id.input_mint, id.output_mint);

        let quote = edge.data.quote(&QuoteParams {
            amount: in_amount,
            input_mint,
            output_mint,
            swap_mode: SwapMode::ExactIn,
        })?;

        let out_amount = if quote.not_enough_liquidity {
            0
        } else {
            quote.out_amount
        };

        Ok(Quote {
            in_amount,
            out_amount,
            fee_amount: quote.fee_amount,
            fee_mint: quote.fee_mint,
        })
    }

    fn build_swap_ix(
        &self,
        id: &Arc<dyn DexEdgeIdentifier>,
        chain_data: &AccountProviderView,
        wallet_pk: &Pubkey,
        in_amount: u64,
        out_amount: u64,
        max_slippage_bps: i32,
    ) -> anyhow::Result<SwapInstruction> {
        let id = id
            .as_any()
            .downcast_ref::<InfinityEdgeIdentifier>()
            .unwrap();
        ix_builder::build_swap_ix(
            id,
            chain_data,
            wallet_pk,
            in_amount,
            out_amount,
            max_slippage_bps,
        )
    }

    fn supports_exact_out(&self, _id: &Arc<dyn DexEdgeIdentifier>) -> bool {
        true
    }

    fn quote_exact_out(
        &self,
        id: &Arc<dyn DexEdgeIdentifier>,
        edge: &Arc<dyn DexEdge>,
        _chain_data: &AccountProviderView,
        out_amount: u64,
    ) -> anyhow::Result<Quote> {
        let id = id
            .as_any()
            .downcast_ref::<InfinityEdgeIdentifier>()
            .unwrap();
        let edge = edge.as_any().downcast_ref::<InfinityEdge>().unwrap();

        let (input_mint, output_mint) = (id.input_mint, id.output_mint);

        let quote = edge.data.quote(&QuoteParams {
            amount: out_amount,
            input_mint,
            output_mint,
            swap_mode: SwapMode::ExactOut,
        })?;

        let in_amount = if quote.not_enough_liquidity {
            u64::MAX
        } else {
            quote.in_amount
        };

        Ok(Quote {
            in_amount,
            out_amount,
            fee_amount: quote.fee_amount,
            fee_mint: quote.fee_mint,
        })
    }
}

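Putting the trait together: the router's refresh loop conceptually maps a changed account back to its edges via `edges_per_pk`, reloads each edge, and re-quotes it. A sketch under assumed setup; `chain_data`, the probe size, and `reprice_for_account` itself are placeholders, not repo code.

```rust
use std::sync::Arc;

use solana_sdk::pubkey::Pubkey;

use router_lib::dex::{AccountProviderView, DexEdgeIdentifier, DexInterface};

// Sketch: when account `pk` changes on chain, reprice every edge mapped to it.
fn reprice_for_account(
    dex: &Arc<dyn DexInterface>,
    chain_data: &AccountProviderView,
    pk: &Pubkey,
) -> anyhow::Result<()> {
    if let Some(ids) = dex.edges_per_pk().get(pk) {
        for id in ids {
            let edge = dex.load(id, chain_data)?;
            let quote = dex.quote(id, &edge, chain_data, 1_000_000_000)?; // 1 SOL probe
            tracing::info!("{}: out_amount = {}", id.desc(), quote.out_amount);
        }
    }
    Ok(())
}
```
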
@ -0,0 +1,81 @@
use jupiter_amm_interface::SwapParams;
use s_jup_interface::{SPoolInitAccounts, SPoolInitKeys, SPoolJup};
use sanctum_lst_list::SanctumLstList;
use solana_sdk::{account::Account, pubkey::Pubkey};
use spl_associated_token_account::get_associated_token_address;
use std::collections::HashMap;

use router_lib::dex::{AccountProviderView, SwapInstruction};

use crate::edge::InfinityEdgeIdentifier;

pub fn build_swap_ix(
    id: &InfinityEdgeIdentifier,
    chain_data: &AccountProviderView,
    wallet_pk: &Pubkey,
    in_amount: u64,
    out_amount: u64,
    max_slippage_bps: i32,
) -> anyhow::Result<SwapInstruction> {
    let program_id = s_controller_lib::program::ID;
    let SanctumLstList { sanctum_lst_list } = SanctumLstList::load();

    let SPoolInitKeys {
        lst_state_list,
        pool_state,
    } = SPoolJup::init_keys(program_id);

    let lst_state_list_account = &chain_data.account(&lst_state_list).unwrap().account;
    let pool_state_account = &chain_data.account(&pool_state).unwrap().account;
    let mut amm: s_jup_interface::SPool<Account, Account> = SPoolJup::from_init_accounts(
        program_id,
        SPoolInitAccounts {
            lst_state_list: lst_state_list_account.clone().into(),
            pool_state: pool_state_account.clone().into(),
        },
        &sanctum_lst_list,
    )?;

    let mut update: HashMap<Pubkey, Account> = HashMap::new();

    for pk in amm.get_accounts_to_update_full().iter() {
        if let Ok(acc) = chain_data.account(pk) {
            update.insert(*pk, acc.account.clone().into());
        }
    }
    amm.update_full(&update)?;

    let (in_mint, out_mint) = (id.input_mint, id.output_mint);

    let in_pubkey = get_associated_token_address(wallet_pk, &in_mint);
    let out_pubkey = get_associated_token_address(wallet_pk, &out_mint);
    let min_out_amount =
        ((out_amount as f64 * (10_000f64 - max_slippage_bps as f64)) / 10_000f64).floor() as u64; // TODO

    let instruction = amm.swap_ix(
        &SwapParams {
            in_amount,
            out_amount: min_out_amount,
            source_mint: in_mint,
            destination_mint: out_mint,
            source_token_account: in_pubkey,
            destination_token_account: out_pubkey,
            token_transfer_authority: *wallet_pk,
            open_order_address: None,
            quote_mint_to_referrer: None,
            jupiter_program_id: &Pubkey::default(),
            missing_dynamic_accounts_as_default: false,
        },
        jupiter_amm_interface::SwapMode::ExactIn,
    )?;

    let in_amount_offset = 6; // same for mint & burn

    Ok(SwapInstruction {
        instruction,
        out_pubkey,
        out_mint,
        in_amount_offset,
        cu_estimate: None,
    })
}

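The `min_out_amount` computation above is plain basis-points slippage: with `out_amount = 1_000_000` and `max_slippage_bps = 50`, it yields `(1_000_000 * (10_000 - 50)) / 10_000 = 995_000`, i.e. the instruction tolerates at most a 0.5% shortfall against the quoted amount. `in_amount_offset = 6` records where the input amount sits in the instruction data (identical for the mint and burn paths, per the comment), presumably so the router can patch the amount in place when chaining swaps.
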
@ -0,0 +1,5 @@
mod edge;
mod infinity;
mod ix_builder;

pub use infinity::InfinityDex;

@ -0,0 +1,41 @@
use std::collections::HashMap;
use std::env;

use solana_program_test::tokio;

use router_lib::dex::DexInterface;
use router_lib::test_tools::{generate_dex_rpc_dump, rpc};

#[tokio::test]
async fn test_dump_input_data_infinity() -> anyhow::Result<()> {
    if router_test_lib::config_should_dump_mainnet_data() {
        step_1_infinity().await?;
    }

    step_2_infinity().await?;

    Ok(())
}

async fn step_1_infinity() -> anyhow::Result<()> {
    let rpc_url = env::var("RPC_HTTP_URL")?;
    let (mut rpc_client, chain_data) = rpc::rpc_dumper_client(rpc_url, "infinity_dump.lz4");

    let options = HashMap::from([]);
    let dex = dex_infinity::InfinityDex::initialize(&mut rpc_client, options).await?;

    generate_dex_rpc_dump::run_dump_mainnet_data(dex, rpc_client, chain_data).await?;

    Ok(())
}

async fn step_2_infinity() -> anyhow::Result<()> {
    let (mut rpc_client, chain_data) = rpc::rpc_replayer_client("infinity_dump.lz4");

    let options = HashMap::from([]);
    let dex = dex_infinity::InfinityDex::initialize(&mut rpc_client, options).await?;

    generate_dex_rpc_dump::run_dump_swap_ix("infinity_swap.lz4", dex, chain_data).await?;

    Ok(())
}

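The test runs in two phases: when the dump switch checked by `router_test_lib::config_should_dump_mainnet_data` is on and `RPC_HTTP_URL` is set, step 1 records live mainnet accounts into `infinity_dump.lz4`; step 2 then replays that dump offline and exercises swap-instruction generation into `infinity_swap.lz4`. Something like `RPC_HTTP_URL=https://... cargo test -p dex-infinity test_dump_input_data_infinity` should drive both (the exact invocation and env toggling are assumptions based on the code above).
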
@ -0,0 +1,37 @@
[package]
name = "dex-openbook-v2"
version = "0.0.1"
edition = "2021"

[lib]
doctest = false

[dependencies]
router-lib = { path = "../router-lib", version = "0.0.1" }
router-feed-lib = { path = "../router-feed-lib", version = "0.1" }
solana-account-decoder = "1.17"
solana-client = { workspace = true }
solana-sdk = { workspace = true }
solana-logger = "1.17"
solana-program = "1.17"
solana-program-test = "1.17"
anchor-lang = "0.29.0"
anchor-client = "0.29.0"
anchor-spl = "0.29.0"
anyhow = "1.0.86"
itertools = "0.10.5"
async-trait = "0.1.79"
chrono = "0.4.38"
sha2 = "0.10.8"
tracing = "0.1.40"
spl-associated-token-account = "1.0.5"
serde = "1.0"
serde_derive = "1.0"
mango-feeds-connector = { workspace = true }
bytemuck = "1.16.1"

# obv2
openbook-v2 = { workspace = true }

[dev-dependencies]
router-test-lib = { path = "../router-test-lib", version = "0.1" }

@ -0,0 +1,78 @@
use router_lib::dex::{AccountProviderView, DexEdge, DexEdgeIdentifier};
use solana_program::pubkey::Pubkey;
use solana_sdk::account::ReadableAccount;
use std::any::Any;
use tracing::warn;

pub struct OpenbookV2EdgeIdentifier {
    pub market: Pubkey,
    pub bids: Pubkey,
    pub asks: Pubkey,
    pub mint_a: Pubkey,
    pub mint_b: Pubkey,
    pub event_heap: Pubkey,
    pub is_bid: bool,
    pub account_needed: usize,
}

pub struct OpenbookV2Edge {
    pub market: openbook_v2::state::Market,
    pub bids: Option<openbook_v2::state::BookSide>,
    pub asks: Option<openbook_v2::state::BookSide>,
}

impl DexEdge for OpenbookV2Edge {
    fn as_any(&self) -> &dyn Any {
        self
    }
}

impl DexEdgeIdentifier for OpenbookV2EdgeIdentifier {
    fn key(&self) -> Pubkey {
        self.market
    }

    fn desc(&self) -> String {
        format!("OpenbookV2_{}", self.market)
    }

    fn input_mint(&self) -> Pubkey {
        self.mint_a
    }

    fn output_mint(&self) -> Pubkey {
        self.mint_b
    }

    fn accounts_needed(&self) -> usize {
        self.account_needed
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}

pub fn load_anchor<T: bytemuck::Pod>(
    chain_data: &AccountProviderView,
    address: &Pubkey,
) -> anyhow::Result<T> {
    let account = chain_data.account(address)?;
    let data = bytemuck::try_from_bytes::<T>(&account.account.data()[8..]);
    match data {
        Ok(data) => Ok(*data),
        Err(e) => {
            let size = account.account.data().len();
            warn!(
                "Failed to deserialize account {} (of size={}) {:?}",
                address, size, e
            );
            anyhow::bail!(
                "Failed to deserialize account {} (of size={}) {:?}",
                address,
                size,
                e
            )
        }
    }
}

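The `[8..]` slice in `load_anchor` skips Anchor's 8-byte account discriminator, which prefixes the data of every Anchor-managed account. The discriminator is the first 8 bytes of `sha256("account:<StructName>")`; a standalone sketch (not part of the repo) using the `sha2` crate already in the dependencies:

```rust
use sha2::{Digest, Sha256};

// Compute an Anchor account discriminator, e.g. anchor_discriminator("Market")
// for openbook_v2::state::Market.
fn anchor_discriminator(account_name: &str) -> [u8; 8] {
    let hash = Sha256::digest(format!("account:{account_name}").as_bytes());
    let mut disc = [0u8; 8];
    disc.copy_from_slice(&hash[..8]);
    disc
}
```
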
@ -0,0 +1,6 @@
mod edge;
mod openbook_v2_dex;
mod openbook_v2_ix_builder;

pub use edge::OpenbookV2Edge;
pub use openbook_v2_dex::OpenbookV2Dex;

Some files were not shown because too many files have changed in this diff.