Compare commits

...

159 Commits

Author SHA1 Message Date
Daniel Chew cf7987f4c5
feat(target_chains/fuel): add governance contract (#1518)
* add governance contract

* add fuel ci

* add rust-toolchain

* add executes_governance_instruction test

* add test for SetValidPeriod

* add test for AuthorizeGovernanceDataSourceTransfer

* remove SetWormholeAddress

* add test for SetDataSources

* remove WormholeAddressSetEvent

* remove SetWormholeAddress

* remove SetWormholeAddressPayload

* remove SetWormholeAddressPayload and SetWormholeAddress imports

* remove GovernanceAction::SetWormholeAddress

* address comments

* refactor test

* add comments
2024-05-09 17:42:28 +01:00
Amin Moghaddam 6e0bd0569b
feat(contract_manager): Add option to test for all entropy contracts (#1559) 2024-05-09 12:36:14 +01:00
Pavel Strakhov 1e1be9dbeb
refactor(target_chains/starknet): remove old config instructions and owner from wormhole (#1558) 2024-05-09 09:42:03 +01:00
Pavel Strakhov d105a7aa86
refactor(target_chains/starknet): use EthAddress and is_eth_signature_valid (#1556) 2024-05-08 07:20:30 +01:00
Pavel Strakhov dd9b07b5e4
refactor(target_chains/starknet): generalize array_try_into (#1555) 2024-05-07 16:18:18 +01:00
Pavel Strakhov e26c9d1a30
refactor(target_chains/starknet): split pyth module (#1554) 2024-05-07 14:20:59 +01:00
Pavel Strakhov 9dddd3d1e7
feat(target_chains/starknet): handle wormhole guardian set upgrade VAA (#1550)
* feat(target_chains/starknet): handle wormhole guardian set upgrade VAA

* test(target_chains/starknet): add failing tests for governance

* doc(target_chains/starknet): add comment about wormhole governance
2024-05-07 10:33:09 +01:00
Dev Kalra 77c68c5069
fix(fortuna): watch blocks from start and infinite get queries (#1551)
* fix: watch blocks from start and infinite get queries

* formatting

* fix

* undo small change
2024-05-07 14:21:52 +05:30
Pavel Strakhov bf2c8b5d43
refactor(target_chains/starknet): blanket impl for unwrap_with_felt252 (#1549) 2024-05-07 08:04:55 +01:00
Reisen 3f07c27243
chore(aptos): bump to 3.1.0 toolchain/cli (#1543) 2024-05-06 16:36:01 +01:00
Pavel Strakhov 42b64ac09f
refactor(target_chains/starknet): remove Result from merkle_tree and pyth setters (#1548)
* refactor(target_chains/starknet): remove Result from merkle_tree

* refactor(target_chains/starknet): remove Result from pyth contract setters
2024-05-06 16:21:36 +01:00
Pavel Strakhov 55cbe62997
feat(target_chains/starknet): wormhole governance VAA verification (#1547)
* feat(target_chains/starknet): wormhole governance VAA verification

* refactor(target_chains/starknet): rename VM to VerifiedVM
2024-05-06 14:07:49 +01:00
Pavel Strakhov 94b36c4961
refactor(target_chains/starknet): remove Result from wormhole (#1541) 2024-05-06 11:27:28 +01:00
Jayant Krishnamurthy ff6b11023c
[price_pusher] Option to ignore gas objects (#1545)
* gr

* bump version
2024-05-03 21:41:14 -07:00
Jayant Krishnamurthy 4966b956df
[price_pusher] Sui pusher debugging messages (#1544)
* add logging

* version

* gr
2024-05-03 18:04:43 -07:00
Dev Kalra 10dc4a05b8
feat(fortuna_staging): use spans to create a hierarchy of logs (#1540)
* use spans to create a hierarchy of logs

* minor imp

* remove chain id

* add a sequence processing

* added comments

* consistent with other threads

* extract method out

* add field to process block range

* add field to watch events logs

* rename method

* extract process batch method

* tidy

* update log for eth

* remove comment

* update version

* address feedback
2024-05-03 21:27:42 +05:30
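
The span hierarchy described above comes from the `tracing` crate: entering a span attaches structured fields (for example, the block range being processed) to every log line emitted inside it. A minimal sketch of the pattern with purely illustrative names, not Fortuna's actual code:

    use tracing::{info, info_span, Instrument};

    // Hypothetical keeper task: every log emitted while the future runs
    // inside the span carries the span's fields as structured context.
    async fn process_block_range(from_block: u64, to_block: u64) {
        info!("fetched events"); // rendered as: process_block_range{from_block=100, to_block=142} fetched events
    }

    #[tokio::main]
    async fn main() {
        tracing_subscriber::fmt::init();
        let span = info_span!("process_block_range", from_block = 100u64, to_block = 142u64);
        process_block_range(100, 142).instrument(span).await;
    }
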
Ali Behjati 586a4398bd
feat(price_pusher): add more options to evm pusher (#1538)
This change makes gasLimit configurable and also adds an
updateFeeMultiplier, which is useful on Hedera, where there is an
inconsistency between the `value` passed in the tx and the `value` seen on-chain.
2024-05-03 09:59:19 +02:00
guibescos 020ecdf5da
Solve (#1539) 2024-05-03 09:46:09 +02:00
Pavel Strakhov 308599714f
refactor(target_chains/starknet): remove Result from reader (#1536) 2024-05-02 15:48:55 +01:00
Dev Kalra 587a6fa524
feat(contract_manager): upgrade deploy scripts to use wormhole store (#1523)
* upgrade deploy scripts to use wormhole store

* deploy not find

* deploy price feed and entropy to taiko

* rename type

* rename method

* address feedback

* update js docs

* deploy to olive testnet

* pre commit

* rename deploy config to base deploy config
2024-05-02 16:48:16 +05:30
guibescos a592c6bc33
fix: publish workflow (#1532)
* dry run

* check

* Fix workflows

* rexport price feed message
2024-05-01 19:11:25 +01:00
Aditya Arora 31483a9fc7
chore-add-evm-new-chains (#1533) 2024-05-01 19:35:35 +02:00
cipherZ 3d9781ed58
Update README.md (#1526)
* Update README.md

changed method doc

* Update README.md

---------

Co-authored-by: guibescos <59208140+guibescos@users.noreply.github.com>
2024-05-01 15:20:02 +01:00
guibescos 344f8a9e47
feat: add anchor to pythnet sdk (#1531)
* add anchor to pythnet sdk

* bump

* bump

* bump

* please work

* Solve
2024-05-01 14:39:17 +01:00
Amin Moghaddam b2cb7c878a
fix(fortuna): Fortuna improvements (#1529)
* More logging on failure of deserialization
* Log chain id if the provider registration is failing
* Fix sample config
* Fix dysfunctional rpc address from blast
2024-05-01 13:19:37 +02:00
Pavel Strakhov 4e630edac0
feat(target_chains/starknet): fee collection (#1527)
* feat(target_chains/starknet): fee collection

* refactor(target_chains/starknet): renames and comments
2024-04-30 23:03:29 +01:00
guibescos 20d99bceb7
feat: enable remote stake delegation (#1501)
* feat: enable remote stake delegation

* Cleanup space

* Go

* feat: drive-by priority fees

* fix: pr comments

* fix: pr comments
2024-04-30 15:46:44 +01:00
Dev Kalra d2ce2ecd33
feat(fortuna): add a cli arg to run keeper (#1517)
* optional run keeper

* update comment

* instead of flag use the keeper key file

* update version

* rename method
2024-04-30 20:10:48 +05:30
guibescos 2095da34e9
feat: add input boxes (#1515)
* feat: add send usd app

* fix: cargo tomls

* fix: pre-commit

* fix: improve code quality

* fix: fix names and texts

* fix: pre-commit

* feat: add send usd example to monorepo

* fix: connection endpoint for send usd example

* fix: priceUpdateData

* fix: packages

* fix: remove unused variables

* fix: packages

* fix: test

* fix: tests

* fix: test

* remove file

* fix

* go

* Try removing test script

* Remove npm run test from the text

* Add input box

* is this the same

* pre-commit

---------

Co-authored-by: keyvan <keyvankhademi@gmail.com>
2024-04-30 14:07:05 +01:00
Pavel Strakhov a8dbabc7f9
feat(target_chains/starknet): add fee configuration (#1525) 2024-04-30 13:19:24 +01:00
Pavel Strakhov cae194eb62
Starknet: update_price_feeds and latest_price_info (#1482)
* feat(target_chains/starknet): update_price_feeds and latest_price_info

* test(target_chains/starknet): basic test for pyth contract

* chore(target_chains/starknet): update deploy script

* feat(target_chains/starknet): added get_price_unsafe and get_ema_price_unsafe

* refactor(target_chains/starknet): match on UpdateType and MessageType
2024-04-30 12:26:41 +01:00
NinaLua 8d32b4c2fc
chore: remove repetitive words (#1524) 2024-04-30 12:17:10 +01:00
Ali Behjati a203808a44
refactor(cosmwasm/tools): update cosmjs dependencies (#1514)
* refactor(cosmwasm/tools): update cosmjs dependencies

We needed to update cosmjs dependencies to support Xion, which is based
on a new CometBFT-based variation of tendermint. This change also
includes the artifacts for the Xion testnet network.

* fix: pin a dependency to get nextjs to work

* fix: address review comments
2024-04-29 19:34:57 +02:00
Aditya Arora 24a08a06c5
chore(pricefeed-evm-sdk): Improve doc comments (#1521) 2024-04-29 12:42:30 -04:00
Pavel Strakhov f212907a8b test(target_chains/starknet): add byte array tests 2024-04-29 14:51:06 +01:00
Ali Behjati ef922220ee
chore(contract_manager): Rename package to @pythnetwork/contract-manager (#1507)
This change renames the contract manager package to @pythnetwork/contract-manager to be consistent with our other package names.
2024-04-29 15:25:05 +02:00
Jayant Krishnamurthy 6da2e1ba53
[hermes] Add deprecation notices to old API methods (#1516)
* [hermes] Add deprecation notices for doc purposes

* bump version

* deprecation
2024-04-29 14:26:41 +02:00
Pavel Strakhov 050a3412f9 refactor(target_chains/starknet): move ByteArray to a separate module 2024-04-29 12:57:59 +01:00
Dev Kalra cf90bff236
feat(fortuna): support multiple hashchains (#1509)
* introduce provider config

* get provider chain config in order

* hash chain with multiple pebble chains

* update script to get metadata

* update version

* comments and move things around

* update comment

* minor fixes

* separate pr for this

* rename provider-config

* sample config

* auto sort commitments

* use seed and chain length

* refactor and simplify hashchain and offset vec

* better formatting

* make commitments private

* optional chain in provider-config

* set default value of chain length

* Version 5.0.0

* update comments

* version update

* optional provider config
2024-04-26 16:59:16 +05:30
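
For background, each provider commitment here is the end of a hash chain: a per-chain seed is hashed a fixed number of times, the final hash is registered on-chain, and each reveal walks one step backwards along the chain. A minimal sketch under those assumptions, illustrative rather than Fortuna's exact implementation:

    use sha3::{Digest, Keccak256};

    // Build a hash chain from a seed. items[0] (after the reverse) is the
    // public commitment; items[i + 1] is the value revealed for request i.
    fn build_chain(seed: [u8; 32], length: usize) -> Vec<[u8; 32]> {
        let mut items = Vec::with_capacity(length);
        let mut current = seed;
        for _ in 0..length {
            current = Keccak256::digest(current).into();
            items.push(current);
        }
        items.reverse();
        items
    }

    fn main() {
        let chain = build_chain([7u8; 32], 1_000);
        // Verifying a reveal: hashing item i+1 must reproduce item i.
        assert_eq!(chain[0], <[u8; 32]>::from(Keccak256::digest(chain[1])));
    }
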
Keyvan Khademi 37ee3b46bd
feat: add solana send usd example app (#1471)
* feat: add send usd app

* fix: cargo tomls

* fix: pre-commit

* fix: improve code quality

* fix: fix names and texts

* fix: pre-commit

* feat: add send usd example to monorepo

* fix: connection endpoint for send usd example

* fix: priceUpdateData

* fix: packages

* fix: remove unused variables

* fix: packages

* fix: test

* fix: tests

* fix: test

* remove file

* fix

* go

* Try removing test script

* Remove npm run test from the text

---------

Co-authored-by: Guillermo Bescos <g.bescos@yahoo.com>
2024-04-25 20:19:11 +01:00
Dev Kalra b47ee059d7
feat(contract-manager): implement a script to get the entropy current registration (#1512)
* write a script to get the current registration

* simplify

* correct description

* catch only rpc errors

* refactor and simplify
2024-04-25 21:00:39 +05:30
Daniel Chew c2da454637
add fuel contract by Fuel Labs (#1513) 2024-04-25 22:51:41 +09:00
Dev Kalra 567b4a6597
fix(fortuna/setup-provider): compare chain-length command line input (#1511)
* fix setup provider job

* only need to replace chain length input

* remove space
2024-04-25 17:41:06 +05:30
Dani Mehrjerdi 2014d1e205
feat(express-relay): Add simulation_failed to bid status (#1503) 2024-04-25 14:37:21 +04:00
Aditya Arora 93a71f2eef
pre-commit (#1510) 2024-04-25 09:15:51 +02:00
Dev Kalra 9437d51843
feat(contract_manager): add keeper balance to list entry (#1506)
* add keeper balance to list entry

* don't fix it as not sure
2024-04-24 16:37:14 +05:30
Dev Kalra d31cefb446
feat(contract_manager): separate store for wormhole (#1493)
* rename wormhole contract as per other names

* store for wormhole

* fix var name

* rename var

* rename contract based on other namings

* add yaml for aptos and cosmwasm
2024-04-24 16:32:16 +05:30
Dev Kalra 48a5faf4d9
specify only channel and date for the toolchain (#1505) 2024-04-24 15:59:40 +05:30
Keyvan Khademi b110bbca5c
feat(xc-admin-frontend): instructions summary in proposal page + improve ui in proposal row + refactor the code (#1478)
* refactor: move proposals to a folder

* refactor: use @images instead of relative paths

* refactor: split proposals into multiple files

* refactor: add type for proposal status

* refactor: add eslint and fix errors

* refactor: fix eslint errors

* refactor: fix eslint

* refactor: fix prettier

* refactor: remove any

* refactor: Proposals.tsx

* feat: add basic instructions summary

* feat: add unknown instruction

* fix: revert package-lock.json

* fix: update package-lock.json

* fix: pre-commit

* fix: ts error

* fix: remove message buffer dependency

* fix: revert back the cluster default

* feat: add support for different types of instructions

* feat: add transaction index to proposal row

* feat: improve the proposal row ui

* fix: display bigint properly (#1499)

---------

Co-authored-by: guibescos <59208140+guibescos@users.noreply.github.com>
2024-04-23 13:24:44 -07:00
Dev Kalra d05df508a8
deploy entropy on sei (#1500) 2024-04-23 20:58:06 +05:30
Aditya Arora d51e5712f4
redeployed (#1477) 2024-04-23 11:15:07 -04:00
Ali Behjati 1a3e3a7c00
refactor(hermes): match mapping address argument style with the rest (#1498) 2024-04-23 15:31:27 +02:00
Dev Kalra 4b8b9bfd87
feat(contract_manager): latency script for entropy v2 (#1494)
* latency script for entropy v2

* add block number difference

* correct desc

* refactor request randomness

* refactor and use chain as arg instead of contract

* unnecessary condition

* js doc

* correct desc

* use blockhash
2024-04-23 18:59:52 +05:30
Pavel Strakhov c7883c822b doc(target_chains/starknet): add readme 2024-04-23 13:05:10 +01:00
Pavel Strakhov b30604c5ba doc(target_chains/starknet): add local deployment script 2024-04-23 12:41:59 +01:00
Ali Behjati d50488ef5c
refactor(hermes): Change Pythnet mapping account env var (#1495)
Prefix the env var with Pythnet to be more clear.
2024-04-23 12:41:50 +02:00
Ali Behjati 64037e5b4a
fix(hermes): ignore no subscriber error on broadcast (#1492)
This change ignores the errors that appear when there are
no subscribers to price updates. The issue was fixed before,
but the fix was lost during the refactor.
2024-04-23 11:21:22 +02:00
guibescos 4445c73443
Go (#1489) 2024-04-22 20:44:10 +01:00
guibescos 5b494689d2
chore: sample configs for Solana pusher (#1491)
* Continue

* Sample configs
2024-04-22 20:43:59 +01:00
Dev Kalra e46821d423
feat(xc_admin_frontend): parse entropy and executor abis (#1481)
* parse entropy and executor abis

* correct import

* move parse to xc admin frontend

* undo change

* fix deps

* add comment

* comment

* revert changes and then some minor change

* fix unknown by typecast
2024-04-23 00:15:36 +05:30
Ali Behjati 644b54676c
fix(hermes): update cache.rs path in dockerignore (#1490)
Our dockerignore excludes all files containing "cache" in their name,
and Hermes had an exception here. Updating this exception was missed
when moving Hermes around.
2024-04-22 19:42:14 +02:00
Ali Behjati f9292177e9
fix(hermes): reconnect on wh connection termination (#1488)
* fix(hermes): reconnect on wh connection termination

`tokio::select` disables the branch that runs the wormhole connection
if it returns Ok, and that branch never gets checked again. This change
makes `run` never return Ok.

* refactor(hermes): use Result<!> in pythnet network listener thread
2024-04-22 19:07:22 +02:00
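
A simplified illustration of the failure mode and the fix, not Hermes' actual code: once a future polled by `tokio::select!` completes with Ok, that branch is disabled and never polled again, so the connection routine is changed to return only on error and a supervising loop restarts it:

    use anyhow::{anyhow, Result};

    // The connection loop reports termination as an error instead of Ok,
    // so a completed future always means "reconnect".
    async fn run_wormhole_connection() -> Result<()> {
        // connect, stream VAAs, and fall through here when the stream ends
        Err(anyhow!("wormhole connection terminated"))
    }

    #[tokio::main]
    async fn main() {
        loop {
            if let Err(e) = run_wormhole_connection().await {
                eprintln!("wormhole listener died, restarting: {e}");
                tokio::time::sleep(std::time::Duration::from_secs(1)).await;
            }
        }
    }
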
guibescos 1b13bf651a
fix(solana_pusher): use processed to poll (#1486)
* Do it

* Do it
2024-04-22 17:41:49 +01:00
Pavel Strakhov e8c198065e feat(target_chains/starknet): add merkle tree utils 2024-04-22 17:38:31 +01:00
Pavel Strakhov a1e4fc0924 feat(target_chains/starknet): add utils for decoding signed integers, move array_felt252_to_bytes31 to utils 2024-04-22 17:38:31 +01:00
Anirudh Suresh 67132c0572
try python version env spec (#1484)
* try python version env spec

* Test it out on pull request

* test env change

* test env change 2

* test env change 3

* test env change 4

* address circular import

* test

* test

* test

* test

* test

* undoing test

---------

Co-authored-by: Amin Moghaddam <amin@pyth.network>
2024-04-22 12:25:20 -04:00
guibescos c7c3527bfe
fix: initialize guardian expiration to the right value (#1485) 2024-04-22 17:14:42 +01:00
Reisen 8b76d8c19a
refactor(hermes): state->aggregate downcasting (#1479) 2024-04-22 17:07:27 +01:00
Dani Mehrjerdi bdc2e967b0
refactor(express-relay): Update execution params hash data (#1476) 2024-04-20 11:25:11 +04:00
Pavel Strakhov e04edcfece fix(target_chains/starknet): make parse_and_verify_vm a read-only method 2024-04-19 17:48:31 +01:00
Pavel Strakhov ffbe02b4f6 fix(target_chains/starknet): verify new guardian set before writing to storage 2024-04-19 17:48:31 +01:00
Pavel Strakhov 26bbe4a0ef refactor(target_chains/starknet): check value in Hasher::push_num_bytes 2024-04-19 17:48:31 +01:00
Pavel Strakhov 8b66d0f814 refactor(target_chains/starknet): use enum errors 2024-04-19 17:48:31 +01:00
Pavel Strakhov 0a219fbead refactor(target_chains/starknet): errors modules, reexport errors 2024-04-19 17:48:31 +01:00
Pavel Strakhov 30c741ed49 feat(target_chains/starknet): add multi-purpose keccak hasher 2024-04-19 17:48:31 +01:00
Pavel Strakhov 5fac32fa40 chore(target_chains/starknet): add workflow 2024-04-19 17:48:31 +01:00
Pavel Strakhov 6e62328528 test(target_chains/starknet): add wormhole contract tests 2024-04-19 17:48:31 +01:00
Pavel Strakhov 2d9c6d3028 feat(target_chains/starknet): wormhole VAA verification and parsing 2024-04-19 17:48:31 +01:00
guibescos 508de75839
chore: fix ci (#1475)
* Fix ci

* cleanup

* Go
2024-04-19 17:46:53 +01:00
Dev Kalra 7bbcfa80d4
feat(fortuna): improve logging (#1472)
* update existing logs in keeper

* handle provider error

* handle implicit errors

* address feedback
2024-04-19 22:08:24 +05:30
guibescos 0d6c35fce8
feat: propagate errors in send transactions (#1469)
* Go

* Go

* Propagate errors

* lint

* Bump version
2024-04-19 16:57:00 +01:00
Diyahir c58b675a63
Force Node version (#1473) 2024-04-19 15:40:55 +01:00
Aditya Arora 899a995e2e
sei_devnet_redeploy (#1474) 2024-04-19 10:38:30 -04:00
guibescos 5a676978db
feat: add pyth_push_oracle to dockerfile (#1470)
* Add pusher to dockerfile

* Update
2024-04-19 15:36:25 +01:00
Anirudh Suresh 3f6a14897d
fix per sdk pypi workflow (#1454)
* python3.12 -> python3.11?

* test poetry build works

* add back the publishing

---------

Co-authored-by: ani <ani@Anirudhs-MacBook-Pro.local>
2024-04-19 10:08:11 -04:00
Ali Behjati 481a428e88
chore(contract_manager): add scripts and changes to upgrade wh guardi… (#1468)
* chore(contract_manager): add scripts and changes to upgrade wh guardian set

This change adds the `sync_wormhole_guardian_set.ts` script to update all of
our EVM and CosmWasm contracts, and includes a couple of fixes in different
places to make sure everything works correctly.

* fix: address review comments
2024-04-19 10:52:12 +02:00
Jayant Krishnamurthy 3f58a2a8b3
[price_pusher] add shebang command (#1467) 2024-04-18 12:11:40 -07:00
Keyvan Khademi 76205745c8
feat: add solana target chain example (#1446)
* feat: add send usd example for solana

* fix: rust fmt

* fix: remove unused dependency ahash

* fix: imports

* refactor: use get_price_no_older_than

* fix: package name

* fix: fix naming conventions

* feat: add workspace members in Anchor.toml

* fix: set maximum age to 1 hour

* fix: use public crates for send usd example
2024-04-18 08:11:50 -07:00
Dani Mehrjerdi 93efd61ea4
feat!(express_relay): Update bid's signature to eip712 (#1455) 2024-04-18 18:55:18 +04:00
Reisen 8be6a9ad1c
refactor(price_pusher): re-organize in monorepo (#1464)
* refactor(price_pusher): re-organize in monorepo

* revert(price_pusher): undo gitignore
2024-04-18 13:34:46 +01:00
guibescos 76ec4e3322
feat: move VAA_SPLIT_INDEX (#1466)
* feat: move VAA_SPLIT_INDEX

* CI
2024-04-17 18:14:11 +01:00
guibescos 56cbace282
feat: encourage using random treasury id (#1465)
* Encourage random treasury id

* GO
2024-04-17 17:47:14 +01:00
Reisen ba435bac76 refactor(fortuna): re-organize in monorepo 2024-04-17 15:21:57 +01:00
guibescos 73798b9bdd
Add 4th guardian set (#1457) 2024-04-17 15:01:32 +01:00
Dev Kalra 02ad78bcf1
Update coinflip contracts to use entropy v2 (#1461)
* update coinflip contracts to use entropy v2

* correct formatting
2024-04-17 19:27:33 +05:30
guibescos 8d92ad9931
feat: support closing vaas (#1460)
* feat: support closing vaas

* Go

* Max out

* Cleanup

* Refactor, add comments

* Add max

* Remove script

* bump solana utils

* Revert "Fix: guardian set (#1459)"

This reverts commit d9c85d8f9d.

* Update compute budget

* Go

* Restore

* Bump
2024-04-17 14:49:06 +01:00
Aditya Arora c12a58e0e4
Bugfix: price-service: Add bn.js dependency (#1458)
* price-service-added-bn.js

* requested changes
2024-04-17 09:30:34 -04:00
Amin Moghaddam ee1d61ac71
Fix js checks (#1462) 2024-04-17 09:45:17 +02:00
guibescos d9c85d8f9d
Fix: guardian set (#1459)
* Decrease compute budget

* Go

* fix
2024-04-16 14:36:24 +01:00
Reisen 70c2c8ec4b refactor(hermes): fix README run command 2024-04-15 15:12:13 +01:00
Reisen 933e61dcb8 refactor(hermes): fix hermes Docker workdir 2024-04-15 15:12:13 +01:00
Reisen 45065e2851 refactor(hermes): re-organize in monorepo 2024-04-15 15:12:13 +01:00
Daniel Chew 0789d615d4
format (#1453) 2024-04-15 22:59:30 +09:00
Daniel Chew a7bb9160c4
feat(hermes): add additional sse features (#1443)
* add allow_unordered query param

* add benchmarks_only query params

* update docs

* bump

* address comments

* address comments

* address comments
2024-04-15 21:38:46 +09:00
Ali Behjati 392a3df7eb
fix(hermes): ignore broadcast send result (#1450)
Sending over the broadcast channel only fails when there are no
receivers. We should ignore it instead of propagating it.
2024-04-15 09:55:52 +02:00
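
For reference, `tokio::sync::broadcast::Sender::send` fails exactly when no receiver is currently subscribed, which is why dropping the result is safe; a minimal sketch:

    use tokio::sync::broadcast;

    fn main() {
        let (tx, rx) = broadcast::channel::<u64>(16);
        drop(rx); // no subscribers at the moment
        // send() errors only because nobody is listening; ignore it
        // instead of propagating the error to the caller.
        let _ = tx.send(42);
    }
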
Jayant Krishnamurthy a60733559c
[solana push oracle] Idempotent updates (#1452)
* idempotent updates

* clippy
2024-04-12 14:37:25 -07:00
guibescos 8fba519ce3
Skip (#1451) 2024-04-12 21:23:01 +01:00
Daniel Chew bdc40fec3f
fix task_price_feeds_metadata_updater (#1449) 2024-04-12 20:44:32 +09:00
Ali Behjati 729b18e596
chore(target_chains/ethereum): add morph testnet network (#1445) 2024-04-12 10:09:21 +02:00
Anirudh Suresh 1135f00da2
adapt sdks (#1434)
* adapt sdks

* update the sdks to reflect bid status ups

* some js changes

* python hex fix

* update gen types

* bump versions

* fix docs

---------

Co-authored-by: ani <ani@Anirudhs-MBP.cable.rcn.com>
Co-authored-by: ani <ani@Anirudhs-MacBook-Pro.local>
2024-04-11 16:22:35 -04:00
guibescos 7673097c37
fix: don't use hardcoded bundlesize (#1448)
* feat: jito script

* Go

* Go

* Checkpoint

* Checkpoint

* Rename

* Make tip account random

* Go

* Jito pusher

* Go

* lint

* Lint

* Bump

* bugfix
2024-04-11 12:03:35 -07:00
Jayant Krishnamurthy 443f1455c4
support string (#1447) 2024-04-11 19:35:17 +01:00
guibescos c727195e9c
feat: jito pusher (#1444)
* feat: jito script

* Go

* Go

* Checkpoint

* Checkpoint

* Rename

* Make tip account random

* Go

* Jito pusher

* Go

* lint

* Lint

* Bump
2024-04-11 19:14:10 +01:00
guibescos 0aeae8ca40
feat: send transactions with Jito (#1442)
* feat: jito script

* Go

* Go

* Checkpoint

* Checkpoint

* Rename

* Make tip account random

* Go
2024-04-11 14:30:43 +01:00
Amin Moghaddam ce36d80ae4
fix(contract_manager): use public rpc for blast testnet (#1441) 2024-04-11 13:06:52 +02:00
Dev Kalra 34d94e3177
feat(contract_manager): implement upgrade evm entropy contracts script (#1417)
* implement upgrade evm entropy contracts script

* check proposal for entropy contract upgrades

* refactor scripts

* minor changes in check proposal

* fix comments

* correct comment

* log something and continue

* log only if the owner and executor address doesn't match

* use web3 for abi encoding

* remove unused

* extract code digest code

* feedback implement
2024-04-11 15:11:55 +05:30
Daniel Chew 3c5a913a80
feat(hermes): add sse endpoint (#1425)
* add initial sse code

* fix typo

* add more error handling

* fix formatting

* revert import format

* add error handling for nonexistent price feeds in the middle of sub

* refactor

* format

* add comment

* Update hermes/src/api/sse.rs

Co-authored-by: Reisen <Reisen@users.noreply.github.com>

* refactor

* bump

---------

Co-authored-by: Reisen <Reisen@users.noreply.github.com>
2024-04-11 11:04:27 +09:00
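
For context, Hermes' API is built on axum, which ships a server-sent events response type. A minimal sketch of an SSE route; the handler name, route path, and the one-event-per-second placeholder stream are illustrative assumptions, not Hermes' actual implementation:

    use std::{convert::Infallible, time::Duration};

    use axum::{
        response::sse::{Event, KeepAlive, Sse},
        routing::get,
        Router,
    };
    use tokio_stream::{Stream, StreamExt};

    async fn price_stream() -> Sse<impl Stream<Item = Result<Event, Infallible>>> {
        // A real handler would subscribe to aggregate state and forward
        // price updates as they arrive instead of ticking once per second.
        let stream = tokio_stream::iter(0u64..)
            .throttle(Duration::from_secs(1))
            .map(|i| Ok(Event::default().data(format!("update {i}"))));
        Sse::new(stream).keep_alive(KeepAlive::default())
    }

    #[tokio::main]
    async fn main() {
        let app = Router::new().route("/updates/price/stream", get(price_stream));
        let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap();
        axum::serve(listener, app).await.unwrap();
    }
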
Ali Behjati e1f9783062
chore(target_chains/ethereum): add Blast and Mode gas claim patches (#1440)
This change adds, as a reference, the implementation changes that
enabled gas claims on Blast and Mode.
2024-04-10 19:32:11 +02:00
Reisen ce4019b63f refactor(hermes): state->price_feed_metadata downcasting 2024-04-10 16:48:18 +01:00
guibescos d1c5d93c8e
feat(price_pusher): use retry send transaction logic in pusher (#1438)
* Checkpoint

* Nits

* Nits

* fix: type

* Bump pusher version
2024-04-10 16:24:37 +01:00
Jayant Krishnamurthy ee455f1196
[solidity sdk] Add zerolend AggregatorV3 adapter to SDK (#1437)
* add zerolend cl adapter to sdk

* bump versions
2024-04-10 06:56:42 -07:00
Pavel Strakhov 2c7dfa92dd feat(target_chains/starknet): create project 2024-04-10 11:25:52 +01:00
Reisen b4ed825cd6 refactor(hermes): state->benchmarks downcasting 2024-04-10 09:45:24 +01:00
Reisen 110c6dcea3 refactor(hermes): no need to Box futures 2024-04-10 09:22:43 +01:00
guibescos d627a49764
feat: add ending condition to proposer_server (#1430)
* Checkpoint

* Checkpoint

* Continue

* Revert

* Revert

* Revert

* Update proposer

* Clean

* Lint

* nit

* Refactor crank-executor

* Small refactor

* Go

* Go

* Move comment
2024-04-09 19:54:53 +01:00
guibescos 299dec1d79
fix: increase compute units (#1433) 2024-04-09 18:58:59 +01:00
Reisen 68a2ce1221 refactor(hermes): state->cache downcasting 2024-04-09 18:36:23 +01:00
guibescos 62d189e3b5
feat(solana_utils): support account lookup table (#1424)
* feat: support account lookup table

* remove console log

* Support ALT

* Commas

* feat: support lta

* Go

* Bump
2024-04-09 13:51:29 +01:00
guibescos bb830e1760
docs(solana_sdk): Improve docs (#1414)
* Checkpoint

* Checkpoint

* Checkpoint

* Checkpoint

* fix: pusher

* Checkpoint

* Works

* fix: pass pusher program id

* Add docs

* 0.1.0

* Bump npm package

* Go

* Comment

* Add customizable shard id

* Allow configurable priority fees

* Update readme

* Update text

* readme updates

* Readme

* More text

* More text

* Review

* Text

* readme

* add processed commitment

* fix comment

* whoops

* Set compute units to a more reasonable value

---------

Co-authored-by: Jayant Krishnamurthy <jayantkrishnamurthy@gmail.com>
2024-04-09 13:42:28 +01:00
guibescos 972a9a1e1d
perf: improve tx land rate (#1429)
* Checkpoint

* Checkpoint

* Continue

* Revert

* Revert

* Revert

* Update proposer

* Clean

* Lint
2024-04-09 11:24:45 +01:00
guibescos 0e885e3ca7
feature: export merkle price update (#1428)
* export MerklePriceUpdate

* CI

---------

Co-authored-by: Silviu Troscot <silviu.troscot10@gmail.com>
2024-04-08 19:44:02 +01:00
guibescos a632ee4bd2
fix(solana_pusher): forgot await (#1423)
* fix(solana_pusher): forgot await

* pr comments
2024-04-05 19:13:09 +01:00
guibescos 44cad44f44
feat(pusher): cleanup vaa accounts (#1422) 2024-04-05 16:45:02 +01:00
guibescos 8110e03ccb
Fix pusher dockerfile (#1420) 2024-04-05 12:24:08 +01:00
Ali Behjati 2398afefa7
chore(target_chains/cosmwasm): add rol_testnet network (#1419) 2024-04-04 19:05:39 +02:00
Dev Kalra 80b4dd96de
add support for priority fee (#1418) 2024-04-04 16:37:31 +05:30
guibescos ecf347909f
chore(solana): use instruction builders (#1415) 2024-04-04 11:11:31 +01:00
Jayant Krishnamurthy 5afb187f0d
whoops (#1416) 2024-04-04 10:09:50 +02:00
Dev Kalra 6295674efa
feat(fortuna-v2): implement a keeper service for entropy v2 (#1366)
* extract code to run api

* save

* some changes

* add exit checks

* retry for sub threads and fetch events

* handle events

* remove unused

* compiling

* add logs to keeper

* add simulation and some fixed

* refactoring keeper

* backlog refactoring works

* extract handle event

* extract watch blocks in a method

* handle events extracted

* remove res block from backlog method

* remove res block from watch_blocks

* remove res block from process events

* load private key from file

* add gas limit to blockchain config

* remove unused imports

* remove a log

* gas param u256

* spell simulate

* rename keeper private keeper file

* wait for only api to exit

* remove exit check from keeper

* remove is valid request method as simulate will cover things

* remove some parameters

* remove exit check from keeper

* use saturating sub

* correct condition

* update logging statement

* combine logs

* use nonce manager to send transaction

* poll instead of stream and add nonce middleware

* remove unused

* fix tests

* add ws support to streaming

* Refactor and improve error handling

* replace simulation with gas estimation

* add polling support for when no wss url

* version update

* test check

* update comment

* update key comment

* rename chain_config to chain_state

* update version

* pad gas estimate

* add comments

---------

Co-authored-by: Amin Moghaddam <amin@pyth.network>
2024-04-04 01:14:20 +05:30
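
A sketch of two of the transaction-path ideas called out above, nonce management for concurrent sends and gas-estimate padding in place of simulation, using the ethers crate; the function, the wiring, and the 25% pad are illustrative assumptions rather than Fortuna's exact code:

    use anyhow::Result;
    use ethers::prelude::*;

    // Wrap the signer in a NonceManagerMiddleware so concurrent reveal
    // transactions don't race on nonces, then pad the node's gas estimate
    // instead of simulating the request.
    async fn send_with_padded_gas(provider: Provider<Http>, wallet: LocalWallet) -> Result<()> {
        let address = wallet.address();
        let client = NonceManagerMiddleware::new(SignerMiddleware::new(provider, wallet), address);

        let tx = TransactionRequest::new().to(address).value(0u64);
        let estimate = client.estimate_gas(&tx.clone().into(), None).await?;
        let padded = estimate * U256::from(125) / U256::from(100); // 25% pad, an illustrative choice
        let _pending = client.send_transaction(tx.gas(padded), None).await?;
        Ok(())
    }
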
guibescos 050b8275f7
feat(price_pusher): solana price pusher (#1408)
* Checkpoint

* Checkpoint

* Checkpoint

* Checkpoint

* fix: pusher

* Checkpoint

* Works

* fix: pass pusher program id

* Add docs

* 0.1.0

* Bump npm package

* Go

* Comment

* Add customizable shard id

* Allow configurable priority fees
2024-04-03 15:42:44 +01:00
Daniel Chew 450a483679
feat(xc-admin): add price feed council signers (#1413)
* add price feed council signers

* update signers.json

* update signers.json

* update signers.json
2024-04-03 15:48:10 +09:00
Aditya Arora c0c03945d0
Update price evm Oracle Example (#1411) 2024-04-02 14:42:46 -04:00
Aditya Arora c2fde0f6dc
chore-add-evm-linea-sepolia (#1410) 2024-04-02 10:51:34 -04:00
guibescos 866b6a5b4b
feat: pyth pull-based push oracle (#1370)
* feat: implement oracle instance

* Go

* Remove key

* Go

* Add instance id, fix conditional deser

* Go

* Rename

* Revert changes to cli

* Checkpoint

* Cleanup deps

* Refactor tests

* Cleanup deps

* Write test

* Fix comment

* Shard id

* ADd tests

* Extract common test utils

* Fix test

* Better name

* Cleanup

* Instance -> shard

* Update test

* Make shard id a u16
2024-04-01 14:43:47 +01:00
guibescos a888ba318c
chore: env variables for rpcs (#1407)
* chore: env variables for rpcs

* Remove console log
2024-03-29 17:51:56 +00:00
Aditya Arora 77db9ee53b
feat(target_chains/ethereum/sdk/solidity): add convertToUint method to the sdk (#1390)
* Moving convertToUint to utils

* pre-commit fix

* reversing OracleSwap example

* pre-commit]

* added test

* abi-gen

* Added solc to sdk

* resolved comments
2024-03-28 17:23:41 -04:00
guibescos 9328b73284
chore: publish xc-admin to ghcr (#1406)
* perf: use solana hash precompile

* chore: xc-admin push to ghcr

* Trigger CI

* Check

* Fix

* Fix boolean

* Fix env variable

* Go

* Push new worflow

* Cleanup

* Go:

* Add packages write
2024-03-28 18:24:52 +00:00
Pavel Strakhov cd543bcd6a refactor(target_chains/ethereum): remove duplicated tests 2024-03-28 14:50:01 +00:00
Pavel Strakhov f134c2d31c chore: bump pyth-sdk-solidity version to 3.0.0 2024-03-28 14:50:01 +00:00
Pavel Strakhov 7352256c63 refactor(target_chains/ethereum): remove price attestations from tests 2024-03-28 14:50:01 +00:00
Pavel Strakhov d23f6c0d11 refactor(target_chains/ethereum): rename tests and vars in GasBenchmark 2024-03-28 14:50:01 +00:00
Pavel Strakhov f4617b484a refactor(target_chains/ethereum): rename attestations to messages in PythTest 2024-03-28 14:50:01 +00:00
Pavel Strakhov 8843d0f875 refactor(target_chains/ethereum): clean up unused batch update code 2024-03-28 14:50:01 +00:00
Pavel Strakhov 06965d38cc refactor(target_chains/ethereum): move unused functions from Pyth to VerificationExperiments tests 2024-03-28 14:50:01 +00:00
Pavel Strakhov a2db288210 refactor(target_chains/ethereum): remove legacy batch updates support from parsePriceFeedUpdates and getUpdateFee 2024-03-28 14:50:01 +00:00
Pavel Strakhov 8a70ca769b refactor(target_chains/ethereum): remove legacy batch updates support from updatePriceFeeds 2024-03-28 14:50:01 +00:00
Pavel Strakhov 01f878cf5a refactor(target_chains/ethereum): remove truffle tests for batch updates 2024-03-28 14:50:01 +00:00
Jayant Krishnamurthy 0f7a9cc334
[contract_manager] Add logic for tracking fee denominations and dollar values (#1394)
* tokens

* progress

* progress

* progress

* infra for storing tokens and using them in fee calculations

* precommit

* cleanup

* cleanup

* fix
2024-03-28 06:26:04 -07:00
Dev Kalra 6fb5ab483d
fix(IEntropy): revealWithCallback comment (#1404)
* fix reveal comment

* update comment
2024-03-28 14:55:31 +05:30
435 changed files with 47563 additions and 14106 deletions

@@ -15,5 +15,4 @@
 .git
-hermes/wormhole
-!hermes/src/state/cache.rs
+!apps/hermes/src/state/cache.rs

@@ -21,10 +21,10 @@ jobs:
       - uses: actions/checkout@v3
       - name: Download CLI
-        run: wget https://github.com/aptos-labs/aptos-core/releases/download/aptos-cli-v1.0.4/aptos-cli-1.0.4-Ubuntu-22.04-x86_64.zip
+        run: wget https://github.com/aptos-labs/aptos-core/releases/download/aptos-cli-v3.1.0/aptos-cli-3.1.0-Ubuntu-22.04-x86_64.zip
       - name: Unzip CLI
-        run: unzip aptos-cli-1.0.4-Ubuntu-22.04-x86_64.zip
+        run: unzip aptos-cli-3.1.0-Ubuntu-22.04-x86_64.zip
       - name: Run tests
         run: ./aptos move test

@@ -2,10 +2,10 @@ name: Check Fortuna
 on:
   pull_request:
-    paths: [fortuna/**]
+    paths: [apps/fortuna/**]
   push:
     branches: [main]
-    paths: [fortuna/**]
+    paths: [apps/fortuna/**]
 jobs:
   test:
     runs-on: ubuntu-latest
@@ -17,4 +17,4 @@ jobs:
           toolchain: nightly-2023-07-23
           override: true
       - name: Run executor tests
-        run: cargo test --manifest-path ./fortuna/Cargo.toml
+        run: cargo test --manifest-path ./apps/fortuna/Cargo.toml

.github/workflows/ci-fuel-contract.yml (new file)

@@ -0,0 +1,35 @@
+name: Test Fuel Contract
+on:
+  pull_request:
+    paths:
+      - target_chains/fuel/**
+  push:
+    branches:
+      - main
+    paths:
+      - target_chains/fuel/**
+env:
+  CARGO_TERM_COLOR: always
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    defaults:
+      run:
+        working-directory: target_chains/fuel/contracts/
+    steps:
+      - uses: actions/checkout@v2
+      - name: Install Fuel toolchain
+        run: |
+          curl https://install.fuel.network | sh
+          echo "$HOME/.fuelup/bin" >> $GITHUB_PATH
+      - name: Build with Forc
+        run: forc build --verbose
+      - name: Run tests with Forc
+        run: forc test --verbose
+      - name: Build
+        run: cargo build --verbose
+      - name: Run tests
+        run: cargo test --verbose

@@ -2,10 +2,10 @@ name: Check Hermes
 on:
   pull_request:
-    paths: [hermes/**]
+    paths: [apps/hermes/**]
   push:
     branches: [main]
-    paths: [hermes/**]
+    paths: [apps/hermes/**]
 jobs:
   test:
     runs-on: ubuntu-latest
@@ -20,4 +20,4 @@ jobs:
       - name: Install protoc
         uses: arduino/setup-protoc@v3
       - name: Run executor tests
-        run: cargo test --manifest-path ./hermes/Cargo.toml
+        run: cargo test --manifest-path ./apps/hermes/Cargo.toml

@@ -0,0 +1,37 @@
+name: Starknet contract
+on:
+  pull_request:
+    paths:
+      - target_chains/starknet/contracts/**
+  push:
+    branches:
+      - main
+    paths:
+      - target_chains/starknet/contracts/**
+jobs:
+  check:
+    name: Starknet Foundry tests
+    runs-on: ubuntu-latest
+    defaults:
+      run:
+        working-directory: target_chains/starknet/contracts/
+    steps:
+      - uses: actions/checkout@v3
+      - name: Install Scarb
+        uses: software-mansion/setup-scarb@v1
+        with:
+          tool-versions: target_chains/starknet/contracts/.tool-versions
+      - name: Install Starknet Foundry
+        uses: foundry-rs/setup-snfoundry@v3
+        with:
+          tool-versions: target_chains/starknet/contracts/.tool-versions
+      - name: Install Starkli
+        run: curl https://get.starkli.sh | sh && . ~/.config/.starkli/env && starkliup -v $(awk '/starkli/{print $2}' .tool-versions)
+      - name: Install Katana
+        run: curl -L https://install.dojoengine.org | bash && PATH="$PATH:$HOME/.config/.dojo/bin" dojoup -v $(awk '/dojo/{print $2}' .tool-versions)
+      - name: Check formatting
+        run: scarb fmt --check
+      - name: Run tests
+        run: snforge test
+      - name: Test local deployment script
+        run: bash -c 'PATH="$PATH:$HOME/.config/.dojo/bin" katana & . ~/.config/.starkli/env && deploy/local_deploy'

@@ -12,7 +12,7 @@ jobs:
       - uses: actions/checkout@v2
       - uses: actions/setup-node@v2
         with:
-          node-version: "16"
+          node-version: "18"
           registry-url: "https://registry.npmjs.org"
       - run: npm ci
       - run: npx lerna run build --no-private

@@ -11,8 +11,14 @@ jobs:
     steps:
       - name: Checkout sources
         uses: actions/checkout@v2
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          default: true
+          profile: minimal
-      - run: cargo publish --token ${CARGO_REGISTRY_TOKEN}
+      - run: cargo +stable-x86_64-unknown-linux-gnu publish --token ${CARGO_REGISTRY_TOKEN}
         env:
           CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
         working-directory: "target_chains/solana/pyth_solana_receiver_sdk"

@@ -46,7 +46,7 @@ jobs:
         uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
         with:
           context: .
-          file: "./fortuna/Dockerfile"
+          file: "./apps/fortuna/Dockerfile"
           push: true
           tags: ${{ steps.metadata_fortuna.outputs.tags }}
           labels: ${{ steps.metadata_fortuna.outputs.labels }}

@@ -37,7 +37,7 @@ jobs:
         env:
           AWS_REGION: us-east-1
       - run: |
-          DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f hermes/Dockerfile .
+          DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f apps/hermes/Dockerfile .
          docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
        env:
          ECR_REGISTRY: public.ecr.aws

@@ -40,7 +40,7 @@ jobs:
         id: ecr_login
       - run: |
          DOCKER_BUILDKIT=1 docker build -t lerna -f Dockerfile.lerna .
-         DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f price_pusher/Dockerfile .
+         DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f apps/price_pusher/Dockerfile .
          docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
        env:
          ECR_REGISTRY: public.ecr.aws

@@ -6,8 +6,12 @@ on:
 permissions:
   contents: read
   id-token: write
+  packages: write
+env:
+  REGISTRY: ghcr.io
+  IMAGE_NAME: pyth-network/xc-admin-frontend
 jobs:
-  xc-admin-image:
+  xc-admin-frontend-image:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2
@@ -16,23 +20,17 @@ jobs:
           SHORT_HASH=$(echo ${{ github.sha }} | cut -c1-7)
           TIMESTAMP=$(date +%s)
           echo "IMAGE_TAG=${TIMESTAMP}-${SHORT_HASH}" >> "${GITHUB_ENV}"
-      - uses: aws-actions/configure-aws-credentials@8a84b07f2009032ade05a88a28750d733cc30db1
-        with:
-          role-to-assume: arn:aws:iam::192824654885:role/github-actions-ecr
-          aws-region: eu-west-2
-      - uses: aws-actions/amazon-ecr-login@v1
-        id: ecr_login
+      - name: Log in to the Container registry
+        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
       - name: Build docker image
         run: |
           DOCKER_BUILDKIT=1 docker build -t lerna -f Dockerfile.lerna .
-          DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f governance/xc_admin/packages/xc_admin_frontend/Dockerfile .
-        env:
-          ECR_REGISTRY: ${{ steps.ecr_login.outputs.registry }}
-          ECR_REPOSITORY: xc-admin-frontend
+          DOCKER_BUILDKIT=1 docker build -t ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }} -f governance/xc_admin/packages/xc_admin_frontend/Dockerfile .
       - name: Push docker image
         if: github.ref == 'refs/heads/main'
         run: |
-          docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
-        env:
-          ECR_REGISTRY: ${{ steps.ecr_login.outputs.registry }}
-          ECR_REPOSITORY: xc-admin-frontend
+          docker push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }}

@@ -6,6 +6,10 @@ on:
 permissions:
   contents: read
   id-token: write
+  packages: write
+env:
+  REGISTRY: ghcr.io
+  IMAGE_NAME: pyth-network/xc-admin
 jobs:
   xc-admin-image:
     runs-on: ubuntu-latest
@@ -17,16 +21,16 @@
           PREFIX="refs/tags/xc-admin-"
           VERSION="${GITHUB_REF:${#PREFIX}}"
           echo "IMAGE_TAG=${VERSION}" >> "${GITHUB_ENV}"
-      - uses: aws-actions/configure-aws-credentials@8a84b07f2009032ade05a88a28750d733cc30db1
-        with:
-          role-to-assume: arn:aws:iam::192824654885:role/github-actions-ecr
-          aws-region: eu-west-2
-      - uses: aws-actions/amazon-ecr-login@v1
-        id: ecr_login
-      - run: |
-          DOCKER_BUILDKIT=1 docker build -t lerna -f Dockerfile.lerna .
-          DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f governance/xc_admin/Dockerfile .
-          docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
-        env:
-          ECR_REGISTRY: ${{ steps.ecr_login.outputs.registry }}
-          ECR_REPOSITORY: xc-admin
+      - name: Log in to the Container registry
+        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      - name: Build docker image
+        run: |
+          DOCKER_BUILDKIT=1 docker build -t lerna -f Dockerfile.lerna .
+          DOCKER_BUILDKIT=1 docker build -t ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }} -f governance/xc_admin/Dockerfile .
+      - name: Push docker image
+        run: |
+          docker push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }}

@@ -5,12 +5,17 @@ on:
     tags:
       - "python-v*"
+env:
+  PYTHON_VERSION: "3.11"
 jobs:
   deploy:
     runs-on: ubuntu-20.04
     steps:
       - uses: actions/checkout@v2
       - uses: actions/setup-python@v2
+        with:
+          python-version: ${{ env.PYTHON_VERSION }}
       - name: Install dependencies
         run: |
           python3 -m pip install --upgrade poetry

.npmrc (new file)

@@ -0,0 +1 @@
+engine-strict=true

@@ -47,22 +47,22 @@ repos:
   - id: cargo-fmt-hermes
     name: Cargo format for Hermes
     language: "rust"
-    entry: cargo +nightly-2024-03-26 fmt --manifest-path ./hermes/Cargo.toml --all -- --config-path rustfmt.toml
+    entry: cargo +nightly-2024-03-26 fmt --manifest-path ./apps/hermes/Cargo.toml --all -- --config-path rustfmt.toml
     pass_filenames: false
-    files: hermes
+    files: apps/hermes
   - id: cargo-clippy-hermes
     name: Cargo clippy for Hermes
     language: "rust"
-    entry: cargo +nightly-2024-03-26 clippy --manifest-path ./hermes/Cargo.toml --tests --fix --allow-dirty --allow-staged -- -D warnings
+    entry: cargo +nightly-2024-03-26 clippy --manifest-path ./apps/hermes/Cargo.toml --tests --fix --allow-dirty --allow-staged -- -D warnings
     pass_filenames: false
-    files: hermes
+    files: apps/hermes
   # Hooks for Fortuna
   - id: cargo-fmt-fortuna
     name: Cargo format for Fortuna
     language: "rust"
-    entry: cargo +nightly-2023-07-23 fmt --manifest-path ./fortuna/Cargo.toml --all -- --config-path rustfmt.toml
+    entry: cargo +nightly-2023-07-23 fmt --manifest-path ./apps/fortuna/Cargo.toml --all -- --config-path rustfmt.toml
     pass_filenames: false
-    files: fortuna
+    files: apps/fortuna
   # Hooks for message buffer contract
   - id: cargo-fmt-message-buffer
     name: Cargo format for message buffer contract
@@ -80,13 +80,13 @@ repos:
   - id: cargo-fmt-pythnet-sdk
     name: Cargo format for pythnet SDK
     language: "rust"
-    entry: cargo +nightly-2023-07-23 fmt --manifest-path ./pythnet/pythnet_sdk/Cargo.toml --all -- --config-path rustfmt.toml
+    entry: cargo +nightly-2024-03-26 fmt --manifest-path ./pythnet/pythnet_sdk/Cargo.toml --all -- --config-path rustfmt.toml
     pass_filenames: false
    files: pythnet/pythnet_sdk
   - id: cargo-clippy-pythnet-sdk
     name: Cargo clippy for pythnet SDK
     language: "rust"
-    entry: cargo +nightly-2023-07-23 clippy --manifest-path ./pythnet/pythnet_sdk/Cargo.toml --tests --fix --allow-dirty --allow-staged -- -D warnings
+    entry: cargo +nightly-2024-03-26 clippy --manifest-path ./pythnet/pythnet_sdk/Cargo.toml --tests --fix --allow-dirty --allow-staged -- -D warnings
     pass_filenames: false
     files: pythnet/pythnet_sdk
   # Hooks for solana receiver contract

@@ -16,7 +16,7 @@ contracts, SDKs, and examples.
 ## Hermes

-> [hermes](./hermes/)
+> [hermes](./apps/hermes/)

 Hermes is an off-chain service which constantly observes Pythnet and the
 Wormhole network watching for price updates emitted from the Pyth contract. It
@@ -79,10 +79,11 @@ Lerna has some common failure modes that you may encounter:

 1. `npm ci` fails with a typescript compilation error about a missing package.
    This error likely means that the failing package has a `prepare` entry compiling the typescript in its `package.json`.
    Fix this error by moving that logic to the `prepublishOnly` entry.
-1. The software builds locally but fails in CI, or vice-versa.
+2. The software builds locally but fails in CI, or vice-versa.
    This error likely means that some local build caches need to be cleaned.
    The build error may not indicate that this is a caching issue, e.g., it may appear that the packages are being built in the wrong order.
    Delete `node_modules/`, `lib/` and `tsconfig.tsbuildinfo` from each package's subdirectory. then try again.
+3. `npm ci` fails due to wrong node version. Make sure to be using `v18`. Node version `v21` is not supported and known to cause issues.

 ## Audit / Feature Status

@@ -1,4 +1,4 @@
 /target
-config.yaml
+*config.yaml
 *secret*
 *private-key*

@@ -522,9 +522,9 @@ dependencies = [
 [[package]]
 name = "cargo_metadata"
-version = "0.17.0"
+version = "0.18.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7daec1a2a2129eeba1644b220b4647ec537b0b5d4bfd6876fcc5a540056b592"
+checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037"
 dependencies = [
  "camino",
  "cargo-platform",
@@ -1031,9 +1031,9 @@ dependencies = [
 [[package]]
 name = "enr"
-version = "0.9.1"
+version = "0.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fe81b5c06ecfdbc71dd845216f225f53b62a10cb8a16c946836a3467f701d05b"
+checksum = "2a3d8dc56e02f954cac8eb489772c552c473346fc34f67412bb6244fd647f7e4"
 dependencies = [
  "base64 0.21.4",
  "bytes",
@@ -1146,9 +1146,9 @@ dependencies = [
 [[package]]
 name = "ethers"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ad13497f6e0a24292fc7b408e30d22fe9dc262da1f40d7b542c3a44e7fc0476"
+checksum = "816841ea989f0c69e459af1cf23a6b0033b19a55424a1ea3a30099becdb8dec0"
 dependencies = [
  "ethers-addressbook",
  "ethers-contract",
@@ -1162,9 +1162,9 @@ dependencies = [
 [[package]]
 name = "ethers-addressbook"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c6e9e8acd0ed348403cc73a670c24daba3226c40b98dc1a41903766b3ab6240a"
+checksum = "5495afd16b4faa556c3bba1f21b98b4983e53c1755022377051a975c3b021759"
 dependencies = [
  "ethers-core",
  "once_cell",
@@ -1174,9 +1174,9 @@ dependencies = [
 [[package]]
 name = "ethers-contract"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d79269278125006bb0552349c03593ffa9702112ca88bc7046cc669f148fb47c"
+checksum = "6fceafa3578c836eeb874af87abacfb041f92b4da0a78a5edd042564b8ecdaaa"
 dependencies = [
  "const-hex",
  "ethers-contract-abigen",
@@ -1193,9 +1193,9 @@ dependencies = [
 [[package]]
 name = "ethers-contract-abigen"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ce95a43c939b2e4e2f3191c5ad4a1f279780b8a39139c9905b43a7433531e2ab"
+checksum = "04ba01fbc2331a38c429eb95d4a570166781f14290ef9fdb144278a90b5a739b"
 dependencies = [
  "Inflector",
  "const-hex",
@@ -1211,15 +1211,15 @@ dependencies = [
  "serde",
  "serde_json",
  "syn 2.0.38",
- "toml 0.7.8",
+ "toml 0.8.12",
  "walkdir",
 ]

 [[package]]
 name = "ethers-contract-derive"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e9ce44906fc871b3ee8c69a695ca7ec7f70e50cb379c9b9cb5e532269e492f6"
+checksum = "87689dcabc0051cde10caaade298f9e9093d65f6125c14575db3fd8c669a168f"
 dependencies = [
  "Inflector",
  "const-hex",
@@ -1233,9 +1233,9 @@ dependencies = [
 [[package]]
 name = "ethers-core"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c0a17f0708692024db9956b31d7a20163607d2745953f5ae8125ab368ba280ad"
+checksum = "82d80cc6ad30b14a48ab786523af33b37f28a8623fc06afd55324816ef18fb1f"
 dependencies = [
  "arrayvec",
  "bytes",
@@ -1253,7 +1253,7 @@ dependencies = [
  "rlp",
  "serde",
  "serde_json",
- "strum 0.25.0",
+ "strum 0.26.2",
  "syn 2.0.38",
  "tempfile",
  "thiserror",
@@ -1263,10 +1263,11 @@ dependencies = [
 [[package]]
 name = "ethers-etherscan"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0e53451ea4a8128fbce33966da71132cf9e1040dcfd2a2084fd7733ada7b2045"
+checksum = "e79e5973c26d4baf0ce55520bd732314328cabe53193286671b47144145b9649"
 dependencies = [
+ "chrono",
  "ethers-core",
  "reqwest",
  "semver",
@@ -1278,9 +1279,9 @@ dependencies = [
 [[package]]
 name = "ethers-middleware"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "473f1ccd0c793871bbc248729fa8df7e6d2981d6226e4343e3bbaa9281074d5d"
+checksum = "48f9fdf09aec667c099909d91908d5eaf9be1bd0e2500ba4172c1d28bfaa43de"
 dependencies = [
  "async-trait",
  "auto_impl",
@@ -1305,9 +1306,9 @@ dependencies = [
 [[package]]
 name = "ethers-providers"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6838fa110e57d572336178b7c79e94ff88ef976306852d8cb87d9e5b1fc7c0b5"
+checksum = "6434c9a33891f1effc9c75472e12666db2fa5a0fec4b29af6221680a6fe83ab2"
 dependencies = [
  "async-trait",
  "auto_impl",
@@ -1316,6 +1317,7 @@ dependencies = [
  "const-hex",
  "enr",
  "ethers-core",
+ "futures-channel",
  "futures-core",
  "futures-timer",
  "futures-util",
@@ -1342,9 +1344,9 @@ dependencies = [
 [[package]]
 name = "ethers-signers"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ea44bec930f12292866166f9ddbea6aa76304850e4d8dcd66dc492b43d00ff1"
+checksum = "228875491c782ad851773b652dd8ecac62cda8571d3bc32a5853644dd26766c2"
 dependencies = [
  "async-trait",
  "coins-bip32",
@@ -1361,9 +1363,9 @@ dependencies = [
 [[package]]
 name = "ethers-solc"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "de34e484e7ae3cab99fbfd013d6c5dc7f9013676a4e0e414d8b12e1213e8b3ba"
+checksum = "66244a771d9163282646dbeffe0e6eca4dda4146b6498644e678ac6089b11edd"
 dependencies = [
  "cfg-if",
  "const-hex",
@@ -1486,7 +1488,7 @@ dependencies = [
 [[package]]
 name = "fortuna"
-version = "3.3.4"
+version = "5.2.2"
 dependencies = [
  "anyhow",
  "axum",
@@ -1498,6 +1500,7 @@ dependencies = [
  "clap",
  "ethabi",
  "ethers",
+ "futures",
  "hex",
  "lazy_static",
  "once_cell",
@@ -2758,7 +2761,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919"
 dependencies = [
  "once_cell",
- "toml_edit",
+ "toml_edit 0.19.15",
 ]

 [[package]]
@@ -2819,7 +2822,7 @@ dependencies = [
 [[package]]
 name = "pythnet-sdk"
-version = "2.0.0"
+version = "2.1.0"
 dependencies = [
  "bincode",
  "borsh",
@@ -3387,9 +3390,9 @@ dependencies = [
 [[package]]
 name = "serde_spanned"
-version = "0.6.3"
+version = "0.6.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186"
+checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1"
 dependencies = [
  "serde",
 ]
@@ -3581,9 +3584,9 @@ dependencies = [
 [[package]]
 name = "solang-parser"
-version = "0.3.2"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7cb9fa2fa2fa6837be8a2495486ff92e3ffe68a99b6eeba288e139efdd842457"
+checksum = "c425ce1c59f4b154717592f0bdf4715c3a1d55058883622d3157e1f0908a5b26"
 dependencies = [
  "itertools 0.11.0",
  "lalrpop",
@@ -3645,11 +3648,11 @@ dependencies = [
 [[package]]
 name = "strum"
-version = "0.25.0"
+version = "0.26.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125"
+checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29"
 dependencies = [
- "strum_macros 0.25.2",
+ "strum_macros 0.26.2",
 ]

 [[package]]
@@ -3667,9 +3670,9 @@ dependencies = [
 [[package]]
 name = "strum_macros"
-version = "0.25.2"
+version = "0.26.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ad8d03b598d3d0fff69bf533ee3ef19b8eeb342729596df84bcc7e1f96ec4059"
+checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946"
 dependencies = [
  "heck",
  "proc-macro2",
@@ -3955,21 +3958,21 @@ dependencies = [
 [[package]]
 name = "toml"
-version = "0.7.8"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257"
+checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3"
 dependencies = [
  "serde",
  "serde_spanned",
  "toml_datetime",
- "toml_edit",
+ "toml_edit 0.22.9",
 ]

 [[package]]
 name = "toml_datetime"
-version = "0.6.3"
+version = "0.6.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b"
+checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1"
 dependencies = [
  "serde",
 ]
@@ -3979,12 +3982,23 @@ name = "toml_edit"
 version = "0.19.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
+dependencies = [
+ "indexmap 2.0.2",
+ "toml_datetime",
+ "winnow 0.5.16",
+]
+
+[[package]]
+name = "toml_edit"
+version = "0.22.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e40bb779c5187258fd7aad0eb68cb8706a0a81fa712fbea808ab43c4b8374c4"
 dependencies = [
  "indexmap 2.0.2",
  "serde",
  "serde_spanned",
  "toml_datetime",
- "winnow",
+ "winnow 0.6.5",
 ]

 [[package]]
@@ -4512,6 +4526,15 @@ dependencies = [
  "memchr",
 ]

+[[package]]
+name = "winnow"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dffa400e67ed5a4dd237983829e66475f0a4a26938c4b04c21baede6262215b8"
+dependencies = [
+ "memchr",
+]
+
 [[package]]
 name = "winreg"
 version = "0.50.0"


@ -1,6 +1,6 @@
[package]
name = "fortuna"
-version = "3.3.4"
version = "5.2.2"
edition = "2021"

[dependencies]
@ -12,10 +12,11 @@ bincode = "1.3.3"
byteorder = "1.5.0"
clap = { version = "4.4.6", features = ["derive", "cargo", "env"] }
ethabi = "18.0.0"
-ethers = "2.0.10"
ethers = { version = "2.0.14", features = ["ws"] }
futures = { version = "0.3.28" }
hex = "0.4.3"
prometheus-client = { version = "0.21.2" }
-pythnet-sdk = { path = "../pythnet/pythnet_sdk", features = ["strum"] }
pythnet-sdk = { path = "../../pythnet/pythnet_sdk", features = ["strum"] }
rand = "0.8.5"
reqwest = { version = "0.11.22", features = ["json", "blocking"] }
serde = { version = "1.0.188", features = ["derive"] }
@ -34,5 +35,6 @@ once_cell = "1.18.0"
lazy_static = "1.4.0"
url = "2.5.0"

[dev-dependencies]
axum-test = "13.1.1"


@ -7,15 +7,15 @@ RUN rustup default nightly-2023-07-23
# Build
WORKDIR /src
-COPY fortuna fortuna
COPY apps/fortuna apps/fortuna
COPY pythnet pythnet
COPY target_chains/ethereum/entropy_sdk/solidity/abis target_chains/ethereum/entropy_sdk/solidity/abis
-WORKDIR /src/fortuna
WORKDIR /src/apps/fortuna
RUN --mount=type=cache,target=/root/.cargo/registry cargo build --release

FROM rust:${RUST_VERSION}
# Copy artifacts from other images
-COPY --from=build /src/fortuna/target/release/fortuna /usr/local/bin/
COPY --from=build /src/apps/fortuna/target/release/fortuna /usr/local/bin/


@ -4,3 +4,4 @@ chains:
    contract_addr: 0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a
    reveal_delay_blocks: 0
    legacy_tx: true
    gas_limit: 500000


@ -0,0 +1,7 @@
chains:
lightlink-pegasus:
commitments:
# prettier-ignore
- seed: [219,125,217,197,234,88,208,120,21,181,172,143,239,102,41,233,167,212,237,106,37,255,184,165,238,121,230,155,116,158,173,48]
chain_length: 10000
original_commitment_sequence_number: 104


@ -0,0 +1 @@
nightly-2023-07-23


@ -73,6 +73,8 @@ impl ApiState {
/// The state of the randomness service for a single blockchain.
#[derive(Clone)]
pub struct BlockchainState {
/// The chain id for this blockchain, useful for logging
pub id: ChainId,
/// The hash chain(s) required to serve random numbers for this blockchain
pub state: Arc<HashChainState>,
/// The contract that the server is fulfilling requests for.
@ -245,6 +247,7 @@ mod test {
let eth_read = Arc::new(MockEntropyReader::with_requests(10, &[]));
let eth_state = BlockchainState {
id: "ethereum".into(),
state: ETH_CHAIN.clone(),
contract: eth_read.clone(),
provider_address: PROVIDER,
@ -255,6 +258,7 @@ mod test {
let avax_read = Arc::new(MockEntropyReader::with_requests(10, &[]));
let avax_state = BlockchainState {
id: "avalanche".into(),
state: AVAX_CHAIN.clone(),
contract: avax_read.clone(),
provider_address: PROVIDER,


@ -5,6 +5,7 @@ use {
BlockNumber,
BlockStatus,
EntropyReader,
RequestedWithCallbackEvent,
},
config::EthereumConfig,
},
@ -18,6 +19,7 @@ use {
abi::RawLog,
contract::{
abigen,
ContractError,
EthLogDecode,
},
core::types::Address,
@ -27,6 +29,7 @@ use {
TransformerError,
TransformerMiddleware,
},
NonceManagerMiddleware,
SignerMiddleware,
},
prelude::TransactionRequest,
@ -42,6 +45,7 @@ use {
types::{
transaction::eip2718::TypedTransaction,
BlockNumber as EthersBlockNumber,
U256,
},
},
sha3::{
@ -55,11 +59,14 @@ use {
// contract in the same repo.
abigen!(
PythRandom,
-"../target_chains/ethereum/entropy_sdk/solidity/abis/IEntropy.json"
"../../target_chains/ethereum/entropy_sdk/solidity/abis/IEntropy.json"
);

pub type SignablePythContract = PythRandom<
-TransformerMiddleware<SignerMiddleware<Provider<Http>, LocalWallet>, LegacyTxTransformer>,
TransformerMiddleware<
NonceManagerMiddleware<SignerMiddleware<Provider<Http>, LocalWallet>>,
LegacyTxTransformer,
>,
>;

pub type PythContract = PythRandom<Provider<Http>>;
@ -97,10 +104,12 @@ impl SignablePythContract {
.parse::<LocalWallet>()?
.with_chain_id(chain_id.as_u64());

let address = wallet__.address();

Ok(PythRandom::new(
chain_config.contract_addr,
Arc::new(TransformerMiddleware::new(
-SignerMiddleware::new(provider, wallet__),
NonceManagerMiddleware::new(SignerMiddleware::new(provider, wallet__), address),
transformer,
)),
))
@ -225,4 +234,57 @@ impl EntropyReader for PythContract {
.ok_or_else(|| Error::msg("pending confirmation"))?
.as_u64())
}
async fn get_request_with_callback_events(
&self,
from_block: BlockNumber,
to_block: BlockNumber,
) -> Result<Vec<RequestedWithCallbackEvent>> {
let mut event = self.requested_with_callback_filter();
event.filter = event.filter.from_block(from_block).to_block(to_block);
let res: Vec<RequestedWithCallbackFilter> = event.query().await?;
Ok(res
.iter()
.map(|r| RequestedWithCallbackEvent {
sequence_number: r.sequence_number,
user_random_number: r.user_random_number,
provider_address: r.request.provider,
})
.collect())
}
async fn estimate_reveal_with_callback_gas(
&self,
provider: Address,
sequence_number: u64,
user_random_number: [u8; 32],
provider_revelation: [u8; 32],
) -> Result<Option<U256>> {
let result: Result<U256, ContractError<Provider<Http>>> = self
.reveal_with_callback(
provider,
sequence_number,
user_random_number,
provider_revelation,
)
.estimate_gas()
.await;
match result {
Ok(gas) => Ok(Some(gas)),
Err(e) => match e {
ContractError::ProviderError { e } => Err(anyhow!(e)),
_ => {
tracing::info!(
sequence_number = sequence_number,
"Gas estimation failed. error: {:?}",
e
);
Ok(None)
}
},
}
}
}


@ -4,6 +4,7 @@ use {
ethers::types::{
Address,
BlockNumber as EthersBlockNumber,
U256,
},
};
@ -32,6 +33,13 @@ impl Into<EthersBlockNumber> for BlockStatus {
}
}
#[derive(Clone)]
pub struct RequestedWithCallbackEvent {
pub sequence_number: u64,
pub user_random_number: [u8; 32],
pub provider_address: Address,
}
/// EntropyReader is the read-only interface of the Entropy contract.
#[async_trait]
pub trait EntropyReader: Send + Sync {
@ -42,6 +50,22 @@
-> Result<Option<Request>>;

async fn get_block_number(&self, confirmed_block_status: BlockStatus) -> Result<BlockNumber>;
async fn get_request_with_callback_events(
&self,
from_block: BlockNumber,
to_block: BlockNumber,
) -> Result<Vec<RequestedWithCallbackEvent>>;
/// Simulate a reveal with callback. Returns Ok(Some(gas)) if the estimation succeeded,
/// Ok(None) if the simulation reverted (the request likely cannot be fulfilled), and an
/// error if the estimate could not be obtained at all, e.g. because of a provider error.
async fn estimate_reveal_with_callback_gas(
&self,
provider: Address,
sequence_number: u64,
user_random_number: [u8; 32],
provider_revelation: [u8; 32],
) -> Result<Option<U256>>;
}

/// An in-flight request stored in the contract.
@ -68,7 +92,10 @@ pub mod mock {
},
anyhow::Result,
axum::async_trait,
-ethers::types::Address,
ethers::types::{
Address,
U256,
},
std::sync::RwLock,
};
@ -147,5 +174,23 @@
) -> Result<BlockNumber> {
Ok(*self.block_number.read().unwrap())
}
async fn get_request_with_callback_events(
&self,
_from_block: BlockNumber,
_to_block: BlockNumber,
) -> Result<Vec<super::RequestedWithCallbackEvent>> {
Ok(vec![])
}
async fn estimate_reveal_with_callback_gas(
&self,
provider: Address,
sequence_number: u64,
user_random_number: [u8; 32],
provider_revelation: [u8; 32],
) -> Result<Option<U256>> {
Ok(Some(U256::from(5)))
}
}
}
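The three outcomes of `estimate_reveal_with_callback_gas` map onto three different keeper behaviours; a small consumer sketch (the function and variable names here are illustrative, not part of the diff):

```rust
// Sketch: distinguishing the three outcomes of the gas estimation API above.
async fn try_estimate(
    reader: &dyn EntropyReader,
    provider: Address,
    sequence_number: u64,
    user_random_number: [u8; 32],
    provider_revelation: [u8; 32],
) -> anyhow::Result<()> {
    match reader
        .estimate_reveal_with_callback_gas(
            provider,
            sequence_number,
            user_random_number,
            provider_revelation,
        )
        .await
    {
        Ok(Some(gas)) => tracing::info!("simulation succeeded; estimated gas: {}", gas),
        Ok(None) => tracing::info!("simulation reverted; skipping this request"),
        Err(e) => return Err(e), // provider/RPC failure: worth retrying later
    }
    Ok(())
}
```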


@ -0,0 +1,228 @@
use {
crate::{
api::{
self,
BlockchainState,
ChainId,
},
chain::ethereum::PythContract,
command::register_provider::CommitmentMetadata,
config::{
Commitment,
Config,
ProviderConfig,
RunOptions,
},
keeper,
state::{
HashChainState,
PebbleHashChain,
},
},
anyhow::{
anyhow,
Error,
Result,
},
axum::Router,
std::{
collections::HashMap,
net::SocketAddr,
sync::Arc,
},
tokio::{
spawn,
sync::watch,
},
tower_http::cors::CorsLayer,
utoipa::OpenApi,
utoipa_swagger_ui::SwaggerUi,
};
pub async fn run_api(
socket_addr: SocketAddr,
chains: HashMap<String, api::BlockchainState>,
mut rx_exit: watch::Receiver<bool>,
) -> Result<()> {
#[derive(OpenApi)]
#[openapi(
paths(
crate::api::revelation,
crate::api::chain_ids,
),
components(
schemas(
crate::api::GetRandomValueResponse,
crate::api::Blob,
crate::api::BinaryEncoding,
)
),
tags(
(name = "fortuna", description = "Random number service for the Pyth Entropy protocol")
)
)]
struct ApiDoc;
let metrics_registry = api::Metrics::new();
let api_state = api::ApiState {
chains: Arc::new(chains),
metrics: Arc::new(metrics_registry),
};
// Initialize Axum Router. Note the type here is a `Router<State>` due to the use of the
// `with_state` method which replaces `Body` with `State` in the type signature.
let app = Router::new();
let app = app
.merge(SwaggerUi::new("/docs").url("/docs/openapi.json", ApiDoc::openapi()))
.merge(api::routes(api_state))
// Permissive CORS layer to allow all origins
.layer(CorsLayer::permissive());
tracing::info!("Starting server on: {:?}", &socket_addr);
// Bind the axum server to the configured address and port. This call blocks and
// will not return until the server is shut down.
axum::Server::try_bind(&socket_addr)?
.serve(app.into_make_service())
.with_graceful_shutdown(async {
// `changed` returns an error or Ok(()); in either case we shut down.
// Ok(()) means the exit signal (Ctrl+C) was received, while Err(e) means
// the sender was dropped, which should not happen.
let _ = rx_exit.changed().await;
tracing::info!("Shutting down RPC server...");
})
.await?;
Ok(())
}
pub async fn run_keeper(
chains: HashMap<String, api::BlockchainState>,
config: Config,
private_key: String,
) -> Result<()> {
let mut handles = Vec::new();
for (chain_id, chain_config) in chains {
let chain_eth_config = config
.chains
.get(&chain_id)
.expect("All chains should be present in the config file")
.clone();
let private_key = private_key.clone();
handles.push(spawn(keeper::run_keeper_threads(
private_key,
chain_eth_config,
chain_config.clone(),
)));
}
Ok(())
}
pub async fn run(opts: &RunOptions) -> Result<()> {
let config = Config::load(&opts.config.config)?;
let provider_config = opts
.provider_config
.provider_config
.as_ref()
.map(|path| ProviderConfig::load(&path).expect("Failed to load provider config"));
let secret = opts.randomness.load_secret()?;
let (tx_exit, rx_exit) = watch::channel(false);
let mut chains: HashMap<ChainId, BlockchainState> = HashMap::new();
for (chain_id, chain_config) in &config.chains {
let contract = Arc::new(PythContract::from_config(&chain_config)?);
let provider_chain_config = provider_config
.as_ref()
.and_then(|c| c.get_chain_config(chain_id));
let mut provider_commitments = provider_chain_config
.as_ref()
.map(|c| c.get_sorted_commitments())
.unwrap_or_else(|| Vec::new());
let provider_info = contract.get_provider_info(opts.provider).call().await?;
let latest_metadata =
bincode::deserialize::<CommitmentMetadata>(&provider_info.commitment_metadata)
.map_err(|e| {
anyhow!(
"Chain: {} - Failed to deserialize commitment metadata: {}",
&chain_id,
e
)
})?;
provider_commitments.push(Commitment {
seed: latest_metadata.seed,
chain_length: latest_metadata.chain_length,
original_commitment_sequence_number: provider_info.original_commitment_sequence_number,
});
// TODO: we may want to load the hash chain in a lazy/fault-tolerant way. If there are many blockchains,
// then it's more likely that some RPC fails. We should tolerate these faults and generate the hash chain
// later when a user request comes in for that chain.
let mut offsets = Vec::<usize>::new();
let mut hash_chains = Vec::<PebbleHashChain>::new();
for commitment in &provider_commitments {
let offset = commitment.original_commitment_sequence_number.try_into()?;
offsets.push(offset);
let pebble_hash_chain = PebbleHashChain::from_config(
&secret,
&chain_id,
&opts.provider,
&chain_config.contract_addr,
&commitment.seed,
commitment.chain_length,
)?;
hash_chains.push(pebble_hash_chain);
}
let chain_state = HashChainState {
offsets,
hash_chains,
};
if chain_state.reveal(provider_info.original_commitment_sequence_number)?
!= provider_info.original_commitment
{
return Err(anyhow!("The root of the generated hash chain for chain id {} does not match the commitment. Are the secret and chain length configured correctly?", &chain_id).into());
} else {
tracing::info!("Root of chain id {} matches commitment", &chain_id);
}
let state = api::BlockchainState {
id: chain_id.clone(),
state: Arc::new(chain_state),
contract,
provider_address: opts.provider,
reveal_delay_blocks: chain_config.reveal_delay_blocks,
confirmed_block_status: chain_config.confirmed_block_status,
};
chains.insert(chain_id.clone(), state);
}
// Listen for Ctrl+C so we can set the exit flag and wait for a graceful shutdown.
spawn(async move {
tracing::info!("Registered shutdown signal handler...");
tokio::signal::ctrl_c().await.unwrap();
tracing::info!("Shut down signal received, waiting for tasks...");
// No need to handle the error here: it only occurs when all the
// receivers have been dropped, which is exactly what we want.
tx_exit.send(true)?;
Ok::<(), Error>(())
});
if let Some(keeper_private_key) = opts.load_keeper_private_key()? {
spawn(run_keeper(chains.clone(), config, keeper_private_key));
}
run_api(opts.addr.clone(), chains, rx_exit).await?;
Ok(())
}
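The shutdown handling above is a compact pattern worth noting: a `tokio::sync::watch` channel acts as a broadcast exit flag, and every task holding a receiver wakes up on the first send. A standalone sketch of the same pattern (illustrative, not Fortuna code):

```rust
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (tx_exit, mut rx_exit) = watch::channel(false);

    // Any number of tasks can clone the receiver and await a change.
    let worker = tokio::spawn(async move {
        let _ = rx_exit.changed().await; // resolves on send *or* sender drop
        println!("worker shutting down");
    });

    tx_exit.send(true).unwrap(); // flip the flag; all receivers wake up
    worker.await.unwrap();
}
```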


@ -16,7 +16,10 @@ use {
PebbleHashChain,
},
},
-anyhow::Result,
anyhow::{
anyhow,
Result,
},
ethers::{
abi::Bytes as AbiBytes,
signers::{
@ -66,7 +69,14 @@ pub async fn setup_provider(opts: &SetupProviderOptions) -> Result<()> {
register = true;
} else {
let metadata =
-bincode::deserialize::<CommitmentMetadata>(&provider_info.commitment_metadata)?;
bincode::deserialize::<CommitmentMetadata>(&provider_info.commitment_metadata)
.map_err(|e| {
anyhow!(
"Chain: {} - Failed to deserialize commitment metadata: {}",
&chain_id,
e
)
})?;

let hash_chain = PebbleHashChain::from_config(
&secret,
@ -74,7 +84,7 @@ pub async fn setup_provider(opts: &SetupProviderOptions) -> Result<()> {
&provider_address,
&chain_config.contract_addr,
&metadata.seed,
-metadata.chain_length,
opts.randomness.chain_length,
)?;
let chain_state = HashChainState {
offsets: vec![provider_info
@ -105,7 +115,8 @@
fee: opts.fee,
uri,
})
-.await?;
.await
.map_err(|e| anyhow!("Chain: {} - Failed to register provider: {}", &chain_id, e))?;
tracing::info!("{}: registered", &chain_id);
} else {
if provider_info.fee_in_wei != opts.fee {

View File

@ -18,7 +18,10 @@ use {
Args,
Parser,
},
-ethers::types::Address,
ethers::types::{
Address,
U256,
},
std::{
collections::HashMap,
fs,
@ -94,7 +97,7 @@ pub struct RandomnessOptions {
/// The length of the hash chain to generate.
#[arg(long = "chain-length")]
#[arg(env = "FORTUNA_CHAIN_LENGTH")]
-#[arg(default_value = "10000")]
#[arg(default_value = "100000")]
pub chain_length: u64,
}
@ -131,6 +134,9 @@ pub struct EthereumConfig {
/// URL of a Geth RPC endpoint to use for interacting with the blockchain.
pub geth_rpc_addr: String,

/// URL of a Geth RPC wss endpoint to use for subscribing to blockchain events.
pub geth_rpc_wss: Option<String>,

/// Address of a Pyth Randomness contract to interact with.
pub contract_addr: Address,
@ -148,4 +154,61 @@
/// For example, Finalized, Safe, Latest
#[serde(default)]
pub confirmed_block_status: BlockStatus,
/// The gas limit to use for entropy callback transactions.
pub gas_limit: U256,
}
#[derive(Args, Clone, Debug)]
#[command(next_help_heading = "Provider Config Options")]
#[group(id = "ProviderConfig")]
pub struct ProviderConfigOptions {
#[arg(long = "provider-config")]
#[arg(env = "FORTUNA_PROVIDER_CONFIG")]
pub provider_config: Option<String>,
}
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct ProviderConfig {
pub chains: HashMap<ChainId, ProviderChainConfig>,
}
impl ProviderConfig {
pub fn load(path: &str) -> Result<ProviderConfig> {
// Open and read the YAML file
let yaml_content = fs::read_to_string(path)?;
let config: ProviderConfig = serde_yaml::from_str(&yaml_content)?;
Ok(config)
}
/// Get the provider chain config. Returns an Option<ProviderChainConfig> because we
/// may not have any past commitments for a chain (for example, a newly added chain).
pub fn get_chain_config(&self, chain_id: &ChainId) -> Option<ProviderChainConfig> {
self.chains.get(chain_id).map(|x| x.clone())
}
}
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct ProviderChainConfig {
commitments: Vec<Commitment>,
}
impl ProviderChainConfig {
/// Returns a clone of the commitments in sorted order;
/// `HashChainState` requires the offsets to be in order.
pub fn get_sorted_commitments(&self) -> Vec<Commitment> {
let mut sorted_commitments = self.commitments.clone();
sorted_commitments.sort_by(|c1, c2| {
c1.original_commitment_sequence_number
.cmp(&c2.original_commitment_sequence_number)
});
sorted_commitments
}
}
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct Commitment {
pub seed: [u8; 32],
pub chain_length: u64,
pub original_commitment_sequence_number: u64,
}
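To make the commitment plumbing concrete, a usage sketch of the config types above (assuming `ChainId` is the `String` alias from the api module, and reusing the sample YAML shown earlier; this is not code from the diff):

```rust
use fortuna::config::{Commitment, ProviderConfig};

fn sorted_commitments(path: &str, chain_id: &str) -> anyhow::Result<Vec<Commitment>> {
    // Parses the YAML provider config (e.g. the lightlink-pegasus sample above).
    let config = ProviderConfig::load(path)?;
    // Chains with no recorded past commitments simply yield an empty list.
    Ok(config
        .get_chain_config(&chain_id.to_string())
        .map(|c| c.get_sorted_commitments())
        .unwrap_or_default())
}
```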


@ -0,0 +1,55 @@
use {
crate::config::{
ConfigOptions,
ProviderConfigOptions,
RandomnessOptions,
},
anyhow::Result,
clap::Args,
ethers::types::Address,
std::{
fs,
net::SocketAddr,
},
};
/// Run the webservice
#[derive(Args, Clone, Debug)]
pub struct RunOptions {
#[command(flatten)]
pub config: ConfigOptions,
#[command(flatten)]
pub provider_config: ProviderConfigOptions,
#[command(flatten)]
pub randomness: RandomnessOptions,
/// Address and port the HTTP server will bind to.
#[arg(long = "rpc-listen-addr")]
#[arg(default_value = super::DEFAULT_RPC_ADDR)]
#[arg(env = "RPC_ADDR")]
pub addr: SocketAddr,
/// The public key of the provider whose requests the server will respond to.
#[arg(long = "provider")]
#[arg(env = "FORTUNA_PROVIDER")]
pub provider: Address,
/// If provided, the keeper will run alongside the Fortuna API service.
/// It should be a path to a file containing a 32-byte (64 char) hex encoded Ethereum private key.
/// This key is required to submit transactions for entropy callback requests.
/// This key should not be a registered provider.
#[arg(long = "keeper-private-key")]
#[arg(env = "KEEPER_PRIVATE_KEY")]
pub keeper_private_key_file: Option<String>,
}
impl RunOptions {
pub fn load_keeper_private_key(&self) -> Result<Option<String>> {
if let Some(ref keeper_private_key_file) = self.keeper_private_key_file {
return Ok(Some(fs::read_to_string(keeper_private_key_file)?));
}
return Ok(None);
}
}

apps/fortuna/src/keeper.rs

@ -0,0 +1,487 @@
use {
crate::{
api::{
self,
BlockchainState,
},
chain::{
ethereum::SignablePythContract,
reader::{
BlockNumber,
RequestedWithCallbackEvent,
},
},
config::EthereumConfig,
},
anyhow::{
anyhow,
Result,
},
ethers::{
contract::ContractError,
providers::{
Middleware,
Provider,
Ws,
},
types::U256,
},
futures::StreamExt,
std::sync::Arc,
tokio::{
spawn,
sync::mpsc,
time::{
self,
Duration,
},
},
tracing::{
self,
Instrument,
},
};
#[derive(Debug)]
pub struct BlockRange {
pub from: BlockNumber,
pub to: BlockNumber,
}
/// How long to wait before retrying in case of an RPC error
const RETRY_INTERVAL: Duration = Duration::from_secs(5);
/// How many blocks to look back for events that might have been missed when starting the keeper
const BACKLOG_RANGE: u64 = 1000;
/// How many blocks to fetch events for in a single RPC call
const BLOCK_BATCH_SIZE: u64 = 100;
/// How long to wait before polling for the next latest block
const POLL_INTERVAL: Duration = Duration::from_secs(5);
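// Illustrative note (not part of the diff): with the defaults above, a keeper whose
// latest safe block is 5000 first replays the backlog range [4000, 5000]; batching in
// process_block_range then splits any range into chunks of at most BLOCK_BATCH_SIZE + 1
// blocks, e.g. [4000, 4100], [4101, 4201], and so on.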
/// Get the latest safe block number for the chain. Retry internally if there is an error.
async fn get_latest_safe_block(chain_state: &BlockchainState) -> BlockNumber {
loop {
match chain_state
.contract
.get_block_number(chain_state.confirmed_block_status)
.await
{
Ok(latest_confirmed_block) => {
tracing::info!(
"Fetched latest safe block {}",
latest_confirmed_block - chain_state.reveal_delay_blocks
);
return latest_confirmed_block - chain_state.reveal_delay_blocks;
}
Err(e) => {
tracing::error!("Error while getting block number. error: {:?}", e);
time::sleep(RETRY_INTERVAL).await;
}
}
}
}
/// Run threads to handle events for the last `BACKLOG_RANGE` blocks, watch for new blocks and
/// handle any events for the new blocks.
#[tracing::instrument(name="keeper", skip_all, fields(chain_id=chain_state.id))]
pub async fn run_keeper_threads(
private_key: String,
chain_eth_config: EthereumConfig,
chain_state: BlockchainState,
) {
tracing::info!("starting keeper");
let latest_safe_block = get_latest_safe_block(&chain_state).in_current_span().await;
tracing::info!("latest safe block: {}", &latest_safe_block);
let contract = Arc::new(
SignablePythContract::from_config(&chain_eth_config, &private_key)
.await
.expect("Chain config should be valid"),
);
// Spawn a thread to handle the events from last BACKLOG_RANGE blocks.
spawn(
process_backlog(
BlockRange {
from: latest_safe_block.saturating_sub(BACKLOG_RANGE),
to: latest_safe_block,
},
contract.clone(),
chain_eth_config.gas_limit,
chain_state.clone(),
)
.in_current_span(),
);
let (tx, rx) = mpsc::channel::<BlockRange>(1000);
// Spawn a thread to watch for new blocks and send the ranges of blocks whose events have not been handled to the `tx` channel.
spawn(
watch_blocks_wrapper(
chain_state.clone(),
latest_safe_block,
tx,
chain_eth_config.geth_rpc_wss.clone(),
)
.in_current_span(),
);
// Spawn a thread that listens for block ranges on the `rx` channel and processes the events for those blocks.
spawn(
process_new_blocks(
chain_state.clone(),
rx,
Arc::clone(&contract),
chain_eth_config.gas_limit,
)
.in_current_span(),
);
}
/// Process an event for a chain. It estimates the gas for the reveal with callback and
/// submits the transaction if the gas estimate is below the gas limit.
/// It will return an Error if the gas estimation failed with a provider error or if the
/// reveal with callback failed with a provider error.
pub async fn process_event(
event: RequestedWithCallbackEvent,
chain_config: &BlockchainState,
contract: &Arc<SignablePythContract>,
gas_limit: U256,
) -> Result<()> {
if chain_config.provider_address != event.provider_address {
return Ok(());
}
let provider_revelation = match chain_config.state.reveal(event.sequence_number) {
Ok(result) => result,
Err(e) => {
tracing::error!(
sequence_number = &event.sequence_number,
"Error while revealing with error: {:?}",
e
);
return Ok(());
}
};
let gas_estimate_res = chain_config
.contract
.estimate_reveal_with_callback_gas(
event.provider_address,
event.sequence_number,
event.user_random_number,
provider_revelation,
)
.in_current_span()
.await;
match gas_estimate_res {
Ok(gas_estimate_option) => match gas_estimate_option {
Some(gas_estimate) => {
// Pad the gas estimate by ~33% (multiply by 4/3) to leave headroom over the simulation
let (gas_estimate, _) = gas_estimate
.saturating_mul(U256::from(4))
.div_mod(U256::from(3));
if gas_estimate > gas_limit {
tracing::error!(
sequence_number = &event.sequence_number,
"Gas estimate for reveal with callback is higher than the gas limit"
);
return Ok(());
}
let contract_call = contract
.reveal_with_callback(
event.provider_address,
event.sequence_number,
event.user_random_number,
provider_revelation,
)
.gas(gas_estimate);
let res = contract_call.send().await;
let pending_tx = match res {
Ok(pending_tx) => pending_tx,
Err(e) => match e {
// If there is a provider error, we weren't able to send the transaction;
// return an error so that the caller can decide what to do (e.g., retry).
ContractError::ProviderError { e } => return Err(e.into()),
// For all other errors, it is likely we will never be able to reveal;
// return Ok(()) to signal that we have processed this request and
// concluded that it is acceptable not to reveal.
_ => {
tracing::error!(
sequence_number = &event.sequence_number,
"Error while revealing with error: {:?}",
e
);
return Ok(());
}
},
};
match pending_tx.await {
Ok(res) => {
tracing::info!(
sequence_number = &event.sequence_number,
"Revealed with res: {:?}",
res
);
Ok(())
}
Err(e) => {
tracing::error!(
sequence_number = &event.sequence_number,
"Error while revealing with error: {:?}",
e
);
Err(e.into())
}
}
}
None => {
tracing::info!(
sequence_number = &event.sequence_number,
"Not processing event"
);
Ok(())
}
},
Err(e) => {
tracing::error!(
sequence_number = &event.sequence_number,
"Error while simulating reveal with error: {:?}",
e
);
Err(e)
}
}
}
/// Process a range of blocks in batches. It calls the `process_single_block_batch` method for each batch.
#[tracing::instrument(skip_all, fields(range_from_block=block_range.from, range_to_block=block_range.to))]
pub async fn process_block_range(
block_range: BlockRange,
contract: Arc<SignablePythContract>,
gas_limit: U256,
chain_state: api::BlockchainState,
) {
let BlockRange {
from: first_block,
to: last_block,
} = block_range;
let mut current_block = first_block;
while current_block <= last_block {
let mut to_block = current_block + BLOCK_BATCH_SIZE;
if to_block > last_block {
to_block = last_block;
}
process_single_block_batch(
BlockRange {
from: current_block,
to: to_block,
},
contract.clone(),
gas_limit,
chain_state.clone(),
)
.in_current_span()
.await;
current_block = to_block + 1;
}
}
/// Process a batch of blocks for a chain. It fetches events for all blocks in the batch in a single call
/// and then processes them one by one. If processing an event fails, it retries indefinitely.
#[tracing::instrument(name="batch", skip_all, fields(batch_from_block=block_range.from, batch_to_block=block_range.to))]
pub async fn process_single_block_batch(
block_range: BlockRange,
contract: Arc<SignablePythContract>,
gas_limit: U256,
chain_state: api::BlockchainState,
) {
loop {
let events_res = chain_state
.contract
.get_request_with_callback_events(block_range.from, block_range.to)
.await;
match events_res {
Ok(events) => {
tracing::info!(num_of_events = &events.len(), "Processing",);
for event in &events {
tracing::info!(sequence_number = &event.sequence_number, "Processing event",);
while let Err(e) =
process_event(event.clone(), &chain_state, &contract, gas_limit)
.in_current_span()
.await
{
tracing::error!(
sequence_number = &event.sequence_number,
"Error while processing event. Waiting for {} seconds before retry. error: {:?}",
RETRY_INTERVAL.as_secs(),
e
);
time::sleep(RETRY_INTERVAL).await;
}
tracing::info!(sequence_number = &event.sequence_number, "Processed event",);
}
tracing::info!(num_of_events = &events.len(), "Processed",);
break;
}
Err(e) => {
tracing::error!(
"Error while getting events. Waiting for {} seconds before retry. error: {:?}",
RETRY_INTERVAL.as_secs(),
e
);
time::sleep(RETRY_INTERVAL).await;
}
}
}
}
/// Wrapper for the `watch_blocks` method. If there was an error while watching, it will retry after a delay.
/// It retries indefinitely.
#[tracing::instrument(name="watch_blocks", skip_all, fields(initial_safe_block=latest_safe_block))]
pub async fn watch_blocks_wrapper(
chain_state: BlockchainState,
latest_safe_block: BlockNumber,
tx: mpsc::Sender<BlockRange>,
geth_rpc_wss: Option<String>,
) {
let mut last_safe_block_processed = latest_safe_block;
loop {
if let Err(e) = watch_blocks(
chain_state.clone(),
&mut last_safe_block_processed,
tx.clone(),
geth_rpc_wss.clone(),
)
.in_current_span()
.await
{
tracing::error!("watching blocks. error: {:?}", e);
time::sleep(RETRY_INTERVAL).await;
}
}
}
/// Watch for new blocks and send the ranges of blocks whose events have not yet been handled to the `tx` channel.
/// We subscribe to new blocks instead of events: if we miss a few blocks, that is fine, because we send
/// whole block ranges over the channel. Had we subscribed to events instead, we could have missed some
/// and would never know about it.
pub async fn watch_blocks(
chain_state: BlockchainState,
last_safe_block_processed: &mut BlockNumber,
tx: mpsc::Sender<BlockRange>,
geth_rpc_wss: Option<String>,
) -> Result<()> {
tracing::info!("Watching blocks to handle new events");
let provider_option = match geth_rpc_wss {
Some(wss) => Some(match Provider::<Ws>::connect(wss.clone()).await {
Ok(provider) => provider,
Err(e) => {
tracing::error!("Error while connecting to wss: {}. error: {:?}", wss, e);
return Err(e.into());
}
}),
None => {
tracing::info!("No wss provided");
None
}
};
let mut stream_option = match provider_option {
Some(ref provider) => Some(match provider.subscribe_blocks().await {
Ok(client) => client,
Err(e) => {
tracing::error!("Error while subscribing to blocks. error {:?}", e);
return Err(e.into());
}
}),
None => None,
};
loop {
match stream_option {
Some(ref mut stream) => {
if stream.next().await.is_none() {
tracing::error!("Block subscription stream ended unexpectedly");
return Err(anyhow!("Block subscription stream ended unexpectedly"));
}
}
None => {
time::sleep(POLL_INTERVAL).await;
}
}
let latest_safe_block = get_latest_safe_block(&chain_state).in_current_span().await;
if latest_safe_block > *last_safe_block_processed {
match tx
.send(BlockRange {
from: *last_safe_block_processed + 1,
to: latest_safe_block,
})
.await
{
Ok(_) => {
tracing::info!(
from_block = *last_safe_block_processed + 1,
to_block = &latest_safe_block,
"Block range sent to handle events",
);
*last_safe_block_processed = latest_safe_block;
}
Err(e) => {
tracing::error!(
"Error while sending block range to handle events. These will be handled in next call. error: {:?}",
e
);
}
};
}
}
}
/// Waits on the `rx` channel for block ranges and calls `process_block_range` to process each one.
#[tracing::instrument(skip_all)]
pub async fn process_new_blocks(
chain_state: BlockchainState,
mut rx: mpsc::Receiver<BlockRange>,
contract: Arc<SignablePythContract>,
gas_limit: U256,
) {
tracing::info!("Waiting for new block ranges to process");
loop {
if let Some(block_range) = rx.recv().await {
process_block_range(
block_range,
Arc::clone(&contract),
gas_limit,
chain_state.clone(),
)
.in_current_span()
.await;
}
}
}
/// Processes the backlog_range for a chain.
#[tracing::instrument(skip_all)]
pub async fn process_backlog(
backlog_range: BlockRange,
contract: Arc<SignablePythContract>,
gas_limit: U256,
chain_state: BlockchainState,
) {
tracing::info!("Processing backlog");
process_block_range(backlog_range, contract, gas_limit, chain_state)
.in_current_span()
.await;
tracing::info!("Backlog processed");
}
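One detail of `process_event` worth pinning down is the gas padding arithmetic; a tiny standalone sketch of the same `U256` math (illustrative only):

```rust
use ethers::types::U256;

fn pad_gas(estimate: U256) -> U256 {
    // saturating_mul avoids overflow near U256::MAX; div_mod discards the
    // remainder, so the result is estimate * 4 / 3, rounded down (~1.33x).
    let (padded, _remainder) = estimate.saturating_mul(U256::from(4)).div_mod(U256::from(3));
    padded
}

fn main() {
    assert_eq!(pad_gas(U256::from(90_000u64)), U256::from(120_000u64));
    assert_eq!(pad_gas(U256::from(100u64)), U256::from(133u64));
}
```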


@ -11,6 +11,7 @@ pub mod api;
pub mod chain;
pub mod command;
pub mod config;
pub mod keeper;
pub mod state;

// Server TODO list:

View File

@ -1796,7 +1796,7 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "hermes"
-version = "0.5.3"
version = "0.5.9"
dependencies = [
"anyhow",
"async-trait",
@ -1839,6 +1839,7 @@ dependencies = [
"solana-sdk",
"strum",
"tokio",
"tokio-stream",
"tonic",
"tonic-build",
"tower-http",
@ -3137,7 +3138,7 @@ dependencies = [
[[package]]
name = "pythnet-sdk"
-version = "2.0.0"
version = "2.1.0"
dependencies = [
"bincode",
"borsh 0.10.3",
@ -5188,9 +5189,9 @@ dependencies = [
[[package]]
name = "termcolor"
-version = "1.4.1"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755"
dependencies = [
"winapi-util",
]
@ -5385,6 +5386,7 @@ dependencies = [
"futures-core",
"pin-project-lite",
"tokio",
"tokio-util",
]

[[package]]


@ -1,6 +1,6 @@
[package]
name = "hermes"
-version = "0.5.3"
version = "0.5.9"
description = "Hermes is an agent that provides Verified Prices from the Pythnet Pyth Oracle."
edition = "2021"
@ -31,7 +31,7 @@ prometheus-client = { version = "0.21.2" }
prost = { version = "0.12.1" }
pyth-sdk = { version = "0.8.0" }
pyth-sdk-solana = { version = "0.9.0" }
-pythnet-sdk = { path = "../pythnet/pythnet_sdk/", version = "2.0.0", features = ["strum"] }
pythnet-sdk = { path = "../../pythnet/pythnet_sdk/", version = "2.0.0", features = ["strum"] }
rand = { version = "0.8.5" }
reqwest = { version = "0.11.14", features = ["blocking", "json"] }
secp256k1 = { version = "0.27.0", features = ["rand", "recovery", "serde"] }
@ -42,6 +42,7 @@ serde_wormhole = { git = "https://github.com/wormhole-foundation/wormhol
sha3 = { version = "0.10.4" }
strum = { version = "0.24.1", features = ["derive"] }
tokio = { version = "1.26.0", features = ["full"] }
tokio-stream = { version = "0.1.15", features = ["full"] }
tonic = { version = "0.10.1", features = ["tls"] }
tower-http = { version = "0.4.0", features = ["cors"] }
tracing = { version = "0.1.37", features = ["log"] }
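`tokio-stream` is pulled in for the new streaming endpoint; as a rough sketch of how an axum SSE handler can be built on it (a hypothetical `String` payload stands in for Hermes's real price updates, and this is not the actual handler):

```rust
use {
    axum::response::sse::{Event, KeepAlive, Sse},
    std::convert::Infallible,
    tokio::sync::broadcast,
    tokio_stream::{wrappers::BroadcastStream, Stream, StreamExt},
};

// Sketch: turn a broadcast channel of updates into a server-sent-event stream.
pub fn price_stream(
    tx: &broadcast::Sender<String>,
) -> Sse<impl Stream<Item = Result<Event, Infallible>>> {
    let stream = BroadcastStream::new(tx.subscribe())
        // Skip updates dropped because this client lagged behind.
        .filter_map(|update| update.ok())
        .map(|update| Ok::<_, Infallible>(Event::default().data(update)));
    Sse::new(stream).keep_alive(KeepAlive::default())
}
```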


@ -12,15 +12,15 @@ RUN rustup default nightly-2024-03-26
# Build
WORKDIR /src
-COPY hermes hermes
COPY apps/hermes apps/hermes
COPY pythnet/pythnet_sdk pythnet/pythnet_sdk
-WORKDIR /src/hermes
WORKDIR /src/apps/hermes
RUN --mount=type=cache,target=/root/.cargo/registry cargo build --release

FROM rust:1.77.0
# Copy artifacts from other images
-COPY --from=build /src/hermes/target/release/hermes /usr/local/bin/
COPY --from=build /src/apps/hermes/target/release/hermes /usr/local/bin/


@ -35,14 +35,14 @@ To set up and run a Hermes node, follow the steps below:
   ```
4. **Build the project**: Navigate to the project directory and run the following command to build the project:
   ```bash
-  cd hermes
   cd apps/hermes
   cargo build --release
   ```
   This will create a binary in the target/release directory.
5. **Run the node**: To run Hermes for Pythnet, use the following command:
   ```bash
-  ./target/release/hermes run \
   cargo run --release -- run \
     --pythnet-http-addr https://pythnet-rpc/ \
     --pythnet-ws-addr wss://pythnet-rpc/ \
     --wormhole-spy-rpc-addr https://wormhole-spy-rpc/


@ -1,6 +1,5 @@
use {
crate::{
-aggregate::AggregationEvent,
config::RunOptions,
state::State,
},
@ -14,7 +13,6 @@ use {
ipnet::IpNet,
serde_qs::axum::QsQueryConfig,
std::sync::Arc,
-tokio::sync::broadcast::Sender,
tower_http::cors::CorsLayer,
utoipa::OpenApi,
utoipa_swagger_ui::SwaggerUi,
@ -26,20 +24,29 @@ mod rest;
pub mod types;
mod ws;

-#[derive(Clone)]
-pub struct ApiState {
-pub state: Arc<State>,
pub struct ApiState<S = State> {
pub state: Arc<S>,
pub ws: Arc<ws::WsState>,
pub metrics: Arc<metrics_middleware::Metrics>,
-pub update_tx: Sender<AggregationEvent>,
}

/// Manually implement `Clone` as the derive macro will try and slap `Clone` on
/// `State` which should not be Clone.
impl<S> Clone for ApiState<S> {
fn clone(&self) -> Self {
Self {
state: self.state.clone(),
ws: self.ws.clone(),
metrics: self.metrics.clone(),
}
}
}

-impl ApiState {
impl ApiState<State> {
pub fn new(
state: Arc<State>,
ws_whitelist: Vec<IpNet>,
requester_ip_header_name: String,
-update_tx: Sender<AggregationEvent>,
) -> Self {
Self {
metrics: Arc::new(metrics_middleware::Metrics::new(state.clone())),
@ -49,24 +56,18 @@ impl ApiState {
state.clone(),
)),
state,
-update_tx,
}
}
}
-#[tracing::instrument(skip(opts, state, update_tx))]
#[tracing::instrument(skip(opts, state))]
-pub async fn spawn(
-opts: RunOptions,
-state: Arc<State>,
-update_tx: Sender<AggregationEvent>,
-) -> Result<()> {
pub async fn spawn(opts: RunOptions, state: Arc<State>) -> Result<()> {
let state = {
let opts = opts.clone();
ApiState::new(
state,
opts.rpc.ws_whitelist,
opts.rpc.requester_ip_header_name,
-update_tx,
)
};
@ -93,6 +94,7 @@ pub async fn run(opts: RunOptions, state: ApiState) -> Result<()> {
rest::latest_price_updates,
rest::timestamp_price_updates,
rest::price_feeds_metadata,
rest::price_stream_sse_handler,
),
components(
schemas(
@ -122,6 +124,7 @@
// Initialize Axum Router. Note the type here is a `Router<State>` due to the use of the
// `with_state` method which replaces `Body` with `State` in the type signature.
let app = Router::new();
#[allow(deprecated)]
let app = app
.merge(SwaggerUi::new("/docs").url("/docs/openapi.json", ApiDoc::openapi()))
.route("/", get(rest::index))
@ -131,6 +134,10 @@
.route("/api/latest_price_feeds", get(rest::latest_price_feeds))
.route("/api/latest_vaas", get(rest::latest_vaas))
.route("/api/price_feed_ids", get(rest::price_feed_ids))
.route(
"/v2/updates/price/stream",
get(rest::price_stream_sse_handler),
)
.route("/v2/updates/price/latest", get(rest::latest_price_updates))
.route(
"/v2/updates/price/:publish_time",

View File

@ -1,4 +1,4 @@
-use crate::aggregate::UnixTimestamp;
use crate::state::aggregate::UnixTimestamp;

// Example values for the utoipa API docs.
// Note that each of these expressions is only evaluated once when the documentation is created,


@ -1,5 +1,6 @@
use { use {
super::ApiState, super::ApiState,
crate::state::aggregate::Aggregates,
axum::{ axum::{
http::StatusCode, http::StatusCode,
response::{ response::{
@ -21,6 +22,7 @@ mod price_feed_ids;
mod ready; mod ready;
mod v2; mod v2;
pub use { pub use {
get_price_feed::*, get_price_feed::*,
get_vaa::*, get_vaa::*,
@ -34,10 +36,12 @@ pub use {
v2::{ v2::{
latest_price_updates::*, latest_price_updates::*,
price_feeds_metadata::*, price_feeds_metadata::*,
sse::*,
timestamp_price_updates::*, timestamp_price_updates::*,
}, },
}; };
#[derive(Debug)]
pub enum RestError { pub enum RestError {
BenchmarkPriceNotUnique, BenchmarkPriceNotUnique,
UpdateDataNotFound, UpdateDataNotFound,
@ -90,11 +94,15 @@ impl IntoResponse for RestError {
} }
/// Verify that the price ids exist in the aggregate state. /// Verify that the price ids exist in the aggregate state.
pub async fn verify_price_ids_exist( pub async fn verify_price_ids_exist<S>(
state: &ApiState, state: &ApiState<S>,
price_ids: &[PriceIdentifier], price_ids: &[PriceIdentifier],
) -> Result<(), RestError> { ) -> Result<(), RestError>
let all_ids = crate::aggregate::get_price_feed_ids(&*state.state).await; where
S: Aggregates,
{
let state = &*state.state;
let all_ids = Aggregates::get_price_feed_ids(state).await;
let missing_ids = price_ids let missing_ids = price_ids
.iter() .iter()
.filter(|id| !all_ids.contains(id)) .filter(|id| !all_ids.contains(id))
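Making the handlers generic over `S: Aggregates` (rather than taking the concrete `State`) is what allows test doubles; a rough sketch of the idea, with an abbreviated, hypothetical trait surface and a stand-in id type:

```rust
use std::collections::HashSet;

type PriceIdentifier = [u8; 32]; // stand-in for the real pyth-sdk type

#[async_trait::async_trait]
trait Aggregates: Send + Sync {
    async fn get_price_feed_ids(&self) -> HashSet<PriceIdentifier>;
}

// A test double: handlers under test see exactly the ids planted here.
struct MockState(HashSet<PriceIdentifier>);

#[async_trait::async_trait]
impl Aggregates for MockState {
    async fn get_price_feed_ids(&self) -> HashSet<PriceIdentifier> {
        self.0.clone()
    }
}
```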


@ -1,10 +1,6 @@
use { use {
super::verify_price_ids_exist, super::verify_price_ids_exist,
crate::{ crate::{
aggregate::{
RequestTime,
UnixTimestamp,
},
api::{ api::{
doc_examples, doc_examples,
rest::RestError, rest::RestError,
@ -12,6 +8,12 @@ use {
PriceIdInput, PriceIdInput,
RpcPriceFeed, RpcPriceFeed,
}, },
ApiState,
},
state::aggregate::{
Aggregates,
RequestTime,
UnixTimestamp,
}, },
}, },
anyhow::Result, anyhow::Result,
@ -47,6 +49,8 @@ pub struct GetPriceFeedQueryParams {
binary: bool, binary: bool,
} }
/// **Deprecated: use /v2/updates/price/{publish_time} instead**
///
/// Get a price update for a price feed with a specific timestamp /// Get a price update for a price feed with a specific timestamp
/// ///
/// Given a price feed id and timestamp, retrieve the Pyth price update closest to that timestamp. /// Given a price feed id and timestamp, retrieve the Pyth price update closest to that timestamp.
@ -60,16 +64,20 @@ pub struct GetPriceFeedQueryParams {
GetPriceFeedQueryParams GetPriceFeedQueryParams
) )
)] )]
pub async fn get_price_feed( #[deprecated]
State(state): State<crate::api::ApiState>, pub async fn get_price_feed<S>(
State(state): State<ApiState<S>>,
QsQuery(params): QsQuery<GetPriceFeedQueryParams>, QsQuery(params): QsQuery<GetPriceFeedQueryParams>,
) -> Result<Json<RpcPriceFeed>, RestError> { ) -> Result<Json<RpcPriceFeed>, RestError>
where
S: Aggregates,
{
let price_id: PriceIdentifier = params.id.into(); let price_id: PriceIdentifier = params.id.into();
verify_price_ids_exist(&state, &[price_id]).await?; verify_price_ids_exist(&state, &[price_id]).await?;
let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data( let state = &*state.state;
&*state.state, let price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
state,
&[price_id], &[price_id],
RequestTime::FirstAfter(params.publish_time), RequestTime::FirstAfter(params.publish_time),
) )


@ -1,15 +1,16 @@
use { use {
super::verify_price_ids_exist, super::verify_price_ids_exist,
crate::{ crate::{
aggregate::{
get_price_feeds_with_update_data,
RequestTime,
UnixTimestamp,
},
api::{ api::{
doc_examples, doc_examples,
rest::RestError, rest::RestError,
types::PriceIdInput, types::PriceIdInput,
ApiState,
},
state::aggregate::{
Aggregates,
RequestTime,
UnixTimestamp,
}, },
}, },
anyhow::Result, anyhow::Result,
@ -54,6 +55,8 @@ pub struct GetVaaResponse {
publish_time: UnixTimestamp, publish_time: UnixTimestamp,
} }
/// **Deprecated: use /v2/updates/price/{publish_time} instead**
///
/// Get a VAA for a price feed with a specific timestamp /// Get a VAA for a price feed with a specific timestamp
/// ///
/// Given a price feed id and timestamp, retrieve the Pyth price update closest to that timestamp. /// Given a price feed id and timestamp, retrieve the Pyth price update closest to that timestamp.
@ -68,16 +71,20 @@ pub struct GetVaaResponse {
GetVaaQueryParams GetVaaQueryParams
) )
)] )]
pub async fn get_vaa( #[deprecated]
State(state): State<crate::api::ApiState>, pub async fn get_vaa<S>(
State(state): State<ApiState<S>>,
QsQuery(params): QsQuery<GetVaaQueryParams>, QsQuery(params): QsQuery<GetVaaQueryParams>,
) -> Result<Json<GetVaaResponse>, RestError> { ) -> Result<Json<GetVaaResponse>, RestError>
where
S: Aggregates,
{
let price_id: PriceIdentifier = params.id.into(); let price_id: PriceIdentifier = params.id.into();
verify_price_ids_exist(&state, &[price_id]).await?; verify_price_ids_exist(&state, &[price_id]).await?;
let price_feeds_with_update_data = get_price_feeds_with_update_data( let state = &*state.state;
&*state.state, let price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
state,
&[price_id], &[price_id],
RequestTime::FirstAfter(params.publish_time), RequestTime::FirstAfter(params.publish_time),
) )


@ -1,11 +1,15 @@
use { use {
super::verify_price_ids_exist, super::verify_price_ids_exist,
crate::{ crate::{
aggregate::{ api::{
rest::RestError,
ApiState,
},
state::aggregate::{
Aggregates,
RequestTime, RequestTime,
UnixTimestamp, UnixTimestamp,
}, },
api::rest::RestError,
}, },
anyhow::Result, anyhow::Result,
axum::{ axum::{
@ -42,6 +46,8 @@ pub struct GetVaaCcipResponse {
data: String, // TODO: Use a typed wrapper for the hex output with leading 0x. data: String, // TODO: Use a typed wrapper for the hex output with leading 0x.
} }
/// **Deprecated: use /v2/updates/price/{publish_time} instead**
///
/// Get a VAA for a price feed using CCIP /// Get a VAA for a price feed using CCIP
/// ///
/// This endpoint accepts a single argument which is a hex-encoded byte string of the following form: /// This endpoint accepts a single argument which is a hex-encoded byte string of the following form:
@ -56,25 +62,30 @@ pub struct GetVaaCcipResponse {
GetVaaCcipQueryParams GetVaaCcipQueryParams
) )
)] )]
pub async fn get_vaa_ccip( #[deprecated]
State(state): State<crate::api::ApiState>, pub async fn get_vaa_ccip<S>(
State(state): State<ApiState<S>>,
QsQuery(params): QsQuery<GetVaaCcipQueryParams>, QsQuery(params): QsQuery<GetVaaCcipQueryParams>,
) -> Result<Json<GetVaaCcipResponse>, RestError> { ) -> Result<Json<GetVaaCcipResponse>, RestError>
where
S: Aggregates,
{
let price_id: PriceIdentifier = PriceIdentifier::new( let price_id: PriceIdentifier = PriceIdentifier::new(
params.data[0..32] params.data[0..32]
.try_into() .try_into()
.map_err(|_| RestError::InvalidCCIPInput)?, .map_err(|_| RestError::InvalidCCIPInput)?,
); );
verify_price_ids_exist(&state, &[price_id]).await?;
let publish_time = UnixTimestamp::from_be_bytes( let publish_time = UnixTimestamp::from_be_bytes(
params.data[32..40] params.data[32..40]
.try_into() .try_into()
.map_err(|_| RestError::InvalidCCIPInput)?, .map_err(|_| RestError::InvalidCCIPInput)?,
); );
verify_price_ids_exist(&state, &[price_id]).await?; let state = &*state.state;
let price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data( state,
&*state.state,
&[price_id], &[price_id],
RequestTime::FirstAfter(publish_time), RequestTime::FirstAfter(publish_time),
) )


@ -17,6 +17,7 @@ pub async fn index() -> impl IntoResponse {
"/api/get_vaa?id=<price_feed_id>&publish_time=<publish_time_in_unix_timestamp>", "/api/get_vaa?id=<price_feed_id>&publish_time=<publish_time_in_unix_timestamp>",
"/api/get_vaa_ccip?data=<0x<price_feed_id_32_bytes>+<publish_time_unix_timestamp_be_8_bytes>>", "/api/get_vaa_ccip?data=<0x<price_feed_id_32_bytes>+<publish_time_unix_timestamp_be_8_bytes>>",
"/v2/updates/price/latest?ids[]=<price_feed_id>&ids[]=<price_feed_id_2>&..(&encoding=hex|base64)(&parsed=false)", "/v2/updates/price/latest?ids[]=<price_feed_id>&ids[]=<price_feed_id_2>&..(&encoding=hex|base64)(&parsed=false)",
"/v2/updates/price/stream?ids[]=<price_feed_id>&ids[]=<price_feed_id_2>&..(&encoding=hex|base64)(&parsed=false)(&allow_unordered=false)(&benchmarks_only=false)",
"/v2/updates/price/<timestamp>?ids[]=<price_feed_id>&ids[]=<price_feed_id_2>&..(&encoding=hex|base64)(&parsed=false)", "/v2/updates/price/<timestamp>?ids[]=<price_feed_id>&ids[]=<price_feed_id_2>&..(&encoding=hex|base64)(&parsed=false)",
"/v2/price_feeds?(query=btc)(&asset_type=crypto|equity|fx|metal|rates)", "/v2/price_feeds?(query=btc)(&asset_type=crypto|equity|fx|metal|rates)",
]) ])


@@ -1,13 +1,17 @@
 use {
     super::verify_price_ids_exist,
     crate::{
-        aggregate::RequestTime,
         api::{
             rest::RestError,
             types::{
                 PriceIdInput,
                 RpcPriceFeed,
             },
+            ApiState,
+        },
+        state::aggregate::{
+            Aggregates,
+            RequestTime,
         },
     },
     anyhow::Result,
@@ -46,6 +50,8 @@ pub struct LatestPriceFeedsQueryParams {
     binary: bool,
 }

+/// **Deprecated: use /v2/updates/price/latest instead**
+///
 /// Get the latest price updates by price feed id.
 ///
 /// Given a collection of price feed ids, retrieve the latest Pyth price for each price feed.
@@ -59,19 +65,20 @@ pub struct LatestPriceFeedsQueryParams {
         LatestPriceFeedsQueryParams
     )
 )]
-pub async fn latest_price_feeds(
-    State(state): State<crate::api::ApiState>,
+#[deprecated]
+pub async fn latest_price_feeds<S>(
+    State(state): State<ApiState<S>>,
     QsQuery(params): QsQuery<LatestPriceFeedsQueryParams>,
-) -> Result<Json<Vec<RpcPriceFeed>>, RestError> {
+) -> Result<Json<Vec<RpcPriceFeed>>, RestError>
+where
+    S: Aggregates,
+{
     let price_ids: Vec<PriceIdentifier> = params.ids.into_iter().map(|id| id.into()).collect();
     verify_price_ids_exist(&state, &price_ids).await?;

-    let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
-        &*state.state,
-        &price_ids,
-        RequestTime::Latest,
-    )
+    let state = &*state.state;
+    let price_feeds_with_update_data =
+        Aggregates::get_price_feeds_with_update_data(state, &price_ids, RequestTime::Latest)
         .await
         .map_err(|e| {
             tracing::warn!(
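The hunks above repeat one mechanical change: each REST handler stops taking the concrete `crate::api::ApiState` and becomes generic over any `S: Aggregates`, so the HTTP layer can be exercised against a mock state. A minimal sketch of that pattern, assuming hypothetical `MockAggregates`/`latest_ids` stand-ins rather than Hermes' real types:

```rust
use std::sync::Arc;

use axum::{extract::State, routing::get, Json, Router};

// Stand-in for the `Aggregates` trait: the handler names only the
// capability it needs, not the concrete state type.
#[async_trait::async_trait]
trait Aggregates: Send + Sync + 'static {
    async fn latest_ids(&self) -> Vec<String>;
}

// Stand-in for `ApiState<S>`: a cheaply clonable wrapper around shared state.
struct ApiState<S> {
    state: Arc<S>,
}

impl<S> Clone for ApiState<S> {
    fn clone(&self) -> Self {
        Self { state: self.state.clone() }
    }
}

// Generic handler: tests can inject any implementation of the trait.
async fn price_feed_ids<S>(State(state): State<ApiState<S>>) -> Json<Vec<String>>
where
    S: Aggregates,
{
    Json(state.state.latest_ids().await)
}

struct MockAggregates;

#[async_trait::async_trait]
impl Aggregates for MockAggregates {
    async fn latest_ids(&self) -> Vec<String> {
        vec!["feed-1".to_string()]
    }
}

#[tokio::main]
async fn main() {
    // The generic parameter is pinned when the route is registered.
    let _app: Router = Router::new()
        .route("/ids", get(price_feed_ids::<MockAggregates>))
        .with_state(ApiState { state: Arc::new(MockAggregates) });
    // `_app` can now be served with axum::serve as usual.
}
```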

View File

@@ -1,11 +1,15 @@
 use {
     super::verify_price_ids_exist,
     crate::{
-        aggregate::RequestTime,
         api::{
             doc_examples,
             rest::RestError,
             types::PriceIdInput,
+            ApiState,
+        },
+        state::aggregate::{
+            Aggregates,
+            RequestTime,
         },
     },
     anyhow::Result,
@@ -39,6 +43,8 @@ pub struct LatestVaasQueryParams {
 }

+/// **Deprecated: use /v2/updates/price/latest instead**
+///
 /// Get VAAs for a set of price feed ids.
 ///
 /// Given a collection of price feed ids, retrieve the latest VAA for each. The returned VAA(s) can
@@ -54,19 +60,20 @@ pub struct LatestVaasQueryParams {
         (status = 200, description = "VAAs retrieved successfully", body = Vec<String>, example=json!([doc_examples::vaa_example()]))
     ),
 )]
-pub async fn latest_vaas(
-    State(state): State<crate::api::ApiState>,
+#[deprecated]
+pub async fn latest_vaas<S>(
+    State(state): State<ApiState<S>>,
     QsQuery(params): QsQuery<LatestVaasQueryParams>,
-) -> Result<Json<Vec<String>>, RestError> {
+) -> Result<Json<Vec<String>>, RestError>
+where
+    S: Aggregates,
+{
     let price_ids: Vec<PriceIdentifier> = params.ids.into_iter().map(|id| id.into()).collect();
     verify_price_ids_exist(&state, &price_ids).await?;

-    let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
-        &*state.state,
-        &price_ids,
-        RequestTime::Latest,
-    )
+    let state = &*state.state;
+    let price_feeds_with_update_data =
+        Aggregates::get_price_feeds_with_update_data(state, &price_ids, RequestTime::Latest)
         .await
         .map_err(|e| {
             tracing::warn!(

View File

@@ -1,7 +1,11 @@
 use {
-    crate::api::{
-        rest::RestError,
-        types::RpcPriceIdentifier,
+    crate::{
+        api::{
+            rest::RestError,
+            types::RpcPriceIdentifier,
+            ApiState,
+        },
+        state::aggregate::Aggregates,
     },
     anyhow::Result,
     axum::{
@@ -10,6 +14,8 @@ use {
     },
 };

+/// **Deprecated: use /v2/price_feeds instead**
+///
 /// Get the set of price feed IDs.
 ///
 /// This endpoint fetches all of the price feed IDs for which price updates can be retrieved.
@@ -21,10 +27,15 @@ use {
         (status = 200, description = "Price feed ids retrieved successfully", body = Vec<RpcPriceIdentifier>)
     ),
 )]
-pub async fn price_feed_ids(
-    State(state): State<crate::api::ApiState>,
-) -> Result<Json<Vec<RpcPriceIdentifier>>, RestError> {
-    let price_feed_ids = crate::aggregate::get_price_feed_ids(&*state.state)
+#[deprecated]
+pub async fn price_feed_ids<S>(
+    State(state): State<ApiState<S>>,
+) -> Result<Json<Vec<RpcPriceIdentifier>>, RestError>
+where
+    S: Aggregates,
+{
+    let state = &*state.state;
+    let price_feed_ids = Aggregates::get_price_feed_ids(state)
         .await
         .into_iter()
         .map(RpcPriceIdentifier::from)

View File

@@ -0,0 +1,25 @@
use {
crate::{
api::ApiState,
state::aggregate::Aggregates,
},
axum::{
extract::State,
http::StatusCode,
response::{
IntoResponse,
Response,
},
},
};
pub async fn ready<S>(State(state): State<ApiState<S>>) -> Response
where
S: Aggregates,
{
let state = &*state.state;
match Aggregates::is_ready(state).await {
true => (StatusCode::OK, "OK").into_response(),
false => (StatusCode::SERVICE_UNAVAILABLE, "Service Unavailable").into_response(),
}
}
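The new `ready` handler turns `Aggregates::is_ready` into a readiness probe (200 vs. 503). The predicate itself, defined later in this diff, is a conjunction of three checks; a freestanding sketch under the same thresholds, with `saturating_sub` added defensively where the diff subtracts slots directly:

```rust
use std::time::{Duration, Instant};

type Slot = u64;

// Thresholds copied from the diff below; everything else here is assumed.
const READINESS_STALENESS_THRESHOLD: Duration = Duration::from_secs(30);
const READINESS_MAX_ALLOWED_SLOT_LAG: Slot = 10;

/// Mirrors the three conditions combined by `Aggregates::is_ready`.
fn is_ready(
    latest_completed_update_at: Option<Instant>,
    latest_completed_slot: Option<Slot>,
    latest_observed_slot: Option<Slot>,
    metadata_len: usize,
) -> bool {
    // 1. An aggregate completed recently enough to be fresh.
    let has_completed_recently = latest_completed_update_at
        .map_or(false, |t| t.elapsed() < READINESS_STALENESS_THRESHOLD);
    // 2. The completed slot is not lagging far behind the observed slot.
    let is_not_behind = match (latest_completed_slot, latest_observed_slot) {
        (Some(completed), Some(observed)) => {
            observed.saturating_sub(completed) <= READINESS_MAX_ALLOWED_SLOT_LAG
        }
        _ => false,
    };
    // 3. Price feed metadata has been loaded at least once.
    has_completed_recently && is_not_behind && metadata_len > 0
}

fn main() {
    // A node that just completed slot 995 while slot 1000 was observed is ready.
    assert!(is_ready(Some(Instant::now()), Some(995), Some(1000), 42));
    // A node that never completed an update is not.
    assert!(!is_ready(None, None, Some(1000), 42));
}
```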

View File

@@ -1,6 +1,5 @@
 use {
     crate::{
-        aggregate::RequestTime,
         api::{
             rest::{
                 verify_price_ids_exist,
@@ -13,6 +12,11 @@ use {
                 PriceIdInput,
                 PriceUpdate,
             },
+            ApiState,
+        },
+        state::aggregate::{
+            Aggregates,
+            RequestTime,
         },
     },
     anyhow::Result,
@@ -46,11 +50,11 @@ pub struct LatestPriceUpdatesQueryParams {
     #[param(example = "e62df6c8b4a85fe1a67db44dc12de5db330f7ac66b72dc658afedf0f4a415b43")]
     ids: Vec<PriceIdInput>,

-    /// If true, include the parsed price update in the `parsed` field of each returned feed.
+    /// If true, include the parsed price update in the `parsed` field of each returned feed. Default is `hex`.
     #[serde(default)]
     encoding: EncodingType,

-    /// If true, include the parsed price update in the `parsed` field of each returned feed.
+    /// If true, include the parsed price update in the `parsed` field of each returned feed. Default is `true`.
     #[serde(default = "default_true")]
     parsed: bool,
 }
@@ -73,19 +77,19 @@ fn default_true() -> bool {
         LatestPriceUpdatesQueryParams
     )
 )]
-pub async fn latest_price_updates(
-    State(state): State<crate::api::ApiState>,
+pub async fn latest_price_updates<S>(
+    State(state): State<ApiState<S>>,
     QsQuery(params): QsQuery<LatestPriceUpdatesQueryParams>,
-) -> Result<Json<PriceUpdate>, RestError> {
+) -> Result<Json<PriceUpdate>, RestError>
+where
+    S: Aggregates,
+{
     let price_ids: Vec<PriceIdentifier> = params.ids.into_iter().map(|id| id.into()).collect();
     verify_price_ids_exist(&state, &price_ids).await?;

-    let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
-        &*state.state,
-        &price_ids,
-        RequestTime::Latest,
-    )
+    let state = &*state.state;
+    let price_feeds_with_update_data =
+        Aggregates::get_price_feeds_with_update_data(state, &price_ids, RequestTime::Latest)
         .await
         .map_err(|e| {
             tracing::warn!(

View File

@@ -1,3 +1,4 @@
 pub mod latest_price_updates;
 pub mod price_feeds_metadata;
+pub mod sse;
 pub mod timestamp_price_updates;

View File

@@ -6,8 +6,9 @@ use {
             AssetType,
             PriceFeedMetadata,
         },
+        ApiState,
     },
-    price_feeds_metadata::get_price_feeds_metadata,
+    price_feeds_metadata::PriceFeedMeta,
 },
 anyhow::Result,
 axum::{
@@ -46,12 +47,16 @@ pub struct PriceFeedsMetadataQueryParams {
         PriceFeedsMetadataQueryParams
     )
 )]
-pub async fn price_feeds_metadata(
-    State(state): State<crate::api::ApiState>,
+pub async fn price_feeds_metadata<S>(
+    State(state): State<ApiState<S>>,
     QsQuery(params): QsQuery<PriceFeedsMetadataQueryParams>,
-) -> Result<Json<Vec<PriceFeedMetadata>>, RestError> {
-    let price_feeds_metadata =
-        get_price_feeds_metadata(&state.state, params.query, params.asset_type)
+) -> Result<Json<Vec<PriceFeedMetadata>>, RestError>
+where
+    S: PriceFeedMeta,
+{
+    let state = &state.state;
+    let price_feeds_metadata = state
+        .get_price_feeds_metadata(params.query, params.asset_type)
         .await
         .map_err(|e| {
             tracing::warn!("RPC connection error: {}", e);

View File

@@ -0,0 +1,235 @@
use {
crate::{
api::{
rest::{
verify_price_ids_exist,
RestError,
},
types::{
BinaryPriceUpdate,
EncodingType,
ParsedPriceUpdate,
PriceIdInput,
PriceUpdate,
RpcPriceIdentifier,
},
ApiState,
},
state::aggregate::{
Aggregates,
AggregationEvent,
RequestTime,
},
},
anyhow::Result,
axum::{
extract::State,
response::sse::{
Event,
KeepAlive,
Sse,
},
},
futures::Stream,
pyth_sdk::PriceIdentifier,
serde::Deserialize,
serde_qs::axum::QsQuery,
std::convert::Infallible,
tokio::sync::broadcast,
tokio_stream::{
wrappers::BroadcastStream,
StreamExt as _,
},
utoipa::IntoParams,
};
#[derive(Debug, Deserialize, IntoParams)]
#[into_params(parameter_in = Query)]
pub struct StreamPriceUpdatesQueryParams {
/// Get the most recent price update for this set of price feed ids.
///
/// This parameter can be provided multiple times to retrieve multiple price updates,
/// for example see the following query string:
///
/// ```
/// ?ids[]=a12...&ids[]=b4c...
/// ```
#[param(rename = "ids[]")]
#[param(example = "e62df6c8b4a85fe1a67db44dc12de5db330f7ac66b72dc658afedf0f4a415b43")]
ids: Vec<PriceIdInput>,
/// If true, include the parsed price update in the `parsed` field of each returned feed. Default is `hex`.
#[serde(default)]
encoding: EncodingType,
/// If true, include the parsed price update in the `parsed` field of each returned feed. Default is `true`.
#[serde(default = "default_true")]
parsed: bool,
/// If true, allows unordered price updates to be included in the stream.
#[serde(default)]
allow_unordered: bool,
/// If true, only include benchmark prices that are the initial price updates at a given timestamp (i.e., prevPubTime != pubTime).
#[serde(default)]
benchmarks_only: bool,
}
fn default_true() -> bool {
true
}
#[utoipa::path(
get,
path = "/v2/updates/price/stream",
responses(
(status = 200, description = "Price updates retrieved successfully", body = PriceUpdate),
(status = 404, description = "Price ids not found", body = String)
),
params(StreamPriceUpdatesQueryParams)
)]
/// SSE route handler for streaming price updates.
pub async fn price_stream_sse_handler<S>(
State(state): State<ApiState<S>>,
QsQuery(params): QsQuery<StreamPriceUpdatesQueryParams>,
) -> Result<Sse<impl Stream<Item = Result<Event, Infallible>>>, RestError>
where
S: Aggregates,
S: Sync,
S: Send,
S: 'static,
{
let price_ids: Vec<PriceIdentifier> = params.ids.into_iter().map(Into::into).collect();
verify_price_ids_exist(&state, &price_ids).await?;
// Clone the update_tx receiver to listen for new price updates
let update_rx: broadcast::Receiver<AggregationEvent> = Aggregates::subscribe(&*state.state);
// Convert the broadcast receiver into a Stream
let stream = BroadcastStream::new(update_rx);
let sse_stream = stream.then(move |message| {
let state_clone = state.clone(); // Clone again to use inside the async block
let price_ids_clone = price_ids.clone(); // Clone again for use inside the async block
async move {
match message {
Ok(event) => {
match handle_aggregation_event(
event,
state_clone,
price_ids_clone,
params.encoding,
params.parsed,
params.benchmarks_only,
params.allow_unordered,
)
.await
{
Ok(Some(update)) => Ok(Event::default()
.json_data(update)
.unwrap_or_else(|e| error_event(e))),
Ok(None) => Ok(Event::default().comment("No update available")),
Err(e) => Ok(error_event(e)),
}
}
Err(e) => Ok(error_event(e)),
}
}
});
Ok(Sse::new(sse_stream).keep_alive(KeepAlive::default()))
}
async fn handle_aggregation_event<S>(
event: AggregationEvent,
state: ApiState<S>,
mut price_ids: Vec<PriceIdentifier>,
encoding: EncodingType,
parsed: bool,
benchmarks_only: bool,
allow_unordered: bool,
) -> Result<Option<PriceUpdate>>
where
S: Aggregates,
{
// Handle out-of-order events
if let AggregationEvent::OutOfOrder { .. } = event {
if !allow_unordered {
return Ok(None);
}
}
// We check for available price feed ids to ensure that the price feed ids provided exists since price feeds can be removed.
let available_price_feed_ids = Aggregates::get_price_feed_ids(&*state.state).await;
price_ids.retain(|price_feed_id| available_price_feed_ids.contains(price_feed_id));
let mut price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
&*state.state,
&price_ids,
RequestTime::AtSlot(event.slot()),
)
.await?;
let mut parsed_price_updates: Vec<ParsedPriceUpdate> = price_feeds_with_update_data
.price_feeds
.into_iter()
.map(|price_feed| price_feed.into())
.collect();
if benchmarks_only {
// Remove those with metadata.prev_publish_time != price.publish_time from parsed_price_updates
parsed_price_updates.retain(|price_feed| {
price_feed
.metadata
.prev_publish_time
.map_or(false, |prev_time| {
prev_time != price_feed.price.publish_time
})
});
// Retain price id in price_ids that are in parsed_price_updates
price_ids.retain(|price_id| {
parsed_price_updates
.iter()
.any(|price_feed| price_feed.id == RpcPriceIdentifier::from(*price_id))
});
price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
&*state.state,
&price_ids,
RequestTime::AtSlot(event.slot()),
)
.await?;
}
// Check if price_ids is empty after filtering and return None if it is
if price_ids.is_empty() {
return Ok(None);
}
let price_update_data = price_feeds_with_update_data.update_data;
let encoded_data: Vec<String> = price_update_data
.into_iter()
.map(|data| encoding.encode_str(&data))
.collect();
let binary_price_update = BinaryPriceUpdate {
encoding,
data: encoded_data,
};
Ok(Some(PriceUpdate {
binary: binary_price_update,
parsed: if parsed {
Some(parsed_price_updates)
} else {
None
},
}))
}
fn error_event<E: std::fmt::Debug>(e: E) -> Event {
Event::default()
.event("error")
.data(format!("Error receiving update: {:?}", e))
}
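The SSE route added here fans out a `tokio::sync::broadcast` channel to each HTTP client by wrapping a fresh receiver in `BroadcastStream`. A stripped-down sketch of just that bridge; the `u64` slot payload and `/stream` route are illustrative, not from the diff:

```rust
use axum::{
    extract::State,
    response::sse::{Event, KeepAlive, Sse},
    routing::get,
    Router,
};
use futures::Stream;
use std::convert::Infallible;
use tokio::sync::broadcast;
use tokio_stream::{wrappers::BroadcastStream, StreamExt as _};

// Each subscriber gets its own receiver; a slow client surfaces as a
// `BroadcastStreamRecvError` item instead of blocking the producer.
async fn stream_handler(
    State(tx): State<broadcast::Sender<u64>>,
) -> Sse<impl Stream<Item = Result<Event, Infallible>>> {
    let stream = BroadcastStream::new(tx.subscribe()).map(|msg| {
        Ok(match msg {
            Ok(slot) => Event::default().data(format!("slot {slot}")),
            Err(e) => Event::default().event("error").data(format!("{e:?}")),
        })
    });
    // Keep-alive comments stop idle proxies from closing the connection.
    Sse::new(stream).keep_alive(KeepAlive::default())
}

#[tokio::main]
async fn main() {
    let (tx, _rx) = broadcast::channel::<u64>(16);
    let _app: Router = Router::new()
        .route("/stream", get(stream_handler))
        .with_state(tx.clone());
    // In a real binary: axum::serve(listener, _app).await;
    tx.send(1).ok();
}
```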

View File

@@ -1,9 +1,5 @@
 use {
     crate::{
-        aggregate::{
-            RequestTime,
-            UnixTimestamp,
-        },
         api::{
             doc_examples,
             rest::{
@@ -17,6 +13,12 @@ use {
                 PriceIdInput,
                 PriceUpdate,
             },
+            ApiState,
+        },
+        state::aggregate::{
+            Aggregates,
+            RequestTime,
+            UnixTimestamp,
         },
     },
     anyhow::Result,
@@ -58,11 +60,11 @@ pub struct TimestampPriceUpdatesQueryParams {
     #[param(example = "e62df6c8b4a85fe1a67db44dc12de5db330f7ac66b72dc658afedf0f4a415b43")]
     ids: Vec<PriceIdInput>,

-    /// If true, include the parsed price update in the `parsed` field of each returned feed.
+    /// If true, include the parsed price update in the `parsed` field of each returned feed. Default is `hex`.
     #[serde(default)]
     encoding: EncodingType,

-    /// If true, include the parsed price update in the `parsed` field of each returned feed.
+    /// If true, include the parsed price update in the `parsed` field of each returned feed. Default is `true`.
     #[serde(default = "default_true")]
     parsed: bool,
 }
@@ -87,18 +89,22 @@ fn default_true() -> bool {
         TimestampPriceUpdatesQueryParams
     )
 )]
-pub async fn timestamp_price_updates(
-    State(state): State<crate::api::ApiState>,
+pub async fn timestamp_price_updates<S>(
+    State(state): State<ApiState<S>>,
     Path(path_params): Path<TimestampPriceUpdatesPathParams>,
     QsQuery(query_params): QsQuery<TimestampPriceUpdatesQueryParams>,
-) -> Result<Json<PriceUpdate>, RestError> {
+) -> Result<Json<PriceUpdate>, RestError>
+where
+    S: Aggregates,
+{
     let price_ids: Vec<PriceIdentifier> =
         query_params.ids.into_iter().map(|id| id.into()).collect();
     verify_price_ids_exist(&state, &price_ids).await?;

-    let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
-        &*state.state,
+    let state = &*state.state;
+    let price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
+        state,
         &price_ids,
         RequestTime::FirstAfter(path_params.publish_time),
     )

View File

@@ -1,6 +1,6 @@
 use {
     super::doc_examples,
-    crate::aggregate::{
+    crate::state::aggregate::{
         PriceFeedUpdate,
         PriceFeedsWithUpdateData,
         Slot,

View File

@@ -1,14 +1,18 @@
 use {
-    super::types::{
-        PriceIdInput,
-        RpcPriceFeed,
+    super::{
+        types::{
+            PriceIdInput,
+            RpcPriceFeed,
+        },
+        ApiState,
     },
-    crate::{
+    crate::state::{
         aggregate::{
+            Aggregates,
             AggregationEvent,
             RequestTime,
         },
-        state::State,
+        State,
     },
     anyhow::{
         anyhow,
@@ -212,11 +216,10 @@ pub async fn ws_route_handler(
 }

 #[tracing::instrument(skip(stream, state, subscriber_ip))]
-async fn websocket_handler(
-    stream: WebSocket,
-    state: super::ApiState,
-    subscriber_ip: Option<IpAddr>,
-) {
+async fn websocket_handler<S>(stream: WebSocket, state: ApiState<S>, subscriber_ip: Option<IpAddr>)
+where
+    S: Aggregates,
+{
     let ws_state = state.ws.clone();

     // Retain the recent rate limit data for the IP addresses to
@@ -235,7 +238,7 @@ async fn websocket_handler(
         })
         .inc();

-    let notify_receiver = state.update_tx.subscribe();
+    let notify_receiver = Aggregates::subscribe(&*state.state);
     let (sender, receiver) = stream.split();
     let mut subscriber = Subscriber::new(
         id,
@@ -254,11 +257,11 @@ pub type SubscriberId = usize;

 /// Subscriber is an actor that handles a single websocket connection.
 /// It listens to the store for updates and sends them to the client.
-pub struct Subscriber {
+pub struct Subscriber<S> {
     id: SubscriberId,
     ip_addr: Option<IpAddr>,
     closed: bool,
-    store: Arc<State>,
+    state: Arc<S>,
     ws_state: Arc<WsState>,
     notify_receiver: Receiver<AggregationEvent>,
     receiver: SplitStream<WebSocket>,
@@ -269,11 +272,14 @@ pub struct Subscriber<S> {
     responded_to_ping: bool,
 }

-impl Subscriber {
+impl<S> Subscriber<S>
+where
+    S: Aggregates,
+{
     pub fn new(
         id: SubscriberId,
         ip_addr: Option<IpAddr>,
-        store: Arc<State>,
+        state: Arc<S>,
         ws_state: Arc<WsState>,
         notify_receiver: Receiver<AggregationEvent>,
         receiver: SplitStream<WebSocket>,
@@ -283,7 +289,7 @@ impl<S> Subscriber<S>
             id,
             ip_addr,
             closed: false,
-            store,
+            state,
             ws_state,
             notify_receiver,
             receiver,
@@ -350,8 +356,9 @@ impl<S> Subscriber<S>
             .cloned()
             .collect::<Vec<_>>();

-        let updates = match crate::aggregate::get_price_feeds_with_update_data(
-            &*self.store,
+        let state = &*self.state;
+        let updates = match Aggregates::get_price_feeds_with_update_data(
+            state,
             &price_feed_ids,
             RequestTime::AtSlot(event.slot()),
         )
@@ -364,8 +371,7 @@ impl<S> Subscriber<S>
                 // subscription. In this case we just remove the non-existing
                 // price feed from the list and will keep sending updates for
                 // the rest.
-                let available_price_feed_ids =
-                    crate::aggregate::get_price_feed_ids(&*self.store).await;
+                let available_price_feed_ids = Aggregates::get_price_feed_ids(state).await;

                 self.price_feeds_with_config
                     .retain(|price_feed_id, _| available_price_feed_ids.contains(price_feed_id));
@@ -376,8 +382,8 @@ impl<S> Subscriber<S>
                     .cloned()
                     .collect::<Vec<_>>();

-                crate::aggregate::get_price_feeds_with_update_data(
-                    &*self.store,
+                Aggregates::get_price_feeds_with_update_data(
+                    state,
                     &price_feed_ids,
                     RequestTime::AtSlot(event.slot()),
                 )
@@ -545,7 +551,7 @@ impl<S> Subscriber<S>
                 allow_out_of_order,
             }) => {
                 let price_ids: Vec<PriceIdentifier> = ids.into_iter().map(|id| id.into()).collect();
-                let available_price_ids = crate::aggregate::get_price_feed_ids(&*self.store).await;
+                let available_price_ids = Aggregates::get_price_feed_ids(&*self.state).await;

                 let not_found_price_ids: Vec<&PriceIdentifier> = price_ids
                     .iter()

View File

@@ -19,9 +19,9 @@ pub struct Options {
     #[arg(env = "PYTHNET_HTTP_ADDR")]
     pub http_addr: String,

-    /// Pyth mapping account address.
-    #[arg(long = "mapping-address")]
+    /// Pyth mapping account address on Pythnet.
+    #[arg(long = "pythnet-mapping-addr")]
     #[arg(default_value = DEFAULT_PYTHNET_MAPPING_ADDR)]
-    #[arg(env = "MAPPING_ADDRESS")]
+    #[arg(env = "PYTHNET_MAPPING_ADDR")]
     pub mapping_addr: Pubkey,
 }

View File

@@ -17,7 +17,6 @@ use {
     },
 };

-mod aggregate;
 mod api;
 mod config;
 mod metrics_server;
@@ -28,14 +27,14 @@ mod state;

 lazy_static! {
     /// A static exit flag to indicate to running threads that we're shutting down. This is used to
-    /// gracefully shutdown the application.
+    /// gracefully shut down the application.
     ///
     /// We make this global based on the fact the:
     /// - The `Sender` side does not rely on any async runtime.
    /// - Exit logic doesn't really require carefully threading this value through the app.
     /// - The `Receiver` side of a watch channel performs the detection based on if the change
     ///   happened after the subscribe, so it means all listeners should always be notified
-    ///   currectly.
+    ///   correctly.
     pub static ref EXIT: watch::Sender<bool> = watch::channel(false).0;
 }
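The `EXIT` watch channel is the shutdown backbone: every long-lived task `tokio::select!`s between its own work and `exit.changed()`. A self-contained sketch of that pattern with an invented worker loop:

```rust
use std::time::Duration;
use tokio::sync::watch;

async fn worker(mut exit: watch::Receiver<bool>) {
    let mut ticker = tokio::time::interval(Duration::from_millis(100));
    loop {
        tokio::select! {
            // Resolves as soon as the sender publishes a new value.
            _ = exit.changed() => break,
            _ = ticker.tick() => { /* do one unit of work */ }
        }
    }
    println!("worker exited cleanly");
}

#[tokio::main]
async fn main() {
    let (exit_tx, exit_rx) = watch::channel(false);
    let handle = tokio::spawn(worker(exit_rx));
    tokio::time::sleep(Duration::from_millis(250)).await;
    // Flip the flag; every subscribed receiver observes the change.
    exit_tx.send(true).unwrap();
    handle.await.unwrap();
}
```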
@@ -54,7 +53,7 @@ async fn init() -> Result<()> {
     let (update_tx, _) = tokio::sync::broadcast::channel(1000);

     // Initialize a cache store with a 1000 element circular buffer.
-    let store = State::new(update_tx.clone(), 1000, opts.benchmarks.endpoint.clone());
+    let state = State::new(update_tx.clone(), 1000, opts.benchmarks.endpoint.clone());

     // Listen for Ctrl+C so we can set the exit flag and wait for a graceful shutdown.
     spawn(async move {
@@ -66,11 +65,11 @@ async fn init() -> Result<()> {
     // Spawn all worker tasks, and wait for all to complete (which will happen if a shutdown
     // signal has been observed).
-    let tasks = join_all([
-        Box::pin(spawn(network::wormhole::spawn(opts.clone(), store.clone()))),
-        Box::pin(spawn(network::pythnet::spawn(opts.clone(), store.clone()))),
-        Box::pin(spawn(metrics_server::run(opts.clone(), store.clone()))),
-        Box::pin(spawn(api::spawn(opts.clone(), store.clone(), update_tx))),
+    let tasks = join_all(vec![
+        spawn(network::wormhole::spawn(opts.clone(), state.clone())),
+        spawn(network::pythnet::spawn(opts.clone(), state.clone())),
+        spawn(metrics_server::run(opts.clone(), state.clone())),
+        spawn(api::spawn(opts.clone(), state.clone())),
     ])
     .await;

View File

@@ -4,10 +4,6 @@
 use {
     crate::{
-        aggregate::{
-            AccumulatorMessages,
-            Update,
-        },
         api::types::PriceFeedMetadata,
         config::RunOptions,
         network::wormhole::{
@@ -17,10 +13,17 @@ use {
             GuardianSetData,
         },
         price_feeds_metadata::{
-            store_price_feeds_metadata,
+            PriceFeedMeta,
             DEFAULT_PRICE_FEEDS_CACHE_UPDATE_INTERVAL,
         },
-        state::State,
+        state::{
+            aggregate::{
+                AccumulatorMessages,
+                Aggregates,
+                Update,
+            },
+            State,
+        },
     },
     anyhow::{
         anyhow,
@@ -136,7 +139,7 @@ async fn fetch_bridge_data(
     }
 }

-pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<()> {
+pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<!> {
     let client = PubsubClient::new(pythnet_ws_endpoint.as_ref()).await?;

     let config = RpcProgramAccountsConfig {
@@ -157,9 +160,7 @@ pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<!> {
         .program_subscribe(&system_program::id(), Some(config))
         .await?;

-    loop {
-        match notif.next().await {
-            Some(update) => {
+    while let Some(update) = notif.next().await {
         let account: Account = match update.value.account.decode() {
             Some(account) => account,
             None => {
@@ -182,8 +183,8 @@ pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<!> {
                 if candidate.to_string() == update.value.pubkey {
                     let store = store.clone();
                     tokio::spawn(async move {
-                        if let Err(err) = crate::aggregate::store_update(
-                            &store,
+                        if let Err(err) = Aggregates::store_update(
+                            &*store,
                             Update::AccumulatorMessages(accumulator_messages),
                         )
                         .await
@@ -205,11 +206,8 @@ pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<!> {
             }
         };
     }
-            None => {
-                return Err(anyhow!("Pythnet network listener terminated"));
-            }
-        }
-    }
+
+    Err(anyhow!("Pythnet network listener connection terminated"))
 }

 /// Fetch existing GuardianSet accounts from Wormhole.
@@ -325,6 +323,19 @@ pub async fn spawn(opts: RunOptions, state: Arc<State>) -> Result<()> {
     let price_feeds_state = state.clone();
     let mut exit = crate::EXIT.subscribe();
     tokio::spawn(async move {
+        // Run fetch and store once before the loop
+        if let Err(e) = fetch_and_store_price_feeds_metadata(
+            price_feeds_state.as_ref(),
+            &opts.pythnet.mapping_addr,
+            &rpc_client,
+        )
+        .await
+        {
+            tracing::error!(
+                "Error in initial fetching and storing price feeds metadata: {}",
+                e
+            );
+        }
         loop {
             tokio::select! {
                 _ = exit.changed() => break,
@@ -353,13 +364,18 @@ pub async fn spawn(opts: RunOptions, state: Arc<State>) -> Result<()> {
 }

-pub async fn fetch_and_store_price_feeds_metadata(
-    state: &State,
+pub async fn fetch_and_store_price_feeds_metadata<S>(
+    state: &S,
     mapping_address: &Pubkey,
     rpc_client: &RpcClient,
-) -> Result<Vec<PriceFeedMetadata>> {
+) -> Result<Vec<PriceFeedMetadata>>
+where
+    S: PriceFeedMeta,
+{
     let price_feeds_metadata = fetch_price_feeds_metadata(mapping_address, rpc_client).await?;
-    store_price_feeds_metadata(state, &price_feeds_metadata).await?;
+    state
+        .store_price_feeds_metadata(&price_feeds_metadata)
+        .await?;
     Ok(price_feeds_metadata)
 }

View File

@@ -7,7 +7,13 @@
 use {
     crate::{
         config::RunOptions,
-        state::State,
+        state::{
+            aggregate::{
+                Aggregates,
+                Update,
+            },
+            State,
+        },
     },
     anyhow::{
         anyhow,
@@ -43,7 +49,11 @@ use {
         Digest,
         Keccak256,
     },
-    std::sync::Arc,
+    std::{
+        sync::Arc,
+        time::Duration,
+    },
+    tokio::time::Instant,
     tonic::Request,
     wormhole_sdk::{
         vaa::{
@@ -100,10 +110,10 @@ pub struct BridgeConfig {
 /// GuardianSetData extracted from wormhole bridge account, due to no API.
 #[derive(borsh::BorshDeserialize)]
 pub struct GuardianSetData {
-    pub index: u32,
+    pub _index: u32,
     pub keys: Vec<[u8; 20]>,
-    pub creation_time: u32,
-    pub expiration_time: u32,
+    pub _creation_time: u32,
+    pub _expiration_time: u32,
 }

 /// Update the guardian set with the given ID in the state.
@@ -152,10 +162,16 @@ mod proto {
 pub async fn spawn(opts: RunOptions, state: Arc<State>) -> Result<()> {
     let mut exit = crate::EXIT.subscribe();
     loop {
+        let current_time = Instant::now();
         tokio::select! {
             _ = exit.changed() => break,
             Err(err) = run(opts.clone(), state.clone()) => {
                 tracing::error!(error = ?err, "Wormhole gRPC service failed.");
+
+                if current_time.elapsed() < Duration::from_secs(30) {
+                    tracing::error!("Wormhole listener restarting too quickly. Sleep 1s.");
+                    tokio::time::sleep(Duration::from_secs(1)).await;
+                }
             }
         }
     }
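The added `Instant` check is a crash-loop guard: if `run` returns within 30 seconds of starting, the supervisor sleeps before reconnecting rather than hammering the spy RPC. The same guard in isolation, with the failing service simulated:

```rust
use std::time::Duration;
use tokio::time::Instant;

async fn run() -> Result<(), String> {
    // Simulate a service that dies immediately, e.g. a dropped gRPC stream.
    Err("stream terminated".to_string())
}

#[tokio::main]
async fn main() {
    for _ in 0..3 {
        let started = Instant::now();
        if let Err(err) = run().await {
            eprintln!("service failed: {err}");
            // Failing soon after start means something is persistently
            // wrong; back off instead of hammering the upstream endpoint.
            if started.elapsed() < Duration::from_secs(30) {
                eprintln!("restarting too quickly, sleeping 1s");
                tokio::time::sleep(Duration::from_secs(1)).await;
            }
        }
    }
}
```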
@@ -164,7 +180,7 @@ pub async fn spawn(opts: RunOptions, state: Arc<State>) -> Result<()> {
 }

 #[tracing::instrument(skip(opts, state))]
-async fn run(opts: RunOptions, state: Arc<State>) -> Result<()> {
+async fn run(opts: RunOptions, state: Arc<State>) -> Result<!> {
     let mut client = SpyRpcServiceClient::connect(opts.wormhole.spy_rpc_addr).await?;
     let mut stream = client
         .subscribe_signed_vaa(Request::new(SubscribeSignedVaaRequest {
@@ -184,7 +200,7 @@ async fn run(opts: RunOptions, state: Arc<State>) -> Result<!> {
         }
     }

-    Ok(())
+    Err(anyhow!("Wormhole gRPC stream terminated."))
 }

 /// Process a message received via a Wormhole gRPC connection.
@@ -225,7 +241,10 @@ pub async fn process_message(state: Arc<State>, vaa_bytes: Vec<u8>) -> Result<()
     )?;

     // Finally, store the resulting VAA in Hermes.
-    store_vaa(state.clone(), vaa.sequence, vaa_bytes).await?;
+    let sequence = vaa.sequence;
+    tokio::spawn(async move {
+        store_vaa(state.clone(), sequence, vaa_bytes).await;
+    });

     Ok(())
 }
@@ -334,16 +353,14 @@ pub fn verify_vaa<'a>(
 }

 #[tracing::instrument(skip(state, vaa_bytes))]
-pub async fn store_vaa(state: Arc<State>, sequence: u64, vaa_bytes: Vec<u8>) -> Result<()> {
+pub async fn store_vaa(state: Arc<State>, sequence: u64, vaa_bytes: Vec<u8>) {
     // Check VAA hasn't already been seen, this may have been checked previously
-    // but due to async nature It's possible other threads have mutated the state
+    // but due to async nature it's possible other threads have mutated the state
     // since this VAA started processing.
     let mut observed_vaa_seqs = state.observed_vaa_seqs.write().await;
-    ensure!(
-        !observed_vaa_seqs.contains(&sequence),
-        "Previously observed VAA: {}",
-        sequence,
-    );
+    if observed_vaa_seqs.contains(&sequence) {
+        return;
+    }

     // Clear old cached VAA sequences.
     while observed_vaa_seqs.len() > OBSERVED_CACHE_SIZE {
@@ -351,5 +368,7 @@ pub async fn store_vaa(state: Arc<State>, sequence: u64, vaa_bytes: Vec<u8>) {
     }

     // Hand the VAA to the aggregate store.
-    crate::aggregate::store_update(&state, crate::aggregate::Update::Vaa(vaa_bytes)).await
+    if let Err(e) = Aggregates::store_update(&*state, Update::Vaa(vaa_bytes)).await {
+        tracing::error!(error = ?e, "Failed to store VAA in aggregate store.");
+    }
 }

View File

@@ -0,0 +1,96 @@
use {
crate::{
api::types::{
AssetType,
PriceFeedMetadata,
},
state::State,
},
anyhow::Result,
tokio::sync::RwLock,
};
pub const DEFAULT_PRICE_FEEDS_CACHE_UPDATE_INTERVAL: u64 = 600;
pub struct PriceFeedMetaState {
pub data: RwLock<Vec<PriceFeedMetadata>>,
}
impl PriceFeedMetaState {
pub fn new() -> Self {
Self {
data: RwLock::new(Vec::new()),
}
}
}
/// Allow downcasting State into PriceFeedMetaState for functions that depend on the `PriceFeedMeta` service.
impl<'a> From<&'a State> for &'a PriceFeedMetaState {
fn from(state: &'a State) -> &'a PriceFeedMetaState {
&state.price_feed_meta
}
}
#[async_trait::async_trait]
pub trait PriceFeedMeta {
async fn retrieve_price_feeds_metadata(&self) -> Result<Vec<PriceFeedMetadata>>;
async fn store_price_feeds_metadata(
&self,
price_feeds_metadata: &[PriceFeedMetadata],
) -> Result<()>;
async fn get_price_feeds_metadata(
&self,
query: Option<String>,
asset_type: Option<AssetType>,
) -> Result<Vec<PriceFeedMetadata>>;
}
#[async_trait::async_trait]
impl<T> PriceFeedMeta for T
where
for<'a> &'a T: Into<&'a PriceFeedMetaState>,
T: Sync,
{
async fn retrieve_price_feeds_metadata(&self) -> Result<Vec<PriceFeedMetadata>> {
let price_feeds_metadata = self.into().data.read().await;
Ok(price_feeds_metadata.clone())
}
async fn store_price_feeds_metadata(
&self,
price_feeds_metadata: &[PriceFeedMetadata],
) -> Result<()> {
let mut price_feeds_metadata_write_guard = self.into().data.write().await;
*price_feeds_metadata_write_guard = price_feeds_metadata.to_vec();
Ok(())
}
async fn get_price_feeds_metadata(
&self,
query: Option<String>,
asset_type: Option<AssetType>,
) -> Result<Vec<PriceFeedMetadata>> {
let mut price_feeds_metadata = self.retrieve_price_feeds_metadata().await?;
// Filter by query if provided
if let Some(query_str) = &query {
price_feeds_metadata.retain(|feed| {
feed.attributes.get("symbol").map_or(false, |symbol| {
symbol.to_lowercase().contains(&query_str.to_lowercase())
})
});
}
// Filter by asset_type if provided
if let Some(asset_type) = &asset_type {
price_feeds_metadata.retain(|feed| {
feed.attributes.get("asset_type").map_or(false, |type_str| {
type_str.to_lowercase() == asset_type.to_string().to_lowercase()
})
});
}
Ok(price_feeds_metadata)
}
}
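This file introduces the service pattern used throughout the rest of the diff: a trait is blanket-implemented for any `T` whose reference projects (via `From`/`Into`) onto the service's sub-state, so the composite `State` picks the trait up from a two-line `From` impl. A minimized sketch with invented `Counter`/`AppState` names:

```rust
use std::sync::Arc;
use tokio::sync::RwLock;

// Sub-state owned by one "service".
struct CounterState {
    value: RwLock<u64>,
}

// Global application state composes sub-states.
struct AppState {
    counter: CounterState,
}

// The only per-state glue: project &AppState down to the sub-state.
impl<'a> From<&'a AppState> for &'a CounterState {
    fn from(state: &'a AppState) -> &'a CounterState {
        &state.counter
    }
}

#[async_trait::async_trait]
trait Counter {
    async fn increment(&self) -> u64;
}

// Blanket impl: anything projectable to CounterState is a Counter.
// Inside this generic impl, `self.into()` unambiguously resolves to
// `&CounterState` because the where-clause provides the only Into impl.
#[async_trait::async_trait]
impl<T> Counter for T
where
    for<'a> &'a T: Into<&'a CounterState>,
    T: Sync,
{
    async fn increment(&self) -> u64 {
        let mut value = self.into().value.write().await;
        *value += 1;
        *value
    }
}

#[tokio::main]
async fn main() {
    let state = Arc::new(AppState {
        counter: CounterState { value: RwLock::new(0) },
    });
    // `AppState` never mentions `Counter`, yet it implements it.
    assert_eq!(state.increment().await, 1);
    assert_eq!(Counter::increment(&*state).await, 2);
}
```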

View File

@@ -1,14 +1,17 @@
 //! This module contains the global state of the application.

 use {
-    self::cache::Cache,
-    crate::{
+    self::{
         aggregate::{
             AggregateState,
             AggregationEvent,
         },
-        api::types::PriceFeedMetadata,
+        benchmarks::BenchmarksState,
+        cache::CacheState,
+    },
+    crate::{
         network::wormhole::GuardianSet,
+        price_feeds_metadata::PriceFeedMetaState,
     },
     prometheus_client::registry::Registry,
     reqwest::Url,
@@ -25,13 +28,22 @@ use {
     },
 };

+pub mod aggregate;
 pub mod benchmarks;
 pub mod cache;

 pub struct State {
-    /// Storage is a short-lived cache of the state of all the updates that have been passed to the
-    /// store.
-    pub cache: Cache,
+    /// State for the `Cache` service for short-lived storage of updates.
+    pub cache: CacheState,
+
+    /// State for the `Benchmarks` service for looking up historical updates.
+    pub benchmarks: BenchmarksState,
+
+    /// State for the `PriceFeedMeta` service for looking up metadata related to Pyth price feeds.
+    pub price_feed_meta: PriceFeedMetaState,
+
+    /// State for accessing/storing Pyth price aggregates.
+    pub aggregates: AggregateState,

     /// Sequence numbers of lately observed Vaas. Store uses this set
     /// to ignore the previously observed Vaas as a performance boost.
@@ -40,20 +52,8 @@ pub struct State {
     /// Wormhole guardian sets. It is used to verify Vaas before using them.
     pub guardian_set: RwLock<BTreeMap<u32, GuardianSet>>,

-    /// The sender to the channel between Store and Api to notify completed updates.
-    pub api_update_tx: Sender<AggregationEvent>,
-
-    /// The aggregate module state.
-    pub aggregate_state: RwLock<AggregateState>,
-
-    /// Benchmarks endpoint
-    pub benchmarks_endpoint: Option<Url>,
-
     /// Metrics registry
     pub metrics_registry: RwLock<Registry>,
-
-    /// Price feeds metadata
-    pub price_feeds_metadata: RwLock<Vec<PriceFeedMetadata>>,
 }

 impl State {
@@ -64,14 +64,13 @@ impl State {
     ) -> Arc<Self> {
         let mut metrics_registry = Registry::default();
         Arc::new(Self {
-            cache: Cache::new(cache_size),
+            cache: CacheState::new(cache_size),
+            benchmarks: BenchmarksState::new(benchmarks_endpoint),
+            price_feed_meta: PriceFeedMetaState::new(),
+            aggregates: AggregateState::new(update_tx, &mut metrics_registry),
             observed_vaa_seqs: RwLock::new(Default::default()),
             guardian_set: RwLock::new(Default::default()),
-            api_update_tx: update_tx,
-            aggregate_state: RwLock::new(AggregateState::new(&mut metrics_registry)),
-            benchmarks_endpoint,
             metrics_registry: RwLock::new(metrics_registry),
-            price_feeds_metadata: RwLock::new(Default::default()),
         })
     }
 }

View File

@@ -20,10 +20,11 @@ use {
     },
     crate::{
         network::wormhole::VaaBytes,
+        price_feeds_metadata::PriceFeedMeta,
         state::{
             benchmarks::Benchmarks,
             cache::{
-                AggregateCache,
+                Cache,
                 MessageState,
                 MessageStateFilter,
             },
@@ -59,6 +60,13 @@ use {
         collections::HashSet,
         time::Duration,
     },
+    tokio::sync::{
+        broadcast::{
+            Receiver,
+            Sender,
+        },
+        RwLock,
+    },
     wormhole_sdk::Vaa,
 };
@@ -102,8 +110,7 @@ impl AggregationEvent {
     }
 }

-#[derive(Clone, Debug)]
-pub struct AggregateState {
+pub struct AggregateStateData {
     /// The latest completed slot. This is used to check whether a completed state is new or out of
     /// order.
     pub latest_completed_slot: Option<Slot>,
@@ -119,7 +126,7 @@ pub struct AggregateStateData {
     pub metrics: metrics::Metrics,
 }

-impl AggregateState {
+impl AggregateStateData {
     pub fn new(metrics_registry: &mut Registry) -> Self {
         Self {
             latest_completed_slot: None,
@@ -130,6 +137,20 @@ impl AggregateStateData {
         }
     }
 }

+pub struct AggregateState {
+    pub data: RwLock<AggregateStateData>,
+    pub api_update_tx: Sender<AggregationEvent>,
+}
+
+impl AggregateState {
+    pub fn new(update_tx: Sender<AggregationEvent>, metrics_registry: &mut Registry) -> Self {
+        Self {
+            data: RwLock::new(AggregateStateData::new(metrics_registry)),
+            api_update_tx: update_tx,
+        }
+    }
+}
+
 /// Accumulator messages coming from Pythnet validators.
 ///
 /// The validators writes the accumulator messages using Borsh with
@@ -177,9 +198,48 @@ const READINESS_STALENESS_THRESHOLD: Duration = Duration::from_secs(30);
 /// 10 slots is almost 5 seconds.
 const READINESS_MAX_ALLOWED_SLOT_LAG: Slot = 10;

-/// Stores the update data in the store
-#[tracing::instrument(skip(state, update))]
-pub async fn store_update(state: &State, update: Update) -> Result<()> {
+#[async_trait::async_trait]
+pub trait Aggregates
+where
+    Self: Cache,
+    Self: Benchmarks,
+    Self: PriceFeedMeta,
+{
+    fn subscribe(&self) -> Receiver<AggregationEvent>;
+    async fn is_ready(&self) -> bool;
+    async fn store_update(&self, update: Update) -> Result<()>;
+    async fn get_price_feed_ids(&self) -> HashSet<PriceIdentifier>;
+    async fn get_price_feeds_with_update_data(
+        &self,
+        price_ids: &[PriceIdentifier],
+        request_time: RequestTime,
+    ) -> Result<PriceFeedsWithUpdateData>;
+}
+
+/// Allow downcasting State into AggregateState for functions that depend on the `Aggregates` service.
+impl<'a> From<&'a State> for &'a AggregateState {
+    fn from(state: &'a State) -> &'a AggregateState {
+        &state.aggregates
+    }
+}
+
+#[async_trait::async_trait]
+impl<T> Aggregates for T
+where
+    for<'a> &'a T: Into<&'a AggregateState>,
+    T: Sync,
+    T: Send,
+    T: Cache,
+    T: Benchmarks,
+    T: PriceFeedMeta,
+{
+    fn subscribe(&self) -> Receiver<AggregationEvent> {
+        self.into().api_update_tx.subscribe()
+    }
+
+    /// Stores the update data in the store
+    #[tracing::instrument(skip(self, update))]
+    async fn store_update(&self, update: Update) -> Result<()> {
         // The slot that the update is originating from. It should be available
         // in all the updates.
         let slot = match update {
@@ -192,14 +252,14 @@ impl<T> Aggregates for T
                 tracing::info!(slot = proof.slot, "Storing VAA Merkle Proof.");

                 store_wormhole_merkle_verified_message(
-                    state,
+                    self,
                     proof.clone(),
                     update_vaa.to_owned(),
                 )
                 .await?;

-                state
-                    .aggregate_state
+                self.into()
+                    .data
                     .write()
                     .await
                     .metrics
@@ -213,12 +273,11 @@ impl<T> Aggregates for T
                 let slot = accumulator_messages.slot;
                 tracing::info!(slot = slot, "Storing Accumulator Messages.");

-                state
-                    .store_accumulator_messages(accumulator_messages)
+                self.store_accumulator_messages(accumulator_messages)
                     .await?;

-                state
-                    .aggregate_state
+                self.into()
+                    .data
                     .write()
                     .await
                     .metrics
@@ -229,15 +288,15 @@ impl<T> Aggregates for T
         // Update the aggregate state with the latest observed slot
         {
-            let mut aggregate_state = state.aggregate_state.write().await;
+            let mut aggregate_state = self.into().data.write().await;
             aggregate_state.latest_observed_slot = aggregate_state
                 .latest_observed_slot
                 .map(|latest| latest.max(slot))
                 .or(Some(slot));
         }

-        let accumulator_messages = state.fetch_accumulator_messages(slot).await?;
-        let wormhole_merkle_state = state.fetch_wormhole_merkle_state(slot).await?;
+        let accumulator_messages = self.fetch_accumulator_messages(slot).await?;
+        let wormhole_merkle_state = self.fetch_wormhole_merkle_state(slot).await?;

         let (accumulator_messages, wormhole_merkle_state) =
             match (accumulator_messages, wormhole_merkle_state) {
@@ -259,28 +318,32 @@ impl<T> Aggregates for T
             .collect::<HashSet<_>>();

         tracing::info!(len = message_states.len(), "Storing Message States.");
-        state.store_message_states(message_states).await?;
+        self.store_message_states(message_states).await?;

         // Update the aggregate state
-        let mut aggregate_state = state.aggregate_state.write().await;
+        let mut aggregate_state = self.into().data.write().await;

-        // Check if the update is new or out of order
-        match aggregate_state.latest_completed_slot {
+        // Send update event to subscribers. We are purposefully ignoring the result
+        // because there might be no subscribers.
+        let _ = match aggregate_state.latest_completed_slot {
             None => {
                 aggregate_state.latest_completed_slot.replace(slot);
-                state.api_update_tx.send(AggregationEvent::New { slot })?;
+                self.into()
+                    .api_update_tx
+                    .send(AggregationEvent::New { slot })
             }
             Some(latest) if slot > latest => {
-                state.prune_removed_keys(message_state_keys).await;
+                self.prune_removed_keys(message_state_keys).await;
                 aggregate_state.latest_completed_slot.replace(slot);
-                state.api_update_tx.send(AggregationEvent::New { slot })?;
-            }
-            _ => {
-                state
-                    .api_update_tx
-                    .send(AggregationEvent::OutOfOrder { slot })?;
-            }
-        }
+                self.into()
+                    .api_update_tx
+                    .send(AggregationEvent::New { slot })
+            }
+            _ => self
+                .into()
+                .api_update_tx
+                .send(AggregationEvent::OutOfOrder { slot }),
+        };

         aggregate_state.latest_completed_slot = aggregate_state
             .latest_completed_slot
@@ -296,6 +359,59 @@ impl<T> Aggregates for T
             .observe(slot, metrics::Event::CompletedUpdate);

         Ok(())
+    }
+
+    async fn get_price_feeds_with_update_data(
+        &self,
+        price_ids: &[PriceIdentifier],
+        request_time: RequestTime,
+    ) -> Result<PriceFeedsWithUpdateData> {
+        match get_verified_price_feeds(self, price_ids, request_time.clone()).await {
+            Ok(price_feeds_with_update_data) => Ok(price_feeds_with_update_data),
+            Err(e) => {
+                if let RequestTime::FirstAfter(publish_time) = request_time {
+                    return Benchmarks::get_verified_price_feeds(self, price_ids, publish_time)
+                        .await;
+                }
+                Err(e)
+            }
+        }
+    }
+
+    async fn get_price_feed_ids(&self) -> HashSet<PriceIdentifier> {
+        Cache::message_state_keys(self)
+            .await
+            .iter()
+            .map(|key| PriceIdentifier::new(key.feed_id))
+            .collect()
+    }
+
+    async fn is_ready(&self) -> bool {
+        let metadata = self.into().data.read().await;
+        let price_feeds_metadata = PriceFeedMeta::retrieve_price_feeds_metadata(self)
+            .await
+            .unwrap();
+
+        let has_completed_recently = match metadata.latest_completed_update_at.as_ref() {
+            Some(latest_completed_update_time) => {
+                latest_completed_update_time.elapsed() < READINESS_STALENESS_THRESHOLD
+            }
+            None => false,
+        };
+
+        let is_not_behind = match (
+            metadata.latest_completed_slot,
+            metadata.latest_observed_slot,
+        ) {
+            (Some(latest_completed_slot), Some(latest_observed_slot)) => {
+                latest_observed_slot - latest_completed_slot <= READINESS_MAX_ALLOWED_SLOT_LAG
+            }
+            _ => false,
+        };
+
+        let is_metadata_loaded = !price_feeds_metadata.is_empty();
+        has_completed_recently && is_not_behind && is_metadata_loaded
+    }
 }
 #[tracing::instrument(skip(accumulator_messages, wormhole_merkle_state))]
@@ -336,7 +452,7 @@ async fn get_verified_price_feeds<S>(
     request_time: RequestTime,
 ) -> Result<PriceFeedsWithUpdateData>
 where
-    S: AggregateCache,
+    S: Cache,
 {
     let messages = state
         .fetch_message_states(
@@ -390,71 +506,12 @@ where
     })
 }

-pub async fn get_price_feeds_with_update_data<S>(
-    state: &S,
-    price_ids: &[PriceIdentifier],
-    request_time: RequestTime,
-) -> Result<PriceFeedsWithUpdateData>
-where
-    S: AggregateCache,
-    S: Benchmarks,
-{
-    match get_verified_price_feeds(state, price_ids, request_time.clone()).await {
-        Ok(price_feeds_with_update_data) => Ok(price_feeds_with_update_data),
-        Err(e) => {
-            if let RequestTime::FirstAfter(publish_time) = request_time {
-                return Benchmarks::get_verified_price_feeds(state, price_ids, publish_time).await;
-            }
-            Err(e)
-        }
-    }
-}
-
-pub async fn get_price_feed_ids<S>(state: &S) -> HashSet<PriceIdentifier>
-where
-    S: AggregateCache,
-{
-    state
-        .message_state_keys()
-        .await
-        .iter()
-        .map(|key| PriceIdentifier::new(key.feed_id))
-        .collect()
-}
-
-pub async fn is_ready(state: &State) -> bool {
-    let metadata = state.aggregate_state.read().await;
-    let price_feeds_metadata = state.price_feeds_metadata.read().await;
-
-    let has_completed_recently = match metadata.latest_completed_update_at.as_ref() {
-        Some(latest_completed_update_time) => {
-            latest_completed_update_time.elapsed() < READINESS_STALENESS_THRESHOLD
-        }
-        None => false,
-    };
-
-    let is_not_behind = match (
-        metadata.latest_completed_slot,
-        metadata.latest_observed_slot,
-    ) {
-        (Some(latest_completed_slot), Some(latest_observed_slot)) => {
-            latest_observed_slot - latest_completed_slot <= READINESS_MAX_ALLOWED_SLOT_LAG
-        }
-        _ => false,
-    };
-
-    let is_metadata_loaded = !price_feeds_metadata.is_empty();
-    has_completed_recently && is_not_behind && is_metadata_loaded
-}
-
 #[cfg(test)]
 mod test {
     use {
         super::*,
         crate::{
             api::types::PriceFeedMetadata,
-            price_feeds_metadata::store_price_feeds_metadata,
             state::test::setup_state,
         },
         futures::future::join_all,
@@ -468,10 +525,7 @@ mod test {
             Accumulator,
         },
         hashers::keccak256_160::Keccak160,
-        messages::{
-            Message,
-            PriceFeedMessage,
-        },
+        messages::PriceFeedMessage,
         wire::v1::{
             AccumulatorUpdateData,
             Proof,
@@ -559,7 +613,7 @@ mod test {
     }

     pub async fn store_multiple_concurrent_valid_updates(state: Arc<State>, updates: Vec<Update>) {
-        let res = join_all(updates.into_iter().map(|u| store_update(&state, u))).await;
+        let res = join_all(updates.into_iter().map(|u| (&state).store_update(u))).await;
         // Check that all store_update calls succeeded
         assert!(res.into_iter().all(|r| r.is_ok()));
     }
@@ -585,14 +639,14 @@ mod test {
         // Check the price ids are stored correctly
         assert_eq!(
-            get_price_feed_ids(&*state).await,
+            (&*state).get_price_feed_ids().await,
             vec![PriceIdentifier::new([100; 32])].into_iter().collect()
         );

         // Check get_price_feeds_with_update_data retrieves the correct
         // price feed with correct update data.
-        let price_feeds_with_update_data = get_price_feeds_with_update_data(
-            &*state,
+        let price_feeds_with_update_data = (&*state)
+            .get_price_feeds_with_update_data(
                 &[PriceIdentifier::new([100; 32])],
                 RequestTime::Latest,
             )
@@ -710,7 +764,7 @@ mod test {
         // Check the price ids are stored correctly
         assert_eq!(
-            get_price_feed_ids(&*state).await,
+            (&*state).get_price_feed_ids().await,
             vec![
                 PriceIdentifier::new([100; 32]),
                 PriceIdentifier::new([200; 32])
@@ -720,8 +774,8 @@ mod test {
         );

         // Check that price feed 2 exists
-        assert!(get_price_feeds_with_update_data(
-            &*state,
+        assert!((&*state)
+            .get_price_feeds_with_update_data(
                 &[PriceIdentifier::new([200; 32])],
                 RequestTime::Latest,
             )
@@ -747,12 +801,12 @@ mod test {
         // Check that price feed 2 does not exist anymore
         assert_eq!(
-            get_price_feed_ids(&*state).await,
+            (&*state).get_price_feed_ids().await,
             vec![PriceIdentifier::new([100; 32]),].into_iter().collect()
         );

-        assert!(get_price_feeds_with_update_data(
-            &*state,
+        assert!((&*state)
+            .get_price_feeds_with_update_data(
                 &[PriceIdentifier::new([200; 32])],
                 RequestTime::Latest,
             )
@@ -793,8 +847,8 @@ mod test {
         MockClock::advance(Duration::from_secs(1));

         // Get the price feeds with update data
-        let price_feeds_with_update_data = get_price_feeds_with_update_data(
-            &*state,
+        let price_feeds_with_update_data = (&*state)
+            .get_price_feeds_with_update_data(
                 &[PriceIdentifier::new([100; 32])],
                 RequestTime::Latest,
             )
@@ -810,24 +864,22 @@ mod test {
         // Add a dummy price feeds metadata
-        store_price_feeds_metadata(
-            &state,
-            &[PriceFeedMetadata {
+        state
+            .store_price_feeds_metadata(&[PriceFeedMetadata {
                 id: PriceIdentifier::new([100; 32]),
                 attributes: Default::default(),
-            }],
-        )
+            }])
             .await
             .unwrap();

         // Check the state is ready
-        assert!(is_ready(&state).await);
+        assert!((&state).is_ready().await);

         // Advance the clock to make the prices stale
         MockClock::advance_system_time(READINESS_STALENESS_THRESHOLD);
         MockClock::advance(READINESS_STALENESS_THRESHOLD);

         // Check the state is not ready
-        assert!(!is_ready(&state).await);
+        assert!(!(&state).is_ready().await);
     }

     /// Test that the state retains the latest slots upon cache eviction.
@@ -870,8 +922,8 @@ mod test {
         // Check the last 100 slots are retained
         for slot in 900..1000 {
-            let price_feeds_with_update_data = get_price_feeds_with_update_data(
-                &*state,
+            let price_feeds_with_update_data = (&*state)
+                .get_price_feeds_with_update_data(
                     &[
                         PriceIdentifier::new([100; 32]),
                         PriceIdentifier::new([200; 32]),
@@ -887,8 +939,8 @@ mod test {
         // Check nothing else is retained
         for slot in 0..900 {
-            assert!(get_price_feeds_with_update_data(
-                &*state,
+            assert!((&*state)
+                .get_price_feeds_with_update_data(
                     &[
                         PriceIdentifier::new([100; 32]),
                         PriceIdentifier::new([200; 32])

View File

@@ -7,7 +7,7 @@ use {
     crate::{
         network::wormhole::VaaBytes,
         state::cache::{
-            AggregateCache,
+            Cache,
             MessageState,
         },
     },
@@ -70,14 +70,14 @@ impl From<MessageState> for RawMessageWithMerkleProof {
 }

 pub async fn store_wormhole_merkle_verified_message<S>(
-    store: &S,
+    state: &S,
     root: WormholeMerkleRoot,
     vaa: VaaBytes,
 ) -> Result<()>
 where
-    S: AggregateCache,
+    S: Cache,
 {
-    store
+    state
         .store_wormhole_merkle_state(WormholeMerkleState { root, vaa })
         .await?;
     Ok(())
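Because the function is now bound on the Cache trait instead of a concrete store type, any cache-bearing state can be passed in. A hedged call-site sketch (the WormholeMerkleRoot field layout — slot, ring_size, root — is assumed from pythnet_sdk::wire::v1, and the values are placeholders):

// Illustrative only: persist a placeholder verified root and its VAA
// through any S that implements Cache.
async fn store_example<S: Cache>(state: &S, vaa: VaaBytes) -> anyhow::Result<()> {
    let root = WormholeMerkleRoot {
        slot: 10,
        ring_size: 100,
        root: [0u8; 20],
    };
    store_wormhole_merkle_verified_message(state, root, vaa).await
}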


@@ -1,19 +1,21 @@
 //! This module communicates with Pyth Benchmarks, an API for historical price feeds and their updates.
 use {
-    crate::{
+    super::{
         aggregate::{
             PriceFeedsWithUpdateData,
             UnixTimestamp,
         },
-        api::types::PriceUpdate,
+        State,
     },
+    crate::api::types::PriceUpdate,
     anyhow::Result,
     base64::{
         engine::general_purpose::STANDARD as base64_standard_engine,
         Engine as _,
     },
     pyth_sdk::PriceIdentifier,
+    reqwest::Url,
     serde::Deserialize,
 };
@@ -50,6 +52,23 @@ impl TryFrom<BinaryBlob> for Vec<Vec<u8>> {
     }
 }

+pub struct BenchmarksState {
+    endpoint: Option<Url>,
+}
+
+impl BenchmarksState {
+    pub fn new(url: Option<Url>) -> Self {
+        Self { endpoint: url }
+    }
+}
+
+/// Allow downcasting State into BenchmarksState for functions that depend on the `Benchmarks` service.
+impl<'a> From<&'a State> for &'a BenchmarksState {
+    fn from(state: &'a State) -> &'a BenchmarksState {
+        &state.benchmarks
+    }
+}
+
 #[async_trait::async_trait]
 pub trait Benchmarks {
     async fn get_verified_price_feeds(
@@ -60,21 +79,25 @@ pub trait Benchmarks {
 }

 #[async_trait::async_trait]
-impl Benchmarks for crate::state::State {
+impl<T> Benchmarks for T
+where
+    for<'a> &'a T: Into<&'a BenchmarksState>,
+    T: Sync,
+{
     async fn get_verified_price_feeds(
         &self,
         price_ids: &[PriceIdentifier],
         publish_time: UnixTimestamp,
     ) -> Result<PriceFeedsWithUpdateData> {
         let endpoint = self
-            .benchmarks_endpoint
+            .into()
+            .endpoint
             .as_ref()
             .ok_or_else(|| anyhow::anyhow!("Benchmarks endpoint is not set"))?
             .join(&format!("/v1/updates/price/{}", publish_time))
             .unwrap();

-        let client = reqwest::Client::new();
-        let mut request = client
+        let mut request = reqwest::Client::new()
             .get(endpoint)
             .timeout(BENCHMARKS_REQUEST_TIMEOUT)
             .query(&[("encoding", "hex")])
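The `self.into()` chain works because the blanket impl only requires that a reference to the implementing type converts into &BenchmarksState. A self-contained sketch of this downcast-plus-blanket-impl pattern (simplified to a synchronous trait, with the endpoint held as a String rather than an Option<Url>):

struct BenchmarksState {
    endpoint: Option<String>,
}

struct State {
    benchmarks: BenchmarksState,
}

// "Downcast" the full state to the component this service needs.
impl<'a> From<&'a State> for &'a BenchmarksState {
    fn from(state: &'a State) -> &'a BenchmarksState {
        &state.benchmarks
    }
}

trait Benchmarks {
    fn endpoint(&self) -> Option<&str>;
}

// Blanket impl: any type viewable as &BenchmarksState gets the trait,
// so tests can implement From on a small mock instead of the full State.
impl<T> Benchmarks for T
where
    for<'a> &'a T: Into<&'a BenchmarksState>,
{
    fn endpoint(&self) -> Option<&str> {
        let bench: &BenchmarksState = self.into();
        bench.endpoint.as_deref()
    }
}

fn main() {
    let state = State {
        benchmarks: BenchmarksState {
            endpoint: Some("https://example.com".to_string()),
        },
    };
    assert_eq!(state.endpoint(), Some("https://example.com"));
}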


@@ -1,5 +1,6 @@
 use {
-    crate::aggregate::{
+    super::State,
+    crate::state::aggregate::{
         wormhole_merkle::WormholeMerkleState,
         AccumulatorMessages,
         ProofSet,
@@ -96,23 +97,186 @@ pub enum MessageStateFilter {
     Only(MessageType),
 }

-pub struct Cache {
-    /// Accumulator messages cache
-    ///
-    /// We do not write to this cache much, so we can use a simple RwLock instead of a DashMap.
-    accumulator_messages_cache: Arc<RwLock<BTreeMap<Slot, AccumulatorMessages>>>,
+/// A Cache of AccumulatorMessage by slot. We do not write to this cache much, so we can use a simple RwLock instead of a DashMap.
+type AccumulatorMessagesCache = Arc<RwLock<BTreeMap<Slot, AccumulatorMessages>>>;

-    /// Wormhole merkle state cache
-    ///
-    /// We do not write to this cache much, so we can use a simple RwLock instead of a DashMap.
-    wormhole_merkle_state_cache: Arc<RwLock<BTreeMap<Slot, WormholeMerkleState>>>,
+/// A Cache of WormholeMerkleState by slot. We do not write to this cache much, so we can use a simple RwLock instead of a DashMap.
+type WormholeMerkleStateCache = Arc<RwLock<BTreeMap<Slot, WormholeMerkleState>>>;

-    message_cache: Arc<RwLock<HashMap<MessageStateKey, BTreeMap<MessageStateTime, MessageState>>>>,
+/// A Cache of `Time<->MessageState` by feed id.
+type MessageCache = Arc<RwLock<HashMap<MessageStateKey, BTreeMap<MessageStateTime, MessageState>>>>;
+
+/// A collection of caches for various program state.
+pub struct CacheState {
+    accumulator_messages_cache: AccumulatorMessagesCache,
+    wormhole_merkle_state_cache: WormholeMerkleStateCache,
+    message_cache: MessageCache,
     cache_size: u64,
 }

+impl CacheState {
+    pub fn new(size: u64) -> Self {
+        Self {
+            accumulator_messages_cache: Arc::new(RwLock::new(BTreeMap::new())),
+            wormhole_merkle_state_cache: Arc::new(RwLock::new(BTreeMap::new())),
+            message_cache: Arc::new(RwLock::new(HashMap::new())),
+            cache_size: size,
+        }
+    }
+}
+
+/// Allow downcasting State into CacheState for functions that depend on the `Cache` service.
+impl<'a> From<&'a State> for &'a CacheState {
+    fn from(state: &'a State) -> &'a CacheState {
+        &state.cache
+    }
+}
+
+#[async_trait::async_trait]
+pub trait Cache {
+    async fn store_message_states(&self, message_states: Vec<MessageState>) -> Result<()>;
+    async fn prune_removed_keys(&self, current_keys: HashSet<MessageStateKey>);
+    async fn store_accumulator_messages(
+        &self,
+        accumulator_messages: AccumulatorMessages,
+    ) -> Result<()>;
+    async fn fetch_accumulator_messages(&self, slot: Slot) -> Result<Option<AccumulatorMessages>>;
+    async fn store_wormhole_merkle_state(
+        &self,
+        wormhole_merkle_state: WormholeMerkleState,
+    ) -> Result<()>;
+    async fn fetch_wormhole_merkle_state(&self, slot: Slot) -> Result<Option<WormholeMerkleState>>;
+    async fn message_state_keys(&self) -> Vec<MessageStateKey>;
+    async fn fetch_message_states(
+        &self,
+        ids: Vec<FeedId>,
+        request_time: RequestTime,
+        filter: MessageStateFilter,
+    ) -> Result<Vec<MessageState>>;
+}
+
+#[async_trait::async_trait]
+impl<T> Cache for T
+where
+    for<'a> &'a T: Into<&'a CacheState>,
+    T: Sync,
+{
+    async fn message_state_keys(&self) -> Vec<MessageStateKey> {
+        self.into()
+            .message_cache
+            .read()
+            .await
+            .iter()
+            .map(|entry| entry.0.clone())
+            .collect::<Vec<_>>()
+    }
+
+    async fn store_message_states(&self, message_states: Vec<MessageState>) -> Result<()> {
+        let mut message_cache = self.into().message_cache.write().await;
+
+        for message_state in message_states {
+            let key = message_state.key();
+            let time = message_state.time();
+            let cache = message_cache.entry(key).or_insert_with(BTreeMap::new);
+            cache.insert(time, message_state);
+
+            // Remove the earliest message states if the cache size is exceeded
+            while cache.len() > self.into().cache_size as usize {
+                cache.pop_first();
+            }
+        }
+
+        Ok(())
+    }
+
+    /// This method takes the current feed ids and prunes the cache for the keys
+    /// that are not present in the current feed ids.
+    ///
+    /// There is a side-effect of this: if a key gets removed, we will
+    /// lose the cache for that key and cannot retrieve it for historical
+    /// price queries.
+    async fn prune_removed_keys(&self, current_keys: HashSet<MessageStateKey>) {
+        let mut message_cache = self.into().message_cache.write().await;
+
+        // Sometimes, some keys are removed from the accumulator. We track which keys are not
+        // present in the message states and remove them from the cache.
+        let keys_in_cache = message_cache
+            .iter()
+            .map(|(key, _)| key.clone())
+            .collect::<HashSet<_>>();
+
+        for key in keys_in_cache {
+            if !current_keys.contains(&key) {
+                tracing::info!("Feed {:?} seems to be removed. Removing it from cache", key);
+                message_cache.remove(&key);
+            }
+        }
+    }
+
+    async fn fetch_message_states(
+        &self,
+        ids: Vec<FeedId>,
+        request_time: RequestTime,
+        filter: MessageStateFilter,
+    ) -> Result<Vec<MessageState>> {
+        join_all(ids.into_iter().flat_map(|id| {
+            let request_time = request_time.clone();
+            let message_types: Vec<MessageType> = match filter {
+                MessageStateFilter::All => MessageType::iter().collect(),
+                MessageStateFilter::Only(t) => vec![t],
+            };
+
+            message_types.into_iter().map(move |message_type| {
+                let key = MessageStateKey {
+                    feed_id: id,
+                    type_: message_type,
+                };
+                retrieve_message_state(self.into(), key, request_time.clone())
+            })
+        }))
+        .await
+        .into_iter()
+        .collect::<Option<Vec<_>>>()
+        .ok_or(anyhow!("Message not found"))
+    }
+
+    async fn store_accumulator_messages(
+        &self,
+        accumulator_messages: AccumulatorMessages,
+    ) -> Result<()> {
+        let mut cache = self.into().accumulator_messages_cache.write().await;
+        cache.insert(accumulator_messages.slot, accumulator_messages);
+        while cache.len() > self.into().cache_size as usize {
+            cache.pop_first();
+        }
+        Ok(())
+    }
+
+    async fn fetch_accumulator_messages(&self, slot: Slot) -> Result<Option<AccumulatorMessages>> {
+        let cache = self.into().accumulator_messages_cache.read().await;
+        Ok(cache.get(&slot).cloned())
+    }
+
+    async fn store_wormhole_merkle_state(
+        &self,
+        wormhole_merkle_state: WormholeMerkleState,
+    ) -> Result<()> {
+        let mut cache = self.into().wormhole_merkle_state_cache.write().await;
+        cache.insert(wormhole_merkle_state.root.slot, wormhole_merkle_state);
+        while cache.len() > self.into().cache_size as usize {
+            cache.pop_first();
+        }
+        Ok(())
+    }
+
+    async fn fetch_wormhole_merkle_state(&self, slot: Slot) -> Result<Option<WormholeMerkleState>> {
+        let cache = self.into().wormhole_merkle_state_cache.read().await;
+        Ok(cache.get(&slot).cloned())
+    }
+}
+
 async fn retrieve_message_state(
-    cache: &Cache,
+    cache: &CacheState,
     key: MessageStateKey,
     request_time: RequestTime,
 ) -> Option<MessageState> {
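With the trait in place, downstream code can stay generic over Cache instead of naming the concrete State. An illustrative helper under that assumption (not part of this diff; it uses only the trait methods and types declared above):

// Fetch both per-slot artifacts through the Cache trait; any state type
// viewable as a CacheState (including test fixtures) is accepted.
async fn fetch_slot_artifacts<S: Cache>(
    state: &S,
    slot: Slot,
) -> Result<Option<(AccumulatorMessages, WormholeMerkleState)>> {
    let messages = state.fetch_accumulator_messages(slot).await?;
    let merkle = state.fetch_wormhole_merkle_state(slot).await?;
    // Option::zip yields Some only when both caches hold the slot.
    Ok(messages.zip(merkle))
}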
@@ -156,179 +320,19 @@ async fn retrieve_message_state(
     }
 }

-impl Cache {
-    pub fn new(cache_size: u64) -> Self {
-        Self {
-            message_cache: Arc::new(RwLock::new(HashMap::new())),
-            accumulator_messages_cache: Arc::new(RwLock::new(BTreeMap::new())),
-            wormhole_merkle_state_cache: Arc::new(RwLock::new(BTreeMap::new())),
-            cache_size,
-        }
-    }
-}
-
-#[async_trait::async_trait]
-pub trait AggregateCache {
-    async fn message_state_keys(&self) -> Vec<MessageStateKey>;
-    async fn store_message_states(&self, message_states: Vec<MessageState>) -> Result<()>;
-    async fn prune_removed_keys(&self, current_keys: HashSet<MessageStateKey>);
-    async fn fetch_message_states(
-        &self,
-        ids: Vec<FeedId>,
-        request_time: RequestTime,
-        filter: MessageStateFilter,
-    ) -> Result<Vec<MessageState>>;
-    async fn store_accumulator_messages(
-        &self,
-        accumulator_messages: AccumulatorMessages,
-    ) -> Result<()>;
-    async fn fetch_accumulator_messages(&self, slot: Slot) -> Result<Option<AccumulatorMessages>>;
-    async fn store_wormhole_merkle_state(
-        &self,
-        wormhole_merkle_state: WormholeMerkleState,
-    ) -> Result<()>;
-    async fn fetch_wormhole_merkle_state(&self, slot: Slot) -> Result<Option<WormholeMerkleState>>;
-}
-
-#[async_trait::async_trait]
-impl AggregateCache for crate::state::State {
-    async fn message_state_keys(&self) -> Vec<MessageStateKey> {
-        self.cache
-            .message_cache
-            .read()
-            .await
-            .iter()
-            .map(|entry| entry.0.clone())
-            .collect::<Vec<_>>()
-    }
-
-    async fn store_message_states(&self, message_states: Vec<MessageState>) -> Result<()> {
-        let mut message_cache = self.cache.message_cache.write().await;
-
-        for message_state in message_states {
-            let key = message_state.key();
-            let time = message_state.time();
-            let cache = message_cache.entry(key).or_insert_with(BTreeMap::new);
-            cache.insert(time, message_state);
-
-            // Remove the earliest message states if the cache size is exceeded
-            while cache.len() > self.cache.cache_size as usize {
-                cache.pop_first();
-            }
-        }
-
-        Ok(())
-    }
-
-    /// This method takes the current feed ids and prunes the cache for the keys
-    /// that are not present in the current feed ids.
-    ///
-    /// There is a side-effect of this: if a key gets removed, we will
-    /// lose the cache for that key and cannot retrieve it for historical
-    /// price queries.
-    async fn prune_removed_keys(&self, current_keys: HashSet<MessageStateKey>) {
-        let mut message_cache = self.cache.message_cache.write().await;
-
-        // Sometimes, some keys are removed from the accumulator. We track which keys are not
-        // present in the message states and remove them from the cache.
-        let keys_in_cache = message_cache
-            .iter()
-            .map(|(key, _)| key.clone())
-            .collect::<HashSet<_>>();
-
-        for key in keys_in_cache {
-            if !current_keys.contains(&key) {
-                tracing::info!("Feed {:?} seems to be removed. Removing it from cache", key);
-                message_cache.remove(&key);
-            }
-        }
-    }
-
-    async fn fetch_message_states(
-        &self,
-        ids: Vec<FeedId>,
-        request_time: RequestTime,
-        filter: MessageStateFilter,
-    ) -> Result<Vec<MessageState>> {
-        join_all(ids.into_iter().flat_map(|id| {
-            let request_time = request_time.clone();
-            let message_types: Vec<MessageType> = match filter {
-                MessageStateFilter::All => MessageType::iter().collect(),
-                MessageStateFilter::Only(t) => vec![t],
-            };
-
-            message_types.into_iter().map(move |message_type| {
-                let key = MessageStateKey {
-                    feed_id: id,
-                    type_: message_type,
-                };
-                retrieve_message_state(&self.cache, key, request_time.clone())
-            })
-        }))
-        .await
-        .into_iter()
-        .collect::<Option<Vec<_>>>()
-        .ok_or(anyhow!("Message not found"))
-    }
-
-    async fn store_accumulator_messages(
-        &self,
-        accumulator_messages: AccumulatorMessages,
-    ) -> Result<()> {
-        let mut cache = self.cache.accumulator_messages_cache.write().await;
-        cache.insert(accumulator_messages.slot, accumulator_messages);
-        while cache.len() > self.cache.cache_size as usize {
-            cache.pop_first();
-        }
-        Ok(())
-    }
-
-    async fn fetch_accumulator_messages(&self, slot: Slot) -> Result<Option<AccumulatorMessages>> {
-        let cache = self.cache.accumulator_messages_cache.read().await;
-        Ok(cache.get(&slot).cloned())
-    }
-
-    async fn store_wormhole_merkle_state(
-        &self,
-        wormhole_merkle_state: WormholeMerkleState,
-    ) -> Result<()> {
-        let mut cache = self.cache.wormhole_merkle_state_cache.write().await;
-        cache.insert(wormhole_merkle_state.root.slot, wormhole_merkle_state);
-        while cache.len() > self.cache.cache_size as usize {
-            cache.pop_first();
-        }
-        Ok(())
-    }
-
-    async fn fetch_wormhole_merkle_state(&self, slot: Slot) -> Result<Option<WormholeMerkleState>> {
-        let cache = self.cache.wormhole_merkle_state_cache.read().await;
-        Ok(cache.get(&slot).cloned())
-    }
-}
-
 #[cfg(test)]
 mod test {
     use {
         super::*,
-        crate::{
-            aggregate::{
-                wormhole_merkle::{
-                    WormholeMerkleMessageProof,
-                    WormholeMerkleState,
-                },
-                AccumulatorMessages,
-                ProofSet,
-            },
-            state::test::setup_state,
+        crate::state::{
+            aggregate::wormhole_merkle::WormholeMerkleMessageProof,
+            test::setup_state,
         },
         pyth_sdk::UnixTimestamp,
         pythnet_sdk::{
             accumulators::merkle::MerklePath,
             hashers::keccak256_160::Keccak160,
-            messages::{
-                Message,
-                PriceFeedMessage,
-            },
+            messages::PriceFeedMessage,
             wire::v1::WormholeMerkleRoot,
         },
     };
@@ -369,7 +373,7 @@ mod test {
         slot: Slot,
     ) -> MessageState
     where
-        S: AggregateCache,
+        S: Cache,
     {
         let message_state = create_dummy_price_feed_message_state(feed_id, publish_time, slot);
         state
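One payoff of the blanket impl for test helpers like this one (a sketch, not code from this diff): a fixture only has to embed CacheState and provide the reference conversion, and it picks up the entire Cache API without constructing the full application State. Assuming a tokio test runtime:

// Test-only state carrying just the cache component.
struct TestState {
    cache: CacheState,
}

// The same reference conversion the real State provides.
impl<'a> From<&'a TestState> for &'a CacheState {
    fn from(state: &'a TestState) -> &'a CacheState {
        &state.cache
    }
}

#[tokio::test]
async fn fresh_cache_has_no_message_state_keys() {
    // TestState implements Cache via the blanket impl above.
    let state = TestState {
        cache: CacheState::new(10),
    };
    assert!(state.message_state_keys().await.is_empty());
}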

Some files were not shown because too many files have changed in this diff.