Compare commits

pyth-evm-e... → main (159 commits)

SHA1 (author and date columns not preserved):
cf7987f4c5, 6e0bd0569b, 1e1be9dbeb, d105a7aa86, dd9b07b5e4, e26c9d1a30, 9dddd3d1e7, 77c68c5069,
bf2c8b5d43, 3f07c27243, 42b64ac09f, 55cbe62997, 94b36c4961, ff6b11023c, 4966b956df, 10dc4a05b8,
586a4398bd, 020ecdf5da, 308599714f, 587a6fa524, a592c6bc33, 31483a9fc7, 3d9781ed58, 344f8a9e47,
b2cb7c878a, 4e630edac0, 20d99bceb7, d2ce2ecd33, 2095da34e9, a8dbabc7f9, cae194eb62, 8d32b4c2fc,
a203808a44, 24a08a06c5, f212907a8b, ef922220ee, 6da2e1ba53, 050a3412f9, cf90bff236, 37ee3b46bd,
b47ee059d7, c2da454637, 567b4a6597, 2014d1e205, 93a71f2eef, 9437d51843, d31cefb446, 48a5faf4d9,
b110bbca5c, d05df508a8, d51e5712f4, 1a3e3a7c00, 4b8b9bfd87, c7883c822b, b30604c5ba, d50488ef5c,
64037e5b4a, 4445c73443, 5b494689d2, e46821d423, 644b54676c, f9292177e9, 1b13bf651a, e8c198065e,
a1e4fc0924, 67132c0572, c7c3527bfe, 8b76d8c19a, bdc2e967b0, e04edcfece, ffbe02b4f6, 26bbe4a0ef,
8b66d0f814, 0a219fbead, 30c741ed49, 5fac32fa40, 6e62328528, 2d9c6d3028, 508de75839, 7bbcfa80d4,
0d6c35fce8, c58b675a63, 899a995e2e, 5a676978db, 3f6a14897d, 481a428e88, 3f58a2a8b3, 76205745c8,
93efd61ea4, 8be6a9ad1c, 76ec4e3322, 56cbace282, ba435bac76, 73798b9bdd, 02ad78bcf1, 8d92ad9931,
c12a58e0e4, ee1d61ac71, d9c85d8f9d, 70c2c8ec4b, 933e61dcb8, 45065e2851, 0789d615d4, a7bb9160c4,
392a3df7eb, a60733559c, 8fba519ce3, bdc40fec3f, 729b18e596, 1135f00da2, 7673097c37, 443f1455c4,
c727195e9c, 0aeae8ca40, ce36d80ae4, 34d94e3177, 3c5a913a80, e1f9783062, ce4019b63f, d1c5d93c8e,
ee455f1196, 2c7dfa92dd, b4ed825cd6, 110c6dcea3, d627a49764, 299dec1d79, 68a2ce1221, 62d189e3b5,
bb830e1760, 972a9a1e1d, 0e885e3ca7, a632ee4bd2, 44cad44f44, 8110e03ccb, 2398afefa7, 80b4dd96de,
ecf347909f, 5afb187f0d, 6295674efa, 050b8275f7, 450a483679, c0c03945d0, c2fde0f6dc, 866b6a5b4b,
a888ba318c, 77db9ee53b, 9328b73284, cd543bcd6a, f134c2d31c, 7352256c63, d23f6c0d11, f4617b484a,
8843d0f875, 06965d38cc, a2db288210, 8a70ca769b, 01f878cf5a, 0f7a9cc334, 6fb5ab483d
@@ -15,5 +15,4 @@
 
 .git
 
-hermes/wormhole
-!hermes/src/state/cache.rs
+!apps/hermes/src/state/cache.rs
@@ -21,10 +21,10 @@ jobs:
       - uses: actions/checkout@v3
 
       - name: Download CLI
-        run: wget https://github.com/aptos-labs/aptos-core/releases/download/aptos-cli-v1.0.4/aptos-cli-1.0.4-Ubuntu-22.04-x86_64.zip
+        run: wget https://github.com/aptos-labs/aptos-core/releases/download/aptos-cli-v3.1.0/aptos-cli-3.1.0-Ubuntu-22.04-x86_64.zip
 
       - name: Unzip CLI
-        run: unzip aptos-cli-1.0.4-Ubuntu-22.04-x86_64.zip
+        run: unzip aptos-cli-3.1.0-Ubuntu-22.04-x86_64.zip
 
       - name: Run tests
         run: ./aptos move test
@@ -2,10 +2,10 @@ name: Check Fortuna
 
 on:
   pull_request:
-    paths: [fortuna/**]
+    paths: [apps/fortuna/**]
  push:
    branches: [main]
-    paths: [fortuna/**]
+    paths: [apps/fortuna/**]
 jobs:
   test:
     runs-on: ubuntu-latest

@@ -17,4 +17,4 @@ jobs:
           toolchain: nightly-2023-07-23
           override: true
       - name: Run executor tests
-        run: cargo test --manifest-path ./fortuna/Cargo.toml
+        run: cargo test --manifest-path ./apps/fortuna/Cargo.toml
@@ -0,0 +1,35 @@
+name: Test Fuel Contract
+
+on:
+  pull_request:
+    paths:
+      - target_chains/fuel/**
+  push:
+    branches:
+      - main
+    paths:
+      - target_chains/fuel/**
+
+env:
+  CARGO_TERM_COLOR: always
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    defaults:
+      run:
+        working-directory: target_chains/fuel/contracts/
+    steps:
+      - uses: actions/checkout@v2
+      - name: Install Fuel toolchain
+        run: |
+          curl https://install.fuel.network | sh
+          echo "$HOME/.fuelup/bin" >> $GITHUB_PATH
+      - name: Build with Forc
+        run: forc build --verbose
+      - name: Run tests with Forc
+        run: forc test --verbose
+      - name: Build
+        run: cargo build --verbose
+      - name: Run tests
+        run: cargo test --verbose
@@ -2,10 +2,10 @@ name: Check Hermes
 
 on:
   pull_request:
-    paths: [hermes/**]
+    paths: [apps/hermes/**]
  push:
    branches: [main]
-    paths: [hermes/**]
+    paths: [apps/hermes/**]
 jobs:
   test:
     runs-on: ubuntu-latest

@@ -20,4 +20,4 @@ jobs:
       - name: Install protoc
         uses: arduino/setup-protoc@v3
       - name: Run executor tests
-        run: cargo test --manifest-path ./hermes/Cargo.toml
+        run: cargo test --manifest-path ./apps/hermes/Cargo.toml
@@ -0,0 +1,37 @@
+name: Starknet contract
+on:
+  pull_request:
+    paths:
+      - target_chains/starknet/contracts/**
+  push:
+    branches:
+      - main
+    paths:
+      - target_chains/starknet/contracts/**
+jobs:
+  check:
+    name: Starknet Foundry tests
+    runs-on: ubuntu-latest
+    defaults:
+      run:
+        working-directory: target_chains/starknet/contracts/
+    steps:
+      - uses: actions/checkout@v3
+      - name: Install Scarb
+        uses: software-mansion/setup-scarb@v1
+        with:
+          tool-versions: target_chains/starknet/contracts/.tool-versions
+      - name: Install Starknet Foundry
+        uses: foundry-rs/setup-snfoundry@v3
+        with:
+          tool-versions: target_chains/starknet/contracts/.tool-versions
+      - name: Install Starkli
+        run: curl https://get.starkli.sh | sh && . ~/.config/.starkli/env && starkliup -v $(awk '/starkli/{print $2}' .tool-versions)
+      - name: Install Katana
+        run: curl -L https://install.dojoengine.org | bash && PATH="$PATH:$HOME/.config/.dojo/bin" dojoup -v $(awk '/dojo/{print $2}' .tool-versions)
+      - name: Check formatting
+        run: scarb fmt --check
+      - name: Run tests
+        run: snforge test
+      - name: Test local deployment script
+        run: bash -c 'PATH="$PATH:$HOME/.config/.dojo/bin" katana & . ~/.config/.starkli/env && deploy/local_deploy'
@@ -12,7 +12,7 @@ jobs:
       - uses: actions/checkout@v2
       - uses: actions/setup-node@v2
         with:
-          node-version: "16"
+          node-version: "18"
           registry-url: "https://registry.npmjs.org"
       - run: npm ci
       - run: npx lerna run build --no-private
@@ -11,8 +11,14 @@ jobs:
     steps:
       - name: Checkout sources
         uses: actions/checkout@v2
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          default: true
+          profile: minimal
+
-      - run: cargo publish --token ${CARGO_REGISTRY_TOKEN}
+      - run: cargo +stable-x86_64-unknown-linux-gnu publish --token ${CARGO_REGISTRY_TOKEN}
         env:
           CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
         working-directory: "target_chains/solana/pyth_solana_receiver_sdk"
@@ -46,7 +46,7 @@ jobs:
         uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
         with:
           context: .
-          file: "./fortuna/Dockerfile"
+          file: "./apps/fortuna/Dockerfile"
           push: true
           tags: ${{ steps.metadata_fortuna.outputs.tags }}
           labels: ${{ steps.metadata_fortuna.outputs.labels }}
@@ -37,7 +37,7 @@ jobs:
         env:
           AWS_REGION: us-east-1
       - run: |
-          DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f hermes/Dockerfile .
+          DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f apps/hermes/Dockerfile .
           docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
         env:
           ECR_REGISTRY: public.ecr.aws
@@ -40,7 +40,7 @@ jobs:
         id: ecr_login
       - run: |
           DOCKER_BUILDKIT=1 docker build -t lerna -f Dockerfile.lerna .
-          DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f price_pusher/Dockerfile .
+          DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f apps/price_pusher/Dockerfile .
           docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
         env:
           ECR_REGISTRY: public.ecr.aws
@@ -6,8 +6,12 @@ on:
 permissions:
   contents: read
   id-token: write
+  packages: write
+env:
+  REGISTRY: ghcr.io
+  IMAGE_NAME: pyth-network/xc-admin-frontend
 jobs:
-  xc-admin-image:
+  xc-admin-frontend-image:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2

@@ -16,23 +20,17 @@ jobs:
           SHORT_HASH=$(echo ${{ github.sha }} | cut -c1-7)
           TIMESTAMP=$(date +%s)
           echo "IMAGE_TAG=${TIMESTAMP}-${SHORT_HASH}" >> "${GITHUB_ENV}"
-      - uses: aws-actions/configure-aws-credentials@8a84b07f2009032ade05a88a28750d733cc30db1
+      - name: Log in to the Container registry
+        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
         with:
-          role-to-assume: arn:aws:iam::192824654885:role/github-actions-ecr
-          aws-region: eu-west-2
-      - uses: aws-actions/amazon-ecr-login@v1
-        id: ecr_login
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
       - name: Build docker image
         run: |
           DOCKER_BUILDKIT=1 docker build -t lerna -f Dockerfile.lerna .
-          DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f governance/xc_admin/packages/xc_admin_frontend/Dockerfile .
-        env:
-          ECR_REGISTRY: ${{ steps.ecr_login.outputs.registry }}
-          ECR_REPOSITORY: xc-admin-frontend
+          DOCKER_BUILDKIT=1 docker build -t ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }} -f governance/xc_admin/packages/xc_admin_frontend/Dockerfile .
       - name: Push docker image
         if: github.ref == 'refs/heads/main'
         run: |
-          docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
-        env:
-          ECR_REGISTRY: ${{ steps.ecr_login.outputs.registry }}
-          ECR_REPOSITORY: xc-admin-frontend
+          docker push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }}
@@ -6,6 +6,10 @@ on:
 permissions:
   contents: read
   id-token: write
+  packages: write
+env:
+  REGISTRY: ghcr.io
+  IMAGE_NAME: pyth-network/xc-admin
 jobs:
   xc-admin-image:
     runs-on: ubuntu-latest

@@ -17,16 +21,16 @@ jobs:
           PREFIX="refs/tags/xc-admin-"
           VERSION="${GITHUB_REF:${#PREFIX}}"
           echo "IMAGE_TAG=${VERSION}" >> "${GITHUB_ENV}"
-      - uses: aws-actions/configure-aws-credentials@8a84b07f2009032ade05a88a28750d733cc30db1
+      - name: Log in to the Container registry
+        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
         with:
-          role-to-assume: arn:aws:iam::192824654885:role/github-actions-ecr
-          aws-region: eu-west-2
-      - uses: aws-actions/amazon-ecr-login@v1
-        id: ecr_login
-      - run: |
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      - name: Build docker image
+        run: |
           DOCKER_BUILDKIT=1 docker build -t lerna -f Dockerfile.lerna .
-          DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f governance/xc_admin/Dockerfile .
-          docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
-        env:
-          ECR_REGISTRY: ${{ steps.ecr_login.outputs.registry }}
-          ECR_REPOSITORY: xc-admin
+          DOCKER_BUILDKIT=1 docker build -t ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }} -f governance/xc_admin/Dockerfile .
+      - name: Push docker image
+        run: |
+          docker push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }}
@@ -5,12 +5,17 @@ on:
     tags:
       - "python-v*"
 
+env:
+  PYTHON_VERSION: "3.11"
+
 jobs:
   deploy:
     runs-on: ubuntu-20.04
     steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
+        with:
+          python-version: ${{ env.PYTHON_VERSION }}
      - name: Install dependencies
        run: |
          python3 -m pip install --upgrade poetry
@@ -47,22 +47,22 @@ repos:
       - id: cargo-fmt-hermes
         name: Cargo format for Hermes
         language: "rust"
-        entry: cargo +nightly-2024-03-26 fmt --manifest-path ./hermes/Cargo.toml --all -- --config-path rustfmt.toml
+        entry: cargo +nightly-2024-03-26 fmt --manifest-path ./apps/hermes/Cargo.toml --all -- --config-path rustfmt.toml
         pass_filenames: false
-        files: hermes
+        files: apps/hermes
       - id: cargo-clippy-hermes
         name: Cargo clippy for Hermes
         language: "rust"
-        entry: cargo +nightly-2024-03-26 clippy --manifest-path ./hermes/Cargo.toml --tests --fix --allow-dirty --allow-staged -- -D warnings
+        entry: cargo +nightly-2024-03-26 clippy --manifest-path ./apps/hermes/Cargo.toml --tests --fix --allow-dirty --allow-staged -- -D warnings
         pass_filenames: false
-        files: hermes
+        files: apps/hermes
       # Hooks for Fortuna
       - id: cargo-fmt-fortuna
         name: Cargo format for Fortuna
         language: "rust"
-        entry: cargo +nightly-2023-07-23 fmt --manifest-path ./fortuna/Cargo.toml --all -- --config-path rustfmt.toml
+        entry: cargo +nightly-2023-07-23 fmt --manifest-path ./apps/fortuna/Cargo.toml --all -- --config-path rustfmt.toml
         pass_filenames: false
-        files: fortuna
+        files: apps/fortuna
       # Hooks for message buffer contract
       - id: cargo-fmt-message-buffer
         name: Cargo format for message buffer contract

@@ -80,13 +80,13 @@ repos:
       - id: cargo-fmt-pythnet-sdk
         name: Cargo format for pythnet SDK
         language: "rust"
-        entry: cargo +nightly-2023-07-23 fmt --manifest-path ./pythnet/pythnet_sdk/Cargo.toml --all -- --config-path rustfmt.toml
+        entry: cargo +nightly-2024-03-26 fmt --manifest-path ./pythnet/pythnet_sdk/Cargo.toml --all -- --config-path rustfmt.toml
         pass_filenames: false
         files: pythnet/pythnet_sdk
       - id: cargo-clippy-pythnet-sdk
         name: Cargo clippy for pythnet SDK
         language: "rust"
-        entry: cargo +nightly-2023-07-23 clippy --manifest-path ./pythnet/pythnet_sdk/Cargo.toml --tests --fix --allow-dirty --allow-staged -- -D warnings
+        entry: cargo +nightly-2024-03-26 clippy --manifest-path ./pythnet/pythnet_sdk/Cargo.toml --tests --fix --allow-dirty --allow-staged -- -D warnings
         pass_filenames: false
         files: pythnet/pythnet_sdk
       # Hooks for solana receiver contract
@@ -16,7 +16,7 @@ contracts, SDKs, and examples.
 
 ## Hermes
 
-> [hermes](./hermes/)
+> [hermes](./apps/hermes/)
 
 Hermes is an off-chain service which constantly observes Pythnet and the
 Wormhole network watching for price updates emitted from the Pyth contract. It

@@ -79,10 +79,11 @@ Lerna has some common failure modes that you may encounter:
 
 1. `npm ci` fails with a typescript compilation error about a missing package.
    This error likely means that the failing package has a `prepare` entry compiling the typescript in its `package.json`.
    Fix this error by moving that logic to the `prepublishOnly` entry.
-1. The software builds locally but fails in CI, or vice-versa.
+2. The software builds locally but fails in CI, or vice-versa.
    This error likely means that some local build caches need to be cleaned.
    The build error may not indicate that this is a caching issue, e.g., it may appear that the packages are being built in the wrong order.
    Delete `node_modules/`, `lib/` and `tsconfig.tsbuildinfo` from each package's subdirectory. then try again.
+3. `npm ci` fails due to wrong node version. Make sure to be using `v18`. Node version `v21` is not supported and known to cause issues.
 
 ## Audit / Feature Status
@@ -1,4 +1,4 @@
 /target
-config.yaml
+*config.yaml
 *secret*
 *private-key*
@@ -522,9 +522,9 @@ dependencies = [
 
 [[package]]
 name = "cargo_metadata"
-version = "0.17.0"
+version = "0.18.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7daec1a2a2129eeba1644b220b4647ec537b0b5d4bfd6876fcc5a540056b592"
+checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037"
 dependencies = [
  "camino",
  "cargo-platform",

@@ -1031,9 +1031,9 @@ dependencies = [
 
 [[package]]
 name = "enr"
-version = "0.9.1"
+version = "0.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fe81b5c06ecfdbc71dd845216f225f53b62a10cb8a16c946836a3467f701d05b"
+checksum = "2a3d8dc56e02f954cac8eb489772c552c473346fc34f67412bb6244fd647f7e4"
 dependencies = [
  "base64 0.21.4",
  "bytes",

@@ -1146,9 +1146,9 @@ dependencies = [
 
 [[package]]
 name = "ethers"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ad13497f6e0a24292fc7b408e30d22fe9dc262da1f40d7b542c3a44e7fc0476"
+checksum = "816841ea989f0c69e459af1cf23a6b0033b19a55424a1ea3a30099becdb8dec0"
 dependencies = [
  "ethers-addressbook",
  "ethers-contract",

@@ -1162,9 +1162,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-addressbook"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c6e9e8acd0ed348403cc73a670c24daba3226c40b98dc1a41903766b3ab6240a"
+checksum = "5495afd16b4faa556c3bba1f21b98b4983e53c1755022377051a975c3b021759"
 dependencies = [
  "ethers-core",
  "once_cell",

@@ -1174,9 +1174,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-contract"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d79269278125006bb0552349c03593ffa9702112ca88bc7046cc669f148fb47c"
+checksum = "6fceafa3578c836eeb874af87abacfb041f92b4da0a78a5edd042564b8ecdaaa"
 dependencies = [
  "const-hex",
  "ethers-contract-abigen",

@@ -1193,9 +1193,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-contract-abigen"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ce95a43c939b2e4e2f3191c5ad4a1f279780b8a39139c9905b43a7433531e2ab"
+checksum = "04ba01fbc2331a38c429eb95d4a570166781f14290ef9fdb144278a90b5a739b"
 dependencies = [
  "Inflector",
  "const-hex",

@@ -1211,15 +1211,15 @@ dependencies = [
  "serde",
  "serde_json",
  "syn 2.0.38",
- "toml 0.7.8",
+ "toml 0.8.12",
  "walkdir",
 ]
 
 [[package]]
 name = "ethers-contract-derive"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e9ce44906fc871b3ee8c69a695ca7ec7f70e50cb379c9b9cb5e532269e492f6"
+checksum = "87689dcabc0051cde10caaade298f9e9093d65f6125c14575db3fd8c669a168f"
 dependencies = [
  "Inflector",
  "const-hex",

@@ -1233,9 +1233,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-core"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c0a17f0708692024db9956b31d7a20163607d2745953f5ae8125ab368ba280ad"
+checksum = "82d80cc6ad30b14a48ab786523af33b37f28a8623fc06afd55324816ef18fb1f"
 dependencies = [
  "arrayvec",
  "bytes",

@@ -1253,7 +1253,7 @@ dependencies = [
  "rlp",
  "serde",
  "serde_json",
- "strum 0.25.0",
+ "strum 0.26.2",
  "syn 2.0.38",
  "tempfile",
  "thiserror",

@@ -1263,10 +1263,11 @@ dependencies = [
 
 [[package]]
 name = "ethers-etherscan"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0e53451ea4a8128fbce33966da71132cf9e1040dcfd2a2084fd7733ada7b2045"
+checksum = "e79e5973c26d4baf0ce55520bd732314328cabe53193286671b47144145b9649"
 dependencies = [
+ "chrono",
  "ethers-core",
  "reqwest",
  "semver",

@@ -1278,9 +1279,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-middleware"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "473f1ccd0c793871bbc248729fa8df7e6d2981d6226e4343e3bbaa9281074d5d"
+checksum = "48f9fdf09aec667c099909d91908d5eaf9be1bd0e2500ba4172c1d28bfaa43de"
 dependencies = [
  "async-trait",
  "auto_impl",

@@ -1305,9 +1306,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-providers"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6838fa110e57d572336178b7c79e94ff88ef976306852d8cb87d9e5b1fc7c0b5"
+checksum = "6434c9a33891f1effc9c75472e12666db2fa5a0fec4b29af6221680a6fe83ab2"
 dependencies = [
  "async-trait",
  "auto_impl",

@@ -1316,6 +1317,7 @@ dependencies = [
  "const-hex",
  "enr",
  "ethers-core",
+ "futures-channel",
  "futures-core",
  "futures-timer",
  "futures-util",

@@ -1342,9 +1344,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-signers"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ea44bec930f12292866166f9ddbea6aa76304850e4d8dcd66dc492b43d00ff1"
+checksum = "228875491c782ad851773b652dd8ecac62cda8571d3bc32a5853644dd26766c2"
 dependencies = [
  "async-trait",
  "coins-bip32",

@@ -1361,9 +1363,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-solc"
-version = "2.0.10"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "de34e484e7ae3cab99fbfd013d6c5dc7f9013676a4e0e414d8b12e1213e8b3ba"
+checksum = "66244a771d9163282646dbeffe0e6eca4dda4146b6498644e678ac6089b11edd"
 dependencies = [
  "cfg-if",
  "const-hex",

@@ -1486,7 +1488,7 @@ dependencies = [
 
 [[package]]
 name = "fortuna"
-version = "3.3.4"
+version = "5.2.2"
 dependencies = [
  "anyhow",
  "axum",

@@ -1498,6 +1500,7 @@ dependencies = [
  "clap",
  "ethabi",
  "ethers",
+ "futures",
  "hex",
  "lazy_static",
  "once_cell",

@@ -2758,7 +2761,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919"
 dependencies = [
  "once_cell",
- "toml_edit",
+ "toml_edit 0.19.15",
 ]
 
 [[package]]

@@ -2819,7 +2822,7 @@ dependencies = [
 
 [[package]]
 name = "pythnet-sdk"
-version = "2.0.0"
+version = "2.1.0"
 dependencies = [
  "bincode",
  "borsh",

@@ -3387,9 +3390,9 @@ dependencies = [
 
 [[package]]
 name = "serde_spanned"
-version = "0.6.3"
+version = "0.6.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186"
+checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1"
 dependencies = [
  "serde",
 ]

@@ -3581,9 +3584,9 @@ dependencies = [
 
 [[package]]
 name = "solang-parser"
-version = "0.3.2"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7cb9fa2fa2fa6837be8a2495486ff92e3ffe68a99b6eeba288e139efdd842457"
+checksum = "c425ce1c59f4b154717592f0bdf4715c3a1d55058883622d3157e1f0908a5b26"
 dependencies = [
  "itertools 0.11.0",
  "lalrpop",

@@ -3645,11 +3648,11 @@ dependencies = [
 
 [[package]]
 name = "strum"
-version = "0.25.0"
+version = "0.26.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125"
+checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29"
 dependencies = [
- "strum_macros 0.25.2",
+ "strum_macros 0.26.2",
 ]
 
 [[package]]

@@ -3667,9 +3670,9 @@ dependencies = [
 
 [[package]]
 name = "strum_macros"
-version = "0.25.2"
+version = "0.26.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ad8d03b598d3d0fff69bf533ee3ef19b8eeb342729596df84bcc7e1f96ec4059"
+checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946"
 dependencies = [
  "heck",
  "proc-macro2",

@@ -3955,21 +3958,21 @@ dependencies = [
 
 [[package]]
 name = "toml"
-version = "0.7.8"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257"
+checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3"
 dependencies = [
  "serde",
  "serde_spanned",
  "toml_datetime",
- "toml_edit",
+ "toml_edit 0.22.9",
 ]
 
 [[package]]
 name = "toml_datetime"
-version = "0.6.3"
+version = "0.6.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b"
+checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1"
 dependencies = [
  "serde",
 ]

@@ -3979,12 +3982,23 @@ name = "toml_edit"
 version = "0.19.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
+dependencies = [
+ "indexmap 2.0.2",
+ "toml_datetime",
+ "winnow 0.5.16",
+]
+
+[[package]]
+name = "toml_edit"
+version = "0.22.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e40bb779c5187258fd7aad0eb68cb8706a0a81fa712fbea808ab43c4b8374c4"
 dependencies = [
  "indexmap 2.0.2",
  "serde",
  "serde_spanned",
  "toml_datetime",
- "winnow",
+ "winnow 0.6.5",
 ]
 
 [[package]]

@@ -4512,6 +4526,15 @@ dependencies = [
  "memchr",
 ]
 
+[[package]]
+name = "winnow"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dffa400e67ed5a4dd237983829e66475f0a4a26938c4b04c21baede6262215b8"
+dependencies = [
+ "memchr",
+]
+
 [[package]]
 name = "winreg"
 version = "0.50.0"
@@ -1,6 +1,6 @@
 [package]
 name = "fortuna"
-version = "3.3.4"
+version = "5.2.2"
 edition = "2021"
 
 [dependencies]

@@ -12,10 +12,11 @@ bincode = "1.3.3"
 byteorder = "1.5.0"
 clap = { version = "4.4.6", features = ["derive", "cargo", "env"] }
 ethabi = "18.0.0"
-ethers = "2.0.10"
+ethers = { version = "2.0.14", features = ["ws"] }
+futures = { version = "0.3.28" }
 hex = "0.4.3"
 prometheus-client = { version = "0.21.2" }
-pythnet-sdk = { path = "../pythnet/pythnet_sdk", features = ["strum"] }
+pythnet-sdk = { path = "../../pythnet/pythnet_sdk", features = ["strum"] }
 rand = "0.8.5"
 reqwest = { version = "0.11.22", features = ["json", "blocking"] }
 serde = { version = "1.0.188", features = ["derive"] }

@@ -34,5 +35,6 @@ once_cell = "1.18.0"
 lazy_static = "1.4.0"
 url = "2.5.0"
 
+
 [dev-dependencies]
 axum-test = "13.1.1"
@@ -7,15 +7,15 @@ RUN rustup default nightly-2023-07-23
 
 # Build
 WORKDIR /src
-COPY fortuna fortuna
+COPY apps/fortuna apps/fortuna
 COPY pythnet pythnet
 COPY target_chains/ethereum/entropy_sdk/solidity/abis target_chains/ethereum/entropy_sdk/solidity/abis
 
-WORKDIR /src/fortuna
+WORKDIR /src/apps/fortuna
 
 RUN --mount=type=cache,target=/root/.cargo/registry cargo build --release
 
 
 FROM rust:${RUST_VERSION}
 # Copy artifacts from other images
-COPY --from=build /src/fortuna/target/release/fortuna /usr/local/bin/
+COPY --from=build /src/apps/fortuna/target/release/fortuna /usr/local/bin/
@@ -4,3 +4,4 @@ chains:
     contract_addr: 0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a
     reveal_delay_blocks: 0
     legacy_tx: true
+    gas_limit: 500000
@@ -0,0 +1,7 @@
+chains:
+  lightlink-pegasus:
+    commitments:
+      # prettier-ignore
+      - seed: [219,125,217,197,234,88,208,120,21,181,172,143,239,102,41,233,167,212,237,106,37,255,184,165,238,121,230,155,116,158,173,48]
+        chain_length: 10000
+        original_commitment_sequence_number: 104
@@ -0,0 +1 @@
+nightly-2023-07-23
@@ -73,6 +73,8 @@ impl ApiState {
 /// The state of the randomness service for a single blockchain.
 #[derive(Clone)]
 pub struct BlockchainState {
+    /// The chain id for this blockchain, useful for logging
+    pub id: ChainId,
     /// The hash chain(s) required to serve random numbers for this blockchain
     pub state: Arc<HashChainState>,
     /// The contract that the server is fulfilling requests for.

@@ -245,6 +247,7 @@ mod test {
         let eth_read = Arc::new(MockEntropyReader::with_requests(10, &[]));
 
         let eth_state = BlockchainState {
+            id: "ethereum".into(),
             state: ETH_CHAIN.clone(),
             contract: eth_read.clone(),
             provider_address: PROVIDER,

@@ -255,6 +258,7 @@ mod test {
         let avax_read = Arc::new(MockEntropyReader::with_requests(10, &[]));
 
         let avax_state = BlockchainState {
+            id: "avalanche".into(),
             state: AVAX_CHAIN.clone(),
             contract: avax_read.clone(),
             provider_address: PROVIDER,
@@ -5,6 +5,7 @@ use {
         BlockNumber,
         BlockStatus,
         EntropyReader,
+        RequestedWithCallbackEvent,
     },
     config::EthereumConfig,
 },

@@ -18,6 +19,7 @@ use {
     abi::RawLog,
     contract::{
         abigen,
+        ContractError,
         EthLogDecode,
     },
     core::types::Address,

@@ -27,6 +29,7 @@ use {
         TransformerError,
         TransformerMiddleware,
     },
+    NonceManagerMiddleware,
     SignerMiddleware,
 },
 prelude::TransactionRequest,

@@ -42,6 +45,7 @@ use {
     types::{
         transaction::eip2718::TypedTransaction,
         BlockNumber as EthersBlockNumber,
+        U256,
     },
 },
 sha3::{

@@ -55,11 +59,14 @@ use {
 // contract in the same repo.
 abigen!(
     PythRandom,
-    "../target_chains/ethereum/entropy_sdk/solidity/abis/IEntropy.json"
+    "../../target_chains/ethereum/entropy_sdk/solidity/abis/IEntropy.json"
 );
 
 pub type SignablePythContract = PythRandom<
-    TransformerMiddleware<SignerMiddleware<Provider<Http>, LocalWallet>, LegacyTxTransformer>,
+    TransformerMiddleware<
+        NonceManagerMiddleware<SignerMiddleware<Provider<Http>, LocalWallet>>,
+        LegacyTxTransformer,
+    >,
 >;
 pub type PythContract = PythRandom<Provider<Http>>;
 

@@ -97,10 +104,12 @@ impl SignablePythContract {
             .parse::<LocalWallet>()?
             .with_chain_id(chain_id.as_u64());
 
+        let address = wallet__.address();
+
         Ok(PythRandom::new(
             chain_config.contract_addr,
             Arc::new(TransformerMiddleware::new(
-                SignerMiddleware::new(provider, wallet__),
+                NonceManagerMiddleware::new(SignerMiddleware::new(provider, wallet__), address),
                 transformer,
             )),
         ))

@@ -225,4 +234,57 @@ impl EntropyReader for PythContract {
             .ok_or_else(|| Error::msg("pending confirmation"))?
             .as_u64())
     }
+
+    async fn get_request_with_callback_events(
+        &self,
+        from_block: BlockNumber,
+        to_block: BlockNumber,
+    ) -> Result<Vec<RequestedWithCallbackEvent>> {
+        let mut event = self.requested_with_callback_filter();
+        event.filter = event.filter.from_block(from_block).to_block(to_block);
+
+        let res: Vec<RequestedWithCallbackFilter> = event.query().await?;
+
+        Ok(res
+            .iter()
+            .map(|r| RequestedWithCallbackEvent {
+                sequence_number: r.sequence_number,
+                user_random_number: r.user_random_number,
+                provider_address: r.request.provider,
+            })
+            .collect())
+    }
+
+    async fn estimate_reveal_with_callback_gas(
+        &self,
+        provider: Address,
+        sequence_number: u64,
+        user_random_number: [u8; 32],
+        provider_revelation: [u8; 32],
+    ) -> Result<Option<U256>> {
+        let result: Result<U256, ContractError<Provider<Http>>> = self
+            .reveal_with_callback(
+                provider,
+                sequence_number,
+                user_random_number,
+                provider_revelation,
+            )
+            .estimate_gas()
+            .await;
+
+        match result {
+            Ok(gas) => Ok(Some(gas)),
+            Err(e) => match e {
+                ContractError::ProviderError { e } => Err(anyhow!(e)),
+                _ => {
+                    tracing::info!(
+                        sequence_number = sequence_number,
+                        "Gas estimation failed. error: {:?}",
+                        e
+                    );
+                    Ok(None)
+                }
+            },
+        }
+    }
 }
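The tri-state contract of `estimate_reveal_with_callback_gas` above (Ok(Some(gas)) when the simulation succeeds, Ok(None) when the call would revert, Err on provider failure) is what lets a caller distinguish "skip this request" from "retry later". A minimal sketch of how a consumer might branch on it — `submit_reveal` and the gas-doubling policy are hypothetical stand-ins for illustration, not code from this diff:

```rust
// Hypothetical caller sketch; `submit_reveal` is an assumed stand-in, not a repo function.
use anyhow::Result;
use ethers::types::{Address, U256};

async fn maybe_reveal<R: EntropyReader>(
    reader: &R,
    provider: Address,
    sequence_number: u64,
    user_random_number: [u8; 32],
    provider_revelation: [u8; 32],
) -> Result<()> {
    match reader
        .estimate_reveal_with_callback_gas(
            provider,
            sequence_number,
            user_random_number,
            provider_revelation,
        )
        .await
    {
        // Simulation succeeded: submit the reveal, padding the estimate (assumed policy).
        Ok(Some(gas)) => submit_reveal(sequence_number, gas * 2).await,
        // Simulation reverted (e.g. the request was already fulfilled): skip quietly.
        Ok(None) => Ok(()),
        // RPC/provider failure: propagate so the caller can retry later.
        Err(e) => Err(e),
    }
}
```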
@@ -4,6 +4,7 @@ use {
     ethers::types::{
         Address,
         BlockNumber as EthersBlockNumber,
+        U256,
     },
 };
 

@@ -32,6 +33,13 @@ impl Into<EthersBlockNumber> for BlockStatus {
     }
 }
 
+#[derive(Clone)]
+pub struct RequestedWithCallbackEvent {
+    pub sequence_number: u64,
+    pub user_random_number: [u8; 32],
+    pub provider_address: Address,
+}
+
 /// EntropyReader is the read-only interface of the Entropy contract.
 #[async_trait]
 pub trait EntropyReader: Send + Sync {

@@ -42,6 +50,22 @@ pub trait EntropyReader: Send + Sync {
         -> Result<Option<Request>>;
 
     async fn get_block_number(&self, confirmed_block_status: BlockStatus) -> Result<BlockNumber>;
+
+    async fn get_request_with_callback_events(
+        &self,
+        from_block: BlockNumber,
+        to_block: BlockNumber,
+    ) -> Result<Vec<RequestedWithCallbackEvent>>;
+
+    /// Simulate a reveal with callback. Returns Some(gas) if the estimation was successful.
+    /// Returns None otherwise. Returns an error if the gas could not be estimated.
+    async fn estimate_reveal_with_callback_gas(
+        &self,
+        provider: Address,
+        sequence_number: u64,
+        user_random_number: [u8; 32],
+        provider_revelation: [u8; 32],
+    ) -> Result<Option<U256>>;
 }
 
 /// An in-flight request stored in the contract.

@@ -68,7 +92,10 @@ pub mod mock {
         },
         anyhow::Result,
         axum::async_trait,
-        ethers::types::Address,
+        ethers::types::{
+            Address,
+            U256,
+        },
         std::sync::RwLock,
     };
 

@@ -147,5 +174,23 @@ pub mod mock {
         ) -> Result<BlockNumber> {
             Ok(*self.block_number.read().unwrap())
         }
+
+        async fn get_request_with_callback_events(
+            &self,
+            _from_block: BlockNumber,
+            _to_block: BlockNumber,
+        ) -> Result<Vec<super::RequestedWithCallbackEvent>> {
+            Ok(vec![])
+        }
+
+        async fn estimate_reveal_with_callback_gas(
+            &self,
+            provider: Address,
+            sequence_number: u64,
+            user_random_number: [u8; 32],
+            provider_revelation: [u8; 32],
+        ) -> Result<Option<U256>> {
+            Ok(Some(U256::from(5)))
+        }
     }
 }
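Together, the new `get_request_with_callback_events` method and the existing `get_block_number` are enough to drive a simple polling scan over confirmed blocks. A rough sketch of such a consumer — the window-advance logic is an assumption about usage, not code from this change:

```rust
// Illustrative polling sketch; not part of this diff.
use anyhow::Result;

async fn scan_window<R: EntropyReader>(
    reader: &R,
    status: BlockStatus,
    last_seen: BlockNumber,
) -> Result<BlockNumber> {
    // Only scan up to the block height the chain considers confirmed.
    let latest = reader.get_block_number(status).await?;
    if latest <= last_seen {
        return Ok(last_seen);
    }
    // Fetch every RequestedWithCallback event in the (last_seen, latest] window.
    for ev in reader
        .get_request_with_callback_events(last_seen + 1, latest)
        .await?
    {
        tracing::info!(sequence_number = ev.sequence_number, "callback request found");
    }
    Ok(latest)
}
```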
@ -0,0 +1,228 @@
|
||||||
|
use {
|
||||||
|
crate::{
|
||||||
|
api::{
|
||||||
|
self,
|
||||||
|
BlockchainState,
|
||||||
|
ChainId,
|
||||||
|
},
|
||||||
|
chain::ethereum::PythContract,
|
||||||
|
command::register_provider::CommitmentMetadata,
|
||||||
|
config::{
|
||||||
|
Commitment,
|
||||||
|
Config,
|
||||||
|
ProviderConfig,
|
||||||
|
RunOptions,
|
||||||
|
},
|
||||||
|
keeper,
|
||||||
|
state::{
|
||||||
|
HashChainState,
|
||||||
|
PebbleHashChain,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
anyhow::{
|
||||||
|
anyhow,
|
||||||
|
Error,
|
||||||
|
Result,
|
||||||
|
},
|
||||||
|
axum::Router,
|
||||||
|
std::{
|
||||||
|
collections::HashMap,
|
||||||
|
net::SocketAddr,
|
||||||
|
sync::Arc,
|
||||||
|
},
|
||||||
|
tokio::{
|
||||||
|
spawn,
|
||||||
|
sync::watch,
|
||||||
|
},
|
||||||
|
tower_http::cors::CorsLayer,
|
||||||
|
utoipa::OpenApi,
|
||||||
|
utoipa_swagger_ui::SwaggerUi,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub async fn run_api(
|
||||||
|
socket_addr: SocketAddr,
|
||||||
|
chains: HashMap<String, api::BlockchainState>,
|
||||||
|
mut rx_exit: watch::Receiver<bool>,
|
||||||
|
) -> Result<()> {
|
||||||
|
#[derive(OpenApi)]
|
||||||
|
#[openapi(
|
||||||
|
paths(
|
||||||
|
crate::api::revelation,
|
||||||
|
crate::api::chain_ids,
|
||||||
|
),
|
||||||
|
components(
|
||||||
|
schemas(
|
||||||
|
crate::api::GetRandomValueResponse,
|
||||||
|
crate::api::Blob,
|
||||||
|
crate::api::BinaryEncoding,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
tags(
|
||||||
|
(name = "fortuna", description = "Random number service for the Pyth Entropy protocol")
|
||||||
|
)
|
||||||
|
)]
|
||||||
|
struct ApiDoc;
|
||||||
|
|
||||||
|
let metrics_registry = api::Metrics::new();
|
||||||
|
let api_state = api::ApiState {
|
||||||
|
chains: Arc::new(chains),
|
||||||
|
metrics: Arc::new(metrics_registry),
|
||||||
|
};
|
||||||
|
|
||||||
|
// Initialize Axum Router. Note the type here is a `Router<State>` due to the use of the
|
||||||
|
// `with_state` method which replaces `Body` with `State` in the type signature.
|
||||||
|
let app = Router::new();
|
||||||
|
let app = app
|
||||||
|
.merge(SwaggerUi::new("/docs").url("/docs/openapi.json", ApiDoc::openapi()))
|
||||||
|
.merge(api::routes(api_state))
|
||||||
|
// Permissive CORS layer to allow all origins
|
||||||
|
.layer(CorsLayer::permissive());
|
||||||
|
|
||||||
|
tracing::info!("Starting server on: {:?}", &socket_addr);
|
||||||
|
// Binds the axum's server to the configured address and port. This is a blocking call and will
|
||||||
|
// not return until the server is shutdown.
|
||||||
|
axum::Server::try_bind(&socket_addr)?
|
||||||
|
.serve(app.into_make_service())
|
||||||
|
.with_graceful_shutdown(async {
|
||||||
|
// It can return an error or an Ok(()). In both cases, we would shut down.
|
||||||
|
// As Ok(()) means, exit signal (ctrl + c) was received.
|
||||||
|
// And Err(e) means, the sender was dropped which should not be the case.
|
||||||
|
let _ = rx_exit.changed().await;
|
||||||
|
|
||||||
|
tracing::info!("Shutting down RPC server...");
|
||||||
|
})
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
+pub async fn run_keeper(
+    chains: HashMap<String, api::BlockchainState>,
+    config: Config,
+    private_key: String,
+) -> Result<()> {
+    let mut handles = Vec::new();
+    for (chain_id, chain_config) in chains {
+        let chain_eth_config = config
+            .chains
+            .get(&chain_id)
+            .expect("All chains should be present in the config file")
+            .clone();
+        let private_key = private_key.clone();
+        handles.push(spawn(keeper::run_keeper_threads(
+            private_key,
+            chain_eth_config,
+            chain_config.clone(),
+        )));
+    }
+
+    Ok(())
+}
+
+pub async fn run(opts: &RunOptions) -> Result<()> {
+    let config = Config::load(&opts.config.config)?;
+    let provider_config = opts
+        .provider_config
+        .provider_config
+        .as_ref()
+        .map(|path| ProviderConfig::load(&path).expect("Failed to load provider config"));
+    let secret = opts.randomness.load_secret()?;
+    let (tx_exit, rx_exit) = watch::channel(false);
+
+    let mut chains: HashMap<ChainId, BlockchainState> = HashMap::new();
+    for (chain_id, chain_config) in &config.chains {
+        let contract = Arc::new(PythContract::from_config(&chain_config)?);
+        let provider_chain_config = provider_config
+            .as_ref()
+            .and_then(|c| c.get_chain_config(chain_id));
+        let mut provider_commitments = provider_chain_config
+            .as_ref()
+            .map(|c| c.get_sorted_commitments())
+            .unwrap_or_else(|| Vec::new());
+
+        let provider_info = contract.get_provider_info(opts.provider).call().await?;
+        let latest_metadata =
+            bincode::deserialize::<CommitmentMetadata>(&provider_info.commitment_metadata)
+                .map_err(|e| {
+                    anyhow!(
+                        "Chain: {} - Failed to deserialize commitment metadata: {}",
+                        &chain_id,
+                        e
+                    )
+                })?;
+
+        provider_commitments.push(Commitment {
+            seed: latest_metadata.seed,
+            chain_length: latest_metadata.chain_length,
+            original_commitment_sequence_number: provider_info.original_commitment_sequence_number,
+        });
+
+        // TODO: we may want to load the hash chain in a lazy/fault-tolerant way. If there are many blockchains,
+        // then it's more likely that some RPC fails. We should tolerate these faults and generate the hash chain
+        // later when a user request comes in for that chain.
+
+        let mut offsets = Vec::<usize>::new();
+        let mut hash_chains = Vec::<PebbleHashChain>::new();
+
+        for commitment in &provider_commitments {
+            let offset = commitment.original_commitment_sequence_number.try_into()?;
+            offsets.push(offset);
+
+            let pebble_hash_chain = PebbleHashChain::from_config(
+                &secret,
+                &chain_id,
+                &opts.provider,
+                &chain_config.contract_addr,
+                &commitment.seed,
+                commitment.chain_length,
+            )?;
+            hash_chains.push(pebble_hash_chain);
+        }
+
+        let chain_state = HashChainState {
+            offsets,
+            hash_chains,
+        };
+
+        if chain_state.reveal(provider_info.original_commitment_sequence_number)?
+            != provider_info.original_commitment
+        {
+            return Err(anyhow!("The root of the generated hash chain for chain id {} does not match the commitment. Are the secret and chain length configured correctly?", &chain_id).into());
+        } else {
+            tracing::info!("Root of chain id {} matches commitment", &chain_id);
+        }
+
+        let state = api::BlockchainState {
+            id: chain_id.clone(),
+            state: Arc::new(chain_state),
+            contract,
+            provider_address: opts.provider,
+            reveal_delay_blocks: chain_config.reveal_delay_blocks,
+            confirmed_block_status: chain_config.confirmed_block_status,
+        };
+
+        chains.insert(chain_id.clone(), state);
+    }
+
+    // Listen for Ctrl+C so we can set the exit flag and wait for a graceful shutdown.
+    spawn(async move {
+        tracing::info!("Registered shutdown signal handler...");
+        tokio::signal::ctrl_c().await.unwrap();
+        tracing::info!("Shut down signal received, waiting for tasks...");
+        // No need to handle the error here, as it can only occur when all the
+        // receivers have been dropped, and that's exactly what we want.
+        tx_exit.send(true)?;
+
+        Ok::<(), Error>(())
+    });
+
+    if let Some(keeper_private_key) = opts.load_keeper_private_key()? {
+        spawn(run_keeper(chains.clone(), config, keeper_private_key));
+    }
+
+    run_api(opts.addr.clone(), chains, rx_exit).await?;
+
+    Ok(())
+}
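The root check in `run` above is the core hash-chain invariant: regenerating the chain from the configured secret must reproduce the provider's on-chain commitment. Below is a minimal sketch of that idea under stated assumptions; the `ToyHashChain` type and the use of SHA-256 are stand-ins invented here, not the actual `PebbleHashChain`, which also mixes the chain id, provider address, and contract address into the seed derivation.

```rust
// A minimal sketch, NOT the real PebbleHashChain: build a chain by
// repeated hashing, commit to the last element, and reveal backwards.
use sha2::{Digest, Sha256};

struct ToyHashChain {
    // chain[0] is the seed, chain[len - 1] is the public commitment.
    chain: Vec<[u8; 32]>,
}

impl ToyHashChain {
    fn new(seed: [u8; 32], length: usize) -> Self {
        let mut chain = Vec::with_capacity(length);
        chain.push(seed);
        for i in 1..length {
            chain.push(Sha256::digest(chain[i - 1]).into());
        }
        Self { chain }
    }

    /// reveal(i) walks back from the committed end, so revealing item i
    /// never discloses any later item j > i ahead of time.
    fn reveal(&self, i: usize) -> [u8; 32] {
        self.chain[self.chain.len() - 1 - i]
    }
}

fn main() {
    let chain = ToyHashChain::new([7u8; 32], 100);
    // reveal(0) must equal the stored commitment, mirroring the
    // `chain_state.reveal(...) != provider_info.original_commitment` check.
    assert_eq!(chain.reveal(0), chain.chain[99]);
}
```

Revealing backwards is what makes the commitment binding: anyone can hash a revealed value forward and verify it lands on a previously revealed one.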
@@ -16,7 +16,10 @@ use {
             PebbleHashChain,
         },
     },
-    anyhow::Result,
+    anyhow::{
+        anyhow,
+        Result,
+    },
    ethers::{
        abi::Bytes as AbiBytes,
        signers::{

@@ -66,7 +69,14 @@ pub async fn setup_provider(opts: &SetupProviderOptions) -> Result<()> {
             register = true;
         } else {
             let metadata =
-                bincode::deserialize::<CommitmentMetadata>(&provider_info.commitment_metadata)?;
+                bincode::deserialize::<CommitmentMetadata>(&provider_info.commitment_metadata)
+                    .map_err(|e| {
+                        anyhow!(
+                            "Chain: {} - Failed to deserialize commitment metadata: {}",
+                            &chain_id,
+                            e
+                        )
+                    })?;
+
             let hash_chain = PebbleHashChain::from_config(
                 &secret,

@@ -74,7 +84,7 @@ pub async fn setup_provider(opts: &SetupProviderOptions) -> Result<()> {
                 &provider_address,
                 &chain_config.contract_addr,
                 &metadata.seed,
-                metadata.chain_length,
+                opts.randomness.chain_length,
             )?;
             let chain_state = HashChainState {
                 offsets: vec![provider_info

@@ -105,7 +115,8 @@ pub async fn setup_provider(opts: &SetupProviderOptions) -> Result<()> {
                 fee: opts.fee,
                 uri,
             })
-            .await?;
+            .await
+            .map_err(|e| anyhow!("Chain: {} - Failed to register provider: {}", &chain_id, e))?;
             tracing::info!("{}: registered", &chain_id);
         } else {
             if provider_info.fee_in_wei != opts.fee {
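Both hunks above apply the same error-context pattern: `map_err` plus `anyhow!` wraps a low-level error with the chain id so failures in a multi-chain loop stay attributable. A small sketch of just that pattern; the `parse_len` helper and the chain id are hypothetical, invented for illustration.

```rust
use anyhow::{anyhow, Result};

// Hypothetical helper: attach the chain id to a low-level parse error,
// mirroring the map_err + anyhow! pattern in the hunks above.
fn parse_len(chain_id: &str, raw: &str) -> Result<u64> {
    raw.parse::<u64>()
        .map_err(|e| anyhow!("Chain: {} - Failed to parse chain length: {}", chain_id, e))
}

fn main() {
    // Prints: "Chain: my-test-chain - Failed to parse chain length: ..."
    if let Err(e) = parse_len("my-test-chain", "not-a-number") {
        eprintln!("{}", e);
    }
}
```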
@@ -18,7 +18,10 @@ use {
        Args,
        Parser,
    },
-    ethers::types::Address,
+    ethers::types::{
+        Address,
+        U256,
+    },
    std::{
        collections::HashMap,
        fs,

@@ -94,7 +97,7 @@ pub struct RandomnessOptions {
    /// The length of the hash chain to generate.
    #[arg(long = "chain-length")]
    #[arg(env = "FORTUNA_CHAIN_LENGTH")]
-    #[arg(default_value = "10000")]
+    #[arg(default_value = "100000")]
    pub chain_length: u64,
}

@@ -131,6 +134,9 @@ pub struct EthereumConfig {
    /// URL of a Geth RPC endpoint to use for interacting with the blockchain.
    pub geth_rpc_addr: String,
 
+    /// URL of a Geth RPC wss endpoint to use for subscribing to blockchain events.
+    pub geth_rpc_wss: Option<String>,
+
    /// Address of a Pyth Randomness contract to interact with.
    pub contract_addr: Address,

@@ -148,4 +154,61 @@ pub struct EthereumConfig {
    /// For example, Finalized, Safe, Latest
    #[serde(default)]
    pub confirmed_block_status: BlockStatus,
+
+    /// The gas limit to use for entropy callback transactions.
+    pub gas_limit: U256,
+}
+
+#[derive(Args, Clone, Debug)]
+#[command(next_help_heading = "Provider Config Options")]
+#[group(id = "ProviderConfig")]
+pub struct ProviderConfigOptions {
+    #[arg(long = "provider-config")]
+    #[arg(env = "FORTUNA_PROVIDER_CONFIG")]
+    pub provider_config: Option<String>,
+}
+
+#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
+pub struct ProviderConfig {
+    pub chains: HashMap<ChainId, ProviderChainConfig>,
+}
+
+impl ProviderConfig {
+    pub fn load(path: &str) -> Result<ProviderConfig> {
+        // Open and read the YAML file
+        let yaml_content = fs::read_to_string(path)?;
+        let config: ProviderConfig = serde_yaml::from_str(&yaml_content)?;
+        Ok(config)
+    }
+
+    /// Get the provider chain config. The method returns an `Option` because we may not
+    /// have any past commitments for a chain (for example, a new chain).
+    pub fn get_chain_config(&self, chain_id: &ChainId) -> Option<ProviderChainConfig> {
+        self.chains.get(chain_id).map(|x| x.clone())
+    }
+}
+
+#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
+pub struct ProviderChainConfig {
+    commitments: Vec<Commitment>,
+}
+
+impl ProviderChainConfig {
+    /// Returns a clone of the commitments in sorted order.
+    /// `HashChainState` requires offsets to be in order.
+    pub fn get_sorted_commitments(&self) -> Vec<Commitment> {
+        let mut sorted_commitments = self.commitments.clone();
+        sorted_commitments.sort_by(|c1, c2| {
+            c1.original_commitment_sequence_number
+                .cmp(&c2.original_commitment_sequence_number)
+        });
+        sorted_commitments
+    }
+}
+
+#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
+pub struct Commitment {
+    pub seed: [u8; 32],
+    pub chain_length: u64,
+    pub original_commitment_sequence_number: u64,
 }
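`ProviderConfig::load` above reads a YAML file into the `chains` map. The sketch below shows what such a file might look like, round-tripped through `serde_yaml` with trimmed-down copies of the structs; the chain id and all values are hypothetical, not taken from any real deployment.

```rust
use serde::Deserialize;
use std::collections::HashMap;

// Trimmed-down copies of the structs from the hunk above.
#[derive(Debug, Deserialize)]
struct Commitment {
    seed: [u8; 32],
    chain_length: u64,
    original_commitment_sequence_number: u64,
}

#[derive(Debug, Deserialize)]
struct ProviderChainConfig {
    commitments: Vec<Commitment>,
}

#[derive(Debug, Deserialize)]
struct ProviderConfig {
    chains: HashMap<String, ProviderChainConfig>,
}

fn main() -> anyhow::Result<()> {
    // Hypothetical config; the seed is a 32-byte array in YAML form.
    let yaml = r#"
chains:
  my-test-chain:
    commitments:
      - seed: [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
        chain_length: 100000
        original_commitment_sequence_number: 0
"#;
    let config: ProviderConfig = serde_yaml::from_str(yaml)?;
    assert_eq!(config.chains["my-test-chain"].commitments.len(), 1);
    Ok(())
}
```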
@@ -0,0 +1,55 @@
+use {
+    crate::config::{
+        ConfigOptions,
+        ProviderConfigOptions,
+        RandomnessOptions,
+    },
+    anyhow::Result,
+    clap::Args,
+    ethers::types::Address,
+    std::{
+        fs,
+        net::SocketAddr,
+    },
+};
+
+/// Run the webservice
+#[derive(Args, Clone, Debug)]
+pub struct RunOptions {
+    #[command(flatten)]
+    pub config: ConfigOptions,
+
+    #[command(flatten)]
+    pub provider_config: ProviderConfigOptions,
+
+    #[command(flatten)]
+    pub randomness: RandomnessOptions,
+
+    /// Address and port the HTTP server will bind to.
+    #[arg(long = "rpc-listen-addr")]
+    #[arg(default_value = super::DEFAULT_RPC_ADDR)]
+    #[arg(env = "RPC_ADDR")]
+    pub addr: SocketAddr,
+
+    /// The public key of the provider whose requests the server will respond to.
+    #[arg(long = "provider")]
+    #[arg(env = "FORTUNA_PROVIDER")]
+    pub provider: Address,
+
+    /// If provided, the keeper will run alongside the Fortuna API service.
+    /// It should be a path to a file containing a 32-byte (64 char) hex encoded Ethereum private key.
+    /// This key is required to submit transactions for entropy callback requests.
+    /// This key should not be a registered provider.
+    #[arg(long = "keeper-private-key")]
+    #[arg(env = "KEEPER_PRIVATE_KEY")]
+    pub keeper_private_key_file: Option<String>,
+}
+
+impl RunOptions {
+    pub fn load_keeper_private_key(&self) -> Result<Option<String>> {
+        if let Some(ref keeper_private_key_file) = self.keeper_private_key_file {
+            return Ok(Some(fs::read_to_string(keeper_private_key_file)?));
+        }
+        return Ok(None);
+    }
+}
@@ -0,0 +1,487 @@
+use {
+    crate::{
+        api::{
+            self,
+            BlockchainState,
+        },
+        chain::{
+            ethereum::SignablePythContract,
+            reader::{
+                BlockNumber,
+                RequestedWithCallbackEvent,
+            },
+        },
+        config::EthereumConfig,
+    },
+    anyhow::{
+        anyhow,
+        Result,
+    },
+    ethers::{
+        contract::ContractError,
+        providers::{
+            Middleware,
+            Provider,
+            Ws,
+        },
+        types::U256,
+    },
+    futures::StreamExt,
+    std::sync::Arc,
+    tokio::{
+        spawn,
+        sync::mpsc,
+        time::{
+            self,
+            Duration,
+        },
+    },
+    tracing::{
+        self,
+        Instrument,
+    },
+};
+
+#[derive(Debug)]
+pub struct BlockRange {
+    pub from: BlockNumber,
+    pub to: BlockNumber,
+}
+
+/// How much to wait before retrying in case of an RPC error
+const RETRY_INTERVAL: Duration = Duration::from_secs(5);
+/// How many blocks to look back for events that might be missed when starting the keeper
+const BACKLOG_RANGE: u64 = 1000;
+/// How many blocks to fetch events for in a single rpc call
+const BLOCK_BATCH_SIZE: u64 = 100;
+/// How much to wait before polling the next latest block
+const POLL_INTERVAL: Duration = Duration::from_secs(5);
+
+
+/// Get the latest safe block number for the chain. Retry internally if there is an error.
+async fn get_latest_safe_block(chain_state: &BlockchainState) -> BlockNumber {
+    loop {
+        match chain_state
+            .contract
+            .get_block_number(chain_state.confirmed_block_status)
+            .await
+        {
+            Ok(latest_confirmed_block) => {
+                tracing::info!(
+                    "Fetched latest safe block {}",
+                    latest_confirmed_block - chain_state.reveal_delay_blocks
+                );
+                return latest_confirmed_block - chain_state.reveal_delay_blocks;
+            }
+            Err(e) => {
+                tracing::error!("Error while getting block number. error: {:?}", e);
+                time::sleep(RETRY_INTERVAL).await;
+            }
+        }
+    }
+}
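A quick back-of-envelope on how those constants interact, assuming the values above: the startup backlog of `BACKLOG_RANGE` blocks is fetched in `BLOCK_BATCH_SIZE` chunks, so each chain costs about ten event-fetch RPC calls at boot.

```rust
// Hedged arithmetic sketch: roughly how many batches cover the backlog.
fn main() {
    const BACKLOG_RANGE: u64 = 1000;
    const BLOCK_BATCH_SIZE: u64 = 100;
    // Ceiling division: ceil(1000 / 100) = 10 batches.
    let batches = (BACKLOG_RANGE + BLOCK_BATCH_SIZE - 1) / BLOCK_BATCH_SIZE;
    assert_eq!(batches, 10);
    println!("{batches} event-fetch calls to cover the backlog");
}
```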
+/// Run threads to handle events for the last `BACKLOG_RANGE` blocks, watch for new blocks and
+/// handle any events for the new blocks.
+#[tracing::instrument(name="keeper", skip_all, fields(chain_id=chain_state.id))]
+pub async fn run_keeper_threads(
+    private_key: String,
+    chain_eth_config: EthereumConfig,
+    chain_state: BlockchainState,
+) {
+    tracing::info!("starting keeper");
+    let latest_safe_block = get_latest_safe_block(&chain_state).in_current_span().await;
+    tracing::info!("latest safe block: {}", &latest_safe_block);
+
+    let contract = Arc::new(
+        SignablePythContract::from_config(&chain_eth_config, &private_key)
+            .await
+            .expect("Chain config should be valid"),
+    );
+
+    // Spawn a thread to handle the events from the last BACKLOG_RANGE blocks.
+    spawn(
+        process_backlog(
+            BlockRange {
+                from: latest_safe_block.saturating_sub(BACKLOG_RANGE),
+                to: latest_safe_block,
+            },
+            contract.clone(),
+            chain_eth_config.gas_limit,
+            chain_state.clone(),
+        )
+        .in_current_span(),
+    );
+
+    let (tx, rx) = mpsc::channel::<BlockRange>(1000);
+    // Spawn a thread to watch for new blocks and send the ranges of blocks for which events
+    // have not been handled to the `tx` channel.
+    spawn(
+        watch_blocks_wrapper(
+            chain_state.clone(),
+            latest_safe_block,
+            tx,
+            chain_eth_config.geth_rpc_wss.clone(),
+        )
+        .in_current_span(),
+    );
+    // Spawn a thread that listens for block ranges on the `rx` channel and processes the events for those blocks.
+    spawn(
+        process_new_blocks(
+            chain_state.clone(),
+            rx,
+            Arc::clone(&contract),
+            chain_eth_config.gas_limit,
+        )
+        .in_current_span(),
+    );
+}
+/// Process an event for a chain. It estimates the gas for the reveal with callback and
+/// submits the transaction if the gas estimate is below the gas limit.
+/// It will return an Error if the gas estimation failed with a provider error or if the
+/// reveal with callback failed with a provider error.
+pub async fn process_event(
+    event: RequestedWithCallbackEvent,
+    chain_config: &BlockchainState,
+    contract: &Arc<SignablePythContract>,
+    gas_limit: U256,
+) -> Result<()> {
+    if chain_config.provider_address != event.provider_address {
+        return Ok(());
+    }
+    let provider_revelation = match chain_config.state.reveal(event.sequence_number) {
+        Ok(result) => result,
+        Err(e) => {
+            tracing::error!(
+                sequence_number = &event.sequence_number,
+                "Error while revealing with error: {:?}",
+                e
+            );
+            return Ok(());
+        }
+    };
+
+    let gas_estimate_res = chain_config
+        .contract
+        .estimate_reveal_with_callback_gas(
+            event.provider_address,
+            event.sequence_number,
+            event.user_random_number,
+            provider_revelation,
+        )
+        .in_current_span()
+        .await;
+
+    match gas_estimate_res {
+        Ok(gas_estimate_option) => match gas_estimate_option {
+            Some(gas_estimate) => {
+                // Pad the gas estimate by 33%
+                let (gas_estimate, _) = gas_estimate
+                    .saturating_mul(U256::from(4))
+                    .div_mod(U256::from(3));
+
+                if gas_estimate > gas_limit {
+                    tracing::error!(
+                        sequence_number = &event.sequence_number,
+                        "Gas estimate for reveal with callback is higher than the gas limit"
+                    );
+                    return Ok(());
+                }
+
+                let contract_call = contract
+                    .reveal_with_callback(
+                        event.provider_address,
+                        event.sequence_number,
+                        event.user_random_number,
+                        provider_revelation,
+                    )
+                    .gas(gas_estimate);
+
+                let res = contract_call.send().await;
+
+                let pending_tx = match res {
+                    Ok(pending_tx) => pending_tx,
+                    Err(e) => match e {
+                        // If there is a provider error, we weren't able to send the transaction.
+                        // We will return an error so that the caller can decide what to do (retry).
+                        ContractError::ProviderError { e } => return Err(e.into()),
+                        // For all other errors, it is likely we will never be able to reveal.
+                        // We return Ok(()) to signal that we have processed this reveal and
+                        // concluded that it's OK not to reveal.
+                        _ => {
+                            tracing::error!(
+                                sequence_number = &event.sequence_number,
+                                "Error while revealing with error: {:?}",
+                                e
+                            );
+                            return Ok(());
+                        }
+                    },
+                };
+
+                match pending_tx.await {
+                    Ok(res) => {
+                        tracing::info!(
+                            sequence_number = &event.sequence_number,
+                            "Revealed with res: {:?}",
+                            res
+                        );
+                        Ok(())
+                    }
+                    Err(e) => {
+                        tracing::error!(
+                            sequence_number = &event.sequence_number,
+                            "Error while revealing with error: {:?}",
+                            e
+                        );
+                        Err(e.into())
+                    }
+                }
+            }
+            None => {
+                tracing::info!(
+                    sequence_number = &event.sequence_number,
+                    "Not processing event"
+                );
+                Ok(())
+            }
+        },
+        Err(e) => {
+            tracing::error!(
+                sequence_number = &event.sequence_number,
+                "Error while simulating reveal with error: {:?}",
+                e
+            );
+            Err(e)
+        }
+    }
+}
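The padding step in `process_event` above stays in integer arithmetic: multiplying the estimate by 4 and dividing by 3 adds roughly 33%. A sketch of just that step using `ethers::types::U256`; the `pad_gas` helper is invented here for illustration.

```rust
use ethers::types::U256;

// Hypothetical helper isolating the padding arithmetic from above.
// div_mod returns (quotient, remainder); the remainder is discarded.
fn pad_gas(estimate: U256) -> U256 {
    let (padded, _remainder) = estimate
        .saturating_mul(U256::from(4))
        .div_mod(U256::from(3));
    padded
}

fn main() {
    // 300_000 * 4 / 3 = 400_000: exactly a one-third pad.
    assert_eq!(pad_gas(U256::from(300_000u64)), U256::from(400_000u64));
    // 100 * 4 = 400; 400 / 3 = 133 remainder 1, so the pad rounds down.
    assert_eq!(pad_gas(U256::from(100u64)), U256::from(133u64));
}
```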
+
+/// Process a range of blocks in batches. It calls the `process_single_block_batch` method for each batch.
+#[tracing::instrument(skip_all, fields(range_from_block=block_range.from, range_to_block=block_range.to))]
+pub async fn process_block_range(
+    block_range: BlockRange,
+    contract: Arc<SignablePythContract>,
+    gas_limit: U256,
+    chain_state: api::BlockchainState,
+) {
+    let BlockRange {
+        from: first_block,
+        to: last_block,
+    } = block_range;
+    let mut current_block = first_block;
+    while current_block <= last_block {
+        let mut to_block = current_block + BLOCK_BATCH_SIZE;
+        if to_block > last_block {
+            to_block = last_block;
+        }
+
+        process_single_block_batch(
+            BlockRange {
+                from: current_block,
+                to: to_block,
+            },
+            contract.clone(),
+            gas_limit,
+            chain_state.clone(),
+        )
+        .in_current_span()
+        .await;
+
+        current_block = to_block + 1;
+    }
+}
+
+/// Process a batch of blocks for a chain. It will fetch events for all the blocks in a single call for the provided batch
+/// and then try to process them one by one. If the process fails, it will retry indefinitely.
+#[tracing::instrument(name="batch", skip_all, fields(batch_from_block=block_range.from, batch_to_block=block_range.to))]
+pub async fn process_single_block_batch(
+    block_range: BlockRange,
+    contract: Arc<SignablePythContract>,
+    gas_limit: U256,
+    chain_state: api::BlockchainState,
+) {
+    loop {
+        let events_res = chain_state
+            .contract
+            .get_request_with_callback_events(block_range.from, block_range.to)
+            .await;
+
+        match events_res {
+            Ok(events) => {
+                tracing::info!(num_of_events = &events.len(), "Processing",);
+                for event in &events {
+                    tracing::info!(sequence_number = &event.sequence_number, "Processing event",);
+                    while let Err(e) =
+                        process_event(event.clone(), &chain_state, &contract, gas_limit)
+                            .in_current_span()
+                            .await
+                    {
+                        tracing::error!(
+                            sequence_number = &event.sequence_number,
+                            "Error while processing event. Waiting for {} seconds before retry. error: {:?}",
+                            RETRY_INTERVAL.as_secs(),
+                            e
+                        );
+                        time::sleep(RETRY_INTERVAL).await;
+                    }
+                    tracing::info!(sequence_number = &event.sequence_number, "Processed event",);
+                }
+                tracing::info!(num_of_events = &events.len(), "Processed",);
+                break;
+            }
+            Err(e) => {
+                tracing::error!(
+                    "Error while getting events. Waiting for {} seconds before retry. error: {:?}",
+                    RETRY_INTERVAL.as_secs(),
+                    e
+                );
+                time::sleep(RETRY_INTERVAL).await;
+            }
+        }
+    }
+}
+
+/// Wrapper for the `watch_blocks` method. If there was an error while watching, it will retry after a delay.
+/// It retries indefinitely.
+#[tracing::instrument(name="watch_blocks", skip_all, fields(initial_safe_block=latest_safe_block))]
+pub async fn watch_blocks_wrapper(
+    chain_state: BlockchainState,
+    latest_safe_block: BlockNumber,
+    tx: mpsc::Sender<BlockRange>,
+    geth_rpc_wss: Option<String>,
+) {
+    let mut last_safe_block_processed = latest_safe_block;
+    loop {
+        if let Err(e) = watch_blocks(
+            chain_state.clone(),
+            &mut last_safe_block_processed,
+            tx.clone(),
+            geth_rpc_wss.clone(),
+        )
+        .in_current_span()
+        .await
+        {
+            tracing::error!("Error while watching blocks. error: {:?}", e);
+            time::sleep(RETRY_INTERVAL).await;
+        }
+    }
+}
+
+/// Watch for new blocks and send the ranges of blocks for which events have not been handled to the `tx` channel.
+/// We subscribe to new blocks instead of events: missing some blocks is fine, because we send
+/// block ranges to the `tx` channel and the next range still covers them. Had we subscribed to
+/// events directly, we could have missed some and would never know about it.
+pub async fn watch_blocks(
+    chain_state: BlockchainState,
+    last_safe_block_processed: &mut BlockNumber,
+    tx: mpsc::Sender<BlockRange>,
+    geth_rpc_wss: Option<String>,
+) -> Result<()> {
+    tracing::info!("Watching blocks to handle new events");
+
+    let provider_option = match geth_rpc_wss {
+        Some(wss) => Some(match Provider::<Ws>::connect(wss.clone()).await {
+            Ok(provider) => provider,
+            Err(e) => {
+                tracing::error!("Error while connecting to wss: {}. error: {:?}", wss, e);
+                return Err(e.into());
+            }
+        }),
+        None => {
+            tracing::info!("No wss provided");
+            None
+        }
+    };
+
+    let mut stream_option = match provider_option {
+        Some(ref provider) => Some(match provider.subscribe_blocks().await {
+            Ok(client) => client,
+            Err(e) => {
+                tracing::error!("Error while subscribing to blocks. error {:?}", e);
+                return Err(e.into());
+            }
+        }),
+        None => None,
+    };
+
+    loop {
+        match stream_option {
+            Some(ref mut stream) => {
+                if let None = stream.next().await {
+                    tracing::error!("Error blocks subscription stream ended");
+                    return Err(anyhow!("Error blocks subscription stream ended"));
+                }
+            }
+            None => {
+                time::sleep(POLL_INTERVAL).await;
+            }
+        }
+
+        let latest_safe_block = get_latest_safe_block(&chain_state).in_current_span().await;
+        if latest_safe_block > *last_safe_block_processed {
+            match tx
+                .send(BlockRange {
+                    from: *last_safe_block_processed + 1,
+                    to: latest_safe_block,
+                })
+                .await
+            {
+                Ok(_) => {
+                    tracing::info!(
+                        from_block = *last_safe_block_processed + 1,
+                        to_block = &latest_safe_block,
+                        "Block range sent to handle events",
+                    );
+                    *last_safe_block_processed = latest_safe_block;
+                }
+                Err(e) => {
+                    tracing::error!(
+                        "Error while sending block range to handle events. These will be handled in next call. error: {:?}",
+                        e
+                    );
+                }
+            };
+        }
+    }
+}
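The reason ranges are resilient, as the doc comment above argues, is that each range starts at `last_safe_block_processed + 1` and that cursor only advances on a successful send. A small sketch of the invariant with hypothetical block numbers:

```rust
// Sketch: the next range always starts right after the last processed
// block, so blocks arriving between wake-ups are never skipped.
fn next_range(last_processed: u64, latest_safe: u64) -> Option<(u64, u64)> {
    (latest_safe > last_processed).then(|| (last_processed + 1, latest_safe))
}

fn main() {
    // One wake-up sees safe block 105; the next sees 142 after several
    // missed block notifications. Coverage of 101..=142 stays gapless.
    assert_eq!(next_range(100, 105), Some((101, 105)));
    assert_eq!(next_range(105, 142), Some((106, 142)));
    assert_eq!(next_range(142, 142), None); // nothing new yet
}
```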
+
+/// Waits on the `rx` channel to receive block ranges and then calls `process_block_range` to process them.
+#[tracing::instrument(skip_all)]
+pub async fn process_new_blocks(
+    chain_state: BlockchainState,
+    mut rx: mpsc::Receiver<BlockRange>,
+    contract: Arc<SignablePythContract>,
+    gas_limit: U256,
+) {
+    tracing::info!("Waiting for new block ranges to process");
+    loop {
+        if let Some(block_range) = rx.recv().await {
+            process_block_range(
+                block_range,
+                Arc::clone(&contract),
+                gas_limit,
+                chain_state.clone(),
+            )
+            .in_current_span()
+            .await;
+        }
+    }
+}
+
+/// Processes the backlog range for a chain.
+#[tracing::instrument(skip_all)]
+pub async fn process_backlog(
+    backlog_range: BlockRange,
+    contract: Arc<SignablePythContract>,
+    gas_limit: U256,
+    chain_state: BlockchainState,
+) {
+    tracing::info!("Processing backlog");
+    process_block_range(backlog_range, contract, gas_limit, chain_state)
+        .in_current_span()
+        .await;
+    tracing::info!("Backlog processed");
+}
@@ -11,6 +11,7 @@ pub mod api;
 pub mod chain;
 pub mod command;
 pub mod config;
+pub mod keeper;
 pub mod state;
 
 // Server TODO list:
@@ -1796,7 +1796,7 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
 
 [[package]]
 name = "hermes"
-version = "0.5.3"
+version = "0.5.9"
 dependencies = [
  "anyhow",
  "async-trait",

@@ -1839,6 +1839,7 @@ dependencies = [
  "solana-sdk",
  "strum",
  "tokio",
+ "tokio-stream",
  "tonic",
  "tonic-build",
  "tower-http",

@@ -3137,7 +3138,7 @@ dependencies = [
 
 [[package]]
 name = "pythnet-sdk"
-version = "2.0.0"
+version = "2.1.0"
 dependencies = [
  "bincode",
  "borsh 0.10.3",

@@ -5188,9 +5189,9 @@ dependencies = [
 
 [[package]]
 name = "termcolor"
-version = "1.4.1"
+version = "1.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
+checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755"
 dependencies = [
  "winapi-util",
 ]

@@ -5385,6 +5386,7 @@ dependencies = [
  "futures-core",
  "pin-project-lite",
  "tokio",
+ "tokio-util",
 ]
 
 [[package]]
@@ -1,6 +1,6 @@
 [package]
 name = "hermes"
-version = "0.5.3"
+version = "0.5.9"
 description = "Hermes is an agent that provides Verified Prices from the Pythnet Pyth Oracle."
 edition = "2021"
 

@@ -31,7 +31,7 @@ prometheus-client = { version = "0.21.2" }
 prost = { version = "0.12.1" }
 pyth-sdk = { version = "0.8.0" }
 pyth-sdk-solana = { version = "0.9.0" }
-pythnet-sdk = { path = "../pythnet/pythnet_sdk/", version = "2.0.0", features = ["strum"] }
+pythnet-sdk = { path = "../../pythnet/pythnet_sdk/", version = "2.0.0", features = ["strum"] }
 rand = { version = "0.8.5" }
 reqwest = { version = "0.11.14", features = ["blocking", "json"] }
 secp256k1 = { version = "0.27.0", features = ["rand", "recovery", "serde"] }

@@ -42,6 +42,7 @@ serde_wormhole = { git = "https://github.com/wormhole-foundation/wormhol
 sha3 = { version = "0.10.4" }
 strum = { version = "0.24.1", features = ["derive"] }
 tokio = { version = "1.26.0", features = ["full"] }
+tokio-stream = { version = "0.1.15", features = ["full"] }
 tonic = { version = "0.10.1", features = ["tls"] }
 tower-http = { version = "0.4.0", features = ["cors"] }
 tracing = { version = "0.1.37", features = ["log"] }
@@ -12,15 +12,15 @@ RUN rustup default nightly-2024-03-26
 
 # Build
 WORKDIR /src
-COPY hermes hermes
+COPY apps/hermes apps/hermes
 COPY pythnet/pythnet_sdk pythnet/pythnet_sdk
 
 
-WORKDIR /src/hermes
+WORKDIR /src/apps/hermes
 
 RUN --mount=type=cache,target=/root/.cargo/registry cargo build --release
 
 FROM rust:1.77.0
 
 # Copy artifacts from other images
-COPY --from=build /src/hermes/target/release/hermes /usr/local/bin/
+COPY --from=build /src/apps/hermes/target/release/hermes /usr/local/bin/
@@ -35,14 +35,14 @@ To set up and run a Hermes node, follow the steps below:
    ```
 4. **Build the project**: Navigate to the project directory and run the following command to build the project:
    ```bash
-   cd hermes
+   cd apps/hermes
    cargo build --release
    ```
    This will create a binary in the target/release directory.
 5. **Run the node**: To run Hermes for Pythnet, use the following command:
 
    ```bash
-   ./target/release/hermes run \
+   cargo run --release -- run \
      --pythnet-http-addr https://pythnet-rpc/ \
      --pythnet-ws-addr wss://pythnet-rpc/ \
      --wormhole-spy-rpc-addr https://wormhole-spy-rpc/
@@ -1,6 +1,5 @@
 use {
     crate::{
-        aggregate::AggregationEvent,
         config::RunOptions,
         state::State,
     },

@@ -14,7 +13,6 @@ use {
     ipnet::IpNet,
     serde_qs::axum::QsQueryConfig,
     std::sync::Arc,
-    tokio::sync::broadcast::Sender,
     tower_http::cors::CorsLayer,
     utoipa::OpenApi,
     utoipa_swagger_ui::SwaggerUi,

@@ -26,20 +24,29 @@ mod rest;
 pub mod types;
 mod ws;
 
-#[derive(Clone)]
-pub struct ApiState {
-    pub state: Arc<State>,
+pub struct ApiState<S = State> {
+    pub state: Arc<S>,
     pub ws: Arc<ws::WsState>,
     pub metrics: Arc<metrics_middleware::Metrics>,
-    pub update_tx: Sender<AggregationEvent>,
 }
 
-impl ApiState {
+/// Manually implement `Clone` as the derive macro will try and slap `Clone` on
+/// `State` which should not be Clone.
+impl<S> Clone for ApiState<S> {
+    fn clone(&self) -> Self {
+        Self {
+            state: self.state.clone(),
+            ws: self.ws.clone(),
+            metrics: self.metrics.clone(),
+        }
+    }
+}
+
+impl ApiState<State> {
     pub fn new(
         state: Arc<State>,
         ws_whitelist: Vec<IpNet>,
         requester_ip_header_name: String,
-        update_tx: Sender<AggregationEvent>,
     ) -> Self {
         Self {
             metrics: Arc::new(metrics_middleware::Metrics::new(state.clone())),

@@ -49,24 +56,18 @@ impl ApiState {
                 state.clone(),
             )),
             state,
-            update_tx,
         }
     }
 }
 
-#[tracing::instrument(skip(opts, state, update_tx))]
-pub async fn spawn(
-    opts: RunOptions,
-    state: Arc<State>,
-    update_tx: Sender<AggregationEvent>,
-) -> Result<()> {
+#[tracing::instrument(skip(opts, state))]
+pub async fn spawn(opts: RunOptions, state: Arc<State>) -> Result<()> {
     let state = {
         let opts = opts.clone();
         ApiState::new(
             state,
             opts.rpc.ws_whitelist,
             opts.rpc.requester_ip_header_name,
-            update_tx,
         )
     };
 

@@ -93,6 +94,7 @@ pub async fn run(opts: RunOptions, state: ApiState) -> Result<()> {
         rest::latest_price_updates,
         rest::timestamp_price_updates,
         rest::price_feeds_metadata,
+        rest::price_stream_sse_handler,
     ),
     components(
         schemas(

@@ -122,6 +124,7 @@ pub async fn run(opts: RunOptions, state: ApiState) -> Result<()> {
     // Initialize Axum Router. Note the type here is a `Router<State>` due to the use of the
     // `with_state` method which replaces `Body` with `State` in the type signature.
     let app = Router::new();
+    #[allow(deprecated)]
     let app = app
         .merge(SwaggerUi::new("/docs").url("/docs/openapi.json", ApiDoc::openapi()))
         .route("/", get(rest::index))

@@ -131,6 +134,10 @@ pub async fn run(opts: RunOptions, state: ApiState) -> Result<()> {
         .route("/api/latest_price_feeds", get(rest::latest_price_feeds))
         .route("/api/latest_vaas", get(rest::latest_vaas))
         .route("/api/price_feed_ids", get(rest::price_feed_ids))
+        .route(
+            "/v2/updates/price/stream",
+            get(rest::price_stream_sse_handler),
+        )
         .route("/v2/updates/price/latest", get(rest::latest_price_updates))
         .route(
             "/v2/updates/price/:publish_time",
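The manual `Clone` in the hunk above works around a standard derive limitation: `#[derive(Clone)]` on a generic struct emits `impl<S: Clone> Clone for ...`, so the struct is cloneable only when `S` is, even when every field (like `Arc<S>`) is cloneable regardless. A minimal sketch of the pitfall, with a hypothetical `Wrapper` type standing in for `ApiState`:

```rust
use std::sync::Arc;

struct NotClone; // stands in for the non-Clone `State`

struct Wrapper<S> {
    state: Arc<S>,
}

// Manual impl: no `S: Clone` bound is needed, because cloning an Arc
// only bumps the reference count.
impl<S> Clone for Wrapper<S> {
    fn clone(&self) -> Self {
        Self {
            state: self.state.clone(),
        }
    }
}

fn main() {
    let w = Wrapper { state: Arc::new(NotClone) };
    let _w2 = w.clone(); // compiles even though NotClone is not Clone
}
```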
@@ -1,4 +1,4 @@
-use crate::aggregate::UnixTimestamp;
+use crate::state::aggregate::UnixTimestamp;
 
 // Example values for the utoipa API docs.
 // Note that each of these expressions is only evaluated once when the documentation is created,
@@ -1,5 +1,6 @@
 use {
     super::ApiState,
+    crate::state::aggregate::Aggregates,
     axum::{
         http::StatusCode,
         response::{

@@ -21,6 +22,7 @@ mod price_feed_ids;
 mod ready;
 mod v2;
 
+
 pub use {
     get_price_feed::*,
     get_vaa::*,

@@ -34,10 +36,12 @@ pub use {
     v2::{
         latest_price_updates::*,
         price_feeds_metadata::*,
+        sse::*,
         timestamp_price_updates::*,
     },
 };
 
+#[derive(Debug)]
 pub enum RestError {
     BenchmarkPriceNotUnique,
     UpdateDataNotFound,

@@ -90,11 +94,15 @@ impl IntoResponse for RestError {
 }
 
 /// Verify that the price ids exist in the aggregate state.
-pub async fn verify_price_ids_exist(
-    state: &ApiState,
+pub async fn verify_price_ids_exist<S>(
+    state: &ApiState<S>,
     price_ids: &[PriceIdentifier],
-) -> Result<(), RestError> {
-    let all_ids = crate::aggregate::get_price_feed_ids(&*state.state).await;
+) -> Result<(), RestError>
+where
+    S: Aggregates,
+{
+    let state = &*state.state;
+    let all_ids = Aggregates::get_price_feed_ids(state).await;
     let missing_ids = price_ids
         .iter()
         .filter(|id| !all_ids.contains(id))
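Making `verify_price_ids_exist` (and the handlers that follow) generic over `S: Aggregates` is what lets tests substitute a mock aggregate state for the real one. A sketch under assumptions; this trait shape is a trimmed-down, hypothetical version of the real `Aggregates` trait in `crate::state::aggregate`.

```rust
use std::sync::Arc;

// Hypothetical, minimal stand-in for the real Aggregates trait.
trait Aggregates: Send + Sync {
    fn price_feed_count(&self) -> usize;
}

struct RealState {
    feeds: Vec<[u8; 32]>,
}
impl Aggregates for RealState {
    fn price_feed_count(&self) -> usize {
        self.feeds.len()
    }
}

struct MockState; // what a unit test might inject
impl Aggregates for MockState {
    fn price_feed_count(&self) -> usize {
        2
    }
}

// A "handler" generic over S accepts either implementation unchanged.
fn describe<S: Aggregates>(state: &Arc<S>) -> String {
    format!("{} feeds available", state.price_feed_count())
}

fn main() {
    println!("{}", describe(&Arc::new(RealState { feeds: vec![] })));
    println!("{}", describe(&Arc::new(MockState)));
}
```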
@@ -1,10 +1,6 @@
 use {
     super::verify_price_ids_exist,
     crate::{
-        aggregate::{
-            RequestTime,
-            UnixTimestamp,
-        },
         api::{
             doc_examples,
             rest::RestError,

@@ -12,6 +8,12 @@ use {
                 PriceIdInput,
                 RpcPriceFeed,
             },
+            ApiState,
+        },
+        state::aggregate::{
+            Aggregates,
+            RequestTime,
+            UnixTimestamp,
         },
     },
     anyhow::Result,

@@ -47,6 +49,8 @@ pub struct GetPriceFeedQueryParams {
     binary: bool,
 }
 
+/// **Deprecated: use /v2/updates/price/{publish_time} instead**
+///
 /// Get a price update for a price feed with a specific timestamp
 ///
 /// Given a price feed id and timestamp, retrieve the Pyth price update closest to that timestamp.

@@ -60,16 +64,20 @@ pub struct GetPriceFeedQueryParams {
         GetPriceFeedQueryParams
     )
 )]
-pub async fn get_price_feed(
-    State(state): State<crate::api::ApiState>,
+#[deprecated]
+pub async fn get_price_feed<S>(
+    State(state): State<ApiState<S>>,
     QsQuery(params): QsQuery<GetPriceFeedQueryParams>,
-) -> Result<Json<RpcPriceFeed>, RestError> {
+) -> Result<Json<RpcPriceFeed>, RestError>
+where
+    S: Aggregates,
+{
     let price_id: PriceIdentifier = params.id.into();
 
     verify_price_ids_exist(&state, &[price_id]).await?;
 
-    let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
-        &*state.state,
+    let state = &*state.state;
+    let price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
+        state,
         &[price_id],
         RequestTime::FirstAfter(params.publish_time),
     )
@@ -1,15 +1,16 @@
 use {
     super::verify_price_ids_exist,
     crate::{
-        aggregate::{
-            get_price_feeds_with_update_data,
-            RequestTime,
-            UnixTimestamp,
-        },
         api::{
             doc_examples,
             rest::RestError,
             types::PriceIdInput,
+            ApiState,
+        },
+        state::aggregate::{
+            Aggregates,
+            RequestTime,
+            UnixTimestamp,
         },
     },
     anyhow::Result,

@@ -54,6 +55,8 @@ pub struct GetVaaResponse {
     publish_time: UnixTimestamp,
 }
 
+/// **Deprecated: use /v2/updates/price/{publish_time} instead**
+///
 /// Get a VAA for a price feed with a specific timestamp
 ///
 /// Given a price feed id and timestamp, retrieve the Pyth price update closest to that timestamp.

@@ -68,16 +71,20 @@ pub struct GetVaaResponse {
         GetVaaQueryParams
     )
 )]
-pub async fn get_vaa(
-    State(state): State<crate::api::ApiState>,
+#[deprecated]
+pub async fn get_vaa<S>(
+    State(state): State<ApiState<S>>,
     QsQuery(params): QsQuery<GetVaaQueryParams>,
-) -> Result<Json<GetVaaResponse>, RestError> {
+) -> Result<Json<GetVaaResponse>, RestError>
+where
+    S: Aggregates,
+{
     let price_id: PriceIdentifier = params.id.into();
 
     verify_price_ids_exist(&state, &[price_id]).await?;
 
-    let price_feeds_with_update_data = get_price_feeds_with_update_data(
-        &*state.state,
+    let state = &*state.state;
+    let price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
+        state,
         &[price_id],
         RequestTime::FirstAfter(params.publish_time),
     )
@@ -1,11 +1,15 @@
 use {
     super::verify_price_ids_exist,
     crate::{
-        aggregate::{
+        api::{
+            rest::RestError,
+            ApiState,
+        },
+        state::aggregate::{
+            Aggregates,
             RequestTime,
             UnixTimestamp,
         },
-        api::rest::RestError,
     },
     anyhow::Result,
     axum::{

@@ -42,6 +46,8 @@ pub struct GetVaaCcipResponse {
     data: String, // TODO: Use a typed wrapper for the hex output with leading 0x.
 }
 
+/// **Deprecated: use /v2/updates/price/{publish_time} instead**
+///
 /// Get a VAA for a price feed using CCIP
 ///
 /// This endpoint accepts a single argument which is a hex-encoded byte string of the following form:

@@ -56,25 +62,30 @@ pub struct GetVaaCcipResponse {
         GetVaaCcipQueryParams
     )
 )]
-pub async fn get_vaa_ccip(
-    State(state): State<crate::api::ApiState>,
+#[deprecated]
+pub async fn get_vaa_ccip<S>(
+    State(state): State<ApiState<S>>,
     QsQuery(params): QsQuery<GetVaaCcipQueryParams>,
-) -> Result<Json<GetVaaCcipResponse>, RestError> {
+) -> Result<Json<GetVaaCcipResponse>, RestError>
+where
+    S: Aggregates,
+{
     let price_id: PriceIdentifier = PriceIdentifier::new(
         params.data[0..32]
             .try_into()
             .map_err(|_| RestError::InvalidCCIPInput)?,
     );
+    verify_price_ids_exist(&state, &[price_id]).await?;
+
     let publish_time = UnixTimestamp::from_be_bytes(
         params.data[32..40]
             .try_into()
             .map_err(|_| RestError::InvalidCCIPInput)?,
     );
 
-    verify_price_ids_exist(&state, &[price_id]).await?;
-
-    let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
-        &*state.state,
+    let state = &*state.state;
+    let price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
+        state,
         &[price_id],
         RequestTime::FirstAfter(publish_time),
     )
@@ -17,6 +17,7 @@ pub async fn index() -> impl IntoResponse {
         "/api/get_vaa?id=<price_feed_id>&publish_time=<publish_time_in_unix_timestamp>",
         "/api/get_vaa_ccip?data=<0x<price_feed_id_32_bytes>+<publish_time_unix_timestamp_be_8_bytes>>",
         "/v2/updates/price/latest?ids[]=<price_feed_id>&ids[]=<price_feed_id_2>&..(&encoding=hex|base64)(&parsed=false)",
+        "/v2/updates/price/stream?ids[]=<price_feed_id>&ids[]=<price_feed_id_2>&..(&encoding=hex|base64)(&parsed=false)(&allow_unordered=false)(&benchmarks_only=false)",
         "/v2/updates/price/<timestamp>?ids[]=<price_feed_id>&ids[]=<price_feed_id_2>&..(&encoding=hex|base64)(&parsed=false)",
         "/v2/price_feeds?(query=btc)(&asset_type=crypto|equity|fx|metal|rates)",
     ])
@@ -1,13 +1,17 @@
 use {
     super::verify_price_ids_exist,
     crate::{
-        aggregate::RequestTime,
         api::{
             rest::RestError,
             types::{
                 PriceIdInput,
                 RpcPriceFeed,
             },
+            ApiState,
+        },
+        state::aggregate::{
+            Aggregates,
+            RequestTime,
         },
     },
     anyhow::Result,

@@ -46,6 +50,8 @@ pub struct LatestPriceFeedsQueryParams {
     binary: bool,
 }
 
+/// **Deprecated: use /v2/updates/price/latest instead**
+///
 /// Get the latest price updates by price feed id.
 ///
 /// Given a collection of price feed ids, retrieve the latest Pyth price for each price feed.

@@ -59,19 +65,20 @@ pub struct LatestPriceFeedsQueryParams {
         LatestPriceFeedsQueryParams
     )
 )]
-pub async fn latest_price_feeds(
-    State(state): State<crate::api::ApiState>,
+#[deprecated]
+pub async fn latest_price_feeds<S>(
+    State(state): State<ApiState<S>>,
     QsQuery(params): QsQuery<LatestPriceFeedsQueryParams>,
-) -> Result<Json<Vec<RpcPriceFeed>>, RestError> {
+) -> Result<Json<Vec<RpcPriceFeed>>, RestError>
+where
+    S: Aggregates,
+{
     let price_ids: Vec<PriceIdentifier> = params.ids.into_iter().map(|id| id.into()).collect();
 
     verify_price_ids_exist(&state, &price_ids).await?;
 
-    let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
-        &*state.state,
-        &price_ids,
-        RequestTime::Latest,
-    )
+    let state = &*state.state;
+    let price_feeds_with_update_data =
+        Aggregates::get_price_feeds_with_update_data(state, &price_ids, RequestTime::Latest)
         .await
         .map_err(|e| {
             tracing::warn!(
@@ -1,11 +1,15 @@
 use {
     super::verify_price_ids_exist,
     crate::{
-        aggregate::RequestTime,
         api::{
             doc_examples,
             rest::RestError,
             types::PriceIdInput,
+            ApiState,
+        },
+        state::aggregate::{
+            Aggregates,
+            RequestTime,
         },
     },
     anyhow::Result,

@@ -39,6 +43,8 @@ pub struct LatestVaasQueryParams {
 }
 
 
+/// **Deprecated: use /v2/updates/price/latest instead**
+///
 /// Get VAAs for a set of price feed ids.
 ///
 /// Given a collection of price feed ids, retrieve the latest VAA for each. The returned VAA(s) can

@@ -54,19 +60,20 @@ pub struct LatestVaasQueryParams {
         (status = 200, description = "VAAs retrieved successfully", body = Vec<String>, example=json!([doc_examples::vaa_example()]))
     ),
 )]
-pub async fn latest_vaas(
-    State(state): State<crate::api::ApiState>,
+#[deprecated]
+pub async fn latest_vaas<S>(
+    State(state): State<ApiState<S>>,
     QsQuery(params): QsQuery<LatestVaasQueryParams>,
-) -> Result<Json<Vec<String>>, RestError> {
+) -> Result<Json<Vec<String>>, RestError>
+where
+    S: Aggregates,
+{
     let price_ids: Vec<PriceIdentifier> = params.ids.into_iter().map(|id| id.into()).collect();
 
     verify_price_ids_exist(&state, &price_ids).await?;
 
-    let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
-        &*state.state,
-        &price_ids,
-        RequestTime::Latest,
-    )
+    let state = &*state.state;
+    let price_feeds_with_update_data =
+        Aggregates::get_price_feeds_with_update_data(state, &price_ids, RequestTime::Latest)
         .await
         .map_err(|e| {
             tracing::warn!(
@@ -1,7 +1,11 @@
 use {
-    crate::api::{
-        rest::RestError,
-        types::RpcPriceIdentifier,
+    crate::{
+        api::{
+            rest::RestError,
+            types::RpcPriceIdentifier,
+            ApiState,
+        },
+        state::aggregate::Aggregates,
     },
     anyhow::Result,
     axum::{

@@ -10,6 +14,8 @@ use {
     },
 };
 
+/// **Deprecated: use /v2/price_feeds instead**
+///
 /// Get the set of price feed IDs.
 ///
 /// This endpoint fetches all of the price feed IDs for which price updates can be retrieved.

@@ -21,10 +27,15 @@ use {
         (status = 200, description = "Price feed ids retrieved successfully", body = Vec<RpcPriceIdentifier>)
     ),
 )]
-pub async fn price_feed_ids(
-    State(state): State<crate::api::ApiState>,
-) -> Result<Json<Vec<RpcPriceIdentifier>>, RestError> {
-    let price_feed_ids = crate::aggregate::get_price_feed_ids(&*state.state)
+#[deprecated]
+pub async fn price_feed_ids<S>(
+    State(state): State<ApiState<S>>,
+) -> Result<Json<Vec<RpcPriceIdentifier>>, RestError>
+where
+    S: Aggregates,
+{
+    let state = &*state.state;
+    let price_feed_ids = Aggregates::get_price_feed_ids(state)
         .await
         .into_iter()
         .map(RpcPriceIdentifier::from)
@@ -0,0 +1,25 @@
+use {
+    crate::{
+        api::ApiState,
+        state::aggregate::Aggregates,
+    },
+    axum::{
+        extract::State,
+        http::StatusCode,
+        response::{
+            IntoResponse,
+            Response,
+        },
+    },
+};
+
+pub async fn ready<S>(State(state): State<ApiState<S>>) -> Response
+where
+    S: Aggregates,
+{
+    let state = &*state.state;
+    match Aggregates::is_ready(state).await {
+        true => (StatusCode::OK, "OK").into_response(),
+        false => (StatusCode::SERVICE_UNAVAILABLE, "Service Unavailable").into_response(),
+    }
+}
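Reviewer note: the new `ready` handler gives load balancers a binary health signal derived from `Aggregates::is_ready` (whose staleness and slot-lag logic appears later in the aggregate hunks). A toy sketch of the contract, with `readiness_response` as a hypothetical stand-in for the handler's status mapping:

```rust
// Map "is the aggregate state fresh?" onto the HTTP statuses probes expect.
fn readiness_response(is_ready: bool) -> (u16, &'static str) {
    match is_ready {
        true => (200, "OK"),
        false => (503, "Service Unavailable"),
    }
}

fn main() {
    assert_eq!(readiness_response(true), (200, "OK"));
    assert_eq!(readiness_response(false), (503, "Service Unavailable"));
}
```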
@@ -1,6 +1,5 @@
 use {
     crate::{
-        aggregate::RequestTime,
         api::{
             rest::{
                 verify_price_ids_exist,
@@ -13,6 +12,11 @@ use {
             PriceIdInput,
             PriceUpdate,
         },
+        ApiState,
+    },
+    state::aggregate::{
+        Aggregates,
+        RequestTime,
     },
     },
     anyhow::Result,
@@ -46,11 +50,11 @@ pub struct LatestPriceUpdatesQueryParams {
     #[param(example = "e62df6c8b4a85fe1a67db44dc12de5db330f7ac66b72dc658afedf0f4a415b43")]
     ids: Vec<PriceIdInput>,

-    /// If true, include the parsed price update in the `parsed` field of each returned feed.
+    /// If true, include the parsed price update in the `parsed` field of each returned feed. Default is `hex`.
     #[serde(default)]
     encoding: EncodingType,

-    /// If true, include the parsed price update in the `parsed` field of each returned feed.
+    /// If true, include the parsed price update in the `parsed` field of each returned feed. Default is `true`.
     #[serde(default = "default_true")]
     parsed: bool,
 }
@@ -73,19 +77,19 @@ fn default_true() -> bool {
         LatestPriceUpdatesQueryParams
     )
 )]
-pub async fn latest_price_updates(
-    State(state): State<crate::api::ApiState>,
+pub async fn latest_price_updates<S>(
+    State(state): State<ApiState<S>>,
     QsQuery(params): QsQuery<LatestPriceUpdatesQueryParams>,
-) -> Result<Json<PriceUpdate>, RestError> {
+) -> Result<Json<PriceUpdate>, RestError>
+where
+    S: Aggregates,
+{
     let price_ids: Vec<PriceIdentifier> = params.ids.into_iter().map(|id| id.into()).collect();

     verify_price_ids_exist(&state, &price_ids).await?;

-    let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
-        &*state.state,
-        &price_ids,
-        RequestTime::Latest,
-    )
+    let state = &*state.state;
+    let price_feeds_with_update_data =
+        Aggregates::get_price_feeds_with_update_data(state, &price_ids, RequestTime::Latest)
         .await
         .map_err(|e| {
             tracing::warn!(
@@ -1,3 +1,4 @@
 pub mod latest_price_updates;
 pub mod price_feeds_metadata;
+pub mod sse;
 pub mod timestamp_price_updates;
@@ -6,8 +6,9 @@ use {
             AssetType,
             PriceFeedMetadata,
         },
+        ApiState,
     },
-    price_feeds_metadata::get_price_feeds_metadata,
+    price_feeds_metadata::PriceFeedMeta,
 },
 anyhow::Result,
 axum::{
@@ -46,12 +47,16 @@ pub struct PriceFeedsMetadataQueryParams {
         PriceFeedsMetadataQueryParams
     )
 )]
-pub async fn price_feeds_metadata(
-    State(state): State<crate::api::ApiState>,
+pub async fn price_feeds_metadata<S>(
+    State(state): State<ApiState<S>>,
     QsQuery(params): QsQuery<PriceFeedsMetadataQueryParams>,
-) -> Result<Json<Vec<PriceFeedMetadata>>, RestError> {
-    let price_feeds_metadata =
-        get_price_feeds_metadata(&state.state, params.query, params.asset_type)
+) -> Result<Json<Vec<PriceFeedMetadata>>, RestError>
+where
+    S: PriceFeedMeta,
+{
+    let state = &state.state;
+    let price_feeds_metadata = state
+        .get_price_feeds_metadata(params.query, params.asset_type)
         .await
         .map_err(|e| {
             tracing::warn!("RPC connection error: {}", e);
@@ -0,0 +1,235 @@
+use {
+    crate::{
+        api::{
+            rest::{
+                verify_price_ids_exist,
+                RestError,
+            },
+            types::{
+                BinaryPriceUpdate,
+                EncodingType,
+                ParsedPriceUpdate,
+                PriceIdInput,
+                PriceUpdate,
+                RpcPriceIdentifier,
+            },
+            ApiState,
+        },
+        state::aggregate::{
+            Aggregates,
+            AggregationEvent,
+            RequestTime,
+        },
+    },
+    anyhow::Result,
+    axum::{
+        extract::State,
+        response::sse::{
+            Event,
+            KeepAlive,
+            Sse,
+        },
+    },
+    futures::Stream,
+    pyth_sdk::PriceIdentifier,
+    serde::Deserialize,
+    serde_qs::axum::QsQuery,
+    std::convert::Infallible,
+    tokio::sync::broadcast,
+    tokio_stream::{
+        wrappers::BroadcastStream,
+        StreamExt as _,
+    },
+    utoipa::IntoParams,
+};
+
+#[derive(Debug, Deserialize, IntoParams)]
+#[into_params(parameter_in = Query)]
+pub struct StreamPriceUpdatesQueryParams {
+    /// Get the most recent price update for this set of price feed ids.
+    ///
+    /// This parameter can be provided multiple times to retrieve multiple price updates,
+    /// for example see the following query string:
+    ///
+    /// ```
+    /// ?ids[]=a12...&ids[]=b4c...
+    /// ```
+    #[param(rename = "ids[]")]
+    #[param(example = "e62df6c8b4a85fe1a67db44dc12de5db330f7ac66b72dc658afedf0f4a415b43")]
+    ids: Vec<PriceIdInput>,
+
+    /// If true, include the parsed price update in the `parsed` field of each returned feed. Default is `hex`.
+    #[serde(default)]
+    encoding: EncodingType,
+
+    /// If true, include the parsed price update in the `parsed` field of each returned feed. Default is `true`.
+    #[serde(default = "default_true")]
+    parsed: bool,
+
+    /// If true, allows unordered price updates to be included in the stream.
+    #[serde(default)]
+    allow_unordered: bool,
+
+    /// If true, only include benchmark prices that are the initial price updates at a given timestamp (i.e., prevPubTime != pubTime).
+    #[serde(default)]
+    benchmarks_only: bool,
+}
+
+fn default_true() -> bool {
+    true
+}
+
+#[utoipa::path(
+    get,
+    path = "/v2/updates/price/stream",
+    responses(
+        (status = 200, description = "Price updates retrieved successfully", body = PriceUpdate),
+        (status = 404, description = "Price ids not found", body = String)
+    ),
+    params(StreamPriceUpdatesQueryParams)
+)]
+/// SSE route handler for streaming price updates.
+pub async fn price_stream_sse_handler<S>(
+    State(state): State<ApiState<S>>,
+    QsQuery(params): QsQuery<StreamPriceUpdatesQueryParams>,
+) -> Result<Sse<impl Stream<Item = Result<Event, Infallible>>>, RestError>
+where
+    S: Aggregates,
+    S: Sync,
+    S: Send,
+    S: 'static,
+{
+    let price_ids: Vec<PriceIdentifier> = params.ids.into_iter().map(Into::into).collect();
+
+    verify_price_ids_exist(&state, &price_ids).await?;
+
+    // Clone the update_tx receiver to listen for new price updates
+    let update_rx: broadcast::Receiver<AggregationEvent> = Aggregates::subscribe(&*state.state);
+
+    // Convert the broadcast receiver into a Stream
+    let stream = BroadcastStream::new(update_rx);
+
+    let sse_stream = stream.then(move |message| {
+        let state_clone = state.clone(); // Clone again to use inside the async block
+        let price_ids_clone = price_ids.clone(); // Clone again for use inside the async block
+        async move {
+            match message {
+                Ok(event) => {
+                    match handle_aggregation_event(
+                        event,
+                        state_clone,
+                        price_ids_clone,
+                        params.encoding,
+                        params.parsed,
+                        params.benchmarks_only,
+                        params.allow_unordered,
+                    )
+                    .await
+                    {
+                        Ok(Some(update)) => Ok(Event::default()
+                            .json_data(update)
+                            .unwrap_or_else(|e| error_event(e))),
+                        Ok(None) => Ok(Event::default().comment("No update available")),
+                        Err(e) => Ok(error_event(e)),
+                    }
+                }
+                Err(e) => Ok(error_event(e)),
+            }
+        }
+    });
+
+    Ok(Sse::new(sse_stream).keep_alive(KeepAlive::default()))
+}
+
+async fn handle_aggregation_event<S>(
+    event: AggregationEvent,
+    state: ApiState<S>,
+    mut price_ids: Vec<PriceIdentifier>,
+    encoding: EncodingType,
+    parsed: bool,
+    benchmarks_only: bool,
+    allow_unordered: bool,
+) -> Result<Option<PriceUpdate>>
+where
+    S: Aggregates,
+{
+    // Handle out-of-order events
+    if let AggregationEvent::OutOfOrder { .. } = event {
+        if !allow_unordered {
+            return Ok(None);
+        }
+    }
+
+    // We check for available price feed ids to ensure that the price feed ids provided exists since price feeds can be removed.
+    let available_price_feed_ids = Aggregates::get_price_feed_ids(&*state.state).await;
+
+    price_ids.retain(|price_feed_id| available_price_feed_ids.contains(price_feed_id));
+
+    let mut price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
+        &*state.state,
+        &price_ids,
+        RequestTime::AtSlot(event.slot()),
+    )
+    .await?;
+
+    let mut parsed_price_updates: Vec<ParsedPriceUpdate> = price_feeds_with_update_data
+        .price_feeds
+        .into_iter()
+        .map(|price_feed| price_feed.into())
+        .collect();
+
+
+    if benchmarks_only {
+        // Remove those with metadata.prev_publish_time != price.publish_time from parsed_price_updates
+        parsed_price_updates.retain(|price_feed| {
+            price_feed
+                .metadata
+                .prev_publish_time
+                .map_or(false, |prev_time| {
+                    prev_time != price_feed.price.publish_time
+                })
+        });
+        // Retain price id in price_ids that are in parsed_price_updates
+        price_ids.retain(|price_id| {
+            parsed_price_updates
+                .iter()
+                .any(|price_feed| price_feed.id == RpcPriceIdentifier::from(*price_id))
+        });
+        price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
+            &*state.state,
+            &price_ids,
+            RequestTime::AtSlot(event.slot()),
+        )
+        .await?;
+    }
+
+    // Check if price_ids is empty after filtering and return None if it is
+    if price_ids.is_empty() {
+        return Ok(None);
+    }
+
+    let price_update_data = price_feeds_with_update_data.update_data;
+    let encoded_data: Vec<String> = price_update_data
+        .into_iter()
+        .map(|data| encoding.encode_str(&data))
+        .collect();
+    let binary_price_update = BinaryPriceUpdate {
+        encoding,
+        data: encoded_data,
+    };
+
+    Ok(Some(PriceUpdate {
+        binary: binary_price_update,
+        parsed: if parsed {
+            Some(parsed_price_updates)
+        } else {
+            None
+        },
+    }))
+}
+
+fn error_event<E: std::fmt::Debug>(e: E) -> Event {
+    Event::default()
+        .event("error")
+        .data(format!("Error receiving update: {:?}", e))
+}
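Reviewer note: the SSE handler leans on `BroadcastStream` to adapt a `tokio::sync::broadcast::Receiver` into a `Stream`, and a slow consumer sees `Err(Lagged(n))` instead of blocking the producer, which is why the handler converts errors into SSE "error" events rather than tearing the connection down. A self-contained sketch of that behavior (not Hermes code; a tiny buffer is used to force the lag):

```rust
use tokio_stream::{
    wrappers::{errors::BroadcastStreamRecvError, BroadcastStream},
    StreamExt,
};

#[tokio::main]
async fn main() {
    // Capacity 2: sending five events overruns a receiver that hasn't polled yet.
    let (tx, rx) = tokio::sync::broadcast::channel::<u64>(2);
    for slot in 0..5u64 {
        let _ = tx.send(slot); // ignore "no subscribers" errors, as store_update does
    }
    drop(tx); // close the channel so the stream terminates after draining

    let mut stream = BroadcastStream::new(rx);
    while let Some(item) = stream.next().await {
        match item {
            Ok(slot) => println!("update for slot {slot}"),
            Err(BroadcastStreamRecvError::Lagged(n)) => {
                println!("receiver lagged; skipped {n} updates")
            }
        }
    }
}
```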
@@ -1,9 +1,5 @@
 use {
     crate::{
-        aggregate::{
-            RequestTime,
-            UnixTimestamp,
-        },
         api::{
             doc_examples,
             rest::{
@@ -17,6 +13,12 @@ use {
             PriceIdInput,
             PriceUpdate,
         },
+        ApiState,
+    },
+    state::aggregate::{
+        Aggregates,
+        RequestTime,
+        UnixTimestamp,
     },
     },
     anyhow::Result,
@@ -58,11 +60,11 @@ pub struct TimestampPriceUpdatesQueryParams {
     #[param(example = "e62df6c8b4a85fe1a67db44dc12de5db330f7ac66b72dc658afedf0f4a415b43")]
     ids: Vec<PriceIdInput>,

-    /// If true, include the parsed price update in the `parsed` field of each returned feed.
+    /// If true, include the parsed price update in the `parsed` field of each returned feed. Default is `hex`.
     #[serde(default)]
     encoding: EncodingType,

-    /// If true, include the parsed price update in the `parsed` field of each returned feed.
+    /// If true, include the parsed price update in the `parsed` field of each returned feed. Default is `true`.
     #[serde(default = "default_true")]
     parsed: bool,
 }
@@ -87,18 +89,22 @@ fn default_true() -> bool {
         TimestampPriceUpdatesQueryParams
     )
 )]
-pub async fn timestamp_price_updates(
-    State(state): State<crate::api::ApiState>,
+pub async fn timestamp_price_updates<S>(
+    State(state): State<ApiState<S>>,
     Path(path_params): Path<TimestampPriceUpdatesPathParams>,
     QsQuery(query_params): QsQuery<TimestampPriceUpdatesQueryParams>,
-) -> Result<Json<PriceUpdate>, RestError> {
+) -> Result<Json<PriceUpdate>, RestError>
+where
+    S: Aggregates,
+{
     let price_ids: Vec<PriceIdentifier> =
         query_params.ids.into_iter().map(|id| id.into()).collect();

     verify_price_ids_exist(&state, &price_ids).await?;

-    let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
-        &*state.state,
+    let state = &*state.state;
+    let price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
+        state,
         &price_ids,
         RequestTime::FirstAfter(path_params.publish_time),
     )
@@ -1,6 +1,6 @@
 use {
     super::doc_examples,
-    crate::aggregate::{
+    crate::state::aggregate::{
         PriceFeedUpdate,
         PriceFeedsWithUpdateData,
         Slot,
@@ -1,14 +1,18 @@
 use {
-    super::types::{
-        PriceIdInput,
-        RpcPriceFeed,
+    super::{
+        types::{
+            PriceIdInput,
+            RpcPriceFeed,
+        },
+        ApiState,
     },
-    crate::{
+    crate::state::{
         aggregate::{
+            Aggregates,
             AggregationEvent,
             RequestTime,
         },
-        state::State,
+        State,
     },
     anyhow::{
         anyhow,
@@ -212,11 +216,10 @@ pub async fn ws_route_handler(
 }

 #[tracing::instrument(skip(stream, state, subscriber_ip))]
-async fn websocket_handler(
-    stream: WebSocket,
-    state: super::ApiState,
-    subscriber_ip: Option<IpAddr>,
-) {
+async fn websocket_handler<S>(stream: WebSocket, state: ApiState<S>, subscriber_ip: Option<IpAddr>)
+where
+    S: Aggregates,
+{
     let ws_state = state.ws.clone();

     // Retain the recent rate limit data for the IP addresses to
@@ -235,7 +238,7 @@ async fn websocket_handler(
         })
         .inc();

-    let notify_receiver = state.update_tx.subscribe();
+    let notify_receiver = Aggregates::subscribe(&*state.state);
     let (sender, receiver) = stream.split();
     let mut subscriber = Subscriber::new(
         id,
@@ -254,11 +257,11 @@ pub type SubscriberId = usize;

 /// Subscriber is an actor that handles a single websocket connection.
 /// It listens to the store for updates and sends them to the client.
-pub struct Subscriber {
+pub struct Subscriber<S> {
     id: SubscriberId,
     ip_addr: Option<IpAddr>,
     closed: bool,
-    store: Arc<State>,
+    state: Arc<S>,
     ws_state: Arc<WsState>,
     notify_receiver: Receiver<AggregationEvent>,
     receiver: SplitStream<WebSocket>,
@@ -269,11 +272,14 @@ pub struct Subscriber {
     responded_to_ping: bool,
 }

-impl Subscriber {
+impl<S> Subscriber<S>
+where
+    S: Aggregates,
+{
     pub fn new(
         id: SubscriberId,
         ip_addr: Option<IpAddr>,
-        store: Arc<State>,
+        state: Arc<S>,
         ws_state: Arc<WsState>,
         notify_receiver: Receiver<AggregationEvent>,
         receiver: SplitStream<WebSocket>,
@@ -283,7 +289,7 @@ impl Subscriber {
             id,
             ip_addr,
             closed: false,
-            store,
+            state,
             ws_state,
             notify_receiver,
             receiver,
@@ -350,8 +356,9 @@ impl Subscriber {
             .cloned()
             .collect::<Vec<_>>();

-        let updates = match crate::aggregate::get_price_feeds_with_update_data(
-            &*self.store,
+        let state = &*self.state;
+        let updates = match Aggregates::get_price_feeds_with_update_data(
+            state,
             &price_feed_ids,
             RequestTime::AtSlot(event.slot()),
         )
@@ -364,8 +371,7 @@ impl Subscriber {
                 // subscription. In this case we just remove the non-existing
                 // price feed from the list and will keep sending updates for
                 // the rest.
-                let available_price_feed_ids =
-                    crate::aggregate::get_price_feed_ids(&*self.store).await;
+                let available_price_feed_ids = Aggregates::get_price_feed_ids(state).await;

                 self.price_feeds_with_config
                     .retain(|price_feed_id, _| available_price_feed_ids.contains(price_feed_id));
@@ -376,8 +382,8 @@ impl Subscriber {
                     .cloned()
                     .collect::<Vec<_>>();

-                crate::aggregate::get_price_feeds_with_update_data(
-                    &*self.store,
+                Aggregates::get_price_feeds_with_update_data(
+                    state,
                     &price_feed_ids,
                     RequestTime::AtSlot(event.slot()),
                 )
@@ -545,7 +551,7 @@ impl Subscriber {
                 allow_out_of_order,
             }) => {
                 let price_ids: Vec<PriceIdentifier> = ids.into_iter().map(|id| id.into()).collect();
-                let available_price_ids = crate::aggregate::get_price_feed_ids(&*self.store).await;
+                let available_price_ids = Aggregates::get_price_feed_ids(&*self.state).await;

                 let not_found_price_ids: Vec<&PriceIdentifier> = price_ids
                     .iter()
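Reviewer note: `Aggregates::subscribe(&*state.state)` replaces direct access to `state.update_tx`, but the mechanics are unchanged: one broadcast sender lives in the aggregate state, and every websocket or SSE connection gets its own independent receiver, so a single `AggregationEvent` fans out to all of them. A minimal sketch (string payloads stand in for real events):

```rust
#[tokio::main]
async fn main() {
    // One sender in the shared state; each connection calls subscribe().
    let (tx, _keep_alive) = tokio::sync::broadcast::channel::<&'static str>(16);
    let mut ws_a = tx.subscribe();
    let mut ws_b = tx.subscribe();

    tx.send("AggregationEvent::New { slot: 1 }").unwrap();

    // Both independent receivers observe the same event.
    assert_eq!(ws_a.recv().await.unwrap(), "AggregationEvent::New { slot: 1 }");
    assert_eq!(ws_b.recv().await.unwrap(), "AggregationEvent::New { slot: 1 }");
}
```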
@@ -19,9 +19,9 @@ pub struct Options {
     #[arg(env = "PYTHNET_HTTP_ADDR")]
     pub http_addr: String,

-    /// Pyth mapping account address.
-    #[arg(long = "mapping-address")]
+    /// Pyth mapping account address on Pythnet.
+    #[arg(long = "pythnet-mapping-addr")]
     #[arg(default_value = DEFAULT_PYTHNET_MAPPING_ADDR)]
-    #[arg(env = "MAPPING_ADDRESS")]
+    #[arg(env = "PYTHNET_MAPPING_ADDR")]
     pub mapping_addr: Pubkey,
 }
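Reviewer note: this is a breaking CLI rename, so deployments passing `--mapping-address` or setting `MAPPING_ADDRESS` must switch to the new spellings. A hedged sketch of the resulting surface, assuming clap's derive API with the `env` feature enabled; the default string is a placeholder, not the real `DEFAULT_PYTHNET_MAPPING_ADDR` value:

```rust
use clap::Parser;

#[derive(Parser, Debug)]
struct Options {
    /// Pyth mapping account address on Pythnet.
    #[arg(
        long = "pythnet-mapping-addr",
        env = "PYTHNET_MAPPING_ADDR",
        default_value = "<pythnet mapping account>" // placeholder for the real constant
    )]
    mapping_addr: String,
}

fn main() {
    // `--pythnet-mapping-addr ...` or PYTHNET_MAPPING_ADDR=... both populate this.
    let opts = Options::parse();
    println!("mapping account: {}", opts.mapping_addr);
}
```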
@@ -17,7 +17,6 @@ use {
     },
 };

-mod aggregate;
 mod api;
 mod config;
 mod metrics_server;
@@ -35,7 +34,7 @@ lazy_static! {
     /// - Exit logic doesn't really require carefully threading this value through the app.
     /// - The `Receiver` side of a watch channel performs the detection based on if the change
     ///   happened after the subscribe, so it means all listeners should always be notified
-    ///   currectly.
+    ///   correctly.
     pub static ref EXIT: watch::Sender<bool> = watch::channel(false).0;
 }

@@ -54,7 +53,7 @@ async fn init() -> Result<()> {
     let (update_tx, _) = tokio::sync::broadcast::channel(1000);

     // Initialize a cache store with a 1000 element circular buffer.
-    let store = State::new(update_tx.clone(), 1000, opts.benchmarks.endpoint.clone());
+    let state = State::new(update_tx.clone(), 1000, opts.benchmarks.endpoint.clone());

     // Listen for Ctrl+C so we can set the exit flag and wait for a graceful shutdown.
     spawn(async move {
@@ -66,11 +65,11 @@ async fn init() -> Result<()> {

     // Spawn all worker tasks, and wait for all to complete (which will happen if a shutdown
     // signal has been observed).
-    let tasks = join_all([
-        Box::pin(spawn(network::wormhole::spawn(opts.clone(), store.clone()))),
-        Box::pin(spawn(network::pythnet::spawn(opts.clone(), store.clone()))),
-        Box::pin(spawn(metrics_server::run(opts.clone(), store.clone()))),
-        Box::pin(spawn(api::spawn(opts.clone(), store.clone(), update_tx))),
+    let tasks = join_all(vec![
+        spawn(network::wormhole::spawn(opts.clone(), state.clone())),
+        spawn(network::pythnet::spawn(opts.clone(), state.clone())),
+        spawn(metrics_server::run(opts.clone(), state.clone())),
+        spawn(api::spawn(opts.clone(), state.clone())),
     ])
     .await;
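Reviewer note: the `join_all(vec![...])` change works because `tokio::spawn` returns a `JoinHandle<T>`, a concrete `Unpin` future of uniform type, so the handles can sit in a plain `Vec` and the `Box::pin` wrappers become unnecessary. A runnable sketch with toy tasks:

```rust
use futures::future::join_all;

#[tokio::main]
async fn main() {
    // Each spawn yields a JoinHandle<&str>; join_all awaits them all.
    let tasks = join_all(vec![
        tokio::spawn(async { "wormhole listener" }),
        tokio::spawn(async { "pythnet listener" }),
    ])
    .await;

    for result in tasks {
        // JoinHandle resolves to Result<T, JoinError>.
        println!("task finished: {}", result.expect("task panicked"));
    }
}
```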
@@ -4,10 +4,6 @@

 use {
     crate::{
-        aggregate::{
-            AccumulatorMessages,
-            Update,
-        },
         api::types::PriceFeedMetadata,
         config::RunOptions,
         network::wormhole::{
@@ -17,10 +13,17 @@ use {
             GuardianSetData,
         },
         price_feeds_metadata::{
-            store_price_feeds_metadata,
+            PriceFeedMeta,
             DEFAULT_PRICE_FEEDS_CACHE_UPDATE_INTERVAL,
         },
-        state::State,
+        state::{
+            aggregate::{
+                AccumulatorMessages,
+                Aggregates,
+                Update,
+            },
+            State,
+        },
     },
     anyhow::{
         anyhow,
@@ -136,7 +139,7 @@ async fn fetch_bridge_data(
     }
 }

-pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<()> {
+pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<!> {
     let client = PubsubClient::new(pythnet_ws_endpoint.as_ref()).await?;

     let config = RpcProgramAccountsConfig {
@@ -157,9 +160,7 @@ pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<!> {
         .program_subscribe(&system_program::id(), Some(config))
         .await?;

-    loop {
-        match notif.next().await {
-            Some(update) => {
+    while let Some(update) = notif.next().await {
         let account: Account = match update.value.account.decode() {
             Some(account) => account,
             None => {
@@ -182,8 +183,8 @@ pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<!> {
                 if candidate.to_string() == update.value.pubkey {
                     let store = store.clone();
                     tokio::spawn(async move {
-                        if let Err(err) = crate::aggregate::store_update(
-                            &store,
+                        if let Err(err) = Aggregates::store_update(
+                            &*store,
                             Update::AccumulatorMessages(accumulator_messages),
                         )
                         .await
@@ -205,11 +206,8 @@ pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<!> {
             }
         };
     }
-            None => {
-                return Err(anyhow!("Pythnet network listener terminated"));
-            }
-        }
-    }
-}
+
+    Err(anyhow!("Pythnet network listener connection terminated"))
 }

 /// Fetch existing GuardianSet accounts from Wormhole.
@@ -325,6 +323,19 @@ pub async fn spawn(opts: RunOptions, state: Arc<State>) -> Result<()> {
     let price_feeds_state = state.clone();
     let mut exit = crate::EXIT.subscribe();
     tokio::spawn(async move {
+        // Run fetch and store once before the loop
+        if let Err(e) = fetch_and_store_price_feeds_metadata(
+            price_feeds_state.as_ref(),
+            &opts.pythnet.mapping_addr,
+            &rpc_client,
+        )
+        .await
+        {
+            tracing::error!(
+                "Error in initial fetching and storing price feeds metadata: {}",
+                e
+            );
+        }
         loop {
             tokio::select! {
                 _ = exit.changed() => break,
@@ -353,13 +364,18 @@ pub async fn spawn(opts: RunOptions, state: Arc<State>) -> Result<()> {
 }


-pub async fn fetch_and_store_price_feeds_metadata(
-    state: &State,
+pub async fn fetch_and_store_price_feeds_metadata<S>(
+    state: &S,
     mapping_address: &Pubkey,
     rpc_client: &RpcClient,
-) -> Result<Vec<PriceFeedMetadata>> {
+) -> Result<Vec<PriceFeedMetadata>>
+where
+    S: PriceFeedMeta,
+{
     let price_feeds_metadata = fetch_price_feeds_metadata(mapping_address, rpc_client).await?;
-    store_price_feeds_metadata(state, &price_feeds_metadata).await?;
+    state
+        .store_price_feeds_metadata(&price_feeds_metadata)
+        .await?;
     Ok(price_feeds_metadata)
 }
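Reviewer note: changing `run` to return `Result<!>` uses the never type to encode that the listener only ever returns by failing, so callers cannot accidentally treat a return as success. This presumably relies on the crate building with a nightly toolchain and `#![feature(never_type)]`; on stable, `std::convert::Infallible` plays the same role, as in this sketch:

```rust
use std::convert::Infallible;

// Only ever yields Err: "returned at all" means "the listener died".
// `remaining` is a toy stand-in for a subscription that eventually closes.
fn listen_forever(mut remaining: u32) -> Result<Infallible, String> {
    loop {
        if remaining == 0 {
            return Err("Pythnet network listener connection terminated".to_string());
        }
        remaining -= 1; // stand-in for handling one subscription message
    }
}

fn main() {
    match listen_forever(3) {
        Err(reason) => eprintln!("listener exited: {reason}"),
        Ok(never) => match never {}, // statically unreachable
    }
}
```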
@@ -7,7 +7,13 @@
 use {
     crate::{
         config::RunOptions,
-        state::State,
+        state::{
+            aggregate::{
+                Aggregates,
+                Update,
+            },
+            State,
+        },
     },
     anyhow::{
         anyhow,
@@ -43,7 +49,11 @@ use {
         Digest,
         Keccak256,
     },
-    std::sync::Arc,
+    std::{
+        sync::Arc,
+        time::Duration,
+    },
+    tokio::time::Instant,
     tonic::Request,
     wormhole_sdk::{
         vaa::{
@@ -100,10 +110,10 @@ pub struct BridgeConfig {
 /// GuardianSetData extracted from wormhole bridge account, due to no API.
 #[derive(borsh::BorshDeserialize)]
 pub struct GuardianSetData {
-    pub index: u32,
+    pub _index: u32,
     pub keys: Vec<[u8; 20]>,
-    pub creation_time: u32,
-    pub expiration_time: u32,
+    pub _creation_time: u32,
+    pub _expiration_time: u32,
 }

 /// Update the guardian set with the given ID in the state.
@@ -152,10 +162,16 @@ mod proto {
 pub async fn spawn(opts: RunOptions, state: Arc<State>) -> Result<()> {
     let mut exit = crate::EXIT.subscribe();
     loop {
+        let current_time = Instant::now();
        tokio::select! {
             _ = exit.changed() => break,
             Err(err) = run(opts.clone(), state.clone()) => {
                 tracing::error!(error = ?err, "Wormhole gRPC service failed.");
+
+                if current_time.elapsed() < Duration::from_secs(30) {
+                    tracing::error!("Wormhole listener restarting too quickly. Sleep 1s.");
+                    tokio::time::sleep(Duration::from_secs(1)).await;
+                }
             }
         }
     }
@@ -164,7 +180,7 @@ pub async fn spawn(opts: RunOptions, state: Arc<State>) -> Result<()> {
 }

 #[tracing::instrument(skip(opts, state))]
-async fn run(opts: RunOptions, state: Arc<State>) -> Result<()> {
+async fn run(opts: RunOptions, state: Arc<State>) -> Result<!> {
     let mut client = SpyRpcServiceClient::connect(opts.wormhole.spy_rpc_addr).await?;
     let mut stream = client
         .subscribe_signed_vaa(Request::new(SubscribeSignedVaaRequest {
@@ -184,7 +200,7 @@ async fn run(opts: RunOptions, state: Arc<State>) -> Result<!> {
         }
     }

-    Ok(())
+    Err(anyhow!("Wormhole gRPC stream terminated."))
 }

 /// Process a message received via a Wormhole gRPC connection.
@@ -225,7 +241,10 @@ pub async fn process_message(state: Arc<State>, vaa_bytes: Vec<u8>) -> Result<()> {
     )?;

     // Finally, store the resulting VAA in Hermes.
-    store_vaa(state.clone(), vaa.sequence, vaa_bytes).await?;
+    let sequence = vaa.sequence;
+    tokio::spawn(async move {
+        store_vaa(state.clone(), sequence, vaa_bytes).await;
+    });

     Ok(())
 }
@@ -334,16 +353,14 @@ pub fn verify_vaa<'a>(
 }

 #[tracing::instrument(skip(state, vaa_bytes))]
-pub async fn store_vaa(state: Arc<State>, sequence: u64, vaa_bytes: Vec<u8>) -> Result<()> {
+pub async fn store_vaa(state: Arc<State>, sequence: u64, vaa_bytes: Vec<u8>) {
     // Check VAA hasn't already been seen, this may have been checked previously
-    // but due to async nature It's possible other threads have mutated the state
+    // but due to async nature it's possible other threads have mutated the state
     // since this VAA started processing.
     let mut observed_vaa_seqs = state.observed_vaa_seqs.write().await;
-    ensure!(
-        !observed_vaa_seqs.contains(&sequence),
-        "Previously observed VAA: {}",
-        sequence,
-    );
+    if observed_vaa_seqs.contains(&sequence) {
+        return;
+    }

     // Clear old cached VAA sequences.
     while observed_vaa_seqs.len() > OBSERVED_CACHE_SIZE {
@@ -351,5 +368,7 @@ pub async fn store_vaa(state: Arc<State>, sequence: u64, vaa_bytes: Vec<u8>) {
     }

     // Hand the VAA to the aggregate store.
-    crate::aggregate::store_update(&state, crate::aggregate::Update::Vaa(vaa_bytes)).await
+    if let Err(e) = Aggregates::store_update(&*state, Update::Vaa(vaa_bytes)).await {
+        tracing::error!(error = ?e, "Failed to store VAA in aggregate store.");
+    }
 }
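Reviewer note: `store_vaa` now treats a duplicate as a silent no-op instead of an error, which fits the new fire-and-forget `tokio::spawn` call site. The underlying shape is a bounded set of recently observed sequence numbers checked before any work, with the oldest entries pruned at a cap. A compact hypothetical sketch of that de-duplication (the real `OBSERVED_CACHE_SIZE` and set type live elsewhere in the crate):

```rust
use std::collections::BTreeSet;

const OBSERVED_CACHE_SIZE: usize = 4; // toy value for the sketch

/// Returns true if `sequence` is fresh and was recorded, false if seen before.
fn observe(seqs: &mut BTreeSet<u64>, sequence: u64) -> bool {
    if seqs.contains(&sequence) {
        return false; // previously observed; skip re-processing
    }
    while seqs.len() >= OBSERVED_CACHE_SIZE {
        let oldest = *seqs.iter().next().expect("non-empty");
        seqs.remove(&oldest); // evict the smallest (oldest) sequence
    }
    seqs.insert(sequence)
}

fn main() {
    let mut seen = BTreeSet::new();
    for seq in [1, 2, 2, 3, 4, 5, 6] {
        println!("seq {seq}: fresh = {}", observe(&mut seen, seq));
    }
}
```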
@@ -0,0 +1,96 @@
+use {
+    crate::{
+        api::types::{
+            AssetType,
+            PriceFeedMetadata,
+        },
+        state::State,
+    },
+    anyhow::Result,
+    tokio::sync::RwLock,
+};
+
+pub const DEFAULT_PRICE_FEEDS_CACHE_UPDATE_INTERVAL: u64 = 600;
+
+pub struct PriceFeedMetaState {
+    pub data: RwLock<Vec<PriceFeedMetadata>>,
+}
+
+impl PriceFeedMetaState {
+    pub fn new() -> Self {
+        Self {
+            data: RwLock::new(Vec::new()),
+        }
+    }
+}
+
+/// Allow downcasting State into CacheState for functions that depend on the `Cache` service.
+impl<'a> From<&'a State> for &'a PriceFeedMetaState {
+    fn from(state: &'a State) -> &'a PriceFeedMetaState {
+        &state.price_feed_meta
+    }
+}
+
+#[async_trait::async_trait]
+pub trait PriceFeedMeta {
+    async fn retrieve_price_feeds_metadata(&self) -> Result<Vec<PriceFeedMetadata>>;
+    async fn store_price_feeds_metadata(
+        &self,
+        price_feeds_metadata: &[PriceFeedMetadata],
+    ) -> Result<()>;
+    async fn get_price_feeds_metadata(
+        &self,
+        query: Option<String>,
+        asset_type: Option<AssetType>,
+    ) -> Result<Vec<PriceFeedMetadata>>;
+}
+
+#[async_trait::async_trait]
+impl<T> PriceFeedMeta for T
+where
+    for<'a> &'a T: Into<&'a PriceFeedMetaState>,
+    T: Sync,
+{
+    async fn retrieve_price_feeds_metadata(&self) -> Result<Vec<PriceFeedMetadata>> {
+        let price_feeds_metadata = self.into().data.read().await;
+        Ok(price_feeds_metadata.clone())
+    }
+
+    async fn store_price_feeds_metadata(
+        &self,
+        price_feeds_metadata: &[PriceFeedMetadata],
+    ) -> Result<()> {
+        let mut price_feeds_metadata_write_guard = self.into().data.write().await;
+        *price_feeds_metadata_write_guard = price_feeds_metadata.to_vec();
+        Ok(())
+    }
+
+
+    async fn get_price_feeds_metadata(
+        &self,
+        query: Option<String>,
+        asset_type: Option<AssetType>,
+    ) -> Result<Vec<PriceFeedMetadata>> {
+        let mut price_feeds_metadata = self.retrieve_price_feeds_metadata().await?;
+
+        // Filter by query if provided
+        if let Some(query_str) = &query {
+            price_feeds_metadata.retain(|feed| {
+                feed.attributes.get("symbol").map_or(false, |symbol| {
+                    symbol.to_lowercase().contains(&query_str.to_lowercase())
+                })
+            });
+        }
+
+        // Filter by asset_type if provided
+        if let Some(asset_type) = &asset_type {
+            price_feeds_metadata.retain(|feed| {
+                feed.attributes.get("asset_type").map_or(false, |type_str| {
+                    type_str.to_lowercase() == asset_type.to_string().to_lowercase()
+                })
+            });
+        }
+
+        Ok(price_feeds_metadata)
+    }
+}
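Reviewer note: this new module is the clearest statement of the service pattern used throughout the refactor: a blanket impl grants the capability trait to any type whose reference converts into a reference to the service's own state slice (`for<'a> &'a T: Into<&'a PriceFeedMetaState>`), and `State` opts in with a one-line `From` impl per service. A self-contained miniature of the same mechanism, with simplified stand-in types:

```rust
// The service's private state slice.
struct MetaState {
    feeds: Vec<String>,
}

// The application's global state, composed of service slices.
struct State {
    meta: MetaState,
}

// One-line opt-in: `&State` downcasts to `&MetaState`.
impl<'a> From<&'a State> for &'a MetaState {
    fn from(state: &'a State) -> &'a MetaState {
        &state.meta
    }
}

// The capability trait the rest of the code depends on.
trait PriceFeedMeta {
    fn feed_names(&self) -> &[String];
}

// Blanket impl: anything downcastable to MetaState gets the trait for free.
impl<T> PriceFeedMeta for T
where
    for<'a> &'a T: Into<&'a MetaState>,
{
    fn feed_names(&self) -> &[String] {
        let meta: &MetaState = self.into();
        &meta.feeds
    }
}

fn main() {
    let state = State {
        meta: MetaState { feeds: vec!["BTC/USD".to_string()] },
    };
    println!("{:?}", state.feed_names());
}
```

The payoff is that handlers bound on `S: PriceFeedMeta` never see the whole `State`, and a test double only has to provide the one slice it exercises.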
@@ -1,14 +1,17 @@
 //! This module contains the global state of the application.

 use {
-    self::cache::Cache,
-    crate::{
-        aggregate::{
+    self::{
+        aggregate::{
             AggregateState,
             AggregationEvent,
         },
-        api::types::PriceFeedMetadata,
+        benchmarks::BenchmarksState,
+        cache::CacheState,
+    },
+    crate::{
         network::wormhole::GuardianSet,
+        price_feeds_metadata::PriceFeedMetaState,
     },
     prometheus_client::registry::Registry,
     reqwest::Url,
@@ -25,13 +28,22 @@ use {
     },
 };

+pub mod aggregate;
 pub mod benchmarks;
 pub mod cache;

 pub struct State {
-    /// Storage is a short-lived cache of the state of all the updates that have been passed to the
-    /// store.
-    pub cache: Cache,
+    /// State for the `Cache` service for short-lived storage of updates.
+    pub cache: CacheState,
+
+    /// State for the `Benchmarks` service for looking up historical updates.
+    pub benchmarks: BenchmarksState,
+
+    /// State for the `PriceFeedMeta` service for looking up metadata related to Pyth price feeds.
+    pub price_feed_meta: PriceFeedMetaState,
+
+    /// State for accessing/storing Pyth price aggregates.
+    pub aggregates: AggregateState,

     /// Sequence numbers of lately observed Vaas. Store uses this set
     /// to ignore the previously observed Vaas as a performance boost.
@@ -40,20 +52,8 @@ pub struct State {
     /// Wormhole guardian sets. It is used to verify Vaas before using them.
     pub guardian_set: RwLock<BTreeMap<u32, GuardianSet>>,

-    /// The sender to the channel between Store and Api to notify completed updates.
-    pub api_update_tx: Sender<AggregationEvent>,
-
-    /// The aggregate module state.
-    pub aggregate_state: RwLock<AggregateState>,
-
-    /// Benchmarks endpoint
-    pub benchmarks_endpoint: Option<Url>,
-
     /// Metrics registry
     pub metrics_registry: RwLock<Registry>,
-
-    /// Price feeds metadata
-    pub price_feeds_metadata: RwLock<Vec<PriceFeedMetadata>>,
 }

 impl State {
@@ -64,14 +64,13 @@ impl State {
     ) -> Arc<Self> {
         let mut metrics_registry = Registry::default();
         Arc::new(Self {
-            cache: Cache::new(cache_size),
+            cache: CacheState::new(cache_size),
+            benchmarks: BenchmarksState::new(benchmarks_endpoint),
+            price_feed_meta: PriceFeedMetaState::new(),
+            aggregates: AggregateState::new(update_tx, &mut metrics_registry),
             observed_vaa_seqs: RwLock::new(Default::default()),
             guardian_set: RwLock::new(Default::default()),
-            api_update_tx: update_tx,
-            aggregate_state: RwLock::new(AggregateState::new(&mut metrics_registry)),
-            benchmarks_endpoint,
             metrics_registry: RwLock::new(metrics_registry),
-            price_feeds_metadata: RwLock::new(Default::default()),
         })
     }
 }
@@ -20,10 +20,11 @@ use {
     },
     crate::{
         network::wormhole::VaaBytes,
+        price_feeds_metadata::PriceFeedMeta,
         state::{
             benchmarks::Benchmarks,
             cache::{
-                AggregateCache,
+                Cache,
                 MessageState,
                 MessageStateFilter,
             },
@@ -59,6 +60,13 @@ use {
         collections::HashSet,
         time::Duration,
     },
+    tokio::sync::{
+        broadcast::{
+            Receiver,
+            Sender,
+        },
+        RwLock,
+    },
     wormhole_sdk::Vaa,
 };

@@ -102,8 +110,7 @@ impl AggregationEvent {
     }
 }

-#[derive(Clone, Debug)]
-pub struct AggregateState {
+pub struct AggregateStateData {
     /// The latest completed slot. This is used to check whether a completed state is new or out of
     /// order.
     pub latest_completed_slot: Option<Slot>,
@@ -119,7 +126,7 @@ pub struct AggregateStateData {
     pub metrics: metrics::Metrics,
 }

-impl AggregateState {
+impl AggregateStateData {
     pub fn new(metrics_registry: &mut Registry) -> Self {
         Self {
             latest_completed_slot: None,
@@ -130,6 +137,20 @@ impl AggregateStateData {
         }
     }
 }

+pub struct AggregateState {
+    pub data: RwLock<AggregateStateData>,
+    pub api_update_tx: Sender<AggregationEvent>,
+}
+
+impl AggregateState {
+    pub fn new(update_tx: Sender<AggregationEvent>, metrics_registry: &mut Registry) -> Self {
+        Self {
+            data: RwLock::new(AggregateStateData::new(metrics_registry)),
+            api_update_tx: update_tx,
+        }
+    }
+}
+
 /// Accumulator messages coming from Pythnet validators.
 ///
 /// The validators writes the accumulator messages using Borsh with
@@ -177,9 +198,48 @@ const READINESS_STALENESS_THRESHOLD: Duration = Duration::from_secs(30);
 /// 10 slots is almost 5 seconds.
 const READINESS_MAX_ALLOWED_SLOT_LAG: Slot = 10;

+#[async_trait::async_trait]
+pub trait Aggregates
+where
+    Self: Cache,
+    Self: Benchmarks,
+    Self: PriceFeedMeta,
+{
+    fn subscribe(&self) -> Receiver<AggregationEvent>;
+    async fn is_ready(&self) -> bool;
+    async fn store_update(&self, update: Update) -> Result<()>;
+    async fn get_price_feed_ids(&self) -> HashSet<PriceIdentifier>;
+    async fn get_price_feeds_with_update_data(
+        &self,
+        price_ids: &[PriceIdentifier],
+        request_time: RequestTime,
+    ) -> Result<PriceFeedsWithUpdateData>;
+}
+
+/// Allow downcasting State into CacheState for functions that depend on the `Cache` service.
+impl<'a> From<&'a State> for &'a AggregateState {
+    fn from(state: &'a State) -> &'a AggregateState {
+        &state.aggregates
+    }
+}
+
+#[async_trait::async_trait]
+impl<T> Aggregates for T
+where
+    for<'a> &'a T: Into<&'a AggregateState>,
+    T: Sync,
+    T: Send,
+    T: Cache,
+    T: Benchmarks,
+    T: PriceFeedMeta,
+{
+    fn subscribe(&self) -> Receiver<AggregationEvent> {
+        self.into().api_update_tx.subscribe()
+    }
+
     /// Stores the update data in the store
-#[tracing::instrument(skip(state, update))]
-pub async fn store_update(state: &State, update: Update) -> Result<()> {
+    #[tracing::instrument(skip(self, update))]
+    async fn store_update(&self, update: Update) -> Result<()> {
         // The slot that the update is originating from. It should be available
         // in all the updates.
         let slot = match update {
@@ -192,14 +252,14 @@ pub async fn store_update(state: &State, update: Update) -> Result<()> {
                 tracing::info!(slot = proof.slot, "Storing VAA Merkle Proof.");

                 store_wormhole_merkle_verified_message(
-                    state,
+                    self,
                     proof.clone(),
                     update_vaa.to_owned(),
                 )
                 .await?;

-                state
-                    .aggregate_state
+                self.into()
+                    .data
                     .write()
                     .await
                     .metrics
@@ -213,12 +273,11 @@ pub async fn store_update(state: &State, update: Update) -> Result<()> {
                 let slot = accumulator_messages.slot;
                 tracing::info!(slot = slot, "Storing Accumulator Messages.");

-                state
-                    .store_accumulator_messages(accumulator_messages)
+                self.store_accumulator_messages(accumulator_messages)
                     .await?;

-                state
-                    .aggregate_state
+                self.into()
+                    .data
                     .write()
                     .await
                     .metrics
@@ -229,15 +288,15 @@ pub async fn store_update(state: &State, update: Update) -> Result<()> {

         // Update the aggregate state with the latest observed slot
         {
-            let mut aggregate_state = state.aggregate_state.write().await;
+            let mut aggregate_state = self.into().data.write().await;
             aggregate_state.latest_observed_slot = aggregate_state
                 .latest_observed_slot
                 .map(|latest| latest.max(slot))
                 .or(Some(slot));
         }

-        let accumulator_messages = state.fetch_accumulator_messages(slot).await?;
-        let wormhole_merkle_state = state.fetch_wormhole_merkle_state(slot).await?;
+        let accumulator_messages = self.fetch_accumulator_messages(slot).await?;
+        let wormhole_merkle_state = self.fetch_wormhole_merkle_state(slot).await?;

         let (accumulator_messages, wormhole_merkle_state) =
             match (accumulator_messages, wormhole_merkle_state) {
@@ -259,28 +318,32 @@ pub async fn store_update(state: &State, update: Update) -> Result<()> {
             .collect::<HashSet<_>>();

         tracing::info!(len = message_states.len(), "Storing Message States.");
-        state.store_message_states(message_states).await?;
+        self.store_message_states(message_states).await?;

         // Update the aggregate state
-        let mut aggregate_state = state.aggregate_state.write().await;
+        let mut aggregate_state = self.into().data.write().await;

-        // Check if the update is new or out of order
-        match aggregate_state.latest_completed_slot {
+        // Send update event to subscribers. We are purposefully ignoring the result
+        // because there might be no subscribers.
+        let _ = match aggregate_state.latest_completed_slot {
             None => {
                 aggregate_state.latest_completed_slot.replace(slot);
-                state.api_update_tx.send(AggregationEvent::New { slot })?;
+                self.into()
+                    .api_update_tx
+                    .send(AggregationEvent::New { slot })
             }
             Some(latest) if slot > latest => {
-                state.prune_removed_keys(message_state_keys).await;
+                self.prune_removed_keys(message_state_keys).await;
                 aggregate_state.latest_completed_slot.replace(slot);
-                state.api_update_tx.send(AggregationEvent::New { slot })?;
-            }
-            _ => {
-                state
-                    .api_update_tx
-                    .send(AggregationEvent::OutOfOrder { slot })?;
-            }
-        }
+                self.into()
+                    .api_update_tx
+                    .send(AggregationEvent::New { slot })
+            }
+            _ => self
+                .into()
+                .api_update_tx
+                .send(AggregationEvent::OutOfOrder { slot }),
+        };

         aggregate_state.latest_completed_slot = aggregate_state
             .latest_completed_slot
@@ -298,6 +361,59 @@ pub async fn store_update(state: &State, update: Update) -> Result<()> {
     Ok(())
 }

+    async fn get_price_feeds_with_update_data(
+        &self,
+        price_ids: &[PriceIdentifier],
+        request_time: RequestTime,
+    ) -> Result<PriceFeedsWithUpdateData> {
+        match get_verified_price_feeds(self, price_ids, request_time.clone()).await {
+            Ok(price_feeds_with_update_data) => Ok(price_feeds_with_update_data),
+            Err(e) => {
+                if let RequestTime::FirstAfter(publish_time) = request_time {
+                    return Benchmarks::get_verified_price_feeds(self, price_ids, publish_time)
+                        .await;
+                }
+                Err(e)
+            }
+        }
+    }
+
+    async fn get_price_feed_ids(&self) -> HashSet<PriceIdentifier> {
+        Cache::message_state_keys(self)
+            .await
+            .iter()
+            .map(|key| PriceIdentifier::new(key.feed_id))
+            .collect()
+    }
+
+    async fn is_ready(&self) -> bool {
+        let metadata = self.into().data.read().await;
+        let price_feeds_metadata = PriceFeedMeta::retrieve_price_feeds_metadata(self)
+            .await
+            .unwrap();
+
+        let has_completed_recently = match metadata.latest_completed_update_at.as_ref() {
+            Some(latest_completed_update_time) => {
+                latest_completed_update_time.elapsed() < READINESS_STALENESS_THRESHOLD
+            }
+            None => false,
+        };
+
+        let is_not_behind = match (
+            metadata.latest_completed_slot,
+            metadata.latest_observed_slot,
+        ) {
+            (Some(latest_completed_slot), Some(latest_observed_slot)) => {
+                latest_observed_slot - latest_completed_slot <= READINESS_MAX_ALLOWED_SLOT_LAG
+            }
+            _ => false,
+        };
+
+        let is_metadata_loaded = !price_feeds_metadata.is_empty();
+        has_completed_recently && is_not_behind && is_metadata_loaded
+    }
+}

 #[tracing::instrument(skip(accumulator_messages, wormhole_merkle_state))]
 fn build_message_states(
     accumulator_messages: AccumulatorMessages,
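The `is_ready` method added above gates readiness on three independent checks: a recently completed update, bounded slot lag, and loaded feed metadata. A minimal standalone sketch of that predicate, with hypothetical threshold values and a simplified struct standing in for the real aggregate state:

    use std::time::{Duration, Instant};

    const READINESS_STALENESS_THRESHOLD: Duration = Duration::from_secs(30); // hypothetical value
    const READINESS_MAX_ALLOWED_SLOT_LAG: u64 = 10; // hypothetical value

    struct AggregateStateData {
        latest_completed_update_at: Option<Instant>,
        latest_completed_slot: Option<u64>,
        latest_observed_slot: Option<u64>,
    }

    fn is_ready(data: &AggregateStateData, metadata_entries: usize) -> bool {
        // Condition 1: an aggregation completed recently enough.
        let has_completed_recently = data
            .latest_completed_update_at
            .map(|t| t.elapsed() < READINESS_STALENESS_THRESHOLD)
            .unwrap_or(false);

        // Condition 2: aggregation is not lagging the latest observed chain slot.
        // saturating_sub avoids underflow if completion ever runs ahead of observation.
        let is_not_behind = match (data.latest_completed_slot, data.latest_observed_slot) {
            (Some(completed), Some(observed)) => {
                observed.saturating_sub(completed) <= READINESS_MAX_ALLOWED_SLOT_LAG
            }
            _ => false,
        };

        // Condition 3: price feed metadata has been loaded at least once.
        let is_metadata_loaded = metadata_entries > 0;

        has_completed_recently && is_not_behind && is_metadata_loaded
    }

    fn main() {
        let data = AggregateStateData {
            latest_completed_update_at: Some(Instant::now()),
            latest_completed_slot: Some(995),
            latest_observed_slot: Some(1000),
        };
        assert!(is_ready(&data, 1));
    }
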
@@ -336,7 +452,7 @@ async fn get_verified_price_feeds<S>(
     request_time: RequestTime,
 ) -> Result<PriceFeedsWithUpdateData>
 where
-    S: AggregateCache,
+    S: Cache,
 {
     let messages = state
         .fetch_message_states(
@@ -390,71 +506,12 @@ where
     })
 }

-pub async fn get_price_feeds_with_update_data<S>(
-    state: &S,
-    price_ids: &[PriceIdentifier],
-    request_time: RequestTime,
-) -> Result<PriceFeedsWithUpdateData>
-where
-    S: AggregateCache,
-    S: Benchmarks,
-{
-    match get_verified_price_feeds(state, price_ids, request_time.clone()).await {
-        Ok(price_feeds_with_update_data) => Ok(price_feeds_with_update_data),
-        Err(e) => {
-            if let RequestTime::FirstAfter(publish_time) = request_time {
-                return Benchmarks::get_verified_price_feeds(state, price_ids, publish_time).await;
-            }
-            Err(e)
-        }
-    }
-}
-
-pub async fn get_price_feed_ids<S>(state: &S) -> HashSet<PriceIdentifier>
-where
-    S: AggregateCache,
-{
-    state
-        .message_state_keys()
-        .await
-        .iter()
-        .map(|key| PriceIdentifier::new(key.feed_id))
-        .collect()
-}
-
-pub async fn is_ready(state: &State) -> bool {
-    let metadata = state.aggregate_state.read().await;
-    let price_feeds_metadata = state.price_feeds_metadata.read().await;
-
-    let has_completed_recently = match metadata.latest_completed_update_at.as_ref() {
-        Some(latest_completed_update_time) => {
-            latest_completed_update_time.elapsed() < READINESS_STALENESS_THRESHOLD
-        }
-        None => false,
-    };
-
-    let is_not_behind = match (
-        metadata.latest_completed_slot,
-        metadata.latest_observed_slot,
-    ) {
-        (Some(latest_completed_slot), Some(latest_observed_slot)) => {
-            latest_observed_slot - latest_completed_slot <= READINESS_MAX_ALLOWED_SLOT_LAG
-        }
-        _ => false,
-    };
-
-    let is_metadata_loaded = !price_feeds_metadata.is_empty();
-
-    has_completed_recently && is_not_behind && is_metadata_loaded
-}
-
 #[cfg(test)]
 mod test {
     use {
         super::*,
         crate::{
             api::types::PriceFeedMetadata,
-            price_feeds_metadata::store_price_feeds_metadata,
             state::test::setup_state,
         },
         futures::future::join_all,
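The free functions removed above survive as the trait methods shown earlier, and their control flow is the interesting part: verified in-memory data is preferred, and the Benchmarks API is consulted only when the request is a historical `FirstAfter` lookup. A self-contained sketch of that fallback shape, with stand-in types and stub lookups (`tokio` and `anyhow` assumed as dependencies):

    use anyhow::{anyhow, Result};

    #[derive(Clone)]
    #[allow(dead_code)]
    enum RequestTime {
        Latest,
        FirstAfter(i64), // unix timestamp
    }

    // Stub for the cache-backed path; pretend the cache misses.
    async fn get_verified(_request_time: &RequestTime) -> Result<String> {
        Err(anyhow!("not available in the in-memory cache"))
    }

    // Stub for the historical-archive path.
    async fn benchmarks_lookup(publish_time: i64) -> Result<String> {
        Ok(format!("historical update at {publish_time}"))
    }

    // Prefer live/cached data; fall back to the archive only for FirstAfter
    // queries, where a historical lookup is meaningful.
    async fn get_price_feeds(request_time: RequestTime) -> Result<String> {
        match get_verified(&request_time).await {
            Ok(data) => Ok(data),
            Err(e) => {
                if let RequestTime::FirstAfter(publish_time) = request_time {
                    return benchmarks_lookup(publish_time).await;
                }
                Err(e)
            }
        }
    }

    #[tokio::main]
    async fn main() -> Result<()> {
        println!("{}", get_price_feeds(RequestTime::FirstAfter(1_700_000_000)).await?);
        Ok(())
    }
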
@@ -468,10 +525,7 @@ mod test {
             Accumulator,
         },
         hashers::keccak256_160::Keccak160,
-        messages::{
-            Message,
-            PriceFeedMessage,
-        },
+        messages::PriceFeedMessage,
         wire::v1::{
             AccumulatorUpdateData,
             Proof,
@@ -559,7 +613,7 @@ mod test {
     }

     pub async fn store_multiple_concurrent_valid_updates(state: Arc<State>, updates: Vec<Update>) {
-        let res = join_all(updates.into_iter().map(|u| store_update(&state, u))).await;
+        let res = join_all(updates.into_iter().map(|u| (&state).store_update(u))).await;
         // Check that all store_update calls succeeded
         assert!(res.into_iter().all(|r| r.is_ok()));
     }
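The helper above fans every `store_update` call out concurrently through `futures::future::join_all` and then asserts that none failed. The same pattern in isolation, with numeric stand-ins for the real `Update` values (`futures`, `tokio`, and `anyhow` assumed as dependencies):

    use anyhow::Result;
    use futures::future::join_all;

    // Stand-in for a real update; the actual test feeds Vec<Update> into store_update.
    async fn store_update(value: u64) -> Result<()> {
        anyhow::ensure!(value < 1000, "slot out of range");
        Ok(())
    }

    #[tokio::main]
    async fn main() {
        let updates: Vec<u64> = (0..100).collect();
        // Drive all futures concurrently and collect their results in order.
        let res = join_all(updates.into_iter().map(store_update)).await;
        assert!(res.into_iter().all(|r| r.is_ok()));
    }
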
@@ -585,14 +639,14 @@ mod test {

         // Check the price ids are stored correctly
         assert_eq!(
-            get_price_feed_ids(&*state).await,
+            (&*state).get_price_feed_ids().await,
             vec![PriceIdentifier::new([100; 32])].into_iter().collect()
         );

         // Check get_price_feeds_with_update_data retrieves the correct
         // price feed with correct update data.
-        let price_feeds_with_update_data = get_price_feeds_with_update_data(
-            &*state,
+        let price_feeds_with_update_data = (&*state)
+            .get_price_feeds_with_update_data(
             &[PriceIdentifier::new([100; 32])],
             RequestTime::Latest,
         )
@@ -710,7 +764,7 @@ mod test {

         // Check the price ids are stored correctly
         assert_eq!(
-            get_price_feed_ids(&*state).await,
+            (&*state).get_price_feed_ids().await,
             vec![
                 PriceIdentifier::new([100; 32]),
                 PriceIdentifier::new([200; 32])
@@ -720,8 +774,8 @@ mod test {
         );

         // Check that price feed 2 exists
-        assert!(get_price_feeds_with_update_data(
-            &*state,
+        assert!((&*state)
+            .get_price_feeds_with_update_data(
             &[PriceIdentifier::new([200; 32])],
             RequestTime::Latest,
         )
@@ -747,12 +801,12 @@ mod test {

         // Check that price feed 2 does not exist anymore
         assert_eq!(
-            get_price_feed_ids(&*state).await,
+            (&*state).get_price_feed_ids().await,
             vec![PriceIdentifier::new([100; 32]),].into_iter().collect()
         );

-        assert!(get_price_feeds_with_update_data(
-            &*state,
+        assert!((&*state)
+            .get_price_feeds_with_update_data(
             &[PriceIdentifier::new([200; 32])],
             RequestTime::Latest,
         )
@@ -793,8 +847,8 @@ mod test {
         MockClock::advance(Duration::from_secs(1));

         // Get the price feeds with update data
-        let price_feeds_with_update_data = get_price_feeds_with_update_data(
-            &*state,
+        let price_feeds_with_update_data = (&*state)
+            .get_price_feeds_with_update_data(
             &[PriceIdentifier::new([100; 32])],
             RequestTime::Latest,
         )
@@ -810,24 +864,22 @@ mod test {

         // Add a dummy price feeds metadata
-        store_price_feeds_metadata(
-            &state,
-            &[PriceFeedMetadata {
+        state
+            .store_price_feeds_metadata(&[PriceFeedMetadata {
                 id: PriceIdentifier::new([100; 32]),
                 attributes: Default::default(),
-            }],
-        )
+            }])
         .await
         .unwrap();

         // Check the state is ready
-        assert!(is_ready(&state).await);
+        assert!((&state).is_ready().await);

         // Advance the clock to make the prices stale
         MockClock::advance_system_time(READINESS_STALENESS_THRESHOLD);
         MockClock::advance(READINESS_STALENESS_THRESHOLD);
         // Check the state is not ready
-        assert!(!is_ready(&state).await);
+        assert!(!(&state).is_ready().await);
     }

     /// Test that the state retains the latest slots upon cache eviction.
@@ -870,8 +922,8 @@ mod test {

         // Check the last 100 slots are retained
         for slot in 900..1000 {
-            let price_feeds_with_update_data = get_price_feeds_with_update_data(
-                &*state,
+            let price_feeds_with_update_data = (&*state)
+                .get_price_feeds_with_update_data(
                 &[
                     PriceIdentifier::new([100; 32]),
                     PriceIdentifier::new([200; 32]),
@@ -887,8 +939,8 @@ mod test {

         // Check nothing else is retained
         for slot in 0..900 {
-            assert!(get_price_feeds_with_update_data(
-                &*state,
+            assert!((&*state)
+                .get_price_feeds_with_update_data(
                 &[
                     PriceIdentifier::new([100; 32]),
                     PriceIdentifier::new([200; 32])
@@ -7,7 +7,7 @@ use {
     crate::{
         network::wormhole::VaaBytes,
         state::cache::{
-            AggregateCache,
+            Cache,
             MessageState,
         },
     },
@@ -70,14 +70,14 @@ impl From<MessageState> for RawMessageWithMerkleProof {
 }

 pub async fn store_wormhole_merkle_verified_message<S>(
-    store: &S,
+    state: &S,
     root: WormholeMerkleRoot,
     vaa: VaaBytes,
 ) -> Result<()>
 where
-    S: AggregateCache,
+    S: Cache,
 {
-    store
+    state
         .store_wormhole_merkle_state(WormholeMerkleState { root, vaa })
         .await?;
     Ok(())
@@ -1,19 +1,21 @@
 //! This module communicates with Pyth Benchmarks, an API for historical price feeds and their updates.

 use {
-    crate::{
+    super::{
         aggregate::{
             PriceFeedsWithUpdateData,
             UnixTimestamp,
         },
-        api::types::PriceUpdate,
+        State,
     },
+    crate::api::types::PriceUpdate,
     anyhow::Result,
     base64::{
         engine::general_purpose::STANDARD as base64_standard_engine,
         Engine as _,
     },
     pyth_sdk::PriceIdentifier,
+    reqwest::Url,
     serde::Deserialize,
 };
@@ -50,6 +52,23 @@ impl TryFrom<BinaryBlob> for Vec<Vec<u8>> {
     }
 }

+pub struct BenchmarksState {
+    endpoint: Option<Url>,
+}
+
+impl BenchmarksState {
+    pub fn new(url: Option<Url>) -> Self {
+        Self { endpoint: url }
+    }
+}
+
+/// Allow downcasting State into BenchmarksState for functions that depend on the `Benchmarks` service.
+impl<'a> From<&'a State> for &'a BenchmarksState {
+    fn from(state: &'a State) -> &'a BenchmarksState {
+        &state.benchmarks
+    }
+}
+
 #[async_trait::async_trait]
 pub trait Benchmarks {
     async fn get_verified_price_feeds(
@@ -60,21 +79,25 @@ pub trait Benchmarks {
 }

 #[async_trait::async_trait]
-impl Benchmarks for crate::state::State {
+impl<T> Benchmarks for T
+where
+    for<'a> &'a T: Into<&'a BenchmarksState>,
+    T: Sync,
+{
     async fn get_verified_price_feeds(
         &self,
         price_ids: &[PriceIdentifier],
         publish_time: UnixTimestamp,
     ) -> Result<PriceFeedsWithUpdateData> {
         let endpoint = self
-            .benchmarks_endpoint
+            .into()
+            .endpoint
             .as_ref()
             .ok_or_else(|| anyhow::anyhow!("Benchmarks endpoint is not set"))?
             .join(&format!("/v1/updates/price/{}", publish_time))
             .unwrap();

-        let client = reqwest::Client::new();
-        let mut request = client
+        let mut request = reqwest::Client::new()
             .get(endpoint)
             .timeout(BENCHMARKS_REQUEST_TIMEOUT)
             .query(&[("encoding", "hex")])
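Both this Benchmarks change and the cache change below follow the same idiom: each service keeps its own small state struct, the top-level `State` exposes it through a `From<&State>` "downcast", and a blanket impl gives the service trait to any type convertible into that struct. A condensed, self-contained sketch of the pattern; the names `ServiceState` and `Service` are illustrative, not from the diff:

    // Per-service state owned by the top-level State.
    struct ServiceState {
        counter: u32,
    }

    struct State {
        service: ServiceState,
    }

    // "Downcast" State to the service's slice of state.
    impl<'a> From<&'a State> for &'a ServiceState {
        fn from(state: &'a State) -> &'a ServiceState {
            &state.service
        }
    }

    // The service API, implemented once for every type that can be viewed
    // as a ServiceState -- State picks it up for free.
    trait Service {
        fn counter(&self) -> u32;
    }

    impl<T> Service for T
    where
        for<'a> &'a T: Into<&'a ServiceState>,
    {
        fn counter(&self) -> u32 {
            let service_state: &ServiceState = self.into();
            service_state.counter
        }
    }

    fn main() {
        let state = State {
            service: ServiceState { counter: 7 },
        };
        assert_eq!(state.counter(), 7);
    }

One design consequence: callers and tests only need something convertible into the service's state struct, so services stay decoupled from the concrete `State` type while still being callable directly on it.
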
@@ -1,5 +1,6 @@
 use {
-    crate::aggregate::{
+    super::State,
+    crate::state::aggregate::{
         wormhole_merkle::WormholeMerkleState,
         AccumulatorMessages,
         ProofSet,
@@ -96,23 +97,186 @@ pub enum MessageStateFilter {
     Only(MessageType),
 }

-pub struct Cache {
-    /// Accumulator messages cache
-    ///
-    /// We do not write to this cache much, so we can use a simple RwLock instead of a DashMap.
-    accumulator_messages_cache: Arc<RwLock<BTreeMap<Slot, AccumulatorMessages>>>,
-
-    /// Wormhole merkle state cache
-    ///
-    /// We do not write to this cache much, so we can use a simple RwLock instead of a DashMap.
-    wormhole_merkle_state_cache: Arc<RwLock<BTreeMap<Slot, WormholeMerkleState>>>,
-
-    message_cache: Arc<RwLock<HashMap<MessageStateKey, BTreeMap<MessageStateTime, MessageState>>>>,
-
-    cache_size: u64,
-}
+/// A Cache of AccumulatorMessage by slot. We do not write to this cache much, so we can use a simple RwLock instead of a DashMap.
+type AccumulatorMessagesCache = Arc<RwLock<BTreeMap<Slot, AccumulatorMessages>>>;
+
+/// A Cache of WormholeMerkleState by slot. We do not write to this cache much, so we can use a simple RwLock instead of a DashMap.
+type WormholeMerkleStateCache = Arc<RwLock<BTreeMap<Slot, WormholeMerkleState>>>;
+
+/// A Cache of `Time<->MessageState` by feed id.
+type MessageCache = Arc<RwLock<HashMap<MessageStateKey, BTreeMap<MessageStateTime, MessageState>>>>;
+
+/// A collection of caches for various program state.
+pub struct CacheState {
+    accumulator_messages_cache: AccumulatorMessagesCache,
+    wormhole_merkle_state_cache: WormholeMerkleStateCache,
+    message_cache: MessageCache,
+    cache_size: u64,
+}
+
+impl CacheState {
+    pub fn new(size: u64) -> Self {
+        Self {
+            accumulator_messages_cache: Arc::new(RwLock::new(BTreeMap::new())),
+            wormhole_merkle_state_cache: Arc::new(RwLock::new(BTreeMap::new())),
+            message_cache: Arc::new(RwLock::new(HashMap::new())),
+            cache_size: size,
+        }
+    }
+}
+
+/// Allow downcasting State into CacheState for functions that depend on the `Cache` service.
+impl<'a> From<&'a State> for &'a CacheState {
+    fn from(state: &'a State) -> &'a CacheState {
+        &state.cache
+    }
+}
+
+#[async_trait::async_trait]
+pub trait Cache {
+    async fn store_message_states(&self, message_states: Vec<MessageState>) -> Result<()>;
+    async fn prune_removed_keys(&self, current_keys: HashSet<MessageStateKey>);
+    async fn store_accumulator_messages(
+        &self,
+        accumulator_messages: AccumulatorMessages,
+    ) -> Result<()>;
+    async fn fetch_accumulator_messages(&self, slot: Slot) -> Result<Option<AccumulatorMessages>>;
+    async fn store_wormhole_merkle_state(
+        &self,
+        wormhole_merkle_state: WormholeMerkleState,
+    ) -> Result<()>;
+    async fn fetch_wormhole_merkle_state(&self, slot: Slot) -> Result<Option<WormholeMerkleState>>;
+    async fn message_state_keys(&self) -> Vec<MessageStateKey>;
+    async fn fetch_message_states(
+        &self,
+        ids: Vec<FeedId>,
+        request_time: RequestTime,
+        filter: MessageStateFilter,
+    ) -> Result<Vec<MessageState>>;
+}
+
+#[async_trait::async_trait]
+impl<T> Cache for T
+where
+    for<'a> &'a T: Into<&'a CacheState>,
+    T: Sync,
+{
+    async fn message_state_keys(&self) -> Vec<MessageStateKey> {
+        self.into()
+            .message_cache
+            .read()
+            .await
+            .iter()
+            .map(|entry| entry.0.clone())
+            .collect::<Vec<_>>()
+    }
+
+    async fn store_message_states(&self, message_states: Vec<MessageState>) -> Result<()> {
+        let mut message_cache = self.into().message_cache.write().await;
+
+        for message_state in message_states {
+            let key = message_state.key();
+            let time = message_state.time();
+            let cache = message_cache.entry(key).or_insert_with(BTreeMap::new);
+            cache.insert(time, message_state);
+
+            // Remove the earliest message states if the cache size is exceeded
+            while cache.len() > self.into().cache_size as usize {
+                cache.pop_first();
+            }
+        }
+
+        Ok(())
+    }
+
+    /// This method takes the current feed ids and prunes the cache for the keys
+    /// that are not present in the current feed ids.
+    ///
+    /// There is a side-effect of this: if a key gets removed, we will
+    /// lose the cache for that key and cannot retrieve it for historical
+    /// price queries.
+    async fn prune_removed_keys(&self, current_keys: HashSet<MessageStateKey>) {
+        let mut message_cache = self.into().message_cache.write().await;
+
+        // Sometimes, some keys are removed from the accumulator. We track which keys are not
+        // present in the message states and remove them from the cache.
+        let keys_in_cache = message_cache
+            .iter()
+            .map(|(key, _)| key.clone())
+            .collect::<HashSet<_>>();
+
+        for key in keys_in_cache {
+            if !current_keys.contains(&key) {
+                tracing::info!("Feed {:?} seems to be removed. Removing it from cache", key);
+                message_cache.remove(&key);
+            }
+        }
+    }
+
+    async fn fetch_message_states(
+        &self,
+        ids: Vec<FeedId>,
+        request_time: RequestTime,
+        filter: MessageStateFilter,
+    ) -> Result<Vec<MessageState>> {
+        join_all(ids.into_iter().flat_map(|id| {
+            let request_time = request_time.clone();
+            let message_types: Vec<MessageType> = match filter {
+                MessageStateFilter::All => MessageType::iter().collect(),
+                MessageStateFilter::Only(t) => vec![t],
+            };
+
+            message_types.into_iter().map(move |message_type| {
+                let key = MessageStateKey {
+                    feed_id: id,
+                    type_: message_type,
+                };
+                retrieve_message_state(self.into(), key, request_time.clone())
+            })
+        }))
+        .await
+        .into_iter()
+        .collect::<Option<Vec<_>>>()
+        .ok_or(anyhow!("Message not found"))
+    }
+
+    async fn store_accumulator_messages(
+        &self,
+        accumulator_messages: AccumulatorMessages,
+    ) -> Result<()> {
+        let mut cache = self.into().accumulator_messages_cache.write().await;
+        cache.insert(accumulator_messages.slot, accumulator_messages);
+        while cache.len() > self.into().cache_size as usize {
+            cache.pop_first();
+        }
+        Ok(())
+    }
+
+    async fn fetch_accumulator_messages(&self, slot: Slot) -> Result<Option<AccumulatorMessages>> {
+        let cache = self.into().accumulator_messages_cache.read().await;
+        Ok(cache.get(&slot).cloned())
+    }
+
+    async fn store_wormhole_merkle_state(
+        &self,
+        wormhole_merkle_state: WormholeMerkleState,
+    ) -> Result<()> {
+        let mut cache = self.into().wormhole_merkle_state_cache.write().await;
+        cache.insert(wormhole_merkle_state.root.slot, wormhole_merkle_state);
+        while cache.len() > self.into().cache_size as usize {
+            cache.pop_first();
+        }
+        Ok(())
+    }
+
+    async fn fetch_wormhole_merkle_state(&self, slot: Slot) -> Result<Option<WormholeMerkleState>> {
+        let cache = self.into().wormhole_merkle_state_cache.read().await;
+        Ok(cache.get(&slot).cloned())
+    }
+}

 async fn retrieve_message_state(
-    cache: &Cache,
+    cache: &CacheState,
     key: MessageStateKey,
     request_time: RequestTime,
 ) -> Option<MessageState> {
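Note how `store_message_states`, `store_accumulator_messages`, and `store_wormhole_merkle_state` above all bound their maps the same way: insert, then pop the smallest keys while over capacity. Because the maps are `BTreeMap`s keyed by slot or timestamp, `pop_first` (stable since Rust 1.66) always evicts the oldest entry first. A tiny sketch with a hypothetical capacity:

    use std::collections::BTreeMap;

    const CACHE_SIZE: usize = 3; // hypothetical capacity

    fn main() {
        let mut cache: BTreeMap<u64, &str> = BTreeMap::new();
        for slot in [10, 11, 12, 13, 14] {
            cache.insert(slot, "accumulator messages");
            // Evict the earliest slots once the capacity is exceeded.
            while cache.len() > CACHE_SIZE {
                cache.pop_first();
            }
        }
        // Only the three most recent slots remain.
        assert_eq!(cache.keys().copied().collect::<Vec<_>>(), vec![12, 13, 14]);
    }
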
@@ -156,179 +320,19 @@ async fn retrieve_message_state(
     }
 }

-impl Cache {
-    pub fn new(cache_size: u64) -> Self {
-        Self {
-            message_cache: Arc::new(RwLock::new(HashMap::new())),
-            accumulator_messages_cache: Arc::new(RwLock::new(BTreeMap::new())),
-            wormhole_merkle_state_cache: Arc::new(RwLock::new(BTreeMap::new())),
-            cache_size,
-        }
-    }
-}
-
-#[async_trait::async_trait]
-pub trait AggregateCache {
-    async fn message_state_keys(&self) -> Vec<MessageStateKey>;
-    async fn store_message_states(&self, message_states: Vec<MessageState>) -> Result<()>;
-    async fn prune_removed_keys(&self, current_keys: HashSet<MessageStateKey>);
-    async fn fetch_message_states(
-        &self,
-        ids: Vec<FeedId>,
-        request_time: RequestTime,
-        filter: MessageStateFilter,
-    ) -> Result<Vec<MessageState>>;
-    async fn store_accumulator_messages(
-        &self,
-        accumulator_messages: AccumulatorMessages,
-    ) -> Result<()>;
-    async fn fetch_accumulator_messages(&self, slot: Slot) -> Result<Option<AccumulatorMessages>>;
-    async fn store_wormhole_merkle_state(
-        &self,
-        wormhole_merkle_state: WormholeMerkleState,
-    ) -> Result<()>;
-    async fn fetch_wormhole_merkle_state(&self, slot: Slot) -> Result<Option<WormholeMerkleState>>;
-}
-
-#[async_trait::async_trait]
-impl AggregateCache for crate::state::State {
-    async fn message_state_keys(&self) -> Vec<MessageStateKey> {
-        self.cache
-            .message_cache
-            .read()
-            .await
-            .iter()
-            .map(|entry| entry.0.clone())
-            .collect::<Vec<_>>()
-    }
-
-    async fn store_message_states(&self, message_states: Vec<MessageState>) -> Result<()> {
-        let mut message_cache = self.cache.message_cache.write().await;
-
-        for message_state in message_states {
-            let key = message_state.key();
-            let time = message_state.time();
-            let cache = message_cache.entry(key).or_insert_with(BTreeMap::new);
-            cache.insert(time, message_state);
-
-            // Remove the earliest message states if the cache size is exceeded
-            while cache.len() > self.cache.cache_size as usize {
-                cache.pop_first();
-            }
-        }
-
-        Ok(())
-    }
-
-    /// This method takes the current feed ids and prunes the cache for the keys
-    /// that are not present in the current feed ids.
-    ///
-    /// There is a side-effect of this: if a key gets removed, we will
-    /// lose the cache for that key and cannot retrieve it for historical
-    /// price queries.
-    async fn prune_removed_keys(&self, current_keys: HashSet<MessageStateKey>) {
-        let mut message_cache = self.cache.message_cache.write().await;
-
-        // Sometimes, some keys are removed from the accumulator. We track which keys are not
-        // present in the message states and remove them from the cache.
-        let keys_in_cache = message_cache
-            .iter()
-            .map(|(key, _)| key.clone())
-            .collect::<HashSet<_>>();
-
-        for key in keys_in_cache {
-            if !current_keys.contains(&key) {
-                tracing::info!("Feed {:?} seems to be removed. Removing it from cache", key);
-                message_cache.remove(&key);
-            }
-        }
-    }
-
-    async fn fetch_message_states(
-        &self,
-        ids: Vec<FeedId>,
-        request_time: RequestTime,
-        filter: MessageStateFilter,
-    ) -> Result<Vec<MessageState>> {
-        join_all(ids.into_iter().flat_map(|id| {
-            let request_time = request_time.clone();
-            let message_types: Vec<MessageType> = match filter {
-                MessageStateFilter::All => MessageType::iter().collect(),
-                MessageStateFilter::Only(t) => vec![t],
-            };
-
-            message_types.into_iter().map(move |message_type| {
-                let key = MessageStateKey {
-                    feed_id: id,
-                    type_: message_type,
-                };
-                retrieve_message_state(&self.cache, key, request_time.clone())
-            })
-        }))
-        .await
-        .into_iter()
-        .collect::<Option<Vec<_>>>()
-        .ok_or(anyhow!("Message not found"))
-    }
-
-    async fn store_accumulator_messages(
-        &self,
-        accumulator_messages: AccumulatorMessages,
-    ) -> Result<()> {
-        let mut cache = self.cache.accumulator_messages_cache.write().await;
-        cache.insert(accumulator_messages.slot, accumulator_messages);
-        while cache.len() > self.cache.cache_size as usize {
-            cache.pop_first();
-        }
-        Ok(())
-    }
-
-    async fn fetch_accumulator_messages(&self, slot: Slot) -> Result<Option<AccumulatorMessages>> {
-        let cache = self.cache.accumulator_messages_cache.read().await;
-        Ok(cache.get(&slot).cloned())
-    }
-
-    async fn store_wormhole_merkle_state(
-        &self,
-        wormhole_merkle_state: WormholeMerkleState,
-    ) -> Result<()> {
-        let mut cache = self.cache.wormhole_merkle_state_cache.write().await;
-        cache.insert(wormhole_merkle_state.root.slot, wormhole_merkle_state);
-        while cache.len() > self.cache.cache_size as usize {
-            cache.pop_first();
-        }
-        Ok(())
-    }
-
-    async fn fetch_wormhole_merkle_state(&self, slot: Slot) -> Result<Option<WormholeMerkleState>> {
-        let cache = self.cache.wormhole_merkle_state_cache.read().await;
-        Ok(cache.get(&slot).cloned())
-    }
-}
-
 #[cfg(test)]
 mod test {
     use {
         super::*,
-        crate::{
-            aggregate::{
-                wormhole_merkle::{
-                    WormholeMerkleMessageProof,
-                    WormholeMerkleState,
-                },
-                AccumulatorMessages,
-                ProofSet,
-            },
-            state::test::setup_state,
-        },
+        crate::state::{
+            aggregate::wormhole_merkle::WormholeMerkleMessageProof,
+            test::setup_state,
+        },
         pyth_sdk::UnixTimestamp,
         pythnet_sdk::{
             accumulators::merkle::MerklePath,
             hashers::keccak256_160::Keccak160,
-            messages::{
-                Message,
-                PriceFeedMessage,
-            },
+            messages::PriceFeedMessage,
             wire::v1::WormholeMerkleRoot,
         },
     };
@@ -369,7 +373,7 @@ mod test {
         slot: Slot,
     ) -> MessageState
     where
-        S: AggregateCache,
+        S: Cache,
     {
         let message_state = create_dummy_price_feed_message_state(feed_id, publish_time, slot);
         state
Some files were not shown because too many files have changed in this diff.