chore: remove attester and price service (#1272)

This commit is contained in:
Ali Behjati 2024-02-01 16:53:07 +01:00 committed by GitHub
parent 7e65fd6597
commit f5b78e5a8c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
69 changed files with 27 additions and 11050 deletions

View File

@@ -1,44 +0,0 @@
name: Build and Push Attester Image
on:
push:
tags:
- pyth-wormhole-attester-v*
workflow_dispatch:
inputs:
dispatch_description:
description: "Dispatch description"
required: true
type: string
permissions:
contents: read
id-token: write
jobs:
p2w-attest-image:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set image tag to version of the git tag
if: ${{ startsWith(github.ref, 'refs/tags/pyth-wormhole-attester-v') }}
run: |
PREFIX="refs/tags/pyth-wormhole-attester-"
VERSION="${GITHUB_REF:${#PREFIX}}"
echo "IMAGE_TAG=${VERSION}" >> "${GITHUB_ENV}"
- name: Set image tag to the git commit hash
if: ${{ !startsWith(github.ref, 'refs/tags/pyth-wormhole-attester-v') }}
run: |
echo "IMAGE_TAG=${{ github.sha }}" >> "${GITHUB_ENV}"
- uses: aws-actions/configure-aws-credentials@8a84b07f2009032ade05a88a28750d733cc30db1
with:
role-to-assume: arn:aws:iam::192824654885:role/github-actions-ecr
aws-region: eu-west-2
- uses: docker/login-action@v2
with:
registry: public.ecr.aws
env:
AWS_REGION: us-east-1
- run: |
DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f wormhole-attester/client/Dockerfile.p2w-attest .
docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
env:
ECR_REGISTRY: public.ecr.aws
ECR_REPOSITORY: pyth-network/xc-attest

View File

@@ -1,45 +0,0 @@
name: Build and Push Price Service Image
on:
push:
tags:
- pyth-price-server-v*
workflow_dispatch:
inputs:
dispatch_description:
description: "Dispatch description"
required: true
type: string
permissions:
contents: read
id-token: write
jobs:
price-server-image:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set image tag to version of the git tag
if: ${{ startsWith(github.ref, 'refs/tags/pyth-price-server-v') }}
run: |
PREFIX="refs/tags/pyth-price-server-"
VERSION="${GITHUB_REF:${#PREFIX}}"
echo "IMAGE_TAG=${VERSION}" >> "${GITHUB_ENV}"
- name: Set image tag to the git commit hash
if: ${{ !startsWith(github.ref, 'refs/tags/pyth-price-server-v') }}
run: |
echo "IMAGE_TAG=${{ github.sha }}" >> "${GITHUB_ENV}"
- uses: aws-actions/configure-aws-credentials@8a84b07f2009032ade05a88a28750d733cc30db1
with:
role-to-assume: arn:aws:iam::192824654885:role/github-actions-ecr
aws-region: eu-west-2
- uses: docker/login-action@v2
with:
registry: public.ecr.aws
env:
AWS_REGION: us-east-1
- run: |
DOCKER_BUILDKIT=1 docker build -t lerna -f Dockerfile.lerna .
DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f price_service/server/Dockerfile .
docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
env:
ECR_REGISTRY: public.ecr.aws
ECR_REPOSITORY: pyth-network/xc-server

View File

@@ -23,6 +23,7 @@
"@pythnetwork/client": "^2.17.0",
"@solana/web3.js": "^1.76.0",
"@sqds/mesh": "^1.0.6",
"@types/cors": "^2.8.17",
"cors": "^2.8.5",
"ts-node": "^10.9.1",
"xc_admin_common": "*"

package-lock.json (generated, 1549 changes)

File diff suppressed because it is too large

View File

@@ -4,7 +4,6 @@
"governance/xc_admin/packages/*",
"governance/multisig_wh_message_builder",
"price_pusher",
"price_service/server",
"price_service/sdk/js",
"price_service/client/js",
"pythnet/message_buffer",
@@ -19,7 +18,6 @@
"target_chains/ethereum/examples/oracle_swap/app",
"target_chains/sui/sdk/js",
"target_chains/sui/cli",
"wormhole_attester/sdk/js",
"contract_manager"
],
"dependencies": {

View File

@@ -1,3 +0,0 @@
node_modules
lib
.env

View File

@@ -1,24 +0,0 @@
# Local DevNet:
SPY_SERVICE_HOST=0.0.0.0:7072
# Filters (if provided) should be valid JSON like below:
# These filters tell the spy to only retrieve messages sent from certain chains/contracts.
# See the docker-compose.<network>.yaml files for the appropriate configuration for a
# testnet/mainnet pyth price_service deployment.
SPY_SERVICE_FILTERS=[{"chain_id":1,"emitter_address":"71f8dcb863d176e2c420ad6610cf687359612b6fb392e0642b0ca6b1f186aa3b"}]
WORMHOLE_CLUSTER=localnet
# Number of seconds to sync with the spy to be sure we have the latest messages
READINESS_SPY_SYNC_TIME_SECONDS=60
READINESS_NUM_LOADED_SYMBOLS=5
WS_PORT=6200
REST_PORT=4200
PROM_PORT=8081
# The default is to log with level info.
#LOG_LEVEL=debug
REMOVE_EXPIRED_VALUES_INTERVAL_SECONDS=60
CACHE_TTL_SECONDS=300

View File

@@ -1 +0,0 @@
/lib

View File

@@ -1,18 +0,0 @@
# Defined in Dockerfile.lerna
FROM lerna
USER root
RUN apt-get update && apt-get install -y ncat
WORKDIR /home/node/
USER 1000
COPY --chown=1000:1000 price_service/server price_service/server
COPY --chown=1000:1000 price_service/sdk/js price_service/sdk/js
COPY --chown=1000:1000 wormhole_attester/sdk/js wormhole_attester/sdk/js
RUN npx lerna run build --scope="@pythnetwork/price-service-server" --include-dependencies
WORKDIR /home/node/price_service/server
CMD [ "npm", "run", "start" ]

View File

@@ -1,72 +0,0 @@
# Pyth Price Service
**Pyth price service is deprecated. Please use [Hermes](../../hermes/) instead.**
The Pyth price service is a web service that listens to the Wormhole network for Pyth price updates and serves them via a
convenient web API. It allows users to query recent price updates via a REST API or subscribe to a
websocket for streaming updates. The [price service JS client](https://github.com/pyth-network/pyth-crosschain/tree/main/price_service/sdk/js) connects
to an instance of the price service to fetch price updates on demand.
## Wormhole Spy
The price service depends on a Wormhole Spy that streams Pyth messages from the Wormhole network to it. The
[spy](https://github.com/wormhole-foundation/wormhole/blob/main/node/cmd/spy/spy.go) is a Wormhole component that listens for verified
messages on the Wormhole peer-to-peer network and streams the messages coming from certain emitters (e.g., the Pyth data emitters) to its subscribers.
The price service subscribes to the spy to fetch all verified prices coming from the Pyth data sources. The Pyth data sources must
be defined in the `SPY_SERVICE_FILTERS` environment variable as a JSON array.
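For example, the mainnet compose file in this repository configures the filters for the Pyth emitters on Solana (chain id 1) and Pythnet (chain id 26):
```
SPY_SERVICE_FILTERS=[{"chain_id":1,"emitter_address":"6bb14509a612f01fbbc4cffeebd4bbfb492a86df717ebe92eb6df432a3f00a25"},{"chain_id":26,"emitter_address":"f8cd23c2ab91237730770bbea08d61005cdda0984348f3f6eecb559638c0bba0"}]
```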
## Run
This repository contains testnet and mainnet docker-compose files that run
both the price service and the spy. To use the mainnet compose file, run:
```
docker compose -f docker-compose.mainnet.yaml up
```
Now your own instance of the price service is up and running! Running the following command should give you the Pyth price of ETH/USD:
```
curl localhost:4200/api/latest_price_feeds?ids[]=0xff61491a931112ddf1bd8147cd1b641375f79f5825126d665480874634fd0ace
```
If everything is working, you should get output like this:
```
[{"ema_price":{"conf":"52359655","expo":-8,"price":"169041770000","publish_time":1675365813},"id":"ff61491a931112ddf1bd8147cd1b641375f79f5825126d665480874634fd0ace","price":{"conf":"64041644","expo":-8,"price":"167043958356","publish_time":1675365813}}]
```
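The same query can be issued programmatically. Here is a minimal TypeScript sketch (illustrative only; it assumes Node 18+ for the built-in `fetch` and a local instance listening on port 4200):
```
const ETH_USD =
  "0xff61491a931112ddf1bd8147cd1b641375f79f5825126d665480874634fd0ace";

async function main() {
  const url = new URL("http://localhost:4200/api/latest_price_feeds");
  url.searchParams.append("ids[]", ETH_USD);
  const feeds = await (await fetch(url)).json();
  for (const feed of feeds) {
    // Prices are fixed point: the numeric value is price * 10^expo.
    const { price, conf, expo } = feed.price;
    console.log(
      `${feed.id}: ${Number(price) * 10 ** expo} ± ${Number(conf) * 10 ** expo}`
    );
  }
}

main().catch(console.error);
```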
The compose files use public releases of the Pyth price service and spy. If you wish to run a modified
price service:
1. Build an image for it as described in the section below.
2. Change the price service image in the compose file to your local Docker image (e.g., `pyth_price_server`).
### Self-Hosting
If you would like to host your own instance of the price service, we recommend running the process on a 4 core machine with 4 GB of RAM.
We also recommend using a host like [Latitude](https://www.latitude.sh/) or [Hetzner](https://www.hetzner.com/) and avoiding cloud service providers like AWS in order to reduce the cost.
The price service participates in a peer-to-peer network which can use a lot of bandwidth.
Cloud hosts like AWS charge high fees for bandwidth, which makes running the service much more expensive than necessary.
Using one of the recommended hosts above should cost $10-50 / month.
## Build an image
Build the image from [the repo root](../../) like below. It will create a
local image named `pyth_price_server`.
```
$ docker buildx build -f tilt_devnet/docker_images/Dockerfile.lerna -t lerna .
$ docker buildx build -f price_service/server/Dockerfile -t pyth_price_server .
```
If you wish to build the price service without Docker, please follow the instructions in the price service
[`Dockerfile`](./Dockerfile).
## Known Issues
The spy sometimes fails to connect to the peer-to-peer network on initialization. If this happens, the price
service will not be able to retrieve any data. You can fix this by quitting, removing the containers from Docker,
and then recreating both containers. Simply stopping and starting the services tends not to work.
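For the mainnet compose file, a full reset looks like this (`down` stops and removes the containers):
```
docker compose -f docker-compose.mainnet.yaml down
docker compose -f docker-compose.mainnet.yaml up
```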

View File

@@ -1,61 +0,0 @@
services:
spy:
# Find the latest Guardian images at https://github.com/wormhole-foundation/wormhole/pkgs/container/guardiand
image: ghcr.io/wormhole-foundation/guardiand:v2.23.28
restart: on-failure
command:
- "spy"
- "--nodeKey"
- "/node.key"
- "--spyRPC"
- "[::]:7072"
- "--bootstrap"
- "/dns4/wormhole-v2-mainnet-bootstrap.xlabs.xyz/udp/8999/quic/p2p/12D3KooWNQ9tVrcb64tw6bNs2CaNrUGPM7yRrKvBBheQ5yCyPHKC,/dns4/wormhole.mcf.rocks/udp/8999/quic/p2p/12D3KooWDZVv7BhZ8yFLkarNdaSWaB43D6UbQwExJ8nnGAEmfHcU,/dns4/wormhole-v2-mainnet-bootstrap.staking.fund/udp/8999/quic/p2p/12D3KooWG8obDX9DNi1KUwZNu9xkGwfKqTp2GFwuuHpWZ3nQruS1"
- "--network"
- "/wormhole/mainnet/2"
- "--logLevel"
- "warn"
price-service:
# Find the latest price service images at https://gallery.ecr.aws/pyth-network/xc-server
image: public.ecr.aws/pyth-network/xc-server:v3.0.8
restart: on-failure
# Or alternatively use a locally built image
# image: pyth_price_server
environment:
SPY_SERVICE_HOST: "spy:7072"
SPY_SERVICE_FILTERS: |
[
{
"chain_id": 1,
"emitter_address": "6bb14509a612f01fbbc4cffeebd4bbfb492a86df717ebe92eb6df432a3f00a25"
},
{
"chain_id": 26,
"emitter_address": "f8cd23c2ab91237730770bbea08d61005cdda0984348f3f6eecb559638c0bba0"
}
]
REST_PORT: "4200"
PROM_PORT: "8081"
READINESS_SPY_SYNC_TIME_SECONDS: "5"
READINESS_NUM_LOADED_SYMBOLS: "280"
LOG_LEVEL: warning
WORMHOLE_CLUSTER: mainnet
DB_API_CLUSTER: pythnet
REMOVE_EXPIRED_VALUES_INTERVAL_SECONDS: "60"
CACHE_TTL_SECONDS: "300"
DB_API_ENDPOINT: "https://web-api.pyth.network"
ports:
- "4200:4200"
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:4200/ready",
]
start_period: 20s
depends_on:
- spy

View File

@@ -1,61 +0,0 @@
services:
spy:
# Find the latest Guardian images at https://github.com/wormhole-foundation/wormhole/pkgs/container/guardiand
image: ghcr.io/wormhole-foundation/guardiand:v2.23.28
restart: on-failure
command:
- "spy"
- "--nodeKey"
- "/node.key"
- "--spyRPC"
- "[::]:7072"
- "--bootstrap"
- "/dns4/t-guardian-01.nodes.stable.io/udp/8999/quic/p2p/12D3KooWCW3LGUtkCVkHZmVSZHzL3C4WRKWfqAiJPz1NR7dT9Bxh,/dns4/t-guardian-02.nodes.stable.io/udp/8999/quic/p2p/12D3KooWJXA6goBCiWM8ucjzc4jVUBSqL9Rri6UpjHbkMPErz5zK"
- "--network"
- "/wormhole/testnet/2/1"
- "--logLevel"
- "warn"
price-service:
# Find the latest price service images at https://gallery.ecr.aws/pyth-network/xc-server
image: public.ecr.aws/pyth-network/xc-server:v3.0.8
restart: on-failure
# Or alternatively use a locally built image
# image: pyth_price_server
environment:
SPY_SERVICE_HOST: "spy:7072"
SPY_SERVICE_FILTERS: |
[
{
"chain_id": 1,
"emitter_address": "f346195ac02f37d60d4db8ffa6ef74cb1be3550047543a4a9ee9acf4d78697b0"
},
{
"chain_id": 26,
"emitter_address": "a27839d641b07743c0cb5f68c51f8cd31d2c0762bec00dc6fcd25433ef1ab5b6"
}
]
REST_PORT: "4200"
PROM_PORT: "8081"
READINESS_SPY_SYNC_TIME_SECONDS: "5"
READINESS_NUM_LOADED_SYMBOLS: "280"
LOG_LEVEL: warning
WORMHOLE_CLUSTER: testnet
DB_API_CLUSTER: devnet
REMOVE_EXPIRED_VALUES_INTERVAL_SECONDS: "60"
CACHE_TTL_SECONDS: "300"
DB_API_ENDPOINT: "https://web-api.pyth.network"
ports:
- "4200:4200"
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:4200/ready",
]
start_period: 20s
depends_on:
- spy

View File

@@ -1,5 +0,0 @@
/** @type {import('ts-jest/dist/types').InitialOptionsTsJest} */
module.exports = {
preset: "ts-jest",
testEnvironment: "node",
};

View File

@@ -1,69 +0,0 @@
{
"name": "@pythnetwork/price-service-server",
"version": "3.1.2",
"description": "Webservice for retrieving prices from the Pyth oracle.",
"private": "true",
"main": "index.js",
"scripts": {
"format": "prettier --write \"src/**/*.ts\"",
"build": "tsc",
"start": "node lib/index.js",
"test": "jest src/",
"lint": "tslint -p tsconfig.json",
"preversion": "npm run lint",
"version": "npm run format && git add -A src"
},
"author": "",
"license": "Apache-2.0",
"devDependencies": {
"@types/jest": "^29.4.0",
"@types/keccak": "^3.0.1",
"@types/long": "^4.0.1",
"@types/node": "^16.6.1",
"@types/node-fetch": "^2.6.2",
"@types/secp256k1": "^4.0.3",
"@types/supertest": "^2.0.12",
"jest": "^29.4.0",
"prettier": "^2.3.2",
"supertest": "^6.2.3",
"ts-jest": "^29.0.5",
"tslint": "^6.1.3",
"tslint-config-prettier": "^1.18.0",
"typescript": "^4.3.5"
},
"dependencies": {
"@certusone/wormhole-sdk": "^0.9.9",
"@certusone/wormhole-spydk": "^0.0.1",
"@pythnetwork/price-service-sdk": "*",
"@pythnetwork/wormhole-attester-sdk": "*",
"@types/cors": "^2.8.12",
"@types/express": "^4.17.13",
"@types/morgan": "^1.9.3",
"@types/response-time": "^2.3.5",
"@types/ws": "^8.5.3",
"cors": "^2.8.5",
"dotenv": "^10.0.0",
"express": "^4.17.2",
"express-validation": "^4.0.1",
"http-status-codes": "^2.2.0",
"joi": "^17.6.0",
"keccak": "^3.0.3",
"lru-cache": "^7.14.1",
"morgan": "^1.10.0",
"node-fetch": "^2.6.1",
"prom-client": "^14.0.1",
"response-time": "^2.3.2",
"secp256k1": "^5.0.0",
"ts-retry-promise": "^0.7.0",
"winston": "^3.3.3",
"ws": "^8.12.0"
},
"directories": {
"lib": "lib"
},
"keywords": [],
"optionalDependencies": {
"bufferutil": "^4.0.6",
"utf-8-validate": "^5.0.9"
}
}

View File

@@ -1,139 +0,0 @@
import { VaaConfig, VaaCache } from "../listen";
describe("VAA Cache works", () => {
test("Setting and getting works as expected", async () => {
const cache = new VaaCache();
expect(cache.get("a", 3)).toBeUndefined();
cache.set("a", 1, 0, "a-1");
expect(cache.get("a", 3)).toBeUndefined();
cache.set("a", 4, 3, "a-2");
expect(cache.get("a", 3)).toEqual<VaaConfig>({
publishTime: 4,
lastAttestedPublishTime: 3,
vaa: "a-2",
});
cache.set("a", 10, 9, "a-3");
cache.set("a", 10, 10, "a-4");
cache.set("a", 10, 10, "a-5");
cache.set("a", 10, 10, "a-6");
cache.set("a", 11, 11, "a-7");
// Add some elements under other keys to make sure entries for
// different keys are stored separately and don't interfere.
cache.set("b", 3, 2, "b-1");
cache.set("b", 7, 6, "b-2");
cache.set("b", 9, 8, "b-3");
expect(cache.get("a", 3)).toEqual<VaaConfig>({
publishTime: 4,
lastAttestedPublishTime: 3,
vaa: "a-2",
});
expect(cache.get("a", 4)).toEqual<VaaConfig>({
publishTime: 4,
lastAttestedPublishTime: 3,
vaa: "a-2",
});
expect(cache.get("a", 5)).toEqual<VaaConfig>({
publishTime: 10,
lastAttestedPublishTime: 9,
vaa: "a-3",
});
// There are multiple elements at 10, but we prefer to return the one with a lower lastAttestedPublishTime.
expect(cache.get("a", 10)).toEqual<VaaConfig>({
publishTime: 10,
lastAttestedPublishTime: 9,
vaa: "a-3",
});
// If the cache only contains elements where the lastAttestedPublishTime==publishTime, those will be returned.
// Note that this behavior is undesirable (as this means we can return a noncanonical VAA for a query time);
// this test simply documents it.
expect(cache.get("a", 11)).toEqual<VaaConfig>({
publishTime: 11,
lastAttestedPublishTime: 11,
vaa: "a-7",
});
expect(cache.get("b", 3)).toEqual<VaaConfig>({
publishTime: 3,
lastAttestedPublishTime: 2,
vaa: "b-1",
});
expect(cache.get("b", 4)).toEqual<VaaConfig>({
publishTime: 7,
lastAttestedPublishTime: 6,
vaa: "b-2",
});
// When no item more recent than the requested pubTime exists, it should return undefined
expect(cache.get("a", 12)).toBeUndefined();
expect(cache.get("b", 10)).toBeUndefined();
// When the requested pubTime is earlier than the first cached pubTime, we cannot be sure
// that the first entry is really the first VAA after that time, so undefined is returned.
expect(cache.get("a", 0)).toBeUndefined();
expect(cache.get("b", 1)).toBeUndefined();
expect(cache.get("b", 2)).toBeUndefined();
});
test("removeExpiredValues clears the old values", async () => {
jest.useFakeTimers();
// TTL of 500 seconds for the cache
const cache = new VaaCache(500);
cache.set("a", 300, 299, "a-1");
cache.set("a", 700, 699, "a-2");
cache.set("a", 900, 899, "a-3");
expect(cache.get("a", 300)).toEqual<VaaConfig>({
publishTime: 300,
lastAttestedPublishTime: 299,
vaa: "a-1",
});
expect(cache.get("a", 500)).toEqual<VaaConfig>({
publishTime: 700,
lastAttestedPublishTime: 699,
vaa: "a-2",
});
// Set time to second 1000
jest.setSystemTime(1000 * 1000);
cache.removeExpiredValues();
expect(cache.get("a", 300)).toBeUndefined();
expect(cache.get("a", 500)).toBeUndefined();
});
test("the cache clean loop works", async () => {
jest.useFakeTimers();
// TTL of 500 seconds for the cache and cleanup of every 100 seconds
const cache = new VaaCache(500, 100);
cache.runRemoveExpiredValuesLoop();
cache.set("a", 300, 299, "a-1");
cache.set("a", 700, 699, "a-2");
cache.set("a", 900, 899, "a-3");
expect(cache.get("a", 900)).toEqual<VaaConfig>({
publishTime: 900,
lastAttestedPublishTime: 899,
vaa: "a-3",
});
// Set time to second 2000. Everything should be evicted from cache now.
jest.setSystemTime(2000 * 1000);
jest.advanceTimersToNextTimer();
expect(cache.get("a", 900)).toBeUndefined();
});
});

View File

@@ -1,566 +0,0 @@
import {
HexString,
Price,
PriceFeed,
PriceFeedMetadata,
} from "@pythnetwork/price-service-sdk";
import express, { Express } from "express";
import { StatusCodes } from "http-status-codes";
import request from "supertest";
import { PriceInfo, PriceStore, VaaCache, VaaConfig } from "../listen";
import { RestAPI, VaaResponse } from "../rest";
let priceInfo: PriceStore;
let app: Express;
let priceInfoMap: Map<string, PriceInfo>;
let vaasCache: VaaCache;
function expandTo64Len(id: string): string {
return id.repeat(64).substring(0, 64);
}
function dummyPriceFeed(id: string): PriceFeed {
return new PriceFeed({
emaPrice: new Price({
conf: "1",
expo: 2,
price: "3",
publishTime: 4,
}),
id,
price: new Price({
conf: "5",
expo: 6,
price: "7",
publishTime: 8,
}),
});
}
function dummyPriceInfoPair(
id: HexString,
seqNum: number,
vaa: HexString
): [HexString, PriceInfo] {
return [
id,
{
priceFeed: dummyPriceFeed(id),
publishTime: 1,
attestationTime: 2,
seqNum,
vaa: Buffer.from(vaa, "hex"),
emitterChainId: 0,
priceServiceReceiveTime: 0,
lastAttestedPublishTime: 0,
},
];
}
// Add some dummy data to the provided vaa cache.
function addAbcdDataToCache(id: string, cache: VaaCache) {
cache.set(id, 10, 9, "abcd10");
cache.set(id, 20, 19, "abcd20");
cache.set(id, 30, 29, "abcd30");
}
beforeAll(async () => {
priceInfoMap = new Map<string, PriceInfo>([
dummyPriceInfoPair(expandTo64Len("abcd"), 1, "a1b2c3d4"),
dummyPriceInfoPair(expandTo64Len("ef01"), 1, "a1b2c3d4"),
dummyPriceInfoPair(expandTo64Len("3456"), 2, "bad01bad"),
dummyPriceInfoPair(expandTo64Len("10101"), 3, "bidbidbid"),
]);
vaasCache = new VaaCache();
priceInfo = {
getLatestPriceInfo: (priceFeedId: string) => {
return priceInfoMap.get(priceFeedId);
},
addUpdateListener: (_callback: (priceInfo: PriceInfo) => any) => undefined,
getPriceIds: () => new Set(),
getVaa: (vaasCacheKey: string, publishTime: number) => {
return vaasCache.get(vaasCacheKey, publishTime);
},
};
const api = new RestAPI({ port: 8889 }, priceInfo, () => true);
app = await api.createApp();
});
describe("Latest Price Feed Endpoint", () => {
test("When called with valid ids, returns correct price feed", async () => {
const ids = [expandTo64Len("abcd"), expandTo64Len("3456")];
const resp = await request(app)
.get("/api/latest_price_feeds")
.query({ ids });
expect(resp.status).toBe(StatusCodes.OK);
expect(resp.body.length).toBe(2);
expect(resp.body).toContainEqual(dummyPriceFeed(ids[0]).toJson());
expect(resp.body).toContainEqual(dummyPriceFeed(ids[1]).toJson());
});
test("When called with valid ids with leading 0x, returns correct price feed", async () => {
const ids = [expandTo64Len("abcd"), expandTo64Len("3456")];
const resp = await request(app)
.get("/api/latest_price_feeds")
.query({
ids: ids.map((id) => "0x" + id), // Add 0x to the queries
});
expect(resp.status).toBe(StatusCodes.OK);
expect(resp.body.length).toBe(2);
// Note that the ids in the response do not have the 0x prefix
expect(resp.body).toContainEqual(dummyPriceFeed(ids[0]).toJson());
expect(resp.body).toContainEqual(dummyPriceFeed(ids[1]).toJson());
});
test("When called with valid ids and verbose flag set to true, returns correct price feed with verbose information", async () => {
const ids = [expandTo64Len("abcd"), expandTo64Len("3456")];
const resp = await request(app)
.get("/api/latest_price_feeds")
.query({ ids, verbose: true });
expect(resp.status).toBe(StatusCodes.OK);
expect(resp.body.length).toBe(2);
expect(resp.body).toContainEqual({
...priceInfoMap.get(ids[0])!.priceFeed.toJson(),
metadata: new PriceFeedMetadata({
attestationTime: priceInfoMap.get(ids[0])!.attestationTime,
emitterChain: priceInfoMap.get(ids[0])!.emitterChainId,
receiveTime: priceInfoMap.get(ids[0])!.priceServiceReceiveTime,
sequenceNumber: priceInfoMap.get(ids[0])!.seqNum,
}).toJson(),
});
expect(resp.body).toContainEqual({
...priceInfoMap.get(ids[1])!.priceFeed.toJson(),
metadata: new PriceFeedMetadata({
attestationTime: priceInfoMap.get(ids[1])!.attestationTime,
emitterChain: priceInfoMap.get(ids[1])!.emitterChainId,
receiveTime: priceInfoMap.get(ids[1])!.priceServiceReceiveTime,
sequenceNumber: priceInfoMap.get(ids[1])!.seqNum,
}).toJson(),
});
});
test("When called with valid ids and binary flag set to true, returns correct price feed with binary vaa", async () => {
const ids = [expandTo64Len("abcd"), expandTo64Len("3456")];
const resp = await request(app)
.get("/api/latest_price_feeds")
.query({ ids, binary: true });
expect(resp.status).toBe(StatusCodes.OK);
expect(resp.body.length).toBe(2);
expect(resp.body).toContainEqual({
...priceInfoMap.get(ids[0])!.priceFeed.toJson(),
vaa: priceInfoMap.get(ids[0])!.vaa.toString("base64"),
});
expect(resp.body).toContainEqual({
...priceInfoMap.get(ids[1])!.priceFeed.toJson(),
vaa: priceInfoMap.get(ids[1])!.vaa.toString("base64"),
});
});
test("When called with a target_chain, returns correct price feed with binary vaa encoded properly", async () => {
const ids = [expandTo64Len("abcd"), expandTo64Len("3456")];
const resp = await request(app)
.get("/api/latest_price_feeds")
.query({ ids, target_chain: "evm" });
expect(resp.status).toBe(StatusCodes.OK);
expect(resp.body.length).toBe(2);
expect(resp.body).toContainEqual({
...priceInfoMap.get(ids[0])!.priceFeed.toJson(),
vaa: "0x" + priceInfoMap.get(ids[0])!.vaa.toString("hex"),
});
expect(resp.body).toContainEqual({
...priceInfoMap.get(ids[1])!.priceFeed.toJson(),
vaa: "0x" + priceInfoMap.get(ids[1])!.vaa.toString("hex"),
});
});
test("When called with some non-existent ids within ids, returns error mentioning non-existent ids", async () => {
const ids = [
expandTo64Len("ab01"),
expandTo64Len("3456"),
expandTo64Len("effe"),
];
const resp = await request(app)
.get("/api/latest_price_feeds")
.query({ ids });
expect(resp.status).toBe(StatusCodes.BAD_REQUEST);
expect(resp.body.message).toContain(ids[0]);
expect(resp.body.message).not.toContain(ids[1]);
expect(resp.body.message).toContain(ids[2]);
});
});
describe("Latest Vaa Bytes Endpoint", () => {
test("When called with valid ids, returns vaa bytes as array, merged if necessary", async () => {
const ids = [
expandTo64Len("abcd"),
expandTo64Len("ef01"),
expandTo64Len("3456"),
];
const resp = await request(app).get("/api/latest_vaas").query({ ids });
expect(resp.status).toBe(StatusCodes.OK);
expect(resp.body.length).toBe(2);
expect(resp.body).toContain(
Buffer.from("a1b2c3d4", "hex").toString("base64")
);
expect(resp.body).toContain(
Buffer.from("bad01bad", "hex").toString("base64")
);
});
test("When called with target_chain, returns vaa bytes encoded correctly", async () => {
const ids = [
expandTo64Len("abcd"),
expandTo64Len("ef01"),
expandTo64Len("3456"),
];
const resp = await request(app)
.get("/api/latest_vaas")
.query({ ids, target_chain: "evm" });
expect(resp.status).toBe(StatusCodes.OK);
expect(resp.body.length).toBe(2);
expect(resp.body).toContain("0xa1b2c3d4");
expect(resp.body).toContain("0xbad01bad");
});
test("When called with valid ids with leading 0x, returns vaa bytes as array, merged if necessary", async () => {
const ids = [
expandTo64Len("abcd"),
expandTo64Len("ef01"),
expandTo64Len("3456"),
];
const resp = await request(app)
.get("/api/latest_vaas")
.query({
ids: ids.map((id) => "0x" + id), // Add 0x to the queries
});
expect(resp.status).toBe(StatusCodes.OK);
expect(resp.body.length).toBe(2);
expect(resp.body).toContain(
Buffer.from("a1b2c3d4", "hex").toString("base64")
);
expect(resp.body).toContain(
Buffer.from("bad01bad", "hex").toString("base64")
);
});
test("When called with some non-existent ids within ids, returns error mentioning non-existent ids", async () => {
const ids = [
expandTo64Len("ab01"),
expandTo64Len("3456"),
expandTo64Len("effe"),
];
const resp = await request(app).get("/api/latest_vaas").query({ ids });
expect(resp.status).toBe(StatusCodes.BAD_REQUEST);
expect(resp.body.message).toContain(ids[0]);
expect(resp.body.message).not.toContain(ids[1]);
expect(resp.body.message).toContain(ids[2]);
});
});
describe("Get VAA endpoint and Get VAA CCIP", () => {
test("When called with valid id and timestamp in the cache returns the correct answer", async () => {
const id = expandTo64Len("abcd");
addAbcdDataToCache(id, vaasCache);
const resp = await request(app).get("/api/get_vaa").query({
id,
publish_time: 16,
});
expect(resp.status).toBe(StatusCodes.OK);
expect(resp.body).toEqual<VaaResponse>({
vaa: "abcd20",
publishTime: 20,
});
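// get_vaa_ccip expects "data" to be 0x ++ 32-byte feed id ++ 8-byte big-endian publish time ("0000000000000010" = 16).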
const pubTime16AsHex64Bit = "0000000000000010";
const ccipResp = await request(app)
.get("/api/get_vaa_ccip")
.query({
data: "0x" + id + pubTime16AsHex64Bit,
});
expect(ccipResp.status).toBe(StatusCodes.OK);
expect(ccipResp.body).toEqual({
data: "0x" + Buffer.from("abcd20", "base64").toString("hex"),
});
});
test("When called with valid id with leading 0x and timestamp in the cache returns the correct answer", async () => {
const id = expandTo64Len("abcd");
addAbcdDataToCache(id, vaasCache);
const resp = await request(app)
.get("/api/get_vaa")
.query({
id: "0x" + id,
publish_time: 16,
});
expect(resp.status).toBe(StatusCodes.OK);
expect(resp.body).toEqual<VaaResponse>({
vaa: "abcd20",
publishTime: 20,
});
});
test("When called with target_chain, encodes resulting VAA in the right format", async () => {
const id = expandTo64Len("abcd");
addAbcdDataToCache(id, vaasCache);
const resp = await request(app)
.get("/api/get_vaa")
.query({
id: "0x" + id,
publish_time: 16,
target_chain: "evm",
});
expect(resp.status).toBe(StatusCodes.OK);
expect(resp.body).toEqual<VaaResponse>({
vaa: "0x" + Buffer.from("abcd20", "base64").toString("hex"),
publishTime: 20,
});
});
test("When called with invalid id returns price id found", async () => {
// dead does not exist in the ids
const id = expandTo64Len("dead");
const resp = await request(app).get("/api/get_vaa").query({
id,
publish_time: 16,
});
expect(resp.status).toBe(StatusCodes.BAD_REQUEST);
expect(resp.body.message).toContain(id);
const pubTime16AsHex64Bit = "0000000000000010";
const ccipResp = await request(app)
.get("/api/get_vaa_ccip")
.query({
data: "0x" + id + pubTime16AsHex64Bit,
});
expect(ccipResp.status).toBe(StatusCodes.BAD_REQUEST);
expect(ccipResp.body.message).toContain(id);
});
test("When called with valid id and timestamp not in the cache without db returns vaa not found", async () => {
const id = expandTo64Len("abcd");
addAbcdDataToCache(id, vaasCache);
const resp = await request(app)
.get("/api/get_vaa")
.query({
id: "0x" + id,
publish_time: 5,
});
expect(resp.status).toBe(StatusCodes.NOT_FOUND);
const pubTime5AsHex64Bit = "0000000000000005";
const ccipResp = await request(app)
.get("/api/get_vaa_ccip")
.query({
data: "0x" + id + pubTime5AsHex64Bit,
});
// On CCIP we expect Bad Gateway so that the client retries other CCIP endpoints.
expect(ccipResp.status).toBe(StatusCodes.BAD_GATEWAY);
});
test("When called with valid id and timestamp not in the cache with db returns ok", async () => {
const dbBackend = express();
dbBackend.get("/vaa", (req, res) => {
const priceId = req.query.id;
const pubTime = Number(req.query.publishTime);
const cluster = req.query.cluster;
res.json([
{
vaa: `${cluster}${priceId}${pubTime}`,
publishTime: new Date(pubTime * 1000).toISOString(),
},
]);
});
const dbApp = dbBackend.listen({ port: 37777 });
const apiWithDb = new RestAPI(
{
port: 8889,
dbApiCluster: "pythnet",
dbApiEndpoint: "http://localhost:37777",
},
priceInfo,
() => true
);
const appWithDb = await apiWithDb.createApp();
const id = expandTo64Len("abcd");
addAbcdDataToCache(id, vaasCache);
const resp = await request(appWithDb)
.get("/api/get_vaa")
.query({
id: "0x" + id,
publish_time: 5,
});
expect(resp.status).toBe(StatusCodes.OK);
expect(resp.body).toEqual<VaaResponse>({
vaa: `pythnet${id}5`,
publishTime: 5,
});
const pubTime5AsHex64Bit = "0000000000000005";
const ccipResp = await request(appWithDb)
.get("/api/get_vaa_ccip")
.query({
data: "0x" + id + pubTime5AsHex64Bit,
});
expect(ccipResp.status).toBe(StatusCodes.OK);
expect(ccipResp.body).toEqual({
data: "0x" + Buffer.from(`pythnet${id}5`, "base64").toString("hex"),
});
dbApp.close();
});
test(
"When called with valid id and timestamp not in the cache" +
"and not in the db returns vaa not found",
async () => {
const dbBackend = express();
dbBackend.get("/vaa", (_req, res) => {
// Return an empty array when the VAA is not there; this is the same
// behaviour as our API.
res.json([]);
});
const dbApp = dbBackend.listen({ port: 37777 });
const apiWithDb = new RestAPI(
{
port: 8889,
dbApiCluster: "pythnet",
dbApiEndpoint: "http://localhost:37777",
},
priceInfo,
() => true
);
const appWithDb = await apiWithDb.createApp();
const id = expandTo64Len("abcd");
addAbcdDataToCache(id, vaasCache);
const resp = await request(appWithDb)
.get("/api/get_vaa")
.query({
id: "0x" + id,
publish_time: 5,
});
expect(resp.status).toBe(StatusCodes.NOT_FOUND);
const pubTime5AsHex64Bit = "0000000000000005";
const ccipResp = await request(appWithDb)
.get("/api/get_vaa_ccip")
.query({
data: "0x" + id + pubTime5AsHex64Bit,
});
// On CCIP we expect Bad Gateway so that the client retries other CCIP endpoints.
expect(ccipResp.status).toBe(StatusCodes.BAD_GATEWAY);
dbApp.close();
}
);
test(
"When called with valid id and timestamp not in the cache" +
"and db is not available returns internal server error",
async () => {
const apiWithDb = new RestAPI(
{
port: 8889,
dbApiCluster: "pythnet",
dbApiEndpoint: "http://localhost:37777",
},
priceInfo,
() => true
);
const appWithDb = await apiWithDb.createApp();
const id = expandTo64Len("abcd");
addAbcdDataToCache(id, vaasCache);
const resp = await request(appWithDb)
.get("/api/get_vaa")
.query({
id: "0x" + id,
publish_time: 5,
});
expect(resp.status).toBe(StatusCodes.INTERNAL_SERVER_ERROR);
const pubTime5AsHex64Bit = "0000000000000005";
const ccipResp = await request(appWithDb)
.get("/api/get_vaa_ccip")
.query({
data: "0x" + id + pubTime5AsHex64Bit,
});
expect(ccipResp.status).toBe(StatusCodes.INTERNAL_SERVER_ERROR);
}
);
test("vaaToPriceInfo works with accumulator update data", () => {
// Update data taken from Hermes for the following price feed:
// {
// "id":"e62df6c8b4a85fe1a67db44dc12de5db330f7ac66b72dc658afedf0f4a415b43",
// "price":{"price":"2836040669135","conf":"3282830965","expo":-8,"publish_time":1692280808},
// "ema_price":{"price":"2845324900000","conf":"3211773100","expo":-8,"publish_time":1692280808},
// "metadata":{"slot":89783664,"emitter_chain":26,"price_service_receive_time":1692280809}
// }
const updateData = Buffer.from(
"UE5BVQEAAAADuAEAAAADDQAsKPsmb7Vz7io3taJQKgoi1m/z0kqKgtpmlkv+ZuunX2Iegsf+8fuUtpHPLKgCWPU8PN2x9NyAZz5" +
"BY9M3SWwJAALYlM0U7f2GFWfEjKwSJlHZ5sf+n6KXCocVC66ImS2o0TD0SBhTWcp0KdcuzR1rY1jfIHaFpVneroRLbTjNrk/WAA" +
"MuAYxPVPf1DR30wYQo12Dbf+in3akTjhKERNQ+nPwRjxAyIQD+52LU3Rh2VL7nOIStMNTiBMaiWHywaPoXowWAAQbillhhX4MR+" +
"7h81PfxHIbiXBmER4c5M7spilWKkROb+VXhrqnVJL162t9TdhYk56PDIhvXO1Tm/ldjVJw130y0AAk6qpccfsxDZEmVN8LI4z87" +
"39Ni/kb+CB3yW2l2dWhKTjBeNanhK6TCCoNH/jRzWfrjrEk5zjNrUr82JwL4fR1OAQrYZescxbH26m8QHiH+RHzwlXpUKJgbHD5" +
"NnWtB7oFb9AFM15jbjd4yIEBEtAlXPE0Q4j+X+DLnCtZbLSQiYNh5AQvz70LTbYry1lEExuUcO+IRJiysw5AFyqZ9Y1E//WKIqg" +
"EysfcnHwoOxtDtAc5Z9sTUEYfPqQ1d27k3Yk0X7dvCAQ10cdG0qYHb+bQrYRIKKnb0aeCjkCs0HZQY2fXYmimyfTNfECclmPW9k" +
"+CfOvW0JKuFxC1l11zJ3zjsgN/peA8BAQ5oIFQGjq9qmf5gegE1DjuzXsGksKao6nsjTXYIspCczCe2h5KNQ9l5hws11hauUKS2" +
"0JoOYjHwxPD2x0adJKvkAQ+4UjVcZgVEQP8y3caqUDH81Ikcadz2bESpYg93dpnzZTH6A7Ue+RL34PTNx6cCRzukwQuhiStuyL1" +
"WYEIrLI4nABAjGv3EBXjWaPLUj59OzVnGkzxkr6C4KDjMmpsYNzx7I2lp2iQV46TM78El8i9h7twiEDUOSdC5CmfQjRpkP72yAB" +
"GVAQELUm2/SjkpF0O+/rVDgA/Y2/wMacD1ZDahdyvSNSFThn5NyRYA1JXGgIDxoYeAZgkr1gL1cjCLWiO+Bs9QARIiCvHfIkn2a" +
"YhYHQq/u6cHB/2DxE3OgbCZyTv8OVO55hQDkJ1gDwAec+IJ4M5Od4OxWEu+OywhJT7zUmwZko9MAGTeJ+kAAAAAABrhAfrtrFhR" +
"4yubI7X5QRqMK6xKrj7U3XuBHdGnLqSqcQAAAAAAWllxAUFVV1YAAAAAAAVZ/XAAACcQ8Xfx5wQ+nj1rn6IeTUAy+VER1nUBAFU" +
"A5i32yLSoX+GmfbRNwS3l2zMPesZrctxliv7fD0pBW0MAAAKUUTJXzwAAAADDrAZ1////+AAAAABk3ifoAAAAAGTeJ+cAAAKWep" +
"R2oAAAAAC/b8SsCasjFzENKvXWwOycuzCVaDWfm0IuuuesmamDKl2lNXss15orlNN+xHVNEEIIq7Xg8GRZGVLt43fkg7xli6EPQ" +
"/Nyxl6SixiYteNt1uTTh4M1lQTUjPxKnkE5JEea4RnhOWgmSAWMf8ft4KgE7hvRifV1JP0rOsNgsOYFRbs6iDKW1qLpxgZLMAiO" +
"clwS3Tjw2hj8sPfq1NHeVttsBEK5SIM14GjAuD/p2V0+NqHqMHxU/kfftg==",
"base64"
);
const priceInfo = RestAPI.vaaToPriceInfo(
"e62df6c8b4a85fe1a67db44dc12de5db330f7ac66b72dc658afedf0f4a415b43",
updateData
);
expect(priceInfo).toBeDefined();
expect(priceInfo?.priceFeed).toEqual(
new PriceFeed({
id: "e62df6c8b4a85fe1a67db44dc12de5db330f7ac66b72dc658afedf0f4a415b43",
price: new Price({
price: "2836040669135",
conf: "3282830965",
publishTime: 1692280808,
expo: -8,
}),
emaPrice: new Price({
price: "2845324900000",
conf: "3211773100",
publishTime: 1692280808,
expo: -8,
}),
})
);
expect(priceInfo?.emitterChainId).toEqual(26);
});
});

View File

@@ -1,46 +0,0 @@
import { isValidVaa } from "../vaa";
import { GuardianSignature, parseVaa } from "@certusone/wormhole-sdk";
import { randomBytes } from "crypto";
const VAA = Buffer.from(
"AQAAAAMNABIL4Zs/yZlmGGUiAZujW8PMYR2ffWKTcuSNMRL+Yr3uQrVO1qxLToA8iksg/NWfsD3NeMSJujxSgd4fnjmqtSYBAiP92Eci7vsIVouS93bSack2bYg5ERXxZpcTb9LSWpEmILv62jbAd1HcbWu1w8WVbm++nqgbHH5S8eUY57QytegABIBcyvERWN2j9kb74zvQy+AEfXW6wjbrRKzlMvOUaKYpMG9nRzXkxd6wehsVFgV+i3G/lykR1hcrvgIczEPCuIYACPApsIJGheEpt/VQ4d36Tc0ZMzqq/kw1mTDJ8eKHikHeL8yFfo+Q9PtYK0CF1UYTKVpl32kFTtU+ubdKM7oVHMYBCiw25jnpX5+KOzxSTy+9Q5ovM3zqcN3yJBSbF80VL9N2AnehBhMTr1DylzpcYppdly4w/Iz5OHFGoZqT8dVgeY0AC0MseKj4EN0XUIGj8kXQ0CZKczfxywJPiueGTkAD6VkAOwpxnfZu212yXHAbojECKqtRCvb4UobTu+RK0pyemb0BDJKvSJ8RALV4CAGGWiS7XzHfa+/SxzCB6zxUsiOh0FGGEZBK+6i//7YUY83TOXp5SZzDGA0aH5tLXd6peL6np4ABDeHulcBX2LA1cIpmH+nqQLRq5zDPlKNBa6RVwHQUBVotBAWnCoTjOv+8xPZssl7r/BidPUbdu7j+0MGB/4/Oh6wBDub405biSsppFuBFxrBuFrJJdnsf3NvU5TWKF61aZKFtcWpxzyxNDsB3Nd7g+QYafiMkyL4okvvcthYaoiEzwX0BD1qhc5333/TKKbInkZcsitd0F/isWptZygRNqsh29f/xNuFyD4915mNWtsx3OaRAAkPcq21YzJb7ObzUB0OjhVcBEK46eqvVfpDHkF/w6+GWKACsICaAdgwDkmEwrCxXY2BgJe7cXkmDGl0Sfl8836AHd5OBwIC7g7EldFkLUanUUUwAEWpFfXwzaAnMQp+bO3RHKnpbPvJgKacjxFaCExe7dNkvYcVQ4UEC13QqIK3k7egZpHZp45O9AXfwmtpbBlJAvlgAEgu9te25pvTJ2alsQsxicrf5QyhDT7P6Ywr2WbNUnsfXKPFPC3U1P3G1yQOIjbUhrFtYkEGQ1+uZ4rNxsq2CchwBZGbcRwAAAAAAGvjNI8KrkSN3MHcLvqCNYQBc3aCYQ0jz9u7LVZY4wLugAAAAABlYLUwBUDJXSAADAAEAAQIABQCdLvoSNauGwJNctCSxAr5PIX500RCd+edd+oM4/A8JCHgvlYYrBFZwzSK+4xFMOXY6Sgi+62Y7FF0oPDHX0RAcTwAAAActs1rgAAAAAAFPo9T////4AAAABy9GhdAAAAAAARhiQwEAAAARAAAAFwAAAABkZtxHAAAAAGRm3EcAAAAAZGbcRgAAAActwjt4AAAAAAFAwzwAAAAAZGbcRkjWAz1zPieVDC4DUeJQVJHNkVSCT3FtlRNRTHS5+Y9YPdK2NoakUOxykN86HgtYPASB9lE1Ht+nY285rtVc+KMAAAACruGVUAAAAAAAv9F3////+AAAAAKux0rYAAAAAAC3HSIBAAAAFQAAABwAAAAAZGbcRwAAAABkZtxHAAAAAGRm3EYAAAACruGVUAAAAAAAv9F3AAAAAGRm3EY1FbOGHo/pPl9UC6QHfCFkBHgrhtXngHezy/0nMTqzvOYt9si0qF/hpn20TcEt5dszD3rGa3LcZYr+3w9KQVtDAAACcHgoTeAAAAAAJ08r4P////gAAAJwomgKAAAAAAAiQ9syAQAAABUAAAAfAAAAAGRm3EcAAAAAZGbcRwAAAABkZtxGAAACcHOoRAAAAAAAJlVaXAAAAABkZtxGm19z4AdefXA3YBIYDdupQnL2jYXq5BBOM1VhyYIlPUGhnQSsaWx6ZhbSkcfl0Td8yL5DfDJ7da213ButdF/K6AAAAAAE4NsJAAAAAAABWWT////4AAAAAATkLakAAAAAAAEvVQEAAAALAAAACwAAAABkZtxHAAAAAGRm3EcAAAAAZGbcRgAAAAAE4NsJAAAAAAABWWQAAAAAZGbcRuh2/NEwrdiYSjOqtSrza8G5+CLJ6+N286py1jCXThXw3O9Q3QpM0tzBfkXfFnbcszahGmHGnfegKZsBUMZy0lwAAAAAAG/y7wAAAAAAABJP////+AAAAAAAb/S7AAAAAAAAEfUBAAAAFQAAAB4AAAAAZGbcRwAAAABkZtxHAAAAAGRm3EYAAAAAAG/zhgAAAAAAABLmAAAAAGRm3EY=",
"base64"
);
describe("VAA validation works", () => {
test("with valid signatures", async () => {
let parsedVaa = parseVaa(VAA);
expect(isValidVaa(parsedVaa, "mainnet")).toBe(true);
});
test("with a wrong address", async () => {
let parsedVaa = parseVaa(VAA);
const vaaIndex = 8;
const setIndex1 = 4;
const setIndex2 = 5;
// Replace the signature from guardian at setIndex1 with the one from
// setIndex2.
parsedVaa.guardianSignatures[vaaIndex] = {
index: setIndex1,
signature: parsedVaa.guardianSignatures[setIndex2].signature,
};
expect(isValidVaa(parsedVaa, "mainnet")).toBe(false);
});
test("with an invalid signature", async () => {
let parsedVaa = parseVaa(VAA);
const vaaIndex = 8;
const setIndex = 4;
// Inject a random buffer as the signature of the guardian at setIndex.
parsedVaa.guardianSignatures[vaaIndex] = {
index: setIndex,
signature: randomBytes(65), // invalid signature
};
expect(isValidVaa(parsedVaa, "mainnet")).toBe(false);
});
});

View File

@@ -1,516 +0,0 @@
import {
HexString,
Price,
PriceFeed,
PriceFeedMetadata,
} from "@pythnetwork/price-service-sdk";
import { Server } from "http";
import { WebSocket, WebSocketServer } from "ws";
import { sleep } from "../helpers";
import { PriceInfo, PriceStore } from "../listen";
import { ClientMessage, WebSocketAPI } from "../ws";
const port = 2524;
let api: WebSocketAPI;
let server: Server;
let wss: WebSocketServer;
let priceInfos: PriceInfo[];
function expandTo64Len(id: string): string {
return id.repeat(64).substring(0, 64);
}
function dummyPriceInfo(id: HexString, vaa: HexString): PriceInfo {
return {
seqNum: 1,
publishTime: 0,
attestationTime: 2,
emitterChainId: 3,
priceFeed: dummyPriceFeed(id),
vaa: Buffer.from(vaa, "hex"),
priceServiceReceiveTime: 4,
lastAttestedPublishTime: -1,
};
}
function dummyPriceFeed(id: string): PriceFeed {
return new PriceFeed({
emaPrice: new Price({
conf: "1",
expo: 2,
price: "3",
publishTime: 4,
}),
id,
price: new Price({
conf: "5",
expo: 6,
price: "7",
publishTime: 8,
}),
});
}
async function waitForSocketState(
client: WebSocket,
state: number
): Promise<void> {
while (client.readyState !== state) {
await sleep(10);
}
}
async function waitForMessages(messages: any[], cnt: number): Promise<void> {
while (messages.length < cnt) {
await sleep(10);
}
}
async function createSocketClient(): Promise<[WebSocket, any[]]> {
const client = new WebSocket(`ws://localhost:${port}/ws`);
await waitForSocketState(client, client.OPEN);
const messages: any[] = [];
client.on("message", (data) => {
messages.push(JSON.parse(data.toString()));
});
return [client, messages];
}
beforeAll(async () => {
priceInfos = [
dummyPriceInfo(expandTo64Len("abcd"), "a1b2c3d4"),
dummyPriceInfo(expandTo64Len("ef01"), "a1b2c3d4"),
dummyPriceInfo(expandTo64Len("2345"), "bad01bad"),
dummyPriceInfo(expandTo64Len("6789"), "bidbidbid"),
];
const priceInfo: PriceStore = {
getLatestPriceInfo: (_priceFeedId: string) => undefined,
addUpdateListener: (_callback: (priceInfo: PriceInfo) => any) => undefined,
getPriceIds: () => new Set(priceInfos.map((info) => info.priceFeed.id)),
getVaa: (_vaasCacheKey: string) => undefined,
};
api = new WebSocketAPI(priceInfo);
server = new Server();
server.listen(port);
wss = api.run(server);
});
afterAll(async () => {
wss.close();
server.close();
});
describe("Client receives data", () => {
test("When subscribes with valid ids without verbose flag, returns correct price feed", async () => {
const [client, serverMessages] = await createSocketClient();
const message: ClientMessage = {
ids: [priceInfos[0].priceFeed.id, priceInfos[1].priceFeed.id],
type: "subscribe",
};
client.send(JSON.stringify(message));
await waitForMessages(serverMessages, 1);
expect(serverMessages[0]).toStrictEqual({
type: "response",
status: "success",
});
api.dispatchPriceFeedUpdate(priceInfos[0]);
await waitForMessages(serverMessages, 2);
expect(serverMessages[1]).toEqual({
type: "price_update",
price_feed: priceInfos[0].priceFeed.toJson(),
});
api.dispatchPriceFeedUpdate(priceInfos[1]);
await waitForMessages(serverMessages, 3);
expect(serverMessages[2]).toEqual({
type: "price_update",
price_feed: priceInfos[1].priceFeed.toJson(),
});
client.close();
await waitForSocketState(client, client.CLOSED);
});
test("When subscribes with valid ids and verbose flag set to true, returns correct price feed with metadata", async () => {
const [client, serverMessages] = await createSocketClient();
const message: ClientMessage = {
ids: [priceInfos[0].priceFeed.id, priceInfos[1].priceFeed.id],
type: "subscribe",
verbose: true,
};
client.send(JSON.stringify(message));
await waitForMessages(serverMessages, 1);
expect(serverMessages[0]).toStrictEqual({
type: "response",
status: "success",
});
api.dispatchPriceFeedUpdate(priceInfos[0]);
await waitForMessages(serverMessages, 2);
expect(serverMessages[1]).toEqual({
type: "price_update",
price_feed: {
...priceInfos[0].priceFeed.toJson(),
metadata: new PriceFeedMetadata({
attestationTime: 2,
emitterChain: 3,
receiveTime: 4,
sequenceNumber: 1,
}).toJson(),
},
});
api.dispatchPriceFeedUpdate(priceInfos[1]);
await waitForMessages(serverMessages, 3);
expect(serverMessages[2]).toEqual({
type: "price_update",
price_feed: {
...priceInfos[1].priceFeed.toJson(),
metadata: new PriceFeedMetadata({
attestationTime: 2,
emitterChain: 3,
receiveTime: 4,
sequenceNumber: 1,
}).toJson(),
},
});
client.close();
await waitForSocketState(client, client.CLOSED);
});
test("When subscribes with valid ids and verbose flag set to false, returns correct price feed without metadata", async () => {
const [client, serverMessages] = await createSocketClient();
const message: ClientMessage = {
ids: [priceInfos[0].priceFeed.id, priceInfos[1].priceFeed.id],
type: "subscribe",
verbose: false,
};
client.send(JSON.stringify(message));
await waitForMessages(serverMessages, 1);
expect(serverMessages[0]).toStrictEqual({
type: "response",
status: "success",
});
api.dispatchPriceFeedUpdate(priceInfos[0]);
await waitForMessages(serverMessages, 2);
expect(serverMessages[1]).toEqual({
type: "price_update",
price_feed: priceInfos[0].priceFeed.toJson(),
});
api.dispatchPriceFeedUpdate(priceInfos[1]);
await waitForMessages(serverMessages, 3);
expect(serverMessages[2]).toEqual({
type: "price_update",
price_feed: priceInfos[1].priceFeed.toJson(),
});
client.close();
await waitForSocketState(client, client.CLOSED);
});
test("When subscribes with valid ids and binary flag set to true, returns correct price feed with vaa", async () => {
const [client, serverMessages] = await createSocketClient();
const message: ClientMessage = {
ids: [priceInfos[0].priceFeed.id, priceInfos[1].priceFeed.id],
type: "subscribe",
binary: true,
};
client.send(JSON.stringify(message));
await waitForMessages(serverMessages, 1);
expect(serverMessages[0]).toStrictEqual({
type: "response",
status: "success",
});
api.dispatchPriceFeedUpdate(priceInfos[0]);
await waitForMessages(serverMessages, 2);
expect(serverMessages[1]).toEqual({
type: "price_update",
price_feed: {
...priceInfos[0].priceFeed.toJson(),
vaa: priceInfos[0].vaa.toString("base64"),
},
});
api.dispatchPriceFeedUpdate(priceInfos[1]);
await waitForMessages(serverMessages, 3);
expect(serverMessages[2]).toEqual({
type: "price_update",
price_feed: {
...priceInfos[1].priceFeed.toJson(),
vaa: priceInfos[1].vaa.toString("base64"),
},
});
client.close();
await waitForSocketState(client, client.CLOSED);
});
test("When subscribes with valid ids and binary flag set to false, returns correct price feed without vaa", async () => {
const [client, serverMessages] = await createSocketClient();
const message: ClientMessage = {
ids: [priceInfos[0].priceFeed.id, priceInfos[1].priceFeed.id],
type: "subscribe",
binary: false,
};
client.send(JSON.stringify(message));
await waitForMessages(serverMessages, 1);
expect(serverMessages[0]).toStrictEqual({
type: "response",
status: "success",
});
api.dispatchPriceFeedUpdate(priceInfos[0]);
await waitForMessages(serverMessages, 2);
expect(serverMessages[1]).toEqual({
type: "price_update",
price_feed: priceInfos[0].priceFeed.toJson(),
});
api.dispatchPriceFeedUpdate(priceInfos[1]);
await waitForMessages(serverMessages, 3);
expect(serverMessages[2]).toEqual({
type: "price_update",
price_feed: priceInfos[1].priceFeed.toJson(),
});
client.close();
await waitForSocketState(client, client.CLOSED);
});
test("When subscribes with invalid ids, returns error", async () => {
const [client, serverMessages] = await createSocketClient();
const message: ClientMessage = {
ids: [expandTo64Len("aaaa")],
type: "subscribe",
};
client.send(JSON.stringify(message));
await waitForMessages(serverMessages, 1);
expect(serverMessages.length).toBe(1);
expect(serverMessages[0].type).toBe("response");
expect(serverMessages[0].status).toBe("error");
client.close();
await waitForSocketState(client, client.CLOSED);
});
test("When subscribes for Price Feed A, doesn't receive updates for Price Feed B", async () => {
const [client, serverMessages] = await createSocketClient();
const message: ClientMessage = {
ids: [priceInfos[0].priceFeed.id],
type: "subscribe",
};
client.send(JSON.stringify(message));
await waitForMessages(serverMessages, 1);
expect(serverMessages[0]).toStrictEqual({
type: "response",
status: "success",
});
api.dispatchPriceFeedUpdate(priceInfos[1]);
await sleep(100);
api.dispatchPriceFeedUpdate(priceInfos[0]);
await waitForMessages(serverMessages, 2);
expect(serverMessages[1]).toEqual({
type: "price_update",
price_feed: priceInfos[0].priceFeed.toJson(),
});
await sleep(100);
expect(serverMessages.length).toBe(2);
client.close();
await waitForSocketState(client, client.CLOSED);
});
test("When subscribes for Price Feed A, receives updated and when unsubscribes stops receiving", async () => {
const [client, serverMessages] = await createSocketClient();
let message: ClientMessage = {
ids: [priceInfos[0].priceFeed.id],
type: "subscribe",
};
client.send(JSON.stringify(message));
await waitForMessages(serverMessages, 1);
expect(serverMessages[0]).toStrictEqual({
type: "response",
status: "success",
});
api.dispatchPriceFeedUpdate(priceInfos[0]);
await waitForMessages(serverMessages, 2);
expect(serverMessages[1]).toEqual({
type: "price_update",
price_feed: priceInfos[0].priceFeed.toJson(),
});
message = {
ids: [priceInfos[0].priceFeed.id],
type: "unsubscribe",
};
client.send(JSON.stringify(message));
await waitForMessages(serverMessages, 3);
expect(serverMessages[2]).toStrictEqual({
type: "response",
status: "success",
});
api.dispatchPriceFeedUpdate(priceInfos[0]);
await sleep(100);
expect(serverMessages.length).toBe(3);
client.close();
await waitForSocketState(client, client.CLOSED);
});
test("Unsubscribe on not subscribed price feed is ok", async () => {
const [client, serverMessages] = await createSocketClient();
const message: ClientMessage = {
ids: [priceInfos[0].priceFeed.id],
type: "unsubscribe",
};
client.send(JSON.stringify(message));
await waitForMessages(serverMessages, 1);
expect(serverMessages[0]).toStrictEqual({
type: "response",
status: "success",
});
client.close();
await waitForSocketState(client, client.CLOSED);
});
test("Multiple clients with different price feed works", async () => {
const [client1, serverMessages1] = await createSocketClient();
const [client2, serverMessages2] = await createSocketClient();
const message1: ClientMessage = {
ids: [priceInfos[0].priceFeed.id],
type: "subscribe",
};
client1.send(JSON.stringify(message1));
const message2: ClientMessage = {
ids: [priceInfos[1].priceFeed.id],
type: "subscribe",
};
client2.send(JSON.stringify(message2));
await waitForMessages(serverMessages1, 1);
await waitForMessages(serverMessages2, 1);
expect(serverMessages1[0]).toStrictEqual({
type: "response",
status: "success",
});
expect(serverMessages2[0]).toStrictEqual({
type: "response",
status: "success",
});
api.dispatchPriceFeedUpdate(priceInfos[0]);
api.dispatchPriceFeedUpdate(priceInfos[1]);
await waitForMessages(serverMessages1, 2);
await waitForMessages(serverMessages2, 2);
expect(serverMessages1[1]).toEqual({
type: "price_update",
price_feed: priceInfos[0].priceFeed.toJson(),
});
expect(serverMessages2[1]).toEqual({
type: "price_update",
price_feed: priceInfos[1].priceFeed.toJson(),
});
client1.close();
client2.close();
await waitForSocketState(client1, client1.CLOSED);
await waitForSocketState(client2, client2.CLOSED);
});
});

View File

@@ -1,47 +0,0 @@
// Utilities for encoding VAAs for specific target chains
// List of all possible target chains. Note that "default" is an option because we need at least one chain
// with a base64 encoding (which is the old default behavior of all API methods).
export type TargetChain = "evm" | "cosmos" | "aptos" | "default";
export const validTargetChains = ["evm", "cosmos", "aptos", "default"];
export const defaultTargetChain: TargetChain = "default";
// Possible encodings of the binary VAA data as a string.
// "0x" is the same as "hex" with a leading "0x" prepended to the hex string.
export type VaaEncoding = "base64" | "hex" | "0x";
export const defaultVaaEncoding: VaaEncoding = "base64";
export const chainToEncoding: Record<TargetChain, VaaEncoding> = {
evm: "0x",
cosmos: "base64",
// TODO: I think aptos actually wants a number[] for this data... need to decide how to
// handle that case.
aptos: "base64",
default: "base64",
};
// Given a VAA represented as either a string in base64 or a Buffer, encode it as a string
// appropriate for the given targetChain.
export function encodeVaaForChain(
vaa: string | Buffer,
targetChain: TargetChain
): string {
const encoding = chainToEncoding[targetChain];
let vaaBuffer: Buffer;
if (typeof vaa === "string") {
if (encoding === defaultVaaEncoding) {
return vaa;
} else {
vaaBuffer = Buffer.from(vaa, defaultVaaEncoding as BufferEncoding);
}
} else {
vaaBuffer = vaa;
}
switch (encoding) {
case "0x":
return "0x" + vaaBuffer.toString("hex");
default:
return vaaBuffer.toString(encoding);
}
}
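// Illustrative usage (not part of the original file):
//   encodeVaaForChain(Buffer.from("a1b2c3d4", "hex"), "evm");     // => "0xa1b2c3d4"
//   encodeVaaForChain(Buffer.from("a1b2c3d4", "hex"), "default"); // => base64 string
//   encodeVaaForChain(someBase64String, "default");               // returned unchanged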

View File

@@ -1,45 +0,0 @@
// Timestamps and durations (the suffix indicates the unit)
export type TimestampInSec = number;
export type DurationInSec = number;
export type DurationInMs = number;
export function sleep(ms: number) {
return new Promise((resolve) => setTimeout(resolve, ms));
}
// Shorthand helpers for mandatory/optional environment variables
export function envOrErr(env: string): string {
const val = process.env[env];
if (!val) {
throw new Error(`environment variable "${env}" must be set`);
}
return String(process.env[env]);
}
export function parseToOptionalNumber(
s: string | undefined
): number | undefined {
if (s === undefined) {
return undefined;
}
return parseInt(s, 10);
}
export function removeLeading0x(s: string): string {
if (s.startsWith("0x")) {
return s.substring(2);
}
return s;
}
// Helper for treating T | undefined as an optional value. This lets you pick a
// default if value is undefined.
export function getOrElse<T>(value: T | undefined, defaultValue: T): T {
if (value === undefined) {
return defaultValue;
} else {
return value;
}
}
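// Illustrative usage (not part of the original file):
//   const port = parseInt(envOrErr("REST_PORT"), 10); // throws if REST_PORT is unset
//   const ttl = getOrElse(parseToOptionalNumber(process.env.CACHE_TTL_SECONDS), 300);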

View File

@@ -1,72 +0,0 @@
import { envOrErr, parseToOptionalNumber } from "./helpers";
import { Listener } from "./listen";
import { initLogger } from "./logging";
import { PromClient } from "./promClient";
import { RestAPI } from "./rest";
import { WebSocketAPI } from "./ws";
let configFile: string = ".env";
if (process.env.PYTH_PRICE_SERVICE_CONFIG) {
configFile = process.env.PYTH_PRICE_SERVICE_CONFIG;
}
// tslint:disable:no-console
console.log("Loading config file [%s]", configFile);
// tslint:disable:no-var-requires
require("dotenv").config({ path: configFile });
// Set up the logger.
initLogger({ logLevel: process.env.LOG_LEVEL });
async function run() {
const promClient = new PromClient({
name: "price_service",
port: parseInt(envOrErr("PROM_PORT"), 10),
});
const listener = new Listener(
{
spyServiceHost: envOrErr("SPY_SERVICE_HOST"),
filtersRaw: process.env.SPY_SERVICE_FILTERS,
wormholeCluster: process.env.WORMHOLE_CLUSTER,
readiness: {
spySyncTimeSeconds: parseInt(
envOrErr("READINESS_SPY_SYNC_TIME_SECONDS"),
10
),
numLoadedSymbols: parseInt(
envOrErr("READINESS_NUM_LOADED_SYMBOLS"),
10
),
},
cacheCleanupLoopInterval: parseToOptionalNumber(
process.env.REMOVE_EXPIRED_VALUES_INTERVAL_SECONDS
),
cacheTtl: parseToOptionalNumber(process.env.CACHE_TTL_SECONDS),
},
promClient
);
// In the future, if we have more components, this will be modified to include them all
const isReady = () => listener.isReady();
const restAPI = new RestAPI(
{
port: parseInt(envOrErr("REST_PORT"), 10),
dbApiEndpoint: process.env.DB_API_ENDPOINT,
dbApiCluster: process.env.DB_API_CLUSTER,
},
listener,
isReady,
promClient
);
const wsAPI = new WebSocketAPI(listener, promClient);
listener.run();
const server = await restAPI.run();
wsAPI.run(server);
}
run();

View File

@@ -1,469 +0,0 @@
import {
createSpyRPCServiceClient,
subscribeSignedVAA,
} from "@certusone/wormhole-spydk";
import { ChainId, uint8ArrayToHex, parseVaa } from "@certusone/wormhole-sdk";
import {
FilterEntry,
SubscribeSignedVAAResponse,
} from "@certusone/wormhole-spydk/lib/cjs/proto/spy/v1/spy";
import { ClientReadableStream } from "@grpc/grpc-js";
import {
getBatchSummary,
parseBatchPriceAttestation,
priceAttestationToPriceFeed,
PriceAttestation,
} from "@pythnetwork/wormhole-attester-sdk";
import { HexString, PriceFeed } from "@pythnetwork/price-service-sdk";
import LRUCache from "lru-cache";
import { DurationInSec, sleep, TimestampInSec } from "./helpers";
import { logger } from "./logging";
import { PromClient } from "./promClient";
import { isValidVaa, WormholeCluster, wormholeClusterFromString } from "./vaa";
export type PriceInfo = {
vaa: Buffer;
seqNum: number;
publishTime: TimestampInSec;
attestationTime: TimestampInSec;
lastAttestedPublishTime: TimestampInSec;
priceFeed: PriceFeed;
emitterChainId: number;
priceServiceReceiveTime: number;
};
export function createPriceInfo(
priceAttestation: PriceAttestation,
vaa: Buffer,
sequence: bigint,
emitterChain: number
): PriceInfo {
const priceFeed = priceAttestationToPriceFeed(priceAttestation);
return {
seqNum: Number(sequence),
vaa,
publishTime: priceAttestation.publishTime,
attestationTime: priceAttestation.attestationTime,
lastAttestedPublishTime: priceAttestation.lastAttestedPublishTime,
priceFeed,
emitterChainId: emitterChain,
priceServiceReceiveTime: Math.floor(new Date().getTime() / 1000),
};
}
export interface PriceStore {
getPriceIds(): Set<HexString>;
getLatestPriceInfo(priceFeedId: HexString): PriceInfo | undefined;
addUpdateListener(callback: (priceInfo: PriceInfo) => any): void;
getVaa(priceFeedId: string, publishTime: number): VaaConfig | undefined;
}
type ListenerReadinessConfig = {
spySyncTimeSeconds: number;
numLoadedSymbols: number;
};
type ListenerConfig = {
spyServiceHost: string;
wormholeCluster?: string;
filtersRaw?: string;
readiness: ListenerReadinessConfig;
webApiEndpoint?: string;
webApiCluster?: string;
cacheCleanupLoopInterval?: DurationInSec;
cacheTtl?: DurationInSec;
};
type VaaKey = string;
export type VaaConfig = {
publishTime: number;
lastAttestedPublishTime: number;
vaa: string;
};
export class VaaCache {
private cache: Map<string, VaaConfig[]>;
private ttl: DurationInSec;
private cacheCleanupLoopInterval: DurationInSec;
constructor(
ttl: DurationInSec = 300,
cacheCleanupLoopInterval: DurationInSec = 60
) {
this.cache = new Map();
this.ttl = ttl;
this.cacheCleanupLoopInterval = cacheCleanupLoopInterval;
}
set(
key: VaaKey,
publishTime: TimestampInSec,
lastAttestedPublishTime: TimestampInSec,
vaa: string
): void {
if (this.cache.has(key)) {
this.cache.get(key)!.push({ publishTime, lastAttestedPublishTime, vaa });
} else {
this.cache.set(key, [{ publishTime, lastAttestedPublishTime, vaa }]);
}
}
get(key: VaaKey, publishTime: TimestampInSec): VaaConfig | undefined {
if (!this.cache.has(key)) {
return undefined;
} else {
const vaaConf = this.find(this.cache.get(key)!, publishTime);
return vaaConf;
}
}
private find(
arr: VaaConfig[],
publishTime: TimestampInSec
): VaaConfig | undefined {
// If the requested publishTime is earlier than the first element's
// publishTime, we cannot be sure that the first element is actually
// the first VAA after that time.
if (arr.length === 0 || publishTime < arr[0].publishTime) {
return undefined;
}
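// Binary search: prefer an exact publishTime match whose last attested
// publish time is strictly older; otherwise return the first VAA
// published after the requested time.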
let left = 0;
let right = arr.length - 1;
let nextLargest = -1;
while (left <= right) {
const middle = Math.floor((left + right) / 2);
if (
arr[middle].publishTime === publishTime &&
arr[middle].lastAttestedPublishTime < publishTime
) {
return arr[middle];
} else if (arr[middle].publishTime < publishTime) {
left = middle + 1;
} else {
nextLargest = middle;
right = middle - 1;
}
}
return nextLargest !== -1 ? arr[nextLargest] : undefined;
}
async removeExpiredValues() {
const now = Math.floor(Date.now() / 1000);
for (const key of this.cache.keys()) {
this.cache.set(
key,
this.cache
.get(key)!
.filter((vaaConf) => now - vaaConf.publishTime < this.ttl)
);
}
}
runRemoveExpiredValuesLoop() {
setInterval(
this.removeExpiredValues.bind(this),
this.cacheCleanupLoopInterval * 1000
);
}
}
export class Listener implements PriceStore {
// Mapping of price feed id to the latest PriceInfo
private priceFeedVaaMap = new Map<string, PriceInfo>();
private promClient: PromClient | undefined;
private spyServiceHost: string;
private filters: FilterEntry[] = [];
private ignorePricesOlderThanSecs: number;
private spyConnectionTime: TimestampInSec | undefined;
private readinessConfig: ListenerReadinessConfig;
private updateCallbacks: ((priceInfo: PriceInfo) => any)[];
private observedVaas: LRUCache<VaaKey, boolean>;
private vaasCache: VaaCache;
private wormholeCluster: WormholeCluster;
constructor(config: ListenerConfig, promClient?: PromClient) {
this.promClient = promClient;
this.spyServiceHost = config.spyServiceHost;
this.loadFilters(config.filtersRaw);
// Don't store any prices received from wormhole that are over a minute old.
this.ignorePricesOlderThanSecs = 60;
this.readinessConfig = config.readiness;
this.updateCallbacks = [];
this.observedVaas = new LRUCache({
max: 10000, // At most 10000 items
ttl: 60 * 1000, // 1 minute, equal to ignorePricesOlderThanSecs
});
this.vaasCache = new VaaCache(
config.cacheTtl,
config.cacheCleanupLoopInterval
);
if (config.wormholeCluster !== undefined) {
this.wormholeCluster = wormholeClusterFromString(config.wormholeCluster);
} else {
this.wormholeCluster = "mainnet";
}
}
private loadFilters(filtersRaw?: string) {
if (!filtersRaw) {
logger.info("No filters provided. Will process all signed VAAs");
return;
}
const parsedJsonFilters = JSON.parse(filtersRaw);
for (const filter of parsedJsonFilters) {
const myChainId = parseInt(filter.chain_id, 10) as ChainId;
const myEmitterAddress = filter.emitter_address;
const myEmitterFilter: FilterEntry = {
emitterFilter: {
chainId: myChainId,
emitterAddress: myEmitterAddress,
},
};
logger.info(
"adding filter: chainId: [" +
myEmitterFilter.emitterFilter!.chainId +
"], emitterAddress: [" +
myEmitterFilter.emitterFilter!.emitterAddress +
"]"
);
this.filters.push(myEmitterFilter);
}
logger.info("loaded " + this.filters.length + " filters");
}
async run() {
logger.info(
"pyth_relay starting up, will listen for signed VAAs from " +
this.spyServiceHost
);
this.vaasCache.runRemoveExpiredValuesLoop();
while (true) {
let stream: ClientReadableStream<SubscribeSignedVAAResponse> | undefined;
try {
const client = createSpyRPCServiceClient(this.spyServiceHost);
stream = await subscribeSignedVAA(client, { filters: this.filters });
stream!.on("data", ({ vaaBytes }: { vaaBytes: Buffer }) => {
this.processVaa(vaaBytes);
});
this.spyConnectionTime = this.currentTimeInSeconds();
let connected = true;
stream!.on("error", (err: any) => {
logger.error("spy service returned an error: %o", err);
connected = false;
});
stream!.on("close", () => {
logger.error("spy service closed the connection!");
connected = false;
});
logger.info("connected to spy service, listening for messages");
while (connected) {
await sleep(1000);
}
} catch (e) {
logger.error("spy service threw an exception: %o", e);
}
if (stream) {
stream.destroy();
}
this.spyConnectionTime = undefined;
await sleep(1000);
logger.info("attempting to reconnect to the spy service");
}
}
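// A cached price info is superseded when the observed one has a later
// attestation time, or the same attestation time with a higher sequence number.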
isNewPriceInfo(
cachedInfo: PriceInfo | undefined,
observedInfo: PriceInfo
): boolean {
if (cachedInfo === undefined) {
return true;
}
if (cachedInfo.attestationTime < observedInfo.attestationTime) {
return true;
}
if (
cachedInfo.attestationTime === observedInfo.attestationTime &&
cachedInfo.seqNum < observedInfo.seqNum
) {
return true;
}
return false;
}
async processVaa(vaa: Buffer) {
const parsedVaa = parseVaa(vaa);
const vaaEmitterAddressHex = Buffer.from(parsedVaa.emitterAddress).toString(
"hex"
);
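// Deduplicate observed VAAs by (emitter chain, emitter address, sequence number).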
const observedVaasKey: VaaKey = `${parsedVaa.emitterChain}#${vaaEmitterAddressHex}#${parsedVaa.sequence}`;
if (this.observedVaas.has(observedVaasKey)) {
return;
}
if (!isValidVaa(parsedVaa, this.wormholeCluster)) {
logger.info("Ignoring an invalid VAA");
return;
}
let batchAttestation;
try {
batchAttestation = parseBatchPriceAttestation(
Buffer.from(parsedVaa.payload)
);
} catch (e: any) {
logger.error(e, e.stack);
logger.error("Parsing failed. Dropping vaa: %o", parsedVaa);
return;
}
if (batchAttestation.priceAttestations.length === 0) {
return;
}
// Attestation time is the same in all feeds in the batch.
// Return early if an attestation is old to exclude it from
// the counter metric.
if (
batchAttestation.priceAttestations[0].attestationTime <
this.currentTimeInSeconds() - this.ignorePricesOlderThanSecs
) {
return;
}
// No `await` has yielded control since the check above, so the key cannot have
// been added in the meantime; the re-check ensures this stays correct as the code evolves.
if (this.observedVaas.has(observedVaasKey)) {
return;
} else {
this.observedVaas.set(observedVaasKey, true);
this.promClient?.incReceivedVaa();
}
for (const priceAttestation of batchAttestation.priceAttestations) {
const key = priceAttestation.priceId;
const priceInfo = createPriceInfo(
priceAttestation,
vaa,
parsedVaa.sequence,
parsedVaa.emitterChain
);
const cachedPriceInfo = this.priceFeedVaaMap.get(key);
if (this.isNewPriceInfo(cachedPriceInfo, priceInfo)) {
this.vaasCache.set(
priceInfo.priceFeed.id,
priceInfo.publishTime,
priceInfo.lastAttestedPublishTime,
priceInfo.vaa.toString("base64")
);
this.priceFeedVaaMap.set(key, priceInfo);
if (cachedPriceInfo !== undefined) {
this.promClient?.addPriceUpdatesAttestationTimeGap(
priceAttestation.attestationTime - cachedPriceInfo.attestationTime
);
this.promClient?.addPriceUpdatesPublishTimeGap(
priceAttestation.publishTime - cachedPriceInfo.publishTime
);
}
for (const callback of this.updateCallbacks) {
callback(priceInfo);
}
}
}
logger.info(
"Parsed a new Batch Price Attestation: [" +
parsedVaa.emitterChain +
":" +
uint8ArrayToHex(parsedVaa.emitterAddress) +
"], seqNum: " +
parsedVaa.sequence +
", Batch Summary: " +
getBatchSummary(batchAttestation)
);
}
getVaa(priceFeedId: string, publishTime: number): VaaConfig | undefined {
return this.vaasCache.get(priceFeedId, publishTime);
}
getLatestPriceInfo(priceFeedId: string): PriceInfo | undefined {
return this.priceFeedVaaMap.get(priceFeedId);
}
addUpdateListener(callback: (priceInfo: PriceInfo) => any) {
this.updateCallbacks.push(callback);
}
getPriceIds(): Set<HexString> {
return new Set(this.priceFeedVaaMap.keys());
}
isReady(): boolean {
const currentTime: TimestampInSec = Math.floor(Date.now() / 1000);
if (
this.spyConnectionTime === undefined ||
currentTime <
this.spyConnectionTime + this.readinessConfig.spySyncTimeSeconds
) {
return false;
}
if (this.priceFeedVaaMap.size < this.readinessConfig.numLoadedSymbols) {
return false;
}
// if too many price feeds are stale it probably means that the price service
// is not receiving messages from Wormhole at all and is essentially dead.
const stalenessThreshold = 60;
const maxToleratedStaleFeeds = 10;
const priceIds = [...this.getPriceIds()];
let stalePriceCnt = 0;
for (const priceId of priceIds) {
const latency =
currentTime - this.getLatestPriceInfo(priceId)!.attestationTime;
if (latency > stalenessThreshold) {
stalePriceCnt++;
}
}
if (stalePriceCnt > maxToleratedStaleFeeds) {
return false;
}
return true;
}
private currentTimeInSeconds(): number {
return new Date().getTime() / 1000;
}
}

View File

@ -1,40 +0,0 @@
import * as winston from "winston";
export let logger = winston.createLogger({
transports: [new winston.transports.Console()],
});
// initLogger() should be called before the logger is used
export function initLogger(config?: { logLevel?: string }) {
let logLevel = "info";
if (config?.logLevel) {
logLevel = config.logLevel;
}
let transport: any;
// tslint:disable:no-console
console.log(
"price_service is logging to the console at level [%s]",
logLevel
);
transport = new winston.transports.Console({
level: logLevel,
});
const logConfiguration = {
transports: [transport],
format: winston.format.combine(
winston.format.splat(),
winston.format.simple(),
winston.format.timestamp({
format: "YYYY-MM-DD HH:mm:ss.SSS",
}),
winston.format.printf(
(info: any) => `${info.timestamp}|${info.level}|${info.message}`
)
),
};
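// The printf format above yields lines such as (hypothetical example):
// 2024-02-01 16:53:07.123|info|loaded 2 filters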
logger = winston.createLogger(logConfiguration);
}

View File

@ -1,90 +0,0 @@
import http = require("http");
import client = require("prom-client");
import { DurationInMs, DurationInSec } from "./helpers";
import { logger } from "./logging";
// NOTE: To create a new metric:
// 1) Create a private counter/gauge with an appropriate name and help text in the metrics section of PromClient
// 2) Create a method that sets the metric to a value (such as the `incReceivedVaa` function below)
// 3) Register the metric using the `register.registerMetric` function.
const SERVICE_PREFIX = "pyth__price_service__";
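// All metric names below share this prefix, e.g. `pyth__price_service__vaas_received`.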
type WebSocketInteractionType =
| "connection"
| "close"
| "timeout"
| "server_update"
| "client_message";
export class PromClient {
private register = new client.Registry();
// Actual metrics
private receivedVaaCounter = new client.Counter({
name: `${SERVICE_PREFIX}vaas_received`,
help: "number of Pyth VAAs received",
});
private priceUpdatesPublishTimeGapHistogram = new client.Histogram({
name: `${SERVICE_PREFIX}price_updates_publish_time_gap_seconds`,
help: "Summary of publish time gaps between price updates",
buckets: [1, 3, 5, 10, 15, 30, 60, 120],
});
private priceUpdatesAttestationTimeGapHistogram = new client.Histogram({
name: `${SERVICE_PREFIX}price_updates_attestation_time_gap_seconds`,
help: "Summary of attestation time gaps between price updates",
buckets: [1, 3, 5, 10, 15, 30, 60, 120],
});
private webSocketInteractionCounter = new client.Counter({
name: `${SERVICE_PREFIX}websocket_interaction`,
help: "number of Web Socket interactions",
labelNames: ["type", "status"],
});
// End metrics
private server = http.createServer(async (req, res) => {
if (req.url === "/metrics") {
// Return all metrics in the Prometheus exposition format
res.setHeader("Content-Type", this.register.contentType);
res.write(await this.register.metrics());
res.end();
}
});
constructor(config: { name: string; port: number }) {
this.register.setDefaultLabels({
app: config.name,
});
// Register each metric
this.register.registerMetric(this.receivedVaaCounter);
this.register.registerMetric(this.priceUpdatesPublishTimeGapHistogram);
this.register.registerMetric(this.priceUpdatesAttestationTimeGapHistogram);
this.register.registerMetric(this.webSocketInteractionCounter);
// End registering metric
logger.info("prometheus client listening on port " + config.port);
this.server.listen(config.port);
}
incReceivedVaa() {
this.receivedVaaCounter.inc();
}
addPriceUpdatesPublishTimeGap(gap: DurationInSec) {
this.priceUpdatesPublishTimeGapHistogram.observe(gap);
}
addPriceUpdatesAttestationTimeGap(gap: DurationInSec) {
this.priceUpdatesAttestationTimeGapHistogram.observe(gap);
}
addWebSocketInteraction(
type: WebSocketInteractionType,
status: "ok" | "err"
) {
this.webSocketInteractionCounter.inc({
type,
status,
});
}
}

View File

@ -1,673 +0,0 @@
import { HexString, Price, PriceFeed } from "@pythnetwork/price-service-sdk";
import cors from "cors";
import express, { NextFunction, Request, Response } from "express";
import { Joi, schema, validate, ValidationError } from "express-validation";
import { Server } from "http";
import { StatusCodes } from "http-status-codes";
import morgan from "morgan";
import fetch from "node-fetch";
import { parseBatchPriceAttestation } from "@pythnetwork/wormhole-attester-sdk";
import { removeLeading0x, TimestampInSec } from "./helpers";
import { createPriceInfo, PriceInfo, PriceStore } from "./listen";
import { logger } from "./logging";
import { PromClient } from "./promClient";
import { retry } from "ts-retry-promise";
import { parseVaa } from "@certusone/wormhole-sdk";
import { getOrElse } from "./helpers";
import {
TargetChain,
validTargetChains,
defaultTargetChain,
encodeVaaForChain,
} from "./encoding";
const MORGAN_LOG_FORMAT =
':remote-addr - :remote-user ":method :url HTTP/:http-version"' +
' :status :res[content-length] :response-time ms ":referrer" ":user-agent"';
// GET argument string to represent the options for target_chain
export const targetChainArgString = `target_chain=<${validTargetChains.join(
"|"
)}>`;
export class RestException extends Error {
statusCode: number;
message: string;
constructor(statusCode: number, message: string) {
super(message);
this.statusCode = statusCode;
this.message = message;
}
static PriceFeedIdNotFound(notFoundIds: string[]): RestException {
return new RestException(
StatusCodes.BAD_REQUEST,
`Price Feed(s) with id(s) ${notFoundIds.join(", ")} not found.`
);
}
static DbApiError(): RestException {
return new RestException(StatusCodes.INTERNAL_SERVER_ERROR, `DB API Error`);
}
static VaaNotFound(): RestException {
return new RestException(StatusCodes.NOT_FOUND, "VAA not found.");
}
}
function asyncWrapper(
callback: (req: Request, res: Response, next: NextFunction) => Promise<any>
) {
return (req: Request, res: Response, next: NextFunction) => {
callback(req, res, next).catch(next);
};
}
export type VaaResponse = {
publishTime: number;
vaa: string;
};
export class RestAPI {
private port: number;
private priceFeedVaaInfo: PriceStore;
private isReady: (() => boolean) | undefined;
private promClient: PromClient | undefined;
private dbApiEndpoint?: string;
private dbApiCluster?: string;
constructor(
config: { port: number; dbApiEndpoint?: string; dbApiCluster?: string },
priceFeedVaaInfo: PriceStore,
isReady?: () => boolean,
promClient?: PromClient
) {
this.port = config.port;
this.dbApiEndpoint = config.dbApiEndpoint;
this.dbApiCluster = config.dbApiCluster;
this.priceFeedVaaInfo = priceFeedVaaInfo;
this.isReady = isReady;
this.promClient = promClient;
}
async getVaaWithDbLookup(
priceFeedId: string,
publishTime: TimestampInSec
): Promise<VaaResponse | undefined> {
// Try to fetch the vaa from the local cache
const vaaConfig = this.priceFeedVaaInfo.getVaa(priceFeedId, publishTime);
let vaa: VaaResponse | undefined;
// If the VAA is not in the cache (e.g. because publishTime is older than the cache TTL), fetch it from the DB.
if (vaaConfig !== undefined) {
vaa = {
vaa: vaaConfig.vaa,
publishTime: vaaConfig.publishTime,
};
} else if (this.dbApiEndpoint && this.dbApiCluster) {
const priceFeedWithoutLeading0x = removeLeading0x(priceFeedId);
try {
const data = (await retry(
() =>
fetch(
`${this.dbApiEndpoint}/vaa?id=${priceFeedWithoutLeading0x}&publishTime=${publishTime}&cluster=${this.dbApiCluster}`
).then((res) => res.json()),
{ retries: 3 }
)) as any[];
if (data.length > 0) {
vaa = {
vaa: data[0].vaa,
publishTime: Math.floor(
new Date(data[0].publishTime).getTime() / 1000
),
};
}
} catch (e: any) {
logger.error(`DB API Error: ${e}`);
throw RestException.DbApiError();
}
}
return vaa;
}
// Extract the price info from an accumulator update. This is a temporary solution to maintain
// backward compatibility when the DB migrates to the new update format, until Hermes is adopted.
static extractPriceInfoFromAccumulatorUpdate(
priceFeedId: string,
updateData: Buffer
): PriceInfo | undefined {
let offset = 0;
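// Accumulator update wire layout, as parsed below:
//   magic (4) | major (1) | minor (1) | trailing header size (1) | trailing header |
//   update type (1) | VAA length (2, BE) | VAA | num updates (1) |
//   then per update: message length (2, BE) | message | proof length (1) | proof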
offset += 4; // magic
offset += 1; // major version
offset += 1; // minor version
const trailingHeaderSize = updateData.readUint8(offset);
offset += 1 + trailingHeaderSize;
const updateType = updateData.readUint8(offset);
offset += 1;
// There is a single update type of 0 for now.
if (updateType !== 0) {
logger.error(`Invalid accumulator update type: ${updateType}`);
return undefined;
}
const vaaLength = updateData.readUint16BE(offset);
offset += 2;
const vaaBuffer = updateData.slice(offset, offset + vaaLength);
const vaa = parseVaa(vaaBuffer);
offset += vaaLength;
const numUpdates = updateData.readUint8(offset);
offset += 1;
// Iterate through the updates to find the price info with the given id
for (let i = 0; i < numUpdates; i++) {
const messageLength = updateData.readUint16BE(offset);
offset += 2;
const message = updateData.slice(offset, offset + messageLength);
offset += messageLength;
const proofLength = updateData.readUint8(offset);
offset += 1;
// ignore proofs
offset += proofLength;
// Check whether the message is a price feed update
// for the given price id and, if so, extract the price info
let messageOffset = 0;
const messageType = message.readUint8(messageOffset);
messageOffset += 1;
// MessageType of 0 is a price feed update
if (messageType !== 0) {
continue;
}
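// Price feed message body, as read below: price id (32) | price (i64 BE) |
// conf (u64 BE) | expo (i32 BE) | publish time (i64 BE) |
// prev publish time (i64 BE) | EMA price (i64 BE) | EMA conf (u64 BE)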
const priceId = message
.slice(messageOffset, messageOffset + 32)
.toString("hex");
messageOffset += 32;
if (priceId !== priceFeedId) {
continue;
}
const price = message.readBigInt64BE(messageOffset);
messageOffset += 8;
const conf = message.readBigUint64BE(messageOffset);
messageOffset += 8;
const expo = message.readInt32BE(messageOffset);
messageOffset += 4;
const publishTime = message.readBigInt64BE(messageOffset);
messageOffset += 8;
const prevPublishTime = message.readBigInt64BE(messageOffset);
messageOffset += 8;
const emaPrice = message.readBigInt64BE(messageOffset);
messageOffset += 8;
const emaConf = message.readBigUint64BE(messageOffset);
return {
priceFeed: new PriceFeed({
id: priceFeedId,
price: new Price({
price: price.toString(),
conf: conf.toString(),
expo,
publishTime: Number(publishTime),
}),
emaPrice: new Price({
price: emaPrice.toString(),
conf: emaConf.toString(),
expo,
publishTime: Number(publishTime),
}),
}),
publishTime: Number(publishTime),
vaa: vaaBuffer,
seqNum: Number(vaa.sequence),
emitterChainId: vaa.emitterChain,
// These are not available in the accumulator update format
// but are required by the PriceInfo type.
attestationTime: Number(publishTime),
lastAttestedPublishTime: Number(prevPublishTime),
priceServiceReceiveTime: Number(publishTime),
};
}
return undefined;
}
static vaaToPriceInfo(
priceFeedId: string,
vaa: Buffer
): PriceInfo | undefined {
// Vaa could be the update data from the db with the Accumulator format.
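// The magic is the ASCII string "PNAU" hex-encoded.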
const ACCUMULATOR_MAGIC = "504e4155";
if (vaa.slice(0, 4).toString("hex") === ACCUMULATOR_MAGIC) {
return RestAPI.extractPriceInfoFromAccumulatorUpdate(priceFeedId, vaa);
}
const parsedVaa = parseVaa(vaa);
let batchAttestation;
try {
batchAttestation = parseBatchPriceAttestation(
Buffer.from(parsedVaa.payload)
);
} catch (e: any) {
logger.error(e, e.stack);
logger.error("Parsing historical VAA failed: %o", parsedVaa);
return undefined;
}
for (const priceAttestation of batchAttestation.priceAttestations) {
if (priceAttestation.priceId === priceFeedId) {
return createPriceInfo(
priceAttestation,
vaa,
parsedVaa.sequence,
parsedVaa.emitterChain
);
}
}
return undefined;
}
priceInfoToJson(
priceInfo: PriceInfo,
verbose: boolean,
targetChain: TargetChain | undefined
): object {
return {
...priceInfo.priceFeed.toJson(),
...(verbose && {
metadata: {
emitter_chain: priceInfo.emitterChainId,
attestation_time: priceInfo.attestationTime,
sequence_number: priceInfo.seqNum,
price_service_receive_time: priceInfo.priceServiceReceiveTime,
},
}),
...(targetChain !== undefined && {
vaa: encodeVaaForChain(priceInfo.vaa, targetChain),
}),
};
}
// This function can be called without `await` if you want it to run asynchronously.
async createApp() {
const app = express();
app.use(cors());
const winstonStream = {
write: (text: string) => {
logger.info(text);
},
};
app.use(morgan(MORGAN_LOG_FORMAT, { stream: winstonStream }));
const endpoints: string[] = [];
const latestVaasInputSchema: schema = {
query: Joi.object({
ids: Joi.array()
.items(Joi.string().regex(/^(0x)?[a-f0-9]{64}$/))
.required(),
target_chain: Joi.string()
.valid(...validTargetChains)
.optional(),
}).required(),
};
app.get(
"/api/latest_vaas",
validate(latestVaasInputSchema),
(req: Request, res: Response) => {
const priceIds = (req.query.ids as string[]).map(removeLeading0x);
const targetChain = getOrElse(
req.query.target_chain as TargetChain | undefined,
defaultTargetChain
);
// Multiple price ids might share the same VAA. We use the sequence number as
// the key of a VAA and deduplicate using a map from seqnum to VAA bytes.
const vaaMap = new Map<number, Buffer>();
const notFoundIds: string[] = [];
for (const id of priceIds) {
const latestPriceInfo = this.priceFeedVaaInfo.getLatestPriceInfo(id);
if (latestPriceInfo === undefined) {
notFoundIds.push(id);
continue;
}
vaaMap.set(latestPriceInfo.seqNum, latestPriceInfo.vaa);
}
if (notFoundIds.length > 0) {
throw RestException.PriceFeedIdNotFound(notFoundIds);
}
const jsonResponse = Array.from(vaaMap.values(), (vaa) =>
encodeVaaForChain(vaa, targetChain)
);
res.json(jsonResponse);
}
);
endpoints.push(
`api/latest_vaas?ids[]=<price_feed_id>&ids[]=<price_feed_id_2>&..&${targetChainArgString}`
);
const getVaaInputSchema: schema = {
query: Joi.object({
id: Joi.string()
.regex(/^(0x)?[a-f0-9]{64}$/)
.required(),
publish_time: Joi.number().required(),
target_chain: Joi.string()
.valid(...validTargetChains)
.optional(),
}).required(),
};
app.get(
"/api/get_vaa",
validate(getVaaInputSchema),
asyncWrapper(async (req: Request, res: Response) => {
const priceFeedId = removeLeading0x(req.query.id as string);
const publishTime = Number(req.query.publish_time as string);
const targetChain = getOrElse(
req.query.target_chain as TargetChain | undefined,
defaultTargetChain
);
if (
this.priceFeedVaaInfo.getLatestPriceInfo(priceFeedId) === undefined
) {
throw RestException.PriceFeedIdNotFound([priceFeedId]);
}
const vaaConfig = await this.getVaaWithDbLookup(
priceFeedId,
publishTime
);
if (vaaConfig === undefined) {
throw RestException.VaaNotFound();
} else {
vaaConfig.vaa = encodeVaaForChain(vaaConfig.vaa, targetChain);
res.json(vaaConfig);
}
})
);
endpoints.push(
`api/get_vaa?id=<price_feed_id>&publish_time=<publish_time_in_unix_timestamp>&${targetChainArgString}`
);
const getVaaCcipInputSchema: schema = {
query: Joi.object({
data: Joi.string()
.regex(/^0x[a-f0-9]{80}$/)
.required(),
}).required(),
};
// CCIP compatible endpoint. Read more information about it from
// https://eips.ethereum.org/EIPS/eip-3668
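// The hex `data` argument packs a 32-byte price feed id followed by an
// 8-byte big-endian unix publish time (80 hex chars = 40 bytes, per the regex above).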
app.get(
"/api/get_vaa_ccip",
validate(getVaaCcipInputSchema),
asyncWrapper(async (req: Request, res: Response) => {
const dataHex = req.query.data as string;
const data = Buffer.from(removeLeading0x(dataHex), "hex");
const priceFeedId = data.slice(0, 32).toString("hex");
const publishTime = Number(data.readBigInt64BE(32));
if (
this.priceFeedVaaInfo.getLatestPriceInfo(priceFeedId) === undefined
) {
throw RestException.PriceFeedIdNotFound([priceFeedId]);
}
const vaa = await this.getVaaWithDbLookup(priceFeedId, publishTime);
if (vaa === undefined) {
// Return Bad Gateway because CCIP expects a 5xx error if it needs to
// retry or try other endpoints. Bad Gateway seems the best choice here as this
// is not an internal error and can happen in two scenarios:
// 1. The DB API is not responding well (Bad Gateway is appropriate here).
// 2. The publish time is a few seconds before the current time and a VAA
//    will be available in a few seconds, so we want the client to retry.
res
.status(StatusCodes.BAD_GATEWAY)
.json({ "message:": "VAA not found." });
} else {
const resData = "0x" + Buffer.from(vaa.vaa, "base64").toString("hex");
res.json({
data: resData,
});
}
})
);
endpoints.push(
"api/get_vaa_ccip?data=<0x<price_feed_id_32_bytes>+<publish_time_unix_timestamp_be_8_bytes>>"
);
const latestPriceFeedsInputSchema: schema = {
query: Joi.object({
ids: Joi.array()
.items(Joi.string().regex(/^(0x)?[a-f0-9]{64}$/))
.required(),
verbose: Joi.boolean(),
binary: Joi.boolean(),
target_chain: Joi.string()
.valid(...validTargetChains)
.optional(),
}).required(),
};
app.get(
"/api/latest_price_feeds",
validate(latestPriceFeedsInputSchema),
(req: Request, res: Response) => {
const priceIds = (req.query.ids as string[]).map(removeLeading0x);
// verbose is optional, default to false
const verbose = req.query.verbose === "true";
// The binary and target_chain parameters are somewhat redundant; binary remains for backward compatibility.
// No VAA is returned if both arguments are omitted; binary=true is equivalent to target_chain=default.
let targetChain = req.query.target_chain as TargetChain | undefined;
if (targetChain === undefined && req.query.binary === "true") {
targetChain = defaultTargetChain;
}
const responseJson = [];
const notFoundIds: string[] = [];
for (const id of priceIds) {
const latestPriceInfo = this.priceFeedVaaInfo.getLatestPriceInfo(id);
if (latestPriceInfo === undefined) {
notFoundIds.push(id);
continue;
}
responseJson.push(
this.priceInfoToJson(latestPriceInfo, verbose, targetChain)
);
}
if (notFoundIds.length > 0) {
throw RestException.PriceFeedIdNotFound(notFoundIds);
}
res.json(responseJson);
}
);
endpoints.push(
"api/latest_price_feeds?ids[]=<price_feed_id>&ids[]=<price_feed_id_2>&.."
);
endpoints.push(
"api/latest_price_feeds?ids[]=<price_feed_id>&ids[]=<price_feed_id_2>&..&verbose=true"
);
endpoints.push(
"api/latest_price_feeds?ids[]=<price_feed_id>&ids[]=<price_feed_id_2>&..&verbose=true&binary=true"
);
endpoints.push(
`api/latest_price_feeds?ids[]=<price_feed_id>&ids[]=<price_feed_id_2>&..&verbose=true&${targetChainArgString}`
);
const getPriceFeedInputSchema: schema = {
query: Joi.object({
id: Joi.string()
.regex(/^(0x)?[a-f0-9]{64}$/)
.required(),
publish_time: Joi.number().required(),
verbose: Joi.boolean(),
binary: Joi.boolean(),
target_chain: Joi.string()
.valid(...validTargetChains)
.optional(),
}).required(),
};
app.get(
"/api/get_price_feed",
validate(getPriceFeedInputSchema),
asyncWrapper(async (req: Request, res: Response) => {
const priceFeedId = removeLeading0x(req.query.id as string);
const publishTime = Number(req.query.publish_time as string);
// verbose is optional, default to false
const verbose = req.query.verbose === "true";
// The binary and target_chain parameters are somewhat redundant; binary remains for backward compatibility.
// No VAA is returned if both arguments are omitted; binary=true is equivalent to target_chain=default.
let targetChain = req.query.target_chain as TargetChain | undefined;
if (targetChain === undefined && req.query.binary === "true") {
targetChain = defaultTargetChain;
}
if (
this.priceFeedVaaInfo.getLatestPriceInfo(priceFeedId) === undefined
) {
throw RestException.PriceFeedIdNotFound([priceFeedId]);
}
const vaa = await this.getVaaWithDbLookup(priceFeedId, publishTime);
if (vaa === undefined) {
throw RestException.VaaNotFound();
}
const priceInfo = RestAPI.vaaToPriceInfo(
priceFeedId,
Buffer.from(vaa.vaa, "base64")
);
if (priceInfo === undefined) {
throw RestException.VaaNotFound();
} else {
res.json(this.priceInfoToJson(priceInfo, verbose, targetChain));
}
})
);
endpoints.push(
"api/get_price_feed?id=<price_feed_id>&publish_time=<publish_time_in_unix_timestamp>"
);
endpoints.push(
"api/get_price_feed?id=<price_feed_id>&publish_time=<publish_time_in_unix_timestamp>&verbose=true"
);
endpoints.push(
"api/get_price_feed?id=<price_feed_id>&publish_time=<publish_time_in_unix_timestamp>&binary=true"
);
app.get("/api/price_feed_ids", (req: Request, res: Response) => {
const availableIds = this.priceFeedVaaInfo.getPriceIds();
res.json([...availableIds]);
});
endpoints.push("api/price_feed_ids");
const staleFeedsInputSchema: schema = {
query: Joi.object({
threshold: Joi.number().required(),
}).required(),
};
app.get(
"/api/stale_feeds",
validate(staleFeedsInputSchema),
(req: Request, res: Response) => {
const stalenessThresholdSeconds = Number(req.query.threshold as string);
const currentTime: TimestampInSec = Math.floor(Date.now() / 1000);
const priceIds = [...this.priceFeedVaaInfo.getPriceIds()];
const stalePrices: Record<HexString, number> = {};
for (const priceId of priceIds) {
const latency =
currentTime -
this.priceFeedVaaInfo.getLatestPriceInfo(priceId)!.attestationTime;
if (latency > stalenessThresholdSeconds) {
stalePrices[priceId] = latency;
}
}
res.json(stalePrices);
}
);
endpoints.push("/api/stale_feeds?threshold=<staleness_threshold_seconds>");
app.get("/ready", (_, res: Response) => {
if (this.isReady === undefined || this.isReady!()) {
res.sendStatus(StatusCodes.OK);
} else {
res.sendStatus(StatusCodes.SERVICE_UNAVAILABLE);
}
});
endpoints.push("ready");
app.get("/live", (_, res: Response) => {
res.sendStatus(StatusCodes.OK);
});
endpoints.push("live");
// Websocket endpoint
endpoints.push("ws");
app.get("/", (_, res: Response) => res.json(endpoints));
app.use((err: any, _: Request, res: Response, next: NextFunction) => {
if (err instanceof ValidationError) {
return res.status(err.statusCode).json(err);
}
if (err instanceof RestException) {
return res.status(err.statusCode).json(err);
}
return next(err);
});
return app;
}
async run(): Promise<Server> {
const app = await this.createApp();
return app.listen(this.port, () =>
logger.debug("listening on REST port " + this.port)
);
}
}

View File

@ -1,118 +0,0 @@
import { logger } from "./logging";
import { ParsedVaa } from "@certusone/wormhole-sdk";
import { GuardianSet } from "@certusone/wormhole-spydk/lib/cjs/proto/publicrpc/v1/publicrpc";
import * as secp256k1 from "secp256k1";
import * as keccak from "keccak";
const WormholeClusters = ["localnet", "testnet", "mainnet"] as const;
export type WormholeCluster = typeof WormholeClusters[number];
export function wormholeClusterFromString(s: string): WormholeCluster {
if (WormholeClusters.includes(s as WormholeCluster)) {
return s as WormholeCluster;
}
throw new Error(`Invalid wormhole cluster: ${s}`);
}
const guardianSets: Record<WormholeCluster, GuardianSet> = {
localnet: {
index: 0,
addresses: ["0xbeFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBe"],
},
testnet: {
index: 0,
addresses: ["0x13947Bd48b18E53fdAeEe77F3473391aC727C638"],
},
mainnet: {
index: 3,
addresses: [
"0x58CC3AE5C097b213cE3c81979e1B9f9570746AA5",
"0xfF6CB952589BDE862c25Ef4392132fb9D4A42157",
"0x114De8460193bdf3A2fCf81f86a09765F4762fD1",
"0x107A0086b32d7A0977926A205131d8731D39cbEB",
"0x8C82B2fd82FaeD2711d59AF0F2499D16e726f6b2",
"0x11b39756C042441BE6D8650b69b54EbE715E2343",
"0x54Ce5B4D348fb74B958e8966e2ec3dBd4958a7cd",
"0x15e7cAF07C4e3DC8e7C469f92C8Cd88FB8005a20",
"0x74a3bf913953D695260D88BC1aA25A4eeE363ef0",
"0x000aC0076727b35FBea2dAc28fEE5cCB0fEA768e",
"0xAF45Ced136b9D9e24903464AE889F5C8a723FC14",
"0xf93124b7c738843CBB89E864c862c38cddCccF95",
"0xD2CC37A4dc036a8D232b48f62cDD4731412f4890",
"0xDA798F6896A3331F64b48c12D1D57Fd9cbe70811",
"0x71AA1BE1D36CaFE3867910F99C09e347899C19C3",
"0x8192b6E7387CCd768277c17DAb1b7a5027c0b3Cf",
"0x178e21ad2E77AE06711549CFBB1f9c7a9d8096e8",
"0x5E1487F35515d02A92753504a8D75471b9f49EdB",
"0x6FbEBc898F403E4773E95feB15E80C9A99c8348d",
],
},
};
export function isValidVaa(vaa: ParsedVaa, cluster: WormholeCluster): boolean {
const currentGuardianSet = guardianSets[cluster];
if (vaa.guardianSetIndex !== currentGuardianSet.index) {
return false;
}
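// Quorum: at least two-thirds (rounded up) of the guardian set must have signed.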
const threshold = Math.ceil((currentGuardianSet.addresses.length * 2) / 3);
if (vaa.guardianSignatures.length < threshold) {
return false;
}
// It's not possible to call a signature verification function directly
// because we only have the addresses of the guardians and not their public
// keys. Instead, we compare the address extracted from the public key that
// signed the VAA with the corresponding address stored in the guardian set.
const messageHash = keccak.default("keccak256").update(vaa.hash).digest();
let counter = 0;
try {
vaa.guardianSignatures.forEach((sig) => {
// Each signature is a 65-byte secp256k1 signature with the recovery ID at
// the last byte. It is not the compact representation from EIP-2098.
const recoveryID = sig.signature[64] % 2;
const signature = sig.signature.slice(0, 64);
const publicKey = Buffer.from(
secp256k1.ecdsaRecover(signature, recoveryID, messageHash, false)
);
// The first byte of the recovered (uncompressed) public key is the 0x04
// prefix; remove it before hashing.
const publicKeyHash = keccak
.default("keccak256")
.update(publicKey.slice(1))
.digest();
// The last 20 bytes of the hash are the address.
const address = publicKeyHash.slice(-20).toString("hex");
if (
checksumAddress(address) === currentGuardianSet.addresses[sig.index]
) {
counter++;
}
});
return counter === vaa.guardianSignatures.length;
} catch (error) {
logger.warn("Error validating VAA signatures:", error);
return false;
}
}
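// EIP-55 mixed-case checksum encoding; the guardian addresses above are stored in this format.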
function checksumAddress(address: string) {
address = address.toLowerCase().replace("0x", "");
const hash = keccak.default("keccak256").update(address).digest("hex");
let ret = "0x";
for (let i = 0; i < address.length; i++) {
if (parseInt(hash[i], 16) >= 8) {
ret += address[i].toUpperCase();
} else {
ret += address[i];
}
}
return ret;
}

View File

@ -1,281 +0,0 @@
import { HexString } from "@pythnetwork/price-service-sdk";
import * as http from "http";
import Joi from "joi";
import WebSocket, { RawData, WebSocketServer } from "ws";
import { PriceInfo, PriceStore } from "./listen";
import { logger } from "./logging";
import { PromClient } from "./promClient";
const ClientMessageSchema: Joi.Schema = Joi.object({
type: Joi.string().valid("subscribe", "unsubscribe").required(),
ids: Joi.array()
.items(Joi.string().regex(/^(0x)?[a-f0-9]{64}$/))
.required(),
verbose: Joi.boolean(),
binary: Joi.boolean(),
}).required();
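// Example client message matching the schema above (hypothetical id):
// {"type":"subscribe","ids":["f9c0172ba10dfa4d19088d94f5bf61d3b54d5bd7483a322a982e1373ee8ea31b"],"verbose":true,"binary":false}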
export type ClientMessage = {
type: "subscribe" | "unsubscribe";
ids: HexString[];
verbose?: boolean;
binary?: boolean;
};
export type ServerResponse = {
type: "response";
status: "success" | "error";
error?: string;
};
export type ServerPriceUpdate = {
type: "price_update";
price_feed: any;
};
export type PriceFeedConfig = {
verbose: boolean;
binary: boolean;
};
export type ServerMessage = ServerResponse | ServerPriceUpdate;
export class WebSocketAPI {
private wsCounter: number;
private priceFeedClients: Map<HexString, Set<WebSocket>>;
private priceFeedClientsConfig: Map<
HexString,
Map<WebSocket, PriceFeedConfig>
>;
private aliveClients: Set<WebSocket>;
private wsId: Map<WebSocket, number>;
private priceFeedVaaInfo: PriceStore;
private promClient: PromClient | undefined;
constructor(priceFeedVaaInfo: PriceStore, promClient?: PromClient) {
this.priceFeedVaaInfo = priceFeedVaaInfo;
this.priceFeedClients = new Map();
this.priceFeedClientsConfig = new Map();
this.aliveClients = new Set();
this.wsCounter = 0;
this.wsId = new Map();
this.promClient = promClient;
}
private addPriceFeedClient(
ws: WebSocket,
id: HexString,
verbose: boolean = false,
binary: boolean = false
) {
if (!this.priceFeedClients.has(id)) {
this.priceFeedClients.set(id, new Set());
this.priceFeedClientsConfig.set(id, new Map([[ws, { verbose, binary }]]));
} else {
this.priceFeedClientsConfig.get(id)!.set(ws, { verbose, binary });
}
this.priceFeedClients.get(id)!.add(ws);
}
private delPriceFeedClient(ws: WebSocket, id: HexString) {
if (!this.priceFeedClients.has(id)) {
return;
}
this.priceFeedClients.get(id)!.delete(ws);
this.priceFeedClientsConfig.get(id)!.delete(ws);
}
dispatchPriceFeedUpdate(priceInfo: PriceInfo) {
if (this.priceFeedClients.get(priceInfo.priceFeed.id) === undefined) {
logger.info(
`Sending ${priceInfo.priceFeed.id} price update to no clients.`
);
return;
}
const clients: Set<WebSocket> = this.priceFeedClients.get(
priceInfo.priceFeed.id
)!;
logger.info(
`Sending ${priceInfo.priceFeed.id} price update to ${
clients.size
} clients: ${Array.from(clients.values()).map((ws, _idx, _arr) =>
this.wsId.get(ws)
)}`
);
for (const client of clients.values()) {
this.promClient?.addWebSocketInteraction("server_update", "ok");
const config = this.priceFeedClientsConfig
.get(priceInfo.priceFeed.id)!
.get(client);
const verbose = config?.verbose;
const binary = config?.binary;
const priceUpdate: ServerPriceUpdate = {
type: "price_update",
price_feed: {
...priceInfo.priceFeed.toJson(),
...(verbose && {
metadata: {
emitter_chain: priceInfo.emitterChainId,
attestation_time: priceInfo.attestationTime,
sequence_number: priceInfo.seqNum,
price_service_receive_time: priceInfo.priceServiceReceiveTime,
},
}),
...(binary && {
vaa: priceInfo.vaa.toString("base64"),
}),
},
};
client.send(JSON.stringify(priceUpdate));
}
}
clientClose(ws: WebSocket) {
for (const [id, clients] of this.priceFeedClients.entries()) {
if (clients.has(ws)) {
clients.delete(ws);
// Also drop the per-client config so it does not leak after disconnect.
this.priceFeedClientsConfig.get(id)?.delete(ws);
}
}
this.aliveClients.delete(ws);
this.wsId.delete(ws);
}
handleMessage(ws: WebSocket, data: RawData) {
try {
const jsonData = JSON.parse(data.toString());
const validationResult = ClientMessageSchema.validate(jsonData);
if (validationResult.error !== undefined) {
throw validationResult.error;
}
const message = jsonData as ClientMessage;
message.ids = message.ids.map((id) => {
if (id.startsWith("0x")) {
return id.substring(2);
}
return id;
});
const availableIds = this.priceFeedVaaInfo.getPriceIds();
const notFoundIds = message.ids.filter((id) => !availableIds.has(id));
if (notFoundIds.length > 0) {
throw new Error(
`Price Feeds with ids ${notFoundIds.join(", ")} not found`
);
}
if (message.type === "subscribe") {
message.ids.forEach((id) =>
this.addPriceFeedClient(
ws,
id,
message.verbose === true,
message.binary === true
)
);
} else {
message.ids.forEach((id) => this.delPriceFeedClient(ws, id));
}
} catch (e: any) {
const errorResponse: ServerResponse = {
type: "response",
status: "error",
error: e.message,
};
logger.info(
`Invalid request ${data.toString()} from client ${this.wsId.get(ws)}`
);
this.promClient?.addWebSocketInteraction("client_message", "err");
ws.send(JSON.stringify(errorResponse));
return;
}
logger.info(
`Successful request ${data.toString()} from client ${this.wsId.get(ws)}`
);
this.promClient?.addWebSocketInteraction("client_message", "ok");
const response: ServerResponse = {
type: "response",
status: "success",
};
ws.send(JSON.stringify(response));
}
run(server: http.Server): WebSocketServer {
const wss = new WebSocketServer({
server,
path: "/ws",
maxPayload: 100 * 1024, // 100 KiB
});
wss.on("connection", (ws: WebSocket, request: http.IncomingMessage) => {
logger.info(
`Incoming ws connection from ${request.socket.remoteAddress}, assigned id: ${this.wsCounter}`
);
this.wsId.set(ws, this.wsCounter);
this.wsCounter += 1;
ws.on("message", (data: RawData) => this.handleMessage(ws, data));
this.aliveClients.add(ws);
ws.on("pong", (_data) => {
this.aliveClients.add(ws);
});
ws.on("error", (err: Error) => {
logger.warn(`Err with client ${this.wsId.get(ws)}: ${err}`);
});
ws.on("close", (_code: number, _reason: Buffer) => {
logger.info(`client ${this.wsId.get(ws)} closed the connection.`);
this.promClient?.addWebSocketInteraction("close", "ok");
this.clientClose(ws);
});
this.promClient?.addWebSocketInteraction("connection", "ok");
});
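// Liveness check: every 30 seconds, terminate clients that did not answer the
// previous ping; receiving a pong re-marks a client as alive.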
const pingInterval = setInterval(() => {
wss.clients.forEach((ws) => {
if (this.aliveClients.has(ws) === false) {
logger.info(
`client ${this.wsId.get(ws)} timed out. terminating connection`
);
this.promClient?.addWebSocketInteraction("timeout", "ok");
this.clientClose(ws);
ws.terminate();
return;
}
this.aliveClients.delete(ws);
ws.ping();
});
}, 30000);
wss.on("close", () => {
clearInterval(pingInterval);
});
this.priceFeedVaaInfo.addUpdateListener(
this.dispatchPriceFeedUpdate.bind(this)
);
return wss;
}
}

View File

@ -1,9 +0,0 @@
{
"extends": "../../tsconfig.base.json",
"include": ["src"],
"exclude": ["node_modules", "**/__tests__/*"],
"compilerOptions": {
"rootDir": "src/",
"outDir": "./lib"
}
}

View File

@ -1,8 +0,0 @@
{
"extends": ["tslint:recommended", "tslint-config-prettier"],
"rules": {
"max-classes-per-file": {
"severity": "off"
}
}
}

View File

@ -1,48 +0,0 @@
[package]
name = "pyth-wormhole-attester-client"
version = "5.0.0"
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
name = "pyth_wormhole_attester_client"
[[bin]]
name = "pwhac"
path = "src/main.rs"
[features]
default = ["pyth-wormhole-attester/client", "wormhole-bridge-solana/client", "pyth-wormhole-attester/trace"]
[dependencies]
borsh = "=0.9.3"
clap = {version = "3.1.18", features = ["derive"]}
env_logger = "0.8.4"
log = "0.4.14"
wormhole-bridge-solana = {git = "https://github.com/wormhole-foundation/wormhole", tag = "v2.14.8"}
pyth-wormhole-attester = {path = "../program"}
pyth-wormhole-attester-sdk = { path = "../sdk/rust", features=["solana"] }
pyth-sdk-solana = "0.6.1"
serde = "1"
serde_yaml = "0.8"
shellexpand = "2.1.0"
solana-client = "=1.10.31"
solana-program = "=1.10.31"
solana-sdk = "=1.10.31"
solana-transaction-status = "=1.10.31"
solitaire = {git = "https://github.com/wormhole-foundation/wormhole", tag = "v2.14.8"}
tokio = {version = "1", features = ["sync", "rt-multi-thread", "time"]}
futures = "0.3.21"
sha3 = "0.10.6"
generic-array = "0.14.6"
lazy_static = "1.4.0"
prometheus = "0.13.3"
warp = "0.3.3"
http = "0.2.8"
[dev-dependencies]
solana-program-test = "=1.10.31"
solana-sdk = "=1.10.31"
serial_test = "1.0.0"

View File

@ -1,31 +0,0 @@
#syntax=docker/dockerfile:1.2@sha256:e2a8561e419ab1ba6b2fe6cbdf49fd92b95912df1cf7d313c3e2230a333fdbcc
FROM ghcr.io/certusone/solana:1.10.31@sha256:d31e8db926a1d3fbaa9d9211d9979023692614b7b64912651aba0383e8c01bad AS solana
RUN apt-get update && apt-get install -yq python3 libudev-dev ncat
RUN curl -fsSL https://deb.nodesource.com/setup_16.x | bash - && apt-get install -y nodejs
ADD governance/remote_executor /usr/src/governance/remote_executor
ADD wormhole_attester /usr/src/wormhole_attester
WORKDIR /usr/src/wormhole_attester
ENV EMITTER_ADDRESS="11111111111111111111111111111115"
ENV BRIDGE_ADDRESS="Bridge1p5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o"
RUN --mount=type=cache,target=/root/.cache \
--mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=target \
cargo test --package pyth-wormhole-attester-client && \
cargo build --package pyth-wormhole-attester-client && \
mv target/debug/pwhac /usr/local/bin/pwhac
ADD third_party/pyth/pyth_utils.py /usr/src/pyth/pyth_utils.py
ADD third_party/pyth/p2w_autoattest.py /usr/src/pyth/p2w_autoattest.py
ADD tilt_devnet/secrets/solana /solana-secrets
RUN chmod a+rx /usr/src/pyth/*.py
ENV P2W_OWNER_KEYPAIR="/solana-secrets/p2w_owner.json"
ENV P2W_ATTESTATIONS_PORT="4343"
ENV PYTH_PUBLISHER_KEYPAIR="/solana-secrets/pyth_publisher.json"
ENV PYTH_PROGRAM_KEYPAIR="/solana-secrets/pyth_program.json"

View File

@ -1,656 +0,0 @@
use {
crate::{
attestation_cfg::SymbolConfig::{
Key,
Name,
},
P2WProductAccount,
},
log::{
info,
warn,
},
serde::{
de::Error,
Deserialize,
Deserializer,
Serialize,
Serializer,
},
solana_program::pubkey::Pubkey,
std::{
collections::{
HashMap,
HashSet,
},
str::FromStr,
},
};
/// Pyth2wormhole config specific to attestation requests
#[derive(Clone, Debug, Hash, Deserialize, Serialize, PartialEq, Eq)]
pub struct AttestationConfig {
#[serde(default = "default_min_msg_reuse_interval_ms")]
pub min_msg_reuse_interval_ms: u64,
#[serde(default = "default_max_msg_accounts")]
pub max_msg_accounts: u64,
/// How many consecutive attestation failures cause the service to
/// report as unhealthy.
#[serde(default = "default_healthcheck_window_size")]
pub healthcheck_window_size: u64,
#[serde(default = "default_enable_healthcheck")]
pub enable_healthcheck: bool,
/// Optionally, we take a mapping account to add the remaining symbols from a Pyth deployment.
/// These symbols are processed under `default_attestation_conditions`.
#[serde(
deserialize_with = "opt_pubkey_string_de",
serialize_with = "opt_pubkey_string_ser",
default // Uses Option::default() which is None
)]
pub mapping_addr: Option<Pubkey>,
/// The known symbol list is reloaded on this interval to account
/// for mapping changes. Note: this interval only takes effect if
/// the mapping address is defined. Whenever it's time to look up
/// the mapping, new attestation jobs are started lazily, only if
/// the mapping contents changed the known symbol list, and before
/// the pre-existing obsolete jobs are stopped, to maintain
/// uninterrupted cranking.
#[serde(default = "default_mapping_reload_interval_mins")]
pub mapping_reload_interval_mins: u64,
#[serde(default = "default_min_rpc_interval_ms")]
/// Rate-limiting minimum delay between RPC requests in milliseconds
pub min_rpc_interval_ms: u64,
/// Attestation conditions that will be used for any symbols included in the mapping
/// that aren't explicitly in one of the groups below, and any groups without explicitly
/// configured attestation conditions.
#[serde(default)]
pub default_attestation_conditions: AttestationConditions,
/// Groups of symbols to publish.
pub symbol_groups: Vec<SymbolGroupConfig>,
}
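// A minimal YAML config for this struct might look like (hypothetical values;
// omitted fields fall back to their serde defaults):
//
//   mapping_addr: null
//   default_attestation_conditions:
//     min_interval_ms: 60000
//   symbol_groups:
//     - group_name: fast bois
//       conditions:
//         min_interval_ms: 5
//       symbols:
//         - type: name
//           name: ETHUSD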
impl AttestationConfig {
/// Instantiate the batches of symbols to attest by matching the config against the collection
/// of on-chain product accounts.
pub fn instantiate_batches(
&self,
product_accounts: &[P2WProductAccount],
max_batch_size: usize,
) -> Vec<SymbolBatch> {
// Construct mapping from the name of each product account to its corresponding symbols
let mut name_to_symbols: HashMap<String, Vec<P2WSymbol>> = HashMap::new();
for product_account in product_accounts {
for price_account_key in &product_account.price_account_keys {
if let Some(name) = &product_account.name {
let symbol = P2WSymbol {
name: Some(name.clone()),
product_addr: product_account.key,
price_addr: *price_account_key,
};
name_to_symbols
.entry(name.clone())
.or_default()
.push(symbol);
}
}
}
// Instantiate batches from the configured symbol groups.
let mut configured_batches: Vec<SymbolBatch> = vec![];
for group in &self.symbol_groups {
let group_symbols: Vec<P2WSymbol> = group
.symbols
.iter()
.flat_map(|symbol| match &symbol {
Key {
name,
product,
price,
} => {
vec![P2WSymbol {
name: name.clone(),
product_addr: *product,
price_addr: *price,
}]
}
Name { name } => {
let maybe_matched_symbols: Option<&Vec<P2WSymbol>> =
name_to_symbols.get(name);
if let Some(matched_symbols) = maybe_matched_symbols {
matched_symbols.clone()
} else {
// It's slightly unfortunate that this is a warning, but it seems better than crashing.
// The data in the mapping account can change while the attester is running and trigger this case,
// which means that it is not necessarily a configuration problem.
// Note that any named symbols in the config which fail to match will still be included
// in the remaining_symbols group below.
warn!(
"Could not find product account for configured symbol {}",
name
);
vec![]
}
}
})
.collect();
let group_conditions = group
.conditions
.as_ref()
.unwrap_or(&self.default_attestation_conditions);
configured_batches.extend(AttestationConfig::partition_into_batches(
&group.group_name,
max_batch_size,
group_conditions,
group_symbols,
))
}
// Find any accounts not included in existing batches and group them into a remainder batch
let existing_price_accounts: HashSet<Pubkey> = configured_batches
.iter()
.flat_map(|batch| batch.symbols.iter().map(|symbol| symbol.price_addr))
.collect();
let mut remaining_symbols: Vec<P2WSymbol> = vec![];
for product_account in product_accounts {
for price_account_key in &product_account.price_account_keys {
if !existing_price_accounts.contains(price_account_key) {
let symbol = P2WSymbol {
name: product_account.name.clone(),
product_addr: product_account.key,
price_addr: *price_account_key,
};
remaining_symbols.push(symbol);
}
}
}
let remaining_batches = AttestationConfig::partition_into_batches(
&"mapping".to_owned(),
max_batch_size,
&self.default_attestation_conditions,
remaining_symbols,
);
let all_batches = configured_batches
.into_iter()
.chain(remaining_batches.into_iter())
.collect::<Vec<SymbolBatch>>();
for batch in &all_batches {
info!(
"Batch {:?}, {} symbols",
batch.group_name,
batch.symbols.len(),
);
}
all_batches
}
/// Partition symbols into a collection of batches, each of which contains no more than
/// `max_batch_size` symbols.
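/// For example, 5 symbols with `max_batch_size = 2` yield batches of sizes 2, 2 and 1.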
fn partition_into_batches(
batch_name: &String,
max_batch_size: usize,
conditions: &AttestationConditions,
symbols: Vec<P2WSymbol>,
) -> Vec<SymbolBatch> {
symbols
.as_slice()
.chunks(max_batch_size)
.map(move |batch_symbols| SymbolBatch {
group_name: batch_name.to_owned(),
symbols: batch_symbols.to_vec(),
conditions: conditions.clone(),
})
.collect()
}
}
#[derive(Clone, Debug, Hash, Deserialize, Serialize, PartialEq, Eq)]
pub struct SymbolGroupConfig {
pub group_name: String,
/// Attestation conditions applied to all symbols in this group
/// If not provided, use the default attestation conditions from `AttestationConfig`.
pub conditions: Option<AttestationConditions>,
/// The symbols to publish in this group.
pub symbols: Vec<SymbolConfig>,
}
/// Config entry for a symbol to attest.
#[derive(Clone, Debug, Hash, Deserialize, Serialize, PartialEq, Eq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum SymbolConfig {
/// A symbol specified by its product name.
Name {
/// The name of the symbol. This name is matched against the "symbol" field in the product
/// account metadata. If multiple price accounts have this name (either because 2 product
/// accounts have the same symbol or a single product account has multiple price accounts),
/// it matches *all* of them and puts them into this group.
name: String,
},
/// A symbol specified by its product and price account keys.
Key {
/// Optional human-readable name for the symbol (for logging purposes).
/// This field does not need to match the on-chain data for the product.
name: Option<String>,
#[serde(
deserialize_with = "pubkey_string_de",
serialize_with = "pubkey_string_ser"
)]
product: Pubkey,
#[serde(
deserialize_with = "pubkey_string_de",
serialize_with = "pubkey_string_ser"
)]
price: Pubkey,
},
}
impl ToString for SymbolConfig {
fn to_string(&self) -> String {
match &self {
Name { name } => name.clone(),
Key {
name: Some(name),
product: _,
price: _,
} => name.clone(),
Key {
name: None,
product,
price: _,
} => {
format!("Unnamed product {product}")
}
}
}
}
/// A batch of symbols that's ready to be attested. Includes all necessary information
/// (such as price/product account keys).
#[derive(Clone, Debug, Hash, Deserialize, Serialize, PartialEq, Eq)]
pub struct SymbolBatch {
pub group_name: String,
/// Attestation conditions applied to all symbols in this group
pub conditions: AttestationConditions,
pub symbols: Vec<P2WSymbol>,
}
pub const fn default_max_msg_accounts() -> u64 {
1_000_000
}
pub const fn default_min_msg_reuse_interval_ms() -> u64 {
10_000 // 10s
}
pub const fn default_healthcheck_window_size() -> u64 {
100
}
pub const fn default_enable_healthcheck() -> bool {
true
}
pub const fn default_mapping_reload_interval_mins() -> u64 {
15
}
pub const fn default_min_rpc_interval_ms() -> u64 {
150
}
pub const fn default_min_interval_ms() -> u64 {
60_000
}
pub const fn default_rate_limit_interval_secs() -> u32 {
1
}
pub const fn default_max_batch_jobs() -> usize {
20
}
/// Per-group attestation resend rules. Attestation is triggered if
/// any of the active conditions is met. Option<> fields can be
/// de-activated with None. All conditions are inactive by default,
/// except for the non-Option ones.
#[derive(Clone, Debug, Hash, Deserialize, Serialize, PartialEq, Eq)]
pub struct AttestationConditions {
/// Lower bound on attestation rate. Attestation is triggered
/// unconditionally whenever the specified interval elapses since
/// last attestation.
#[serde(default = "default_min_interval_ms")]
pub min_interval_ms: u64,
/// Upper bound on attestation rate. Attesting the same batch
/// before this many seconds pass fails the tx. This limit is
/// enforced on-chain, letting concurrent attesters prevent
/// redundant batch resends and tx expenses. NOTE: The client
/// logic does not include rate limit failures in monitoring error
/// counts. 0 effectively disables this feature.
#[serde(default = "default_rate_limit_interval_secs")]
pub rate_limit_interval_secs: u32,
/// Limit concurrent attestation attempts per batch. This setting
/// should act only as a failsafe cap on resource consumption and is
/// best set well above the expected average number of jobs.
#[serde(default = "default_max_batch_jobs")]
pub max_batch_jobs: usize,
/// Trigger attestation if price changes by the specified
/// percentage, expressed in integer basis points (1bps = 0.01%)
#[serde(default)]
pub price_changed_bps: Option<u64>,
/// Trigger attestation if publish_time advances by at least the
/// specified amount.
#[serde(default)]
pub publish_time_min_delta_secs: Option<u64>,
}
impl AttestationConditions {
/// Used by should_resend() to check if it needs to make the expensive RPC request
pub fn need_onchain_lookup(&self) -> bool {
// Bug trap for new fields that also need to be included in
// the returned expression
let AttestationConditions {
min_interval_ms: _min_interval_ms,
max_batch_jobs: _max_batch_jobs,
price_changed_bps,
publish_time_min_delta_secs,
rate_limit_interval_secs: _,
} = self;
price_changed_bps.is_some() || publish_time_min_delta_secs.is_some()
}
}
impl Default for AttestationConditions {
fn default() -> Self {
Self {
min_interval_ms: default_min_interval_ms(),
max_batch_jobs: default_max_batch_jobs(),
price_changed_bps: None,
publish_time_min_delta_secs: None,
rate_limit_interval_secs: default_rate_limit_interval_secs(),
}
}
}
#[derive(Clone, Default, Debug, Hash, Deserialize, Serialize, PartialEq, Eq)]
pub struct P2WSymbol {
/// User-defined human-readable name
pub name: Option<String>,
#[serde(
deserialize_with = "pubkey_string_de",
serialize_with = "pubkey_string_ser"
)]
pub product_addr: Pubkey,
#[serde(
deserialize_with = "pubkey_string_de",
serialize_with = "pubkey_string_ser"
)]
pub price_addr: Pubkey,
}
impl ToString for P2WSymbol {
fn to_string(&self) -> String {
self.name
.clone()
.unwrap_or(format!("Unnamed product {}", self.product_addr))
}
}
// Helper methods for stringified Solana addresses
fn pubkey_string_ser<S>(k: &Pubkey, ser: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
ser.serialize_str(&k.to_string())
}
fn pubkey_string_de<'de, D>(de: D) -> Result<Pubkey, D::Error>
where
D: Deserializer<'de>,
{
let pubkey_string = String::deserialize(de)?;
let pubkey = Pubkey::from_str(&pubkey_string).map_err(D::Error::custom)?;
Ok(pubkey)
}
fn opt_pubkey_string_ser<S>(k_opt: &Option<Pubkey>, ser: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let k_str_opt = (*k_opt).map(|k| k.to_string());
Option::<String>::serialize(&k_str_opt, ser)
}
fn opt_pubkey_string_de<'de, D>(de: D) -> Result<Option<Pubkey>, D::Error>
where
D: Deserializer<'de>,
{
match Option::<String>::deserialize(de)? {
Some(k) => Ok(Some(Pubkey::from_str(&k).map_err(D::Error::custom)?)),
None => Ok(None),
}
}
#[cfg(test)]
mod tests {
use {
super::*,
crate::attestation_cfg::SymbolConfig::{
Key,
Name,
},
solitaire::ErrBox,
};
#[test]
fn test_sanity() -> Result<(), ErrBox> {
let fastbois = SymbolGroupConfig {
group_name: "fast bois".to_owned(),
conditions: Some(AttestationConditions {
min_interval_ms: 5,
..Default::default()
}),
symbols: vec![
Name {
name: "ETHUSD".to_owned(),
},
Key {
name: Some("BTCUSD".to_owned()),
product: Pubkey::new_unique(),
price: Pubkey::new_unique(),
},
],
};
let slowbois = SymbolGroupConfig {
group_name: "slow bois".to_owned(),
conditions: Some(AttestationConditions {
min_interval_ms: 200,
..Default::default()
}),
symbols: vec![
Name {
name: "CNYAUD".to_owned(),
},
Key {
name: None,
product: Pubkey::new_unique(),
price: Pubkey::new_unique(),
},
],
};
let cfg = AttestationConfig {
min_msg_reuse_interval_ms: 1000,
max_msg_accounts: 100_000,
enable_healthcheck: true,
healthcheck_window_size: 100,
min_rpc_interval_ms: 2123,
mapping_addr: None,
mapping_reload_interval_mins: 42,
default_attestation_conditions: AttestationConditions::default(),
symbol_groups: vec![fastbois, slowbois],
};
let serialized = serde_yaml::to_string(&cfg)?;
let deserialized: AttestationConfig = serde_yaml::from_str(&serialized)?;
assert_eq!(cfg, deserialized);
Ok(())
}
#[test]
fn test_instantiate_batches() -> Result<(), ErrBox> {
let btc_product_key = Pubkey::new_unique();
let btc_price_key = Pubkey::new_unique();
let eth_product_key = Pubkey::new_unique();
let eth_price_key_1 = Pubkey::new_unique();
let eth_price_key_2 = Pubkey::new_unique();
let unk_product_key = Pubkey::new_unique();
let unk_price_key = Pubkey::new_unique();
let eth_dup_product_key = Pubkey::new_unique();
let eth_dup_price_key = Pubkey::new_unique();
let attestation_conditions_1 = AttestationConditions {
min_interval_ms: 5,
..Default::default()
};
let products = vec![
P2WProductAccount {
name: Some("ETHUSD".to_owned()),
key: eth_product_key,
price_account_keys: vec![eth_price_key_1, eth_price_key_2],
},
P2WProductAccount {
name: None,
key: unk_product_key,
price_account_keys: vec![unk_price_key],
},
];
let group1 = SymbolGroupConfig {
group_name: "group 1".to_owned(),
conditions: Some(attestation_conditions_1.clone()),
symbols: vec![
Key {
name: Some("BTCUSD".to_owned()),
price: btc_price_key,
product: btc_product_key,
},
Name {
name: "ETHUSD".to_owned(),
},
],
};
let group2 = SymbolGroupConfig {
group_name: "group 2".to_owned(),
conditions: None,
symbols: vec![Key {
name: Some("ETHUSD".to_owned()),
price: eth_dup_price_key,
product: eth_dup_product_key,
}],
};
let default_attestation_conditions = AttestationConditions {
min_interval_ms: 1,
..Default::default()
};
let cfg = AttestationConfig {
min_msg_reuse_interval_ms: 1000,
max_msg_accounts: 100_000,
healthcheck_window_size: 100,
enable_healthcheck: true,
min_rpc_interval_ms: 2123,
mapping_addr: None,
mapping_reload_interval_mins: 42,
default_attestation_conditions: default_attestation_conditions.clone(),
symbol_groups: vec![group1, group2],
};
let batches = cfg.instantiate_batches(&products, 2);
assert_eq!(
batches,
vec![
SymbolBatch {
group_name: "group 1".to_owned(),
conditions: attestation_conditions_1.clone(),
symbols: vec![
P2WSymbol {
name: Some("BTCUSD".to_owned()),
product_addr: btc_product_key,
price_addr: btc_price_key,
},
P2WSymbol {
name: Some("ETHUSD".to_owned()),
product_addr: eth_product_key,
price_addr: eth_price_key_1,
}
],
},
SymbolBatch {
group_name: "group 1".to_owned(),
conditions: attestation_conditions_1,
symbols: vec![P2WSymbol {
name: Some("ETHUSD".to_owned()),
product_addr: eth_product_key,
price_addr: eth_price_key_2,
}],
},
SymbolBatch {
group_name: "group 2".to_owned(),
conditions: default_attestation_conditions.clone(),
symbols: vec![P2WSymbol {
name: Some("ETHUSD".to_owned()),
product_addr: eth_dup_product_key,
price_addr: eth_dup_price_key,
}],
},
SymbolBatch {
group_name: "mapping".to_owned(),
conditions: default_attestation_conditions,
symbols: vec![P2WSymbol {
name: None,
product_addr: unk_product_key,
price_addr: unk_price_key,
}],
}
]
);
Ok(())
}
}
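// Illustrative sketch, not part of the deleted file: serializing a small
// config built from the structs above is a reliable way to see the exact
// YAML field spellings the serde derives expect, rather than hand-writing
// them from memory. The field values are placeholders.
fn print_example_yaml() -> Result<(), serde_yaml::Error> {
    let cfg = AttestationConfig {
        min_msg_reuse_interval_ms: 1000,
        max_msg_accounts: 100_000,
        enable_healthcheck: true,
        healthcheck_window_size: 100,
        min_rpc_interval_ms: 150,
        mapping_addr: None,
        mapping_reload_interval_mins: 15,
        default_attestation_conditions: AttestationConditions::default(),
        symbol_groups: vec![],
    };
    println!("{}", serde_yaml::to_string(&cfg)?);
    Ok(())
}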

View File

@ -1,159 +0,0 @@
use {
crate::{
attestation_cfg::SymbolBatch,
AttestationConditions,
P2WSymbol,
},
log::{
debug,
warn,
},
pyth_sdk_solana::state::PriceAccount,
solana_client::nonblocking::rpc_client::RpcClient,
std::time::{
Duration,
Instant,
},
};
/// Runtime representation of a batch. It refers to the original group
/// from the config.
#[derive(Debug)]
pub struct BatchState {
pub group_name: String,
pub symbols: Vec<P2WSymbol>,
pub last_known_symbol_states: Vec<Option<PriceAccount>>,
pub conditions: AttestationConditions,
pub last_job_finished_at: Instant,
}
impl BatchState {
pub fn new(group: &SymbolBatch) -> Self {
Self {
group_name: group.group_name.clone(),
symbols: group.symbols.clone(),
conditions: group.conditions.clone(),
last_known_symbol_states: vec![None; group.symbols.len()],
last_job_finished_at: Instant::now(),
}
}
/// Evaluate the configured attestation conditions for this
/// batch. RPC is used to update last known state. Returns
/// Some("<reason>") if any trigger condition was met. Only the
/// first encountered condition is mentioned.
pub async fn should_resend(&mut self, c: &RpcClient) -> Option<String> {
let mut ret = None;
let sym_count = self.symbols.len();
let pubkeys: Vec<_> = self.symbols.iter().map(|s| s.price_addr).collect();
// min interval
if self.last_job_finished_at.elapsed()
> Duration::from_millis(self.conditions.min_interval_ms)
{
            ret = Some(format!(
                "minimum interval of {}ms elapsed since the last attestation job",
                self.conditions.min_interval_ms
            ));
}
// Only lookup and compare symbols if the conditions require
if self.conditions.need_onchain_lookup() {
let new_symbol_states: Vec<Option<PriceAccount>> =
match c.get_multiple_accounts(&pubkeys).await {
Ok(acc_opts) => {
acc_opts
.into_iter()
.enumerate()
.map(|(idx, opt)| {
                            // Keep each Some(acc) only if load_price_account() succeeds; log and drop it otherwise
opt.and_then(|acc| {
pyth_sdk_solana::state::load_price_account(&acc.data)
.cloned() // load_price_account() transmutes the data reference into another reference, and owning acc_opts is not enough
.map_err(|e| {
warn!(
"Could not parse symbol {}/{}: {}",
idx, sym_count, e
);
e
})
.ok() // Err becomes None
})
})
.collect()
}
Err(e) => {
warn!("Could not look up any symbols on-chain: {}", e);
vec![None; sym_count]
}
};
for (idx, old_new_tup) in self
.last_known_symbol_states
.iter_mut() // Borrow mutably to make the update easier
.zip(new_symbol_states.iter())
.enumerate()
{
// Only evaluate this symbol if a triggering condition is not already met
if ret.is_some() {
break;
}
match old_new_tup {
(Some(old), Some(new)) => {
// publish_time_changed
if let Some(min_delta_secs) = self.conditions.publish_time_min_delta_secs {
if new.timestamp - old.timestamp > min_delta_secs as i64 {
ret = Some(format!(
"publish_time advanced by at least {}s for {:?}",
min_delta_secs,
self.symbols[idx].to_string(),
))
}
// price_changed_bps
} else if let Some(bps) = self.conditions.price_changed_bps {
let pct = bps as f64 / 100.0;
let price_pct_diff = ((old.agg.price as f64 - new.agg.price as f64)
/ old.agg.price as f64
* 100.0)
.abs();
if price_pct_diff > pct {
ret = Some(format!(
"price moved by at least {}% for {:?}",
pct,
self.symbols[idx].to_string()
))
}
}
}
_ => {
debug!(
"Symbol {:?} {}/{}, old or new state value is None, skipping...",
self.symbols[idx].to_string(),
idx + 1,
sym_count
);
}
}
}
// Update with newer state only if a condition was met. We
// don't want to shadow changes that may happen over a larger
// period between state lookups.
if ret.is_some() {
for (old, new) in self
.last_known_symbol_states
.iter_mut()
.zip(new_symbol_states.into_iter())
{
if new.is_some() {
*old = new;
}
}
}
}
ret
}
}
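// A quick sanity check (added for illustration) of the basis-point
// arithmetic in should_resend() above: 1 bps = 0.01%, so a
// price_changed_bps of 50 means a 0.5% threshold.
fn bps_triggered(old_price: i64, new_price: i64, bps: u32) -> bool {
    let pct_threshold = bps as f64 / 100.0;
    let pct_diff =
        ((old_price as f64 - new_price as f64) / old_price as f64 * 100.0).abs();
    pct_diff > pct_threshold
}

#[test]
fn bps_example() {
    assert!(bps_triggered(20_000, 20_150, 50)); // 0.75% move > 0.50% threshold
    assert!(!bps_triggered(20_000, 20_050, 50)); // 0.25% move < 0.50% threshold
}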

View File

@ -1,131 +0,0 @@
//! CLI options
use {
clap::{
Parser,
Subcommand,
},
solana_program::pubkey::Pubkey,
solana_sdk::commitment_config::CommitmentConfig,
std::{
net::SocketAddr,
path::PathBuf,
},
};
#[derive(Parser)]
#[clap(
about = "A client for the pyth2wormhole Solana program",
author = "Pyth Network Contributors"
)]
pub struct Cli {
#[clap(
long,
help = "Identity JSON file for the entity meant to cover transaction costs",
default_value = "~/.config/solana/id.json"
)]
pub payer: String,
#[clap(short, long, default_value = "http://localhost:8899")]
pub rpc_url: String,
#[clap(long, default_value = "confirmed")]
pub commitment: CommitmentConfig,
#[clap(long)]
pub p2w_addr: Pubkey,
#[clap(subcommand)]
pub action: Action,
}
#[derive(Subcommand)]
pub enum Action {
#[clap(about = "Initialize a pyth2wormhole program freshly deployed under <p2w_addr>")]
Init {
/// The bridge program account
#[clap(short = 'w', long = "wh-prog")]
wh_prog: Pubkey,
#[clap(short = 'o', long = "owner")]
owner_addr: Pubkey,
#[clap(short = 'p', long = "pyth-owner")]
pyth_owner_addr: Pubkey,
/// Option<> makes sure not specifying this flag does not imply "false"
#[clap(long = "is-active")]
is_active: Option<bool>,
#[clap(long = "ops-owner")]
ops_owner_addr: Option<Pubkey>,
},
#[clap(
about = "Use an existing pyth2wormhole program to attest product price information to another chain"
)]
// Note: defaults target SOL mainnet-beta conditions at implementation time
Attest {
#[clap(short = 'f', long = "--config", help = "Attestation YAML config")]
attestation_cfg: PathBuf,
#[clap(
short = 't',
long = "--timeout",
help = "How many seconds to wait before giving up on tx confirmation.",
default_value = "20"
)]
confirmation_timeout_secs: u64,
#[clap(
short = 'm',
long,
help = "Address to use for serving Prometheus metrics.",
default_value = "[::]:3000"
)]
metrics_bind_addr: SocketAddr,
},
#[clap(about = "Retrieve a pyth2wormhole program's current settings")]
GetConfig,
#[clap(about = "Update an existing pyth2wormhole program's settings")]
SetConfig {
/// Current owner keypair path
#[clap(
long,
default_value = "~/.config/solana/id.json",
help = "Keypair file for the current config owner"
)]
owner: String,
/// New owner to set
#[clap(long = "new-owner")]
new_owner_addr: Option<Pubkey>,
#[clap(long = "new-wh-prog")]
new_wh_prog: Option<Pubkey>,
#[clap(long = "new-pyth-owner")]
new_pyth_owner_addr: Option<Pubkey>,
#[clap(long = "is-active")]
is_active: Option<bool>,
#[clap(long = "ops-owner")]
ops_owner_addr: Option<Pubkey>,
#[clap(long = "remove-ops-owner", conflicts_with = "ops-owner-addr")]
remove_ops_owner: bool,
},
#[clap(
about = "Migrate existing pyth2wormhole program settings to a newer format version. Client version must match the deployed contract."
)]
Migrate {
/// owner keypair path
#[clap(
long,
default_value = "~/.config/solana/id.json",
help = "Keypair file for the current config owner"
)]
owner: String,
},
#[clap(about = "Print out emitter address for the specified pyth2wormhole contract")]
GetEmitter,
#[clap(about = "Set the value of is_active config as ops_owner")]
SetIsActive {
/// Current ops owner keypair path
#[clap(
long,
default_value = "~/.config/solana/id.json",
help = "Keypair file for the current ops owner"
)]
ops_owner: String,
#[clap(
index = 1,
possible_values = ["true", "false"],
)]
new_is_active: String,
},
}
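// Illustrative sketch (not in the original file): how these options line up
// on a command line. The subcommand and flag names follow clap's derive
// conventions for the definitions above; the address and file name are
// placeholders.
fn example_invocation() -> Cli {
    Cli::parse_from([
        "pyth2wormhole-client",
        "--p2w-addr",
        "11111111111111111111111111111111", // placeholder program id
        "attest",
        "-f",
        "attestation_cfg.yaml",
    ])
}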

View File

@ -1,38 +0,0 @@
use {
    serde::{
        de::Error,
        Deserialize,
        Deserializer,
        Serialize,
        Serializer,
    },
    solana_program::pubkey::Pubkey,
    std::str::FromStr,
};

#[derive(Deserialize, Serialize)]
pub struct Config {
    symbols: Vec<P2WSymbol>,
}

/// Config entry for a Pyth2Wormhole product + price pair
#[derive(Deserialize, Serialize)]
pub struct P2WSymbol {
    /// Optional human-readable name, never used on-chain; makes
    /// attester logs and the config easier to understand
    name: Option<String>,
    #[serde(
        deserialize_with = "pubkey_string_de",
        serialize_with = "pubkey_string_ser"
    )]
    product_addr: Pubkey,
    #[serde(
        deserialize_with = "pubkey_string_de",
        serialize_with = "pubkey_string_ser"
    )]
    price_addr: Pubkey,
}

// Addresses are stored as base58 strings in the YAML, mirroring attestation_cfg.rs
fn pubkey_string_ser<S: Serializer>(k: &Pubkey, ser: S) -> Result<S::Ok, S::Error> {
    ser.serialize_str(&k.to_string())
}

fn pubkey_string_de<'de, D: Deserializer<'de>>(de: D) -> Result<Pubkey, D::Error> {
    Pubkey::from_str(&String::deserialize(de)?).map_err(D::Error::custom)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_sanity() -> Result<(), serde_yaml::Error> {
        let serialized = r#"
symbols:
  - name: ETH/USD
    product_addr: 11111111111111111111111111111111
    price_addr: 11111111111111111111111111111111
  - name: SOL/EUR
    product_addr: 4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi
    price_addr: 4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi
  - name: BTC/CNY
    product_addr: 8qbHbw2BbbTHBW1sbeqakYXVKRQM8Ne7pLK7m6CVfeR
    price_addr: 8qbHbw2BbbTHBW1sbeqakYXVKRQM8Ne7pLK7m6CVfeR
  - # no name
    product_addr: 8qbHbw2BbbTHBW1sbeqakYXVKRQM8Ne7pLK7m6CVfeR
    price_addr: 8qbHbw2BbbTHBW1sbeqakYXVKRQM8Ne7pLK7m6CVfeR
"#;
        let _deserialized: Config = serde_yaml::from_str(serialized)?;
        Ok(())
    }
}

View File

@ -1,54 +0,0 @@
use {
crate::attestation_cfg,
std::{
collections::VecDeque,
convert::TryInto,
sync::Arc,
},
tokio::sync::Mutex,
};
lazy_static::lazy_static! {
    pub static ref HEALTHCHECK_STATE: Arc<Mutex<HealthCheckState>> = Arc::new(Mutex::new(
        HealthCheckState::new(
            attestation_cfg::default_healthcheck_window_size()
                .try_into()
                .expect("could not convert window size to usize"),
            attestation_cfg::default_enable_healthcheck(),
        )
    ));
}
/// Helper structure for deciding service health
pub struct HealthCheckState {
/// Whether to report the healthy/unhealthy status
pub enable: bool,
    /// Sliding window over the last `max_window_size` attestation
    /// results (true = ok, false = error); newest entries sit at the front
pub window: VecDeque<bool>,
/// Window size
pub max_window_size: usize,
}
impl HealthCheckState {
pub fn new(max_window_size: usize, enable: bool) -> Self {
Self {
enable,
window: VecDeque::with_capacity(max_window_size),
max_window_size,
}
}
    /// Check service health. Returns None if the healthcheck is
    /// disabled or not enough data is present yet.
    pub fn is_healthy(&self) -> Option<bool> {
        if self.window.len() >= self.max_window_size && self.enable {
            // Healthy if at least one attestation in the window
            // succeeded; an all-false window reports unhealthy.
            Some(self.window.iter().any(|entry| *entry))
} else {
// The window isn't big enough yet or the healthcheck is disabled
None
}
}
/// Rotate the window
pub fn add_result(&mut self, res: bool) {
self.window.push_front(res);
// Trim window back to size if needed. truncate() deletes from
// the back and has no effect if new size is greater than
// current size.
self.window.truncate(self.max_window_size);
}
}
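// Illustrative sketch: the window stays silent until it holds
// max_window_size results, then reports unhealthy only when every result
// in it is a failure.
#[test]
fn window_of_three() {
    let mut hc = HealthCheckState::new(3, true);
    hc.add_result(false);
    hc.add_result(false);
    assert_eq!(hc.is_healthy(), None); // only 2 of 3 results so far
    hc.add_result(false);
    assert_eq!(hc.is_healthy(), Some(false)); // all 3 results failed
    hc.add_result(true); // newest result evicts the oldest failure
    assert_eq!(hc.is_healthy(), Some(true));
}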

View File

@ -1,539 +0,0 @@
pub mod attestation_cfg;
pub mod batch_state;
pub mod healthcheck;
pub mod message;
pub mod util;
pub use {
attestation_cfg::{
AttestationConditions,
AttestationConfig,
P2WSymbol,
},
batch_state::BatchState,
healthcheck::{
HealthCheckState,
HEALTHCHECK_STATE,
},
message::P2WMessageQueue,
pyth_wormhole_attester::Pyth2WormholeConfig,
util::{
start_metrics_server,
RLMutex,
RLMutexGuard,
},
};
use {
borsh::{
BorshDeserialize,
BorshSerialize,
},
bridge::{
accounts::{
Bridge,
FeeCollector,
Sequence,
SequenceDerivationData,
},
types::ConsistencyLevel,
},
log::{
debug,
trace,
warn,
},
pyth_sdk_solana::state::{
load_mapping_account,
load_price_account,
load_product_account,
},
pyth_wormhole_attester::{
attestation_state::AttestationStatePDA,
config::{
OldP2WConfigAccount,
P2WConfigAccount,
},
message::{
P2WMessage,
P2WMessageDrvData,
},
AttestData,
},
pyth_wormhole_attester_sdk::P2WEmitter,
solana_client::nonblocking::rpc_client::RpcClient,
solana_program::{
hash::Hash,
instruction::{
AccountMeta,
Instruction,
},
pubkey::Pubkey,
system_program,
sysvar::{
clock,
rent,
},
},
solana_sdk::{
signer::{
keypair::Keypair,
Signer,
},
transaction::Transaction,
},
solitaire::{
processors::seeded::Seeded,
AccountState,
ErrBox,
},
};
/// Future-friendly version of solitaire::ErrBox
pub type ErrBoxSend = Box<dyn std::error::Error + Send + Sync>;
pub fn gen_init_tx(
payer: Keypair,
p2w_addr: Pubkey,
config: Pyth2WormholeConfig,
latest_blockhash: Hash,
) -> Result<Transaction, ErrBox> {
let payer_pubkey = payer.pubkey();
let acc_metas = vec![
// new_config
AccountMeta::new(
P2WConfigAccount::<{ AccountState::Uninitialized }>::key(None, &p2w_addr),
false,
),
// payer
AccountMeta::new(payer.pubkey(), true),
// system_program
AccountMeta::new(system_program::id(), false),
];
let ix_data = (
pyth_wormhole_attester::instruction::Instruction::Initialize,
config,
);
let ix = Instruction::new_with_bytes(p2w_addr, ix_data.try_to_vec()?.as_slice(), acc_metas);
let signers = vec![&payer];
let tx_signed = Transaction::new_signed_with_payer::<Vec<&Keypair>>(
&[ix],
Some(&payer_pubkey),
&signers,
latest_blockhash,
);
Ok(tx_signed)
}
pub fn get_set_config_ix(
p2w_addr: &Pubkey,
owner_pubkey: &Pubkey,
payer_pubkey: &Pubkey,
new_config: Pyth2WormholeConfig,
) -> Result<Instruction, ErrBox> {
let acc_metas = vec![
// config
AccountMeta::new(
P2WConfigAccount::<{ AccountState::Initialized }>::key(None, p2w_addr),
false,
),
// current_owner
AccountMeta::new(*owner_pubkey, true),
// payer
AccountMeta::new(*payer_pubkey, true),
// system_program
AccountMeta::new(system_program::id(), false),
];
let ix_data = (
pyth_wormhole_attester::instruction::Instruction::SetConfig,
new_config,
);
Ok(Instruction::new_with_bytes(
*p2w_addr,
ix_data.try_to_vec()?.as_slice(),
acc_metas,
))
}
pub fn gen_set_config_tx(
payer: Keypair,
p2w_addr: Pubkey,
owner: Keypair,
new_config: Pyth2WormholeConfig,
latest_blockhash: Hash,
) -> Result<Transaction, ErrBox> {
let ix = get_set_config_ix(&p2w_addr, &owner.pubkey(), &payer.pubkey(), new_config)?;
let signers = vec![&owner, &payer];
let tx_signed = Transaction::new_signed_with_payer::<Vec<&Keypair>>(
&[ix],
Some(&payer.pubkey()),
&signers,
latest_blockhash,
);
Ok(tx_signed)
}
pub fn get_set_is_active_ix(
p2w_addr: &Pubkey,
ops_owner_pubkey: &Pubkey,
payer_pubkey: &Pubkey,
new_is_active: bool,
) -> Result<Instruction, ErrBox> {
let acc_metas = vec![
// config
AccountMeta::new(
P2WConfigAccount::<{ AccountState::Initialized }>::key(None, p2w_addr),
false,
),
// ops_owner
AccountMeta::new(*ops_owner_pubkey, true),
// payer
AccountMeta::new(*payer_pubkey, true),
];
let ix_data = (
pyth_wormhole_attester::instruction::Instruction::SetIsActive,
new_is_active,
);
Ok(Instruction::new_with_bytes(
*p2w_addr,
ix_data.try_to_vec()?.as_slice(),
acc_metas,
))
}
pub fn gen_set_is_active_tx(
payer: Keypair,
p2w_addr: Pubkey,
ops_owner: Keypair,
new_is_active: bool,
latest_blockhash: Hash,
) -> Result<Transaction, ErrBox> {
let ix = get_set_is_active_ix(
&p2w_addr,
&ops_owner.pubkey(),
&payer.pubkey(),
new_is_active,
)?;
let signers = vec![&ops_owner, &payer];
let tx_signed = Transaction::new_signed_with_payer::<Vec<&Keypair>>(
&[ix],
Some(&payer.pubkey()),
&signers,
latest_blockhash,
);
Ok(tx_signed)
}
pub fn gen_migrate_tx(
payer: Keypair,
p2w_addr: Pubkey,
owner: Keypair,
latest_blockhash: Hash,
) -> Result<Transaction, ErrBox> {
let payer_pubkey = payer.pubkey();
let acc_metas = vec![
// new_config
AccountMeta::new(
P2WConfigAccount::<{ AccountState::Uninitialized }>::key(None, &p2w_addr),
false,
),
// old_config
AccountMeta::new(OldP2WConfigAccount::key(None, &p2w_addr), false),
// owner
AccountMeta::new(owner.pubkey(), true),
// payer
AccountMeta::new(payer.pubkey(), true),
// system_program
AccountMeta::new(system_program::id(), false),
];
let ix_data = (
pyth_wormhole_attester::instruction::Instruction::Migrate,
(),
);
let ix = Instruction::new_with_bytes(p2w_addr, ix_data.try_to_vec()?.as_slice(), acc_metas);
let signers = vec![&owner, &payer];
let tx_signed = Transaction::new_signed_with_payer::<Vec<&Keypair>>(
&[ix],
Some(&payer_pubkey),
&signers,
latest_blockhash,
);
Ok(tx_signed)
}
/// Get the current config account data for given p2w program address
pub async fn get_config_account(
rpc_client: &RpcClient,
p2w_addr: &Pubkey,
) -> Result<Pyth2WormholeConfig, ErrBox> {
let p2w_config_addr = P2WConfigAccount::<{ AccountState::Initialized }>::key(None, p2w_addr);
let config = Pyth2WormholeConfig::try_from_slice(
rpc_client
.get_account_data(&p2w_config_addr)
.await?
.as_slice(),
)?;
Ok(config)
}
/// Generate an Instruction for making the attest() contract
/// call.
pub fn gen_attest_tx(
p2w_addr: Pubkey,
p2w_config: &Pyth2WormholeConfig, // Must be fresh, not retrieved inside to keep side effects away
payer: &Keypair,
wh_msg_id: u64,
symbols: &[P2WSymbol],
latest_blockhash: Hash,
// Desired rate limit interval. If all of the symbols are over
// the limit, the tx will fail. 0 means off.
rate_limit_interval_secs: u32,
) -> Result<Transaction, ErrBoxSend> {
let emitter_addr = P2WEmitter::key(None, &p2w_addr);
let seq_addr = Sequence::key(
&SequenceDerivationData {
emitter_key: &emitter_addr,
},
&p2w_config.wh_prog,
);
let p2w_config_addr = P2WConfigAccount::<{ AccountState::Initialized }>::key(None, &p2w_addr);
if symbols.len() > p2w_config.max_batch_size as usize {
return Err((format!(
"Expected up to {} symbols for batch, {} were found",
p2w_config.max_batch_size,
symbols.len()
))
.into());
}
// Initial attest() accounts
let mut acc_metas = vec![
// payer
AccountMeta::new(payer.pubkey(), true),
// system_program
AccountMeta::new_readonly(system_program::id(), false),
// config
AccountMeta::new_readonly(p2w_config_addr, false),
];
// Batch contents and padding if applicable
let mut padded_symbols = {
let mut not_padded: Vec<_> = symbols
.iter()
.flat_map(|s| {
let state_address = AttestationStatePDA::key(&s.price_addr, &p2w_addr);
vec![
AccountMeta::new(state_address, false),
AccountMeta::new_readonly(s.price_addr, false),
]
})
.collect();
// Align to max batch size with null accounts
let mut padding_accounts =
vec![
AccountMeta::new_readonly(Pubkey::new_from_array([0u8; 32]), false);
2 * (p2w_config.max_batch_size as usize - symbols.len())
];
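        // e.g. with max_batch_size = 5 and 3 symbols, this adds
        // 2 * (5 - 3) = 4 null account metas (a state + price pair per
        // missing symbol), keeping the instruction's account layout fixed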
not_padded.append(&mut padding_accounts);
not_padded
};
acc_metas.append(&mut padded_symbols);
// Continue with other pyth_wormhole_attester accounts
let mut acc_metas_remainder = vec![
// clock
AccountMeta::new_readonly(clock::id(), false),
// wh_prog
AccountMeta::new_readonly(p2w_config.wh_prog, false),
// wh_bridge
AccountMeta::new(
Bridge::<{ AccountState::Initialized }>::key(None, &p2w_config.wh_prog),
false,
),
// wh_message
AccountMeta::new(
P2WMessage::key(
&P2WMessageDrvData {
id: wh_msg_id,
batch_size: symbols.len() as u16,
message_owner: payer.pubkey(),
},
&p2w_addr,
),
false,
),
// wh_emitter
AccountMeta::new_readonly(emitter_addr, false),
// wh_sequence
AccountMeta::new(seq_addr, false),
// wh_fee_collector
AccountMeta::new(FeeCollector::<'_>::key(None, &p2w_config.wh_prog), false),
AccountMeta::new_readonly(rent::id(), false),
];
acc_metas.append(&mut acc_metas_remainder);
let ix_data = (
pyth_wormhole_attester::instruction::Instruction::Attest,
AttestData {
consistency_level: ConsistencyLevel::Confirmed,
message_account_id: wh_msg_id,
rate_limit_interval_secs,
},
);
let ix = Instruction::new_with_bytes(p2w_addr, ix_data.try_to_vec()?.as_slice(), acc_metas);
let tx_signed = Transaction::new_signed_with_payer::<Vec<&Keypair>>(
&[ix],
Some(&payer.pubkey()),
&vec![payer],
latest_blockhash,
);
Ok(tx_signed)
}
/// Enumerates all products and their prices in a Pyth mapping.
/// Returns map of: product address => [price addresses]
pub async fn crawl_pyth_mapping(
rpc_client: &RpcClient,
first_mapping_addr: &Pubkey,
) -> Result<Vec<P2WProductAccount>, ErrBox> {
let mut ret: Vec<P2WProductAccount> = vec![];
let mut n_mappings = 1; // We assume the first one must be valid
let mut n_products_total = 0; // Grand total products in all mapping accounts
let mut n_prices_total = 0; // Grand total prices in all product accounts in all mapping accounts
let mut mapping_addr = *first_mapping_addr;
// loop until the last non-zero MappingAccount.next account
loop {
let mapping_bytes = rpc_client.get_account_data(&mapping_addr).await?;
let mapping = match load_mapping_account(&mapping_bytes) {
Ok(p) => p,
Err(e) => {
warn!(
"Mapping: Could not parse account {} as a Pyth mapping, crawling terminated. Error: {:?}",
mapping_addr, e
);
break;
}
};
// Products in this mapping account
let mut n_mapping_products = 0;
// loop through all products in this mapping; filter out zeroed-out empty product slots
for prod_addr in mapping.products.iter().filter(|p| *p != &Pubkey::default()) {
let prod_bytes = rpc_client.get_account_data(prod_addr).await?;
let prod = match load_product_account(&prod_bytes) {
Ok(p) => p,
Err(e) => {
warn!("Mapping {}: Could not parse account {} as a Pyth product, skipping to next product. Error: {:?}", mapping_addr, prod_addr, e);
continue;
}
};
let mut prod_name = None;
for (key, val) in prod.iter() {
if key.eq_ignore_ascii_case("symbol") {
prod_name = Some(val.to_owned());
}
}
let mut price_addr = prod.px_acc;
let mut n_prod_prices = 0;
            // The product might have no price; this can happen in Tilt due to a
            // race condition, a failed tx to add the price, etc.
if price_addr == Pubkey::default() {
debug!(
"Found product with addr {} that has no prices. \
                     This should not happen in a production environment.",
prod_addr
);
continue;
}
// loop until the last non-zero PriceAccount.next account
let mut price_accounts: Vec<Pubkey> = vec![];
loop {
let price_bytes = rpc_client.get_account_data(&price_addr).await?;
let price = match load_price_account(&price_bytes) {
Ok(p) => p,
Err(e) => {
warn!("Product {}: Could not parse account {} as a Pyth price, skipping to next product. Error: {:?}", prod_addr, price_addr, e);
break;
}
};
price_accounts.push(price_addr);
n_prod_prices += 1;
if price.next == Pubkey::default() {
trace!(
"Product {}: processed {} price(s)",
prod_addr,
n_prod_prices
);
break;
}
price_addr = price.next;
}
ret.push(P2WProductAccount {
key: *prod_addr,
name: prod_name.clone(),
price_account_keys: price_accounts,
});
            n_prices_total += n_prod_prices;
            // Count this product towards the per-mapping total
            n_mapping_products += 1;
        }
        n_products_total += n_mapping_products;
// Traverse other mapping accounts if applicable
if mapping.next == Pubkey::default() {
trace!(
"Mapping {}: processed {} products",
mapping_addr,
n_mapping_products
);
break;
}
mapping_addr = mapping.next;
n_mappings += 1;
}
debug!(
"Processed {} price(s) in {} product account(s), in {} mapping account(s)",
n_prices_total, n_products_total, n_mappings
);
Ok(ret)
}
#[derive(Clone, Debug)]
pub struct P2WProductAccount {
pub key: Pubkey,
pub name: Option<String>,
pub price_account_keys: Vec<Pubkey>,
}
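// Hypothetical helper, not in the original file: crawl a mapping and count
// how many price accounts the attester would discover.
async fn count_prices(rpc: &RpcClient, mapping: &Pubkey) -> Result<usize, ErrBox> {
    let products = crawl_pyth_mapping(rpc, mapping).await?;
    Ok(products.iter().map(|p| p.price_account_keys.len()).sum())
}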

View File

@ -1,781 +0,0 @@
use {
pyth_wormhole_attester::error::AttesterCustomError,
pyth_wormhole_attester_client::util::send_and_confirm_transaction_with_config,
solana_client::rpc_config::RpcSendTransactionConfig,
solana_program::instruction::InstructionError,
solana_sdk::transaction::TransactionError,
};
pub mod cli;
use {
clap::Parser,
cli::{
Action,
Cli,
},
futures::future::{
Future,
TryFutureExt,
},
generic_array::GenericArray,
lazy_static::lazy_static,
log::{
debug,
error,
info,
warn,
LevelFilter,
},
prometheus::{
register_histogram,
register_int_counter,
register_int_gauge,
Histogram,
IntCounter,
IntGauge,
},
pyth_wormhole_attester::{
attest::P2W_MAX_BATCH_SIZE,
Pyth2WormholeConfig,
},
pyth_wormhole_attester_client::{
attestation_cfg::SymbolBatch,
crawl_pyth_mapping,
gen_attest_tx,
gen_init_tx,
gen_migrate_tx,
gen_set_config_tx,
gen_set_is_active_tx,
get_config_account,
healthcheck::HealthCheckState,
start_metrics_server,
AttestationConfig,
BatchState,
ErrBoxSend,
P2WMessageQueue,
P2WSymbol,
RLMutex,
HEALTHCHECK_STATE,
},
pyth_wormhole_attester_sdk::P2WEmitter,
sha3::{
Digest,
Sha3_256,
},
solana_client::{
nonblocking::rpc_client::RpcClient,
rpc_config::RpcTransactionConfig,
},
solana_program::pubkey::Pubkey,
solana_sdk::{
commitment_config::CommitmentConfig,
signature::read_keypair_file,
signer::keypair::Keypair,
},
solana_transaction_status::UiTransactionEncoding,
solitaire::{
processors::seeded::Seeded,
ErrBox,
},
std::{
fs::File,
net::SocketAddr,
sync::Arc,
time::{
Duration,
Instant,
},
},
tokio::{
sync::{
Mutex,
Semaphore,
},
task::JoinHandle,
},
};
pub const SEQNO_PREFIX: &str = "Program log: Sequence: ";
lazy_static! {
static ref ATTESTATIONS_OK_CNT: IntCounter =
register_int_counter!("attestations_ok", "Number of successful attestations")
.expect("FATAL: Could not instantiate ATTESTATIONS_OK_CNT");
static ref ATTESTATIONS_ERR_CNT: IntCounter =
register_int_counter!("attestations_err", "Number of failed attestations")
.expect("FATAL: Could not instantiate ATTESTATIONS_ERR_CNT");
static ref LAST_SEQNO_GAUGE: IntGauge = register_int_gauge!(
"last_seqno",
"Latest sequence number produced by this attester"
)
.expect("FATAL: Could not instantiate LAST_SEQNO_GAUGE");
static ref SOL_RPC_TX_PROCESSING_HIST: Histogram = register_histogram!(
"sol_rpc_tx_processing",
"How long in seconds it takes to send a transaction to the Solana RPC",
vec![0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 5.0, 10.0, 20.0, 30.0, 60.0] // Buckets, 1.0 = 1 second
)
.expect("FATAL: Could not instantiate SOL_RPC_TX_PROCESSING_HIST");
}
#[tokio::main(flavor = "multi_thread")]
async fn main() -> Result<(), ErrBox> {
let cli = Cli::parse();
init_logging();
    // All other CLI actions make RPC requests; this one is explicitly
    // off-chain
if let Action::GetEmitter = cli.action {
let emitter_addr = P2WEmitter::key(None, &cli.p2w_addr);
println!("{emitter_addr}");
// Exit early
return Ok(());
}
let payer = read_keypair_file(&*shellexpand::tilde(&cli.payer))?;
let rpc_client = RpcClient::new_with_commitment(cli.rpc_url.clone(), cli.commitment);
let p2w_addr = cli.p2w_addr;
let latest_blockhash = rpc_client.get_latest_blockhash().await?;
match cli.action {
Action::Init {
owner_addr,
pyth_owner_addr,
wh_prog,
is_active,
ops_owner_addr,
} => {
let tx = gen_init_tx(
payer,
p2w_addr,
Pyth2WormholeConfig {
owner: owner_addr,
wh_prog,
pyth_owner: pyth_owner_addr,
is_active: is_active.unwrap_or(true),
max_batch_size: P2W_MAX_BATCH_SIZE,
ops_owner: ops_owner_addr,
},
latest_blockhash,
)?;
rpc_client
.send_and_confirm_transaction_with_spinner(&tx)
.await?;
println!(
"Initialized with config:\n{:?}",
get_config_account(&rpc_client, &p2w_addr).await?
);
}
Action::GetConfig => {
println!("{:?}", get_config_account(&rpc_client, &p2w_addr).await?);
}
Action::SetConfig {
ref owner,
new_owner_addr,
new_wh_prog,
new_pyth_owner_addr,
is_active,
ops_owner_addr,
remove_ops_owner,
} => {
let old_config = get_config_account(&rpc_client, &p2w_addr).await?;
let new_ops_owner = if remove_ops_owner {
None
} else if let Some(given_ops_owner) = ops_owner_addr {
Some(given_ops_owner)
} else {
old_config.ops_owner
};
let tx = gen_set_config_tx(
payer,
p2w_addr,
read_keypair_file(&*shellexpand::tilde(&owner))?,
Pyth2WormholeConfig {
owner: new_owner_addr.unwrap_or(old_config.owner),
wh_prog: new_wh_prog.unwrap_or(old_config.wh_prog),
pyth_owner: new_pyth_owner_addr.unwrap_or(old_config.pyth_owner),
is_active: is_active.unwrap_or(old_config.is_active),
max_batch_size: P2W_MAX_BATCH_SIZE,
ops_owner: new_ops_owner,
},
latest_blockhash,
)?;
rpc_client
.send_and_confirm_transaction_with_spinner(&tx)
.await?;
println!(
"Applied config:\n{:?}",
get_config_account(&rpc_client, &p2w_addr).await?
);
}
Action::Migrate { ref owner } => {
let tx = gen_migrate_tx(
payer,
p2w_addr,
read_keypair_file(&*shellexpand::tilde(&owner))?,
latest_blockhash,
)?;
rpc_client
.send_and_confirm_transaction_with_spinner(&tx)
.await?;
println!(
"Applied config:\n{:?}",
get_config_account(&rpc_client, &p2w_addr).await?
);
}
Action::Attest {
ref attestation_cfg,
confirmation_timeout_secs,
metrics_bind_addr,
} => {
// Load the attestation config yaml
let attestation_cfg: AttestationConfig =
serde_yaml::from_reader(File::open(attestation_cfg)?)?;
// Derive seeded accounts
let emitter_addr = P2WEmitter::key(None, &p2w_addr);
info!("Using emitter addr {}", emitter_addr);
            // Note: For global rate-limiting of RPC requests, we use a
// custom Mutex wrapper which enforces a delay of rpc_interval
// between RPC accesses.
let rpc_cfg = Arc::new(RLMutex::new(
RpcCfg {
url: cli.rpc_url,
timeout: Duration::from_secs(confirmation_timeout_secs),
commitment: cli.commitment,
},
Duration::from_millis(attestation_cfg.min_rpc_interval_ms),
));
handle_attest(rpc_cfg, payer, p2w_addr, attestation_cfg, metrics_bind_addr).await?;
}
Action::GetEmitter => unreachable! {}, // It is handled early in this function.
Action::SetIsActive {
ops_owner,
new_is_active,
} => {
let tx = gen_set_is_active_tx(
payer,
p2w_addr,
read_keypair_file(&*shellexpand::tilde(&ops_owner))?,
new_is_active.eq_ignore_ascii_case("true"),
latest_blockhash,
)?;
rpc_client
.send_and_confirm_transaction_with_spinner(&tx)
.await?;
println!(
"Applied config:\n{:?}",
get_config_account(&rpc_client, &p2w_addr).await?
);
}
}
Ok(())
}
/// Continuously send batch attestations for symbols of an attestation config.
async fn handle_attest(
rpc_cfg: Arc<RLMutex<RpcCfg>>,
payer: Keypair,
p2w_addr: Pubkey,
attestation_cfg: AttestationConfig,
metrics_bind_addr: SocketAddr,
) -> Result<(), ErrBox> {
// Update healthcheck window size from config
if attestation_cfg.healthcheck_window_size == 0 {
return Err(format!(
"{} must be above 0",
stringify!(attestation_cfg.healthcheck_window_size)
)
.into());
}
*HEALTHCHECK_STATE.lock().await = HealthCheckState::new(
attestation_cfg.healthcheck_window_size as usize,
attestation_cfg.enable_healthcheck,
);
if !attestation_cfg.enable_healthcheck {
warn!("WARNING: Healthcheck is disabled");
}
tokio::spawn(start_metrics_server(metrics_bind_addr));
info!("Started serving metrics on {}", metrics_bind_addr);
info!(
"Crawling mapping {:?} every {} minutes",
attestation_cfg.mapping_addr, attestation_cfg.mapping_reload_interval_mins
);
// Used for easier detection of config changes
let mut hasher = Sha3_256::new();
let mut old_sched_futs_state: Option<(JoinHandle<_>, GenericArray<u8, _>)> = None; // (old_futs_handle, old_config_hash)
// For enforcing min_msg_reuse_interval_ms, we keep a piece of
// state that creates or reuses accounts if enough time had
// passed. It is crucial that this queue is reused across mapping
// lookups, so that previous symbol set's messages have enough
// time to be picked up by Wormhole guardians.
let message_q_mtx = Arc::new(Mutex::new(P2WMessageQueue::new(
Duration::from_millis(attestation_cfg.min_msg_reuse_interval_ms),
attestation_cfg.max_msg_accounts as usize,
)));
let mut batch_cfg = vec![];
// This loop cranks attestations without interruption. This is
// achieved by spinning up a new up-to-date symbol set before
// letting go of the previous one. Additionally, hash of on-chain
// and attestation configs is used to prevent needless reloads of
// an unchanged symbol set.
loop {
let start_time = Instant::now(); // Helps timekeep mapping lookups accurately
let config = match get_config_account(&lock_and_make_rpc(&rpc_cfg).await, &p2w_addr).await {
Ok(c) => c,
Err(e) => {
error!(
"Could not look up latest on-chain config in top-level loop: {:?}",
e
);
continue;
}
};
// Use the mapping if specified
// If we cannot query the mapping account, retain the existing batch configuration.
batch_cfg = attestation_config_to_batches(
&rpc_cfg,
&attestation_cfg,
config.max_batch_size as usize,
)
.await
.unwrap_or(batch_cfg);
// Hash currently known config
hasher.update(serde_yaml::to_vec(&batch_cfg)?);
hasher.update(borsh::to_vec(&config)?);
let new_cfg_hash = hasher.finalize_reset();
if let Some((old_handle, old_cfg_hash)) = old_sched_futs_state.as_ref() {
// Ignore unchanged configs
if &new_cfg_hash == old_cfg_hash {
info!("Note: Attestation config and on-chain config unchanged, not stopping existing attestation sched jobs");
} else {
// Process changed config into attestation scheduling futures
info!("Spinning up attestation sched jobs");
// Start the new sched futures
let new_sched_futs_handle = tokio::spawn(prepare_attestation_sched_jobs(
&batch_cfg,
&config,
&rpc_cfg,
&p2w_addr,
&payer,
message_q_mtx.clone(),
));
// Quit old sched futures
old_handle.abort();
// The just started futures become the on-going attestation state
old_sched_futs_state = Some((new_sched_futs_handle, new_cfg_hash));
}
} else {
// Base case for first attestation attempt
old_sched_futs_state = Some((
tokio::spawn(prepare_attestation_sched_jobs(
&batch_cfg,
&config,
&rpc_cfg,
&p2w_addr,
&payer,
message_q_mtx.clone(),
)),
new_cfg_hash,
));
}
// Sum up elapsed time, wait for next run accurately
let target = Duration::from_secs(attestation_cfg.mapping_reload_interval_mins * 60);
let elapsed = start_time.elapsed();
let remaining = target.saturating_sub(elapsed);
if remaining == Duration::from_secs(0) {
warn!(
"Processing took more than desired mapping lookup interval of {} seconds, not sleeping. Consider increasing {}",
target.as_secs(),
// stringify prints the up-to-date setting name automatically
stringify!(attestation_cfg.mapping_reload_interval_mins)
);
} else {
info!(
"Processing new mapping took {}.{}s, next config/mapping refresh in {}.{}s",
elapsed.as_secs(),
elapsed.subsec_millis(),
remaining.as_secs(),
remaining.subsec_millis()
);
}
tokio::time::sleep(remaining).await;
}
}
#[derive(Clone)]
pub struct RpcCfg {
pub url: String,
pub timeout: Duration,
pub commitment: CommitmentConfig,
}
/// Helper function for claiming the rate-limited mutex and constructing an RPC instance
async fn lock_and_make_rpc(rlmtx: &RLMutex<RpcCfg>) -> RpcClient {
let RpcCfg {
url,
timeout,
commitment,
} = rlmtx.lock().await.clone();
RpcClient::new_with_timeout_and_commitment(url, timeout, commitment)
}
/// Generate batches to attest by retrieving the on-chain product account data and grouping it
/// according to the configuration in `attestation_cfg`.
async fn attestation_config_to_batches(
rpc_cfg: &Arc<RLMutex<RpcCfg>>,
attestation_cfg: &AttestationConfig,
max_batch_size: usize,
) -> Result<Vec<SymbolBatch>, ErrBox> {
// Use the mapping if specified
let products = if let Some(mapping_addr) = attestation_cfg.mapping_addr.as_ref() {
let product_accounts_res =
crawl_pyth_mapping(&lock_and_make_rpc(rpc_cfg).await, mapping_addr).await;
if let Err(err) = &product_accounts_res {
error!(
"Could not crawl mapping {}: {:?}",
attestation_cfg.mapping_addr.unwrap_or_default(),
err
);
}
product_accounts_res?
} else {
vec![]
};
Ok(attestation_cfg.instantiate_batches(&products, max_batch_size))
}
/// Constructs attestation scheduling jobs from attestation config.
fn prepare_attestation_sched_jobs(
batch_cfg: &[SymbolBatch],
p2w_cfg: &Pyth2WormholeConfig,
rpc_cfg: &Arc<RLMutex<RpcCfg>>,
p2w_addr: &Pubkey,
payer: &Keypair,
message_q_mtx: Arc<Mutex<P2WMessageQueue>>,
) -> futures::future::JoinAll<impl Future<Output = Result<(), ErrBoxSend>>> {
// Flatten attestation config into a plain list of batches
let batches: Vec<_> = batch_cfg.iter().map(BatchState::new).collect();
let batch_count = batches.len();
// Create attestation scheduling routines; see attestation_sched_job() for details
let attestation_sched_futs = batches.into_iter().enumerate().map(|(idx, batch)| {
attestation_sched_job(AttestationSchedJobArgs {
batch,
batch_no: idx + 1,
batch_count,
rpc_cfg: rpc_cfg.clone(),
p2w_addr: *p2w_addr,
config: p2w_cfg.clone(),
payer: Keypair::from_bytes(&payer.to_bytes()).unwrap(),
message_q_mtx: message_q_mtx.clone(),
})
});
futures::future::join_all(attestation_sched_futs)
}
/// The argument count on attestation_sched_job got out of hand. This
/// helps keep the correct order in check.
pub struct AttestationSchedJobArgs {
pub batch: BatchState,
pub batch_no: usize,
pub batch_count: usize,
pub rpc_cfg: Arc<RLMutex<RpcCfg>>,
pub p2w_addr: Pubkey,
pub config: Pyth2WormholeConfig,
pub payer: Keypair,
pub message_q_mtx: Arc<Mutex<P2WMessageQueue>>,
}
/// A future that decides how a batch is sent in daemon mode.
///
/// Attestations of the batch are scheduled continuously using
/// spawn(), which means that a next attestation of the same batch
/// begins immediately when a condition is met without waiting for the
/// previous attempt to finish. Subsequent attestations are started
/// according to the attestation_conditions field on the
/// batch. Concurrent requests per batch are limited by the
/// max_batch_jobs field to prevent excess memory usage on network
/// slowdowns, etc.
async fn attestation_sched_job(args: AttestationSchedJobArgs) -> Result<(), ErrBoxSend> {
let AttestationSchedJobArgs {
mut batch,
batch_no,
batch_count,
rpc_cfg,
p2w_addr,
config,
payer,
message_q_mtx,
} = args;
    // Stagger this sched job by batch_no * 10 milliseconds. This spreads
    // out tx requests, which can prevent RPC timeouts on larger
    // interval-based symbol groups.
tokio::time::sleep(Duration::from_millis(batch_no as u64 * 10)).await;
// Enforces the max batch job count
let sema = Arc::new(Semaphore::new(batch.conditions.max_batch_jobs));
loop {
debug!(
"Batch {}/{}, group {:?}: Scheduling attestation job",
batch_no, batch_count, batch.group_name
);
// park this routine until a resend condition is met
loop {
if let Some(reason) = batch
.should_resend(&lock_and_make_rpc(&rpc_cfg).await)
.await
{
info!(
"Batch {}/{}, group {}: Resending (reason: {:?})",
batch_no, batch_count, batch.group_name, reason
);
break;
}
}
if sema.available_permits() == 0 {
warn!(
"Batch {}/{}, group {:?}: Ran out of job \
permits, some attestation conditions may be \
delayed. For better accuracy, increase \
max_batch_jobs or adjust attestation \
conditions",
batch_no, batch_count, batch.group_name
);
}
let job = attestation_job(AttestationJobArgs {
rlmtx: rpc_cfg.clone(),
batch_no,
batch_count,
group_name: batch.group_name.clone(),
p2w_addr,
config: config.clone(),
payer: Keypair::from_bytes(&payer.to_bytes()).unwrap(), // Keypair has no clone
symbols: batch.symbols.to_vec(),
max_jobs_sema: sema.clone(),
message_q_mtx: message_q_mtx.clone(),
rate_limit_interval_secs: batch.conditions.rate_limit_interval_secs,
});
// This short-lived permit prevents scheduling excess
// attestation jobs hanging on the max jobs semaphore (which could
// eventually eat all memory). It is freed as soon as we leave
// this code block.
let _permit4sched = sema.acquire().await?;
// Spawn the job in background
let _detached_job: JoinHandle<_> = tokio::spawn(job);
batch.last_job_finished_at = Instant::now();
}
}
/// Arguments for attestation_job(). This struct rules out same-type
/// ordering errors due to the large argument count
pub struct AttestationJobArgs {
pub rlmtx: Arc<RLMutex<RpcCfg>>,
pub batch_no: usize,
pub batch_count: usize,
pub group_name: String,
pub p2w_addr: Pubkey,
pub config: Pyth2WormholeConfig,
pub payer: Keypair,
pub symbols: Vec<P2WSymbol>,
pub max_jobs_sema: Arc<Semaphore>,
pub rate_limit_interval_secs: u32,
pub message_q_mtx: Arc<Mutex<P2WMessageQueue>>,
}
/// A future for a single attempt to attest a batch on Solana.
async fn attestation_job(args: AttestationJobArgs) -> Result<(), ErrBoxSend> {
let AttestationJobArgs {
rlmtx,
batch_no,
batch_count,
group_name,
p2w_addr,
config,
payer,
symbols,
max_jobs_sema,
rate_limit_interval_secs,
message_q_mtx,
} = args;
let batch_no4err_msg = batch_no;
let batch_count4err_msg = batch_count;
let group_name4err_msg = group_name.clone();
// The following async block is just wrapping the job in a log
// statement and err counter increase in case the job fails. It is
// done by using the or_else() future method. No other actions are
// performed and the error is propagated up the stack.
//
// This is necessary to learn about errors in jobs started with
// tokio::spawn() because in this package spawned futures are
// never explicitly awaited on.
//
// Previously, the or_else() existed in attestation_sched_job()
// which schedules this future. It was moved here for readability,
// after introduction of Prometheus metrics and the healthcheck,
// which helped keep metrics updates closer together.
let job_with_err_msg = (async move {
// Will be dropped after attestation is complete
let _permit = max_jobs_sema.acquire().await?;
debug!(
"Batch {}/{}, group {:?}: Starting attestation job",
batch_no, batch_count, group_name
);
let rpc = lock_and_make_rpc(&rlmtx).await; // Reuse the same lock for the blockhash/tx/get_transaction
let latest_blockhash = rpc
.get_latest_blockhash()
.map_err(|e| -> ErrBoxSend { e.into() })
.await?;
let wh_msg_id = message_q_mtx.lock().await.get_account()?.id;
let tx = gen_attest_tx(
p2w_addr,
&config,
&payer,
wh_msg_id,
symbols.as_slice(),
latest_blockhash,
rate_limit_interval_secs,
)?;
let tx_processing_start_time = Instant::now();
let sig = match send_and_confirm_transaction_with_config(&rpc, &tx, RpcSendTransactionConfig {
// Decreases probability of rate limit race conditions
skip_preflight: true,
..Default::default()
}).await {
Ok(s) => Ok(s),
Err(e) => match e.get_transaction_error() {
Some(TransactionError::InstructionError(_idx, InstructionError::Custom(code)))
if code == AttesterCustomError::AttestRateLimitReached as u32 =>
{
info!(
"Batch {}/{}, group {:?} OK: configured {} second rate limit interval reached, backing off",
batch_no, batch_count, group_name, rate_limit_interval_secs,
);
// Note: We return early if rate limit tx
// error is detected. This ensures that we
// don't count this attempt in ok/err
// monitoring and healthcheck counters.
return Ok(());
}
_other => Err(e),
},
}?;
let tx_data = rpc
.get_transaction_with_config(
&sig,
RpcTransactionConfig {
encoding: Some(UiTransactionEncoding::Json),
commitment: Some(rpc.commitment()),
max_supported_transaction_version: None,
},
)
.await?;
let tx_processing_duration = tx_processing_start_time.elapsed();
// Manually insert the value into histogram. NOTE: We're not
// using the start_timer() method because it would record
// durations even for early returns in error conditions which
// would look weird in monitoring.
SOL_RPC_TX_PROCESSING_HIST.observe(tx_processing_duration.as_secs_f64());
let seqno = tx_data
.transaction
.meta
.and_then(|meta| meta.log_messages)
.and_then(|logs| {
let mut seqno = None;
for log in logs {
if log.starts_with(SEQNO_PREFIX) {
seqno = Some(log.replace(SEQNO_PREFIX, ""));
break;
}
}
seqno
})
.ok_or_else(|| -> ErrBoxSend { "No seqno in program logs".to_string().into() })?;
info!(
"Batch {}/{}, group {:?} OK. Sequence: {}",
batch_no, batch_count, group_name, seqno
);
ATTESTATIONS_OK_CNT.inc();
LAST_SEQNO_GAUGE.set(seqno.parse::<i64>()?);
HEALTHCHECK_STATE.lock().await.add_result(true); // Report this job as successful to healthcheck
Result::<(), ErrBoxSend>::Ok(())
})
.or_else(move |e| async move {
// log any errors coming from the job
warn!(
"Batch {}/{}, group {:?} ERR: {:?}",
batch_no4err_msg, batch_count4err_msg, group_name4err_msg, e
);
// Bump counters
ATTESTATIONS_ERR_CNT.inc();
HEALTHCHECK_STATE.lock().await.add_result(false); // Report this job as failed to healthcheck
Err(e)
});
job_with_err_msg.await
}
fn init_logging() {
if std::env::var("RUST_LOG").is_ok() {
env_logger::init()
} else {
// Default to info if RUST_LOG not set
env_logger::builder().filter_level(LevelFilter::Info).init();
}
}
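// Illustrative condensed version of the prefix scan in attestation_job():
// the first "Program log: Sequence: <n>" line carries the Wormhole seqno.
#[test]
fn seqno_extraction() {
    let logs = vec![
        "Program log: something else".to_string(),
        format!("{SEQNO_PREFIX}1234"),
    ];
    let seqno = logs.iter().find_map(|l| l.strip_prefix(SEQNO_PREFIX));
    assert_eq!(seqno, Some("1234"));
}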

View File

@ -1,139 +0,0 @@
//! Re-usable message scheme for pyth2wormhole
use {
crate::ErrBoxSend,
log::debug,
std::{
collections::VecDeque,
time::{
Duration,
Instant,
},
},
};
/// One of the accounts tracked by the attestation client.
#[derive(Clone, Debug)]
pub struct P2WMessageAccount {
/// Unique ID that lets us derive unique accounts for use on-chain
pub id: u64,
/// Last time we've posted a message to wormhole with this account
pub last_used: Instant,
}
/// An umbrella data structure for tracking all message accounts in use
#[derive(Clone, Debug)]
pub struct P2WMessageQueue {
/// The tracked accounts. Sorted from oldest to newest, as guaranteed by get_account()
accounts: VecDeque<P2WMessageAccount>,
/// How much time needs to pass between reuses
grace_period: Duration,
/// A hard cap on how many accounts will be created.
max_accounts: usize,
}
impl P2WMessageQueue {
pub fn new(grace_period: Duration, max_accounts: usize) -> Self {
Self {
accounts: VecDeque::new(),
grace_period,
max_accounts,
}
}
/// Finds or creates an account with last_used at least grace_period in the past.
///
/// This method governs the self.accounts queue and preserves its sorted state.
pub fn get_account(&mut self) -> Result<P2WMessageAccount, ErrBoxSend> {
// Pick or add an account to use as message
let acc = match self.accounts.pop_front() {
// Exists and is old enough for reuse
Some(mut existing_acc) if existing_acc.last_used.elapsed() > self.grace_period => {
existing_acc.last_used = Instant::now();
existing_acc
}
// Exists but isn't old enough for reuse
Some(existing_too_new_acc) => {
// Counter-act the pop, this account is still oldest
// and will be old enough eventually.
self.accounts.push_front(existing_too_new_acc);
// Make sure we're not going over the limit
if self.accounts.len() >= self.max_accounts {
return Err(format!(
"Max message queue size of {} reached.",
self.max_accounts
)
.into());
}
debug!(
"Increasing message queue size to {}",
self.accounts.len() + 1
);
// Use a new account instead
P2WMessageAccount {
id: self.accounts.len() as u64,
last_used: Instant::now(),
}
}
// Base case: Queue is empty, use a new account
None => P2WMessageAccount {
id: self.accounts.len() as u64,
last_used: Instant::now(),
},
};
// The chosen account becomes the newest, push it to the very end.
self.accounts.push_back(acc.clone());
Ok(acc)
}
}
#[cfg(test)]
pub mod test {
use super::*;
#[test]
fn test_empty_grows_only_as_needed() -> Result<(), ErrBoxSend> {
let mut q = P2WMessageQueue::new(Duration::from_millis(500), 100_000);
// Empty -> 1 account
let acc = q.get_account()?;
assert_eq!(q.accounts.len(), 1);
assert_eq!(acc.id, 0);
// 1 -> 2 accounts, not enough time passes
let acc2 = q.get_account()?;
assert_eq!(q.accounts.len(), 2);
assert_eq!(acc2.id, 1);
std::thread::sleep(Duration::from_millis(600));
// Account 0 should be in front, enough time passed
let acc3 = q.get_account()?;
assert_eq!(q.accounts.len(), 2);
assert_eq!(acc3.id, 0);
// Account 1 also qualifies
let acc4 = q.get_account()?;
assert_eq!(q.accounts.len(), 2);
assert_eq!(acc4.id, 1);
// 2 -> 3 accounts, not enough time passes
let acc5 = q.get_account()?;
assert_eq!(q.accounts.len(), 3);
assert_eq!(acc5.id, 2);
// We should end up with 0, 1 and 2 in order
assert_eq!(q.accounts[0].id, 0);
assert_eq!(q.accounts[1].id, 1);
assert_eq!(q.accounts[2].id, 2);
Ok(())
}
}

View File

@ -1,266 +0,0 @@
use {
crate::HEALTHCHECK_STATE,
http::status::StatusCode,
log::{
error,
trace,
},
prometheus::TextEncoder,
solana_client::{
client_error::Result as SolClientResult,
nonblocking::rpc_client::RpcClient,
rpc_config::RpcSendTransactionConfig,
rpc_request::RpcError,
},
solana_sdk::{
commitment_config::CommitmentConfig,
signature::Signature,
transaction::{
uses_durable_nonce,
Transaction,
},
},
std::{
net::SocketAddr,
ops::{
Deref,
DerefMut,
},
time::{
Duration,
Instant,
},
},
tokio::{
sync::{
Mutex,
MutexGuard,
},
time::sleep,
},
warp::{
reply,
Filter,
Rejection,
Reply,
},
};
/// Rate-limited mutex. Ensures there's a period of minimum rl_interval between lock acquisitions
pub struct RLMutex<T> {
mtx: Mutex<RLMutexState<T>>,
rl_interval: Duration,
}
/// Helper to make the last_released writes also guarded by the mutex
pub struct RLMutexState<T> {
/// Helps make sure regular passage of time is subtracted from sleep duration
last_released: Instant,
val: T,
}
impl<T> Deref for RLMutexState<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.val
}
}
impl<T> DerefMut for RLMutexState<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.val
}
}
/// Helper wrapper to record lock release times via Drop
pub struct RLMutexGuard<'a, T> {
guard: MutexGuard<'a, RLMutexState<T>>,
}
impl<'a, T> Drop for RLMutexGuard<'a, T> {
fn drop(&mut self) {
let state: &mut RLMutexState<T> =
MutexGuard::<'a, RLMutexState<T>>::deref_mut(&mut self.guard);
state.last_released = Instant::now();
}
}
impl<'a, T> Deref for RLMutexGuard<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.guard.deref()
}
}
impl<'a, T> DerefMut for RLMutexGuard<'a, T> {
fn deref_mut(&mut self) -> &mut T {
self.guard.deref_mut()
}
}
impl<T> RLMutex<T> {
pub fn new(val: T, rl_interval: Duration) -> Self {
Self {
mtx: Mutex::new(RLMutexState {
last_released: Instant::now().checked_sub(rl_interval).unwrap(),
val,
}),
rl_interval,
}
}
pub async fn lock(&self) -> RLMutexGuard<'_, T> {
let guard = self.mtx.lock().await;
let elapsed = guard.last_released.elapsed();
if elapsed < self.rl_interval {
let sleep_time = self.rl_interval - elapsed;
trace!(
"RLMutex: Parking lock future for {}.{}s",
sleep_time.as_secs(),
sleep_time.subsec_millis()
);
tokio::time::sleep(sleep_time).await;
}
RLMutexGuard { guard }
}
}
async fn metrics_handler() -> Result<impl Reply, Rejection> {
let encoder = TextEncoder::new();
match encoder.encode_to_string(&prometheus::gather()) {
Ok(encoded_metrics) => Ok(reply::with_status(encoded_metrics, StatusCode::OK)),
Err(e) => {
error!("Could not serve metrics: {}", e.to_string());
Ok(reply::with_status(
"".to_string(),
StatusCode::INTERNAL_SERVER_ERROR,
))
}
}
}
/// Shares healthcheck result via HTTP status codes. The idea is to
/// get a yes/no health answer using a plain HTTP request. Note: curl
/// only treats HTTP error statuses as failures when run with -f/--fail.
async fn healthcheck_handler() -> Result<impl Reply, Rejection> {
let hc_state = HEALTHCHECK_STATE.lock().await;
match hc_state.is_healthy() {
// Healthy - 200 OK
Some(true) => {
let ok_count = hc_state
.window
.iter()
.fold(0usize, |acc, val| if *val { acc + 1 } else { acc });
let msg = format!(
"healthy, {} of {} last attestations OK",
ok_count, hc_state.max_window_size
);
Ok(reply::with_status(msg, StatusCode::OK))
}
// Unhealthy - 500 Internal Server Error
Some(false) => {
let msg = format!(
"unhealthy, all of {} latest attestations returned error",
hc_state.max_window_size
);
Ok(reply::with_status(msg, StatusCode::INTERNAL_SERVER_ERROR))
}
// No data - 503 Service Unavailable
None => {
let msg = if hc_state.enable {
format!(
"Not enough data in window, {} of {} min attempts made",
hc_state.window.len(),
hc_state.max_window_size
)
} else {
"Healthcheck disabled (enable_healthcheck is false)".to_string()
};
Ok(reply::with_status(msg, StatusCode::SERVICE_UNAVAILABLE))
}
}
}
/// Serves Prometheus metrics and the result of the healthcheck
pub async fn start_metrics_server(addr: impl Into<SocketAddr> + 'static) {
let metrics_route = warp::path("metrics") // The Prometheus metrics subpage is standardized to always be /metrics
.and(warp::path::end())
.and_then(metrics_handler);
let healthcheck_route = warp::path("healthcheck")
.and(warp::path::end())
.and_then(healthcheck_handler);
warp::serve(metrics_route.or(healthcheck_route))
.bind(addr)
.await;
}
/// WARNING: Copied verbatim from v1.10.31, be careful when bumping
/// solana crate versions!
///
/// TODO(2023-03-02): Use an upstream method when
/// it's available.
///
/// This method is almost identical to
/// RpcClient::send_and_confirm_transaction(). The only difference is
/// that we let the user specify the config and replace
/// send_transaction() inside with
/// send_transaction_with_config(). This variant is currently missing
/// from solana_client.
pub async fn send_and_confirm_transaction_with_config(
client: &RpcClient,
transaction: &Transaction,
config: RpcSendTransactionConfig,
) -> SolClientResult<Signature> {
const SEND_RETRIES: usize = 1;
const GET_STATUS_RETRIES: usize = usize::MAX;
'sending: for _ in 0..SEND_RETRIES {
let signature = client
.send_transaction_with_config(transaction, config)
.await?;
let recent_blockhash = if uses_durable_nonce(transaction).is_some() {
let (recent_blockhash, ..) = client
.get_latest_blockhash_with_commitment(CommitmentConfig::processed())
.await?;
recent_blockhash
} else {
transaction.message.recent_blockhash
};
for status_retry in 0..GET_STATUS_RETRIES {
match client.get_signature_status(&signature).await? {
Some(Ok(_)) => return Ok(signature),
Some(Err(e)) => return Err(e.into()),
None => {
if !client
.is_blockhash_valid(&recent_blockhash, CommitmentConfig::processed())
.await?
{
                        // Blockhash was not found for some reason
break 'sending;
} else if cfg!(not(test))
// Ignore sleep at last step.
&& status_retry < GET_STATUS_RETRIES
{
// Retry twice a second
sleep(Duration::from_millis(500)).await;
continue;
}
}
}
}
}
Err(RpcError::ForUser(
"unable to confirm transaction. \
This can happen in situations such as transaction expiration \
and insufficient fee-payer funds"
.to_string(),
)
.into())
}
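// Illustrative sketch: two successive lock() calls are spaced out by the
// rate-limit interval, since dropping the guard records the release time.
#[tokio::test]
async fn rl_mutex_spacing() {
    let rl = RLMutex::new(0u32, Duration::from_millis(200));
    let t0 = Instant::now();
    {
        let mut g = rl.lock().await;
        *g += 1; // first acquisition is immediate
    }
    {
        let mut g = rl.lock().await; // parks ~200ms before acquiring
        *g += 1;
    }
    assert!(t0.elapsed() >= Duration::from_millis(200));
}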

View File

@ -1,2 +0,0 @@
pub mod passthrough;
pub mod pyth;

View File

@ -1,24 +0,0 @@
//! Trivial program for mocking other programs easily
use {
solana_program::{
account_info::AccountInfo,
msg,
program_error::ProgramError,
},
solana_program_test::*,
solana_sdk::pubkey::Pubkey,
};
pub fn passthrough_entrypoint(
program_id: &Pubkey,
account_infos: &[AccountInfo],
_data: &[u8],
) -> Result<(), ProgramError> {
msg!(&format!("Program {program_id}"));
msg!(&format!("account_infos {account_infos:?}"));
Ok(())
}
pub fn add_passthrough(pt: &mut ProgramTest, name: &str, prog_id: Pubkey) {
pt.add_program(name, prog_id, processor!(passthrough_entrypoint))
}

View File

@ -1,82 +0,0 @@
//! This module contains test fixtures for instantiating plausible
//! Pyth accounts for testing purposes.
use {
pyth_sdk_solana::state::{
AccountType,
PriceAccount,
ProductAccount,
MAGIC,
PROD_ATTR_SIZE,
VERSION,
},
solana_program_test::*,
solana_sdk::{
account::Account,
pubkey::Pubkey,
rent::Rent,
},
};
/// Create a pair of brand new product/price accounts that point at each other
pub fn add_test_symbol(pt: &mut ProgramTest, owner: &Pubkey) -> (Pubkey, Pubkey) {
// Generate pubkeys
let prod_id = Pubkey::new_unique();
let price_id = Pubkey::new_unique();
// Instantiate
let prod = {
ProductAccount {
magic: MAGIC,
ver: VERSION,
atype: AccountType::Product as u32,
size: 0,
px_acc: price_id,
attr: [0u8; PROD_ATTR_SIZE],
}
};
let price = PriceAccount {
magic: MAGIC,
ver: VERSION,
atype: AccountType::Price as u32,
prod: prod_id,
..Default::default()
};
// Cast to raw bytes
let prod_buf = &[prod];
let prod_bytes = unsafe {
let (_prefix, bytes, _suffix) = prod_buf.align_to::<u8>();
bytes
};
let price_buf = &[price];
let price_bytes = unsafe {
let (_prefix, bytes, _suffix) = price_buf.align_to::<u8>();
bytes
};
// Compute exemption rent
let prod_lamports = Rent::default().minimum_balance(prod_bytes.len());
let price_lamports = Rent::default().minimum_balance(price_bytes.len());
// Populate the accounts
let prod_acc = Account {
lamports: prod_lamports,
data: (*prod_bytes).to_vec(),
owner: *owner,
rent_epoch: 0,
executable: false,
};
let price_acc = Account {
lamports: price_lamports,
data: (*price_bytes).to_vec(),
owner: *owner,
rent_epoch: 0,
executable: false,
};
pt.add_account(prod_id, prod_acc);
pt.add_account(price_id, price_acc);
(prod_id, price_id)
}
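// Illustrative sketch: the raw bytes produced by the unsafe cast above stay
// readable with the SDK loader that the attester client uses at runtime.
#[test]
fn fixture_bytes_parse() {
    let price = PriceAccount {
        magic: MAGIC,
        ver: VERSION,
        atype: AccountType::Price as u32,
        ..Default::default()
    };
    let buf = &[price];
    let bytes = unsafe { buf.align_to::<u8>().1 };
    assert!(pyth_sdk_solana::state::load_price_account(bytes).is_ok());
}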

View File

@ -1,128 +0,0 @@
pub mod fixtures;
use {
bridge::accounts::{
Bridge,
BridgeConfig,
BridgeData,
},
fixtures::{
passthrough,
pyth,
},
pyth_wormhole_attester::config::{
P2WConfigAccount,
Pyth2WormholeConfig,
},
pyth_wormhole_attester_client as p2wc,
solana_program_test::*,
solana_sdk::{
account::Account,
pubkey::Pubkey,
rent::Rent,
},
solitaire::{
processors::seeded::Seeded,
AccountState,
BorshSerialize,
},
};
#[tokio::test]
async fn test_happy_path() -> Result<(), p2wc::ErrBoxSend> {
// Programs
let p2w_program_id = Pubkey::new_unique();
let wh_fixture_program_id = Pubkey::new_unique();
// Authorities
let p2w_owner = Pubkey::new_unique();
let pyth_owner = Pubkey::new_unique();
let ops_owner = Pubkey::new_unique();
// On-chain state
let p2w_config = Pyth2WormholeConfig {
owner: p2w_owner,
wh_prog: wh_fixture_program_id,
max_batch_size: pyth_wormhole_attester::attest::P2W_MAX_BATCH_SIZE,
pyth_owner,
is_active: true,
ops_owner: Some(ops_owner),
};
let bridge_config = BridgeData {
config: BridgeConfig {
fee: 0xdeadbeef,
..Default::default()
},
..Default::default()
};
// Populate test environment
let mut p2w_test = ProgramTest::new(
"pyth_wormhole_attester",
p2w_program_id,
processor!(pyth_wormhole_attester::instruction::solitaire),
);
// Plant a filled config account
let p2w_config_bytes = p2w_config.try_to_vec()?;
let p2w_config_account = Account {
lamports: Rent::default().minimum_balance(p2w_config_bytes.len()),
data: p2w_config_bytes,
owner: p2w_program_id,
executable: false,
rent_epoch: 0,
};
let p2w_config_addr =
P2WConfigAccount::<{ AccountState::Initialized }>::key(None, &p2w_program_id);
p2w_test.add_account(p2w_config_addr, p2w_config_account);
// Plant a bridge config
let bridge_config_bytes = bridge_config.try_to_vec()?;
let wh_bridge_config_account = Account {
lamports: Rent::default().minimum_balance(bridge_config_bytes.len()),
data: bridge_config_bytes,
owner: wh_fixture_program_id,
executable: false,
rent_epoch: 0,
};
let wh_bridge_config_addr =
Bridge::<{ AccountState::Initialized }>::key(None, &wh_fixture_program_id);
p2w_test.add_account(wh_bridge_config_addr, wh_bridge_config_account);
passthrough::add_passthrough(&mut p2w_test, "wormhole", wh_fixture_program_id);
let (prod_id, price_id) = pyth::add_test_symbol(&mut p2w_test, &pyth_owner);
let ctx = p2w_test.start_with_context().await;
let symbols = vec![p2wc::P2WSymbol {
name: Some("Mock symbol".to_owned()),
product_addr: prod_id,
price_addr: price_id,
}];
let _attest_tx = p2wc::gen_attest_tx(
p2w_program_id,
&p2w_config,
&ctx.payer,
0,
symbols.as_slice(),
ctx.last_blockhash,
0,
)?;
// NOTE: 2022-09-05
// Execution of this transaction is commented out because the Solana test
// suite exhibits some unknown behavior on it. It is probably a memory leak
// that causes either a segfault or an invalid error (after reading an
// unknown variable from memory). It is probably solved in the following PR:
// https://github.com/solana-labs/solana/pull/26507
//
// TODO: add this check when the above PR is released in our Solana package.
// ctx.banks_client.process_transaction(attest_tx).await?;
Ok(())
}

View File

@ -1,177 +0,0 @@
//! Checks for migrating the previous config schema into the current one
pub mod fixtures;
use {
fixtures::passthrough,
log::info,
pyth_wormhole_attester::config::{
OldP2WConfigAccount,
OldPyth2WormholeConfig,
P2WConfigAccount,
Pyth2WormholeConfig,
},
pyth_wormhole_attester_client as p2wc,
serial_test::serial,
solana_program::system_program,
solana_program_test::*,
solana_sdk::{
account::Account,
pubkey::Pubkey,
rent::Rent,
signature::Signer,
signer::keypair::Keypair,
},
solitaire::{
processors::seeded::Seeded,
AccountState,
BorshSerialize,
},
};
#[tokio::test]
#[serial]
async fn test_migrate_works() -> Result<(), solitaire::ErrBox> {
info!("Starting");
// Programs
let p2w_program_id = Pubkey::new_unique();
let wh_fixture_program_id = Pubkey::new_unique();
// Authorities
let p2w_owner = Keypair::new();
let pyth_owner = Pubkey::new_unique();
// On-chain state
let old_p2w_config = OldPyth2WormholeConfig {
owner: p2w_owner.pubkey(),
wh_prog: wh_fixture_program_id,
max_batch_size: pyth_wormhole_attester::attest::P2W_MAX_BATCH_SIZE,
pyth_owner,
is_active: true,
};
info!("Before ProgramTest::new()");
// Populate test environment
let mut p2w_test = ProgramTest::new(
"pyth_wormhole_attester",
p2w_program_id,
processor!(pyth_wormhole_attester::instruction::solitaire),
);
// Plant filled config accounts
let old_p2w_config_bytes = old_p2w_config.try_to_vec()?;
let old_p2w_config_account = Account {
lamports: Rent::default().minimum_balance(old_p2w_config_bytes.len()),
data: old_p2w_config_bytes,
owner: p2w_program_id,
executable: false,
rent_epoch: 0,
};
let old_p2w_config_addr = OldP2WConfigAccount::key(None, &p2w_program_id);
info!("Before add_account() calls");
p2w_test.add_account(old_p2w_config_addr, old_p2w_config_account);
// Add the system program because the contract creates an account for the new configuration account
passthrough::add_passthrough(&mut p2w_test, "system", system_program::id());
info!("System program under {}", system_program::id());
info!("Before start_with_context");
let mut ctx = p2w_test.start_with_context().await;
let migrate_tx =
p2wc::gen_migrate_tx(ctx.payer, p2w_program_id, p2w_owner, ctx.last_blockhash)?;
info!("Before process_transaction");
// Migration should succeed, populating the new config account
ctx.banks_client.process_transaction(migrate_tx).await?;
Ok(())
}
#[tokio::test]
#[serial]
async fn test_migrate_already_migrated() -> Result<(), solitaire::ErrBox> {
info!("Starting");
// Programs
let p2w_program_id = Pubkey::new_unique();
let wh_fixture_program_id = Pubkey::new_unique();
// Authorities
let p2w_owner = Keypair::new();
let pyth_owner = Pubkey::new_unique();
let ops_owner = Keypair::new();
// On-chain state
let old_p2w_config = OldPyth2WormholeConfig {
owner: p2w_owner.pubkey(),
wh_prog: wh_fixture_program_id,
max_batch_size: pyth_wormhole_attester::attest::P2W_MAX_BATCH_SIZE,
pyth_owner,
is_active: true,
};
let new_p2w_config = Pyth2WormholeConfig {
owner: p2w_owner.pubkey(),
wh_prog: wh_fixture_program_id,
max_batch_size: pyth_wormhole_attester::attest::P2W_MAX_BATCH_SIZE,
pyth_owner,
is_active: true,
ops_owner: Some(ops_owner.pubkey()),
};
info!("Before ProgramTest::new()");
// Populate test environment
let mut p2w_test = ProgramTest::new(
"pyth_wormhole_attester",
p2w_program_id,
processor!(pyth_wormhole_attester::instruction::solitaire),
);
// Plant filled config accounts
let old_p2w_config_bytes = old_p2w_config.try_to_vec()?;
let old_p2w_config_account = Account {
lamports: Rent::default().minimum_balance(old_p2w_config_bytes.len()),
data: old_p2w_config_bytes,
owner: p2w_program_id,
executable: false,
rent_epoch: 0,
};
let old_p2w_config_addr = OldP2WConfigAccount::key(None, &p2w_program_id);
let new_p2w_config_bytes = new_p2w_config.try_to_vec()?;
let new_p2w_config_account = Account {
lamports: Rent::default().minimum_balance(new_p2w_config_bytes.len()),
data: new_p2w_config_bytes,
owner: p2w_program_id,
executable: false,
rent_epoch: 0,
};
let new_p2w_config_addr =
P2WConfigAccount::<{ AccountState::Initialized }>::key(None, &p2w_program_id);
info!("Before add_account() calls");
p2w_test.add_account(old_p2w_config_addr, old_p2w_config_account);
p2w_test.add_account(new_p2w_config_addr, new_p2w_config_account);
info!("Before start_with_context");
let mut ctx = p2w_test.start_with_context().await;
let migrate_tx =
p2wc::gen_migrate_tx(ctx.payer, p2w_program_id, p2w_owner, ctx.last_blockhash)?;
info!("Before process_transaction");
// Migration should fail because the new config account is already initialized
assert!(ctx
.banks_client
.process_transaction(migrate_tx)
.await
.is_err());
Ok(())
}

View File

@ -1,196 +0,0 @@
pub mod fixtures;
use {
pyth_wormhole_attester::config::{
P2WConfigAccount,
Pyth2WormholeConfig,
},
pyth_wormhole_attester_client as p2wc,
solana_program_test::*,
solana_sdk::{
account::Account,
pubkey::Pubkey,
rent::Rent,
signature::Signer,
signer::keypair::Keypair,
},
solitaire::{
processors::seeded::Seeded,
AccountState,
BorshSerialize,
},
};
fn clone_keypair(keypair: &Keypair) -> Keypair {
// Unwrap is fine: we are copying a valid keypair, and this is test code.
Keypair::from_bytes(keypair.to_bytes().as_ref()).unwrap()
}
#[tokio::test]
async fn test_setting_is_active_works() -> Result<(), p2wc::ErrBoxSend> {
// Programs
let p2w_program_id = Pubkey::new_unique();
let wh_fixture_program_id = Pubkey::new_unique();
// Authorities
let p2w_owner = Pubkey::new_unique();
let pyth_owner = Pubkey::new_unique();
let ops_owner = Keypair::new();
// On-chain state
let p2w_config = Pyth2WormholeConfig {
owner: p2w_owner,
wh_prog: wh_fixture_program_id,
max_batch_size: pyth_wormhole_attester::attest::P2W_MAX_BATCH_SIZE,
pyth_owner,
is_active: true,
ops_owner: Some(ops_owner.pubkey()),
};
// Populate test environment
let mut p2w_test = ProgramTest::new(
"pyth_wormhole_attester",
p2w_program_id,
processor!(pyth_wormhole_attester::instruction::solitaire),
);
// Plant a filled config account
let p2w_config_bytes = p2w_config.try_to_vec()?;
let p2w_config_account = Account {
lamports: Rent::default().minimum_balance(p2w_config_bytes.len()),
data: p2w_config_bytes,
owner: p2w_program_id,
executable: false,
rent_epoch: 0,
};
let p2w_config_addr =
P2WConfigAccount::<{ AccountState::Initialized }>::key(None, &p2w_program_id);
p2w_test.add_account(p2w_config_addr, p2w_config_account);
let mut ctx = p2w_test.start_with_context().await;
// Setting to false should work
let set_is_active_false_tx = p2wc::gen_set_is_active_tx(
clone_keypair(&ctx.payer),
p2w_program_id,
clone_keypair(&ops_owner),
false,
ctx.last_blockhash,
)
.map_err(|e| e.to_string())?;
ctx.banks_client
.process_transaction(set_is_active_false_tx)
.await?;
let config = ctx
.banks_client
.get_account_data_with_borsh::<Pyth2WormholeConfig>(p2w_config_addr)
.await?;
assert!(!config.is_active);
// Setting to true should work
let set_is_active_true_tx = p2wc::gen_set_is_active_tx(
clone_keypair(&ctx.payer),
p2w_program_id,
clone_keypair(&ops_owner),
true,
ctx.last_blockhash,
)
.map_err(|e| e.to_string())?;
ctx.banks_client
.process_transaction(set_is_active_true_tx)
.await?;
let config = ctx
.banks_client
.get_account_data_with_borsh::<Pyth2WormholeConfig>(p2w_config_addr)
.await?;
assert!(config.is_active);
// A wrong signer must not be able to toggle is_active
let set_is_active_true_tx = p2wc::gen_set_is_active_tx(
clone_keypair(&ctx.payer),
p2w_program_id,
clone_keypair(&ctx.payer),
true,
ctx.last_blockhash,
)
.map_err(|e| e.to_string())?;
assert!(ctx
.banks_client
.process_transaction(set_is_active_true_tx)
.await
.is_err());
Ok(())
}
#[tokio::test]
async fn test_setting_is_active_does_not_work_without_ops_owner() -> Result<(), p2wc::ErrBoxSend> {
// Programs
let p2w_program_id = Pubkey::new_unique();
let wh_fixture_program_id = Pubkey::new_unique();
// Authorities
let p2w_owner = Pubkey::new_unique();
let pyth_owner = Keypair::new();
// On-chain state
let p2w_config = Pyth2WormholeConfig {
owner: p2w_owner,
wh_prog: wh_fixture_program_id,
max_batch_size: pyth_wormhole_attester::attest::P2W_MAX_BATCH_SIZE,
pyth_owner: pyth_owner.pubkey(),
is_active: true,
ops_owner: None,
};
// Populate test environment
let mut p2w_test = ProgramTest::new(
"pyth_wormhole_attester",
p2w_program_id,
processor!(pyth_wormhole_attester::instruction::solitaire),
);
// Plant a filled config account
let p2w_config_bytes = p2w_config.try_to_vec()?;
let p2w_config_account = Account {
lamports: Rent::default().minimum_balance(p2w_config_bytes.len()),
data: p2w_config_bytes,
owner: p2w_program_id,
executable: false,
rent_epoch: 0,
};
let p2w_config_addr =
P2WConfigAccount::<{ AccountState::Initialized }>::key(None, &p2w_program_id);
p2w_test.add_account(p2w_config_addr, p2w_config_account);
let mut ctx = p2w_test.start_with_context().await;
// Without an ops_owner configured, no signer should be able to toggle is_active.
// pyth_owner is used here as an example.
let set_is_active_true_tx = p2wc::gen_set_is_active_tx(
clone_keypair(&ctx.payer),
p2w_program_id,
pyth_owner,
true,
ctx.last_blockhash,
)
.map_err(|e| e.to_string())?;
assert!(ctx
.banks_client
.process_transaction(set_is_active_true_tx)
.await
.is_err());
Ok(())
}

View File

@ -1,14 +0,0 @@
[package]
name = "pyth-wormhole-attester-governance"
version = "0.1.0"
description = "CLI to generate governance payloads for the attester"
edition = "2018"
[dependencies]
anyhow = "1.0.65"
clap = {version = "3.1.18", features = ["derive"]}
solana-sdk = "=1.10.31"
pyth-wormhole-attester-client = {path = "../client/"}
hex = "0.4.3"
remote-executor = {path = "../../governance/remote_executor/programs/remote-executor/"}
borsh = "0.9.3"

View File

@ -1,54 +0,0 @@
//! CLI options
use {
clap::{
Parser,
Subcommand,
},
solana_sdk::pubkey::Pubkey,
};
#[derive(Parser, Debug)]
#[clap(
about = "A cli for the remote executor",
author = "Pyth Network Contributors"
)]
pub struct Cli {
#[clap(subcommand)]
pub action: Action,
}
#[derive(Subcommand, Debug)]
pub enum Action {
#[clap(about = "Get set upgrade authority payload for squads-cli")]
GetSetConfig {
#[clap(long, help = "Program id")]
program_id: Pubkey,
#[clap(long, help = "Current owner")]
owner: Pubkey,
#[clap(long, help = "Payer")]
payer: Pubkey,
#[clap(long, help = "Config : New owner")]
new_owner: Pubkey,
#[clap(long, help = "Config : Wormhole program id")]
wormhole: Pubkey,
#[clap(long, help = "Config : Pyth program id")]
pyth_owner: Pubkey,
#[clap(long, help = "Config : Max batch size")]
max_batch_size: u16,
#[clap(long, help = "Config : Is active")]
is_active: bool,
#[clap(long, help = "Config : Ops owner")]
ops_owner: Option<Pubkey>,
},
#[clap(about = "Get upgrade program payload for squads-cli")]
GetSetIsActive {
#[clap(long, help = "Program id")]
program_id: Pubkey,
#[clap(long, help = "Current ops owner")]
ops_owner: Pubkey,
#[clap(long, help = "Payer")]
payer: Pubkey,
#[clap(long, help = "Config : Is active")]
is_active: bool,
},
}
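// A minimal sketch of exercising this clap derive without a real command
// line. The argument values are illustrative; any valid base58 pubkeys work.
#[allow(dead_code)]
fn demo_parse() -> Cli {
    let program_id = Pubkey::new_unique().to_string();
    let ops_owner = Pubkey::new_unique().to_string();
    let payer = Pubkey::new_unique().to_string();
    Cli::parse_from([
        "attester-governance",
        "get-set-is-active",
        "--program-id",
        program_id.as_str(),
        "--ops-owner",
        ops_owner.as_str(),
        "--payer",
        payer.as_str(),
        // Boolean fields in clap v3 derive are presence flags.
        "--is-active",
    ])
}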

View File

@ -1,70 +0,0 @@
use {
anyhow::Result,
borsh::BorshSerialize,
clap::Parser,
cli::{
Action,
Cli,
},
pyth_wormhole_attester_client::{
get_set_config_ix,
get_set_is_active_ix,
Pyth2WormholeConfig,
},
remote_executor::state::governance_payload::{
ExecutorPayload,
GovernanceHeader,
InstructionData,
},
};
mod cli;
fn main() -> Result<()> {
let cli = Cli::parse();
match cli.action {
Action::GetSetConfig {
program_id,
owner,
payer,
new_owner,
wormhole,
pyth_owner,
max_batch_size,
is_active,
ops_owner,
} => {
let new_config = Pyth2WormholeConfig {
owner: new_owner,
wh_prog: wormhole,
pyth_owner,
max_batch_size,
is_active,
ops_owner,
};
let ix = get_set_config_ix(&program_id, &owner, &payer, new_config).unwrap();
let payload = ExecutorPayload {
header: GovernanceHeader::executor_governance_header(),
instructions: vec![InstructionData::from(&ix)],
}
.try_to_vec()?;
println!("Set config payload : {:?}", hex::encode(payload));
Ok(())
}
Action::GetSetIsActive {
program_id,
ops_owner,
payer,
is_active,
} => {
let ix = get_set_is_active_ix(&program_id, &ops_owner, &payer, is_active).unwrap();
let payload = ExecutorPayload {
header: GovernanceHeader::executor_governance_header(),
instructions: vec![InstructionData::from(&ix)],
}
.try_to_vec()?;
println!("Set is active payload : {:?}", hex::encode(payload));
Ok(())
}
}
}
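// A minimal sketch of consuming the printed payload elsewhere, assuming the
// hex string is copied without the surrounding quotes that `{:?}` adds:
#[allow(dead_code)]
fn decode_payload_hex(payload_hex: &str) -> Result<Vec<u8>> {
    Ok(hex::decode(payload_hex)?)
}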

View File

@ -1,859 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "ahash"
version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e"
[[package]]
name = "aho-corasick"
version = "0.7.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f"
dependencies = [
"memchr",
]
[[package]]
name = "arrayref"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544"
[[package]]
name = "arrayvec"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
[[package]]
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
"hermit-abi",
"libc",
"winapi",
]
[[package]]
name = "autocfg"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
[[package]]
name = "bincode"
version = "1.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad"
dependencies = [
"serde",
]
[[package]]
name = "blake3"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b64485778c4f16a6a5a9d335e80d449ac6c70cdd6a06d2af18a6f6f775a125b3"
dependencies = [
"arrayref",
"arrayvec",
"cc",
"cfg-if 0.1.10",
"constant_time_eq",
"crypto-mac",
"digest 0.9.0",
]
[[package]]
name = "block-buffer"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4"
dependencies = [
"block-padding",
"generic-array 0.14.4",
]
[[package]]
name = "block-padding"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae"
[[package]]
name = "borsh"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09a7111f797cc721407885a323fb071636aee57f750b1a4ddc27397eba168a74"
dependencies = [
"borsh-derive",
"hashbrown",
]
[[package]]
name = "borsh-derive"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "307f3740906bac2c118a8122fe22681232b244f1369273e45f1156b45c43d2dd"
dependencies = [
"borsh-derive-internal",
"borsh-schema-derive-internal",
"proc-macro-crate",
"proc-macro2",
"syn",
]
[[package]]
name = "borsh-derive-internal"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2104c73179359431cc98e016998f2f23bc7a05bc53e79741bcba705f30047bc"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "borsh-schema-derive-internal"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae29eb8418fcd46f723f8691a2ac06857d31179d33d2f2d91eb13967de97c728"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "bridge"
version = "0.1.0"
dependencies = [
"borsh",
"byteorder",
"primitive-types",
"sha3",
"solana-program",
"solitaire",
]
[[package]]
name = "bs58"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb"
[[package]]
name = "bv"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8834bb1d8ee5dc048ee3124f2c7c1afcc6bc9aed03f11e9dfd8c69470a5db340"
dependencies = [
"feature-probe",
"serde",
]
[[package]]
name = "byteorder"
version = "1.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
[[package]]
name = "cc"
version = "1.0.68"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787"
[[package]]
name = "cfg-if"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "constant_time_eq"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc"
[[package]]
name = "cpufeatures"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "66c99696f6c9dd7f35d486b9d04d7e6e202aa3e8c40d553f2fdf5e7e0c6a71ef"
dependencies = [
"libc",
]
[[package]]
name = "crunchy"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
[[package]]
name = "crypto-mac"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab"
dependencies = [
"generic-array 0.14.4",
"subtle",
]
[[package]]
name = "curve25519-dalek"
version = "2.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "434e1720189a637d44fe464f4df1e6eb900b4835255b14354497c78af37d9bb8"
dependencies = [
"byteorder",
"digest 0.8.1",
"rand_core",
"subtle",
"zeroize",
]
[[package]]
name = "digest"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5"
dependencies = [
"generic-array 0.12.4",
]
[[package]]
name = "digest"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066"
dependencies = [
"generic-array 0.14.4",
]
[[package]]
name = "either"
version = "1.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
[[package]]
name = "env_logger"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3"
dependencies = [
"atty",
"humantime",
"log",
"regex",
"termcolor",
]
[[package]]
name = "feature-probe"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "835a3dc7d1ec9e75e2b5fb4ba75396837112d2060b03f7d43bc1897c7f7211da"
[[package]]
name = "fixed-hash"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c"
dependencies = [
"static_assertions",
]
[[package]]
name = "generic-array"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd"
dependencies = [
"typenum",
]
[[package]]
name = "generic-array"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817"
dependencies = [
"serde",
"typenum",
"version_check",
]
[[package]]
name = "getrandom"
version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce"
dependencies = [
"cfg-if 1.0.0",
"libc",
"wasi",
]
[[package]]
name = "hashbrown"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
dependencies = [
"ahash",
]
[[package]]
name = "hermit-abi"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c"
dependencies = [
"libc",
]
[[package]]
name = "hex"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
[[package]]
name = "humantime"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
[[package]]
name = "itertools"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b"
dependencies = [
"either",
]
[[package]]
name = "keccak"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7"
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.97"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6"
[[package]]
name = "log"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
dependencies = [
"cfg-if 1.0.0",
]
[[package]]
name = "memchr"
version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc"
[[package]]
name = "num-derive"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "num-traits"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
dependencies = [
"autocfg",
]
[[package]]
name = "opaque-debug"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5"
[[package]]
name = "ppv-lite86"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
[[package]]
name = "primitive-types"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2415937401cb030a2a0a4d922483f945fa068f52a7dbb22ce0fe5f2b6f6adace"
dependencies = [
"fixed-hash",
"uint",
]
[[package]]
name = "proc-macro-crate"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785"
dependencies = [
"toml",
]
[[package]]
name = "proc-macro2"
version = "1.0.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038"
dependencies = [
"unicode-xid",
]
[[package]]
name = "pyth-client"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "731e2d5b2b790fc676518b29e41dddf7f69f23c61f27ab25cc9ae5b75ee190ad"
[[package]]
name = "pyth2wormhole"
version = "0.1.0"
dependencies = [
"borsh",
"bridge",
"pyth-client",
"rocksalt",
"solana-program",
"solitaire",
]
[[package]]
name = "quote"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rand"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
dependencies = [
"getrandom",
"libc",
"rand_chacha",
"rand_core",
"rand_hc",
]
[[package]]
name = "rand_chacha"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
dependencies = [
"getrandom",
]
[[package]]
name = "rand_hc"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
dependencies = [
"rand_core",
]
[[package]]
name = "regex"
version = "1.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.6.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"
[[package]]
name = "rocksalt"
version = "0.1.0"
dependencies = [
"byteorder",
"proc-macro2",
"quote",
"sha3",
"solana-program",
"syn",
]
[[package]]
name = "rustc_version"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
dependencies = [
"semver",
]
[[package]]
name = "rustversion"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "61b3909d758bb75c79f23d4736fac9433868679d3ad2ea7a61e3c25cfda9a088"
[[package]]
name = "semver"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
dependencies = [
"semver-parser",
]
[[package]]
name = "semver-parser"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
[[package]]
name = "serde"
version = "1.0.126"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_bytes"
version = "0.11.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16ae07dd2f88a366f15bd0632ba725227018c69a1c8550a927324f8eb8368bb9"
dependencies = [
"serde",
]
[[package]]
name = "serde_derive"
version = "1.0.126"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "sha2"
version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b362ae5752fd2137731f9fa25fd4d9058af34666ca1966fb969119cc35719f12"
dependencies = [
"block-buffer",
"cfg-if 1.0.0",
"cpufeatures",
"digest 0.9.0",
"opaque-debug",
]
[[package]]
name = "sha3"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809"
dependencies = [
"block-buffer",
"digest 0.9.0",
"keccak",
"opaque-debug",
]
[[package]]
name = "solana-frozen-abi"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b81e60d88b1fe0322bba6f3fe6b0d7299df2f2ededa8d95ec77b934fabb967b"
dependencies = [
"bs58",
"bv",
"generic-array 0.14.4",
"log",
"memmap2",
"rustc_version",
"serde",
"serde_derive",
"sha2",
"solana-frozen-abi-macro",
"solana-logger",
"thiserror",
]
[[package]]
name = "solana-frozen-abi-macro"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f617daa0187bcc4665d63fcf9454c998e9cdad6a33181f6214558d738230bfe2"
dependencies = [
"proc-macro2",
"quote",
"rustc_version",
"syn",
]
[[package]]
name = "solana-logger"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b3e2b14bdcbb7b41de9ef5a541ac501ba3fbd07999cbcf7ea9006b3ae28b67b"
dependencies = [
"env_logger",
"lazy_static",
"log",
]
[[package]]
name = "solana-program"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c5d59f9d358c09db6461fae1fde6075a456685d856c004ef21af092a830e4e7"
dependencies = [
"bincode",
"blake3",
"borsh",
"borsh-derive",
"bs58",
"bv",
"curve25519-dalek",
"hex",
"itertools",
"lazy_static",
"log",
"num-derive",
"num-traits",
"rand",
"rustc_version",
"rustversion",
"serde",
"serde_bytes",
"serde_derive",
"sha2",
"sha3",
"solana-frozen-abi",
"solana-frozen-abi-macro",
"solana-logger",
"solana-sdk-macro",
"thiserror",
]
[[package]]
name = "solana-sdk-macro"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d27426b2a09676929c5e49df96967bbcffff003183c11a3c3ef11d78bac4aaaa"
dependencies = [
"bs58",
"proc-macro2",
"quote",
"rustversion",
"syn",
]
[[package]]
name = "solitaire"
version = "0.1.0"
dependencies = [
"borsh",
"byteorder",
"rocksalt",
"sha3",
"solana-program",
]
[[package]]
name = "static_assertions"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "subtle"
version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2"
[[package]]
name = "syn"
version = "1.0.73"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f71489ff30030d2ae598524f61326b902466f72a0fb1a8564c001cc63425bcc7"
dependencies = [
"proc-macro2",
"quote",
"unicode-xid",
]
[[package]]
name = "termcolor"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4"
dependencies = [
"winapi-util",
]
[[package]]
name = "thiserror"
version = "1.0.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa6f76457f59514c7eeb4e59d891395fab0b2fd1d40723ae737d64153392e9c6"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a36768c0fbf1bb15eca10defa29526bda730a2376c2ab4393ccfa16fb1a318d"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "toml"
version = "0.5.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa"
dependencies = [
"serde",
]
[[package]]
name = "typenum"
version = "1.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "879f6906492a7cd215bfa4cf595b600146ccfac0c79bcbd1f3000162af5e8b06"
[[package]]
name = "uint"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e11fe9a9348741cf134085ad57c249508345fe16411b3d7fb4ff2da2f1d6382e"
dependencies = [
"byteorder",
"crunchy",
"hex",
"static_assertions",
]
[[package]]
name = "unicode-xid"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
[[package]]
name = "version_check"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe"
[[package]]
name = "wasi"
version = "0.9.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
dependencies = [
"winapi",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "zeroize"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd"

View File

@ -1,28 +0,0 @@
[package]
name = "pyth-wormhole-attester"
version = "2.0.1"
description = "Pyth-over-Wormhole Solana contract"
edition = "2018"
[lib]
crate-type = ["cdylib", "lib"]
name = "pyth_wormhole_attester"
[features]
default = ["wormhole-bridge-solana/no-entrypoint"]
client = ["solitaire/client", "no-entrypoint"]
trace = ["solitaire/trace", "wormhole-bridge-solana/trace"]
no-entrypoint = []
[dependencies]
wormhole-bridge-solana = { git = "https://github.com/wormhole-foundation/wormhole", tag = "v2.14.8" }
solitaire = { git = "https://github.com/wormhole-foundation/wormhole", tag = "v2.14.8"}
rocksalt = { git = "https://github.com/wormhole-foundation/wormhole", tag = "v2.14.8"}
solana-program = "=1.10.31"
borsh = "=0.9.3"
pyth-client = "0.2.2"
pyth-wormhole-attester-sdk = { path = "../sdk/rust", features = ["solana"] }
serde = { version = "1", optional = true}
serde_derive = { version = "1", optional = true}
serde_json = { version = "1", optional = true}
pyth-sdk-solana = { version = "0.5.0" }

View File

@ -1,407 +0,0 @@
use {
crate::{
attestation_state::AttestationStatePDA,
config::P2WConfigAccount,
error::AttesterCustomError,
message::{
P2WMessage,
P2WMessageDrvData,
},
},
borsh::{
BorshDeserialize,
BorshSerialize,
},
bridge::{
accounts::BridgeData,
types::ConsistencyLevel,
},
pyth_sdk_solana::state::PriceStatus,
pyth_wormhole_attester_sdk::{
BatchPriceAttestation,
Identifier,
P2WEmitter,
PriceAttestation,
},
solana_program::{
clock::Clock,
program::invoke_signed,
program_error::ProgramError,
rent::Rent,
},
solitaire::{
trace,
AccountState,
ExecutionContext,
FromAccounts,
Info,
Keyed,
Mut,
Peel,
Result as SoliResult,
Seeded,
Signer,
SolitaireError,
Sysvar,
},
};
/// Important: must be manually maintained until Solitaire natively
/// supports variable-length vectors.
///
/// The number must reflect how many pyth state/price pairs are
/// expected in the Attest struct below. The constant itself is only
/// used in the on-chain config in order for attesters to learn the
/// correct value dynamically.
pub const P2W_MAX_BATCH_SIZE: u16 = 5;
#[derive(FromAccounts)]
pub struct Attest<'b> {
// Payer also used for wormhole
pub payer: Mut<Signer<Info<'b>>>,
pub system_program: Info<'b>,
pub config: P2WConfigAccount<'b, { AccountState::Initialized }>,
// Hardcoded state/price pairs, bypassing Solitaire's variable-length limitations
// Any change to the number of accounts must include an appropriate change to P2W_MAX_BATCH_SIZE
pub pyth_state: Mut<AttestationStatePDA<'b>>,
pub pyth_price: Info<'b>,
pub pyth_state2: Option<Mut<AttestationStatePDA<'b>>>,
pub pyth_price2: Option<Info<'b>>,
pub pyth_state3: Option<Mut<AttestationStatePDA<'b>>>,
pub pyth_price3: Option<Info<'b>>,
pub pyth_state4: Option<Mut<AttestationStatePDA<'b>>>,
pub pyth_price4: Option<Info<'b>>,
pub pyth_state5: Option<Mut<AttestationStatePDA<'b>>>,
pub pyth_price5: Option<Info<'b>>,
// Did you read the comment near `pyth_state`?
// pub pyth_state6: Option<Mut<AttestationStatePDA<'b>>>,
// pub pyth_price6: Option<Info<'b>>,
// pub pyth_state7: Option<Mut<AttestationStatePDA<'b>>>,
// pub pyth_price7: Option<Info<'b>>,
// pub pyth_state8: Option<Mut<AttestationStatePDA<'b>>>,
// pub pyth_price8: Option<Info<'b>>,
// pub pyth_state9: Option<Mut<AttestationStatePDA<'b>>>,
// pub pyth_price9: Option<Info<'b>>,
// pub pyth_state10: Option<Mut<AttestationStatePDA<'b>>>,
// pub pyth_price10: Option<Info<'b>>,
pub clock: Sysvar<'b, Clock>,
/// Wormhole program address - must match the config value
pub wh_prog: Info<'b>,
// wormhole's post_message_unreliable accounts
//
// This contract makes no attempt to exhaustively validate
// Wormhole's account inputs. Only the wormhole contract address
// is validated (see above).
/// Bridge config needed for fee calculation
pub wh_bridge: Mut<Info<'b>>,
/// Account to store the posted message.
/// This account is a PDA from the attestation contract
/// which is owned by the wormhole core contract.
pub wh_message: Mut<Info<'b>>,
/// Emitter of the VAA
pub wh_emitter: P2WEmitter<'b>,
/// Tracker for the emitter sequence
pub wh_sequence: Mut<Info<'b>>,
// We reuse our payer
// pub wh_payer: Mut<Signer<Info<'b>>>,
/// Account to collect tx fee
pub wh_fee_collector: Mut<Info<'b>>,
pub wh_rent: Sysvar<'b, Rent>,
}
#[derive(BorshDeserialize, BorshSerialize)]
pub struct AttestData {
pub consistency_level: ConsistencyLevel,
pub message_account_id: u64,
/// Fail the transaction if every symbol in this batch has been
/// attested more recently than the passed interval allows. This is
/// checked against the attestation times stored in the attestation
/// state, and lets each client contribute attestations only once
/// its desired interval has elapsed. If at least one symbol has
/// been waiting longer than this interval, the whole batch is
/// attested. 0 effectively disables this feature.
pub rate_limit_interval_secs: u32,
}
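// A minimal sketch of the rate-limit predicate that attest() applies below:
// a symbol stays under the limit only when both conditions fail. This helper
// is illustrative and not part of the on-chain entrypoint.
#[allow(dead_code)]
fn passes_rate_limit(
    trading_publish_time_diff: i64,
    attestation_time_diff: i64,
    rate_limit_interval_secs: u32,
) -> bool {
    trading_publish_time_diff >= rate_limit_interval_secs as i64
        || attestation_time_diff >= 2 * rate_limit_interval_secs as i64
}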
pub fn attest(ctx: &ExecutionContext, accs: &mut Attest, data: AttestData) -> SoliResult<()> {
if !accs.config.is_active {
// msg instead of trace makes sure we're not silent about this in prod
solana_program::msg!("This attester program is disabled!");
return Err(SolitaireError::Custom(4242));
}
accs.config.verify_derivation(ctx.program_id, None)?;
if accs.config.wh_prog != *accs.wh_prog.key {
trace!(&format!(
"Wormhole program account mismatch (expected {:?}, got {:?})",
accs.config.wh_prog, accs.wh_prog.key
));
return Err(ProgramError::InvalidAccountData.into());
}
// Make the specified prices iterable
let mut price_pair_opts = [
(Some(&mut accs.pyth_state), Some(&accs.pyth_price)),
(accs.pyth_state2.as_mut(), accs.pyth_price2.as_ref()),
(accs.pyth_state3.as_mut(), accs.pyth_price3.as_ref()),
(accs.pyth_state4.as_mut(), accs.pyth_price4.as_ref()),
(accs.pyth_state5.as_mut(), accs.pyth_price5.as_ref()),
// Did you read the comment near `pyth_state`?
// (accs.pyth_state6.as_mut(), accs.pyth_price6.as_ref()),
// (accs.pyth_state7.as_mut(), accs.pyth_price7.as_ref()),
// (accs.pyth_state8.as_mut(), accs.pyth_price8.as_ref()),
// (accs.pyth_state9.as_mut(), accs.pyth_price9.as_ref()),
// (accs.pyth_state10.as_mut(), accs.pyth_price10.as_ref()),
];
let price_pairs: Vec<(_, _)> = price_pair_opts
.iter_mut()
.filter_map(|pair| match pair {
// Only use this pair if both accounts are Some
(Some(state), Some(price)) => Some((state, price)),
_other => None,
})
.collect();
trace!("{} Pyth symbols received", price_pairs.len());
// Collect the validated symbols here for batch serialization
let mut attestations = Vec::with_capacity(price_pairs.len());
let this_attestation_time = accs.clock.unix_timestamp;
let mut over_rate_limit = true;
for (state, price) in price_pairs.into_iter() {
// Pyth must own the price
if accs.config.pyth_owner != *price.owner {
trace!(&format!(
"Price {:?}: owner pubkey mismatch (expected pyth_owner {:?}, got unknown price owner {:?})",
price, accs.config.pyth_owner, price.owner
));
return Err(SolitaireError::InvalidOwner(*price.owner));
}
// State pubkey must reproduce from the price id
let state_addr_from_price = AttestationStatePDA::key(price.key, ctx.program_id);
if state_addr_from_price != *state.0 .0.info().key {
trace!(&format!(
"Price {:?}: pubkey does not produce the passed state account (expected {:?} from seeds, {:?} was passed)",
price.key, state_addr_from_price, state.0 .0.info().key
));
return Err(ProgramError::InvalidAccountData.into());
}
let price_data_ref = price.try_borrow_data()?;
// Parse the upstream Pyth struct to extract current publish
// time for payload construction
let price_struct =
pyth_sdk_solana::state::load_price_account(&price_data_ref).map_err(|e| {
trace!(&e.to_string());
ProgramError::InvalidAccountData
})?;
// Retrieve and rotate last_attested_trading_publish_time.
// Pick the value to store for the next attestation of this
// symbol. We use the prev_ value if the symbol is not
// currently being traded; the oracle marks the last known
// trading timestamp with it.
let new_last_attested_trading_publish_time = match price_struct.agg.status {
PriceStatus::Trading => price_struct.timestamp,
_ => price_struct.prev_timestamp,
};
// Retrieve the timestamp saved during the previous
// attestation. Use the new_* value if no existing state is
// present on-chain.
let current_last_attested_trading_publish_time = if state.0 .0.is_initialized() {
// Use the existing on-chain value
state.0 .0 .1.last_attested_trading_publish_time
} else {
// Fall back to the new value if the state is not initialized
new_last_attested_trading_publish_time
};
// Build an attestation struct for this symbol using the value just decided
let attestation = PriceAttestation::from_pyth_price_struct(
Identifier::new(price.key.to_bytes()),
this_attestation_time,
current_last_attested_trading_publish_time,
price_struct,
);
// Evaluate the rate limit: attest only if enough time has passed since the last attestation
let trading_publish_time_diff =
new_last_attested_trading_publish_time - state.0 .0.last_attested_trading_publish_time;
let attestation_time_diff = this_attestation_time - state.0 .0.last_attestation_time;
// We rate-limit on the trading publish_time because that is the field the
// users consume. Also, when the price is not trading and trading_publish_time
// stays the same, we still want to send the prices (at a lower frequency).
if trading_publish_time_diff >= data.rate_limit_interval_secs as i64
|| attestation_time_diff >= 2 * data.rate_limit_interval_secs as i64
{
over_rate_limit = false;
} else {
trace!("Price {:?}: over rate limit", price.key);
}
// Save the new value for the next attestation of this symbol
state.0 .0.last_attested_trading_publish_time = new_last_attested_trading_publish_time;
// Update last attestation time
state.0 .0.last_attestation_time = this_attestation_time;
// handling of last_attested_trading_publish_time ends here
if !state.0 .0.is_initialized() {
// Serialize the state to learn account size for creation
let state_serialized = state.0 .0 .1.try_to_vec()?;
let seeds = state.self_bumped_seeds(price.key, ctx.program_id);
solitaire::create_account(
ctx,
state.0 .0.info(),
accs.payer.key,
solitaire::CreationLamports::Exempt,
state_serialized.len(),
ctx.program_id,
solitaire::IsSigned::SignedWithSeeds(&[seeds
.iter()
.map(|s| s.as_slice())
.collect::<Vec<_>>()
.as_slice()]),
)?;
trace!("Attestation state init OK");
}
attestations.push(attestation);
}
// Do not proceed if none of the symbols is under the rate limit
if over_rate_limit {
trace!("All symbols over limit, bailing out");
return Err(
ProgramError::Custom(AttesterCustomError::AttestRateLimitReached as u32).into(),
);
}
let batch_attestation = BatchPriceAttestation {
price_attestations: attestations,
};
trace!("Attestations successfully created");
let bridge_config = BridgeData::try_from_slice(&accs.wh_bridge.try_borrow_mut_data()?)?.config;
// Pay wormhole fee
let transfer_ix = solana_program::system_instruction::transfer(
accs.payer.key,
accs.wh_fee_collector.info().key,
bridge_config.fee,
);
solana_program::program::invoke(&transfer_ix, ctx.accounts)?;
let payload = batch_attestation.serialize().map_err(|e| {
trace!(&e.to_string());
ProgramError::InvalidAccountData
})?;
let wh_msg_drv_data = P2WMessageDrvData {
message_owner: *accs.payer.key,
batch_size: batch_attestation.price_attestations.len() as u16,
id: data.message_account_id,
};
if !P2WMessage::key(&wh_msg_drv_data, ctx.program_id).eq(accs.wh_message.info().key) {
trace!(
"Invalid seeds for wh message pubkey. Expected {} with given seeds {:?}, got {}",
P2WMessage::key(&wh_msg_drv_data, ctx.program_id),
P2WMessage::seeds(&wh_msg_drv_data)
.iter_mut()
.map(|seed| seed.as_slice())
.collect::<Vec<_>>()
.as_slice(),
accs.wh_message.info().key
);
return Err(ProgramError::InvalidSeeds.into());
}
let ix = bridge::instructions::post_message_unreliable(
*accs.wh_prog.info().key,
*accs.payer.info().key,
*accs.wh_emitter.info().key,
*accs.wh_message.info().key,
0,
payload,
data.consistency_level,
)?;
trace!(&format!(
"Cross-call Seeds: {:?}",
[
// message seeds
P2WMessage::seeds(&wh_msg_drv_data)
.iter_mut()
.map(|seed| seed.as_slice())
.collect::<Vec<_>>()
.as_slice(),
// emitter seeds
P2WEmitter::seeds(None)
.iter_mut()
.map(|seed| seed.as_slice())
.collect::<Vec<_>>()
.as_slice(),
]
));
trace!("attest() finished, cross-calling wormhole");
invoke_signed(
&ix,
ctx.accounts,
[
// message seeds
P2WMessage::bumped_seeds(&wh_msg_drv_data, ctx.program_id)
.iter_mut()
.map(|seed| seed.as_slice())
.collect::<Vec<_>>()
.as_slice(),
// emitter seeds
P2WEmitter::bumped_seeds(None, ctx.program_id)
.iter_mut()
.map(|seed| seed.as_slice())
.collect::<Vec<_>>()
.as_slice(),
]
.as_slice(),
)?;
Ok(())
}

View File

@ -1,62 +0,0 @@
//! Implementation of per-symbol on-chain state. Currently used to
//! store the latest successful attestation time for each price.
use {
borsh::{
BorshDeserialize,
BorshSerialize,
},
solana_program::{
clock::UnixTimestamp,
pubkey::Pubkey,
},
solitaire::{
AccountOwner,
AccountState,
Data,
Owned,
Peel,
Seeded,
},
};
/// On-chain state for a single price attestation
#[derive(BorshSerialize, BorshDeserialize, Default)]
pub struct AttestationState {
/// The last trading publish_time this attester saw
pub last_attested_trading_publish_time: UnixTimestamp,
/// The last time this symbol was attested
pub last_attestation_time: UnixTimestamp,
}
impl Owned for AttestationState {
fn owner(&self) -> AccountOwner {
AccountOwner::This
}
}
pub struct AttestationStatePDA<'b>(
pub Data<'b, AttestationState, { AccountState::MaybeInitialized }>,
);
impl Seeded<&Pubkey> for AttestationStatePDA<'_> {
fn seeds(symbol_id: &Pubkey) -> Vec<Vec<u8>> {
vec![
"p2w-attestation-state-v1".as_bytes().to_vec(),
symbol_id.to_bytes().to_vec(),
]
}
}
impl<'a, 'b: 'a> Peel<'a, 'b> for AttestationStatePDA<'b> {
fn peel<I>(ctx: &mut solitaire::Context<'a, 'b, I>) -> solitaire::Result<Self>
where
Self: Sized,
{
Ok(Self(Data::peel(ctx)?))
}
fn persist(&self, program_id: &Pubkey) -> solitaire::Result<()> {
self.0.persist(program_id)
}
}
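// A minimal off-chain sketch, assuming Solitaire's Seeded::key resolves to
// find_program_address over the seeds listed above: derive the attestation
// state address for a given price account.
#[allow(dead_code)]
fn attestation_state_address(price: &Pubkey, program_id: &Pubkey) -> Pubkey {
    Pubkey::find_program_address(&[b"p2w-attestation-state-v1", price.as_ref()], program_id).0
}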

View File

@ -1,169 +0,0 @@
//! On-chain state for the pyth2wormhole SOL contract.
//!
//! Important: Changes to max batch size must be reflected in the
//! instruction logic in attest.rs (look there for more
//! details). Mismatches between config and contract logic may confuse
//! attesters.
//!
//! How to add a new config schema:
//! X - new config version number
//! Y = X - 1; previous config number
//! 1. Add a next Pyth2WormholeConfigVX struct,
//! e.g. Pyth2WormholeConfigV3,
//! 2. Add a P2WConfigAccountVX type alias with a unique seed str
//! 3. Implement From<Pyth2WormholeConfigVY> for the new struct,
//! e.g. From<Pyth2WormholeConfigV2> for Pyth2WormholeConfigV3
//! 4. Advance Pyth2WormholeConfig, P2WConfigAccount,
//! OldPyth2WormholeConfig, OldP2WConfigAccount typedefs to use the
//! previous and new config structs.
//! 5. Deploy and call migrate() to verify
//! 6. (optional) Remove/comment out config structs and aliases from
//! before version Y.
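//! A hypothetical V4 following these steps is sketched at the end of this file.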
use {
borsh::{
BorshDeserialize,
BorshSerialize,
},
solana_program::pubkey::Pubkey,
solitaire::{
processors::seeded::AccountOwner,
AccountState,
Data,
Derive,
Owned,
},
};
/// Aliases for current config schema (to migrate into)
pub type Pyth2WormholeConfig = Pyth2WormholeConfigV3;
pub type P2WConfigAccount<'b, const IS_INITIALIZED: AccountState> =
P2WConfigAccountV3<'b, IS_INITIALIZED>;
impl Owned for Pyth2WormholeConfig {
fn owner(&self) -> AccountOwner {
AccountOwner::This
}
}
/// Aliases for previous config schema (to migrate from)
pub type OldPyth2WormholeConfig = Pyth2WormholeConfigV2;
pub type OldP2WConfigAccount<'b> = P2WConfigAccountV2<'b, { AccountState::Initialized }>; // Old config must always be initialized
impl Owned for OldPyth2WormholeConfig {
fn owner(&self) -> AccountOwner {
AccountOwner::This
}
}
/// Initial config format
#[derive(Clone, Default, BorshDeserialize, BorshSerialize)]
#[cfg_attr(feature = "client", derive(Debug))]
pub struct Pyth2WormholeConfigV1 {
/// Authority owning this contract
pub owner: Pubkey,
/// Wormhole bridge program
pub wh_prog: Pubkey,
/// Authority owning Pyth price data
pub pyth_owner: Pubkey,
pub max_batch_size: u16,
}
pub type P2WConfigAccountV1<'b, const IS_INITIALIZED: AccountState> =
Derive<Data<'b, Pyth2WormholeConfigV1, { IS_INITIALIZED }>, "pyth2wormhole-config">;
/// Added is_active
#[derive(Clone, Default, BorshDeserialize, BorshSerialize)]
#[cfg_attr(feature = "client", derive(Debug))]
pub struct Pyth2WormholeConfigV2 {
/// Authority owning this contract
pub owner: Pubkey,
/// Wormhole bridge program
pub wh_prog: Pubkey,
/// Authority owning Pyth price data
pub pyth_owner: Pubkey,
/// How many product/price pairs can be sent and attested at once
///
/// Important: Whenever the corresponding logic in attest.rs
/// changes its expected number of symbols per batch, this config
/// must be updated accordingly on-chain.
pub max_batch_size: u16,
/// If set to false, attest() will reject all calls unconditionally
pub is_active: bool,
}
/// Note: If you get stuck with a pre-existing config account
/// (e.g. someone transfers into a PDA that we're not using yet), it's
/// usually easier to change the seed slightly
/// (e.g. pyth2wormhole-config-v2 -> pyth2wormhole-config-v2.1). This
/// saves a lot of time coding around this edge case.
pub type P2WConfigAccountV2<'b, const IS_INITIALIZED: AccountState> =
Derive<Data<'b, Pyth2WormholeConfigV2, { IS_INITIALIZED }>, "pyth2wormhole-config-v2.1">;
impl From<Pyth2WormholeConfigV1> for Pyth2WormholeConfigV2 {
fn from(old: Pyth2WormholeConfigV1) -> Self {
let Pyth2WormholeConfigV1 {
owner,
wh_prog,
pyth_owner,
max_batch_size,
} = old;
Self {
owner,
wh_prog,
pyth_owner,
max_batch_size,
is_active: true,
}
}
}
// Added ops_owner which can toggle the is_active field
#[derive(Clone, Default, Hash, BorshDeserialize, BorshSerialize, PartialEq, Eq)]
#[cfg_attr(feature = "client", derive(Debug))]
pub struct Pyth2WormholeConfigV3 {
/// Authority owning this contract
pub owner: Pubkey,
/// Wormhole bridge program
pub wh_prog: Pubkey,
/// Authority owning Pyth price data
pub pyth_owner: Pubkey,
/// How many product/price pairs can be sent and attested at once
///
/// Important: Whenever the corresponding logic in attest.rs
/// changes its expected number of symbols per batch, this config
/// must be updated accordingly on-chain.
pub max_batch_size: u16,
/// If set to false, attest() will reject all calls unconditionally
pub is_active: bool,
// If the ops_owner exists, it can toggle the value of `is_active`
pub ops_owner: Option<Pubkey>,
}
pub type P2WConfigAccountV3<'b, const IS_INITIALIZED: AccountState> =
Derive<Data<'b, Pyth2WormholeConfigV3, { IS_INITIALIZED }>, "pyth2wormhole-config-v3">;
impl From<Pyth2WormholeConfigV2> for Pyth2WormholeConfigV3 {
fn from(old: Pyth2WormholeConfigV2) -> Self {
let Pyth2WormholeConfigV2 {
owner,
wh_prog,
pyth_owner,
max_batch_size,
is_active: _,
} = old;
Self {
owner,
wh_prog,
pyth_owner,
max_batch_size,
is_active: true,
ops_owner: None,
}
}
}
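// A hypothetical sketch, following the numbered steps in the module docs
// above, of what a next schema version might look like. Everything here is
// an illustrative assumption (the `fee_payer` field included), not part of
// the deployed contract.
#[derive(Clone, Default, BorshDeserialize, BorshSerialize)]
#[cfg_attr(feature = "client", derive(Debug))]
pub struct Pyth2WormholeConfigV4 {
    pub owner: Pubkey,
    pub wh_prog: Pubkey,
    pub pyth_owner: Pubkey,
    pub max_batch_size: u16,
    pub is_active: bool,
    pub ops_owner: Option<Pubkey>,
    /// Hypothetical new field introduced by V4.
    pub fee_payer: Option<Pubkey>,
}
// Step 2: a config account alias with a unique seed string.
pub type P2WConfigAccountV4<'b, const IS_INITIALIZED: AccountState> =
    Derive<Data<'b, Pyth2WormholeConfigV4, { IS_INITIALIZED }>, "pyth2wormhole-config-v4">;
// Step 3: migration from the previous schema.
// Steps 4-6 (advancing the current/old aliases and their Owned impls,
// deploying, and calling migrate()) are omitted from this sketch.
impl From<Pyth2WormholeConfigV3> for Pyth2WormholeConfigV4 {
    fn from(old: Pyth2WormholeConfigV3) -> Self {
        let Pyth2WormholeConfigV3 {
            owner,
            wh_prog,
            pyth_owner,
            max_batch_size,
            is_active,
            ops_owner,
        } = old;
        Self {
            owner,
            wh_prog,
            pyth_owner,
            max_batch_size,
            is_active,
            ops_owner,
            fee_payer: None,
        }
    }
}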

View File

@ -1,6 +0,0 @@
/// Append-only custom error list.
#[repr(u32)]
pub enum AttesterCustomError {
/// Explicitly checked for in client code, change carefully
AttestRateLimitReached = 13,
}
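// A minimal sketch of the client-side check mentioned above, assuming the
// error surfaces to the client as a custom program error code (the exact
// wrapper type is client-specific and not part of this crate):
#[allow(dead_code)]
pub fn is_rate_limit_error_code(code: u32) -> bool {
    code == AttesterCustomError::AttestRateLimitReached as u32
}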

View File

@ -1,39 +0,0 @@
use {
crate::config::{
P2WConfigAccount,
Pyth2WormholeConfig,
},
solitaire::{
trace,
AccountState,
CreationLamports,
ExecutionContext,
FromAccounts,
Info,
Keyed,
Mut,
Peel,
Result as SoliResult,
Signer,
},
};
#[derive(FromAccounts)]
pub struct Initialize<'b> {
pub new_config: Mut<P2WConfigAccount<'b, { AccountState::Uninitialized }>>,
pub payer: Mut<Signer<Info<'b>>>,
pub system_program: Info<'b>,
}
/// Must be called right after deployment
pub fn initialize(
ctx: &ExecutionContext,
accs: &mut Initialize,
data: Pyth2WormholeConfig,
) -> SoliResult<()> {
accs.new_config
.create(ctx, accs.payer.info().key, CreationLamports::Exempt)?;
accs.new_config.1 = data;
Ok(())
}

View File

@ -1,46 +0,0 @@
#![allow(incomplete_features)]
#![feature(adt_const_params)]
pub mod attest;
pub mod attestation_state;
pub mod config;
pub mod error;
pub mod initialize;
pub mod message;
pub mod migrate;
pub mod set_config;
pub mod set_is_active;
use solitaire::solitaire;
pub use {
attest::{
attest,
Attest,
AttestData,
},
config::Pyth2WormholeConfig,
initialize::{
initialize,
Initialize,
},
migrate::{
migrate,
Migrate,
},
pyth_client,
set_config::{
set_config,
SetConfig,
},
set_is_active::{
set_is_active,
SetIsActive,
},
};
solitaire! {
Attest => attest,
Initialize => initialize,
SetConfig => set_config,
Migrate => migrate,
SetIsActive => set_is_active
}

View File

@ -1,53 +0,0 @@
//! Index-based PDA for storing unreliable wormhole message
//!
//! The main goal of this PDA is to take advantage of wormhole message
//! reuse securely. This is achieved by tying the account derivation
//! data to the payer account of the attest() instruction. Inside
//! attest(), payer must be a signer, and the message account must be
//! derived with their address as message_owner in
//! `P2WMessageDrvData`.
use {
borsh::{
BorshDeserialize,
BorshSerialize,
},
bridge::PostedMessageUnreliable,
solana_program::pubkey::Pubkey,
solitaire::{
processors::seeded::Seeded,
AccountState,
Mut,
},
};
pub type P2WMessage<'a> = Mut<PostedMessageUnreliable<'a, { AccountState::MaybeInitialized }>>;
#[derive(BorshDeserialize, BorshSerialize)]
pub struct P2WMessageDrvData {
/// The key owning this message account
pub message_owner: Pubkey,
/// Size of the batch. It is important that all messages have the same size
///
/// NOTE: 2022-09-05
/// Currently wormhole does not resize accounts if they have different
/// payload sizes; this (along with versioning the seed literal below) is
/// a workaround to have different PDAs for different batch sizes.
pub batch_size: u16,
/// Index for keeping many accounts per owner
pub id: u64,
}
impl<'a> Seeded<&P2WMessageDrvData> for P2WMessage<'a> {
fn seeds(data: &P2WMessageDrvData) -> Vec<Vec<u8>> {
vec![
// See the note at 2022-09-05 above.
// Change the version in the literal whenever you change the
// price attestation data.
"p2w-message-v2".as_bytes().to_vec(),
data.message_owner.to_bytes().to_vec(),
data.batch_size.to_be_bytes().to_vec(),
data.id.to_be_bytes().to_vec(),
]
}
}
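
The same derivation can be reproduced client-side. A minimal sketch mirroring `seeds()` above (placeholder program id; the 2- and 8-byte big-endian widths follow the `u16`/`u64` fields of `P2WMessageDrvData`):

```typescript
import { PublicKey } from "@solana/web3.js";

// Placeholder id: substitute the deployed attester program id.
const P2W_PROGRAM_ID = new PublicKey("11111111111111111111111111111111");

function deriveP2WMessage(
  messageOwner: PublicKey,
  batchSize: number, // u16 in P2WMessageDrvData
  id: bigint // u64 in P2WMessageDrvData
): PublicKey {
  const batchBuf = Buffer.alloc(2);
  batchBuf.writeUInt16BE(batchSize);
  const idBuf = Buffer.alloc(8);
  idBuf.writeBigUInt64BE(id);
  const [address] = PublicKey.findProgramAddressSync(
    [
      Buffer.from("p2w-message-v2"), // versioned seed literal, see note above
      messageOwner.toBuffer(),
      batchBuf,
      idBuf,
    ],
    P2W_PROGRAM_ID
  );
  return address;
}
```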

View File

@ -1,98 +0,0 @@
//! Instruction used to migrate on-chain configuration from an older format
use {
crate::config::{
OldP2WConfigAccount,
OldPyth2WormholeConfig,
P2WConfigAccount,
Pyth2WormholeConfig,
},
solana_program::{
program_error::ProgramError,
system_program,
},
solitaire::{
trace,
AccountState,
CreationLamports,
ExecutionContext,
FromAccounts,
Info,
Keyed,
Mut,
Peel,
Result as SoliResult,
Signer,
SolitaireError,
},
};
/// Migration accounts meant to evolve with subsequent config accounts
///
/// NOTE: This account struct assumes Solitaire is able to validate the
/// Uninitialized requirement on the new_config account
#[derive(FromAccounts)]
pub struct Migrate<'b> {
/// New config account to be populated. Must be unused.
pub new_config: Mut<P2WConfigAccount<'b, { AccountState::Uninitialized }>>,
/// Old config using the previous format.
pub old_config: Mut<OldP2WConfigAccount<'b>>,
/// Current owner authority of the program
pub current_owner: Mut<Signer<Info<'b>>>,
/// Payer account for updating the account data
pub payer: Mut<Signer<Info<'b>>>,
/// For creating the new config account
pub system_program: Info<'b>,
}
pub fn migrate(ctx: &ExecutionContext, accs: &mut Migrate, _data: ()) -> SoliResult<()> {
let old_config: &OldPyth2WormholeConfig = &accs.old_config.1;
if &old_config.owner != accs.current_owner.info().key {
trace!(
"Current config owner account mismatch (expected {:?})",
old_config.owner
);
return Err(SolitaireError::InvalidSigner(
*accs.current_owner.info().key,
));
}
if *accs.system_program.key != system_program::id() {
trace!(
"Invalid system program, expected {:?}), found {}",
system_program::id(),
accs.system_program.key
);
return Err(SolitaireError::InvalidSigner(*accs.system_program.key));
}
// Populate new config
accs.new_config
.create(ctx, accs.payer.info().key, CreationLamports::Exempt)?;
accs.new_config.1 = Pyth2WormholeConfig::from(old_config.clone());
// Reclaim old config lamports
// Save current balance
let old_config_balance_val: u64 = accs.old_config.info().lamports();
// Drain old config
**accs.old_config.info().lamports.borrow_mut() = 0;
// Credit payer with saved balance
let new_payer_balance = accs
.payer
.info()
.lamports
.borrow_mut()
.checked_add(old_config_balance_val)
.ok_or_else(|| {
trace!("Overflow on payer balance increase");
SolitaireError::ProgramError(ProgramError::Custom(0xDEADBEEF))
})?;
**accs.payer.info().lamports.borrow_mut() = new_payer_balance;
Ok(())
}

View File

@ -1,85 +0,0 @@
use {
crate::config::{
P2WConfigAccount,
Pyth2WormholeConfig,
},
borsh::BorshSerialize,
solana_program::{
program::invoke,
rent::Rent,
system_instruction,
sysvar::Sysvar,
},
solitaire::{
trace,
AccountState,
ExecutionContext,
FromAccounts,
Info,
Keyed,
Mut,
Peel,
Result as SoliResult,
Signer,
SolitaireError,
},
};
#[derive(FromAccounts)]
pub struct SetConfig<'b> {
/// Current config used by the program
pub config: Mut<P2WConfigAccount<'b, { AccountState::Initialized }>>,
/// Current owner authority of the program
pub current_owner: Mut<Signer<Info<'b>>>,
/// Payer account for updating the account data
pub payer: Mut<Signer<Info<'b>>>,
/// Used for rent adjustment transfer
pub system_program: Info<'b>,
}
/// Alters the current settings of pyth2wormhole
pub fn set_config(
ctx: &ExecutionContext,
accs: &mut SetConfig,
data: Pyth2WormholeConfig,
) -> SoliResult<()> {
let cfg_struct: &Pyth2WormholeConfig = &accs.config; // unpack Data via nested Deref impls
if &cfg_struct.owner != accs.current_owner.info().key {
trace!(
"Current owner account mismatch (expected {:?})",
cfg_struct.owner
);
return Err(SolitaireError::InvalidSigner(
*accs.current_owner.info().key,
));
}
let old_size = accs.config.info().data_len();
let new_size = data.try_to_vec()?.len();
// Realloc if mismatched
if old_size != new_size {
accs.config.info().realloc(new_size, false)?;
}
accs.config.1 = data;
// Adjust lamports
let acc_lamports = accs.config.info().lamports();
let new_lamports = Rent::get()?.minimum_balance(new_size);
let diff_lamports: u64 = (acc_lamports as i64 - new_lamports as i64).unsigned_abs();
if acc_lamports < new_lamports {
// Less than enough lamports, debit the payer
let transfer_ix = system_instruction::transfer(
accs.payer.info().key,
accs.config.info().key,
diff_lamports,
);
invoke(&transfer_ix, ctx.accounts)?;
}
Ok(())
}
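
The rent adjustment above compares the account's balance to the rent-exempt minimum for the new size. The same quantity can be computed off-chain; a minimal sketch (the connection and addresses are assumed inputs):

```typescript
import { Connection, PublicKey } from "@solana/web3.js";

// Returns how many lamports the payer would be debited by set_config,
// i.e. the shortfall against Rent::get()?.minimum_balance(new_size).
async function configRentShortfall(
  connection: Connection,
  configAddress: PublicKey,
  newSize: number
): Promise<number> {
  const info = await connection.getAccountInfo(configAddress);
  const current = info?.lamports ?? 0;
  const required = await connection.getMinimumBalanceForRentExemption(newSize);
  return Math.max(required - current, 0);
}
```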

View File

@ -1,54 +0,0 @@
use {
crate::config::{
P2WConfigAccount,
Pyth2WormholeConfig,
},
solitaire::{
trace,
AccountState,
ExecutionContext,
FromAccounts,
Info,
Keyed,
Mut,
Peel,
Result as SoliResult,
Signer,
SolitaireError,
},
};
#[derive(FromAccounts)]
pub struct SetIsActive<'b> {
/// Current config used by the program
pub config: Mut<P2WConfigAccount<'b, { AccountState::Initialized }>>,
/// Ops owner authority permitted to toggle `is_active`
pub ops_owner: Mut<Signer<Info<'b>>>,
/// Payer account for updating the account data
pub payer: Mut<Signer<Info<'b>>>,
}
/// Toggles the `is_active` flag of pyth2wormhole
pub fn set_is_active(
_ctx: &ExecutionContext,
accs: &mut SetIsActive,
new_is_active: bool,
) -> SoliResult<()> {
let cfg_struct: &mut Pyth2WormholeConfig = &mut accs.config; // unpack Data via nested Deref impls
match &cfg_struct.ops_owner {
None => Err(SolitaireError::InvalidOwner(*accs.ops_owner.info().key)),
Some(current_ops_owner) => {
if current_ops_owner != accs.ops_owner.info().key {
trace!(
"Ops owner account mismatch (expected {:?})",
current_ops_owner
);
return Err(SolitaireError::InvalidOwner(*accs.ops_owner.info().key));
}
cfg_struct.is_active = new_is_active;
Ok(())
}
}
}

View File

@ -1,33 +0,0 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
# dependencies
/node_modules
/.pnp
.pnp.js
# testing
/coverage
# production
/build
# misc
.DS_Store
.env.local
.env.development.local
.env.test.local
.env.production.local
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# ethereum contracts
/contracts
/src/*-contracts/
# tsproto output
/src/proto
# build
/lib

View File

@ -1,17 +0,0 @@
# Pyth2wormhole SDK
This project contains a library for interacting with pyth2wormhole and adjacent APIs.
# Install
For now, the in-house dependencies are referenced by relative path; the
commands below build them. For an automated version of this process, see
the `p2w-integration-observer` Dockerfile or our [Tilt](https://tilt.dev)
devnet with `pyth` enabled.
```shell
# Run the commands in this README's directory for --prefix to work
$ npm --prefix ../../../ethereum ci && npm --prefix ../../../ethereum run build # ETH contracts
$ npm --prefix ../../../sdk/js ci # Wormhole SDK
$ npm ci && npm run build # Pyth2wormhole SDK
```
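
Once built, the SDK is consumed like any other workspace package. A minimal sketch (the payload bytes are a placeholder for a decoded attester VAA payload):

```typescript
import {
  parseBatchPriceAttestation,
  getBatchSummary,
} from "@pythnetwork/wormhole-attester-sdk";

// Placeholder: substitute the payload bytes of a real attester VAA.
const vaaPayload: Buffer = Buffer.from("deadbeef", "hex");

const batch = parseBatchPriceAttestation(vaaPayload);
console.log(getBatchSummary(batch));
```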

View File

@ -1,5 +0,0 @@
/** @type {import('ts-jest/dist/types').InitialOptionsTsJest} */
module.exports = {
preset: "ts-jest",
testEnvironment: "node",
};

View File

@ -1,46 +0,0 @@
{
"name": "@pythnetwork/wormhole-attester-sdk",
"version": "1.1.0",
"description": "Pyth Wormhole Attester SDk",
"private": "true",
"types": "lib/index.d.ts",
"main": "lib/index.js",
"files": [
"lib/**/*"
],
"scripts": {
"build": "tsc",
"format": "prettier --write \"src/**/*.ts\"",
"lint": "tslint -p tsconfig.json",
"test": "jest src/",
"postversion": "git push && git push --tags",
"preversion": "npm run lint",
"version": "npm run format && git add -A src"
},
"repository": {
"type": "git",
"url": "git+https://github.com/pyth-network/pyth-crosschain.git"
},
"author": "Pyth Data Association",
"license": "MIT",
"devDependencies": {
"@types/jest": "^29.4.0",
"@types/long": "^4.0.1",
"@types/node": "^16.6.1",
"copy-dir": "^1.3.0",
"find": "^0.3.0",
"jest": "^29.4.1",
"prettier": "^2.3.2",
"ts-jest": "^29.0.5",
"tslint": "^6.1.3",
"tslint-config-prettier": "^1.18.0",
"typescript": "^4.3.5"
},
"bugs": {
"url": "https://github.com/pyth-network/pyth-crosschain/issues"
},
"homepage": "https://github.com/pyth-network/pyth-crosschain#readme",
"dependencies": {
"@pythnetwork/price-service-sdk": "*"
}
}

View File

@ -1,136 +0,0 @@
import {
parseBatchPriceAttestation,
Price,
PriceFeed,
PriceAttestation,
PriceAttestationStatus,
priceAttestationToPriceFeed,
} from "../index";
describe("Deserializing Batch Price Attestation works", () => {
test("when batch has 3 price feeds", () => {
// Generated from the rust sdk test_batch_serde
const fixture =
"50325748000300010001020003009D01010101010101010101010101010101010101010101010101010" +
"10101010101FEFEFEFEFEFEFEFEFEFEFEFEFEFEFEFEFEFEFEFEFEFEFEFEFEFEFEFEFEFEFEFE0000002B" +
"AD2FEED70000000000000065FFFFFFFDFFFFFFFFFFFFFFD6000000000000002A010001E14C0004E6D00" +
"000DEADBEEFFADE00000000DADEBEEF00000000DEADBABE0000DEADFACEBEEF000000BADBADBEEF0000" +
"DEADBEEFFACE0202020202020202020202020202020202020202020202020202020202020202FDFDFDF" +
"DFDFDFDFDFDFDFDFDFDFDFDFDFDFDFDFDFDFDFDFDFDFDFDFDFDFDFDFD0000002BAD2FEED70000000000" +
"000065FFFFFFFDFFFFFFFFFFFFFFD6000000000000002A010001E14C0004E6D00000DEADBEEFFADE000" +
"00000DADEBEEF00000000DEADBABE0000DEADFACEBEEF000000BADBADBEEF0000DEADBEEFFACE030303" +
"0303030303030303030303030303030303030303030303030303030303FCFCFCFCFCFCFCFCFCFCFCFCF" +
"CFCFCFCFCFCFCFCFCFCFCFCFCFCFCFCFCFCFCFC0000002BAD2FEED70000000000000065FFFFFFFDFFFF" +
"FFFFFFFFFFD6000000000000002A010001E14C0004E6D00000DEADBEEFFADE00000000DADEBEEF00000" +
"000DEADBABE0000DEADFACEBEEF000000BADBADBEEF0000DEADBEEFFACE";
const data = Buffer.from(fixture, "hex");
const batchPriceAttestation = parseBatchPriceAttestation(data);
expect(batchPriceAttestation.priceAttestations.length).toBe(3);
// values are from the rust sdk mock_attestation
batchPriceAttestation.priceAttestations.forEach((pa, idx) => {
expect(pa).toEqual<PriceAttestation>({
productId: Buffer.from(Array(32).fill(idx + 1)).toString("hex"),
priceId: Buffer.from(Array(32).fill(255 - idx - 1)).toString("hex"),
price: (0x2bad2feed7).toString(),
conf: "101",
emaPrice: "-42",
emaConf: "42",
expo: -3,
status: PriceAttestationStatus.Trading,
numPublishers: 123212,
maxNumPublishers: 321232,
attestationTime: 0xdeadbeeffade,
publishTime: 0xdadebeef,
prevPublishTime: 0xdeadbabe,
prevPrice: (0xdeadfacebeef).toString(),
prevConf: (0xbadbadbeef).toString(),
lastAttestedPublishTime: 0xdeadbeefface,
});
});
});
});
describe("Price Attestation to Price Feed works", () => {
test("when status is trading", () => {
const priceAttestation = {
productId: "012345",
priceId: "abcde",
price: "100",
conf: "5",
emaPrice: "103",
emaConf: "3",
expo: -3,
status: PriceAttestationStatus.Trading,
numPublishers: 1,
maxNumPublishers: 2,
attestationTime: 1000,
publishTime: 1000,
prevPublishTime: 998,
prevPrice: "101",
prevConf: "6",
lastAttestedPublishTime: 997,
};
const priceFeed = priceAttestationToPriceFeed(priceAttestation);
expect(priceFeed).toEqual(
new PriceFeed({
id: "abcde",
price: new Price({
price: "100",
conf: "5",
expo: -3,
publishTime: 1000,
}),
emaPrice: new Price({
price: "103",
conf: "3",
expo: -3,
publishTime: 1000,
}),
})
);
});
test("when status is not trading", () => {
const priceAttestation = {
productId: "012345",
priceId: "abcde",
price: "100",
conf: "5",
emaPrice: "103",
emaConf: "3",
expo: -3,
status: PriceAttestationStatus.Unknown,
numPublishers: 1,
maxNumPublishers: 2,
attestationTime: 1000,
publishTime: 1000,
prevPublishTime: 998,
prevPrice: "101",
prevConf: "6",
lastAttestedPublishTime: 997,
};
const priceFeed = priceAttestationToPriceFeed(priceAttestation);
expect(priceFeed).toEqual(
new PriceFeed({
id: "abcde",
price: new Price({
price: "101",
conf: "6",
expo: -3,
publishTime: 998,
}),
emaPrice: new Price({
price: "103",
conf: "3",
expo: -3,
publishTime: 998,
}),
})
);
});
});

View File

@ -1,256 +0,0 @@
import {
PriceFeed,
Price,
UnixTimestamp,
} from "@pythnetwork/price-service-sdk";
export {
PriceFeed,
Price,
UnixTimestamp,
} from "@pythnetwork/price-service-sdk";
export enum PriceAttestationStatus {
Unknown = 0,
Trading = 1,
Halted = 2,
Auction = 3,
Ignored = 4,
}
export type PriceAttestation = {
productId: string;
priceId: string;
price: string;
conf: string;
expo: number;
emaPrice: string;
emaConf: string;
status: PriceAttestationStatus;
numPublishers: number;
maxNumPublishers: number;
attestationTime: UnixTimestamp;
publishTime: UnixTimestamp;
prevPublishTime: UnixTimestamp;
prevPrice: string;
prevConf: string;
lastAttestedPublishTime: UnixTimestamp;
};
export type BatchPriceAttestation = {
priceAttestations: PriceAttestation[];
};
/// Precedes every message implementing the wormhole attester serialization format
const P2W_FORMAT_MAGIC: string = "P2WH";
const P2W_FORMAT_VER_MAJOR = 3;
const P2W_FORMAT_VER_MINOR = 0;
const P2W_FORMAT_PAYLOAD_ID = 2;
export function parsePriceAttestation(bytes: Buffer): PriceAttestation {
let offset = 0;
const productId = bytes.slice(offset, offset + 32).toString("hex");
offset += 32;
const priceId = bytes.slice(offset, offset + 32).toString("hex");
offset += 32;
const price = bytes.readBigInt64BE(offset).toString();
offset += 8;
const conf = bytes.readBigUint64BE(offset).toString();
offset += 8;
const expo = bytes.readInt32BE(offset);
offset += 4;
const emaPrice = bytes.readBigInt64BE(offset).toString();
offset += 8;
const emaConf = bytes.readBigUint64BE(offset).toString();
offset += 8;
const status = bytes.readUint8(offset) as PriceAttestationStatus;
offset += 1;
const numPublishers = bytes.readUint32BE(offset);
offset += 4;
const maxNumPublishers = bytes.readUint32BE(offset);
offset += 4;
const attestationTime = Number(bytes.readBigInt64BE(offset));
offset += 8;
const publishTime = Number(bytes.readBigInt64BE(offset));
offset += 8;
const prevPublishTime = Number(bytes.readBigInt64BE(offset));
offset += 8;
const prevPrice = bytes.readBigInt64BE(offset).toString();
offset += 8;
const prevConf = bytes.readBigUint64BE(offset).toString();
offset += 8;
const lastAttestedPublishTime = Number(bytes.readBigInt64BE(offset));
offset += 8;
return {
productId,
priceId,
price,
conf,
expo,
emaPrice,
emaConf,
status,
numPublishers,
maxNumPublishers,
attestationTime,
publishTime,
prevPublishTime,
prevPrice,
prevConf,
lastAttestedPublishTime,
};
}
// See sdk/rust for the reference implementation and documentation.
export function parseBatchPriceAttestation(
bytes: Buffer
): BatchPriceAttestation {
let offset = 0;
const magic = bytes.slice(offset, offset + 4).toString("utf8");
offset += 4;
if (magic !== P2W_FORMAT_MAGIC) {
throw new Error(`Invalid magic: ${magic}, expected: ${P2W_FORMAT_MAGIC}`);
}
const versionMajor = bytes.readUInt16BE(offset);
offset += 2;
if (versionMajor !== P2W_FORMAT_VER_MAJOR) {
throw new Error(
`Unsupported major version: ${versionMajor}, expected: ${P2W_FORMAT_VER_MAJOR}`
);
}
const versionMinor = bytes.readUInt16BE(offset);
offset += 2;
if (versionMinor < P2W_FORMAT_VER_MINOR) {
throw new Error(
`Unsupported minor version: ${versionMinor}, expected at least: ${P2W_FORMAT_VER_MINOR}`
);
}
// Header size is included for forward compatibility
const headerSize = bytes.readUint16BE(offset);
offset += 2;
let headerOffset = 0;
const payloadId = bytes.readUint8(offset + headerOffset);
headerOffset += 1;
if (payloadId !== P2W_FORMAT_PAYLOAD_ID) {
throw new Error(
`Invalid payloadId: ${payloadId}, expected: ${P2W_FORMAT_PAYLOAD_ID}`
);
}
offset += headerSize;
const batchLen = bytes.readUInt16BE(offset);
offset += 2;
const attestationSize = bytes.readUint16BE(offset);
offset += 2;
const priceAttestations: PriceAttestation[] = [];
for (let i = 0; i < batchLen; i += 1) {
priceAttestations.push(
parsePriceAttestation(bytes.subarray(offset, offset + attestationSize))
);
offset += attestationSize;
}
if (offset !== bytes.length) {
throw new Error(`Invalid length: ${bytes.length}, expected: ${offset}`);
}
return {
priceAttestations,
};
}
// Returns a hash of all priceIds within the batch; it can be used to detect
// a new batch with exactly the same symbols (and ignore the old one).
export function getBatchAttestationHashKey(
batchAttestation: BatchPriceAttestation
): string {
const priceIds: string[] = batchAttestation.priceAttestations.map(
(priceAttestation) => priceAttestation.priceId
);
priceIds.sort();
return priceIds.join("#");
}
export function getBatchSummary(batch: BatchPriceAttestation): string {
const abstractRepresentation = {
num_attestations: batch.priceAttestations.length,
prices: batch.priceAttestations.map((priceAttestation) => {
const priceFeed = priceAttestationToPriceFeed(priceAttestation);
return {
price_id: priceFeed.id,
price: priceFeed.getPriceUnchecked().getPriceAsNumberUnchecked(),
conf: priceFeed.getEmaPriceUnchecked().getConfAsNumberUnchecked(),
};
}),
};
return JSON.stringify(abstractRepresentation);
}
export function priceAttestationToPriceFeed(
priceAttestation: PriceAttestation
): PriceFeed {
const emaPrice: Price = new Price({
conf: priceAttestation.emaConf,
expo: priceAttestation.expo,
price: priceAttestation.emaPrice,
publishTime: priceAttestation.publishTime,
});
let price: Price;
if (priceAttestation.status === PriceAttestationStatus.Trading) {
// 1 means trading
price = new Price({
conf: priceAttestation.conf,
expo: priceAttestation.expo,
price: priceAttestation.price,
publishTime: priceAttestation.publishTime,
});
} else {
price = new Price({
conf: priceAttestation.prevConf,
expo: priceAttestation.expo,
price: priceAttestation.prevPrice,
publishTime: priceAttestation.prevPublishTime,
});
// emaPrice is not updated when the status is not Trading, so it carries
// the previous publish time
emaPrice.publishTime = priceAttestation.prevPublishTime;
}
return new PriceFeed({
emaPrice,
id: priceAttestation.priceId,
price,
});
}
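
Putting the pieces together, a consumer typically parses a batch payload and converts each attestation into a `PriceFeed`. A minimal sketch using only the functions defined above:

```typescript
// `payload` stands in for the bytes of a batch attestation message.
declare const payload: Buffer;

const batch = parseBatchPriceAttestation(payload);
for (const attestation of batch.priceAttestations) {
  const feed = priceAttestationToPriceFeed(attestation);
  // Unchecked accessors skip staleness checks, as in getBatchSummary above.
  console.log(feed.id, feed.getPriceUnchecked().getPriceAsNumberUnchecked());
}
```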

View File

@ -1,9 +0,0 @@
{
"extends": "../../../tsconfig.base.json",
"include": ["src"],
"exclude": ["node_modules", "**/__tests__/*"],
"compilerOptions": {
"rootDir": "src/",
"outDir": "./lib"
}
}

View File

@ -1,6 +0,0 @@
{
"extends": ["tslint:recommended", "tslint-config-prettier"],
"linterOptions": {
"exclude": ["src/proto/**"]
}
}