Compare commits


No commits in common. "main" and "pyth-js-v46" have entirely different histories.

354 changed files with 12047 additions and 42364 deletions

View File

@ -15,4 +15,5 @@
.git
!apps/hermes/src/state/cache.rs
hermes/wormhole
!hermes/src/state/cache.rs

View File

@ -21,10 +21,10 @@ jobs:
- uses: actions/checkout@v3
- name: Download CLI
run: wget https://github.com/aptos-labs/aptos-core/releases/download/aptos-cli-v3.1.0/aptos-cli-3.1.0-Ubuntu-22.04-x86_64.zip
run: wget https://github.com/aptos-labs/aptos-core/releases/download/aptos-cli-v1.0.4/aptos-cli-1.0.4-Ubuntu-22.04-x86_64.zip
- name: Unzip CLI
run: unzip aptos-cli-3.1.0-Ubuntu-22.04-x86_64.zip
run: unzip aptos-cli-1.0.4-Ubuntu-22.04-x86_64.zip
- name: Run tests
run: ./aptos move test

View File

@ -2,10 +2,10 @@ name: Check Fortuna
on:
pull_request:
paths: [apps/fortuna/**]
paths: [fortuna/**]
push:
branches: [main]
paths: [apps/fortuna/**]
paths: [fortuna/**]
jobs:
test:
runs-on: ubuntu-latest
@ -17,4 +17,4 @@ jobs:
toolchain: nightly-2023-07-23
override: true
- name: Run executor tests
run: cargo test --manifest-path ./apps/fortuna/Cargo.toml
run: cargo test --manifest-path ./fortuna/Cargo.toml

View File

@ -1,35 +0,0 @@
name: Test Fuel Contract
on:
pull_request:
paths:
- target_chains/fuel/**
push:
branches:
- main
paths:
- target_chains/fuel/**
env:
CARGO_TERM_COLOR: always
jobs:
build:
runs-on: ubuntu-latest
defaults:
run:
working-directory: target_chains/fuel/contracts/
steps:
- uses: actions/checkout@v2
- name: Install Fuel toolchain
run: |
curl https://install.fuel.network | sh
echo "$HOME/.fuelup/bin" >> $GITHUB_PATH
- name: Build with Forc
run: forc build --verbose
- name: Run tests with Forc
run: forc test --verbose
- name: Build
run: cargo build --verbose
- name: Run tests
run: cargo test --verbose

View File

@ -1,37 +0,0 @@
name: Starknet contract
on:
pull_request:
paths:
- target_chains/starknet/contracts/**
push:
branches:
- main
paths:
- target_chains/starknet/contracts/**
jobs:
check:
name: Starknet Foundry tests
runs-on: ubuntu-latest
defaults:
run:
working-directory: target_chains/starknet/contracts/
steps:
- uses: actions/checkout@v3
- name: Install Scarb
uses: software-mansion/setup-scarb@v1
with:
tool-versions: target_chains/starknet/contracts/.tool-versions
- name: Install Starknet Foundry
uses: foundry-rs/setup-snfoundry@v3
with:
tool-versions: target_chains/starknet/contracts/.tool-versions
- name: Install Starkli
run: curl https://get.starkli.sh | sh && . ~/.config/.starkli/env && starkliup -v $(awk '/starkli/{print $2}' .tool-versions)
- name: Install Katana
run: curl -L https://install.dojoengine.org | bash && PATH="$PATH:$HOME/.config/.dojo/bin" dojoup -v $(awk '/dojo/{print $2}' .tool-versions)
- name: Check formatting
run: scarb fmt --check
- name: Run tests
run: snforge test
- name: Test local deployment script
run: bash -c 'PATH="$PATH:$HOME/.config/.dojo/bin" katana & . ~/.config/.starkli/env && deploy/local_deploy'

View File

@ -12,7 +12,7 @@ jobs:
- uses: actions/checkout@v2
- uses: actions/setup-node@v2
with:
node-version: "18"
node-version: "16"
registry-url: "https://registry.npmjs.org"
- run: npm ci
- run: npx lerna run build --no-private

View File

@ -11,14 +11,8 @@ jobs:
steps:
- name: Checkout sources
uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
default: true
profile: minimal
- run: cargo +stable-x86_64-unknown-linux-gnu publish --token ${CARGO_REGISTRY_TOKEN}
- run: cargo publish --token ${CARGO_REGISTRY_TOKEN}
env:
CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
working-directory: "target_chains/solana/pyth_solana_receiver_sdk"

View File

@ -46,7 +46,7 @@ jobs:
uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
with:
context: .
file: "./apps/fortuna/Dockerfile"
file: "./fortuna/Dockerfile"
push: true
tags: ${{ steps.metadata_fortuna.outputs.tags }}
labels: ${{ steps.metadata_fortuna.outputs.labels }}

View File

@ -40,7 +40,7 @@ jobs:
id: ecr_login
- run: |
DOCKER_BUILDKIT=1 docker build -t lerna -f Dockerfile.lerna .
DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f apps/price_pusher/Dockerfile .
DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f price_pusher/Dockerfile .
docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
env:
ECR_REGISTRY: public.ecr.aws

View File

@ -5,17 +5,12 @@ on:
tags:
- "python-v*"
env:
PYTHON_VERSION: "3.11"
jobs:
deploy:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Install dependencies
run: |
python3 -m pip install --upgrade poetry

.npmrc
View File

@ -1 +0,0 @@
engine-strict=true

View File

@ -60,9 +60,9 @@ repos:
- id: cargo-fmt-fortuna
name: Cargo format for Fortuna
language: "rust"
entry: cargo +nightly-2023-07-23 fmt --manifest-path ./apps/fortuna/Cargo.toml --all -- --config-path rustfmt.toml
entry: cargo +nightly-2023-07-23 fmt --manifest-path ./fortuna/Cargo.toml --all -- --config-path rustfmt.toml
pass_filenames: false
files: apps/fortuna
files: fortuna
# Hooks for message buffer contract
- id: cargo-fmt-message-buffer
name: Cargo format for message buffer contract
@ -80,13 +80,13 @@ repos:
- id: cargo-fmt-pythnet-sdk
name: Cargo format for pythnet SDK
language: "rust"
entry: cargo +nightly-2024-03-26 fmt --manifest-path ./pythnet/pythnet_sdk/Cargo.toml --all -- --config-path rustfmt.toml
entry: cargo +nightly-2023-07-23 fmt --manifest-path ./pythnet/pythnet_sdk/Cargo.toml --all -- --config-path rustfmt.toml
pass_filenames: false
files: pythnet/pythnet_sdk
- id: cargo-clippy-pythnet-sdk
name: Cargo clippy for pythnet SDK
language: "rust"
entry: cargo +nightly-2024-03-26 clippy --manifest-path ./pythnet/pythnet_sdk/Cargo.toml --tests --fix --allow-dirty --allow-staged -- -D warnings
entry: cargo +nightly-2023-07-23 clippy --manifest-path ./pythnet/pythnet_sdk/Cargo.toml --tests --fix --allow-dirty --allow-staged -- -D warnings
pass_filenames: false
files: pythnet/pythnet_sdk
# Hooks for solana receiver contract

View File

@ -79,11 +79,10 @@ Lerna has some common failure modes that you may encounter:
1. `npm ci` fails with a typescript compilation error about a missing package.
This error likely means that the failing package has a `prepare` entry in its `package.json` that compiles the TypeScript.
Fix this error by moving that logic to the `prepublishOnly` entry.
2. The software builds locally but fails in CI, or vice-versa.
1. The software builds locally but fails in CI, or vice-versa.
This error likely means that some local build caches need to be cleaned.
The build error may not indicate that this is a caching issue, e.g., it may appear that the packages are being built in the wrong order.
Delete `node_modules/`, `lib/`, and `tsconfig.tsbuildinfo` from each package's subdirectory, then try again (see the cleanup sketch after this list).
3. `npm ci` fails due to a wrong Node version. Make sure you are using `v18`; `v21` is not supported and known to cause issues.
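For the stale-cache failure above, a minimal cleanup sketch (assuming each package keeps its compiled output in `lib/` and its TypeScript build metadata in `tsconfig.tsbuildinfo`; adjust the paths to your checkout):

```bash
# Remove per-package build caches from the repo root.
# Note: this deletes every directory named node_modules or lib in the tree,
# so run it only where that is safe.
find . -type d -name node_modules -prune -exec rm -rf {} +
find . -type d -name lib -prune -exec rm -rf {} +
find . -type f -name tsconfig.tsbuildinfo -delete
# Reinstall and rebuild (same commands the CI uses above).
npm ci
npx lerna run build --no-private
```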
## Audit / Feature Status

View File

@ -1,7 +0,0 @@
chains:
lightlink-pegasus:
commitments:
# prettier-ignore
- seed: [219,125,217,197,234,88,208,120,21,181,172,143,239,102,41,233,167,212,237,106,37,255,184,165,238,121,230,155,116,158,173,48]
chain_length: 10000
original_commitment_sequence_number: 104

View File

@ -1 +0,0 @@
nightly-2023-07-23

View File

@ -1,487 +0,0 @@
use {
crate::{
api::{
self,
BlockchainState,
},
chain::{
ethereum::SignablePythContract,
reader::{
BlockNumber,
RequestedWithCallbackEvent,
},
},
config::EthereumConfig,
},
anyhow::{
anyhow,
Result,
},
ethers::{
contract::ContractError,
providers::{
Middleware,
Provider,
Ws,
},
types::U256,
},
futures::StreamExt,
std::sync::Arc,
tokio::{
spawn,
sync::mpsc,
time::{
self,
Duration,
},
},
tracing::{
self,
Instrument,
},
};
#[derive(Debug)]
pub struct BlockRange {
pub from: BlockNumber,
pub to: BlockNumber,
}
/// How long to wait before retrying in case of an RPC error
const RETRY_INTERVAL: Duration = Duration::from_secs(5);
/// How many blocks to look back for events that might be missed when starting the keeper
const BACKLOG_RANGE: u64 = 1000;
/// How many blocks to fetch events for in a single RPC call
const BLOCK_BATCH_SIZE: u64 = 100;
/// How long to wait before polling the next latest block
const POLL_INTERVAL: Duration = Duration::from_secs(5);
/// Get the latest safe block number for the chain. Retry internally if there is an error.
async fn get_latest_safe_block(chain_state: &BlockchainState) -> BlockNumber {
loop {
match chain_state
.contract
.get_block_number(chain_state.confirmed_block_status)
.await
{
Ok(latest_confirmed_block) => {
tracing::info!(
"Fetched latest safe block {}",
latest_confirmed_block - chain_state.reveal_delay_blocks
);
return latest_confirmed_block - chain_state.reveal_delay_blocks;
}
Err(e) => {
tracing::error!("Error while getting block number. error: {:?}", e);
time::sleep(RETRY_INTERVAL).await;
}
}
}
}
/// Run threads to handle events for the last `BACKLOG_RANGE` blocks, watch for new blocks and
/// handle any events for the new blocks.
#[tracing::instrument(name="keeper", skip_all, fields(chain_id=chain_state.id))]
pub async fn run_keeper_threads(
private_key: String,
chain_eth_config: EthereumConfig,
chain_state: BlockchainState,
) {
tracing::info!("starting keeper");
let latest_safe_block = get_latest_safe_block(&chain_state).in_current_span().await;
tracing::info!("latest safe block: {}", &latest_safe_block);
let contract = Arc::new(
SignablePythContract::from_config(&chain_eth_config, &private_key)
.await
.expect("Chain config should be valid"),
);
// Spawn a thread to handle the events from last BACKLOG_RANGE blocks.
spawn(
process_backlog(
BlockRange {
from: latest_safe_block.saturating_sub(BACKLOG_RANGE),
to: latest_safe_block,
},
contract.clone(),
chain_eth_config.gas_limit,
chain_state.clone(),
)
.in_current_span(),
);
let (tx, rx) = mpsc::channel::<BlockRange>(1000);
// Spawn a thread to watch for new blocks and send the range of blocks for which events have not been handled to the `tx` channel.
spawn(
watch_blocks_wrapper(
chain_state.clone(),
latest_safe_block,
tx,
chain_eth_config.geth_rpc_wss.clone(),
)
.in_current_span(),
);
// Spawn a thread that listens for block ranges on the `rx` channel and processes the events for those blocks.
spawn(
process_new_blocks(
chain_state.clone(),
rx,
Arc::clone(&contract),
chain_eth_config.gas_limit,
)
.in_current_span(),
);
}
/// Process an event for a chain. It estimates the gas for the reveal with callback and
/// submits the transaction if the gas estimate is below the gas limit.
/// It returns an error if either the gas estimation or the reveal with callback fails with a
/// provider error.
pub async fn process_event(
event: RequestedWithCallbackEvent,
chain_config: &BlockchainState,
contract: &Arc<SignablePythContract>,
gas_limit: U256,
) -> Result<()> {
if chain_config.provider_address != event.provider_address {
return Ok(());
}
let provider_revelation = match chain_config.state.reveal(event.sequence_number) {
Ok(result) => result,
Err(e) => {
tracing::error!(
sequence_number = &event.sequence_number,
"Error while revealing with error: {:?}",
e
);
return Ok(());
}
};
let gas_estimate_res = chain_config
.contract
.estimate_reveal_with_callback_gas(
event.provider_address,
event.sequence_number,
event.user_random_number,
provider_revelation,
)
.in_current_span()
.await;
match gas_estimate_res {
Ok(gas_estimate_option) => match gas_estimate_option {
Some(gas_estimate) => {
// Pad the gas estimate by 33%
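// saturating_mul(4) followed by div_mod(3) computes ~4/3 of the estimate (+33%)
// without overflowing; div_mod returns (quotient, remainder) and the remainder
// is discarded below.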
let (gas_estimate, _) = gas_estimate
.saturating_mul(U256::from(4))
.div_mod(U256::from(3));
if gas_estimate > gas_limit {
tracing::error!(
sequence_number = &event.sequence_number,
"Gas estimate for reveal with callback is higher than the gas limit"
);
return Ok(());
}
let contract_call = contract
.reveal_with_callback(
event.provider_address,
event.sequence_number,
event.user_random_number,
provider_revelation,
)
.gas(gas_estimate);
let res = contract_call.send().await;
let pending_tx = match res {
Ok(pending_tx) => pending_tx,
Err(e) => match e {
// If there is a provider error, we weren't able to send the transaction.
// We return an error so that the caller can decide what to do (retry).
ContractError::ProviderError { e } => return Err(e.into()),
// For all other errors, it is likely that we will never be able to reveal. We
// return Ok(()) to signal that we have processed this reveal and concluded
// that it is OK not to reveal.
_ => {
tracing::error!(
sequence_number = &event.sequence_number,
"Error while revealing with error: {:?}",
e
);
return Ok(());
}
},
};
match pending_tx.await {
Ok(res) => {
tracing::info!(
sequence_number = &event.sequence_number,
"Revealed with res: {:?}",
res
);
Ok(())
}
Err(e) => {
tracing::error!(
sequence_number = &event.sequence_number,
"Error while revealing with error: {:?}",
e
);
Err(e.into())
}
}
}
None => {
tracing::info!(
sequence_number = &event.sequence_number,
"Not processing event"
);
Ok(())
}
},
Err(e) => {
tracing::error!(
sequence_number = &event.sequence_number,
"Error while simulating reveal with error: {:?}",
e
);
Err(e)
}
}
}
/// Process a range of blocks in batches. It calls the `process_single_block_batch` method for each batch.
#[tracing::instrument(skip_all, fields(range_from_block=block_range.from, range_to_block=block_range.to))]
pub async fn process_block_range(
block_range: BlockRange,
contract: Arc<SignablePythContract>,
gas_limit: U256,
chain_state: api::BlockchainState,
) {
let BlockRange {
from: first_block,
to: last_block,
} = block_range;
let mut current_block = first_block;
while current_block <= last_block {
let mut to_block = current_block + BLOCK_BATCH_SIZE;
if to_block > last_block {
to_block = last_block;
}
process_single_block_batch(
BlockRange {
from: current_block,
to: to_block,
},
contract.clone(),
gas_limit,
chain_state.clone(),
)
.in_current_span()
.await;
current_block = to_block + 1;
}
}
/// Process a batch of blocks for a chain. It fetches events for all the blocks in the batch in a
/// single call and then tries to process them one by one. If processing fails, it retries indefinitely.
#[tracing::instrument(name="batch", skip_all, fields(batch_from_block=block_range.from, batch_to_block=block_range.to))]
pub async fn process_single_block_batch(
block_range: BlockRange,
contract: Arc<SignablePythContract>,
gas_limit: U256,
chain_state: api::BlockchainState,
) {
loop {
let events_res = chain_state
.contract
.get_request_with_callback_events(block_range.from, block_range.to)
.await;
match events_res {
Ok(events) => {
tracing::info!(num_of_events = &events.len(), "Processing",);
for event in &events {
tracing::info!(sequence_number = &event.sequence_number, "Processing event",);
while let Err(e) =
process_event(event.clone(), &chain_state, &contract, gas_limit)
.in_current_span()
.await
{
tracing::error!(
sequence_number = &event.sequence_number,
"Error while processing event. Waiting for {} seconds before retry. error: {:?}",
RETRY_INTERVAL.as_secs(),
e
);
time::sleep(RETRY_INTERVAL).await;
}
tracing::info!(sequence_number = &event.sequence_number, "Processed event",);
}
tracing::info!(num_of_events = &events.len(), "Processed",);
break;
}
Err(e) => {
tracing::error!(
"Error while getting events. Waiting for {} seconds before retry. error: {:?}",
RETRY_INTERVAL.as_secs(),
e
);
time::sleep(RETRY_INTERVAL).await;
}
}
}
}
/// Wrapper for the `watch_blocks` method. If there was an error while watching, it will retry after a delay.
/// It retries indefinitely.
#[tracing::instrument(name="watch_blocks", skip_all, fields(initial_safe_block=latest_safe_block))]
pub async fn watch_blocks_wrapper(
chain_state: BlockchainState,
latest_safe_block: BlockNumber,
tx: mpsc::Sender<BlockRange>,
geth_rpc_wss: Option<String>,
) {
let mut last_safe_block_processed = latest_safe_block;
loop {
if let Err(e) = watch_blocks(
chain_state.clone(),
&mut last_safe_block_processed,
tx.clone(),
geth_rpc_wss.clone(),
)
.in_current_span()
.await
{
tracing::error!("watching blocks. error: {:?}", e);
time::sleep(RETRY_INTERVAL).await;
}
}
}
/// Watch for new blocks and send the range of blocks for which events have not been handled to the `tx` channel.
/// We are subscribing to new blocks instead of events. If we miss some blocks, it will be fine as we are sending
/// block ranges to the `tx` channel. Had we subscribed to events instead, we could have missed some
/// and would not even know it.
pub async fn watch_blocks(
chain_state: BlockchainState,
last_safe_block_processed: &mut BlockNumber,
tx: mpsc::Sender<BlockRange>,
geth_rpc_wss: Option<String>,
) -> Result<()> {
tracing::info!("Watching blocks to handle new events");
let provider_option = match geth_rpc_wss {
Some(wss) => Some(match Provider::<Ws>::connect(wss.clone()).await {
Ok(provider) => provider,
Err(e) => {
tracing::error!("Error while connecting to wss: {}. error: {:?}", wss, e);
return Err(e.into());
}
}),
None => {
tracing::info!("No wss provided");
None
}
};
let mut stream_option = match provider_option {
Some(ref provider) => Some(match provider.subscribe_blocks().await {
Ok(client) => client,
Err(e) => {
tracing::error!("Error while subscribing to blocks. error {:?}", e);
return Err(e.into());
}
}),
None => None,
};
loop {
match stream_option {
Some(ref mut stream) => {
if let None = stream.next().await {
tracing::error!("Error blocks subscription stream ended");
return Err(anyhow!("Error blocks subscription stream ended"));
}
}
None => {
time::sleep(POLL_INTERVAL).await;
}
}
let latest_safe_block = get_latest_safe_block(&chain_state).in_current_span().await;
if latest_safe_block > *last_safe_block_processed {
match tx
.send(BlockRange {
from: *last_safe_block_processed + 1,
to: latest_safe_block,
})
.await
{
Ok(_) => {
tracing::info!(
from_block = *last_safe_block_processed + 1,
to_block = &latest_safe_block,
"Block range sent to handle events",
);
*last_safe_block_processed = latest_safe_block;
}
Err(e) => {
tracing::error!(
"Error while sending block range to handle events. These will be handled in next call. error: {:?}",
e
);
}
};
}
}
}
/// It waits on the `rx` channel to receive block ranges and then calls `process_block_range` to process them.
#[tracing::instrument(skip_all)]
pub async fn process_new_blocks(
chain_state: BlockchainState,
mut rx: mpsc::Receiver<BlockRange>,
contract: Arc<SignablePythContract>,
gas_limit: U256,
) {
tracing::info!("Waiting for new block ranges to process");
loop {
if let Some(block_range) = rx.recv().await {
process_block_range(
block_range,
Arc::clone(&contract),
gas_limit,
chain_state.clone(),
)
.in_current_span()
.await;
}
}
}
/// Processes the backlog_range for a chain.
#[tracing::instrument(skip_all)]
pub async fn process_backlog(
backlog_range: BlockRange,
contract: Arc<SignablePythContract>,
gas_limit: U256,
chain_state: BlockchainState,
) {
tracing::info!("Processing backlog");
process_block_range(backlog_range, contract, gas_limit, chain_state)
.in_current_span()
.await;
tracing::info!("Backlog processed");
}

View File

@ -1796,7 +1796,7 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "hermes"
version = "0.5.9"
version = "0.5.5"
dependencies = [
"anyhow",
"async-trait",
@ -3138,7 +3138,7 @@ dependencies = [
[[package]]
name = "pythnet-sdk"
version = "2.1.0"
version = "2.0.0"
dependencies = [
"bincode",
"borsh 0.10.3",

View File

@ -1,6 +1,6 @@
[package]
name = "hermes"
version = "0.5.9"
version = "0.5.5"
description = "Hermes is an agent that provides Verified Prices from the Pythnet Pyth Oracle."
edition = "2021"

View File

@ -20,7 +20,6 @@ use {
},
crate::{
network::wormhole::VaaBytes,
price_feeds_metadata::PriceFeedMeta,
state::{
benchmarks::Benchmarks,
cache::{
@ -60,13 +59,6 @@ use {
collections::HashSet,
time::Duration,
},
tokio::sync::{
broadcast::{
Receiver,
Sender,
},
RwLock,
},
wormhole_sdk::Vaa,
};
@ -110,7 +102,8 @@ impl AggregationEvent {
}
}
pub struct AggregateStateData {
#[derive(Clone, Debug)]
pub struct AggregateState {
/// The latest completed slot. This is used to check whether a completed state is new or out of
/// order.
pub latest_completed_slot: Option<Slot>,
@ -126,7 +119,7 @@ pub struct AggregateStateData {
pub metrics: metrics::Metrics,
}
impl AggregateStateData {
impl AggregateState {
pub fn new(metrics_registry: &mut Registry) -> Self {
Self {
latest_completed_slot: None,
@ -137,20 +130,6 @@ impl AggregateStateData {
}
}
pub struct AggregateState {
pub data: RwLock<AggregateStateData>,
pub api_update_tx: Sender<AggregationEvent>,
}
impl AggregateState {
pub fn new(update_tx: Sender<AggregationEvent>, metrics_registry: &mut Registry) -> Self {
Self {
data: RwLock::new(AggregateStateData::new(metrics_registry)),
api_update_tx: update_tx,
}
}
}
/// Accumulator messages coming from Pythnet validators.
///
/// The validators write the accumulator messages using Borsh with
@ -198,48 +177,9 @@ const READINESS_STALENESS_THRESHOLD: Duration = Duration::from_secs(30);
/// 10 slots is almost 5 seconds.
const READINESS_MAX_ALLOWED_SLOT_LAG: Slot = 10;
#[async_trait::async_trait]
pub trait Aggregates
where
Self: Cache,
Self: Benchmarks,
Self: PriceFeedMeta,
{
fn subscribe(&self) -> Receiver<AggregationEvent>;
async fn is_ready(&self) -> bool;
async fn store_update(&self, update: Update) -> Result<()>;
async fn get_price_feed_ids(&self) -> HashSet<PriceIdentifier>;
async fn get_price_feeds_with_update_data(
&self,
price_ids: &[PriceIdentifier],
request_time: RequestTime,
) -> Result<PriceFeedsWithUpdateData>;
}
/// Allow downcasting State into AggregateState for functions that depend on the `Aggregates` service.
impl<'a> From<&'a State> for &'a AggregateState {
fn from(state: &'a State) -> &'a AggregateState {
&state.aggregates
}
}
#[async_trait::async_trait]
impl<T> Aggregates for T
where
for<'a> &'a T: Into<&'a AggregateState>,
T: Sync,
T: Send,
T: Cache,
T: Benchmarks,
T: PriceFeedMeta,
{
fn subscribe(&self) -> Receiver<AggregationEvent> {
self.into().api_update_tx.subscribe()
}
/// Stores the update data in the store
#[tracing::instrument(skip(self, update))]
async fn store_update(&self, update: Update) -> Result<()> {
/// Stores the update data in the store
#[tracing::instrument(skip(state, update))]
pub async fn store_update(state: &State, update: Update) -> Result<()> {
// The slot that the update originates from. It should be available
// in all the updates.
let slot = match update {
@ -252,14 +192,14 @@ where
tracing::info!(slot = proof.slot, "Storing VAA Merkle Proof.");
store_wormhole_merkle_verified_message(
self,
state,
proof.clone(),
update_vaa.to_owned(),
)
.await?;
self.into()
.data
state
.aggregate_state
.write()
.await
.metrics
@ -273,11 +213,12 @@ where
let slot = accumulator_messages.slot;
tracing::info!(slot = slot, "Storing Accumulator Messages.");
self.store_accumulator_messages(accumulator_messages)
state
.store_accumulator_messages(accumulator_messages)
.await?;
self.into()
.data
state
.aggregate_state
.write()
.await
.metrics
@ -288,15 +229,15 @@ where
// Update the aggregate state with the latest observed slot
{
let mut aggregate_state = self.into().data.write().await;
let mut aggregate_state = state.aggregate_state.write().await;
aggregate_state.latest_observed_slot = aggregate_state
.latest_observed_slot
.map(|latest| latest.max(slot))
.or(Some(slot));
}
let accumulator_messages = self.fetch_accumulator_messages(slot).await?;
let wormhole_merkle_state = self.fetch_wormhole_merkle_state(slot).await?;
let accumulator_messages = state.fetch_accumulator_messages(slot).await?;
let wormhole_merkle_state = state.fetch_wormhole_merkle_state(slot).await?;
let (accumulator_messages, wormhole_merkle_state) =
match (accumulator_messages, wormhole_merkle_state) {
@ -318,29 +259,24 @@ where
.collect::<HashSet<_>>();
tracing::info!(len = message_states.len(), "Storing Message States.");
self.store_message_states(message_states).await?;
state.store_message_states(message_states).await?;
// Update the aggregate state
let mut aggregate_state = self.into().data.write().await;
let mut aggregate_state = state.aggregate_state.write().await;
// Send update event to subscribers. We are purposefully ignoring the result
// because there might be no subscribers.
let _ = match aggregate_state.latest_completed_slot {
None => {
aggregate_state.latest_completed_slot.replace(slot);
self.into()
.api_update_tx
.send(AggregationEvent::New { slot })
state.api_update_tx.send(AggregationEvent::New { slot })
}
Some(latest) if slot > latest => {
self.prune_removed_keys(message_state_keys).await;
state.prune_removed_keys(message_state_keys).await;
aggregate_state.latest_completed_slot.replace(slot);
self.into()
.api_update_tx
.send(AggregationEvent::New { slot })
state.api_update_tx.send(AggregationEvent::New { slot })
}
_ => self
.into()
_ => state
.api_update_tx
.send(AggregationEvent::OutOfOrder { slot }),
};
@ -359,59 +295,6 @@ where
.observe(slot, metrics::Event::CompletedUpdate);
Ok(())
}
async fn get_price_feeds_with_update_data(
&self,
price_ids: &[PriceIdentifier],
request_time: RequestTime,
) -> Result<PriceFeedsWithUpdateData> {
match get_verified_price_feeds(self, price_ids, request_time.clone()).await {
Ok(price_feeds_with_update_data) => Ok(price_feeds_with_update_data),
Err(e) => {
if let RequestTime::FirstAfter(publish_time) = request_time {
return Benchmarks::get_verified_price_feeds(self, price_ids, publish_time)
.await;
}
Err(e)
}
}
}
async fn get_price_feed_ids(&self) -> HashSet<PriceIdentifier> {
Cache::message_state_keys(self)
.await
.iter()
.map(|key| PriceIdentifier::new(key.feed_id))
.collect()
}
async fn is_ready(&self) -> bool {
let metadata = self.into().data.read().await;
let price_feeds_metadata = PriceFeedMeta::retrieve_price_feeds_metadata(self)
.await
.unwrap();
let has_completed_recently = match metadata.latest_completed_update_at.as_ref() {
Some(latest_completed_update_time) => {
latest_completed_update_time.elapsed() < READINESS_STALENESS_THRESHOLD
}
None => false,
};
let is_not_behind = match (
metadata.latest_completed_slot,
metadata.latest_observed_slot,
) {
(Some(latest_completed_slot), Some(latest_observed_slot)) => {
latest_observed_slot - latest_completed_slot <= READINESS_MAX_ALLOWED_SLOT_LAG
}
_ => false,
};
let is_metadata_loaded = !price_feeds_metadata.is_empty();
has_completed_recently && is_not_behind && is_metadata_loaded
}
}
#[tracing::instrument(skip(accumulator_messages, wormhole_merkle_state))]
@ -506,12 +389,73 @@ where
})
}
pub async fn get_price_feeds_with_update_data<S>(
state: &S,
price_ids: &[PriceIdentifier],
request_time: RequestTime,
) -> Result<PriceFeedsWithUpdateData>
where
S: Cache,
S: Benchmarks,
{
match get_verified_price_feeds(state, price_ids, request_time.clone()).await {
Ok(price_feeds_with_update_data) => Ok(price_feeds_with_update_data),
Err(e) => {
if let RequestTime::FirstAfter(publish_time) = request_time {
return state
.get_verified_price_feeds(price_ids, publish_time)
.await;
}
Err(e)
}
}
}
pub async fn get_price_feed_ids<S>(state: &S) -> HashSet<PriceIdentifier>
where
S: Cache,
{
state
.message_state_keys()
.await
.iter()
.map(|key| PriceIdentifier::new(key.feed_id))
.collect()
}
pub async fn is_ready(state: &State) -> bool {
let metadata = state.aggregate_state.read().await;
let price_feeds_metadata = state.price_feed_meta.data.read().await;
let has_completed_recently = match metadata.latest_completed_update_at.as_ref() {
Some(latest_completed_update_time) => {
latest_completed_update_time.elapsed() < READINESS_STALENESS_THRESHOLD
}
None => false,
};
let is_not_behind = match (
metadata.latest_completed_slot,
metadata.latest_observed_slot,
) {
(Some(latest_completed_slot), Some(latest_observed_slot)) => {
latest_observed_slot - latest_completed_slot <= READINESS_MAX_ALLOWED_SLOT_LAG
}
_ => false,
};
let is_metadata_loaded = !price_feeds_metadata.is_empty();
has_completed_recently && is_not_behind && is_metadata_loaded
}
#[cfg(test)]
mod test {
use {
super::*,
crate::{
api::types::PriceFeedMetadata,
price_feeds_metadata::PriceFeedMeta,
state::test::setup_state,
},
futures::future::join_all,
@ -613,7 +557,7 @@ mod test {
}
pub async fn store_multiple_concurrent_valid_updates(state: Arc<State>, updates: Vec<Update>) {
let res = join_all(updates.into_iter().map(|u| (&state).store_update(u))).await;
let res = join_all(updates.into_iter().map(|u| store_update(&state, u))).await;
// Check that all store_update calls succeeded
assert!(res.into_iter().all(|r| r.is_ok()));
}
@ -639,14 +583,14 @@ mod test {
// Check the price ids are stored correctly
assert_eq!(
(&*state).get_price_feed_ids().await,
get_price_feed_ids(&*state).await,
vec![PriceIdentifier::new([100; 32])].into_iter().collect()
);
// Check get_price_feeds_with_update_data retrieves the correct
// price feed with correct update data.
let price_feeds_with_update_data = (&*state)
.get_price_feeds_with_update_data(
let price_feeds_with_update_data = get_price_feeds_with_update_data(
&*state,
&[PriceIdentifier::new([100; 32])],
RequestTime::Latest,
)
@ -764,7 +708,7 @@ mod test {
// Check the price ids are stored correctly
assert_eq!(
(&*state).get_price_feed_ids().await,
get_price_feed_ids(&*state).await,
vec![
PriceIdentifier::new([100; 32]),
PriceIdentifier::new([200; 32])
@ -774,8 +718,8 @@ mod test {
);
// Check that price feed 2 exists
assert!((&*state)
.get_price_feeds_with_update_data(
assert!(get_price_feeds_with_update_data(
&*state,
&[PriceIdentifier::new([200; 32])],
RequestTime::Latest,
)
@ -801,12 +745,12 @@ mod test {
// Check that price feed 2 does not exist anymore
assert_eq!(
(&*state).get_price_feed_ids().await,
get_price_feed_ids(&*state).await,
vec![PriceIdentifier::new([100; 32]),].into_iter().collect()
);
assert!((&*state)
.get_price_feeds_with_update_data(
assert!(get_price_feeds_with_update_data(
&*state,
&[PriceIdentifier::new([200; 32])],
RequestTime::Latest,
)
@ -847,8 +791,8 @@ mod test {
MockClock::advance(Duration::from_secs(1));
// Get the price feeds with update data
let price_feeds_with_update_data = (&*state)
.get_price_feeds_with_update_data(
let price_feeds_with_update_data = get_price_feeds_with_update_data(
&*state,
&[PriceIdentifier::new([100; 32])],
RequestTime::Latest,
)
@ -873,13 +817,13 @@ mod test {
.unwrap();
// Check the state is ready
assert!((&state).is_ready().await);
assert!(is_ready(&state).await);
// Advance the clock to make the prices stale
MockClock::advance_system_time(READINESS_STALENESS_THRESHOLD);
MockClock::advance(READINESS_STALENESS_THRESHOLD);
// Check the state is not ready
assert!(!(&state).is_ready().await);
assert!(!is_ready(&state).await);
}
/// Test that the state retains the latest slots upon cache eviction.
@ -922,8 +866,8 @@ mod test {
// Check the last 100 slots are retained
for slot in 900..1000 {
let price_feeds_with_update_data = (&*state)
.get_price_feeds_with_update_data(
let price_feeds_with_update_data = get_price_feeds_with_update_data(
&*state,
&[
PriceIdentifier::new([100; 32]),
PriceIdentifier::new([200; 32]),
@ -939,8 +883,8 @@ mod test {
// Check nothing else is retained
for slot in 0..900 {
assert!((&*state)
.get_price_feeds_with_update_data(
assert!(get_price_feeds_with_update_data(
&*state,
&[
PriceIdentifier::new([100; 32]),
PriceIdentifier::new([200; 32])

View File

@ -1,5 +1,6 @@
use {
crate::{
aggregate::AggregationEvent,
config::RunOptions,
state::State,
},
@ -13,6 +14,7 @@ use {
ipnet::IpNet,
serde_qs::axum::QsQueryConfig,
std::sync::Arc,
tokio::sync::broadcast::Sender,
tower_http::cors::CorsLayer,
utoipa::OpenApi,
utoipa_swagger_ui::SwaggerUi,
@ -28,6 +30,7 @@ pub struct ApiState<S = State> {
pub state: Arc<S>,
pub ws: Arc<ws::WsState>,
pub metrics: Arc<metrics_middleware::Metrics>,
pub update_tx: Sender<AggregationEvent>,
}
/// Manually implement `Clone` as the derive macro will try and slap `Clone` on
@ -38,6 +41,7 @@ impl<S> Clone for ApiState<S> {
state: self.state.clone(),
ws: self.ws.clone(),
metrics: self.metrics.clone(),
update_tx: self.update_tx.clone(),
}
}
}
@ -47,6 +51,7 @@ impl ApiState<State> {
state: Arc<State>,
ws_whitelist: Vec<IpNet>,
requester_ip_header_name: String,
update_tx: Sender<AggregationEvent>,
) -> Self {
Self {
metrics: Arc::new(metrics_middleware::Metrics::new(state.clone())),
@ -56,18 +61,24 @@ impl ApiState<State> {
state.clone(),
)),
state,
update_tx,
}
}
}
#[tracing::instrument(skip(opts, state))]
pub async fn spawn(opts: RunOptions, state: Arc<State>) -> Result<()> {
#[tracing::instrument(skip(opts, state, update_tx))]
pub async fn spawn(
opts: RunOptions,
state: Arc<State>,
update_tx: Sender<AggregationEvent>,
) -> Result<()> {
let state = {
let opts = opts.clone();
ApiState::new(
state,
opts.rpc.ws_whitelist,
opts.rpc.requester_ip_header_name,
update_tx,
)
};
@ -124,7 +135,6 @@ pub async fn run(opts: RunOptions, state: ApiState) -> Result<()> {
// Initialize Axum Router. Note the type here is a `Router<State>` due to the use of the
// `with_state` method which replaces `Body` with `State` in the type signature.
let app = Router::new();
#[allow(deprecated)]
let app = app
.merge(SwaggerUi::new("/docs").url("/docs/openapi.json", ApiDoc::openapi()))
.route("/", get(rest::index))

View File

@ -1,4 +1,4 @@
use crate::state::aggregate::UnixTimestamp;
use crate::aggregate::UnixTimestamp;
// Example values for the utoipa API docs.
// Note that each of these expressions is only evaluated once when the documentation is created,

View File

@ -1,6 +1,5 @@
use {
super::ApiState,
crate::state::aggregate::Aggregates,
axum::{
http::StatusCode,
response::{
@ -94,15 +93,11 @@ impl IntoResponse for RestError {
}
/// Verify that the price ids exist in the aggregate state.
pub async fn verify_price_ids_exist<S>(
state: &ApiState<S>,
pub async fn verify_price_ids_exist(
state: &ApiState,
price_ids: &[PriceIdentifier],
) -> Result<(), RestError>
where
S: Aggregates,
{
let state = &*state.state;
let all_ids = Aggregates::get_price_feed_ids(state).await;
) -> Result<(), RestError> {
let all_ids = crate::aggregate::get_price_feed_ids(&*state.state).await;
let missing_ids = price_ids
.iter()
.filter(|id| !all_ids.contains(id))

View File

@ -1,6 +1,10 @@
use {
super::verify_price_ids_exist,
crate::{
aggregate::{
RequestTime,
UnixTimestamp,
},
api::{
doc_examples,
rest::RestError,
@ -8,12 +12,6 @@ use {
PriceIdInput,
RpcPriceFeed,
},
ApiState,
},
state::aggregate::{
Aggregates,
RequestTime,
UnixTimestamp,
},
},
anyhow::Result,
@ -49,8 +47,6 @@ pub struct GetPriceFeedQueryParams {
binary: bool,
}
/// **Deprecated: use /v2/updates/price/{publish_time} instead**
///
/// Get a price update for a price feed with a specific timestamp
///
/// Given a price feed id and timestamp, retrieve the Pyth price update closest to that timestamp.
@ -64,20 +60,16 @@ pub struct GetPriceFeedQueryParams {
GetPriceFeedQueryParams
)
)]
#[deprecated]
pub async fn get_price_feed<S>(
State(state): State<ApiState<S>>,
pub async fn get_price_feed(
State(state): State<crate::api::ApiState>,
QsQuery(params): QsQuery<GetPriceFeedQueryParams>,
) -> Result<Json<RpcPriceFeed>, RestError>
where
S: Aggregates,
{
) -> Result<Json<RpcPriceFeed>, RestError> {
let price_id: PriceIdentifier = params.id.into();
verify_price_ids_exist(&state, &[price_id]).await?;
let state = &*state.state;
let price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
state,
let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
&*state.state,
&[price_id],
RequestTime::FirstAfter(params.publish_time),
)

View File

@ -1,16 +1,15 @@
use {
super::verify_price_ids_exist,
crate::{
aggregate::{
get_price_feeds_with_update_data,
RequestTime,
UnixTimestamp,
},
api::{
doc_examples,
rest::RestError,
types::PriceIdInput,
ApiState,
},
state::aggregate::{
Aggregates,
RequestTime,
UnixTimestamp,
},
},
anyhow::Result,
@ -55,8 +54,6 @@ pub struct GetVaaResponse {
publish_time: UnixTimestamp,
}
/// **Deprecated: use /v2/updates/price/{publish_time} instead**
///
/// Get a VAA for a price feed with a specific timestamp
///
/// Given a price feed id and timestamp, retrieve the Pyth price update closest to that timestamp.
@ -71,20 +68,16 @@ pub struct GetVaaResponse {
GetVaaQueryParams
)
)]
#[deprecated]
pub async fn get_vaa<S>(
State(state): State<ApiState<S>>,
pub async fn get_vaa(
State(state): State<crate::api::ApiState>,
QsQuery(params): QsQuery<GetVaaQueryParams>,
) -> Result<Json<GetVaaResponse>, RestError>
where
S: Aggregates,
{
) -> Result<Json<GetVaaResponse>, RestError> {
let price_id: PriceIdentifier = params.id.into();
verify_price_ids_exist(&state, &[price_id]).await?;
let state = &*state.state;
let price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
state,
let price_feeds_with_update_data = get_price_feeds_with_update_data(
&*state.state,
&[price_id],
RequestTime::FirstAfter(params.publish_time),
)

View File

@ -1,15 +1,11 @@
use {
super::verify_price_ids_exist,
crate::{
api::{
rest::RestError,
ApiState,
},
state::aggregate::{
Aggregates,
aggregate::{
RequestTime,
UnixTimestamp,
},
api::rest::RestError,
},
anyhow::Result,
axum::{
@ -46,8 +42,6 @@ pub struct GetVaaCcipResponse {
data: String, // TODO: Use a typed wrapper for the hex output with leading 0x.
}
/// **Deprecated: use /v2/updates/price/{publish_time} instead**
///
/// Get a VAA for a price feed using CCIP
///
/// This endpoint accepts a single argument which is a hex-encoded byte string of the following form:
@ -62,30 +56,25 @@ pub struct GetVaaCcipResponse {
GetVaaCcipQueryParams
)
)]
#[deprecated]
pub async fn get_vaa_ccip<S>(
State(state): State<ApiState<S>>,
pub async fn get_vaa_ccip(
State(state): State<crate::api::ApiState>,
QsQuery(params): QsQuery<GetVaaCcipQueryParams>,
) -> Result<Json<GetVaaCcipResponse>, RestError>
where
S: Aggregates,
{
) -> Result<Json<GetVaaCcipResponse>, RestError> {
let price_id: PriceIdentifier = PriceIdentifier::new(
params.data[0..32]
.try_into()
.map_err(|_| RestError::InvalidCCIPInput)?,
);
verify_price_ids_exist(&state, &[price_id]).await?;
let publish_time = UnixTimestamp::from_be_bytes(
params.data[32..40]
.try_into()
.map_err(|_| RestError::InvalidCCIPInput)?,
);
let state = &*state.state;
let price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
state,
verify_price_ids_exist(&state, &[price_id]).await?;
let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
&*state.state,
&[price_id],
RequestTime::FirstAfter(publish_time),
)

View File

@ -1,17 +1,13 @@
use {
super::verify_price_ids_exist,
crate::{
aggregate::RequestTime,
api::{
rest::RestError,
types::{
PriceIdInput,
RpcPriceFeed,
},
ApiState,
},
state::aggregate::{
Aggregates,
RequestTime,
},
},
anyhow::Result,
@ -50,8 +46,6 @@ pub struct LatestPriceFeedsQueryParams {
binary: bool,
}
/// **Deprecated: use /v2/updates/price/latest instead**
///
/// Get the latest price updates by price feed id.
///
/// Given a collection of price feed ids, retrieve the latest Pyth price for each price feed.
@ -65,20 +59,19 @@ pub struct LatestPriceFeedsQueryParams {
LatestPriceFeedsQueryParams
)
)]
#[deprecated]
pub async fn latest_price_feeds<S>(
State(state): State<ApiState<S>>,
pub async fn latest_price_feeds(
State(state): State<crate::api::ApiState>,
QsQuery(params): QsQuery<LatestPriceFeedsQueryParams>,
) -> Result<Json<Vec<RpcPriceFeed>>, RestError>
where
S: Aggregates,
{
) -> Result<Json<Vec<RpcPriceFeed>>, RestError> {
let price_ids: Vec<PriceIdentifier> = params.ids.into_iter().map(|id| id.into()).collect();
verify_price_ids_exist(&state, &price_ids).await?;
let state = &*state.state;
let price_feeds_with_update_data =
Aggregates::get_price_feeds_with_update_data(state, &price_ids, RequestTime::Latest)
let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
&*state.state,
&price_ids,
RequestTime::Latest,
)
.await
.map_err(|e| {
tracing::warn!(

View File

@ -1,15 +1,11 @@
use {
super::verify_price_ids_exist,
crate::{
aggregate::RequestTime,
api::{
doc_examples,
rest::RestError,
types::PriceIdInput,
ApiState,
},
state::aggregate::{
Aggregates,
RequestTime,
},
},
anyhow::Result,
@ -43,8 +39,6 @@ pub struct LatestVaasQueryParams {
}
/// **Deprecated: use /v2/updates/price/latest instead**
///
/// Get VAAs for a set of price feed ids.
///
/// Given a collection of price feed ids, retrieve the latest VAA for each. The returned VAA(s) can
@ -60,20 +54,19 @@ pub struct LatestVaasQueryParams {
(status = 200, description = "VAAs retrieved successfully", body = Vec<String>, example=json!([doc_examples::vaa_example()]))
),
)]
#[deprecated]
pub async fn latest_vaas<S>(
State(state): State<ApiState<S>>,
pub async fn latest_vaas(
State(state): State<crate::api::ApiState>,
QsQuery(params): QsQuery<LatestVaasQueryParams>,
) -> Result<Json<Vec<String>>, RestError>
where
S: Aggregates,
{
) -> Result<Json<Vec<String>>, RestError> {
let price_ids: Vec<PriceIdentifier> = params.ids.into_iter().map(|id| id.into()).collect();
verify_price_ids_exist(&state, &price_ids).await?;
let state = &*state.state;
let price_feeds_with_update_data =
Aggregates::get_price_feeds_with_update_data(state, &price_ids, RequestTime::Latest)
let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
&*state.state,
&price_ids,
RequestTime::Latest,
)
.await
.map_err(|e| {
tracing::warn!(

View File

@ -1,11 +1,7 @@
use {
crate::{
api::{
crate::api::{
rest::RestError,
types::RpcPriceIdentifier,
ApiState,
},
state::aggregate::Aggregates,
},
anyhow::Result,
axum::{
@ -14,8 +10,6 @@ use {
},
};
/// **Deprecated: use /v2/price_feeds instead**
///
/// Get the set of price feed IDs.
///
/// This endpoint fetches all of the price feed IDs for which price updates can be retrieved.
@ -27,15 +21,10 @@ use {
(status = 200, description = "Price feed ids retrieved successfully", body = Vec<RpcPriceIdentifier>)
),
)]
#[deprecated]
pub async fn price_feed_ids<S>(
State(state): State<ApiState<S>>,
) -> Result<Json<Vec<RpcPriceIdentifier>>, RestError>
where
S: Aggregates,
{
let state = &*state.state;
let price_feed_ids = Aggregates::get_price_feed_ids(state)
pub async fn price_feed_ids(
State(state): State<crate::api::ApiState>,
) -> Result<Json<Vec<RpcPriceIdentifier>>, RestError> {
let price_feed_ids = crate::aggregate::get_price_feed_ids(&*state.state)
.await
.into_iter()
.map(RpcPriceIdentifier::from)

View File

@ -1,24 +1,14 @@
use {
crate::{
api::ApiState,
state::aggregate::Aggregates,
},
axum::{
use axum::{
extract::State,
http::StatusCode,
response::{
IntoResponse,
Response,
},
},
};
pub async fn ready<S>(State(state): State<ApiState<S>>) -> Response
where
S: Aggregates,
{
let state = &*state.state;
match Aggregates::is_ready(state).await {
pub async fn ready(State(state): State<crate::api::ApiState>) -> Response {
match crate::aggregate::is_ready(&state.state).await {
true => (StatusCode::OK, "OK").into_response(),
false => (StatusCode::SERVICE_UNAVAILABLE, "Service Unavailable").into_response(),
}

View File

@ -1,5 +1,6 @@
use {
crate::{
aggregate::RequestTime,
api::{
rest::{
verify_price_ids_exist,
@ -12,11 +13,6 @@ use {
PriceIdInput,
PriceUpdate,
},
ApiState,
},
state::aggregate::{
Aggregates,
RequestTime,
},
},
anyhow::Result,
@ -77,19 +73,19 @@ fn default_true() -> bool {
LatestPriceUpdatesQueryParams
)
)]
pub async fn latest_price_updates<S>(
State(state): State<ApiState<S>>,
pub async fn latest_price_updates(
State(state): State<crate::api::ApiState>,
QsQuery(params): QsQuery<LatestPriceUpdatesQueryParams>,
) -> Result<Json<PriceUpdate>, RestError>
where
S: Aggregates,
{
) -> Result<Json<PriceUpdate>, RestError> {
let price_ids: Vec<PriceIdentifier> = params.ids.into_iter().map(|id| id.into()).collect();
verify_price_ids_exist(&state, &price_ids).await?;
let state = &*state.state;
let price_feeds_with_update_data =
Aggregates::get_price_feeds_with_update_data(state, &price_ids, RequestTime::Latest)
let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
&*state.state,
&price_ids,
RequestTime::Latest,
)
.await
.map_err(|e| {
tracing::warn!(

View File

@ -1,5 +1,9 @@
use {
crate::{
aggregate::{
AggregationEvent,
RequestTime,
},
api::{
rest::{
verify_price_ids_exist,
@ -15,11 +19,6 @@ use {
},
ApiState,
},
state::aggregate::{
Aggregates,
AggregationEvent,
RequestTime,
},
},
anyhow::Result,
axum::{
@ -89,22 +88,16 @@ fn default_true() -> bool {
params(StreamPriceUpdatesQueryParams)
)]
/// SSE route handler for streaming price updates.
pub async fn price_stream_sse_handler<S>(
State(state): State<ApiState<S>>,
pub async fn price_stream_sse_handler(
State(state): State<ApiState>,
QsQuery(params): QsQuery<StreamPriceUpdatesQueryParams>,
) -> Result<Sse<impl Stream<Item = Result<Event, Infallible>>>, RestError>
where
S: Aggregates,
S: Sync,
S: Send,
S: 'static,
{
) -> Result<Sse<impl Stream<Item = Result<Event, Infallible>>>, RestError> {
let price_ids: Vec<PriceIdentifier> = params.ids.into_iter().map(Into::into).collect();
verify_price_ids_exist(&state, &price_ids).await?;
// Clone the update_tx receiver to listen for new price updates
let update_rx: broadcast::Receiver<AggregationEvent> = Aggregates::subscribe(&*state.state);
let update_rx: broadcast::Receiver<AggregationEvent> = state.update_tx.subscribe();
// Convert the broadcast receiver into a Stream
let stream = BroadcastStream::new(update_rx);
@ -141,18 +134,15 @@ where
Ok(Sse::new(sse_stream).keep_alive(KeepAlive::default()))
}
async fn handle_aggregation_event<S>(
async fn handle_aggregation_event(
event: AggregationEvent,
state: ApiState<S>,
state: ApiState,
mut price_ids: Vec<PriceIdentifier>,
encoding: EncodingType,
parsed: bool,
benchmarks_only: bool,
allow_unordered: bool,
) -> Result<Option<PriceUpdate>>
where
S: Aggregates,
{
) -> Result<Option<PriceUpdate>> {
// Handle out-of-order events
if let AggregationEvent::OutOfOrder { .. } = event {
if !allow_unordered {
@ -161,11 +151,11 @@ where
}
// We check for available price feed ids to ensure that the price feed ids provided exist, since price feeds can be removed.
let available_price_feed_ids = Aggregates::get_price_feed_ids(&*state.state).await;
let available_price_feed_ids = crate::aggregate::get_price_feed_ids(&*state.state).await;
price_ids.retain(|price_feed_id| available_price_feed_ids.contains(price_feed_id));
let mut price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
let mut price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
&*state.state,
&price_ids,
RequestTime::AtSlot(event.slot()),
@ -195,7 +185,7 @@ where
.iter()
.any(|price_feed| price_feed.id == RpcPriceIdentifier::from(*price_id))
});
price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
&*state.state,
&price_ids,
RequestTime::AtSlot(event.slot()),

View File

@ -1,5 +1,9 @@
use {
crate::{
aggregate::{
RequestTime,
UnixTimestamp,
},
api::{
doc_examples,
rest::{
@ -13,12 +17,6 @@ use {
PriceIdInput,
PriceUpdate,
},
ApiState,
},
state::aggregate::{
Aggregates,
RequestTime,
UnixTimestamp,
},
},
anyhow::Result,
@ -89,22 +87,18 @@ fn default_true() -> bool {
TimestampPriceUpdatesQueryParams
)
)]
pub async fn timestamp_price_updates<S>(
State(state): State<ApiState<S>>,
pub async fn timestamp_price_updates(
State(state): State<crate::api::ApiState>,
Path(path_params): Path<TimestampPriceUpdatesPathParams>,
QsQuery(query_params): QsQuery<TimestampPriceUpdatesQueryParams>,
) -> Result<Json<PriceUpdate>, RestError>
where
S: Aggregates,
{
) -> Result<Json<PriceUpdate>, RestError> {
let price_ids: Vec<PriceIdentifier> =
query_params.ids.into_iter().map(|id| id.into()).collect();
verify_price_ids_exist(&state, &price_ids).await?;
let state = &*state.state;
let price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
state,
let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
&*state.state,
&price_ids,
RequestTime::FirstAfter(path_params.publish_time),
)

View File

@ -1,6 +1,6 @@
use {
super::doc_examples,
crate::state::aggregate::{
crate::aggregate::{
PriceFeedUpdate,
PriceFeedsWithUpdateData,
Slot,

View File

@ -1,18 +1,14 @@
use {
super::{
types::{
super::types::{
PriceIdInput,
RpcPriceFeed,
},
ApiState,
},
crate::state::{
crate::{
aggregate::{
Aggregates,
AggregationEvent,
RequestTime,
},
State,
state::State,
},
anyhow::{
anyhow,
@ -216,10 +212,11 @@ pub async fn ws_route_handler(
}
#[tracing::instrument(skip(stream, state, subscriber_ip))]
async fn websocket_handler<S>(stream: WebSocket, state: ApiState<S>, subscriber_ip: Option<IpAddr>)
where
S: Aggregates,
{
async fn websocket_handler(
stream: WebSocket,
state: super::ApiState,
subscriber_ip: Option<IpAddr>,
) {
let ws_state = state.ws.clone();
// Retain the recent rate limit data for the IP addresses to
@ -238,7 +235,7 @@ where
})
.inc();
let notify_receiver = Aggregates::subscribe(&*state.state);
let notify_receiver = state.update_tx.subscribe();
let (sender, receiver) = stream.split();
let mut subscriber = Subscriber::new(
id,
@ -257,11 +254,11 @@ pub type SubscriberId = usize;
/// Subscriber is an actor that handles a single websocket connection.
/// It listens to the store for updates and sends them to the client.
pub struct Subscriber<S> {
pub struct Subscriber {
id: SubscriberId,
ip_addr: Option<IpAddr>,
closed: bool,
state: Arc<S>,
store: Arc<State>,
ws_state: Arc<WsState>,
notify_receiver: Receiver<AggregationEvent>,
receiver: SplitStream<WebSocket>,
@ -272,14 +269,11 @@ pub struct Subscriber<S> {
responded_to_ping: bool,
}
impl<S> Subscriber<S>
where
S: Aggregates,
{
impl Subscriber {
pub fn new(
id: SubscriberId,
ip_addr: Option<IpAddr>,
state: Arc<S>,
store: Arc<State>,
ws_state: Arc<WsState>,
notify_receiver: Receiver<AggregationEvent>,
receiver: SplitStream<WebSocket>,
@ -289,7 +283,7 @@ where
id,
ip_addr,
closed: false,
state,
store,
ws_state,
notify_receiver,
receiver,
@ -356,9 +350,8 @@ where
.cloned()
.collect::<Vec<_>>();
let state = &*self.state;
let updates = match Aggregates::get_price_feeds_with_update_data(
state,
let updates = match crate::aggregate::get_price_feeds_with_update_data(
&*self.store,
&price_feed_ids,
RequestTime::AtSlot(event.slot()),
)
@ -371,7 +364,8 @@ where
// subscription. In this case we just remove the non-existing
// price feed from the list and will keep sending updates for
// the rest.
let available_price_feed_ids = Aggregates::get_price_feed_ids(state).await;
let available_price_feed_ids =
crate::aggregate::get_price_feed_ids(&*self.store).await;
self.price_feeds_with_config
.retain(|price_feed_id, _| available_price_feed_ids.contains(price_feed_id));
@ -382,8 +376,8 @@ where
.cloned()
.collect::<Vec<_>>();
Aggregates::get_price_feeds_with_update_data(
state,
crate::aggregate::get_price_feeds_with_update_data(
&*self.store,
&price_feed_ids,
RequestTime::AtSlot(event.slot()),
)
@ -551,7 +545,7 @@ where
allow_out_of_order,
}) => {
let price_ids: Vec<PriceIdentifier> = ids.into_iter().map(|id| id.into()).collect();
let available_price_ids = Aggregates::get_price_feed_ids(&*self.state).await;
let available_price_ids = crate::aggregate::get_price_feed_ids(&*self.store).await;
let not_found_price_ids: Vec<&PriceIdentifier> = price_ids
.iter()

View File

@ -19,9 +19,9 @@ pub struct Options {
#[arg(env = "PYTHNET_HTTP_ADDR")]
pub http_addr: String,
/// Pyth mapping account address on Pythnet.
#[arg(long = "pythnet-mapping-addr")]
/// Pyth mapping account address.
#[arg(long = "mapping-address")]
#[arg(default_value = DEFAULT_PYTHNET_MAPPING_ADDR)]
#[arg(env = "PYTHNET_MAPPING_ADDR")]
#[arg(env = "MAPPING_ADDRESS")]
pub mapping_addr: Pubkey,
}

View File

@ -17,6 +17,7 @@ use {
},
};
mod aggregate;
mod api;
mod config;
mod metrics_server;
@ -53,7 +54,7 @@ async fn init() -> Result<()> {
let (update_tx, _) = tokio::sync::broadcast::channel(1000);
// Initialize a cache store with a 1000 element circular buffer.
let state = State::new(update_tx.clone(), 1000, opts.benchmarks.endpoint.clone());
let store = State::new(update_tx.clone(), 1000, opts.benchmarks.endpoint.clone());
// Listen for Ctrl+C so we can set the exit flag and wait for a graceful shutdown.
spawn(async move {
@ -66,10 +67,10 @@ async fn init() -> Result<()> {
// Spawn all worker tasks, and wait for all to complete (which will happen if a shutdown
// signal has been observed).
let tasks = join_all(vec![
spawn(network::wormhole::spawn(opts.clone(), state.clone())),
spawn(network::pythnet::spawn(opts.clone(), state.clone())),
spawn(metrics_server::run(opts.clone(), state.clone())),
spawn(api::spawn(opts.clone(), state.clone())),
spawn(network::wormhole::spawn(opts.clone(), store.clone())),
spawn(network::pythnet::spawn(opts.clone(), store.clone())),
spawn(metrics_server::run(opts.clone(), store.clone())),
spawn(api::spawn(opts.clone(), store.clone(), update_tx)),
])
.await;

View File

@ -4,6 +4,10 @@
use {
crate::{
aggregate::{
AccumulatorMessages,
Update,
},
api::types::PriceFeedMetadata,
config::RunOptions,
network::wormhole::{
@ -16,14 +20,7 @@ use {
PriceFeedMeta,
DEFAULT_PRICE_FEEDS_CACHE_UPDATE_INTERVAL,
},
state::{
aggregate::{
AccumulatorMessages,
Aggregates,
Update,
},
State,
},
state::State,
},
anyhow::{
anyhow,
@ -139,7 +136,7 @@ async fn fetch_bridge_data(
}
}
pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<!> {
pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<()> {
let client = PubsubClient::new(pythnet_ws_endpoint.as_ref()).await?;
let config = RpcProgramAccountsConfig {
@ -160,7 +157,9 @@ pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<!> {
.program_subscribe(&system_program::id(), Some(config))
.await?;
while let Some(update) = notif.next().await {
loop {
match notif.next().await {
Some(update) => {
let account: Account = match update.value.account.decode() {
Some(account) => account,
None => {
@ -183,8 +182,8 @@ pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<!> {
if candidate.to_string() == update.value.pubkey {
let store = store.clone();
tokio::spawn(async move {
if let Err(err) = Aggregates::store_update(
&*store,
if let Err(err) = crate::aggregate::store_update(
&store,
Update::AccumulatorMessages(accumulator_messages),
)
.await
@ -206,8 +205,11 @@ pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<!> {
}
};
}
Err(anyhow!("Pythnet network listener connection terminated"))
None => {
return Err(anyhow!("Pythnet network listener terminated"));
}
}
}
}
/// Fetch existing GuardianSet accounts from Wormhole.

View File

@ -7,13 +7,7 @@
use {
crate::{
config::RunOptions,
state::{
aggregate::{
Aggregates,
Update,
},
State,
},
state::State,
},
anyhow::{
anyhow,
@ -49,11 +43,7 @@ use {
Digest,
Keccak256,
},
std::{
sync::Arc,
time::Duration,
},
tokio::time::Instant,
std::sync::Arc,
tonic::Request,
wormhole_sdk::{
vaa::{
@ -110,10 +100,10 @@ pub struct BridgeConfig {
/// GuardianSetData extracted from the Wormhole bridge account, since there is no API for it.
#[derive(borsh::BorshDeserialize)]
pub struct GuardianSetData {
pub _index: u32,
pub index: u32,
pub keys: Vec<[u8; 20]>,
pub _creation_time: u32,
pub _expiration_time: u32,
pub creation_time: u32,
pub expiration_time: u32,
}
/// Update the guardian set with the given ID in the state.
@ -162,16 +152,10 @@ mod proto {
pub async fn spawn(opts: RunOptions, state: Arc<State>) -> Result<()> {
let mut exit = crate::EXIT.subscribe();
loop {
let current_time = Instant::now();
tokio::select! {
_ = exit.changed() => break,
Err(err) = run(opts.clone(), state.clone()) => {
tracing::error!(error = ?err, "Wormhole gRPC service failed.");
if current_time.elapsed() < Duration::from_secs(30) {
tracing::error!("Wormhole listener restarting too quickly. Sleep 1s.");
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
}
@ -180,7 +164,7 @@ pub async fn spawn(opts: RunOptions, state: Arc<State>) -> Result<()> {
}
#[tracing::instrument(skip(opts, state))]
async fn run(opts: RunOptions, state: Arc<State>) -> Result<!> {
async fn run(opts: RunOptions, state: Arc<State>) -> Result<()> {
let mut client = SpyRpcServiceClient::connect(opts.wormhole.spy_rpc_addr).await?;
let mut stream = client
.subscribe_signed_vaa(Request::new(SubscribeSignedVaaRequest {
@ -200,7 +184,7 @@ async fn run(opts: RunOptions, state: Arc<State>) -> Result<!> {
}
}
Err(anyhow!("Wormhole gRPC stream terminated."))
Ok(())
}
/// Process a message received via a Wormhole gRPC connection.
@ -368,7 +352,9 @@ pub async fn store_vaa(state: Arc<State>, sequence: u64, vaa_bytes: Vec<u8>) {
}
// Hand the VAA to the aggregate store.
if let Err(e) = Aggregates::store_update(&*state, Update::Vaa(vaa_bytes)).await {
if let Err(e) =
crate::aggregate::store_update(&state, crate::aggregate::Update::Vaa(vaa_bytes)).await
{
tracing::error!(error = ?e, "Failed to store VAA in aggregate store.");
}
}

View File

@ -31,7 +31,6 @@ impl<'a> From<&'a State> for &'a PriceFeedMetaState {
}
}
#[async_trait::async_trait]
pub trait PriceFeedMeta {
async fn retrieve_price_feeds_metadata(&self) -> Result<Vec<PriceFeedMetadata>>;
async fn store_price_feeds_metadata(
@ -45,7 +44,6 @@ pub trait PriceFeedMeta {
) -> Result<Vec<PriceFeedMetadata>>;
}
#[async_trait::async_trait]
impl<T> PriceFeedMeta for T
where
for<'a> &'a T: Into<&'a PriceFeedMetaState>,

View File

@ -2,14 +2,14 @@
use {
self::{
aggregate::{
AggregateState,
AggregationEvent,
},
benchmarks::BenchmarksState,
cache::CacheState,
},
crate::{
aggregate::{
AggregateState,
AggregationEvent,
},
network::wormhole::GuardianSet,
price_feeds_metadata::PriceFeedMetaState,
},
@ -28,7 +28,6 @@ use {
},
};
pub mod aggregate;
pub mod benchmarks;
pub mod cache;
@ -42,9 +41,6 @@ pub struct State {
/// State for the `PriceFeedMeta` service for looking up metadata related to Pyth price feeds.
pub price_feed_meta: PriceFeedMetaState,
/// State for accessing/storing Pyth price aggregates.
pub aggregates: AggregateState,
/// Sequence numbers of recently observed VAAs. The store uses this set
/// to skip previously observed VAAs as a performance optimization.
pub observed_vaa_seqs: RwLock<BTreeSet<u64>>,
@ -52,6 +48,12 @@ pub struct State {
/// Wormhole guardian sets, used to verify VAAs before they are processed.
pub guardian_set: RwLock<BTreeMap<u32, GuardianSet>>,
/// The sender side of the channel between Store and Api, used to notify it of completed updates.
pub api_update_tx: Sender<AggregationEvent>,
/// The aggregate module state.
pub aggregate_state: RwLock<AggregateState>,
/// Metrics registry
pub metrics_registry: RwLock<Registry>,
}
@ -67,9 +69,10 @@ impl State {
cache: CacheState::new(cache_size),
benchmarks: BenchmarksState::new(benchmarks_endpoint),
price_feed_meta: PriceFeedMetaState::new(),
aggregates: AggregateState::new(update_tx, &mut metrics_registry),
observed_vaa_seqs: RwLock::new(Default::default()),
guardian_set: RwLock::new(Default::default()),
api_update_tx: update_tx,
aggregate_state: RwLock::new(AggregateState::new(&mut metrics_registry)),
metrics_registry: RwLock::new(metrics_registry),
})
}

View File

@ -1,14 +1,14 @@
//! This module communicates with Pyth Benchmarks, an API for historical price feeds and their updates.
use {
super::{
super::State,
crate::{
aggregate::{
PriceFeedsWithUpdateData,
UnixTimestamp,
},
State,
api::types::PriceUpdate,
},
crate::api::types::PriceUpdate,
anyhow::Result,
base64::{
engine::general_purpose::STANDARD as base64_standard_engine,
@ -69,7 +69,6 @@ impl<'a> From<&'a State> for &'a BenchmarksState {
}
}
#[async_trait::async_trait]
pub trait Benchmarks {
async fn get_verified_price_feeds(
&self,
@ -78,7 +77,6 @@ pub trait Benchmarks {
) -> Result<PriceFeedsWithUpdateData>;
}
#[async_trait::async_trait]
impl<T> Benchmarks for T
where
for<'a> &'a T: Into<&'a BenchmarksState>,

View File

@ -1,6 +1,6 @@
use {
super::State,
crate::state::aggregate::{
crate::aggregate::{
wormhole_merkle::WormholeMerkleState,
AccumulatorMessages,
ProofSet,
@ -132,10 +132,16 @@ impl<'a> From<&'a State> for &'a CacheState {
}
}
#[async_trait::async_trait]
pub trait Cache {
async fn message_state_keys(&self) -> Vec<MessageStateKey>;
async fn store_message_states(&self, message_states: Vec<MessageState>) -> Result<()>;
async fn prune_removed_keys(&self, current_keys: HashSet<MessageStateKey>);
async fn fetch_message_states(
&self,
ids: Vec<FeedId>,
request_time: RequestTime,
filter: MessageStateFilter,
) -> Result<Vec<MessageState>>;
async fn store_accumulator_messages(
&self,
accumulator_messages: AccumulatorMessages,
@ -146,16 +152,8 @@ pub trait Cache {
wormhole_merkle_state: WormholeMerkleState,
) -> Result<()>;
async fn fetch_wormhole_merkle_state(&self, slot: Slot) -> Result<Option<WormholeMerkleState>>;
async fn message_state_keys(&self) -> Vec<MessageStateKey>;
async fn fetch_message_states(
&self,
ids: Vec<FeedId>,
request_time: RequestTime,
filter: MessageStateFilter,
) -> Result<Vec<MessageState>>;
}
#[async_trait::async_trait]
impl<T> Cache for T
where
for<'a> &'a T: Into<&'a CacheState>,
@ -324,9 +322,9 @@ async fn retrieve_message_state(
mod test {
use {
super::*,
crate::state::{
crate::{
aggregate::wormhole_merkle::WormholeMerkleMessageProof,
test::setup_state,
state::test::setup_state,
},
pyth_sdk::UnixTimestamp,
pythnet_sdk::{

View File

@ -1,13 +0,0 @@
{
"endpoint": "https://api.mainnet-beta.solana.com",
"keypair-file": "./id.json",
"shard-id": 1,
"jito-endpoint": "mainnet.block-engine.jito.wtf",
"jito-keypair-file": "./jito.json",
"jito-tip-lamports": "100000",
"jito-bundle-size": "5",
"price-config-file": "./price-config.yaml",
"price-service-endpoint": "https://hermes.pyth.network/",
"pyth-contract-address": "pythWSnswVUd12oZpeFP8e9CVaEqJg25g1Vtc2biRsT",
"pushing-frequency": "30"
}

View File

@ -1,9 +0,0 @@
{
"endpoint": "https://api.devnet.solana.com",
"keypair-file": "./id.json",
"shard-id": 1,
"price-config-file": "./price-config.yaml",
"price-service-endpoint": "https://hermes.pyth.network/",
"pyth-contract-address": "pythWSnswVUd12oZpeFP8e9CVaEqJg25g1Vtc2biRsT",
"pushing-frequency": "30"
}

View File

@ -1,5 +1,5 @@
{
"name": "@pythnetwork/contract-manager",
"name": "contract_manager",
"version": "1.0.0",
"description": "Set of tools to manage pyth contracts",
"private": true,
@ -23,7 +23,7 @@
"dependencies": {
"@certusone/wormhole-sdk": "^0.9.8",
"@coral-xyz/anchor": "^0.29.0",
"@injectivelabs/networks": "^1.14.6",
"@injectivelabs/networks": "1.0.68",
"@mysten/sui.js": "^0.49.1",
"@pythnetwork/cosmwasm-deploy-tools": "*",
"@pythnetwork/entropy-sdk-solidity": "*",

View File

@ -23,7 +23,7 @@ import {
EvmEntropyContract,
EvmPriceFeedContract,
getCodeDigestWithoutAddress,
EvmWormholeContract,
WormholeEvmContract,
} from "../src/contracts/evm";
import Web3 from "web3";
@ -73,7 +73,7 @@ async function main() {
instruction.governanceAction.targetChainId
) {
const address = instruction.governanceAction.address;
const contract = new EvmWormholeContract(chain, address);
const contract = new WormholeEvmContract(chain, address);
const currentIndex = await contract.getCurrentGuardianSetIndex();
const guardianSet = await contract.getGuardianSet();

View File

@ -1,18 +1,11 @@
import {
DefaultStore,
EvmChain,
EvmEntropyContract,
EvmWormholeContract,
getDefaultDeploymentConfig,
PrivateKey,
} from "../src";
import { DefaultStore, EvmChain, PrivateKey } from "../src";
import { existsSync, readFileSync, writeFileSync } from "fs";
import { join } from "path";
import Web3 from "web3";
import { Contract } from "web3-eth-contract";
import { InferredOptionType } from "yargs";
export interface BaseDeployConfig {
interface DeployConfig {
gasMultiplier: number;
gasPriceMultiplier: number;
jsonOutputDir: string;
@ -26,7 +19,7 @@ export interface BaseDeployConfig {
export async function deployIfNotCached(
cacheFile: string,
chain: EvmChain,
config: BaseDeployConfig,
config: DeployConfig,
artifactName: string,
deployArgs: any[], // eslint-disable-line @typescript-eslint/no-explicit-any
cacheKey?: string
@ -79,7 +72,7 @@ export const COMMON_DEPLOY_OPTIONS = {
chain: {
type: "array",
demandOption: true,
desc: "Chain to upload the contract on. Can be one of the chains available in the store",
desc: "Chain to upload the contract on. Can be one of the evm chains available in the store",
},
"deployment-type": {
type: "string",
@ -188,149 +181,3 @@ export function getSelectedChains(argv: {
}
return selectedChains;
}
/**
* Finds the entropy contract for a given EVM chain.
* @param {EvmChain} chain The EVM chain to find the entropy contract for.
* @returns The entropy contract for the given EVM chain.
* @throws {Error} an error if the entropy contract is not found for the given EVM chain.
*/
export function findEntropyContract(chain: EvmChain): EvmEntropyContract {
for (const contract of Object.values(DefaultStore.entropy_contracts)) {
if (contract.getChain().getId() === chain.getId()) {
return contract;
}
}
throw new Error(`Entropy contract not found for chain ${chain.getId()}`);
}
/**
* Finds an EVM chain by its name.
* @param {string} chainName The name of the chain to find.
* @returns The EVM chain instance.
* @throws {Error} an error if the chain is not found or is not an EVM chain.
*/
export function findEvmChain(chainName: string): EvmChain {
const chain = DefaultStore.chains[chainName];
if (!chain) {
throw new Error(`Chain ${chainName} not found`);
} else if (!(chain instanceof EvmChain)) {
throw new Error(`Chain ${chainName} is not an EVM chain`);
}
return chain;
}
/**
* Finds the wormhole contract for a given EVM chain.
* @param {EvmChain} chain The EVM chain to find the wormhole contract for.
* @returns The wormhole contract for the given EVM chain if found; otherwise, undefined.
*/
export function findWormholeContract(
chain: EvmChain
): EvmWormholeContract | undefined {
for (const contract of Object.values(DefaultStore.wormhole_contracts)) {
if (
contract instanceof EvmWormholeContract &&
contract.getChain().getId() === chain.getId()
) {
return contract;
}
}
}
export interface DeployWormholeReceiverContractsConfig
extends BaseDeployConfig {
saveContract: boolean;
type: "stable" | "beta";
}
/**
* Deploys the wormhole receiver contract for a given EVM chain.
* @param {EvmChain} chain The EVM chain to deploy the wormhole receiver contract on.
* @param {DeployWormholeReceiverContractsConfig} config The deployment configuration.
* @param {string} cacheFile The path to the cache file.
* @returns {EvmWormholeContract} The wormhole contract for the given EVM chain.
*/
export async function deployWormholeContract(
chain: EvmChain,
config: DeployWormholeReceiverContractsConfig,
cacheFile: string
): Promise<EvmWormholeContract> {
const receiverSetupAddr = await deployIfNotCached(
cacheFile,
chain,
config,
"ReceiverSetup",
[]
);
const receiverImplAddr = await deployIfNotCached(
cacheFile,
chain,
config,
"ReceiverImplementation",
[]
);
// Craft the init data for the proxy contract
const setupContract = getWeb3Contract(
config.jsonOutputDir,
"ReceiverSetup",
receiverSetupAddr
);
const { wormholeConfig } = getDefaultDeploymentConfig(config.type);
const initData = setupContract.methods
.setup(
receiverImplAddr,
wormholeConfig.initialGuardianSet.map((addr: string) => "0x" + addr),
chain.getWormholeChainId(),
wormholeConfig.governanceChainId,
"0x" + wormholeConfig.governanceContract
)
.encodeABI();
const wormholeReceiverAddr = await deployIfNotCached(
cacheFile,
chain,
config,
"WormholeReceiver",
[receiverSetupAddr, initData]
);
const wormholeContract = new EvmWormholeContract(chain, wormholeReceiverAddr);
if (config.type === "stable") {
console.log(`Syncing mainnet guardian sets for ${chain.getId()}...`);
// TODO: Add a way to pass gas configs to this
await wormholeContract.syncMainnetGuardianSets(config.privateKey);
console.log(`✅ Synced mainnet guardian sets for ${chain.getId()}`);
}
if (config.saveContract) {
DefaultStore.wormhole_contracts[wormholeContract.getId()] =
wormholeContract;
DefaultStore.saveAllContracts();
}
return wormholeContract;
}
/**
* Returns the wormhole contract for a given EVM chain.
* If no wormhole contract has been deployed for the given chain, this deploys one and saves it to the default store.
* @param {EvmChain} chain The EVM chain to find the wormhole contract for.
* @param {DeployWormholeReceiverContractsConfig} config The deployment configuration.
* @param {string} cacheFile The path to the cache file.
* @returns {EvmWormholeContract} The wormhole contract for the given EVM chain.
*/
export async function getOrDeployWormholeContract(
chain: EvmChain,
config: DeployWormholeReceiverContractsConfig,
cacheFile: string
): Promise<EvmWormholeContract> {
return (
findWormholeContract(chain) ??
(await deployWormholeContract(chain, config, cacheFile))
);
}
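A hypothetical call site for the helper above; because of the `??`, an entry already present in `DefaultStore.wormhole_contracts` short-circuits the deployment entirely. The chain name, key source, and output directory are illustrative, and the config lists only the fields visible in this diff:

const chain = findEvmChain("avalanche"); // any EVM chain from the store
const wormholeContract = await getOrDeployWormholeContract(
  chain,
  {
    type: "stable",
    privateKey: toPrivateKey(process.env.PRIVATE_KEY!),
    jsonOutputDir: "./contracts-out",
    gasMultiplier: 2,
    gasPriceMultiplier: 1,
    saveContract: true,
  },
  ".cache-deploy-evm"
);
console.log(`Wormhole receiver for ${chain.getId()}: ${wormholeContract.address}`);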

View File

@ -5,23 +5,29 @@ import { DefaultStore } from "../src/store";
import {
DeploymentType,
EvmEntropyContract,
EvmPriceFeedContract,
getDefaultDeploymentConfig,
PrivateKey,
toDeploymentType,
toPrivateKey,
WormholeEvmContract,
} from "../src";
import {
COMMON_DEPLOY_OPTIONS,
deployIfNotCached,
getWeb3Contract,
getOrDeployWormholeContract,
BaseDeployConfig,
} from "./common";
import Web3 from "web3";
interface DeploymentConfig extends BaseDeployConfig {
type DeploymentConfig = {
type: DeploymentType;
gasMultiplier: number;
gasPriceMultiplier: number;
privateKey: PrivateKey;
jsonOutputDir: string;
wormholeAddr: string;
saveContract: boolean;
}
};
const CACHE_FILE = ".cache-deploy-evm-entropy-contracts";
const ENTROPY_DEFAULT_PROVIDER = {
@ -45,8 +51,7 @@ const parser = yargs(hideBin(process.argv))
async function deployExecutorContracts(
chain: EvmChain,
config: DeploymentConfig,
wormholeAddr: string
config: DeploymentConfig
): Promise<string> {
const executorImplAddr = await deployIfNotCached(
CACHE_FILE,
@ -67,7 +72,7 @@ async function deployExecutorContracts(
const executorInitData = executorImplContract.methods
.initialize(
wormholeAddr,
config.wormholeAddr,
0, // lastExecutedSequence,
chain.getWormholeChainId(),
governanceDataSource.emitterChain,
@ -156,6 +161,19 @@ async function topupProviderIfNecessary(
}
}
async function findWormholeAddress(
chain: EvmChain
): Promise<string | undefined> {
for (const contract of Object.values(DefaultStore.contracts)) {
if (
contract instanceof EvmPriceFeedContract &&
contract.getChain().getId() === chain.getId()
) {
return (await contract.getWormholeContract()).address;
}
}
}
async function main() {
const argv = await parser.argv;
@ -167,6 +185,12 @@ async function main() {
throw new Error(`Chain ${chainName} is not an EVM chain`);
}
const wormholeAddr = await findWormholeAddress(chain);
if (!wormholeAddr) {
// TODO: deploy wormhole if necessary and maintain a wormhole store
throw new Error(`Wormhole contract not found for chain ${chain.getId()}`);
}
const deploymentConfig: DeploymentConfig = {
type: toDeploymentType(argv.deploymentType),
gasMultiplier: argv.gasMultiplier,
@ -174,14 +198,18 @@ async function main() {
privateKey: toPrivateKey(argv.privateKey),
jsonOutputDir: argv.stdOutputDir,
saveContract: argv.saveContract,
wormholeAddr,
};
const wormholeContract = await getOrDeployWormholeContract(
const wormholeContract = new WormholeEvmContract(
chain,
deploymentConfig,
CACHE_FILE
deploymentConfig.wormholeAddr
);
const wormholeChainId = await wormholeContract.getChainId();
if (chain.getWormholeChainId() != wormholeChainId) {
throw new Error(
`Wormhole chain id mismatch. Expected ${chain.getWormholeChainId()} but got ${wormholeChainId}`
);
}
await topupProviderIfNecessary(chain, deploymentConfig);
console.log(
@ -190,11 +218,7 @@ async function main() {
console.log(`Deploying entropy contracts on ${chain.getId()}...`);
const executorAddr = await deployExecutorContracts(
chain,
deploymentConfig,
wormholeContract.address
);
const executorAddr = await deployExecutorContracts(chain, deploymentConfig);
const entropyAddr = await deployEntropyContracts(
chain,
deploymentConfig,

View File

@ -6,23 +6,27 @@ import {
DeploymentType,
EvmPriceFeedContract,
getDefaultDeploymentConfig,
PrivateKey,
toDeploymentType,
toPrivateKey,
WormholeEvmContract,
} from "../src";
import {
COMMON_DEPLOY_OPTIONS,
deployIfNotCached,
getWeb3Contract,
getOrDeployWormholeContract,
BaseDeployConfig,
} from "./common";
interface DeploymentConfig extends BaseDeployConfig {
type DeploymentConfig = {
type: DeploymentType;
validTimePeriodSeconds: number;
singleUpdateFeeInWei: number;
gasMultiplier: number;
gasPriceMultiplier: number;
privateKey: PrivateKey;
jsonOutputDir: string;
saveContract: boolean;
}
};
const CACHE_FILE = ".cache-deploy-evm";
@ -47,6 +51,68 @@ const parser = yargs(hideBin(process.argv))
},
});
async function deployWormholeReceiverContracts(
chain: EvmChain,
config: DeploymentConfig
): Promise<string> {
const receiverSetupAddr = await deployIfNotCached(
CACHE_FILE,
chain,
config,
"ReceiverSetup",
[]
);
const receiverImplAddr = await deployIfNotCached(
CACHE_FILE,
chain,
config,
"ReceiverImplementation",
[]
);
// Craft the init data for the proxy contract
const setupContract = getWeb3Contract(
config.jsonOutputDir,
"ReceiverSetup",
receiverSetupAddr
);
const { wormholeConfig } = getDefaultDeploymentConfig(config.type);
const initData = setupContract.methods
.setup(
receiverImplAddr,
wormholeConfig.initialGuardianSet.map((addr: string) => "0x" + addr),
chain.getWormholeChainId(),
wormholeConfig.governanceChainId,
"0x" + wormholeConfig.governanceContract
)
.encodeABI();
const wormholeReceiverAddr = await deployIfNotCached(
CACHE_FILE,
chain,
config,
"WormholeReceiver",
[receiverSetupAddr, initData]
);
const wormholeEvmContract = new WormholeEvmContract(
chain,
wormholeReceiverAddr
);
if (config.type === "stable") {
console.log(`Syncing mainnet guardian sets for ${chain.getId()}...`);
// TODO: Add a way to pass gas configs to this
await wormholeEvmContract.syncMainnetGuardianSets(config.privateKey);
console.log(`✅ Synced mainnet guardian sets for ${chain.getId()}`);
}
return wormholeReceiverAddr;
}
async function deployPriceFeedContracts(
chain: EvmChain,
config: DeploymentConfig,
@ -120,16 +186,14 @@ async function main() {
console.log(`Deploying price feed contracts on ${chain.getId()}...`);
const wormholeContract = await getOrDeployWormholeContract(
const wormholeAddr = await deployWormholeReceiverContracts(
chain,
deploymentConfig,
CACHE_FILE
deploymentConfig
);
const priceFeedAddr = await deployPriceFeedContracts(
chain,
deploymentConfig,
wormholeContract.address
wormholeAddr
);
if (deploymentConfig.saveContract) {

View File

@ -1,64 +0,0 @@
import yargs from "yargs";
import { hideBin } from "yargs/helpers";
import { DefaultStore } from "../src";
function deserializeCommitmentMetadata(data: Buffer) {
const seed = Uint8Array.from(data.subarray(0, 32));
const chainLength = data.readBigInt64LE(32);
return {
seed,
chainLength,
};
}
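The commitment metadata blob above has a fixed layout: a 32-byte seed followed by a little-endian signed 64-bit chain length at offset 32. A quick usage sketch against a synthetic buffer (values illustrative):

// Synthetic 40-byte blob: seed bytes of 0xab, then chainLength = 65536.
const blob = Buffer.alloc(40, 0xab);
blob.writeBigInt64LE(65536n, 32);
const { seed, chainLength } = deserializeCommitmentMetadata(blob);
console.log(seed.length, chainLength); // 32 65536n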
const parser = yargs(hideBin(process.argv))
.usage("Usage: $0")
.options({
testnet: {
type: "boolean",
default: false,
desc: "Fetch the provider registration data for the testnet contracts.",
},
});
async function main() {
const argv = await parser.argv;
for (const contract of Object.values(DefaultStore.entropy_contracts)) {
if (contract.getChain().isMainnet() === argv.testnet) continue;
let provider;
let providerInfo;
try {
provider = await contract.getDefaultProvider();
providerInfo = await contract.getProviderInfo(provider);
} catch (e) {
console.error(`Error fetching info for ${contract.getId()}`, e);
continue;
}
const commitmentMetadata = providerInfo.commitmentMetadata.replace(
"0x",
""
);
// const binaryData = hexToBytes(commitmentMetadata);
const metadata = deserializeCommitmentMetadata(
Buffer.from(commitmentMetadata, "hex")
);
console.log("=".repeat(100));
console.log(`Fetched info for ${contract.getId()}`);
console.log(`chain : ${contract.getChain().getId()}`);
console.log(`contract : ${contract.address}`);
console.log(`provider : ${provider}`);
console.log(`commitment data : ${commitmentMetadata}`);
console.log(`chainLength : ${metadata.chainLength}`);
console.log(`seed : [${metadata.seed}]`);
console.log(
`original seq no : ${providerInfo.originalCommitmentSequenceNumber}`
);
}
}
main();

View File

@ -1,32 +1,33 @@
import yargs from "yargs";
import { hideBin } from "yargs/helpers";
import { toPrivateKey } from "../src";
import {
COMMON_DEPLOY_OPTIONS,
findEntropyContract,
findEvmChain,
} from "./common";
import { DefaultStore, toPrivateKey } from "../src";
import { COMMON_DEPLOY_OPTIONS } from "./common";
const parser = yargs(hideBin(process.argv))
.usage(
"Requests and reveals a random number from an entropy contract while measuing the\n" +
"latency between request submission and availablity of the provider revelation from fortuna.\n" +
"Usage: $0 --chain <chain-id> --private-key <private-key>"
"Usage: $0 --contract <entropy_contract_id> --private-key <private-key>"
)
.options({
chain: {
contract: {
type: "string",
demandOption: true,
desc: "test latency for the contract on this chain",
desc: "Contract to test latency for",
},
"private-key": COMMON_DEPLOY_OPTIONS["private-key"],
});
async function main() {
const argv = await parser.argv;
const chain = findEvmChain(argv.chain);
const contract = findEntropyContract(chain);
const contract = DefaultStore.entropy_contracts[argv.contract];
if (!contract) {
throw new Error(
`Contract ${argv.contract} not found. Contracts found: ${Object.keys(
DefaultStore.entropy_contracts
)}`
);
}
const provider = await contract.getDefaultProvider();
const providerInfo = await contract.getProviderInfo(provider);
const userRandomNumber = contract.generateUserRandomNumber();

View File

@ -1,118 +0,0 @@
import yargs from "yargs";
import { hideBin } from "yargs/helpers";
import {
DefaultStore,
EvmEntropyContract,
PrivateKey,
toPrivateKey,
} from "../src";
import {
COMMON_DEPLOY_OPTIONS,
findEntropyContract,
findEvmChain,
} from "./common";
import Web3 from "web3";
const parser = yargs(hideBin(process.argv))
.usage(
"Requests a random number from an entropy contract and measures the\n" +
"latency between request submission and fulfillment by the Fortuna keeper service.\n" +
"Usage: $0 --private-key <private-key> --chain <chain-id> | --all-chains <testnet|mainnet>"
)
.options({
chain: {
type: "string",
desc: "test latency for the contract on this chain",
conflicts: "all-chains",
},
"all-chains": {
type: "string",
conflicts: "chain",
choices: ["testnet", "mainnet"],
desc: "test latency for all entropy contracts deployed either on mainnet or testnet",
},
"private-key": COMMON_DEPLOY_OPTIONS["private-key"],
});
async function testLatency(
contract: EvmEntropyContract,
privateKey: PrivateKey
) {
const provider = await contract.getDefaultProvider();
const userRandomNumber = contract.generateUserRandomNumber();
const requestResponse = await contract.requestRandomness(
userRandomNumber,
provider,
privateKey,
true // with callback
);
console.log(`Request tx hash : ${requestResponse.transactionHash}`);
// Read the sequence number for the request from the transaction events.
const sequenceNumber =
requestResponse.events.RequestedWithCallback.returnValues.sequenceNumber;
console.log(`sequence : ${sequenceNumber}`);
const startTime = Date.now();
let fromBlock = requestResponse.blockNumber;
const web3 = new Web3(contract.chain.getRpcUrl());
const entropyContract = contract.getContract();
// eslint-disable-next-line no-constant-condition
while (true) {
const currentBlock = await web3.eth.getBlockNumber();
if (fromBlock > currentBlock) {
continue;
}
const events = await entropyContract.getPastEvents("RevealedWithCallback", {
fromBlock: fromBlock,
toBlock: currentBlock,
});
fromBlock = currentBlock + 1;
const event = events.find(
(event) => event.returnValues.request[1] == sequenceNumber
);
if (event !== undefined) {
console.log(`Random number : ${event.returnValues.randomNumber}`);
const endTime = Date.now();
console.log(`Fortuna Latency : ${endTime - startTime}ms`);
console.log(
`Revealed after : ${
currentBlock - requestResponse.blockNumber
} blocks`
);
break;
}
await new Promise((resolve) => setTimeout(resolve, 300));
}
}
async function main() {
const argv = await parser.argv;
if (!argv.chain && !argv["all-chains"]) {
throw new Error("Must specify either --chain or --all-chains");
}
const privateKey = toPrivateKey(argv.privateKey);
if (argv["all-chains"]) {
for (const contract of Object.values(DefaultStore.entropy_contracts)) {
if (
contract.getChain().isMainnet() ===
(argv["all-chains"] === "mainnet")
) {
console.log(`Testing latency for ${contract.getId()}...`);
await testLatency(contract, privateKey);
}
}
} else if (argv.chain) {
const chain = findEvmChain(argv.chain);
const contract = findEntropyContract(chain);
await testLatency(contract, privateKey);
}
}
main();

View File

@ -13,22 +13,15 @@ const parser = yargs(hideBin(process.argv))
},
});
const KEEPER_ADDRESS = {
mainnet: "0xBcAb779fCa45290288C35F5E231c37F9fA87b130",
testnet: "0xa5A68ed167431Afe739846A22597786ba2da85df",
};
async function main() {
const argv = await parser.argv;
const entries = [];
const keeperAddress = KEEPER_ADDRESS[argv.testnet ? "testnet" : "mainnet"];
for (const contract of Object.values(DefaultStore.entropy_contracts)) {
if (contract.getChain().isMainnet() === argv.testnet) continue;
try {
const provider = await contract.getDefaultProvider();
const w3 = new Web3(contract.getChain().getRpcUrl());
const balance = await w3.eth.getBalance(provider);
const keeperBalance = await w3.eth.getBalance(keeperAddress);
let version = "unknown";
try {
version = await contract.getVersion();
@ -41,7 +34,6 @@ async function main() {
contract: contract.address,
provider: providerInfo.uri,
balance,
keeperBalance,
seq: providerInfo.sequenceNumber,
version,
});

View File

@ -1,69 +0,0 @@
import yargs from "yargs";
import { hideBin } from "yargs/helpers";
import {
CosmWasmPriceFeedContract,
DefaultStore,
EvmPriceFeedContract,
toPrivateKey,
} from "../src";
const parser = yargs(hideBin(process.argv))
.usage("Update the guardian set in stable networks. Usage: $0")
.options({
"private-key": {
type: "string",
demandOption: true,
desc: "Private key to sign the transactions with",
},
chain: {
type: "array",
desc: "Can be one of the chains available in the store",
},
});
async function main() {
const argv = await parser.argv;
const privateKey = toPrivateKey(argv.privateKey);
const chains = argv.chain;
for (const contract of Object.values(DefaultStore.contracts)) {
// We currently manage wormhole receiver contracts only on EVM, CosmWasm,
// and Solana-based networks. The remaining networks are managed by the
// guardians themselves, and they should be the ones updating the guardian
// set.
// TODO: Solana-based receivers have their script in their rust cli. Add
// support for Solana-based networks here once they are added to the
// contract manager.
if (
contract instanceof CosmWasmPriceFeedContract ||
contract instanceof EvmPriceFeedContract
) {
if (chains && !chains.includes(contract.getChain().getId())) {
continue;
}
try {
console.log("------------------------------------");
const wormhole = await contract.getWormholeContract();
// TODO: This is a temporary workaround to skip contracts that are in beta channel
// We should have a better way to handle this
if ((await wormhole.getCurrentGuardianSetIndex()) === 0) {
continue;
}
console.log(
`Current Guardianset for ${contract.getId()}: ${await wormhole.getCurrentGuardianSetIndex()}`
);
await wormhole.syncMainnetGuardianSets(privateKey);
console.log(`Updated Guardianset for ${contract.getId()}`);
} catch (e) {
console.error(`Error updating Guardianset for ${contract.getId()}`, e);
}
}
}
}
main();

View File

@ -408,10 +408,11 @@ export class EvmChain extends Chain {
const GAS_ESTIMATE_MULTIPLIER = 2;
const gasEstimate = await transactionObject.estimateGas(txParams);
// Some networks like Filecoin do not support the normal transaction type and need a type 2 transaction.
// To send a type 2 transaction, remove the ``gasPrice`` field.
// To send a type 2 transaction, remove the ``gasPrice`` field and add the `type` field with the value
// `0x2` to the transaction configuration parameters.
return transactionObject.send({
gas: gasEstimate * GAS_ESTIMATE_MULTIPLIER,
gasPrice: Number(await this.getGasPrice()),
gasPrice: await this.getGasPrice(),
...txParams,
});
}
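The comment this hunk edits encodes a real EIP-1559 wrinkle: web3.js treats a transaction carrying `gasPrice` as legacy, so chains that only accept type-2 transactions (Filecoin is the example cited) need that field dropped, and the branch side additionally documents setting `type: '0x2'`. A hedged sketch of shaping both variants of the transaction config (field names follow the standard web3.js send parameters; `shapeTx` is illustrative):

type TxParams = { from: string; value?: string | number; [k: string]: unknown };

// Build a legacy (type 0) or EIP-1559 (type 2) web3.js tx config.
function shapeTx(
  txParams: TxParams,
  gasEstimate: number,
  gasPrice: string,
  useType2: boolean
) {
  const base = { ...txParams, gas: gasEstimate * 2 };
  return useType2
    ? { ...base, type: "0x2" } // no gasPrice: the node derives EIP-1559 fees
    : { ...base, gasPrice };
}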

View File

@ -17,39 +17,7 @@ type GuardianSet = {
index: { number: string };
};
export class AptosWormholeContract extends WormholeContract {
static type = "AptosWormholeContract";
getId(): string {
return `${this.chain.getId()}_${this.address}`;
}
getType(): string {
return AptosWormholeContract.type;
}
toJson() {
return {
chain: this.chain.getId(),
address: this.address,
type: AptosWormholeContract.type,
};
}
static fromJson(
chain: Chain,
parsed: {
type: string;
address: string;
}
): AptosWormholeContract {
if (parsed.type !== AptosWormholeContract.type)
throw new Error("Invalid type");
if (!(chain instanceof AptosChain))
throw new Error(`Wrong chain type ${chain}`);
return new AptosWormholeContract(chain, parsed.address);
}
export class WormholeAptosContract extends WormholeContract {
constructor(public chain: AptosChain, public address: string) {
super();
}
@ -156,8 +124,8 @@ export class AptosPriceFeedContract extends PriceFeedContract {
return this.chain.sendTransaction(senderPrivateKey, txPayload);
}
public getWormholeContract(): AptosWormholeContract {
return new AptosWormholeContract(this.chain, this.wormholeStateId);
public getWormholeContract(): WormholeAptosContract {
return new WormholeAptosContract(this.chain, this.wormholeStateId);
}
async executeUpdatePriceFeed(

View File

@ -38,36 +38,7 @@ export interface DeploymentConfig {
fee: { amount: string; denom: string };
}
export class CosmWasmWormholeContract extends WormholeContract {
static type = "CosmWasmWormholeContract";
getId(): string {
return `${this.chain.getId()}_${this.address}`;
}
getType(): string {
return CosmWasmWormholeContract.type;
}
toJson() {
return {
chain: this.chain.getId(),
address: this.address,
type: CosmWasmWormholeContract.type,
};
}
static fromJson(
chain: Chain,
parsed: { type: string; address: string }
): CosmWasmWormholeContract {
if (parsed.type !== CosmWasmWormholeContract.type)
throw new Error("Invalid type");
if (!(chain instanceof CosmWasmChain))
throw new Error(`Wrong chain type ${chain}`);
return new CosmWasmWormholeContract(chain, parsed.address);
}
export class WormholeCosmWasmContract extends WormholeContract {
constructor(public chain: CosmWasmChain, public address: string) {
super();
}
@ -240,9 +211,7 @@ export class CosmWasmPriceFeedContract extends PriceFeedContract {
})) as Record<string, string>;
const config = {
config_v1: JSON.parse(allStates["\x00\tconfig_v1"]),
contract_version: allStates["\x00\x10contract_version"]
? JSON.parse(allStates["\x00\x10contract_version"])
: undefined,
contract_version: JSON.parse(allStates["\x00\x10contract_version"]),
};
return config;
}
@ -339,10 +308,10 @@ export class CosmWasmPriceFeedContract extends PriceFeedContract {
return { id: result.txHash, info: result };
}
async getWormholeContract(): Promise<CosmWasmWormholeContract> {
async getWormholeContract(): Promise<WormholeCosmWasmContract> {
const config = await this.getConfig();
const wormholeAddress = config.config_v1.wormhole_contract;
return new CosmWasmWormholeContract(this.chain, wormholeAddress);
return new WormholeCosmWasmContract(this.chain, wormholeAddress);
}
async getUpdateFee(msgs: string[]): Promise<Coin> {

View File

@ -390,37 +390,7 @@ export async function getCodeDigestWithoutAddress(
return Web3.utils.keccak256(strippedCode);
}
export class EvmWormholeContract extends WormholeContract {
static type = "EvmWormholeContract";
getId(): string {
return `${this.chain.getId()}_${this.address}`;
}
getChain(): EvmChain {
return this.chain;
}
getType(): string {
return EvmWormholeContract.type;
}
async getVersion(): Promise<string> {
const contract = this.getContract();
return contract.methods.version().call();
}
static fromJson(
chain: Chain,
parsed: { type: string; address: string }
): EvmWormholeContract {
if (parsed.type !== EvmWormholeContract.type)
throw new Error("Invalid type");
if (!(chain instanceof EvmChain))
throw new Error(`Wrong chain type ${chain}`);
return new EvmWormholeContract(chain, parsed.address);
}
export class WormholeEvmContract extends WormholeContract {
constructor(public chain: EvmChain, public address: string) {
super();
}
@ -466,14 +436,6 @@ export class EvmWormholeContract extends WormholeContract {
);
return { id: result.transactionHash, info: result };
}
toJson() {
return {
chain: this.chain.getId(),
address: this.address,
type: EvmWormholeContract.type,
};
}
}
interface EntropyProviderInfo {
@ -630,30 +592,19 @@ export class EvmEntropyContract extends Storable {
async requestRandomness(
userRandomNumber: string,
provider: string,
senderPrivateKey: PrivateKey,
withCallback?: boolean
senderPrivateKey: PrivateKey
) {
const web3 = new Web3(this.chain.getRpcUrl());
const userCommitment = web3.utils.keccak256(userRandomNumber);
const contract = new web3.eth.Contract(EXTENDED_ENTROPY_ABI, this.address);
const fee = await contract.methods.getFee(provider).call();
const { address } = web3.eth.accounts.wallet.add(senderPrivateKey);
let transactionObject;
if (withCallback) {
transactionObject = contract.methods.requestWithCallback(
provider,
userCommitment
);
} else {
const useBlockHash = false;
transactionObject = contract.methods.request(
const transactionObject = contract.methods.request(
provider,
userCommitment,
useBlockHash
);
}
return this.chain.estiamteAndSendTransaction(transactionObject, {
from: address,
value: fee,
@ -689,13 +640,13 @@ export class EvmExecutorContract {
return `${this.chain.getId()}_${this.address}`;
}
async getWormholeContract(): Promise<EvmWormholeContract> {
async getWormholeContract(): Promise<WormholeEvmContract> {
const web3 = new Web3(this.chain.getRpcUrl());
// Unfortunately, there is no public method to get the wormhole address.
// Slot 251 was found by running `forge build --extra-output storageLayout` and locating the slot for the wormhole variable.
let address = await web3.eth.getStorageAt(this.address, 251);
address = "0x" + address.slice(26);
return new EvmWormholeContract(this.chain, address);
return new WormholeEvmContract(this.chain, address);
}
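Since the executor exposes no getter for its wormhole address, the method above reads raw storage slot 251 and keeps the low-order 20 bytes of the 32-byte word. A standalone sketch of that read-and-slice, with a hypothetical RPC URL and contract address:

import Web3 from "web3";

async function readAddressSlot(rpcUrl: string, contract: string, slot: number) {
  const web3 = new Web3(rpcUrl);
  // getStorageAt returns a 0x-prefixed 32-byte word; an address lives in the
  // low 20 bytes, i.e. the last 40 hex characters.
  const word = await web3.eth.getStorageAt(contract, slot);
  return "0x" + word.slice(26);
}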
getContract() {
@ -860,10 +811,10 @@ export class EvmPriceFeedContract extends PriceFeedContract {
/**
* Returns the wormhole contract which is being used for VAA verification
*/
async getWormholeContract(): Promise<EvmWormholeContract> {
async getWormholeContract(): Promise<WormholeEvmContract> {
const pythContract = this.getContract();
const address = await pythContract.methods.wormhole().call();
return new EvmWormholeContract(this.chain, address);
return new WormholeEvmContract(this.chain, address);
}
async getBaseUpdateFee() {

View File

@ -1,6 +1,6 @@
import { PrivateKey, Storable, TxResult } from "../base";
import { PrivateKey, TxResult } from "../base";
export abstract class WormholeContract extends Storable {
export abstract class WormholeContract {
abstract getCurrentGuardianSetIndex(): Promise<number>;
/**
@ -33,7 +33,6 @@ export abstract class WormholeContract extends Storable {
"010000000001007ac31b282c2aeeeb37f3385ee0de5f8e421d30b9e5ae8ba3d4375c1c77a86e77159bb697d9c456d6f8c02d22a94b1279b65b0d6a9957e7d3857423845ac758e300610ac1d2000000030001000000000000000000000000000000000000000000000000000000000000000400000000000005390000000000000000000000000000000000000000000000000000000000436f7265020000000000011358cc3ae5c097b213ce3c81979e1b9f9570746aa5ff6cb952589bde862c25ef4392132fb9d4a42157114de8460193bdf3a2fcf81f86a09765f4762fd1107a0086b32d7a0977926a205131d8731d39cbeb8c82b2fd82faed2711d59af0f2499d16e726f6b211b39756c042441be6d8650b69b54ebe715e234354ce5b4d348fb74b958e8966e2ec3dbd4958a7cdeb5f7389fa26941519f0863349c223b73a6ddee774a3bf913953d695260d88bc1aa25a4eee363ef0000ac0076727b35fbea2dac28fee5ccb0fea768eaf45ced136b9d9e24903464ae889f5c8a723fc14f93124b7c738843cbb89e864c862c38cddcccf95d2cc37a4dc036a8d232b48f62cdd4731412f4890da798f6896a3331f64b48c12d1d57fd9cbe7081171aa1be1d36cafe3867910f99c09e347899c19c38192b6e7387ccd768277c17dab1b7a5027c0b3cf178e21ad2e77ae06711549cfbb1f9c7a9d8096e85e1487f35515d02a92753504a8d75471b9f49edb6fbebc898f403e4773e95feb15e80c9a99c8348d",
"01000000010d0012e6b39c6da90c5dfd3c228edbb78c7a4c97c488ff8a346d161a91db067e51d638c17216f368aa9bdf4836b8645a98018ca67d2fec87d769cabfdf2406bf790a0002ef42b288091a670ef3556596f4f47323717882881eaf38e03345078d07a156f312b785b64dae6e9a87e3d32872f59cb1931f728cecf511762981baf48303668f0103cef2616b84c4e511ff03329e0853f1bd7ee9ac5ba71d70a4d76108bddf94f69c2a8a84e4ee94065e8003c334e899184943634e12043d0dda78d93996da073d190104e76d166b9dac98f602107cc4b44ac82868faf00b63df7d24f177aa391e050902413b71046434e67c770b19aecdf7fce1d1435ea0be7262e3e4c18f50ddc8175c0105d9450e8216d741e0206a50f93b750a47e0a258b80eb8fed1314cc300b3d905092de25cd36d366097b7103ae2d184121329ba3aa2d7c6cc53273f11af14798110010687477c8deec89d36a23e7948feb074df95362fc8dcbd8ae910ac556a1dee1e755c56b9db5d710c940938ed79bc1895a3646523a58bc55f475a23435a373ecfdd0107fb06734864f79def4e192497362513171530daea81f07fbb9f698afe7e66c6d44db21323144f2657d4a5386a954bb94eef9f64148c33aef6e477eafa2c5c984c01088769e82216310d1827d9bd48645ec23e90de4ef8a8de99e2d351d1df318608566248d80cdc83bdcac382b3c30c670352be87f9069aab5037d0b747208eae9c650109e9796497ff9106d0d1c62e184d83716282870cef61a1ee13d6fc485b521adcce255c96f7d1bca8d8e7e7d454b65783a830bddc9d94092091a268d311ecd84c26010c468c9fb6d41026841ff9f8d7368fa309d4dbea3ea4bbd2feccf94a92cc8a20a226338a8e2126cd16f70eaf15b4fc9be2c3fa19def14e071956a605e9d1ac4162010e23fcb6bd445b7c25afb722250c1acbc061ed964ba9de1326609ae012acdfb96942b2a102a2de99ab96327859a34a2b49a767dbdb62e0a1fb26af60fe44fd496a00106bb0bac77ac68b347645f2fb1ad789ea9bd76fb9b2324f25ae06f97e65246f142df717f662e73948317182c62ce87d79c73def0dba12e5242dfc038382812cfe00126da03c5e56cb15aeeceadc1e17a45753ab4dc0ec7bf6a75ca03143ed4a294f6f61bc3f478a457833e43084ecd7c985bf2f55a55f168aac0e030fc49e845e497101626e9d9a5d9e343f00010000000000000000000000000000000000000000000000000000000000000004c1759167c43f501c2000000000000000000000000000000000000000000000000000000000436f7265020000000000021358cc3ae5c097b213ce3c81979e1b9f9570746aa5ff6cb952589bde862c25ef4392132fb9d4a42157114de8460193bdf3a2fcf81f86a09765f4762fd1107a0086b32d7a0977926a205131d8731d39cbeb8c82b2fd82faed2711d59af0f2499d16e726f6b211b39756c042441be6d8650b69b54ebe715e234354ce5b4d348fb74b958e8966e2ec3dbd4958a7cd66b9590e1c41e0b226937bf9217d1d67fd4e91f574a3bf913953d695260d88bc1aa25a4eee363ef0000ac0076727b35fbea2dac28fee5ccb0fea768eaf45ced136b9d9e24903464ae889f5c8a723fc14f93124b7c738843cbb89e864c862c38cddcccf95d2cc37a4dc036a8d232b48f62cdd4731412f4890da798f6896a3331f64b48c12d1d57fd9cbe7081171aa1be1d36cafe3867910f99c09e347899c19c38192b6e7387ccd768277c17dab1b7a5027c0b3cf178e21ad2e77ae06711549cfbb1f9c7a9d8096e85e1487f35515d02a92753504a8d75471b9f49edb6fbebc898f403e4773e95feb15e80c9a99c8348d",
"01000000020d00ce45474d9e1b1e7790a2d210871e195db53a70ffd6f237cfe70e2686a32859ac43c84a332267a8ef66f59719cf91cc8df0101fd7c36aa1878d5139241660edc0010375cc906156ae530786661c0cd9aef444747bc3d8d5aa84cac6a6d2933d4e1a031cffa30383d4af8131e929d9f203f460b07309a647d6cd32ab1cc7724089392c000452305156cfc90343128f97e499311b5cae174f488ff22fbc09591991a0a73d8e6af3afb8a5968441d3ab8437836407481739e9850ad5c95e6acfcc871e951bc30105a7956eefc23e7c945a1966d5ddbe9e4be376c2f54e45e3d5da88c2f8692510c7429b1ea860ae94d929bd97e84923a18187e777aa3db419813a80deb84cc8d22b00061b2a4f3d2666608e0aa96737689e3ba5793810ff3a52ff28ad57d8efb20967735dc5537a2e43ef10f583d144c12a1606542c207f5b79af08c38656d3ac40713301086b62c8e130af3411b3c0d91b5b50dcb01ed5f293963f901fc36e7b0e50114dce203373b32eb45971cef8288e5d928d0ed51cd86e2a3006b0af6a65c396c009080009e93ab4d2c8228901a5f4525934000b2c26d1dc679a05e47fdf0ff3231d98fbc207103159ff4116df2832eea69b38275283434e6cd4a4af04d25fa7a82990b707010aa643f4cf615dfff06ffd65830f7f6cf6512dabc3690d5d9e210fdc712842dc2708b8b2c22e224c99280cd25e5e8bfb40e3d1c55b8c41774e287c1e2c352aecfc010b89c1e85faa20a30601964ccc6a79c0ae53cfd26fb10863db37783428cd91390a163346558239db3cd9d420cfe423a0df84c84399790e2e308011b4b63e6b8015010ca31dcb564ac81a053a268d8090e72097f94f366711d0c5d13815af1ec7d47e662e2d1bde22678113d15963da100b668ba26c0c325970d07114b83c5698f46097010dc9fda39c0d592d9ed92cd22b5425cc6b37430e236f02d0d1f8a2ef45a00bde26223c0a6eb363c8b25fd3bf57234a1d9364976cefb8360e755a267cbbb674b39501108db01e444ab1003dd8b6c96f8eb77958b40ba7a85fefecf32ad00b7a47c0ae7524216262495977e09c0989dd50f280c21453d3756843608eacd17f4fdfe47600001261025228ef5af837cb060bcd986fcfa84ccef75b3fa100468cfd24e7fadf99163938f3b841a33496c2706d0208faab088bd155b2e20fd74c625bb1cc8c43677a0163c53c409e0c5dfa000100000000000000000000000000000000000000000000000000000000000000046c5a054d7833d1e42000000000000000000000000000000000000000000000000000000000436f7265020000000000031358cc3ae5c097b213ce3c81979e1b9f9570746aa5ff6cb952589bde862c25ef4392132fb9d4a42157114de8460193bdf3a2fcf81f86a09765f4762fd1107a0086b32d7a0977926a205131d8731d39cbeb8c82b2fd82faed2711d59af0f2499d16e726f6b211b39756c042441be6d8650b69b54ebe715e234354ce5b4d348fb74b958e8966e2ec3dbd4958a7cd15e7caf07c4e3dc8e7c469f92c8cd88fb8005a2074a3bf913953d695260d88bc1aa25a4eee363ef0000ac0076727b35fbea2dac28fee5ccb0fea768eaf45ced136b9d9e24903464ae889f5c8a723fc14f93124b7c738843cbb89e864c862c38cddcccf95d2cc37a4dc036a8d232b48f62cdd4731412f4890da798f6896a3331f64b48c12d1d57fd9cbe7081171aa1be1d36cafe3867910f99c09e347899c19c38192b6e7387ccd768277c17dab1b7a5027c0b3cf178e21ad2e77ae06711549cfbb1f9c7a9d8096e85e1487f35515d02a92753504a8d75471b9f49edb6fbebc898f403e4773e95feb15e80c9a99c8348d",
"01000000030d03d4a37a6ff4361d91714730831e9d49785f61624c8f348a9c6c1d82bc1d98cadc5e936338204445c6250bb4928f3f3e165ad47ca03a5d63111168a2de4576856301049a5df10464ea4e1961589fd30fc18d1970a7a2ffaad617e56a0f7777f25275253af7d10a0f0f2494dc6e99fc80e444ab9ebbbee252ded2d5dcb50cbf7a54bb5a01055f4603b553b9ba9e224f9c55c7bca3da00abb10abd19e0081aecd3b352be061a70f79f5f388ebe5190838ef3cd13a2f22459c9a94206883b739c90b40d5d74640006a8fade3997f650a36e46bceb1f609edff201ab32362266f166c5c7da713f6a19590c20b68ed3f0119cb24813c727560ede086b3d610c2d7a1efa66f655bad90900080f5e495a75ea52241c59d145c616bfac01e57182ad8d784cbcc9862ed3afb60c0983ccbc690553961ffcf115a0c917367daada8e60be2cbb8b8008bac6341a8c010935ab11e0eea28b87a1edc5ccce3f1fac25f75b5f640fe6b0673a7cd74513c9dc01c544216cf364cc9993b09fda612e0cd1ced9c00fb668b872a16a64ebb55d27010ab2bc39617a2396e7defa24cd7c22f42dc31f3c42ffcd9d1472b02df8468a4d0563911e8fb6a4b5b0ce0bd505daa53779b08ff660967b31f246126ed7f6f29a7e000bdb6d3fd7b33bdc9ac3992916eb4aacb97e7e21d19649e7fa28d2dd6e337937e4274516a96c13ac7a8895da9f91948ea3a09c25f44b982c62ce8842b58e20c8a9000d3d1b19c8bb000856b6610b9d28abde6c35cb7705c6ca5db711f7be96d60eed9d72cfa402a6bfe8bf0496dbc7af35796fc768da51a067b95941b3712dce8ae1e7010ec80085033157fd1a5628fc0c56267469a86f0e5a66d7dede1ad4ce74ecc3dff95b60307a39c3bfbeedc915075070da30d0395def9635130584f709b3885e1bdc0010fc480eb9ee715a2d151b23722b48b42581d7f4001fc1696c75425040bfc1ffc5394fe418adb2b64bd3dc692efda4cc408163677dbe233b16bcdabb853a20843301118ee9e115e1a0c981f19d0772b850e666591322da742a9a12cce9f52a5665bd474abdd59c580016bee8aae67fdf39b315be2528d12eec3a652910e03cc4c6fa3801129d0d1e2e429e969918ec163d16a7a5b2c6729aa44af5dccad07d25d19891556a79b574f42d9adbd9e2a9ae5a6b8750331d2fccb328dd94c3bf8791ee1bfe85aa00661e99781981faea00010000000000000000000000000000000000000000000000000000000000000004fd4c6c55ec8dfd342000000000000000000000000000000000000000000000000000000000436f726502000000000004135893b5a76c3f739645648885bdccc06cd70a3cd3ff6cb952589bde862c25ef4392132fb9d4a42157114de8460193bdf3a2fcf81f86a09765f4762fd1107a0086b32d7a0977926a205131d8731d39cbeb8c82b2fd82faed2711d59af0f2499d16e726f6b211b39756c042441be6d8650b69b54ebe715e234354ce5b4d348fb74b958e8966e2ec3dbd4958a7cd15e7caf07c4e3dc8e7c469f92c8cd88fb8005a2074a3bf913953d695260d88bc1aa25a4eee363ef0000ac0076727b35fbea2dac28fee5ccb0fea768eaf45ced136b9d9e24903464ae889f5c8a723fc14f93124b7c738843cbb89e864c862c38cddcccf95d2cc37a4dc036a8d232b48f62cdd4731412f4890da798f6896a3331f64b48c12d1d57fd9cbe7081171aa1be1d36cafe3867910f99c09e347899c19c38192b6e7387ccd768277c17dab1b7a5027c0b3cf178e21ad2e77ae06711549cfbb1f9c7a9d8096e85e1487f35515d02a92753504a8d75471b9f49edb6fbebc898f403e4773e95feb15e80c9a99c8348d",
];
const currentIndex = await this.getCurrentGuardianSetIndex();
for (let i = currentIndex; i < MAINNET_UPGRADE_VAAS.length; i++) {

View File

@ -8,9 +8,9 @@ repl.evalCode(
"import { loadHotWallet, Vault } from './src/governance';" +
"import { SuiChain, CosmWasmChain, AptosChain, EvmChain } from './src/chains';" +
"import { SuiPriceFeedContract } from './src/contracts/sui';" +
"import { CosmWasmWormholeContract, CosmWasmPriceFeedContract } from './src/contracts/cosmwasm';" +
"import { EvmWormholeContract, EvmPriceFeedContract } from './src/contracts/evm';" +
"import { AptosWormholeContract, AptosPriceFeedContract } from './src/contracts/aptos';" +
"import { WormholeCosmWasmContract, CosmWasmPriceFeedContract } from './src/contracts/cosmwasm';" +
"import { WormholeEvmContract, EvmPriceFeedContract } from './src/contracts/evm';" +
"import { WormholeAptosContract, AptosPriceFeedContract } from './src/contracts/aptos';" +
"import { DefaultStore } from './src/store';" +
"import { toPrivateKey } from './src/base';" +
"DefaultStore"

View File

@ -8,14 +8,10 @@ import {
} from "./chains";
import {
AptosPriceFeedContract,
AptosWormholeContract,
CosmWasmPriceFeedContract,
CosmWasmWormholeContract,
EvmEntropyContract,
EvmPriceFeedContract,
EvmWormholeContract,
SuiPriceFeedContract,
WormholeContract,
} from "./contracts";
import { Token } from "./token";
import { PriceFeedContract, Storable } from "./base";
@ -27,7 +23,6 @@ export class Store {
public chains: Record<string, Chain> = { global: new GlobalChain() };
public contracts: Record<string, PriceFeedContract> = {};
public entropy_contracts: Record<string, EvmEntropyContract> = {};
public wormhole_contracts: Record<string, WormholeContract> = {};
public tokens: Record<string, Token> = {};
public vaults: Record<string, Vault> = {};
@ -86,7 +81,6 @@ export class Store {
const contractsByType: Record<string, Storable[]> = {};
const contracts: Storable[] = Object.values(this.contracts);
contracts.push(...Object.values(this.entropy_contracts));
contracts.push(...Object.values(this.wormhole_contracts));
for (const contract of contracts) {
if (!contractsByType[contract.getType()]) {
contractsByType[contract.getType()] = [];
@ -120,13 +114,10 @@ export class Store {
loadAllContracts() {
const allContractClasses = {
[CosmWasmPriceFeedContract.type]: CosmWasmPriceFeedContract,
[CosmWasmWormholeContract.type]: CosmWasmWormholeContract,
[SuiPriceFeedContract.type]: SuiPriceFeedContract,
[EvmPriceFeedContract.type]: EvmPriceFeedContract,
[AptosPriceFeedContract.type]: AptosPriceFeedContract,
[AptosWormholeContract.type]: AptosWormholeContract,
[EvmEntropyContract.type]: EvmEntropyContract,
[EvmWormholeContract.type]: EvmWormholeContract,
};
this.getYamlFiles(`${this.path}/contracts/`).forEach((yamlFile) => {
const parsedArray = parse(readFileSync(yamlFile, "utf-8"));
@ -141,16 +132,13 @@ export class Store {
);
if (
this.contracts[chainContract.getId()] ||
this.entropy_contracts[chainContract.getId()] ||
this.wormhole_contracts[chainContract.getId()]
this.entropy_contracts[chainContract.getId()]
)
throw new Error(
`Multiple contracts with id ${chainContract.getId()} found`
);
if (chainContract instanceof EvmEntropyContract) {
this.entropy_contracts[chainContract.getId()] = chainContract;
} else if (chainContract instanceof WormholeContract) {
this.wormhole_contracts[chainContract.getId()] = chainContract;
} else {
this.contracts[chainContract.getId()] = chainContract;
}

View File

@ -74,11 +74,3 @@
prefix: rol
feeDenom: urax
type: CosmWasmChain
- endpoint: https://testnet-burnt-rpc.lavenderfive.com
id: xion_testnet
wormholeChainName: xion_testnet
mainnet: false
gasPrice: "0.025"
prefix: xion
feeDenom: uxion
type: CosmWasmChain

View File

@ -39,6 +39,11 @@
rpcUrl: https://evm-t3.cronos.org
networkId: 338
type: EvmChain
- id: zksync_goerli
mainnet: false
rpcUrl: https://zksync2-testnet.zksync.dev
networkId: 280
type: EvmChain
- id: canto_testnet
mainnet: false
rpcUrl: https://canto-testnet.plexnode.wtf
@ -46,7 +51,7 @@
type: EvmChain
- id: polygon_zkevm_testnet
mainnet: false
rpcUrl: https://rpc.public.zkevm-test.net
rpcUrl: https://rpc.public.zkevm-test.net/
networkId: 1442
type: EvmChain
- id: polygon_blackberry
@ -77,7 +82,7 @@
type: EvmChain
- id: neon
mainnet: true
rpcUrl: https://neon-evm.drpc.org
rpcUrl: https://neon-proxy-mainnet.solana.p2p.org
networkId: 245022934
type: EvmChain
- id: fantom
@ -169,6 +174,11 @@
rpcUrl: https://evm.confluxrpc.org
networkId: 1030
type: EvmChain
- id: optimism_goerli
mainnet: false
rpcUrl: https://rpc.ankr.com/optimism_testnet
networkId: 420
type: EvmChain
- id: celo
mainnet: true
rpcUrl: https://forno.celo.org
@ -279,7 +289,7 @@
type: EvmChain
- id: horizen_eon
mainnet: true
rpcUrl: https://rpc.ankr.com/horizen_eon
rpcUrl: https://eon-rpc.horizenlabs.io/ethv1
networkId: 7332
type: EvmChain
- id: horizen_gobi
@ -317,11 +327,6 @@
rpcUrl: https://goerli.boba.network
networkId: 2888
type: EvmChain
- id: boba_sepolia
mainnet: false
rpcUrl: https://sepolia.boba.network
networkId: 28882
type: EvmChain
- id: manta
mainnet: true
rpcUrl: https://pacific-rpc.manta.network/http
@ -329,7 +334,7 @@
type: EvmChain
- id: manta_testnet
mainnet: false
rpcUrl: https://manta-pacific-testnet.drpc.org
rpcUrl: https://pacific-rpc.testnet.manta.network/http
networkId: 3441005
type: EvmChain
- id: manta_sepolia
@ -372,11 +377,6 @@
rpcUrl: https://rpc.zkatana.gelato.digital
networkId: 1261120
type: EvmChain
- id: astar_zkyoto_testnet
mainnet: false
rpcUrl: https://rpc.startale.com/zkyoto
networkId: 6038361
type: EvmChain
- id: astar_zkevm
mainnet: true
rpcUrl: https://rpc.startale.com/astar-zkevm
@ -393,14 +393,14 @@
networkId: 1116
type: EvmChain
nativeToken: CORE
- id: viction
- id: tomochain
mainnet: true
rpcUrl: https://viction.blockpi.network/v1/rpc/public
rpcUrl: https://rpc.tomochain.com
networkId: 88
type: EvmChain
- id: viction_testnet
- id: tomochain_testnet
mainnet: false
rpcUrl: https://rpc-testnet.viction.xyz
rpcUrl: https://rpc.testnet.tomochain.com
networkId: 89
type: EvmChain
- id: mode_testnet
@ -450,7 +450,7 @@
type: EvmChain
- id: blast_s2_testnet
mainnet: false
rpcUrl: https://sepolia.blast.io
rpcUrl: https://blast-sepolia.blockpi.network/v1/rpc/public
networkId: 168587773
type: EvmChain
- id: hedera_testnet
@ -486,7 +486,7 @@
type: EvmChain
- id: sei_evm_devnet
mainnet: false
rpcUrl: https://evm-rpc-arctic-1.sei-apis.com
rpcUrl: https://evm-devnet.seinetwork.io
networkId: 713715
type: EvmChain
- id: fantom_sonic_testnet
@ -502,7 +502,7 @@
- id: idex_xchain_testnet
mainnet: false
rpcUrl: https://xchain-testnet-rpc.idex.io
networkId: 64002
networkId: 671276500
type: EvmChain
- id: injective_inevm_testnet
mainnet: false
@ -554,23 +554,3 @@
rpcUrl: https://rpc-testnet.morphl2.io
networkId: 2710
type: EvmChain
- id: iota
mainnet: true
rpcUrl: https://json-rpc.evm.iotaledger.net
networkId: 8822
type: EvmChain
- id: flow_previewnet
mainnet: true
rpcUrl: https://previewnet.evm.nodes.onflow.org
networkId: 646
type: EvmChain
- id: olive_testnet
mainnet: false
rpcUrl: https://olive-network-testnet.rpc.caldera.xyz/http
networkId: 8101902
type: EvmChain
- id: taiko_hekla
mainnet: false
rpcUrl: https://rpc.hekla.taiko.xyz/
networkId: 167009
type: EvmChain

View File

@ -1,9 +0,0 @@
- chain: aptos_mainnet
address: "0x5bc11445584a763c1fa7ed39081f1b920954da14e04b32440cba863d03e19625"
type: AptosWormholeContract
- chain: aptos_testnet
address: "0x5bc11445584a763c1fa7ed39081f1b920954da14e04b32440cba863d03e19625"
type: AptosWormholeContract
- chain: movement_move_devnet
address: "0x9236893d6444b208b7e0b3e8d4be4ace90b6d17817ab7d1584e46a33ef5c50c9"
type: AptosWormholeContract

View File

@ -43,6 +43,3 @@
- chain: rol_testnet
address: rol1pvrwmjuusn9wh34j7y520g8gumuy9xtl3gvprlljfdpwju3x7ucszdyfs8
type: CosmWasmPriceFeedContract
- chain: xion_testnet
address: xion1w39ctwxxhxxc2kxarycjxj9rndn65gf8daek7ggarwh3rq3zl0lqqllnmt
type: CosmWasmPriceFeedContract

View File

@ -1,48 +0,0 @@
- chain: rol_testnet
address: rol17p9rzwnnfxcjp32un9ug7yhhzgtkhvl9jfksztgw5uh69wac2pgss2u902
type: CosmWasmWormholeContract
- chain: osmosis
address: osmo1t7qham5kle36rs28se2xd7cckm9mpwzgt65t40lrdf8fcq3837qqjvw80s
type: CosmWasmWormholeContract
- chain: sei_testnet_atlantic_2
address: sei14utt2wp7hamd2qmuz0e5yj728y4u08cm7etujxkc6qprnrla3uwq95jz86
type: CosmWasmWormholeContract
- chain: juno_testnet
address: juno1h7m0xwgu4qh0nrthahpydxzw7klvyd5w8d7jjl675p944ds7jr4sf3ta4l
type: CosmWasmWormholeContract
- chain: sei_testnet_atlantic_2
address: sei1cn8ygrvqk03p5zce3c6rrst7j97qarm33d23rxgme7rzmasddfusw7cpxw
type: CosmWasmWormholeContract
- chain: neutron_testnet_pion_1
address: neutron1nxs2ajn4ejrggfuvqczfx4txghrendcpy3526avg2tsngjktedtspgla8t
type: CosmWasmWormholeContract
- chain: neutron_testnet_pion_1
address: neutron1wtuuak4yt4vyhtv7gt4xnv0m8zfakad5lnz6r7dx8alyydu0sgns67kmvy
type: CosmWasmWormholeContract
- chain: juno_testnet
address: juno1g9xhl5jzhlm6lqc2earxkzyazwl2cshr5cnemxtjy0le64s4w22skukkxj
type: CosmWasmWormholeContract
- chain: osmosis_testnet_5
address: osmo19ah8ak7rgmds40te22xnz7zsdmx5twjulv3sypqm79skkl2ajm4skuhwmf
type: CosmWasmWormholeContract
- chain: sei_pacific_1
address: sei12qq3cufehhsaprjfjrwpx5ltyr43lcrxvf6eaqf0p4jsjpc7semq8p6ewa
type: CosmWasmWormholeContract
- chain: injective_testnet
address: inj1hglkee95shfsl5xxky26hdqxj0mqp54lh7xm59
type: CosmWasmWormholeContract
- chain: neutron
address: neutron178ruq7gf6gk3uus5n8xztj5tsrt5xwxfelw88mc9egfw5d99ktksnk5rsh
type: CosmWasmWormholeContract
- chain: osmosis_testnet_5
address: osmo1llum0y8zc4h2f0rhcdn63xje4mrkdljrve9l40lun9lpeyu2l7cq4phaw6
type: CosmWasmWormholeContract
- chain: injective_testnet
address: inj17sy3vx5dfeva9wx33d09yqdwruntpccnjyw0hj
type: CosmWasmWormholeContract
- chain: injective
address: inj17p9rzwnnfxcjp32un9ug7yhhzgtkhvl9l2q74d
type: CosmWasmWormholeContract
- chain: xion_testnet
address: xion14ycw3tx0hpz3aawmzm6cufs6hx94d64ht5qawd0ej9ug9j2ffzsqmpecys
type: CosmWasmWormholeContract

View File

@ -19,6 +19,9 @@
- chain: blast_s2_testnet
address: "0x98046Bd286715D3B0BC227Dd7a956b83D8978603"
type: EvmEntropyContract
- chain: sei_evm_devnet
address: "0x6E3A2a644eeDCf6007d3c7d85F0094Cc1B25B2AE"
type: EvmEntropyContract
- chain: lightlink_phoenix
address: "0x98046Bd286715D3B0BC227Dd7a956b83D8978603"
type: EvmEntropyContract
@ -58,9 +61,3 @@
- chain: base
address: "0x6E7D74FA7d5c90FEF9F0512987605a6d546181Bb"
type: EvmEntropyContract
- chain: sei_evm_devnet
address: "0x23f0e8FAeE7bbb405E7A7C3d60138FCfd43d7509"
type: EvmEntropyContract
- chain: taiko_hekla
address: "0x98046Bd286715D3B0BC227Dd7a956b83D8978603"
type: EvmEntropyContract

View File

@ -97,7 +97,7 @@
- chain: coredao
address: "0xA2aa501b19aff244D90cc15a4Cf739D2725B5729"
type: EvmPriceFeedContract
- chain: viction
- chain: tomochain
address: "0xA2aa501b19aff244D90cc15a4Cf739D2725B5729"
type: EvmPriceFeedContract
- chain: arbitrum_sepolia
@ -142,6 +142,9 @@
- chain: meter_testnet
address: "0x5a71C07a0588074443545eE0c08fb0375564c3E4"
type: EvmPriceFeedContract
- chain: optimism_goerli
address: "0xDd24F84d36BF92C65F92307595335bdFab5Bbd21"
type: EvmPriceFeedContract
- chain: shimmer_testnet
address: "0x8D254a21b3C86D32F7179855531CE99164721933"
type: EvmPriceFeedContract
@ -166,6 +169,9 @@
- chain: coredao_testnet
address: "0x8D254a21b3C86D32F7179855531CE99164721933"
type: EvmPriceFeedContract
- chain: tomochain_testnet
address: "0x5D289Ad1CE59fCC25b6892e7A303dfFf3a9f7167"
type: EvmPriceFeedContract
- chain: cronos_testnet
address: "0x36825bf3Fbdf5a29E2d5148bfe7Dcf7B5639e320"
type: EvmPriceFeedContract
@ -196,6 +202,9 @@
- chain: neon_devnet
address: "0x0708325268dF9F66270F1401206434524814508b"
type: EvmPriceFeedContract
- chain: zksync_goerli
address: "0x8739d5024B5143278E2b15Bd9e7C26f6CEc658F1"
type: EvmPriceFeedContract
- chain: optimism_sepolia
address: "0x0708325268dF9F66270F1401206434524814508b"
type: EvmPriceFeedContract
@ -239,7 +248,7 @@
address: "0x2880aB155794e7179c9eE2e38200202908C17B43"
type: EvmPriceFeedContract
- chain: sei_evm_devnet
address: "0xe9d69CdD6Fe41e7B621B4A688C5D1a68cB5c8ADc"
address: "0x23f0e8FAeE7bbb405E7A7C3d60138FCfd43d7509"
type: EvmPriceFeedContract
- chain: lightlink_pegasus_testnet
address: "0x5D289Ad1CE59fCC25b6892e7A303dfFf3a9f7167"
@ -254,7 +263,7 @@
address: "0xA2aa501b19aff244D90cc15a4Cf739D2725B5729"
type: EvmPriceFeedContract
- chain: idex_xchain_testnet
address: "0x2880aB155794e7179c9eE2e38200202908C17B43"
address: "0xA2aa501b19aff244D90cc15a4Cf739D2725B5729"
type: EvmPriceFeedContract
- chain: injective_inevm_testnet
address: "0xA2aa501b19aff244D90cc15a4Cf739D2725B5729"
@ -313,12 +322,3 @@
- chain: morph_testnet
address: "0xA2aa501b19aff244D90cc15a4Cf739D2725B5729"
type: EvmPriceFeedContract
- chain: flow_previewnet
address: "0x2880aB155794e7179c9eE2e38200202908C17B43"
type: EvmPriceFeedContract
- chain: taiko_hekla
address: "0x2880aB155794e7179c9eE2e38200202908C17B43"
type: EvmPriceFeedContract
- chain: olive_testnet
address: "0x41c9e39574F40Ad34c79f1C99B66A45eFB830d4c"
type: EvmPriceFeedContract

View File

@ -1,303 +0,0 @@
- chain: polygon
address: "0x35a58BeeE77a2Ad547FcDed7e8CB1c6e19746b13"
type: EvmWormholeContract
- chain: aurora
address: "0x41955476936DdA8d0fA98b8d1778172F7E4fCcA1"
type: EvmWormholeContract
- chain: fantom
address: "0x35a58BeeE77a2Ad547FcDed7e8CB1c6e19746b13"
type: EvmWormholeContract
- chain: optimism
address: "0x87047526937246727E4869C5f76A347160e08672"
type: EvmWormholeContract
- chain: arbitrum
address: "0xEbe57e8045F2F230872523bbff7374986E45C486"
type: EvmWormholeContract
- chain: gnosis
address: "0x26DD80569a8B23768A1d80869Ed7339e07595E85"
type: EvmWormholeContract
- chain: polygon_zkevm
address: "0x41955476936DdA8d0fA98b8d1778172F7E4fCcA1"
type: EvmWormholeContract
- chain: conflux_espace
address: "0xDd24F84d36BF92C65F92307595335bdFab5Bbd21"
type: EvmWormholeContract
- chain: bsc
address: "0x41955476936DdA8d0fA98b8d1778172F7E4fCcA1"
type: EvmWormholeContract
- chain: kava
address: "0x0708325268dF9F66270F1401206434524814508b"
type: EvmWormholeContract
- chain: avalanche
address: "0x41955476936DdA8d0fA98b8d1778172F7E4fCcA1"
type: EvmWormholeContract
- chain: canto
address: "0xf0a1b566B55e0A0CB5BeF52Eb2a57142617Bee67"
type: EvmWormholeContract
- chain: linea
address: "0x0708325268dF9F66270F1401206434524814508b"
type: EvmWormholeContract
- chain: neon
address: "0xCd76c50c3210C5AaA9c39D53A4f95BFd8b1a3a19"
type: EvmWormholeContract
- chain: mantle
address: "0xf0a1b566B55e0A0CB5BeF52Eb2a57142617Bee67"
type: EvmWormholeContract
- chain: meter
address: "0xfA133831D350A2A5997d6db182B6Ca9e8ad4191B"
type: EvmWormholeContract
- chain: kcc
address: "0x41955476936DdA8d0fA98b8d1778172F7E4fCcA1"
type: EvmWormholeContract
- chain: eos
address: "0xEbe57e8045F2F230872523bbff7374986E45C486"
type: EvmWormholeContract
- chain: celo
address: "0x41955476936DdA8d0fA98b8d1778172F7E4fCcA1"
type: EvmWormholeContract
- chain: wemix
address: "0xEbe57e8045F2F230872523bbff7374986E45C486"
type: EvmWormholeContract
- chain: base
address: "0x87047526937246727E4869C5f76A347160e08672"
type: EvmWormholeContract
- chain: zksync
address: "0x53cD6960888cA09361506678adfE267b4CE81A08"
type: EvmWormholeContract
- chain: horizen_eon
address: "0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a"
type: EvmWormholeContract
- chain: shimmer
address: "0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a"
type: EvmWormholeContract
- chain: boba
address: "0x26DD80569a8B23768A1d80869Ed7339e07595E85"
type: EvmWormholeContract
- chain: manta
address: "0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a"
type: EvmWormholeContract
- chain: scroll
address: "0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a"
type: EvmWormholeContract
- chain: chiliz
address: "0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a"
type: EvmWormholeContract
- chain: coredao
address: "0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a"
type: EvmWormholeContract
- chain: viction
address: "0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a"
type: EvmWormholeContract
- chain: arbitrum_sepolia
address: "0xfA25E653b44586dBbe27eE9d252192F0e4956683"
type: EvmWormholeContract
- chain: fuji
address: "0x5744Cbf430D99456a0A8771208b674F27f8EF0Fb"
type: EvmWormholeContract
- chain: canto_testnet
address: "0x41c9e39574F40Ad34c79f1C99B66A45eFB830d4c"
type: EvmWormholeContract
- chain: aurora_testnet
address: "0x2880aB155794e7179c9eE2e38200202908C17B43"
type: EvmWormholeContract
- chain: chiado
address: "0x87047526937246727E4869C5f76A347160e08672"
type: EvmWormholeContract
- chain: kava_testnet
address: "0xD458261E832415CFd3BAE5E416FdF3230ce6F134"
type: EvmWormholeContract
- chain: conflux_espace_testnet
address: "0xEbe57e8045F2F230872523bbff7374986E45C486"
type: EvmWormholeContract
- chain: celo_alfajores_testnet
address: "0x2880aB155794e7179c9eE2e38200202908C17B43"
type: EvmWormholeContract
- chain: bsc_testnet
address: "0xe9d69CdD6Fe41e7B621B4A688C5D1a68cB5c8ADc"
type: EvmWormholeContract
- chain: kcc_testnet
address: "0x2880aB155794e7179c9eE2e38200202908C17B43"
type: EvmWormholeContract
- chain: eos_testnet
address: "0x8D254a21b3C86D32F7179855531CE99164721933"
type: EvmWormholeContract
- chain: meter_testnet
address: "0x257c3B61102442C1c3286Efbd24242322d002920"
type: EvmWormholeContract
- chain: shimmer_testnet
address: "0x98046Bd286715D3B0BC227Dd7a956b83D8978603"
type: EvmWormholeContract
- chain: scroll_sepolia
address: "0x36825bf3Fbdf5a29E2d5148bfe7Dcf7B5639e320"
type: EvmWormholeContract
- chain: boba_goerli
address: "0x98046Bd286715D3B0BC227Dd7a956b83D8978603"
type: EvmWormholeContract
- chain: manta_testnet
address: "0x98046Bd286715D3B0BC227Dd7a956b83D8978603"
type: EvmWormholeContract
- chain: chiliz_spicy
address: "0x98046Bd286715D3B0BC227Dd7a956b83D8978603"
type: EvmWormholeContract
- chain: coredao_testnet
address: "0x98046Bd286715D3B0BC227Dd7a956b83D8978603"
type: EvmWormholeContract
- chain: cronos_testnet
address: "0x74f09cb3c7e2A01865f424FD14F6dc9A14E3e94E"
type: EvmWormholeContract
- chain: wemix_testnet
address: "0x41c9e39574F40Ad34c79f1C99B66A45eFB830d4c"
type: EvmWormholeContract
- chain: evmos_testnet
address: "0x2880aB155794e7179c9eE2e38200202908C17B43"
type: EvmWormholeContract
- chain: zetachain_testnet
address: "0x8D254a21b3C86D32F7179855531CE99164721933"
type: EvmWormholeContract
- chain: neon_devnet
address: "0x23f0e8FAeE7bbb405E7A7C3d60138FCfd43d7509"
type: EvmWormholeContract
- chain: optimism_sepolia
address: "0x8D254a21b3C86D32F7179855531CE99164721933"
type: EvmWormholeContract
- chain: mode
address: "0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a"
type: EvmWormholeContract
- chain: mode_testnet
address: "0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a"
type: EvmWormholeContract
- chain: bttc_testnet
address: "0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a"
type: EvmWormholeContract
- chain: bttc
address: "0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a"
type: EvmWormholeContract
- chain: zksync_sepolia
address: "0xc10F5BE78E464BB0E1f534D66E5A6ecaB150aEFa"
type: EvmWormholeContract
- chain: base_sepolia
address: "0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a"
type: EvmWormholeContract
- chain: movement_evm_devnet
address: "0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a"
type: EvmWormholeContract
- chain: zkfair_testnet
address: "0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a"
type: EvmWormholeContract
- chain: blast_s2_testnet
address: "0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a"
type: EvmWormholeContract
- chain: zkfair
address: "0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a"
type: EvmWormholeContract
- chain: filecoin_calibration
address: "0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a"
type: EvmWormholeContract
- chain: filecoin
address: "0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a"
type: EvmWormholeContract
- chain: zetachain
address: "0xb27e5ca259702f209a29225d0eDdC131039C9933"
type: EvmWormholeContract
- chain: sei_evm_devnet
address: "0x66E9cBa5529824a03B5Bc9931d9c63637101D0F7"
type: EvmWormholeContract
- chain: lightlink_pegasus_testnet
address: "0x5f3c61944CEb01B3eAef861251Fb1E0f14b848fb"
type: EvmWormholeContract
- chain: fantom_sonic_testnet
address: "0x74f09cb3c7e2A01865f424FD14F6dc9A14E3e94E"
type: EvmWormholeContract
- chain: dela_deperp_testnet
address: "0xb27e5ca259702f209a29225d0eDdC131039C9933"
type: EvmWormholeContract
- chain: lightlink_phoenix
address: "0xb27e5ca259702f209a29225d0eDdC131039C9933"
type: EvmWormholeContract
- chain: injective_inevm_testnet
address: "0xb27e5ca259702f209a29225d0eDdC131039C9933"
type: EvmWormholeContract
- chain: injective_inevm
address: "0xb27e5ca259702f209a29225d0eDdC131039C9933"
type: EvmWormholeContract
- chain: hedera_testnet
address: "0xb27e5ca259702f209a29225d0eDdC131039C9933"
type: EvmWormholeContract
- chain: hedera
address: "0xb27e5ca259702f209a29225d0eDdC131039C9933"
type: EvmWormholeContract
- chain: berachain_testnet
address: "0x74f09cb3c7e2A01865f424FD14F6dc9A14E3e94E"
type: EvmWormholeContract
- chain: blast
address: "0xb27e5ca259702f209a29225d0eDdC131039C9933"
type: EvmWormholeContract
- chain: astar_zkevm
address: "0xb27e5ca259702f209a29225d0eDdC131039C9933"
type: EvmWormholeContract
- chain: merlin_testnet
address: "0xb27e5ca259702f209a29225d0eDdC131039C9933"
type: EvmWormholeContract
- chain: mantle_sepolia
address: "0x66E9cBa5529824a03B5Bc9931d9c63637101D0F7"
type: EvmWormholeContract
- chain: merlin
address: "0xb27e5ca259702f209a29225d0eDdC131039C9933"
type: EvmWormholeContract
- chain: manta_sepolia
address: "0xb27e5ca259702f209a29225d0eDdC131039C9933"
type: EvmWormholeContract
- chain: polygon_blackberry
address: "0xb27e5ca259702f209a29225d0eDdC131039C9933"
type: EvmWormholeContract
- chain: arbitrum_blueberry
address: "0xb27e5ca259702f209a29225d0eDdC131039C9933"
type: EvmWormholeContract
- chain: optimism_celestia_raspberry
address: "0xb27e5ca259702f209a29225d0eDdC131039C9933"
type: EvmWormholeContract
- chain: polynomial_testnet
address: "0x87047526937246727E4869C5f76A347160e08672"
type: EvmWormholeContract
- chain: parallel_testnet
address: "0x87047526937246727E4869C5f76A347160e08672"
type: EvmWormholeContract
- chain: parallel
address: "0xb27e5ca259702f209a29225d0eDdC131039C9933"
type: EvmWormholeContract
- chain: linea_sepolia
address: "0xb27e5ca259702f209a29225d0eDdC131039C9933"
type: EvmWormholeContract
- chain: morph_testnet
address: "0xb27e5ca259702f209a29225d0eDdC131039C9933"
type: EvmWormholeContract
- chain: cronos
address: "0x41955476936DdA8d0fA98b8d1778172F7E4fCcA1"
type: EvmWormholeContract
- chain: ronin
address: "0x41955476936DdA8d0fA98b8d1778172F7E4fCcA1"
type: EvmWormholeContract
- chain: saigon
address: "0x36825bf3Fbdf5a29E2d5148bfe7Dcf7B5639e320"
type: EvmWormholeContract
- chain: ethereum
address: "0x74f09cb3c7e2A01865f424FD14F6dc9A14E3e94E"
type: EvmWormholeContract
- chain: mumbai
address: "0x876A4e56A51386aBb1a5ab5d62f77E814372f0C7"
type: EvmWormholeContract
- chain: fantom_testnet
address: "0xe9d69CdD6Fe41e7B621B4A688C5D1a68cB5c8ADc"
type: EvmWormholeContract
- chain: sepolia
address: "0x41c9e39574F40Ad34c79f1C99B66A45eFB830d4c"
type: EvmWormholeContract
- chain: linea_goerli
address: "0xfA25E653b44586dBbe27eE9d252192F0e4956683"
type: EvmWormholeContract
- chain: taiko_hekla
address: "0xb27e5ca259702f209a29225d0eDdC131039C9933"
type: EvmWormholeContract
- chain: olive_testnet
address: "0x74f09cb3c7e2A01865f424FD14F6dc9A14E3e94E"
type: EvmWormholeContract

View File

@ -1,6 +1,6 @@
{
"name": "@pythnetwork/express-relay-evm-js",
"version": "0.4.1",
"version": "0.1.1",
"lockfileVersion": 3,
"requires": true,
"packages": {

View File

@ -1,6 +1,6 @@
{
"name": "@pythnetwork/express-relay-evm-js",
"version": "0.4.1",
"version": "0.2.1",
"description": "Utilities for interacting with the express relay protocol",
"homepage": "https://github.com/pyth-network/pyth-crosschain/tree/main/express_relay/sdk/js",
"author": "Douro Labs",

View File

@ -30,10 +30,7 @@ class SimpleSearcher {
resultDetails = `, transaction ${bidStatus.result}`;
}
console.log(
`Bid status for bid ${bidStatus.id}: ${bidStatus.type.replaceAll(
"_",
" "
)}${resultDetails}`
`Bid status for bid ${bidStatus.id}: ${bidStatus.type}${resultDetails}`
);
}

View File

@ -2,8 +2,15 @@ import type { components, paths } from "./serverTypes";
import createClient, {
ClientOptions as FetchClientOptions,
} from "openapi-fetch";
import { Address, Hex, isAddress, isHex } from "viem";
import { privateKeyToAccount, signTypedData } from "viem/accounts";
import {
Address,
encodeAbiParameters,
Hex,
isAddress,
isHex,
keccak256,
} from "viem";
import { privateKeyToAccount, sign, signatureToHex } from "viem/accounts";
import WebSocket from "isomorphic-ws";
import {
Bid,
@ -11,7 +18,6 @@ import {
BidParams,
BidStatusUpdate,
Opportunity,
EIP712Domain,
OpportunityBid,
OpportunityParams,
TokenAmount,
@ -130,17 +136,6 @@ export class Client {
});
}
private convertEIP712Domain(
eip712Domain: components["schemas"]["EIP712Domain"]
): EIP712Domain {
return {
name: eip712Domain.name,
version: eip712Domain.version,
verifyingContract: checkAddress(eip712Domain.verifying_contract),
chainId: BigInt(eip712Domain.chain_id),
};
}
/**
* Converts an opportunity from the server to the client format
* Returns undefined if the opportunity version is not supported
@ -164,7 +159,6 @@ export class Client {
targetCallValue: BigInt(opportunity.target_call_value),
sellTokens: opportunity.sell_tokens.map(checkTokenQty),
buyTokens: opportunity.buy_tokens.map(checkTokenQty),
eip712Domain: this.convertEIP712Domain(opportunity.eip_712_domain),
};
}
@ -299,49 +293,62 @@ export class Client {
bidParams: BidParams,
privateKey: Hex
): Promise<OpportunityBid> {
const types = {
ExecutionParams: [
{ name: "sellTokens", type: "TokenAmount[]" },
{ name: "buyTokens", type: "TokenAmount[]" },
{ name: "executor", type: "address" },
{ name: "targetContract", type: "address" },
{ name: "targetCalldata", type: "bytes" },
{ name: "targetCallValue", type: "uint256" },
{ name: "validUntil", type: "uint256" },
{ name: "bidAmount", type: "uint256" },
],
TokenAmount: [
{ name: "token", type: "address" },
{ name: "amount", type: "uint256" },
],
};
const account = privateKeyToAccount(privateKey);
const signature = await signTypedData({
privateKey,
domain: {
...opportunity.eip712Domain,
chainId: Number(opportunity.eip712Domain.chainId),
const convertTokenQty = ({ token, amount }: TokenAmount): [Hex, bigint] => [
token,
amount,
];
const payload = encodeAbiParameters(
[
{
name: "repayTokens",
type: "tuple[]",
components: [
{
type: "address",
},
types,
primaryType: "ExecutionParams",
message: {
sellTokens: opportunity.sellTokens,
buyTokens: opportunity.buyTokens,
executor: account.address,
targetContract: opportunity.targetContract,
targetCalldata: opportunity.targetCalldata,
targetCallValue: opportunity.targetCallValue,
validUntil: bidParams.validUntil,
bidAmount: bidParams.amount,
{
type: "uint256",
},
});
],
},
{
name: "receiptTokens",
type: "tuple[]",
components: [
{
type: "address",
},
{
type: "uint256",
},
],
},
{ name: "contract", type: "address" },
{ name: "calldata", type: "bytes" },
{ name: "value", type: "uint256" },
{ name: "bid", type: "uint256" },
{ name: "validUntil", type: "uint256" },
],
[
opportunity.sellTokens.map(convertTokenQty),
opportunity.buyTokens.map(convertTokenQty),
opportunity.targetContract,
opportunity.targetCalldata,
opportunity.targetCallValue,
bidParams.amount,
bidParams.validUntil,
]
);
const msgHash = keccak256(payload);
const hash = signatureToHex(await sign({ hash: msgHash, privateKey }));
return {
permissionKey: opportunity.permissionKey,
bid: bidParams,
executor: account.address,
signature,
signature: hash,
opportunityId: opportunity.opportunityId,
};
}
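
For reference, the signing scheme introduced above replaces EIP-712 typed-data signing with a raw flow: ABI-encode the opportunity and bid fields, keccak-hash the encoding, and sign the digest directly. Below is a minimal sketch of that flow with viem; the function name and the simplified parameter list are illustrative only, not the exact schema used by the client.

import { encodeAbiParameters, keccak256, parseAbiParameters, type Hex } from "viem";
import { sign, signatureToHex } from "viem/accounts";

// Illustrative only: a trimmed-down version of the encode-hash-sign flow.
async function signRawBid(
  targetContract: Hex,
  targetCalldata: Hex,
  bidAmount: bigint,
  validUntil: bigint,
  privateKey: Hex
): Promise<Hex> {
  // ABI-encode the fields exactly as the verifier will re-encode them.
  const payload = encodeAbiParameters(
    parseAbiParameters("address, bytes, uint256, uint256"),
    [targetContract, targetCalldata, bidAmount, validUntil]
  );
  // Hash the encoding and sign the digest directly (no EIP-712 domain separator).
  const digest = keccak256(payload);
  return signatureToHex(await sign({ hash: digest, privateKey }));
}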

View File

@ -90,10 +90,6 @@ export interface components {
/** @enum {string} */
type: "pending";
}
| {
/** @enum {string} */
type: "simulation_failed";
}
| {
/**
* Format: int32
@ -148,28 +144,6 @@ export interface components {
ClientRequest: components["schemas"]["ClientMessage"] & {
id: string;
};
EIP712Domain: {
/**
* @description The network chain id parameter for EIP712 domain.
* @example 31337
*/
chain_id: string;
/**
* @description The name parameter for the EIP712 domain.
* @example OpportunityAdapter
*/
name: string;
/**
* @description The verifying contract address parameter for the EIP712 domain.
* @example 0xcA11bde05977b3631167028862bE2a173976CA11
*/
verifying_contract: string;
/**
* @description The version parameter for the EIP712 domain.
* @example 1
*/
version: string;
};
ErrorBodyResponse: {
error: string;
};
@ -192,7 +166,7 @@ export interface components {
/** @example 0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef12 */
signature: string;
/**
* @description The latest unix timestamp in seconds until which the bid is valid
* @description How long the bid will remain valid.
* @example 1000000000000000000
*/
valid_until: string;
@ -246,7 +220,6 @@ export interface components {
* @example 1700000000000000
*/
creation_time: number;
eip_712_domain: components["schemas"]["EIP712Domain"];
/**
* @description The opportunity unique id
* @example obo3ee3e-58cc-4372-a567-0e02b2c3d479
@ -329,7 +302,6 @@ export interface components {
* @example 1700000000000000
*/
creation_time: number;
eip_712_domain: components["schemas"]["EIP712Domain"];
/**
* @description The opportunity unique id
* @example obo3ee3e-58cc-4372-a567-0e02b2c3d479
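
After this change the status payloads form a three-way discriminated union on `type` (pending, submitted, lost). A minimal sketch of client-side narrowing, with field shapes approximated from the descriptions in this diff (the handler itself is hypothetical):

// Field shapes are approximations based on the schema descriptions above.
type BidStatus =
  | { type: "pending" }
  | { type: "submitted"; result: string; index: number }
  | { type: "lost"; result?: string };

function describeBidStatus(status: BidStatus): string {
  switch (status.type) {
    case "pending":
      return "awaiting auction";
    case "submitted":
      return `included in transaction ${status.result} at index ${status.index}`;
    case "lost":
      return status.result ? `lost; winning transaction ${status.result}` : "lost";
  }
}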

View File

@ -23,27 +23,6 @@ export type BidParams = {
*/
validUntil: bigint;
};
/**
* Represents the configuration for signing an opportunity
*/
export type EIP712Domain = {
/**
* The network chain id for the EIP712 domain.
*/
chainId: bigint;
/**
* The verifying contract address for the EIP712 domain.
*/
verifyingContract: Address;
/**
* The name parameter for the EIP712 domain.
*/
name: string;
/**
* The version parameter for the EIP712 domain.
*/
version: string;
};
/**
* Represents a valid opportunity ready to be executed
*/
@ -81,18 +60,11 @@ export type Opportunity = {
* Tokens to receive after the opportunity is executed
*/
buyTokens: TokenAmount[];
/**
* The data required to sign the opportunity
*/
eip712Domain: EIP712Domain;
};
/**
* All the parameters necessary to represent an opportunity
*/
export type OpportunityParams = Omit<
Opportunity,
"opportunityId" | "eip712Domain"
>;
export type OpportunityParams = Omit<Opportunity, "opportunityId">;
/**
* Represents a bid for an opportunity
*/

View File

@ -6,10 +6,13 @@ from typing import Callable, Any
from collections.abc import Coroutine
from uuid import UUID
import httpx
import web3
import websockets
from websockets.client import WebSocketClientProtocol
from eth_abi import encode
from eth_account.account import Account
from express_relay.express_relay_types import (
from web3.auto import w3
from express_relay.types import (
Opportunity,
BidStatusUpdate,
ClientMessage,
@ -402,66 +405,42 @@ def sign_bid(
Returns:
An OpportunityBid object, representing the transaction to submit to the server. This object contains the searcher's signature.
"""
sell_tokens = [
(token.token, int(token.amount)) for token in opportunity.sell_tokens
]
buy_tokens = [(token.token, int(token.amount)) for token in opportunity.buy_tokens]
target_calldata = bytes.fromhex(opportunity.target_calldata.replace("0x", ""))
executor = Account.from_key(private_key).address
domain_data = {
"name": opportunity.eip_712_domain.name,
"version": opportunity.eip_712_domain.version,
"chainId": opportunity.eip_712_domain.chain_id,
"verifyingContract": opportunity.eip_712_domain.verifying_contract,
}
message_types = {
"ExecutionParams": [
{"name": "sellTokens", "type": "TokenAmount[]"},
{"name": "buyTokens", "type": "TokenAmount[]"},
{"name": "executor", "type": "address"},
{"name": "targetContract", "type": "address"},
{"name": "targetCalldata", "type": "bytes"},
{"name": "targetCallValue", "type": "uint256"},
{"name": "validUntil", "type": "uint256"},
{"name": "bidAmount", "type": "uint256"},
digest = encode(
[
"(address,uint256)[]",
"(address,uint256)[]",
"address",
"bytes",
"uint256",
"uint256",
"uint256",
],
"TokenAmount": [
{"name": "token", "type": "address"},
{"name": "amount", "type": "uint256"},
[
sell_tokens,
buy_tokens,
opportunity.target_contract,
target_calldata,
opportunity.target_call_value,
bid_amount,
valid_until,
],
}
# the data to be signed
message_data = {
"sellTokens": [
{
"token": token.token,
"amount": int(token.amount),
}
for token in opportunity.sell_tokens
],
"buyTokens": [
{
"token": token.token,
"amount": int(token.amount),
}
for token in opportunity.buy_tokens
],
"executor": executor,
"targetContract": opportunity.target_contract,
"targetCalldata": bytes.fromhex(opportunity.target_calldata.replace("0x", "")),
"targetCallValue": opportunity.target_call_value,
"validUntil": valid_until,
"bidAmount": bid_amount,
}
signed_typed_data = Account.sign_typed_data(
private_key, domain_data, message_types, message_data
)
msg_data = web3.Web3.solidity_keccak(["bytes"], [digest])
signature = w3.eth.account.signHash(msg_data, private_key=private_key)
opportunity_bid = OpportunityBid(
opportunity_id=opportunity.opportunity_id,
permission_key=opportunity.permission_key,
amount=bid_amount,
valid_until=valid_until,
executor=executor,
signature=signed_typed_data,
executor=Account.from_key(private_key).address,
signature=signature,
)
return opportunity_bid
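
The counterpart to this signature is recovery: the verifier rebuilds the same ABI encoding, hashes it, and recovers the executor address from the raw-digest signature. A minimal TypeScript sketch, assuming the same simplified field list as the signing sketch earlier (illustrative, not the server's exact schema):

import { encodeAbiParameters, keccak256, parseAbiParameters, recoverAddress, type Address, type Hex } from "viem";

// Illustrative only: recover the signer of a raw (non-EIP-712) bid signature.
async function recoverExecutor(
  targetContract: Hex,
  targetCalldata: Hex,
  bidAmount: bigint,
  validUntil: bigint,
  signature: Hex
): Promise<Address> {
  const payload = encodeAbiParameters(
    parseAbiParameters("address, bytes, uint256, uint256"),
    [targetContract, targetCalldata, bidAmount, validUntil]
  );
  // The digest must match what sign_bid hashed; recoverAddress then yields the executor.
  return recoverAddress({ hash: keccak256(payload), signature });
}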

View File

@ -3,7 +3,7 @@ import asyncio
import logging
from eth_account.account import Account
from express_relay.client import ExpressRelayClient, sign_bid
from express_relay.express_relay_types import (
from express_relay.types import (
Opportunity,
OpportunityBid,
Bytes32,
@ -76,16 +76,18 @@ class SimpleSearcher:
bid_status = bid_status_update.bid_status
result = bid_status_update.result
result_details = ""
if bid_status == BidStatus("submitted"):
result_details = (
f", transaction {result}, index {bid_status_update.index} of multicall"
logger.info(
f"Bid {id} has been submitted in transaction {result} at index {bid_status_update.index} of the multicall"
)
elif bid_status == BidStatus("lost"):
result_details = f", transaction {result}"
logger.error(
f"Bid status for bid {id}: {bid_status.value.replace('_', ' ')}{result_details}"
logger.info(
f"Bid {id} was unsuccessful, not included in transaction {result}"
)
elif bid_status == BidStatus("pending"):
logger.info(f"Bid {id} is pending")
else:
logger.error(f"Unrecognized status {bid_status} for bid {id}")
async def main():

View File

@ -105,14 +105,13 @@ class BidStatus(Enum):
SUBMITTED = "submitted"
LOST = "lost"
PENDING = "pending"
SIMULATION_FAILED = "simulation_failed"
class BidStatusUpdate(BaseModel):
"""
Attributes:
id: The ID of the bid.
bid_status: The current status of the bid.
bid_status: The status enum, either SUBMITTED, LOST, or PENDING.
result: The result of the bid: a transaction hash if the status is SUBMITTED or LOST, else None.
index: The index of the bid in the submitted transaction; None if the status is not SUBMITTED.
"""
@ -124,10 +123,7 @@ class BidStatusUpdate(BaseModel):
@model_validator(mode="after")
def check_result(self):
if self.bid_status in [
BidStatus("pending"),
BidStatus("simulation_failed"),
]:
if self.bid_status == BidStatus("pending"):
assert self.result is None, "result must be None"
else:
assert self.result is not None, "result must be a valid 32-byte hash"
@ -197,13 +193,6 @@ class OpportunityParams(BaseModel):
params: Union[OpportunityParamsV1] = Field(..., discriminator="version")
class EIP712Domain(BaseModel):
name: str
version: str
chain_id: IntString
verifying_contract: Address
class Opportunity(BaseModel):
"""
Attributes:
@ -217,7 +206,6 @@ class Opportunity(BaseModel):
version: The version of the opportunity.
creation_time: The creation time of the opportunity.
opportunity_id: The ID of the opportunity.
eip_712_domain: The EIP712 domain data needed for signing.
"""
target_calldata: HexString
@ -230,7 +218,6 @@ class Opportunity(BaseModel):
version: str
creation_time: IntString
opportunity_id: UUIDString
eip_712_domain: EIP712Domain
supported_versions: ClassVar[list[str]] = ["v1"]

View File

@ -1,6 +1,6 @@
[tool.poetry]
name = "express-relay"
version = "0.4.2"
version = "0.2.1"
description = "Utilities for searchers and protocols to interact with the Express Relay protocol."
authors = ["dourolabs"]
license = "Proprietary"

View File

@ -1,4 +1,4 @@
/target
*config.yaml
config.yaml
*secret*
*private-key*

View File

@ -1488,7 +1488,7 @@ dependencies = [
[[package]]
name = "fortuna"
version = "5.2.2"
version = "4.0.0"
dependencies = [
"anyhow",
"axum",
@ -2822,7 +2822,7 @@ dependencies = [
[[package]]
name = "pythnet-sdk"
version = "2.1.0"
version = "2.0.0"
dependencies = [
"bincode",
"borsh",

View File

@ -1,6 +1,6 @@
[package]
name = "fortuna"
version = "5.2.2"
version = "4.0.0"
edition = "2021"
[dependencies]
@ -16,7 +16,7 @@ ethers = { version = "2.0.14", features = ["ws"] }
futures = { version = "0.3.28" }
hex = "0.4.3"
prometheus-client = { version = "0.21.2" }
pythnet-sdk = { path = "../../pythnet/pythnet_sdk", features = ["strum"] }
pythnet-sdk = { path = "../pythnet/pythnet_sdk", features = ["strum"] }
rand = "0.8.5"
reqwest = { version = "0.11.22", features = ["json", "blocking"] }
serde = { version = "1.0.188", features = ["derive"] }

View File

@ -7,15 +7,15 @@ RUN rustup default nightly-2023-07-23
# Build
WORKDIR /src
COPY apps/fortuna apps/fortuna
COPY fortuna fortuna
COPY pythnet pythnet
COPY target_chains/ethereum/entropy_sdk/solidity/abis target_chains/ethereum/entropy_sdk/solidity/abis
WORKDIR /src/apps/fortuna
WORKDIR /src/fortuna
RUN --mount=type=cache,target=/root/.cargo/registry cargo build --release
FROM rust:${RUST_VERSION}
# Copy artifacts from other images
COPY --from=build /src/apps/fortuna/target/release/fortuna /usr/local/bin/
COPY --from=build /src/fortuna/target/release/fortuna /usr/local/bin/

View File

@ -4,4 +4,3 @@ chains:
contract_addr: 0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a
reveal_delay_blocks: 0
legacy_tx: true
gas_limit: 500000

View File

@ -59,7 +59,7 @@ use {
// contract in the same repo.
abigen!(
PythRandom,
"../../target_chains/ethereum/entropy_sdk/solidity/abis/IEntropy.json"
"../target_chains/ethereum/entropy_sdk/solidity/abis/IEntropy.json"
);
pub type SignablePythContract = PythRandom<
@ -277,11 +277,7 @@ impl EntropyReader for PythContract {
Err(e) => match e {
ContractError::ProviderError { e } => Err(anyhow!(e)),
_ => {
tracing::info!(
sequence_number = sequence_number,
"Gas estimation failed. error: {:?}",
e
);
tracing::info!("Gas estimation for reveal with callback failed: {:?}", e);
Ok(None)
}
},

Some files were not shown because too many files have changed in this diff