Compare commits: pyth-js-v4 ... main (91 commits)

Commits (SHA1):
cf7987f4c5  6e0bd0569b  1e1be9dbeb  d105a7aa86  dd9b07b5e4  e26c9d1a30
9dddd3d1e7  77c68c5069  bf2c8b5d43  3f07c27243  42b64ac09f  55cbe62997
94b36c4961  ff6b11023c  4966b956df  10dc4a05b8  586a4398bd  020ecdf5da
308599714f  587a6fa524  a592c6bc33  31483a9fc7  3d9781ed58  344f8a9e47
b2cb7c878a  4e630edac0  20d99bceb7  d2ce2ecd33  2095da34e9  a8dbabc7f9
cae194eb62  8d32b4c2fc  a203808a44  24a08a06c5  f212907a8b  ef922220ee
6da2e1ba53  050a3412f9  cf90bff236  37ee3b46bd  b47ee059d7  c2da454637
567b4a6597  2014d1e205  93a71f2eef  9437d51843  d31cefb446  48a5faf4d9
b110bbca5c  d05df508a8  d51e5712f4  1a3e3a7c00  4b8b9bfd87  c7883c822b
b30604c5ba  d50488ef5c  64037e5b4a  4445c73443  5b494689d2  e46821d423
644b54676c  f9292177e9  1b13bf651a  e8c198065e  a1e4fc0924  67132c0572
c7c3527bfe  8b76d8c19a  bdc2e967b0  e04edcfece  ffbe02b4f6  26bbe4a0ef
8b66d0f814  0a219fbead  30c741ed49  5fac32fa40  6e62328528  2d9c6d3028
508de75839  7bbcfa80d4  0d6c35fce8  c58b675a63  899a995e2e  5a676978db
3f6a14897d  481a428e88  3f58a2a8b3  76205745c8  93efd61ea4  8be6a9ad1c
76ec4e3322

@@ -15,5 +15,4 @@
 .git
 hermes/wormhole
-!hermes/src/state/cache.rs
+!apps/hermes/src/state/cache.rs

@@ -21,10 +21,10 @@ jobs:
       - uses: actions/checkout@v3

       - name: Download CLI
-        run: wget https://github.com/aptos-labs/aptos-core/releases/download/aptos-cli-v1.0.4/aptos-cli-1.0.4-Ubuntu-22.04-x86_64.zip
+        run: wget https://github.com/aptos-labs/aptos-core/releases/download/aptos-cli-v3.1.0/aptos-cli-3.1.0-Ubuntu-22.04-x86_64.zip

       - name: Unzip CLI
-        run: unzip aptos-cli-1.0.4-Ubuntu-22.04-x86_64.zip
+        run: unzip aptos-cli-3.1.0-Ubuntu-22.04-x86_64.zip

       - name: Run tests
         run: ./aptos move test

@@ -0,0 +1,35 @@
+name: Test Fuel Contract
+
+on:
+  pull_request:
+    paths:
+      - target_chains/fuel/**
+  push:
+    branches:
+      - main
+    paths:
+      - target_chains/fuel/**
+
+env:
+  CARGO_TERM_COLOR: always
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    defaults:
+      run:
+        working-directory: target_chains/fuel/contracts/
+    steps:
+      - uses: actions/checkout@v2
+      - name: Install Fuel toolchain
+        run: |
+          curl https://install.fuel.network | sh
+          echo "$HOME/.fuelup/bin" >> $GITHUB_PATH
+      - name: Build with Forc
+        run: forc build --verbose
+      - name: Run tests with Forc
+        run: forc test --verbose
+      - name: Build
+        run: cargo build --verbose
+      - name: Run tests
+        run: cargo test --verbose

@@ -0,0 +1,37 @@
+name: Starknet contract
+on:
+  pull_request:
+    paths:
+      - target_chains/starknet/contracts/**
+  push:
+    branches:
+      - main
+    paths:
+      - target_chains/starknet/contracts/**
+jobs:
+  check:
+    name: Starknet Foundry tests
+    runs-on: ubuntu-latest
+    defaults:
+      run:
+        working-directory: target_chains/starknet/contracts/
+    steps:
+      - uses: actions/checkout@v3
+      - name: Install Scarb
+        uses: software-mansion/setup-scarb@v1
+        with:
+          tool-versions: target_chains/starknet/contracts/.tool-versions
+      - name: Install Starknet Foundry
+        uses: foundry-rs/setup-snfoundry@v3
+        with:
+          tool-versions: target_chains/starknet/contracts/.tool-versions
+      - name: Install Starkli
+        run: curl https://get.starkli.sh | sh && . ~/.config/.starkli/env && starkliup -v $(awk '/starkli/{print $2}' .tool-versions)
+      - name: Install Katana
+        run: curl -L https://install.dojoengine.org | bash && PATH="$PATH:$HOME/.config/.dojo/bin" dojoup -v $(awk '/dojo/{print $2}' .tool-versions)
+      - name: Check formatting
+        run: scarb fmt --check
+      - name: Run tests
+        run: snforge test
+      - name: Test local deployment script
+        run: bash -c 'PATH="$PATH:$HOME/.config/.dojo/bin" katana & . ~/.config/.starkli/env && deploy/local_deploy'

@@ -12,7 +12,7 @@ jobs:
       - uses: actions/checkout@v2
       - uses: actions/setup-node@v2
         with:
-          node-version: "16"
+          node-version: "18"
          registry-url: "https://registry.npmjs.org"
       - run: npm ci
       - run: npx lerna run build --no-private

@@ -11,8 +11,14 @@ jobs:
     steps:
       - name: Checkout sources
        uses: actions/checkout@v2
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          default: true
+          profile: minimal

-      - run: cargo publish --token ${CARGO_REGISTRY_TOKEN}
+      - run: cargo +stable-x86_64-unknown-linux-gnu publish --token ${CARGO_REGISTRY_TOKEN}
        env:
          CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
        working-directory: "target_chains/solana/pyth_solana_receiver_sdk"

@@ -40,7 +40,7 @@ jobs:
        id: ecr_login
       - run: |
           DOCKER_BUILDKIT=1 docker build -t lerna -f Dockerfile.lerna .
-          DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f price_pusher/Dockerfile .
+          DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f apps/price_pusher/Dockerfile .
           docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
        env:
          ECR_REGISTRY: public.ecr.aws

@@ -5,12 +5,17 @@ on:
     tags:
       - "python-v*"

+env:
+  PYTHON_VERSION: "3.11"
+
 jobs:
   deploy:
     runs-on: ubuntu-20.04
     steps:
       - uses: actions/checkout@v2
       - uses: actions/setup-python@v2
+        with:
+          python-version: ${{ env.PYTHON_VERSION }}
       - name: Install dependencies
         run: |
           python3 -m pip install --upgrade poetry

@@ -80,13 +80,13 @@ repos:
       - id: cargo-fmt-pythnet-sdk
        name: Cargo format for pythnet SDK
        language: "rust"
-        entry: cargo +nightly-2023-07-23 fmt --manifest-path ./pythnet/pythnet_sdk/Cargo.toml --all -- --config-path rustfmt.toml
+        entry: cargo +nightly-2024-03-26 fmt --manifest-path ./pythnet/pythnet_sdk/Cargo.toml --all -- --config-path rustfmt.toml
        pass_filenames: false
        files: pythnet/pythnet_sdk
      - id: cargo-clippy-pythnet-sdk
        name: Cargo clippy for pythnet SDK
        language: "rust"
-        entry: cargo +nightly-2023-07-23 clippy --manifest-path ./pythnet/pythnet_sdk/Cargo.toml --tests --fix --allow-dirty --allow-staged -- -D warnings
+        entry: cargo +nightly-2024-03-26 clippy --manifest-path ./pythnet/pythnet_sdk/Cargo.toml --tests --fix --allow-dirty --allow-staged -- -D warnings
        pass_filenames: false
        files: pythnet/pythnet_sdk
      # Hooks for solana receiver contract

@@ -79,10 +79,11 @@ Lerna has some common failure modes that you may encounter:

 1. `npm ci` fails with a typescript compilation error about a missing package.
    This error likely means that the failing package has a `prepare` entry compiling the typescript in its `package.json`.
    Fix this error by moving that logic to the `prepublishOnly` entry.
-1. The software builds locally but fails in CI, or vice-versa.
+2. The software builds locally but fails in CI, or vice-versa.
    This error likely means that some local build caches need to be cleaned.
    The build error may not indicate that this is a caching issue, e.g., it may appear that the packages are being built in the wrong order.
    Delete `node_modules/`, `lib/` and `tsconfig.tsbuildinfo` from each package's subdirectory, then try again.
+3. `npm ci` fails due to wrong node version. Make sure to be using `v18`. Node version `v21` is not supported and known to cause issues.

 ## Audit / Feature Status

@@ -1,4 +1,4 @@
 /target
-config.yaml
+*config.yaml
 *secret*
 *private-key*

@@ -1488,7 +1488,7 @@ dependencies = [

 [[package]]
 name = "fortuna"
-version = "4.0.0"
+version = "5.2.2"
 dependencies = [
  "anyhow",
  "axum",

@@ -2822,7 +2822,7 @@ dependencies = [

 [[package]]
 name = "pythnet-sdk"
-version = "2.0.0"
+version = "2.1.0"
 dependencies = [
  "bincode",
  "borsh",

@@ -1,6 +1,6 @@
 [package]
 name = "fortuna"
-version = "4.0.0"
+version = "5.2.2"
 edition = "2021"

 [dependencies]

@@ -4,3 +4,4 @@ chains:
     contract_addr: 0x8250f4aF4B972684F7b336503E2D6dFeDeB1487a
     reveal_delay_blocks: 0
     legacy_tx: true
+    gas_limit: 500000

@@ -0,0 +1,7 @@
+chains:
+  lightlink-pegasus:
+    commitments:
+      # prettier-ignore
+      - seed: [219,125,217,197,234,88,208,120,21,181,172,143,239,102,41,233,167,212,237,106,37,255,184,165,238,121,230,155,116,158,173,48]
+        chain_length: 10000
+        original_commitment_sequence_number: 104

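Each entry in this new provider-config file carries the parameters of one hash-chain commitment, and its fields map one-to-one onto the `Commitment` struct added to fortuna's `config.rs` later in this diff. A minimal sketch of deserializing a file of this shape, assuming the `serde` and `serde_yaml` crates; the struct names mirror the diff, but the `main` harness and dummy seed are illustrative only:

use serde::Deserialize;
use std::collections::HashMap;

// Field names mirror the Commitment struct added in fortuna's config.rs below.
#[derive(Debug, Deserialize)]
struct Commitment {
    seed: [u8; 32],
    chain_length: u64,
    original_commitment_sequence_number: u64,
}

#[derive(Debug, Deserialize)]
struct ProviderChainConfig {
    commitments: Vec<Commitment>,
}

#[derive(Debug, Deserialize)]
struct ProviderConfig {
    chains: HashMap<String, ProviderChainConfig>,
}

fn main() -> Result<(), serde_yaml::Error> {
    // Same shape as the provider-config file added above; the seed is a dummy.
    let yaml = r#"
chains:
  lightlink-pegasus:
    commitments:
      - seed: [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32]
        chain_length: 10000
        original_commitment_sequence_number: 104
"#;
    let config: ProviderConfig = serde_yaml::from_str(yaml)?;
    println!(
        "chain_length = {}",
        config.chains["lightlink-pegasus"].commitments[0].chain_length
    );
    Ok(())
}
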
@@ -1 +1 @@
-nightly-2023-07-23-x86_64-unknown-linux-gnu
+nightly-2023-07-23

@@ -277,7 +277,11 @@ impl EntropyReader for PythContract {
             Err(e) => match e {
                 ContractError::ProviderError { e } => Err(anyhow!(e)),
                 _ => {
-                    tracing::info!("Gas estimation for reveal with callback failed: {:?}", e);
+                    tracing::info!(
+                        sequence_number = sequence_number,
+                        "Gas estimation failed. error: {:?}",
+                        e
+                    );
                     Ok(None)
                 }
             },

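The `Err` vs `Ok(None)` split in this reader mirrors a convention used throughout the keeper (see `process_event` further down): provider/RPC errors are surfaced as `Err` so the caller can retry, while any other estimation failure means the reveal should simply be skipped. A self-contained sketch of that contract, with a simplified stand-in for ethers' `ContractError` (the real enum is richer):

use anyhow::{anyhow, Result};

// Simplified stand-in for ethers' ContractError; only the shape matters here.
enum ContractError {
    ProviderError { e: String },
    Revert(String),
}

// Tri-state contract: Ok(Some(gas)) = proceed, Ok(None) = skip permanently,
// Err(_) = transient provider failure, so the caller may retry.
fn estimate_gas(outcome: Result<u64, ContractError>) -> Result<Option<u64>> {
    match outcome {
        Ok(gas) => Ok(Some(gas)),
        Err(ContractError::ProviderError { e }) => Err(anyhow!(e)),
        Err(ContractError::Revert(reason)) => {
            // The callback reverts during simulation: revealing can never
            // succeed, so report "no gas estimate" rather than an error.
            eprintln!("simulation reverted: {reason}");
            Ok(None)
        }
    }
}

fn main() {
    assert!(estimate_gas(Ok(21_000)).unwrap().is_some());
    assert!(estimate_gas(Err(ContractError::Revert("bad callback".into()))).unwrap().is_none());
    assert!(estimate_gas(Err(ContractError::ProviderError { e: "rpc down".into() })).is_err());
}
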
@@ -8,7 +8,9 @@ use {
     chain::ethereum::PythContract,
     command::register_provider::CommitmentMetadata,
     config::{
+        Commitment,
         Config,
+        ProviderConfig,
         RunOptions,
     },
     keeper,

@@ -27,7 +29,6 @@ use {
         collections::HashMap,
         net::SocketAddr,
         sync::Arc,
-        vec,
     },
     tokio::{
         spawn,

@@ -121,38 +122,67 @@ pub async fn run_keeper(

 pub async fn run(opts: &RunOptions) -> Result<()> {
     let config = Config::load(&opts.config.config)?;
-    let private_key = opts.load_private_key()?;
+    let provider_config = opts
+        .provider_config
+        .provider_config
+        .as_ref()
+        .map(|path| ProviderConfig::load(&path).expect("Failed to load provider config"));
     let secret = opts.randomness.load_secret()?;
     let (tx_exit, rx_exit) = watch::channel(false);

     let mut chains: HashMap<ChainId, BlockchainState> = HashMap::new();
     for (chain_id, chain_config) in &config.chains {
         let contract = Arc::new(PythContract::from_config(&chain_config)?);
-        let provider_info = contract.get_provider_info(opts.provider).call().await?;
+        let provider_chain_config = provider_config
+            .as_ref()
+            .and_then(|c| c.get_chain_config(chain_id));
+        let mut provider_commitments = provider_chain_config
+            .as_ref()
+            .map(|c| c.get_sorted_commitments())
+            .unwrap_or_else(|| Vec::new());
+
+        let provider_info = contract.get_provider_info(opts.provider).call().await?;
+        let latest_metadata =
+            bincode::deserialize::<CommitmentMetadata>(&provider_info.commitment_metadata)
+                .map_err(|e| {
+                    anyhow!(
+                        "Chain: {} - Failed to deserialize commitment metadata: {}",
+                        &chain_id,
+                        e
+                    )
+                })?;
+
+        provider_commitments.push(Commitment {
+            seed: latest_metadata.seed,
+            chain_length: latest_metadata.chain_length,
+            original_commitment_sequence_number: provider_info.original_commitment_sequence_number,
+        });

         // Reconstruct the hash chain based on the metadata and check that it matches the on-chain commitment.
         // TODO: we should instantiate the state here with multiple hash chains.
         // This approach works fine as long as we haven't rotated the commitment (i.e., all user requests
         // are for the most recent chain).
         // TODO: we may want to load the hash chain in a lazy/fault-tolerant way. If there are many blockchains,
         // then it's more likely that some RPC fails. We should tolerate these faults and generate the hash chain
         // later when a user request comes in for that chain.
-        let metadata =
-            bincode::deserialize::<CommitmentMetadata>(&provider_info.commitment_metadata)?;
-
-        let hash_chain = PebbleHashChain::from_config(
-            &secret,
-            &chain_id,
-            &opts.provider,
-            &chain_config.contract_addr,
-            &metadata.seed,
-            metadata.chain_length,
-        )?;
+        let mut offsets = Vec::<usize>::new();
+        let mut hash_chains = Vec::<PebbleHashChain>::new();
+
+        for commitment in &provider_commitments {
+            let offset = commitment.original_commitment_sequence_number.try_into()?;
+            offsets.push(offset);
+
+            let pebble_hash_chain = PebbleHashChain::from_config(
+                &secret,
+                &chain_id,
+                &opts.provider,
+                &chain_config.contract_addr,
+                &commitment.seed,
+                commitment.chain_length,
+            )?;
+            hash_chains.push(pebble_hash_chain);
+        }

         let chain_state = HashChainState {
-            offsets: vec![provider_info
-                .original_commitment_sequence_number
-                .try_into()?],
-            hash_chains: vec![hash_chain],
+            offsets,
+            hash_chains,
         };

         if chain_state.reveal(provider_info.original_commitment_sequence_number)?

@@ -187,7 +217,10 @@ pub async fn run(opts: &RunOptions) -> Result<()> {

         Ok::<(), Error>(())
     });
-    spawn(run_keeper(chains.clone(), config, private_key));
+
+    if let Some(keeper_private_key) = opts.load_keeper_private_key()? {
+        spawn(run_keeper(chains.clone(), config, keeper_private_key));
+    }

     run_api(opts.addr.clone(), chains, rx_exit).await?;

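The loop above builds parallel `offsets` and `hash_chains` vectors so that a single `HashChainState` can serve reveals across commitment rotations. `HashChainState`'s internals are not part of this diff; the following is a minimal sketch, assuming it picks the most recent chain whose offset is at or below the requested sequence number — the actual fortuna implementation may differ:

// Hypothetical stand-in for fortuna's hash chain; the real PebbleHashChain
// derives each value from a secret, not from the index as done here.
struct PebbleHashChain;

impl PebbleHashChain {
    fn reveal_ith(&self, i: usize) -> Result<[u8; 32], String> {
        // Illustrative only: return a dummy 32-byte value.
        let mut out = [0u8; 32];
        out[0] = i as u8;
        Ok(out)
    }
}

// Sketch of HashChainState: offsets[i] is the first sequence number served by
// hash_chains[i]; offsets must be sorted ascending (see get_sorted_commitments).
struct HashChainState {
    offsets: Vec<usize>,
    hash_chains: Vec<PebbleHashChain>,
}

impl HashChainState {
    fn reveal(&self, sequence_number: u64) -> Result<[u8; 32], String> {
        let seq = sequence_number as usize;
        // Pick the most recent chain that starts at or before this sequence number.
        let chain_index = self
            .offsets
            .partition_point(|&offset| offset <= seq)
            .checked_sub(1)
            .ok_or_else(|| "sequence number predates the first commitment".to_string())?;
        self.hash_chains[chain_index].reveal_ith(seq - self.offsets[chain_index])
    }
}

fn main() {
    let state = HashChainState {
        offsets: vec![0, 104],
        hash_chains: vec![PebbleHashChain, PebbleHashChain],
    };
    // A request at sequence 110 falls in the second (rotated) chain.
    assert!(state.reveal(110).is_ok());
}
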
@@ -16,7 +16,10 @@ use {
         PebbleHashChain,
     },
 },
-    anyhow::Result,
+    anyhow::{
+        anyhow,
+        Result,
+    },
     ethers::{
         abi::Bytes as AbiBytes,
         signers::{

@@ -66,7 +69,14 @@ pub async fn setup_provider(opts: &SetupProviderOptions) -> Result<()> {
         register = true;
     } else {
         let metadata =
-            bincode::deserialize::<CommitmentMetadata>(&provider_info.commitment_metadata)?;
+            bincode::deserialize::<CommitmentMetadata>(&provider_info.commitment_metadata)
+                .map_err(|e| {
+                    anyhow!(
+                        "Chain: {} - Failed to deserialize commitment metadata: {}",
+                        &chain_id,
+                        e
+                    )
+                })?;

         let hash_chain = PebbleHashChain::from_config(
             &secret,

@@ -74,7 +84,7 @@ pub async fn setup_provider(opts: &SetupProviderOptions) -> Result<()> {
             &provider_address,
             &chain_config.contract_addr,
             &metadata.seed,
-            metadata.chain_length,
+            opts.randomness.chain_length,
         )?;
         let chain_state = HashChainState {
             offsets: vec![provider_info

@@ -105,7 +115,8 @@ pub async fn setup_provider(opts: &SetupProviderOptions) -> Result<()> {
             fee: opts.fee,
             uri,
         })
-        .await?;
+        .await
+        .map_err(|e| anyhow!("Chain: {} - Failed to register provider: {}", &chain_id, e))?;
         tracing::info!("{}: registered", &chain_id);
     } else {
         if provider_info.fee_in_wei != opts.fee {

@@ -97,7 +97,7 @@ pub struct RandomnessOptions {
     /// The length of the hash chain to generate.
     #[arg(long = "chain-length")]
    #[arg(env = "FORTUNA_CHAIN_LENGTH")]
-    #[arg(default_value = "10000")]
+    #[arg(default_value = "100000")]
    pub chain_length: u64,
 }

@@ -158,3 +158,57 @@ pub struct EthereumConfig {
     /// The gas limit to use for entropy callback transactions.
     pub gas_limit: U256,
 }
+
+#[derive(Args, Clone, Debug)]
+#[command(next_help_heading = "Provider Config Options")]
+#[group(id = "ProviderConfig")]
+pub struct ProviderConfigOptions {
+    #[arg(long = "provider-config")]
+    #[arg(env = "FORTUNA_PROVIDER_CONFIG")]
+    pub provider_config: Option<String>,
+}
+
+#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
+pub struct ProviderConfig {
+    pub chains: HashMap<ChainId, ProviderChainConfig>,
+}
+
+impl ProviderConfig {
+    pub fn load(path: &str) -> Result<ProviderConfig> {
+        // Open and read the YAML file
+        let yaml_content = fs::read_to_string(path)?;
+        let config: ProviderConfig = serde_yaml::from_str(&yaml_content)?;
+        Ok(config)
+    }
+
+    /// Get the provider chain config. Returns an Option because we may not
+    /// have any past commitments for a chain (for example, a new chain).
+    pub fn get_chain_config(&self, chain_id: &ChainId) -> Option<ProviderChainConfig> {
+        self.chains.get(chain_id).map(|x| x.clone())
+    }
+}
+
+#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
+pub struct ProviderChainConfig {
+    commitments: Vec<Commitment>,
+}
+
+impl ProviderChainConfig {
+    /// Returns a clone of the commitments in the sorted order.
+    /// `HashChainState` requires offsets to be in order.
+    pub fn get_sorted_commitments(&self) -> Vec<Commitment> {
+        let mut sorted_commitments = self.commitments.clone();
+        sorted_commitments.sort_by(|c1, c2| {
+            c1.original_commitment_sequence_number
+                .cmp(&c2.original_commitment_sequence_number)
+        });
+        sorted_commitments
+    }
+}
+
+#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
+pub struct Commitment {
+    pub seed: [u8; 32],
+    pub chain_length: u64,
+    pub original_commitment_sequence_number: u64,
+}

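The `ProviderConfigOptions` struct above combines a command-line flag with an environment-variable fallback. A self-contained sketch of that clap pattern, assuming clap 4 with the `derive` and `env` features enabled, which is consistent with the attributes in the diff:

use clap::Parser;

// Sketch of the pattern used by ProviderConfigOptions above: an optional flag
// that can also be populated from an environment variable.
// Requires clap with the "derive" and "env" features.
#[derive(Parser, Debug)]
struct Opts {
    #[arg(long = "provider-config")]
    #[arg(env = "FORTUNA_PROVIDER_CONFIG")]
    provider_config: Option<String>,
}

fn main() {
    // Resolution order: --provider-config flag, then FORTUNA_PROVIDER_CONFIG,
    // then None (the keeper then starts with no commitment history).
    let opts = Opts::parse();
    println!("{:?}", opts.provider_config);
}
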
@@ -1,6 +1,7 @@
 use {
     crate::config::{
         ConfigOptions,
+        ProviderConfigOptions,
         RandomnessOptions,
     },
     anyhow::Result,

@@ -18,6 +19,9 @@ pub struct RunOptions {
     #[command(flatten)]
     pub config: ConfigOptions,

+    #[command(flatten)]
+    pub provider_config: ProviderConfigOptions,
+
     #[command(flatten)]
     pub randomness: RandomnessOptions,

@@ -32,16 +36,20 @@ pub struct RunOptions {
     #[arg(env = "FORTUNA_PROVIDER")]
     pub provider: Address,

-    /// Path to a file containing a 20-byte (40 char) hex encoded Ethereum private key.
+    /// If provided, the keeper will run alongside the Fortuna API service.
+    /// It should be a path to a file containing a 20-byte (40 char) hex encoded Ethereum private key.
     /// This key is required to submit transactions for entropy callback requests.
     /// This key should not be a registered provider.
     #[arg(long = "keeper-private-key")]
     #[arg(env = "KEEPER_PRIVATE_KEY")]
-    pub keeper_private_key_file: String,
+    pub keeper_private_key_file: Option<String>,
 }

 impl RunOptions {
-    pub fn load_private_key(&self) -> Result<String> {
-        return Ok((fs::read_to_string(&self.keeper_private_key_file))?);
+    pub fn load_keeper_private_key(&self) -> Result<Option<String>> {
+        if let Some(ref keeper_private_key_file) = self.keeper_private_key_file {
+            return Ok(Some(fs::read_to_string(keeper_private_key_file)?));
+        }
+        return Ok(None);
     }
 }

@@ -13,8 +13,12 @@ use {
         },
         config::EthereumConfig,
     },
-    anyhow::Result,
+    anyhow::{
+        anyhow,
+        Result,
+    },
     ethers::{
         contract::ContractError,
         providers::{
             Middleware,
             Provider,

@@ -32,9 +36,17 @@ use {
             Duration,
         },
     },
-    tracing,
+    tracing::{
+        self,
+        Instrument,
+    },
 };

+#[derive(Debug)]
+pub struct BlockRange {
+    pub from: BlockNumber,
+    pub to: BlockNumber,
+}
+
 /// How much to wait before retrying in case of an RPC error
 const RETRY_INTERVAL: Duration = Duration::from_secs(5);

@@ -55,7 +67,11 @@ async fn get_latest_safe_block(chain_state: &BlockchainState) -> BlockNumber {
         .await
     {
         Ok(latest_confirmed_block) => {
-            return latest_confirmed_block - chain_state.reveal_delay_blocks
+            tracing::info!(
+                "Fetched latest safe block {}",
+                latest_confirmed_block - chain_state.reveal_delay_blocks
+            );
+            return latest_confirmed_block - chain_state.reveal_delay_blocks;
         }
         Err(e) => {
             tracing::error!("Error while getting block number. error: {:?}", e);

@@ -65,22 +81,17 @@ async fn get_latest_safe_block(chain_state: &BlockchainState) -> BlockNumber {
         }
     }
 }

-/// Run threads to handle events for the last `BACKLOG_RANGE` blocks. Watch for new blocks and
+/// Run threads to handle events for the last `BACKLOG_RANGE` blocks, watch for new blocks and
 /// handle any events for the new blocks.
+#[tracing::instrument(name="keeper", skip_all, fields(chain_id=chain_state.id))]
 pub async fn run_keeper_threads(
     private_key: String,
     chain_eth_config: EthereumConfig,
     chain_state: BlockchainState,
 ) {
-    tracing::info!("Starting keeper for chain: {}", &chain_state.id);
-
-    let latest_safe_block = get_latest_safe_block(&chain_state).await;
-
-    tracing::info!(
-        "Latest safe block for chain {}: {} ",
-        &chain_state.id,
-        &latest_safe_block
-    );
+    tracing::info!("starting keeper");
+    let latest_safe_block = get_latest_safe_block(&chain_state).in_current_span().await;
+    tracing::info!("latest safe block: {}", &latest_safe_block);

     let contract = Arc::new(
         SignablePythContract::from_config(&chain_eth_config, &private_key)

@@ -88,64 +99,48 @@ pub async fn run_keeper_threads(
             .expect("Chain config should be valid"),
     );

-    let backlog_chain_state = chain_state.clone();
-    let backlog_contract = contract.clone();
     // Spawn a thread to handle the events from last BACKLOG_RANGE blocks.
-    spawn(async move {
-        let from_block = latest_safe_block.saturating_sub(BACKLOG_RANGE);
-        process_block_range(
-            BlockRange {
-                from: from_block,
-                to: latest_safe_block,
-            },
-            backlog_contract,
-            chain_eth_config.gas_limit,
-            backlog_chain_state.clone(),
-        )
-        .await;
-        tracing::info!(
-            "Backlog processing for chain: {} completed",
-            &backlog_chain_state.id
-        );
-    });
+    spawn(
+        process_backlog(
+            BlockRange {
+                from: latest_safe_block.saturating_sub(BACKLOG_RANGE),
+                to: latest_safe_block,
+            },
+            contract.clone(),
+            chain_eth_config.gas_limit,
+            chain_state.clone(),
+        )
+        .in_current_span(),
+    );

     let (tx, rx) = mpsc::channel::<BlockRange>(1000);

-    let watch_blocks_chain_state = chain_state.clone();
     // Spawn a thread to watch for new blocks and send the range of blocks for which events has not been handled to the `tx` channel.
-    spawn(async move {
-        loop {
-            if let Err(e) = watch_blocks(
-                watch_blocks_chain_state.clone(),
-                latest_safe_block,
-                tx.clone(),
-                chain_eth_config.geth_rpc_wss.clone(),
-            )
-            .await
-            {
-                tracing::error!(
-                    "Error in watching blocks for chain: {}, {:?}",
-                    &watch_blocks_chain_state.id,
-                    e
-                );
-                time::sleep(RETRY_INTERVAL).await;
-            }
-        }
-    });
+    spawn(
+        watch_blocks_wrapper(
+            chain_state.clone(),
+            latest_safe_block,
+            tx,
+            chain_eth_config.geth_rpc_wss.clone(),
+        )
+        .in_current_span(),
+    );
     // Spawn a thread that listens for block ranges on the `rx` channel and processes the events for those blocks.
-    spawn(process_new_blocks(
-        chain_state.clone(),
-        rx,
-        Arc::clone(&contract),
-        chain_eth_config.gas_limit,
-    ));
+    spawn(
+        process_new_blocks(
+            chain_state.clone(),
+            rx,
+            Arc::clone(&contract),
+            chain_eth_config.gas_limit,
+        )
+        .in_current_span(),
+    );
 }

-// Process an event for a chain. It estimates the gas for the reveal with callback and
-// submits the transaction if the gas estimate is below the gas limit.
-// It will return an Error if the gas estimation failed with a provider error or if the
-// reveal with callback failed with a provider error.
+/// Process an event for a chain. It estimates the gas for the reveal with callback and
+/// submits the transaction if the gas estimate is below the gas limit.
+/// It will return an Error if the gas estimation failed with a provider error or if the
+/// reveal with callback failed with a provider error.
 pub async fn process_event(
     event: RequestedWithCallbackEvent,
     chain_config: &BlockchainState,

@@ -159,9 +154,8 @@ pub async fn process_event(
         Ok(result) => result,
         Err(e) => {
             tracing::error!(
-                "Error while revealing for provider: {} and sequence number: {} with error: {:?}",
-                event.provider_address,
-                event.sequence_number,
+                sequence_number = &event.sequence_number,
+                "Error while revealing with error: {:?}",
                 e
             );
             return Ok(());

@@ -176,6 +170,7 @@ pub async fn process_event(
             event.user_random_number,
             provider_revelation,
         )
+        .in_current_span()
         .await;

     match gas_estimate_res {

@@ -188,53 +183,74 @@ pub async fn process_event(

             if gas_estimate > gas_limit {
                 tracing::error!(
-                    "Gas estimate for reveal with callback is higher than the gas limit for chain: {}",
-                    &chain_config.id
+                    sequence_number = &event.sequence_number,
+                    "Gas estimate for reveal with callback is higher than the gas limit"
                 );
                 return Ok(());
             }

-            let res = contract
+            let contract_call = contract
                 .reveal_with_callback(
                     event.provider_address,
                     event.sequence_number,
                     event.user_random_number,
                     provider_revelation,
                 )
-                .gas(gas_estimate)
-                .send()
-                .await?
-                .await;
+                .gas(gas_estimate);

-            match res {
-                Ok(_) => {
+            let res = contract_call.send().await;
+
+            let pending_tx = match res {
+                Ok(pending_tx) => pending_tx,
+                Err(e) => match e {
+                    // If there is a provider error, we weren't able to send the transaction.
+                    // We will return an error. So, that the caller can decide what to do (retry).
+                    ContractError::ProviderError { e } => return Err(e.into()),
+                    // For all the other errors, it is likely the case we won't be able to reveal for
+                    // ever. We will return an Ok(()) to signal that we have processed this reveal
+                    // and concluded that its Ok to not reveal.
+                    _ => {
+                        tracing::error!(
+                            sequence_number = &event.sequence_number,
+                            "Error while revealing with error: {:?}",
+                            e
+                        );
+                        return Ok(());
+                    }
+                },
+            };
+
+            match pending_tx.await {
+                Ok(res) => {
                     tracing::info!(
-                        "Revealed on chain: {} for provider: {} and sequence number: {} with res: {:?}",
-                        &chain_config.id,
-                        event.provider_address,
-                        event.sequence_number,
+                        sequence_number = &event.sequence_number,
+                        "Revealed with res: {:?}",
                         res
                     );
                     Ok(())
                 }
                 Err(e) => {
                     tracing::error!(
-                        "Error while revealing for provider: {} and sequence number: {} with error: {:?}",
-                        event.provider_address,
-                        event.sequence_number,
+                        sequence_number = &event.sequence_number,
+                        "Error while revealing with error: {:?}",
                         e
                     );
                     Err(e.into())
                 }
             }
         }
-        None => Ok(()),
+        None => {
+            tracing::info!(
+                sequence_number = &event.sequence_number,
+                "Not processing event"
+            );
+            Ok(())
+        }
     },
     Err(e) => {
         tracing::error!(
-            "Error while simulating reveal for provider: {} and sequence number: {} \n error: {:?}",
-            event.provider_address,
-            event.sequence_number,
+            sequence_number = &event.sequence_number,
+            "Error while simulating reveal with error: {:?}",
             e
         );
         Err(e)

@@ -243,21 +259,14 @@ pub async fn process_event(
     }
 }

-/// Process a range of blocks for a chain. It will fetch events for the blocks in the provided range
-/// and then try to process them one by one. If the process fails, it will retry indefinitely.
+/// Process a range of blocks in batches. It calls the `process_single_block_batch` method for each batch.
+#[tracing::instrument(skip_all, fields(range_from_block=block_range.from, range_to_block=block_range.to))]
 pub async fn process_block_range(
     block_range: BlockRange,
     contract: Arc<SignablePythContract>,
     gas_limit: U256,
     chain_state: api::BlockchainState,
 ) {
-    tracing::info!(
-        "Processing blocks for chain: {} from block: {} to block: {}",
-        &chain_state.id,
-        block_range.from,
-        block_range.to
-    );
-
     let BlockRange {
         from: first_block,
         to: last_block,

@@ -268,41 +277,64 @@ pub async fn process_block_range(
         if to_block > last_block {
             to_block = last_block;
         }

+        process_single_block_batch(
+            BlockRange {
+                from: current_block,
+                to: to_block,
+            },
+            contract.clone(),
+            gas_limit,
+            chain_state.clone(),
+        )
+        .in_current_span()
+        .await;
+
+        current_block = to_block + 1;
+    }
+}
+
+/// Process a batch of blocks for a chain. It will fetch events for all the blocks in a single call for the provided batch
+/// and then try to process them one by one. If the process fails, it will retry indefinitely.
+#[tracing::instrument(name="batch", skip_all, fields(batch_from_block=block_range.from, batch_to_block=block_range.to))]
+pub async fn process_single_block_batch(
+    block_range: BlockRange,
+    contract: Arc<SignablePythContract>,
+    gas_limit: U256,
+    chain_state: api::BlockchainState,
+) {
     loop {
         let events_res = chain_state
             .contract
-            .get_request_with_callback_events(current_block, to_block)
+            .get_request_with_callback_events(block_range.from, block_range.to)
             .await;

         match events_res {
             Ok(events) => {
-                for event in events {
+                tracing::info!(num_of_events = &events.len(), "Processing",);
+                for event in &events {
+                    tracing::info!(sequence_number = &event.sequence_number, "Processing event",);
                     while let Err(e) =
-                        process_event(event.clone(), &chain_state, &contract, gas_limit).await
+                        process_event(event.clone(), &chain_state, &contract, gas_limit)
+                            .in_current_span()
+                            .await
                     {
                         tracing::error!(
-                            "Error while processing event for chain: {} and sequence number: {}. Waiting for {} seconds before retry. error: {:?}",
-                            &chain_state.id,
-                            &event.sequence_number,
+                            sequence_number = &event.sequence_number,
+                            "Error while processing event. Waiting for {} seconds before retry. error: {:?}",
                             RETRY_INTERVAL.as_secs(),
                             e
                         );
                         time::sleep(RETRY_INTERVAL).await;
                     }
+                    tracing::info!(sequence_number = &event.sequence_number, "Processed event",);
                 }
-                tracing::info!(
-                    "Backlog processed for chain: {} from block: {} to block: {}",
-                    &chain_state.id,
-                    &current_block,
-                    &to_block
-                );
-                current_block = to_block + 1;
+                tracing::info!(num_of_events = &events.len(), "Processed",);
+                break;
             }
             Err(e) => {
                 tracing::error!(
-                    "Error while getting events for chain: {} from block: {} to block: {}. Waiting for {} seconds before retry. error: {:?}",
-                    &chain_state.id,
-                    &current_block,
-                    &to_block,
+                    "Error while getting events. Waiting for {} seconds before retry. error: {:?}",
                     RETRY_INTERVAL.as_secs(),
                     e
                 );

@@ -312,9 +344,30 @@ pub async fn process_block_range(
         }
     }

-pub struct BlockRange {
-    pub from: BlockNumber,
-    pub to: BlockNumber,
+/// Wrapper for the `watch_blocks` method. If there was an error while watching, it will retry after a delay.
+/// It retries indefinitely.
+#[tracing::instrument(name="watch_blocks", skip_all, fields(initial_safe_block=latest_safe_block))]
+pub async fn watch_blocks_wrapper(
+    chain_state: BlockchainState,
+    latest_safe_block: BlockNumber,
+    tx: mpsc::Sender<BlockRange>,
+    geth_rpc_wss: Option<String>,
+) {
+    let mut last_safe_block_processed = latest_safe_block;
+    loop {
+        if let Err(e) = watch_blocks(
+            chain_state.clone(),
+            &mut last_safe_block_processed,
+            tx.clone(),
+            geth_rpc_wss.clone(),
+        )
+        .in_current_span()
+        .await
+        {
+            tracing::error!("watching blocks. error: {:?}", e);
+            time::sleep(RETRY_INTERVAL).await;
+        }
+    }
 }

 /// Watch for new blocks and send the range of blocks for which events have not been handled to the `tx` channel.

@@ -323,59 +376,72 @@ pub struct BlockRange {
 /// know about it.
 pub async fn watch_blocks(
     chain_state: BlockchainState,
-    latest_safe_block: BlockNumber,
+    last_safe_block_processed: &mut BlockNumber,
     tx: mpsc::Sender<BlockRange>,
     geth_rpc_wss: Option<String>,
 ) -> Result<()> {
-    tracing::info!(
-        "Watching blocks to handle new events for chain: {}",
-        &chain_state.id
-    );
-    let mut last_safe_block_processed = latest_safe_block;
+    tracing::info!("Watching blocks to handle new events");

     let provider_option = match geth_rpc_wss {
-        Some(wss) => Some(Provider::<Ws>::connect(wss).await?),
+        Some(wss) => Some(match Provider::<Ws>::connect(wss.clone()).await {
+            Ok(provider) => provider,
+            Err(e) => {
+                tracing::error!("Error while connecting to wss: {}. error: {:?}", wss, e);
+                return Err(e.into());
+            }
+        }),
         None => {
-            tracing::info!("No wss provided for chain: {}", &chain_state.id);
+            tracing::info!("No wss provided");
            None
        }
     };

     let mut stream_option = match provider_option {
-        Some(ref provider) => Some(provider.subscribe_blocks().await?),
+        Some(ref provider) => Some(match provider.subscribe_blocks().await {
+            Ok(client) => client,
+            Err(e) => {
+                tracing::error!("Error while subscribing to blocks. error {:?}", e);
+                return Err(e.into());
+            }
+        }),
         None => None,
     };

     loop {
         match stream_option {
             Some(ref mut stream) => {
-                stream.next().await;
+                if let None = stream.next().await {
+                    tracing::error!("Error blocks subscription stream ended");
+                    return Err(anyhow!("Error blocks subscription stream ended"));
+                }
             }
             None => {
                 time::sleep(POLL_INTERVAL).await;
             }
         }

-        let latest_safe_block = get_latest_safe_block(&chain_state).await;
-        if latest_safe_block > last_safe_block_processed {
+        let latest_safe_block = get_latest_safe_block(&chain_state).in_current_span().await;
+        if latest_safe_block > *last_safe_block_processed {
             match tx
                 .send(BlockRange {
-                    from: last_safe_block_processed + 1,
+                    from: *last_safe_block_processed + 1,
                     to: latest_safe_block,
                 })
                 .await
             {
                 Ok(_) => {
                     tracing::info!(
-                        "Block range sent to handle events for chain {}: {} to {}",
-                        &chain_state.id,
-                        &last_safe_block_processed + 1,
-                        &latest_safe_block
+                        from_block = *last_safe_block_processed + 1,
+                        to_block = &latest_safe_block,
+                        "Block range sent to handle events",
                     );
-                    last_safe_block_processed = latest_safe_block;
+                    *last_safe_block_processed = latest_safe_block;
                 }
                 Err(e) => {
-                    tracing::error!("Error while sending block range to handle events for chain {}. These will be handled in next call. error: {:?}",&chain_state.id,e);
+                    tracing::error!(
+                        "Error while sending block range to handle events. These will be handled in next call. error: {:?}",
+                        e
+                    );
                 }
             };
         }

@@ -383,17 +449,15 @@ pub async fn watch_blocks(
     }
 }

 /// It waits on rx channel to receive block ranges and then calls process_block_range to process them.
+#[tracing::instrument(skip_all)]
 pub async fn process_new_blocks(
     chain_state: BlockchainState,
     mut rx: mpsc::Receiver<BlockRange>,
     contract: Arc<SignablePythContract>,
     gas_limit: U256,
 ) {
+    tracing::info!("Waiting for new block ranges to process");
     loop {
-        tracing::info!(
-            "Waiting for new block ranges to process for chain: {}",
-            &chain_state.id
-        );
         if let Some(block_range) = rx.recv().await {
             process_block_range(
                 block_range,

@@ -401,7 +465,23 @@ pub async fn process_new_blocks(
                 gas_limit,
                 chain_state.clone(),
             )
+            .in_current_span()
             .await;
         }
     }
 }
+
+/// Processes the backlog_range for a chain.
+#[tracing::instrument(skip_all)]
+pub async fn process_backlog(
+    backlog_range: BlockRange,
+    contract: Arc<SignablePythContract>,
+    gas_limit: U256,
+    chain_state: BlockchainState,
+) {
+    tracing::info!("Processing backlog");
+    process_block_range(backlog_range, contract, gas_limit, chain_state)
+        .in_current_span()
+        .await;
+    tracing::info!("Backlog processed");
+}

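Stepping back, this refactor keeps the keeper's original pipeline: one task watches for new safe blocks and emits `BlockRange`s, a bounded channel buffers them, and another task processes each range; the change is mainly that the retry loop moved into `watch_blocks_wrapper` and tracing spans now propagate via `in_current_span()`. A stripped-down sketch of that channel flow, assuming tokio; plain `u64` block numbers stand in for the ethers types:

use tokio::sync::mpsc;

// Simplified BlockRange, as in the keeper above.
#[derive(Debug)]
struct BlockRange {
    from: u64,
    to: u64,
}

#[tokio::main]
async fn main() {
    // Same shape as the keeper: a bounded channel carries block ranges from
    // the watcher task to the processor task.
    let (tx, mut rx) = mpsc::channel::<BlockRange>(1000);

    // Watcher: sends each newly observed safe-block range exactly once.
    let watcher = tokio::spawn(async move {
        let mut last_processed = 100u64;
        for latest_safe in [103u64, 103, 110] {
            if latest_safe > last_processed {
                // If the receiver lags, send() awaits instead of dropping ranges.
                let _ = tx
                    .send(BlockRange { from: last_processed + 1, to: latest_safe })
                    .await;
                last_processed = latest_safe;
            }
        }
        // tx is dropped here; rx.recv() then yields None and the processor exits.
    });

    // Processor: drains ranges; the real keeper fetches and handles the
    // callback events for each batch here.
    while let Some(range) = rx.recv().await {
        println!("processing blocks {}..={}", range.from, range.to);
    }
    watcher.await.unwrap();
}
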
@@ -1796,7 +1796,7 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"

 [[package]]
 name = "hermes"
-version = "0.5.5"
+version = "0.5.9"
 dependencies = [
  "anyhow",
  "async-trait",

@@ -3138,7 +3138,7 @@ dependencies = [

 [[package]]
 name = "pythnet-sdk"
-version = "2.0.0"
+version = "2.1.0"
 dependencies = [
  "bincode",
  "borsh 0.10.3",

@@ -1,6 +1,6 @@
 [package]
 name = "hermes"
-version = "0.5.5"
+version = "0.5.9"
 description = "Hermes is an agent that provides Verified Prices from the Pythnet Pyth Oracle."
 edition = "2021"

@@ -1,6 +1,5 @@
 use {
     crate::{
-        aggregate::AggregationEvent,
         config::RunOptions,
         state::State,
     },

@@ -14,7 +13,6 @@ use {
     ipnet::IpNet,
     serde_qs::axum::QsQueryConfig,
     std::sync::Arc,
-    tokio::sync::broadcast::Sender,
     tower_http::cors::CorsLayer,
     utoipa::OpenApi,
     utoipa_swagger_ui::SwaggerUi,

@@ -30,7 +28,6 @@ pub struct ApiState<S = State> {
     pub state: Arc<S>,
     pub ws: Arc<ws::WsState>,
     pub metrics: Arc<metrics_middleware::Metrics>,
-    pub update_tx: Sender<AggregationEvent>,
 }

 /// Manually implement `Clone` as the derive macro will try and slap `Clone` on

@@ -41,7 +38,6 @@ impl<S> Clone for ApiState<S> {
             state: self.state.clone(),
             ws: self.ws.clone(),
             metrics: self.metrics.clone(),
-            update_tx: self.update_tx.clone(),
         }
     }
 }

@@ -51,7 +47,6 @@ impl ApiState<State> {
         state: Arc<State>,
         ws_whitelist: Vec<IpNet>,
         requester_ip_header_name: String,
-        update_tx: Sender<AggregationEvent>,
     ) -> Self {
         Self {
             metrics: Arc::new(metrics_middleware::Metrics::new(state.clone())),

@@ -61,24 +56,18 @@ impl ApiState<State> {
                 state.clone(),
             )),
             state,
-            update_tx,
         }
     }
 }

-#[tracing::instrument(skip(opts, state, update_tx))]
-pub async fn spawn(
-    opts: RunOptions,
-    state: Arc<State>,
-    update_tx: Sender<AggregationEvent>,
-) -> Result<()> {
+#[tracing::instrument(skip(opts, state))]
+pub async fn spawn(opts: RunOptions, state: Arc<State>) -> Result<()> {
     let state = {
         let opts = opts.clone();
         ApiState::new(
             state,
             opts.rpc.ws_whitelist,
             opts.rpc.requester_ip_header_name,
-            update_tx,
         )
     };

@@ -135,6 +124,7 @@ pub async fn run(opts: RunOptions, state: ApiState) -> Result<()> {
     // Initialize Axum Router. Note the type here is a `Router<State>` due to the use of the
     // `with_state` method which replaces `Body` with `State` in the type signature.
     let app = Router::new();
+    #[allow(deprecated)]
     let app = app
         .merge(SwaggerUi::new("/docs").url("/docs/openapi.json", ApiDoc::openapi()))
         .route("/", get(rest::index))

@@ -1,4 +1,4 @@
-use crate::aggregate::UnixTimestamp;
+use crate::state::aggregate::UnixTimestamp;

 // Example values for the utoipa API docs.
 // Note that each of these expressions is only evaluated once when the documentation is created,

@@ -1,5 +1,6 @@
 use {
     super::ApiState,
+    crate::state::aggregate::Aggregates,
     axum::{
         http::StatusCode,
         response::{

@@ -93,11 +94,15 @@ impl IntoResponse for RestError {
 }

 /// Verify that the price ids exist in the aggregate state.
-pub async fn verify_price_ids_exist(
-    state: &ApiState,
+pub async fn verify_price_ids_exist<S>(
+    state: &ApiState<S>,
     price_ids: &[PriceIdentifier],
-) -> Result<(), RestError> {
-    let all_ids = crate::aggregate::get_price_feed_ids(&*state.state).await;
+) -> Result<(), RestError>
+where
+    S: Aggregates,
+{
+    let state = &*state.state;
+    let all_ids = Aggregates::get_price_feed_ids(state).await;
     let missing_ids = price_ids
         .iter()
         .filter(|id| !all_ids.contains(id))

@@ -1,10 +1,6 @@
 use {
     super::verify_price_ids_exist,
     crate::{
-        aggregate::{
-            RequestTime,
-            UnixTimestamp,
-        },
         api::{
             doc_examples,
             rest::RestError,

@@ -12,6 +8,12 @@ use {
                 PriceIdInput,
                 RpcPriceFeed,
             },
+            ApiState,
+        },
+        state::aggregate::{
+            Aggregates,
+            RequestTime,
+            UnixTimestamp,
         },
     },
     anyhow::Result,

@@ -47,6 +49,8 @@ pub struct GetPriceFeedQueryParams {
     binary: bool,
 }

+/// **Deprecated: use /v2/updates/price/{publish_time} instead**
+///
 /// Get a price update for a price feed with a specific timestamp
 ///
 /// Given a price feed id and timestamp, retrieve the Pyth price update closest to that timestamp.

@@ -60,16 +64,20 @@ pub struct GetPriceFeedQueryParams {
         GetPriceFeedQueryParams
     )
 )]
-pub async fn get_price_feed(
-    State(state): State<crate::api::ApiState>,
+#[deprecated]
+pub async fn get_price_feed<S>(
+    State(state): State<ApiState<S>>,
     QsQuery(params): QsQuery<GetPriceFeedQueryParams>,
-) -> Result<Json<RpcPriceFeed>, RestError> {
+) -> Result<Json<RpcPriceFeed>, RestError>
+where
+    S: Aggregates,
+{
     let price_id: PriceIdentifier = params.id.into();

     verify_price_ids_exist(&state, &[price_id]).await?;

-    let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
-        &*state.state,
+    let state = &*state.state;
+    let price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
+        state,
         &[price_id],
         RequestTime::FirstAfter(params.publish_time),
     )

@@ -1,15 +1,16 @@
 use {
     super::verify_price_ids_exist,
     crate::{
-        aggregate::{
-            get_price_feeds_with_update_data,
-            RequestTime,
-            UnixTimestamp,
-        },
         api::{
             doc_examples,
             rest::RestError,
             types::PriceIdInput,
+            ApiState,
+        },
+        state::aggregate::{
+            Aggregates,
+            RequestTime,
+            UnixTimestamp,
         },
     },
     anyhow::Result,

@@ -54,6 +55,8 @@ pub struct GetVaaResponse {
     publish_time: UnixTimestamp,
 }

+/// **Deprecated: use /v2/updates/price/{publish_time} instead**
+///
 /// Get a VAA for a price feed with a specific timestamp
 ///
 /// Given a price feed id and timestamp, retrieve the Pyth price update closest to that timestamp.

@@ -68,16 +71,20 @@ pub struct GetVaaResponse {
         GetVaaQueryParams
     )
 )]
-pub async fn get_vaa(
-    State(state): State<crate::api::ApiState>,
+#[deprecated]
+pub async fn get_vaa<S>(
+    State(state): State<ApiState<S>>,
     QsQuery(params): QsQuery<GetVaaQueryParams>,
-) -> Result<Json<GetVaaResponse>, RestError> {
+) -> Result<Json<GetVaaResponse>, RestError>
+where
+    S: Aggregates,
+{
     let price_id: PriceIdentifier = params.id.into();

     verify_price_ids_exist(&state, &[price_id]).await?;

-    let price_feeds_with_update_data = get_price_feeds_with_update_data(
-        &*state.state,
+    let state = &*state.state;
+    let price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
+        state,
         &[price_id],
         RequestTime::FirstAfter(params.publish_time),
     )

@@ -1,11 +1,15 @@
 use {
     super::verify_price_ids_exist,
     crate::{
-        aggregate::{
+        api::{
+            rest::RestError,
+            ApiState,
+        },
+        state::aggregate::{
+            Aggregates,
             RequestTime,
             UnixTimestamp,
         },
-        api::rest::RestError,
     },
     anyhow::Result,
     axum::{

@@ -42,6 +46,8 @@ pub struct GetVaaCcipResponse {
     data: String, // TODO: Use a typed wrapper for the hex output with leading 0x.
 }

+/// **Deprecated: use /v2/updates/price/{publish_time} instead**
+///
 /// Get a VAA for a price feed using CCIP
 ///
 /// This endpoint accepts a single argument which is a hex-encoded byte string of the following form:

@@ -56,25 +62,30 @@ pub struct GetVaaCcipResponse {
         GetVaaCcipQueryParams
     )
 )]
-pub async fn get_vaa_ccip(
-    State(state): State<crate::api::ApiState>,
+#[deprecated]
+pub async fn get_vaa_ccip<S>(
+    State(state): State<ApiState<S>>,
     QsQuery(params): QsQuery<GetVaaCcipQueryParams>,
-) -> Result<Json<GetVaaCcipResponse>, RestError> {
+) -> Result<Json<GetVaaCcipResponse>, RestError>
+where
+    S: Aggregates,
+{
     let price_id: PriceIdentifier = PriceIdentifier::new(
         params.data[0..32]
             .try_into()
             .map_err(|_| RestError::InvalidCCIPInput)?,
     );
-    verify_price_ids_exist(&state, &[price_id]).await?;

     let publish_time = UnixTimestamp::from_be_bytes(
         params.data[32..40]
             .try_into()
             .map_err(|_| RestError::InvalidCCIPInput)?,
     );

+    verify_price_ids_exist(&state, &[price_id]).await?;
+
-    let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
-        &*state.state,
+    let state = &*state.state;
+    let price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
+        state,
         &[price_id],
         RequestTime::FirstAfter(publish_time),
     )

@@ -1,13 +1,17 @@
 use {
     super::verify_price_ids_exist,
     crate::{
-        aggregate::RequestTime,
         api::{
             rest::RestError,
             types::{
                 PriceIdInput,
                 RpcPriceFeed,
             },
+            ApiState,
+        },
+        state::aggregate::{
+            Aggregates,
+            RequestTime,
         },
     },
     anyhow::Result,

@@ -46,6 +50,8 @@ pub struct LatestPriceFeedsQueryParams {
     binary: bool,
 }

+/// **Deprecated: use /v2/updates/price/latest instead**
+///
 /// Get the latest price updates by price feed id.
 ///
 /// Given a collection of price feed ids, retrieve the latest Pyth price for each price feed.

@@ -59,19 +65,20 @@ pub struct LatestPriceFeedsQueryParams {
         LatestPriceFeedsQueryParams
     )
 )]
-pub async fn latest_price_feeds(
-    State(state): State<crate::api::ApiState>,
+#[deprecated]
+pub async fn latest_price_feeds<S>(
+    State(state): State<ApiState<S>>,
     QsQuery(params): QsQuery<LatestPriceFeedsQueryParams>,
-) -> Result<Json<Vec<RpcPriceFeed>>, RestError> {
+) -> Result<Json<Vec<RpcPriceFeed>>, RestError>
+where
+    S: Aggregates,
+{
     let price_ids: Vec<PriceIdentifier> = params.ids.into_iter().map(|id| id.into()).collect();

     verify_price_ids_exist(&state, &price_ids).await?;

-    let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
-        &*state.state,
-        &price_ids,
-        RequestTime::Latest,
-    )
+    let state = &*state.state;
+    let price_feeds_with_update_data =
+        Aggregates::get_price_feeds_with_update_data(state, &price_ids, RequestTime::Latest)
         .await
         .map_err(|e| {
             tracing::warn!(

@@ -1,11 +1,15 @@
 use {
     super::verify_price_ids_exist,
     crate::{
-        aggregate::RequestTime,
         api::{
             doc_examples,
             rest::RestError,
             types::PriceIdInput,
+            ApiState,
+        },
+        state::aggregate::{
+            Aggregates,
+            RequestTime,
         },
     },
     anyhow::Result,

@@ -39,6 +43,8 @@ pub struct LatestVaasQueryParams {
 }

+/// **Deprecated: use /v2/updates/price/latest instead**
+///
 /// Get VAAs for a set of price feed ids.
 ///
 /// Given a collection of price feed ids, retrieve the latest VAA for each. The returned VAA(s) can

@@ -54,19 +60,20 @@ pub struct LatestVaasQueryParams {
         (status = 200, description = "VAAs retrieved successfully", body = Vec<String>, example=json!([doc_examples::vaa_example()]))
     ),
 )]
-pub async fn latest_vaas(
-    State(state): State<crate::api::ApiState>,
+#[deprecated]
+pub async fn latest_vaas<S>(
+    State(state): State<ApiState<S>>,
     QsQuery(params): QsQuery<LatestVaasQueryParams>,
-) -> Result<Json<Vec<String>>, RestError> {
+) -> Result<Json<Vec<String>>, RestError>
+where
+    S: Aggregates,
+{
     let price_ids: Vec<PriceIdentifier> = params.ids.into_iter().map(|id| id.into()).collect();

     verify_price_ids_exist(&state, &price_ids).await?;

-    let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
-        &*state.state,
-        &price_ids,
-        RequestTime::Latest,
-    )
+    let state = &*state.state;
+    let price_feeds_with_update_data =
+        Aggregates::get_price_feeds_with_update_data(state, &price_ids, RequestTime::Latest)
         .await
         .map_err(|e| {
             tracing::warn!(

@@ -1,7 +1,11 @@
 use {
-    crate::api::{
+    crate::{
+        api::{
             rest::RestError,
             types::RpcPriceIdentifier,
+            ApiState,
+        },
+        state::aggregate::Aggregates,
     },
     anyhow::Result,
     axum::{

@@ -10,6 +14,8 @@ use {
     },
 };

+/// **Deprecated: use /v2/price_feeds instead**
+///
 /// Get the set of price feed IDs.
 ///
 /// This endpoint fetches all of the price feed IDs for which price updates can be retrieved.

@@ -21,10 +27,15 @@ use {
         (status = 200, description = "Price feed ids retrieved successfully", body = Vec<RpcPriceIdentifier>)
     ),
 )]
-pub async fn price_feed_ids(
-    State(state): State<crate::api::ApiState>,
-) -> Result<Json<Vec<RpcPriceIdentifier>>, RestError> {
-    let price_feed_ids = crate::aggregate::get_price_feed_ids(&*state.state)
+#[deprecated]
+pub async fn price_feed_ids<S>(
+    State(state): State<ApiState<S>>,
+) -> Result<Json<Vec<RpcPriceIdentifier>>, RestError>
+where
+    S: Aggregates,
+{
+    let state = &*state.state;
+    let price_feed_ids = Aggregates::get_price_feed_ids(state)
         .await
         .into_iter()
         .map(RpcPriceIdentifier::from)

@@ -1,14 +1,24 @@
-use axum::{
-    extract::State,
-    http::StatusCode,
-    response::{
-        IntoResponse,
-        Response,
-    },
-};
+use {
+    crate::{
+        api::ApiState,
+        state::aggregate::Aggregates,
+    },
+    axum::{
+        extract::State,
+        http::StatusCode,
+        response::{
+            IntoResponse,
+            Response,
+        },
+    },
+};

-pub async fn ready(State(state): State<crate::api::ApiState>) -> Response {
-    match crate::aggregate::is_ready(&state.state).await {
+pub async fn ready<S>(State(state): State<ApiState<S>>) -> Response
+where
+    S: Aggregates,
+{
+    let state = &*state.state;
+    match Aggregates::is_ready(state).await {
         true => (StatusCode::OK, "OK").into_response(),
         false => (StatusCode::SERVICE_UNAVAILABLE, "Service Unavailable").into_response(),
     }

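Across these handler files the same refactor repeats: every handler becomes generic over `S: Aggregates`, so state access goes through a trait rather than the concrete `State` — which, among other things, lets the handlers be exercised against a mock state. A self-contained sketch of that pattern with the axum plumbing stripped away; the single-method trait and `MockState` are illustrative (the real Hermes trait also has `get_price_feed_ids`, `get_price_feeds_with_update_data`, and `subscribe`), and `async fn` in traits assumes Rust 1.75+:

// Illustrative single-method version of the Aggregates trait.
trait Aggregates {
    async fn is_ready(&self) -> bool;
}

struct MockState {
    ready: bool,
}

impl Aggregates for MockState {
    async fn is_ready(&self) -> bool {
        self.ready
    }
}

// Generic counterpart of the ready handler above, minus axum types.
async fn ready<S: Aggregates>(state: &S) -> (u16, &'static str) {
    match state.is_ready().await {
        true => (200, "OK"),
        false => (503, "Service Unavailable"),
    }
}

#[tokio::main]
async fn main() {
    assert_eq!(ready(&MockState { ready: true }).await.0, 200);
    assert_eq!(ready(&MockState { ready: false }).await.0, 503);
}
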
@@ -1,6 +1,5 @@
 use {
     crate::{
-        aggregate::RequestTime,
         api::{
             rest::{
                 verify_price_ids_exist,

@@ -13,6 +12,11 @@ use {
                 PriceIdInput,
                 PriceUpdate,
             },
+            ApiState,
+        },
+        state::aggregate::{
+            Aggregates,
+            RequestTime,
         },
     },
     anyhow::Result,

@@ -73,19 +77,19 @@ fn default_true() -> bool {
         LatestPriceUpdatesQueryParams
     )
 )]
-pub async fn latest_price_updates(
-    State(state): State<crate::api::ApiState>,
+pub async fn latest_price_updates<S>(
+    State(state): State<ApiState<S>>,
     QsQuery(params): QsQuery<LatestPriceUpdatesQueryParams>,
-) -> Result<Json<PriceUpdate>, RestError> {
+) -> Result<Json<PriceUpdate>, RestError>
+where
+    S: Aggregates,
+{
     let price_ids: Vec<PriceIdentifier> = params.ids.into_iter().map(|id| id.into()).collect();

     verify_price_ids_exist(&state, &price_ids).await?;

-    let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
-        &*state.state,
-        &price_ids,
-        RequestTime::Latest,
-    )
+    let state = &*state.state;
+    let price_feeds_with_update_data =
+        Aggregates::get_price_feeds_with_update_data(state, &price_ids, RequestTime::Latest)
         .await
         .map_err(|e| {
             tracing::warn!(

@@ -1,9 +1,5 @@
 use {
     crate::{
-        aggregate::{
-            AggregationEvent,
-            RequestTime,
-        },
         api::{
             rest::{
                 verify_price_ids_exist,

@@ -19,6 +15,11 @@ use {
             },
             ApiState,
         },
+        state::aggregate::{
+            Aggregates,
+            AggregationEvent,
+            RequestTime,
+        },
     },
     anyhow::Result,
     axum::{

@@ -88,16 +89,22 @@ fn default_true() -> bool {
     params(StreamPriceUpdatesQueryParams)
 )]
 /// SSE route handler for streaming price updates.
-pub async fn price_stream_sse_handler(
-    State(state): State<ApiState>,
+pub async fn price_stream_sse_handler<S>(
+    State(state): State<ApiState<S>>,
     QsQuery(params): QsQuery<StreamPriceUpdatesQueryParams>,
-) -> Result<Sse<impl Stream<Item = Result<Event, Infallible>>>, RestError> {
+) -> Result<Sse<impl Stream<Item = Result<Event, Infallible>>>, RestError>
+where
+    S: Aggregates,
+    S: Sync,
+    S: Send,
+    S: 'static,
+{
     let price_ids: Vec<PriceIdentifier> = params.ids.into_iter().map(Into::into).collect();

     verify_price_ids_exist(&state, &price_ids).await?;

     // Clone the update_tx receiver to listen for new price updates
-    let update_rx: broadcast::Receiver<AggregationEvent> = state.update_tx.subscribe();
+    let update_rx: broadcast::Receiver<AggregationEvent> = Aggregates::subscribe(&*state.state);

     // Convert the broadcast receiver into a Stream
     let stream = BroadcastStream::new(update_rx);

@@ -134,15 +141,18 @@ pub async fn price_stream_sse_handler(
     Ok(Sse::new(sse_stream).keep_alive(KeepAlive::default()))
 }

-async fn handle_aggregation_event(
+async fn handle_aggregation_event<S>(
     event: AggregationEvent,
-    state: ApiState,
+    state: ApiState<S>,
     mut price_ids: Vec<PriceIdentifier>,
     encoding: EncodingType,
     parsed: bool,
     benchmarks_only: bool,
     allow_unordered: bool,
-) -> Result<Option<PriceUpdate>> {
+) -> Result<Option<PriceUpdate>>
+where
+    S: Aggregates,
+{
     // Handle out-of-order events
     if let AggregationEvent::OutOfOrder { .. } = event {
         if !allow_unordered {

@@ -151,11 +161,11 @@ async fn handle_aggregation_event(
     }

     // We check for available price feed ids to ensure that the price feed ids provided exists since price feeds can be removed.
-    let available_price_feed_ids = crate::aggregate::get_price_feed_ids(&*state.state).await;
+    let available_price_feed_ids = Aggregates::get_price_feed_ids(&*state.state).await;

     price_ids.retain(|price_feed_id| available_price_feed_ids.contains(price_feed_id));

-    let mut price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
+    let mut price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
         &*state.state,
         &price_ids,
         RequestTime::AtSlot(event.slot()),

@@ -185,7 +195,7 @@ async fn handle_aggregation_event(
         .iter()
         .any(|price_feed| price_feed.id == RpcPriceIdentifier::from(*price_id))
     });
-    price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
+    price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
         &*state.state,
         &price_ids,
         RequestTime::AtSlot(event.slot()),

@ -1,9 +1,5 @@
use {
crate::{
aggregate::{
RequestTime,
UnixTimestamp,
},
api::{
doc_examples,
rest::{
@ -17,6 +13,12 @@ use {
PriceIdInput,
PriceUpdate,
},
ApiState,
},
state::aggregate::{
Aggregates,
RequestTime,
UnixTimestamp,
},
},
anyhow::Result,
@ -87,18 +89,22 @@ fn default_true() -> bool {
TimestampPriceUpdatesQueryParams
)
)]
pub async fn timestamp_price_updates(
State(state): State<crate::api::ApiState>,
pub async fn timestamp_price_updates<S>(
State(state): State<ApiState<S>>,
Path(path_params): Path<TimestampPriceUpdatesPathParams>,
QsQuery(query_params): QsQuery<TimestampPriceUpdatesQueryParams>,
) -> Result<Json<PriceUpdate>, RestError> {
) -> Result<Json<PriceUpdate>, RestError>
where
S: Aggregates,
{
let price_ids: Vec<PriceIdentifier> =
query_params.ids.into_iter().map(|id| id.into()).collect();

verify_price_ids_exist(&state, &price_ids).await?;

let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
&*state.state,
let state = &*state.state;
let price_feeds_with_update_data = Aggregates::get_price_feeds_with_update_data(
state,
&price_ids,
RequestTime::FirstAfter(path_params.publish_time),
)
@ -1,6 +1,6 @@
use {
super::doc_examples,
crate::aggregate::{
crate::state::aggregate::{
PriceFeedUpdate,
PriceFeedsWithUpdateData,
Slot,
@ -1,14 +1,18 @@
use {
super::types::{
super::{
types::{
PriceIdInput,
RpcPriceFeed,
},
crate::{
ApiState,
},
crate::state::{
aggregate::{
Aggregates,
AggregationEvent,
RequestTime,
},
state::State,
State,
},
anyhow::{
anyhow,
@ -212,11 +216,10 @@ pub async fn ws_route_handler(
}

#[tracing::instrument(skip(stream, state, subscriber_ip))]
async fn websocket_handler(
stream: WebSocket,
state: super::ApiState,
subscriber_ip: Option<IpAddr>,
) {
async fn websocket_handler<S>(stream: WebSocket, state: ApiState<S>, subscriber_ip: Option<IpAddr>)
where
S: Aggregates,
{
let ws_state = state.ws.clone();

// Retain the recent rate limit data for the IP addresses to
@ -235,7 +238,7 @@ async fn websocket_handler(
})
.inc();

let notify_receiver = state.update_tx.subscribe();
let notify_receiver = Aggregates::subscribe(&*state.state);
let (sender, receiver) = stream.split();
let mut subscriber = Subscriber::new(
id,
@ -254,11 +257,11 @@ pub type SubscriberId = usize;

/// Subscriber is an actor that handles a single websocket connection.
/// It listens to the store for updates and sends them to the client.
pub struct Subscriber {
pub struct Subscriber<S> {
id: SubscriberId,
ip_addr: Option<IpAddr>,
closed: bool,
store: Arc<State>,
state: Arc<S>,
ws_state: Arc<WsState>,
notify_receiver: Receiver<AggregationEvent>,
receiver: SplitStream<WebSocket>,
@ -269,11 +272,14 @@ pub struct Subscriber {
responded_to_ping: bool,
}

impl Subscriber {
impl<S> Subscriber<S>
where
S: Aggregates,
{
pub fn new(
id: SubscriberId,
ip_addr: Option<IpAddr>,
store: Arc<State>,
state: Arc<S>,
ws_state: Arc<WsState>,
notify_receiver: Receiver<AggregationEvent>,
receiver: SplitStream<WebSocket>,
@ -283,7 +289,7 @@ impl Subscriber {
id,
ip_addr,
closed: false,
store,
state,
ws_state,
notify_receiver,
receiver,
@ -350,8 +356,9 @@ impl Subscriber {
.cloned()
.collect::<Vec<_>>();

let updates = match crate::aggregate::get_price_feeds_with_update_data(
&*self.store,
let state = &*self.state;
let updates = match Aggregates::get_price_feeds_with_update_data(
state,
&price_feed_ids,
RequestTime::AtSlot(event.slot()),
)
@ -364,8 +371,7 @@ impl Subscriber {
// subscription. In this case we just remove the non-existing
// price feed from the list and will keep sending updates for
// the rest.
let available_price_feed_ids =
crate::aggregate::get_price_feed_ids(&*self.store).await;
let available_price_feed_ids = Aggregates::get_price_feed_ids(state).await;

self.price_feeds_with_config
.retain(|price_feed_id, _| available_price_feed_ids.contains(price_feed_id));
@ -376,8 +382,8 @@ impl Subscriber {
.cloned()
.collect::<Vec<_>>();

crate::aggregate::get_price_feeds_with_update_data(
&*self.store,
Aggregates::get_price_feeds_with_update_data(
state,
&price_feed_ids,
RequestTime::AtSlot(event.slot()),
)
@ -545,7 +551,7 @@ impl Subscriber {
allow_out_of_order,
}) => {
let price_ids: Vec<PriceIdentifier> = ids.into_iter().map(|id| id.into()).collect();
let available_price_ids = crate::aggregate::get_price_feed_ids(&*self.store).await;
let available_price_ids = Aggregates::get_price_feed_ids(&*self.state).await;

let not_found_price_ids: Vec<&PriceIdentifier> = price_ids
.iter()
@ -19,9 +19,9 @@ pub struct Options {
#[arg(env = "PYTHNET_HTTP_ADDR")]
pub http_addr: String,

/// Pyth mapping account address.
#[arg(long = "mapping-address")]
/// Pyth mapping account address on Pythnet.
#[arg(long = "pythnet-mapping-addr")]
#[arg(default_value = DEFAULT_PYTHNET_MAPPING_ADDR)]
#[arg(env = "MAPPING_ADDRESS")]
#[arg(env = "PYTHNET_MAPPING_ADDR")]
pub mapping_addr: Pubkey,
}
@ -17,7 +17,6 @@ use {
},
};

mod aggregate;
mod api;
mod config;
mod metrics_server;
@ -54,7 +53,7 @@ async fn init() -> Result<()> {
let (update_tx, _) = tokio::sync::broadcast::channel(1000);

// Initialize a cache store with a 1000 element circular buffer.
let store = State::new(update_tx.clone(), 1000, opts.benchmarks.endpoint.clone());
let state = State::new(update_tx.clone(), 1000, opts.benchmarks.endpoint.clone());

// Listen for Ctrl+C so we can set the exit flag and wait for a graceful shutdown.
spawn(async move {
@ -67,10 +66,10 @@ async fn init() -> Result<()> {
// Spawn all worker tasks, and wait for all to complete (which will happen if a shutdown
// signal has been observed).
let tasks = join_all(vec![
spawn(network::wormhole::spawn(opts.clone(), store.clone())),
spawn(network::pythnet::spawn(opts.clone(), store.clone())),
spawn(metrics_server::run(opts.clone(), store.clone())),
spawn(api::spawn(opts.clone(), store.clone(), update_tx)),
spawn(network::wormhole::spawn(opts.clone(), state.clone())),
spawn(network::pythnet::spawn(opts.clone(), state.clone())),
spawn(metrics_server::run(opts.clone(), state.clone())),
spawn(api::spawn(opts.clone(), state.clone())),
])
.await;
@ -4,10 +4,6 @@

use {
crate::{
aggregate::{
AccumulatorMessages,
Update,
},
api::types::PriceFeedMetadata,
config::RunOptions,
network::wormhole::{
@ -20,7 +16,14 @@ use {
PriceFeedMeta,
DEFAULT_PRICE_FEEDS_CACHE_UPDATE_INTERVAL,
},
state::State,
state::{
aggregate::{
AccumulatorMessages,
Aggregates,
Update,
},
State,
},
},
anyhow::{
anyhow,
@ -136,7 +139,7 @@ async fn fetch_bridge_data(
}
}

pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<()> {
pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<!> {
let client = PubsubClient::new(pythnet_ws_endpoint.as_ref()).await?;

let config = RpcProgramAccountsConfig {
@ -157,9 +160,7 @@ pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<()> {
.program_subscribe(&system_program::id(), Some(config))
.await?;

loop {
match notif.next().await {
Some(update) => {
while let Some(update) = notif.next().await {
let account: Account = match update.value.account.decode() {
Some(account) => account,
None => {
@ -182,8 +183,8 @@ pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<()> {
if candidate.to_string() == update.value.pubkey {
let store = store.clone();
tokio::spawn(async move {
if let Err(err) = crate::aggregate::store_update(
&store,
if let Err(err) = Aggregates::store_update(
&*store,
Update::AccumulatorMessages(accumulator_messages),
)
.await
@ -205,11 +206,8 @@ pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<()> {
}
};
}
None => {
return Err(anyhow!("Pythnet network listener terminated"));
}
}
}

Err(anyhow!("Pythnet network listener connection terminated"))
}

/// Fetch existing GuardianSet accounts from Wormhole.
@ -7,7 +7,13 @@
use {
crate::{
config::RunOptions,
state::State,
state::{
aggregate::{
Aggregates,
Update,
},
State,
},
},
anyhow::{
anyhow,
@ -43,7 +49,11 @@ use {
Digest,
Keccak256,
},
std::sync::Arc,
std::{
sync::Arc,
time::Duration,
},
tokio::time::Instant,
tonic::Request,
wormhole_sdk::{
vaa::{
@ -100,10 +110,10 @@ pub struct BridgeConfig {
/// GuardianSetData extracted from wormhole bridge account, due to no API.
#[derive(borsh::BorshDeserialize)]
pub struct GuardianSetData {
pub index: u32,
pub _index: u32,
pub keys: Vec<[u8; 20]>,
pub creation_time: u32,
pub expiration_time: u32,
pub _creation_time: u32,
pub _expiration_time: u32,
}

/// Update the guardian set with the given ID in the state.
@ -152,10 +162,16 @@ mod proto {
pub async fn spawn(opts: RunOptions, state: Arc<State>) -> Result<()> {
let mut exit = crate::EXIT.subscribe();
loop {
let current_time = Instant::now();
tokio::select! {
_ = exit.changed() => break,
Err(err) = run(opts.clone(), state.clone()) => {
tracing::error!(error = ?err, "Wormhole gRPC service failed.");

if current_time.elapsed() < Duration::from_secs(30) {
tracing::error!("Wormhole listener restarting too quickly. Sleep 1s.");
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
}
@ -164,7 +180,7 @@ pub async fn spawn(opts: RunOptions, state: Arc<State>) -> Result<()> {
}

#[tracing::instrument(skip(opts, state))]
async fn run(opts: RunOptions, state: Arc<State>) -> Result<()> {
async fn run(opts: RunOptions, state: Arc<State>) -> Result<!> {
let mut client = SpyRpcServiceClient::connect(opts.wormhole.spy_rpc_addr).await?;
let mut stream = client
.subscribe_signed_vaa(Request::new(SubscribeSignedVaaRequest {
@ -184,7 +200,7 @@ async fn run(opts: RunOptions, state: Arc<State>) -> Result<()> {
}
}

Ok(())
Err(anyhow!("Wormhole gRPC stream terminated."))
}

/// Process a message received via a Wormhole gRPC connection.
@ -352,9 +368,7 @@ pub async fn store_vaa(state: Arc<State>, sequence: u64, vaa_bytes: Vec<u8>) {
}

// Hand the VAA to the aggregate store.
if let Err(e) =
crate::aggregate::store_update(&state, crate::aggregate::Update::Vaa(vaa_bytes)).await
{
if let Err(e) = Aggregates::store_update(&*state, Update::Vaa(vaa_bytes)).await {
tracing::error!(error = ?e, "Failed to store VAA in aggregate store.");
}
}
@ -31,6 +31,7 @@ impl<'a> From<&'a State> for &'a PriceFeedMetaState {
}
}

#[async_trait::async_trait]
pub trait PriceFeedMeta {
async fn retrieve_price_feeds_metadata(&self) -> Result<Vec<PriceFeedMetadata>>;
async fn store_price_feeds_metadata(
@ -44,6 +45,7 @@ pub trait PriceFeedMeta {
) -> Result<Vec<PriceFeedMetadata>>;
}

#[async_trait::async_trait]
impl<T> PriceFeedMeta for T
where
for<'a> &'a T: Into<&'a PriceFeedMetaState>,
@ -2,14 +2,14 @@

use {
self::{
benchmarks::BenchmarksState,
cache::CacheState,
},
crate::{
aggregate::{
AggregateState,
AggregationEvent,
},
benchmarks::BenchmarksState,
cache::CacheState,
},
crate::{
network::wormhole::GuardianSet,
price_feeds_metadata::PriceFeedMetaState,
},
@ -28,6 +28,7 @@ use {
},
};

pub mod aggregate;
pub mod benchmarks;
pub mod cache;

@ -41,6 +42,9 @@ pub struct State {
/// State for the `PriceFeedMeta` service for looking up metadata related to Pyth price feeds.
pub price_feed_meta: PriceFeedMetaState,

/// State for accessing/storing Pyth price aggregates.
pub aggregates: AggregateState,

/// Sequence numbers of lately observed Vaas. Store uses this set
/// to ignore the previously observed Vaas as a performance boost.
pub observed_vaa_seqs: RwLock<BTreeSet<u64>>,
@ -48,12 +52,6 @@ pub struct State {
/// Wormhole guardian sets. It is used to verify Vaas before using them.
pub guardian_set: RwLock<BTreeMap<u32, GuardianSet>>,

/// The sender to the channel between Store and Api to notify completed updates.
pub api_update_tx: Sender<AggregationEvent>,

/// The aggregate module state.
pub aggregate_state: RwLock<AggregateState>,

/// Metrics registry
pub metrics_registry: RwLock<Registry>,
}
@ -69,10 +67,9 @@ impl State {
cache: CacheState::new(cache_size),
benchmarks: BenchmarksState::new(benchmarks_endpoint),
price_feed_meta: PriceFeedMetaState::new(),
aggregates: AggregateState::new(update_tx, &mut metrics_registry),
observed_vaa_seqs: RwLock::new(Default::default()),
guardian_set: RwLock::new(Default::default()),
api_update_tx: update_tx,
aggregate_state: RwLock::new(AggregateState::new(&mut metrics_registry)),
metrics_registry: RwLock::new(metrics_registry),
})
}
@ -20,6 +20,7 @@ use {
},
crate::{
network::wormhole::VaaBytes,
price_feeds_metadata::PriceFeedMeta,
state::{
benchmarks::Benchmarks,
cache::{
@ -59,6 +60,13 @@ use {
collections::HashSet,
time::Duration,
},
tokio::sync::{
broadcast::{
Receiver,
Sender,
},
RwLock,
},
wormhole_sdk::Vaa,
};

@ -102,8 +110,7 @@ impl AggregationEvent {
}
}

#[derive(Clone, Debug)]
pub struct AggregateState {
pub struct AggregateStateData {
/// The latest completed slot. This is used to check whether a completed state is new or out of
/// order.
pub latest_completed_slot: Option<Slot>,
@ -119,7 +126,7 @@ pub struct AggregateState {
pub metrics: metrics::Metrics,
}

impl AggregateState {
impl AggregateStateData {
pub fn new(metrics_registry: &mut Registry) -> Self {
Self {
latest_completed_slot: None,
@ -130,6 +137,20 @@ impl AggregateState {
}
}

pub struct AggregateState {
pub data: RwLock<AggregateStateData>,
pub api_update_tx: Sender<AggregationEvent>,
}

impl AggregateState {
pub fn new(update_tx: Sender<AggregationEvent>, metrics_registry: &mut Registry) -> Self {
Self {
data: RwLock::new(AggregateStateData::new(metrics_registry)),
api_update_tx: update_tx,
}
}
}

/// Accumulator messages coming from Pythnet validators.
///
/// The validators writes the accumulator messages using Borsh with
@ -177,9 +198,48 @@ const READINESS_STALENESS_THRESHOLD: Duration = Duration::from_secs(30);
/// 10 slots is almost 5 seconds.
const READINESS_MAX_ALLOWED_SLOT_LAG: Slot = 10;

/// Stores the update data in the store
#[tracing::instrument(skip(state, update))]
pub async fn store_update(state: &State, update: Update) -> Result<()> {
#[async_trait::async_trait]
pub trait Aggregates
where
Self: Cache,
Self: Benchmarks,
Self: PriceFeedMeta,
{
fn subscribe(&self) -> Receiver<AggregationEvent>;
async fn is_ready(&self) -> bool;
async fn store_update(&self, update: Update) -> Result<()>;
async fn get_price_feed_ids(&self) -> HashSet<PriceIdentifier>;
async fn get_price_feeds_with_update_data(
&self,
price_ids: &[PriceIdentifier],
request_time: RequestTime,
) -> Result<PriceFeedsWithUpdateData>;
}

/// Allow downcasting State into CacheState for functions that depend on the `Cache` service.
impl<'a> From<&'a State> for &'a AggregateState {
fn from(state: &'a State) -> &'a AggregateState {
&state.aggregates
}
}

#[async_trait::async_trait]
impl<T> Aggregates for T
where
for<'a> &'a T: Into<&'a AggregateState>,
T: Sync,
T: Send,
T: Cache,
T: Benchmarks,
T: PriceFeedMeta,
{
fn subscribe(&self) -> Receiver<AggregationEvent> {
self.into().api_update_tx.subscribe()
}

/// Stores the update data in the store
#[tracing::instrument(skip(self, update))]
async fn store_update(&self, update: Update) -> Result<()> {
// The slot that the update is originating from. It should be available
// in all the updates.
let slot = match update {
@ -192,14 +252,14 @@ pub async fn store_update(state: &State, update: Update) -> Result<()> {
tracing::info!(slot = proof.slot, "Storing VAA Merkle Proof.");

store_wormhole_merkle_verified_message(
state,
self,
proof.clone(),
update_vaa.to_owned(),
)
.await?;

state
.aggregate_state
self.into()
.data
.write()
.await
.metrics
@ -213,12 +273,11 @@ pub async fn store_update(state: &State, update: Update) -> Result<()> {
let slot = accumulator_messages.slot;
tracing::info!(slot = slot, "Storing Accumulator Messages.");

state
.store_accumulator_messages(accumulator_messages)
self.store_accumulator_messages(accumulator_messages)
.await?;

state
.aggregate_state
self.into()
.data
.write()
.await
.metrics
@ -229,15 +288,15 @@ pub async fn store_update(state: &State, update: Update) -> Result<()> {

// Update the aggregate state with the latest observed slot
{
let mut aggregate_state = state.aggregate_state.write().await;
let mut aggregate_state = self.into().data.write().await;
aggregate_state.latest_observed_slot = aggregate_state
.latest_observed_slot
.map(|latest| latest.max(slot))
.or(Some(slot));
}

let accumulator_messages = state.fetch_accumulator_messages(slot).await?;
let wormhole_merkle_state = state.fetch_wormhole_merkle_state(slot).await?;
let accumulator_messages = self.fetch_accumulator_messages(slot).await?;
let wormhole_merkle_state = self.fetch_wormhole_merkle_state(slot).await?;

let (accumulator_messages, wormhole_merkle_state) =
match (accumulator_messages, wormhole_merkle_state) {
@ -259,24 +318,29 @@ pub async fn store_update(state: &State, update: Update) -> Result<()> {
.collect::<HashSet<_>>();

tracing::info!(len = message_states.len(), "Storing Message States.");
state.store_message_states(message_states).await?;
self.store_message_states(message_states).await?;

// Update the aggregate state
let mut aggregate_state = state.aggregate_state.write().await;
let mut aggregate_state = self.into().data.write().await;

// Send update event to subscribers. We are purposefully ignoring the result
// because there might be no subscribers.
let _ = match aggregate_state.latest_completed_slot {
None => {
aggregate_state.latest_completed_slot.replace(slot);
state.api_update_tx.send(AggregationEvent::New { slot })
self.into()
.api_update_tx
.send(AggregationEvent::New { slot })
}
Some(latest) if slot > latest => {
state.prune_removed_keys(message_state_keys).await;
self.prune_removed_keys(message_state_keys).await;
aggregate_state.latest_completed_slot.replace(slot);
state.api_update_tx.send(AggregationEvent::New { slot })
self.into()
.api_update_tx
.send(AggregationEvent::New { slot })
}
_ => state
_ => self
.into()
.api_update_tx
.send(AggregationEvent::OutOfOrder { slot }),
};
@ -295,6 +359,59 @@ pub async fn store_update(state: &State, update: Update) -> Result<()> {
.observe(slot, metrics::Event::CompletedUpdate);

Ok(())
}

async fn get_price_feeds_with_update_data(
&self,
price_ids: &[PriceIdentifier],
request_time: RequestTime,
) -> Result<PriceFeedsWithUpdateData> {
match get_verified_price_feeds(self, price_ids, request_time.clone()).await {
Ok(price_feeds_with_update_data) => Ok(price_feeds_with_update_data),
Err(e) => {
if let RequestTime::FirstAfter(publish_time) = request_time {
return Benchmarks::get_verified_price_feeds(self, price_ids, publish_time)
.await;
}
Err(e)
}
}
}

async fn get_price_feed_ids(&self) -> HashSet<PriceIdentifier> {
Cache::message_state_keys(self)
.await
.iter()
.map(|key| PriceIdentifier::new(key.feed_id))
.collect()
}

async fn is_ready(&self) -> bool {
let metadata = self.into().data.read().await;
let price_feeds_metadata = PriceFeedMeta::retrieve_price_feeds_metadata(self)
.await
.unwrap();

let has_completed_recently = match metadata.latest_completed_update_at.as_ref() {
Some(latest_completed_update_time) => {
latest_completed_update_time.elapsed() < READINESS_STALENESS_THRESHOLD
}
None => false,
};

let is_not_behind = match (
metadata.latest_completed_slot,
metadata.latest_observed_slot,
) {
(Some(latest_completed_slot), Some(latest_observed_slot)) => {
latest_observed_slot - latest_completed_slot <= READINESS_MAX_ALLOWED_SLOT_LAG
}
_ => false,
};

let is_metadata_loaded = !price_feeds_metadata.is_empty();
has_completed_recently && is_not_behind && is_metadata_loaded
}
}

#[tracing::instrument(skip(accumulator_messages, wormhole_merkle_state))]
@ -389,73 +506,12 @@ where
})
}

pub async fn get_price_feeds_with_update_data<S>(
state: &S,
price_ids: &[PriceIdentifier],
request_time: RequestTime,
) -> Result<PriceFeedsWithUpdateData>
where
S: Cache,
S: Benchmarks,
{
match get_verified_price_feeds(state, price_ids, request_time.clone()).await {
Ok(price_feeds_with_update_data) => Ok(price_feeds_with_update_data),
Err(e) => {
if let RequestTime::FirstAfter(publish_time) = request_time {
return state
.get_verified_price_feeds(price_ids, publish_time)
.await;
}
Err(e)
}
}
}

pub async fn get_price_feed_ids<S>(state: &S) -> HashSet<PriceIdentifier>
where
S: Cache,
{
state
.message_state_keys()
.await
.iter()
.map(|key| PriceIdentifier::new(key.feed_id))
.collect()
}

pub async fn is_ready(state: &State) -> bool {
let metadata = state.aggregate_state.read().await;
let price_feeds_metadata = state.price_feed_meta.data.read().await;

let has_completed_recently = match metadata.latest_completed_update_at.as_ref() {
Some(latest_completed_update_time) => {
latest_completed_update_time.elapsed() < READINESS_STALENESS_THRESHOLD
}
None => false,
};

let is_not_behind = match (
metadata.latest_completed_slot,
metadata.latest_observed_slot,
) {
(Some(latest_completed_slot), Some(latest_observed_slot)) => {
latest_observed_slot - latest_completed_slot <= READINESS_MAX_ALLOWED_SLOT_LAG
}
_ => false,
};

let is_metadata_loaded = !price_feeds_metadata.is_empty();

has_completed_recently && is_not_behind && is_metadata_loaded
}

#[cfg(test)]
mod test {
use {
super::*,
crate::{
api::types::PriceFeedMetadata,
price_feeds_metadata::PriceFeedMeta,
state::test::setup_state,
},
futures::future::join_all,
@ -557,7 +613,7 @@ mod test {
}

pub async fn store_multiple_concurrent_valid_updates(state: Arc<State>, updates: Vec<Update>) {
let res = join_all(updates.into_iter().map(|u| store_update(&state, u))).await;
let res = join_all(updates.into_iter().map(|u| (&state).store_update(u))).await;
// Check that all store_update calls succeeded
assert!(res.into_iter().all(|r| r.is_ok()));
}
@ -583,14 +639,14 @@ mod test {

// Check the price ids are stored correctly
assert_eq!(
get_price_feed_ids(&*state).await,
(&*state).get_price_feed_ids().await,
vec![PriceIdentifier::new([100; 32])].into_iter().collect()
);

// Check get_price_feeds_with_update_data retrieves the correct
// price feed with correct update data.
let price_feeds_with_update_data = get_price_feeds_with_update_data(
&*state,
let price_feeds_with_update_data = (&*state)
.get_price_feeds_with_update_data(
&[PriceIdentifier::new([100; 32])],
RequestTime::Latest,
)
@ -708,7 +764,7 @@ mod test {

// Check the price ids are stored correctly
assert_eq!(
get_price_feed_ids(&*state).await,
(&*state).get_price_feed_ids().await,
vec![
PriceIdentifier::new([100; 32]),
PriceIdentifier::new([200; 32])
@ -718,8 +774,8 @@ mod test {
);

// Check that price feed 2 exists
assert!(get_price_feeds_with_update_data(
&*state,
assert!((&*state)
.get_price_feeds_with_update_data(
&[PriceIdentifier::new([200; 32])],
RequestTime::Latest,
)
@ -745,12 +801,12 @@ mod test {

// Check that price feed 2 does not exist anymore
assert_eq!(
get_price_feed_ids(&*state).await,
(&*state).get_price_feed_ids().await,
vec![PriceIdentifier::new([100; 32]),].into_iter().collect()
);

assert!(get_price_feeds_with_update_data(
&*state,
assert!((&*state)
.get_price_feeds_with_update_data(
&[PriceIdentifier::new([200; 32])],
RequestTime::Latest,
)
@ -791,8 +847,8 @@ mod test {
MockClock::advance(Duration::from_secs(1));

// Get the price feeds with update data
let price_feeds_with_update_data = get_price_feeds_with_update_data(
&*state,
let price_feeds_with_update_data = (&*state)
.get_price_feeds_with_update_data(
&[PriceIdentifier::new([100; 32])],
RequestTime::Latest,
)
@ -817,13 +873,13 @@ mod test {
.unwrap();

// Check the state is ready
assert!(is_ready(&state).await);
assert!((&state).is_ready().await);

// Advance the clock to make the prices stale
MockClock::advance_system_time(READINESS_STALENESS_THRESHOLD);
MockClock::advance(READINESS_STALENESS_THRESHOLD);
// Check the state is not ready
assert!(!is_ready(&state).await);
assert!(!(&state).is_ready().await);
}

/// Test that the state retains the latest slots upon cache eviction.
@ -866,8 +922,8 @@ mod test {

// Check the last 100 slots are retained
for slot in 900..1000 {
let price_feeds_with_update_data = get_price_feeds_with_update_data(
&*state,
let price_feeds_with_update_data = (&*state)
.get_price_feeds_with_update_data(
&[
PriceIdentifier::new([100; 32]),
PriceIdentifier::new([200; 32]),
@ -883,8 +939,8 @@ mod test {

// Check nothing else is retained
for slot in 0..900 {
assert!(get_price_feeds_with_update_data(
&*state,
assert!((&*state)
.get_price_feeds_with_update_data(
&[
PriceIdentifier::new([100; 32]),
PriceIdentifier::new([200; 32])
@ -1,14 +1,14 @@
//! This module communicates with Pyth Benchmarks, an API for historical price feeds and their updates.

use {
super::State,
crate::{
super::{
aggregate::{
PriceFeedsWithUpdateData,
UnixTimestamp,
},
api::types::PriceUpdate,
State,
},
crate::api::types::PriceUpdate,
anyhow::Result,
base64::{
engine::general_purpose::STANDARD as base64_standard_engine,
@ -69,6 +69,7 @@ impl<'a> From<&'a State> for &'a BenchmarksState {
}
}

#[async_trait::async_trait]
pub trait Benchmarks {
async fn get_verified_price_feeds(
&self,
@ -77,6 +78,7 @@ pub trait Benchmarks {
) -> Result<PriceFeedsWithUpdateData>;
}

#[async_trait::async_trait]
impl<T> Benchmarks for T
where
for<'a> &'a T: Into<&'a BenchmarksState>,
@ -1,6 +1,6 @@
use {
super::State,
crate::aggregate::{
crate::state::aggregate::{
wormhole_merkle::WormholeMerkleState,
AccumulatorMessages,
ProofSet,
@ -132,16 +132,10 @@ impl<'a> From<&'a State> for &'a CacheState {
}
}

#[async_trait::async_trait]
pub trait Cache {
async fn message_state_keys(&self) -> Vec<MessageStateKey>;
async fn store_message_states(&self, message_states: Vec<MessageState>) -> Result<()>;
async fn prune_removed_keys(&self, current_keys: HashSet<MessageStateKey>);
async fn fetch_message_states(
&self,
ids: Vec<FeedId>,
request_time: RequestTime,
filter: MessageStateFilter,
) -> Result<Vec<MessageState>>;
async fn store_accumulator_messages(
&self,
accumulator_messages: AccumulatorMessages,
@ -152,8 +146,16 @@ pub trait Cache {
wormhole_merkle_state: WormholeMerkleState,
) -> Result<()>;
async fn fetch_wormhole_merkle_state(&self, slot: Slot) -> Result<Option<WormholeMerkleState>>;
async fn message_state_keys(&self) -> Vec<MessageStateKey>;
async fn fetch_message_states(
&self,
ids: Vec<FeedId>,
request_time: RequestTime,
filter: MessageStateFilter,
) -> Result<Vec<MessageState>>;
}

#[async_trait::async_trait]
impl<T> Cache for T
where
for<'a> &'a T: Into<&'a CacheState>,
@ -322,9 +324,9 @@ async fn retrieve_message_state(
mod test {
use {
super::*,
crate::{
crate::state::{
aggregate::wormhole_merkle::WormholeMerkleMessageProof,
state::test::setup_state,
test::setup_state,
},
pyth_sdk::UnixTimestamp,
pythnet_sdk::{
@ -13,10 +13,10 @@ COPY --chown=1000:1000 price_service/client/js price_service/client/js
COPY --chown=1000:1000 price_service/sdk/js price_service/sdk/js
COPY --chown=1000:1000 target_chains/solana/sdk/js target_chains/solana/sdk/js

COPY --chown=1000:1000 price_pusher price_pusher
COPY --chown=1000:1000 apps/price_pusher apps/price_pusher

RUN npx lerna run build --scope="@pythnetwork/price-pusher" --include-dependencies

WORKDIR /home/node/price_pusher
WORKDIR /home/node/apps/price_pusher

ENTRYPOINT [ "npm", "run", "start" ]
@ -10,7 +10,7 @@ By default, Pyth does not automatically update the on-chain price every time the
instead, anyone can permissionlessly update the on-chain price prior to using it.
For more information please refer to [this document](https://docs.pyth.network/documentation/how-pyth-works).

Protocols integrating with can update the on-chain Pyth prices in two different ways.
Protocols integrating with Pyth can update the on-chain Pyth prices in two different ways.
The first approach is on-demand updates: package a Pyth price update together with each transaction that depends on it.
On-demand updates minimize latency and are more gas efficient, as prices are only updated on-chain when they are needed.
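For a concrete picture of the on-demand flow, here is a minimal sketch (not part of this change) that pulls a signed update from Hermes and submits it ahead of a dependent call. It assumes the `@pythnetwork/price-service-client` package and a web3.js contract instance (`pythContract`, created elsewhere) for a Pyth EVM deployment; the feed id shown is the public BTC/USD id, used purely as an example.

```typescript
import { PriceServiceConnection } from "@pythnetwork/price-service-client";

// Minimal on-demand update sketch. Assumptions: a web3.js contract
// instance `pythContract` for the target chain's Pyth contract and a
// funded `from` address; the feed id below is BTC/USD, for illustration.
async function pushOnDemand(pythContract: any, from: string): Promise<void> {
  const connection = new PriceServiceConnection("https://hermes.pyth.network");
  const priceIds = [
    "0xe62df6c8b4a85fe1a67db44dc12de5db330f7ac66b72dc658afedf0f4a415b43",
  ];

  // Fetch the latest signed updates (VAAs) for the feeds this
  // transaction depends on, and hex-encode them for the EVM contract.
  const vaas = await connection.getLatestVaas(priceIds);
  const updateData = vaas.map(
    (vaa) => "0x" + Buffer.from(vaa, "base64").toString("hex")
  );

  // The contract charges a per-update fee; query it and forward it as
  // the transaction value alongside the update itself.
  const fee = await pythContract.methods.getUpdateFee(updateData).call();
  await pythContract.methods
    .updatePriceFeeds(updateData)
    .send({ from, value: fee });
}
```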
@ -87,7 +87,7 @@ npm install
npx lerna run build --scope @pythnetwork/price-pusher --include-dependencies

# Navigate to the price_pusher folder
cd price_pusher
cd apps/price_pusher

# For EVM
npm run start -- evm --endpoint wss://example-rpc.com \
@ -145,6 +145,33 @@ npm run start -- near \
[--pushing-frequency 10] \
[--polling-frequency 5]

# For Solana, using Jito (recommended)
npm run start -- solana \
--endpoint https://api.mainnet-beta.solana.com \
--keypair-file ./id.json \
--shard-id 1 \
--jito-endpoint mainnet.block-engine.jito.wtf \
--jito-keypair-file ./jito.json \
--jito-tip-lamports 100000 \
--jito-bundle-size 5 \
--price-config-file ./price-config.yaml \
--price-service-endpoint https://hermes.pyth.network/ \
--pyth-contract-address pythWSnswVUd12oZpeFP8e9CVaEqJg25g1Vtc2biRsT \
--pushing-frequency 30 \
[--polling-frequency 5]

# For Solana, using Solana RPC
npm run start -- solana \
--endpoint https://api.devnet.solana.com \
--keypair-file ./id.json \
--shard-id 1 \
--price-config-file ./price-config.yaml \
--price-service-endpoint https://hermes.pyth.network/ \
--pyth-contract-address pythWSnswVUd12oZpeFP8e9CVaEqJg25g1Vtc2biRsT \
--pushing-frequency 30 \
[--polling-frequency 5]


# Or, run the price pusher docker image instead of building from the source
docker run public.ecr.aws/pyth-network/xc-price-pusher:v<version> -- <above-arguments>
@ -0,0 +1,13 @@
{
"endpoint": "https://api.mainnet-beta.solana.com",
"keypair-file": "./id.json",
"shard-id": 1,
"jito-endpoint": "mainnet.block-engine.jito.wtf",
"jito-keypair-file": "./jito.json",
"jito-tip-lamports": "100000",
"jito-bundle-size": "5",
"price-config-file": "./price-config.yaml",
"price-service-endpoint": "https://hermes.pyth.network/",
"pyth-contract-address": "pythWSnswVUd12oZpeFP8e9CVaEqJg25g1Vtc2biRsT",
"pushing-frequency": "30"
}
@ -0,0 +1,9 @@
{
"endpoint": "https://api.devnet.solana.com",
"keypair-file": "./id.json",
"shard-id": 1,
"price-config-file": "./price-config.yaml",
"price-service-endpoint": "https://hermes.pyth.network/",
"pyth-contract-address": "pythWSnswVUd12oZpeFP8e9CVaEqJg25g1Vtc2biRsT",
"pushing-frequency": "30"
}
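These two new sample files appear to mirror the Solana CLI invocations above: one for mainnet via Jito, one for devnet via plain RPC. With the config support this change wires into the CLI (the yargs `.config("config")` call further down), such a file can stand in for the individual flags, e.g. `npm run start -- solana --config ./config.devnet.sample.json` (file name illustrative). Note that the numeric values are kept as strings, which matches the `"parse-numbers": false` parser setting introduced below.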
@ -1,6 +1,6 @@
{
"name": "@pythnetwork/price-pusher",
"version": "6.6.0",
"version": "6.7.2",
"description": "Pyth Price Pusher",
"homepage": "https://pyth.network",
"main": "lib/index.js",
@ -14,7 +14,7 @@
"repository": {
"type": "git",
"url": "https://github.com/pyth-network/pyth-crosschain",
"directory": "price_pusher"
"directory": "apps/price_pusher"
},
"publishConfig": {
"access": "public"
@ -52,6 +52,20 @@ export default {
required: false,
default: 5,
} as Options,
"gas-limit": {
description: "Gas limit for the transaction",
type: "number",
required: false,
} as Options,
"update-fee-multiplier": {
description:
"Multiplier for the fee to update the price. It is useful in networks " +
"such as Hedera where setting on-chain getUpdateFee as the transaction value " +
"won't work. Default to 1",
type: "number",
required: false,
default: 1,
} as Options,
...options.priceConfigFile,
...options.priceServiceEndpoint,
...options.mnemonicFile,
@ -73,6 +87,8 @@ export default {
txSpeed,
overrideGasPriceMultiplier,
overrideGasPriceMultiplierCap,
gasLimit,
updateFeeMultiplier,
} = argv;

const priceConfigs = readPriceConfigFile(priceConfigFile);
@ -119,6 +135,8 @@ export default {
pythContractFactory,
overrideGasPriceMultiplier,
overrideGasPriceMultiplierCap,
updateFeeMultiplier,
gasLimit,
gasStation
);
@ -130,6 +130,8 @@ export class EvmPricePusher implements IPricePusher {
pythContractFactory: PythContractFactory,
private overrideGasPriceMultiplier: number,
private overrideGasPriceMultiplierCap: number,
private updateFeeMultiplier: number,
private gasLimit?: number,
customGasStation?: CustomGasStation
) {
this.customGasStation = customGasStation;
@ -168,6 +170,7 @@ export class EvmPricePusher implements IPricePusher {
updateFee = await this.pythContract.methods
.getUpdateFee(priceFeedUpdateData)
.call();
updateFee = Number(updateFee) * (this.updateFeeMultiplier || 1);
console.log(`Update fee: ${updateFee}`);
} catch (e: any) {
console.error(
@ -217,7 +220,12 @@ export class EvmPricePusher implements IPricePusher {
priceIdsWith0x,
pubTimesToPush
)
.send({ value: updateFee, gasPrice, nonce: txNonce })
.send({
value: updateFee,
gasPrice,
nonce: txNonce,
gasLimit: this.gasLimit,
})
.on("transactionHash", (hash: string) => {
console.log(`Successful. Tx hash: ${hash}`);
})
@ -1,4 +1,4 @@
// #!/usr/bin/env node
#!/usr/bin/env node
import yargs from "yargs";
import { hideBin } from "yargs/helpers";
import injective from "./injective/command";
@ -9,6 +9,9 @@ import near from "./near/command";
import solana from "./solana/command";

yargs(hideBin(process.argv))
.parserConfiguration({
"parse-numbers": false,
})
.config("config")
.global("config")
.command(evm)
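One detail worth calling out in this hunk: `"parse-numbers": false` stops yargs from coercing numeric-looking arguments into JS numbers, which would silently lose precision for values above `Number.MAX_SAFE_INTEGER` and is presumably why the sample config files above keep values such as `"jito-tip-lamports"` as strings. A small illustrative sketch (my reading of the behavior, not code from this change):

```typescript
import yargs from "yargs";

// With default parsing, a value beyond Number.MAX_SAFE_INTEGER such as
// "9007199254740993" would arrive as an imprecise JS number; with
// "parse-numbers": false it stays an exact string.
const argv = yargs(["--jito-tip-lamports", "9007199254740993"])
  .parserConfiguration({ "parse-numbers": false })
  .parseSync();

console.log(typeof argv["jito-tip-lamports"]); // "string"
console.log(argv["jito-tip-lamports"]); // "9007199254740993"
```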
@ -117,7 +117,7 @@ export default {
);

const pythSolanaReceiver = new PythSolanaReceiver({
connection: new Connection(endpoint),
connection: new Connection(endpoint, "processed"),
wallet,
pushOracleProgramId: new PublicKey(pythContractAddress),
});

@ -59,19 +59,13 @@ export class SolanaPricePusher implements IPricePusher {
private pythSolanaReceiver: PythSolanaReceiver,
private priceServiceConnection: PriceServiceConnection,
private shardId: number,
private computeUnitPriceMicroLamports: number,
private alreadySending: boolean = false
private computeUnitPriceMicroLamports: number
) {}

async updatePriceFeed(
priceIds: string[],
pubTimesToPush: number[]
): Promise<void> {
if (this.alreadySending) {
console.log(new Date(), "updatePriceFeed already in progress");
return;
}
this.alreadySending = true;
if (priceIds.length === 0) {
return;
}
@ -106,10 +100,8 @@ export class SolanaPricePusher implements IPricePusher {
this.pythSolanaReceiver.wallet
);
console.log(new Date(), "updatePriceFeed successful");
this.alreadySending = false;
} catch (e: any) {
console.error(new Date(), "updatePriceFeed failed", e);
this.alreadySending = false;
return;
}
}
@ -44,6 +44,13 @@ export default {
required: true,
default: 30,
} as Options,
"ignore-gas-objects": {
description:
"Gas objects to ignore when merging gas objects on startup -- use this for locked objects.",
type: "array",
required: false,
default: [],
} as Options,
"gas-budget": {
description: "Gas budget for each price update",
type: "number",
@ -73,6 +80,7 @@ export default {
pythStateId,
wormholeStateId,
numGasObjects,
ignoreGasObjects,
gasBudget,
accountIndex,
} = argv;
@ -126,7 +134,8 @@ export default {
endpoint,
keypair,
gasBudget,
numGasObjects
numGasObjects,
ignoreGasObjects
);

const controller = new Controller(
@ -162,7 +162,8 @@ export class SuiPricePusher implements IPricePusher {
endpoint: string,
keypair: Ed25519Keypair,
gasBudget: number,
numGasObjects: number
numGasObjects: number,
ignoreGasObjects: string[]
): Promise<SuiPricePusher> {
if (numGasObjects > MAX_NUM_OBJECTS_IN_ARGUMENT) {
throw new Error(
@ -183,7 +184,8 @@ export class SuiPricePusher implements IPricePusher {
const gasPool = await SuiPricePusher.initializeGasPool(
keypair,
provider,
numGasObjects
numGasObjects,
ignoreGasObjects
);

const pythClient = new SuiPythClient(
@ -318,17 +320,26 @@ export class SuiPricePusher implements IPricePusher {

// This function will smash all coins owned by the signer into one, and then
// split them equally into numGasObjects.
// ignoreGasObjects is a list of gas objects that will be ignored during the
// merging -- use this to store any locked objects on initialization.
private static async initializeGasPool(
signer: Ed25519Keypair,
provider: SuiClient,
numGasObjects: number
numGasObjects: number,
ignoreGasObjects: string[]
): Promise<SuiObjectRef[]> {
const signerAddress = await signer.toSuiAddress();

if (ignoreGasObjects.length > 0) {
console.log("Ignoring some gas objects for coin merging:");
console.log(ignoreGasObjects);
}

const consolidatedCoin = await SuiPricePusher.mergeGasCoinsIntoOne(
signer,
provider,
signerAddress
signerAddress,
ignoreGasObjects
);
const coinResult = await provider.getObject({
id: consolidatedCoin.objectId,
@ -458,7 +469,8 @@ export class SuiPricePusher implements IPricePusher {
private static async mergeGasCoinsIntoOne(
signer: Ed25519Keypair,
provider: SuiClient,
owner: SuiAddress
owner: SuiAddress,
initialLockedAddresses: string[]
): Promise<SuiObjectRef> {
const gasCoins = await SuiPricePusher.getAllGasCoins(provider, owner);
// skip merging if there is only one coin
@ -472,6 +484,7 @@ export class SuiPricePusher implements IPricePusher {
);
let finalCoin;
const lockedAddresses: Set<string> = new Set();
initialLockedAddresses.forEach((value) => lockedAddresses.add(value));
for (let i = 0; i < gasCoinsChunks.length; i++) {
const mergeTx = new TransactionBlock();
let coins = gasCoinsChunks[i];
@ -488,6 +501,10 @@ export class SuiPricePusher implements IPricePusher {
options: { showEffects: true },
});
} catch (e) {
console.log("Merge transaction failed with error:");
console.log(e);
console.log((e as any).data);
console.log(JSON.stringify(e));
if (
String(e).includes(
"quorum of validators because of locked objects. Retried a conflicting transaction"
@ -1,5 +1,5 @@
{
"extends": "../tsconfig.base.json",
"extends": "../../tsconfig.base.json",
"compilerOptions": {
"target": "esnext",
"module": "commonjs",

@ -1,5 +1,5 @@
{
"name": "contract_manager",
"name": "@pythnetwork/contract-manager",
"version": "1.0.0",
"description": "Set of tools to manage pyth contracts",
"private": true,
@ -23,7 +23,7 @@
"dependencies": {
"@certusone/wormhole-sdk": "^0.9.8",
"@coral-xyz/anchor": "^0.29.0",
"@injectivelabs/networks": "1.0.68",
"@injectivelabs/networks": "^1.14.6",
"@mysten/sui.js": "^0.49.1",
"@pythnetwork/cosmwasm-deploy-tools": "*",
"@pythnetwork/entropy-sdk-solidity": "*",
@ -23,7 +23,7 @@ import {
EvmEntropyContract,
EvmPriceFeedContract,
getCodeDigestWithoutAddress,
WormholeEvmContract,
EvmWormholeContract,
} from "../src/contracts/evm";
import Web3 from "web3";

@ -73,7 +73,7 @@ async function main() {
instruction.governanceAction.targetChainId
) {
const address = instruction.governanceAction.address;
const contract = new WormholeEvmContract(chain, address);
const contract = new EvmWormholeContract(chain, address);
const currentIndex = await contract.getCurrentGuardianSetIndex();
const guardianSet = await contract.getGuardianSet();
@ -1,11 +1,18 @@
|
|||
import { DefaultStore, EvmChain, PrivateKey } from "../src";
|
||||
import {
|
||||
DefaultStore,
|
||||
EvmChain,
|
||||
EvmEntropyContract,
|
||||
EvmWormholeContract,
|
||||
getDefaultDeploymentConfig,
|
||||
PrivateKey,
|
||||
} from "../src";
|
||||
import { existsSync, readFileSync, writeFileSync } from "fs";
|
||||
import { join } from "path";
|
||||
import Web3 from "web3";
|
||||
import { Contract } from "web3-eth-contract";
|
||||
import { InferredOptionType } from "yargs";
|
||||
|
||||
interface DeployConfig {
|
||||
export interface BaseDeployConfig {
|
||||
gasMultiplier: number;
|
||||
gasPriceMultiplier: number;
|
||||
jsonOutputDir: string;
|
||||
|
@ -19,7 +26,7 @@ interface DeployConfig {
|
|||
export async function deployIfNotCached(
|
||||
cacheFile: string,
|
||||
chain: EvmChain,
|
||||
config: DeployConfig,
|
||||
config: BaseDeployConfig,
|
||||
artifactName: string,
|
||||
deployArgs: any[], // eslint-disable-line @typescript-eslint/no-explicit-any
|
||||
cacheKey?: string
|
||||
|
@ -72,7 +79,7 @@ export const COMMON_DEPLOY_OPTIONS = {
|
|||
chain: {
|
||||
type: "array",
|
||||
demandOption: true,
|
||||
desc: "Chain to upload the contract on. Can be one of the evm chains available in the store",
|
||||
desc: "Chain to upload the contract on. Can be one of the chains available in the store",
|
||||
},
|
||||
"deployment-type": {
|
||||
type: "string",
|
||||
|
@ -181,3 +188,149 @@ export function getSelectedChains(argv: {
|
|||
}
|
||||
return selectedChains;
|
||||
}
|
||||
|
||||
/**
|
||||
* Finds the entropy contract for a given EVM chain.
|
||||
* @param {EvmChain} chain The EVM chain to find the entropy contract for.
|
||||
* @returns The entropy contract for the given EVM chain.
|
||||
* @throws {Error} an error if the entropy contract is not found for the given EVM chain.
|
||||
*/
|
||||
export function findEntropyContract(chain: EvmChain): EvmEntropyContract {
|
||||
for (const contract of Object.values(DefaultStore.entropy_contracts)) {
|
||||
if (contract.getChain().getId() === chain.getId()) {
|
||||
return contract;
|
||||
}
|
||||
}
|
||||
throw new Error(`Entropy contract not found for chain ${chain.getId()}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Finds an EVM chain by its name.
|
||||
* @param {string} chainName The name of the chain to find.
|
||||
* @returns The EVM chain instance.
|
||||
* @throws {Error} an error if the chain is not found or is not an EVM chain.
|
||||
*/
|
||||
export function findEvmChain(chainName: string): EvmChain {
|
||||
const chain = DefaultStore.chains[chainName];
|
||||
if (!chain) {
|
||||
throw new Error(`Chain ${chainName} not found`);
|
||||
} else if (!(chain instanceof EvmChain)) {
|
||||
throw new Error(`Chain ${chainName} is not an EVM chain`);
|
||||
}
|
||||
return chain;
|
||||
}
|
||||
|
||||
/**
|
||||
* Finds the wormhole contract for a given EVM chain.
|
||||
* @param {EvmChain} chain The EVM chain to find the wormhole contract for.
|
||||
* @returns If found, the wormhole contract for the given EVM chain. Else, undefined
|
||||
*/
|
||||
export function findWormholeContract(
|
||||
chain: EvmChain
|
||||
): EvmWormholeContract | undefined {
|
||||
for (const contract of Object.values(DefaultStore.wormhole_contracts)) {
|
||||
if (
|
||||
contract instanceof EvmWormholeContract &&
|
||||
contract.getChain().getId() === chain.getId()
|
||||
) {
|
||||
return contract;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export interface DeployWormholeReceiverContractsConfig
|
||||
extends BaseDeployConfig {
|
||||
saveContract: boolean;
|
||||
type: "stable" | "beta";
|
||||
}
|
||||
/**
|
||||
* Deploys the wormhole receiver contract for a given EVM chain.
|
||||
* @param {EvmChain} chain The EVM chain to find the wormhole receiver contract for.
|
||||
* @param {DeployWormholeReceiverContractsConfig} config The deployment configuration.
|
||||
* @param {string} cacheFile The path to the cache file.
|
||||
* @returns {EvmWormholeContract} The wormhole contract for the given EVM chain.
|
||||
*/
|
||||
export async function deployWormholeContract(
|
||||
chain: EvmChain,
|
||||
config: DeployWormholeReceiverContractsConfig,
|
||||
cacheFile: string
|
||||
): Promise<EvmWormholeContract> {
|
||||
const receiverSetupAddr = await deployIfNotCached(
|
||||
cacheFile,
|
||||
chain,
|
||||
config,
|
||||
"ReceiverSetup",
|
||||
[]
|
||||
);
|
||||
|
||||
const receiverImplAddr = await deployIfNotCached(
|
||||
cacheFile,
|
||||
chain,
|
||||
config,
|
||||
"ReceiverImplementation",
|
||||
[]
|
||||
);
|
||||
|
||||
// Craft the init data for the proxy contract
|
||||
const setupContract = getWeb3Contract(
|
||||
config.jsonOutputDir,
|
||||
"ReceiverSetup",
|
||||
receiverSetupAddr
|
||||
);
|
||||
|
||||
const { wormholeConfig } = getDefaultDeploymentConfig(config.type);
|
||||
|
||||
const initData = setupContract.methods
|
||||
.setup(
|
||||
receiverImplAddr,
|
||||
wormholeConfig.initialGuardianSet.map((addr: string) => "0x" + addr),
|
||||
chain.getWormholeChainId(),
|
||||
wormholeConfig.governanceChainId,
|
||||
"0x" + wormholeConfig.governanceContract
|
||||
)
|
||||
.encodeABI();
  const wormholeReceiverAddr = await deployIfNotCached(
    cacheFile,
    chain,
    config,
    "WormholeReceiver",
    [receiverSetupAddr, initData]
  );

  const wormholeContract = new EvmWormholeContract(chain, wormholeReceiverAddr);

  if (config.type === "stable") {
    console.log(`Syncing mainnet guardian sets for ${chain.getId()}...`);
    // TODO: Add a way to pass gas configs to this
    await wormholeContract.syncMainnetGuardianSets(config.privateKey);
    console.log(`✅ Synced mainnet guardian sets for ${chain.getId()}`);
  }

  if (config.saveContract) {
    DefaultStore.wormhole_contracts[wormholeContract.getId()] =
      wormholeContract;
    DefaultStore.saveAllContracts();
  }

  return wormholeContract;
}

/**
 * Returns the wormhole contract for a given EVM chain.
 * If no wormhole contract has been deployed for the given chain, deploys one and saves it to the default store.
 * @param {EvmChain} chain The EVM chain to find or deploy the wormhole contract for.
 * @param {DeployWormholeReceiverContractsConfig} config The deployment configuration.
 * @param {string} cacheFile The path to the cache file.
 * @returns {EvmWormholeContract} The wormhole contract for the given EVM chain.
 */
export async function getOrDeployWormholeContract(
  chain: EvmChain,
  config: DeployWormholeReceiverContractsConfig,
  cacheFile: string
): Promise<EvmWormholeContract> {
  return (
    findWormholeContract(chain) ??
    (await deployWormholeContract(chain, config, cacheFile))
  );
}

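A minimal usage sketch of getOrDeployWormholeContract (editor's illustration, not part of the diff): the chain id, key, and output directory are placeholders, and the gas fields assume BaseDeployConfig declares them.

// Editor's sketch only; every concrete value here is a placeholder.
import { toPrivateKey } from "../src";
import { findEvmChain, getOrDeployWormholeContract } from "./common";

async function demo() {
  const chain = findEvmChain("my_evm_chain"); // placeholder chain id
  const wormhole = await getOrDeployWormholeContract(
    chain,
    {
      type: "beta",
      saveContract: false,
      privateKey: toPrivateKey(process.env.PRIVATE_KEY ?? ""), // placeholder key
      jsonOutputDir: "./build", // placeholder artifact directory
      gasMultiplier: 2, // assumed BaseDeployConfig field
      gasPriceMultiplier: 1, // assumed BaseDeployConfig field
    },
    ".cache-demo"
  );
  console.log(`Wormhole receiver for ${chain.getId()}: ${wormhole.address}`);
}

demo();
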
@@ -5,29 +5,23 @@ import { DefaultStore } from "../src/store";
 import {
   DeploymentType,
   EvmEntropyContract,
-  EvmPriceFeedContract,
   getDefaultDeploymentConfig,
   PrivateKey,
   toDeploymentType,
   toPrivateKey,
-  WormholeEvmContract,
 } from "../src";
 import {
   COMMON_DEPLOY_OPTIONS,
   deployIfNotCached,
   getWeb3Contract,
+  getOrDeployWormholeContract,
+  BaseDeployConfig,
 } from "./common";
 import Web3 from "web3";

-type DeploymentConfig = {
+interface DeploymentConfig extends BaseDeployConfig {
   type: DeploymentType;
-  gasMultiplier: number;
-  gasPriceMultiplier: number;
-  privateKey: PrivateKey;
-  jsonOutputDir: string;
-  wormholeAddr: string;
   saveContract: boolean;
-};
+}

 const CACHE_FILE = ".cache-deploy-evm-entropy-contracts";
 const ENTROPY_DEFAULT_PROVIDER = {
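The fields dropped from DeploymentConfig here presumably moved into the new BaseDeployConfig exported from ./common, while wormholeAddr is simply gone because the contract is now resolved via getOrDeployWormholeContract. A sketch of that shared shape, inferred from the fields both deploy scripts stop declaring (the actual definition is not shown in this diff):

// Inferred sketch, not shown in the diff: shared options in scripts/common.ts.
import { PrivateKey } from "../src";

export interface BaseDeployConfig {
  gasMultiplier: number; // multiplier applied to the estimated gas
  gasPriceMultiplier: number; // multiplier applied to the network gas price
  privateKey: PrivateKey; // deployer key
  jsonOutputDir: string; // directory holding compiled contract JSON artifacts
}
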
@@ -51,7 +45,8 @@ const parser = yargs(hideBin(process.argv))

 async function deployExecutorContracts(
   chain: EvmChain,
-  config: DeploymentConfig
+  config: DeploymentConfig,
+  wormholeAddr: string
 ): Promise<string> {
   const executorImplAddr = await deployIfNotCached(
     CACHE_FILE,

@@ -72,7 +67,7 @@ async function deployExecutorContracts(

   const executorInitData = executorImplContract.methods
     .initialize(
-      config.wormholeAddr,
+      wormholeAddr,
       0, // lastExecutedSequence,
       chain.getWormholeChainId(),
       governanceDataSource.emitterChain,
@@ -161,19 +156,6 @@ async function topupProviderIfNecessary(
   }
 }

-async function findWormholeAddress(
-  chain: EvmChain
-): Promise<string | undefined> {
-  for (const contract of Object.values(DefaultStore.contracts)) {
-    if (
-      contract instanceof EvmPriceFeedContract &&
-      contract.getChain().getId() === chain.getId()
-    ) {
-      return (await contract.getWormholeContract()).address;
-    }
-  }
-}
-
 async function main() {
   const argv = await parser.argv;

@@ -185,12 +167,6 @@
     throw new Error(`Chain ${chainName} is not an EVM chain`);
   }

-  const wormholeAddr = await findWormholeAddress(chain);
-  if (!wormholeAddr) {
-    // TODO: deploy wormhole if necessary and maintain a wormhole store
-    throw new Error(`Wormhole contract not found for chain ${chain.getId()}`);
-  }
-
   const deploymentConfig: DeploymentConfig = {
     type: toDeploymentType(argv.deploymentType),
     gasMultiplier: argv.gasMultiplier,

@@ -198,18 +174,14 @@
     privateKey: toPrivateKey(argv.privateKey),
     jsonOutputDir: argv.stdOutputDir,
     saveContract: argv.saveContract,
-    wormholeAddr,
   };
-  const wormholeContract = new WormholeEvmContract(
+
+  const wormholeContract = await getOrDeployWormholeContract(
     chain,
-    deploymentConfig.wormholeAddr
+    deploymentConfig,
+    CACHE_FILE
   );
-  const wormholeChainId = await wormholeContract.getChainId();
-  if (chain.getWormholeChainId() != wormholeChainId) {
-    throw new Error(
-      `Wormhole chain id mismatch. Expected ${chain.getWormholeChainId()} but got ${wormholeChainId}`
-    );
-  }

   await topupProviderIfNecessary(chain, deploymentConfig);

   console.log(
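The explicit chain-id sanity check disappears with this refactor. A caller who still wants it could re-add an equivalent guard after getOrDeployWormholeContract; a sketch (editor's illustration), assuming the new wrapper keeps the getChainId() accessor the old WormholeEvmContract had and that the import path is approximate:

// Optional post-deploy guard; assumes EvmWormholeContract.getChainId() exists.
import { EvmChain, EvmWormholeContract } from "../src";

async function assertWormholeChainId(
  chain: EvmChain,
  wormholeContract: EvmWormholeContract
): Promise<void> {
  const onChainId = await wormholeContract.getChainId();
  if (chain.getWormholeChainId() !== onChainId) {
    throw new Error(
      `Wormhole chain id mismatch. Expected ${chain.getWormholeChainId()} but got ${onChainId}`
    );
  }
}
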
@@ -218,7 +190,11 @@

   console.log(`Deploying entropy contracts on ${chain.getId()}...`);

-  const executorAddr = await deployExecutorContracts(chain, deploymentConfig);
+  const executorAddr = await deployExecutorContracts(
+    chain,
+    deploymentConfig,
+    wormholeContract.address
+  );
   const entropyAddr = await deployEntropyContracts(
     chain,
     deploymentConfig,

@@ -6,27 +6,23 @@ import {
   DeploymentType,
   EvmPriceFeedContract,
   getDefaultDeploymentConfig,
-  PrivateKey,
   toDeploymentType,
   toPrivateKey,
-  WormholeEvmContract,
 } from "../src";
 import {
   COMMON_DEPLOY_OPTIONS,
   deployIfNotCached,
   getWeb3Contract,
+  getOrDeployWormholeContract,
+  BaseDeployConfig,
 } from "./common";

-type DeploymentConfig = {
+interface DeploymentConfig extends BaseDeployConfig {
   type: DeploymentType;
   validTimePeriodSeconds: number;
   singleUpdateFeeInWei: number;
-  gasMultiplier: number;
-  gasPriceMultiplier: number;
-  privateKey: PrivateKey;
-  jsonOutputDir: string;
   saveContract: boolean;
-};
+}

 const CACHE_FILE = ".cache-deploy-evm";

@@ -51,68 +47,6 @@ const parser = yargs(hideBin(process.argv))
   },
 });

-async function deployWormholeReceiverContracts(
-  chain: EvmChain,
-  config: DeploymentConfig
-): Promise<string> {
-  const receiverSetupAddr = await deployIfNotCached(
-    CACHE_FILE,
-    chain,
-    config,
-    "ReceiverSetup",
-    []
-  );
-
-  const receiverImplAddr = await deployIfNotCached(
-    CACHE_FILE,
-    chain,
-    config,
-    "ReceiverImplementation",
-    []
-  );
-
-  // Craft the init data for the proxy contract
-  const setupContract = getWeb3Contract(
-    config.jsonOutputDir,
-    "ReceiverSetup",
-    receiverSetupAddr
-  );
-
-  const { wormholeConfig } = getDefaultDeploymentConfig(config.type);
-
-  const initData = setupContract.methods
-    .setup(
-      receiverImplAddr,
-      wormholeConfig.initialGuardianSet.map((addr: string) => "0x" + addr),
-      chain.getWormholeChainId(),
-      wormholeConfig.governanceChainId,
-      "0x" + wormholeConfig.governanceContract
-    )
-    .encodeABI();
-
-  const wormholeReceiverAddr = await deployIfNotCached(
-    CACHE_FILE,
-    chain,
-    config,
-    "WormholeReceiver",
-    [receiverSetupAddr, initData]
-  );
-
-  const wormholeEvmContract = new WormholeEvmContract(
-    chain,
-    wormholeReceiverAddr
-  );
-
-  if (config.type === "stable") {
-    console.log(`Syncing mainnet guardian sets for ${chain.getId()}...`);
-    // TODO: Add a way to pass gas configs to this
-    await wormholeEvmContract.syncMainnetGuardianSets(config.privateKey);
-    console.log(`✅ Synced mainnet guardian sets for ${chain.getId()}`);
-  }
-
-  return wormholeReceiverAddr;
-}
-
 async function deployPriceFeedContracts(
   chain: EvmChain,
   config: DeploymentConfig,
@@ -186,14 +120,16 @@

   console.log(`Deploying price feed contracts on ${chain.getId()}...`);

-  const wormholeAddr = await deployWormholeReceiverContracts(
+  const wormholeContract = await getOrDeployWormholeContract(
     chain,
-    deploymentConfig
+    deploymentConfig,
+    CACHE_FILE
   );

   const priceFeedAddr = await deployPriceFeedContracts(
     chain,
     deploymentConfig,
-    wormholeAddr
+    wormholeContract.address
   );

   if (deploymentConfig.saveContract) {

@@ -0,0 +1,64 @@
import yargs from "yargs";
import { hideBin } from "yargs/helpers";
import { DefaultStore } from "../src";

function deserializeCommitmentMetadata(data: Buffer) {
  const seed = Uint8Array.from(data.subarray(0, 32));
  const chainLength = data.readBigInt64LE(32);

  return {
    seed,
    chainLength,
  };
}
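
// Editor's note (not in the diff): the layout parsed above is fixed-width --
// bytes 0..31 hold the provider's hash-chain seed and bytes 32..39 hold the
// chain length as a little-endian signed 64-bit integer. For example:
//   const buf = Buffer.alloc(40); // 32-byte seed of zeros + 8-byte length
//   buf.writeBigInt64LE(131072n, 32);
//   deserializeCommitmentMetadata(buf).chainLength; // 131072n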

const parser = yargs(hideBin(process.argv))
  .usage("Usage: $0")
  .options({
    testnet: {
      type: "boolean",
      default: false,
      desc: "Fetch the provider registration data for the testnet contracts.",
    },
  });

async function main() {
  const argv = await parser.argv;

  for (const contract of Object.values(DefaultStore.entropy_contracts)) {
    if (contract.getChain().isMainnet() === argv.testnet) continue;
    let provider;
    let providerInfo;
    try {
      provider = await contract.getDefaultProvider();
      providerInfo = await contract.getProviderInfo(provider);
    } catch (e) {
      console.error(`Error fetching info for ${contract.getId()}`, e);
      continue;
    }

    const commitmentMetadata = providerInfo.commitmentMetadata.replace(
      "0x",
      ""
    );

    const metadata = deserializeCommitmentMetadata(
      Buffer.from(commitmentMetadata, "hex")
    );
    console.log("=".repeat(100));
    console.log(`Fetched info for ${contract.getId()}`);

    console.log(`chain            : ${contract.getChain().getId()}`);
    console.log(`contract         : ${contract.address}`);
    console.log(`provider         : ${provider}`);
    console.log(`commitment data  : ${commitmentMetadata}`);
    console.log(`chainLength      : ${metadata.chainLength}`);
    console.log(`seed             : [${metadata.seed}]`);
    console.log(
      `original seq no  : ${providerInfo.originalCommitmentSequenceNumber}`
    );
  }
}

main();

@@ -1,33 +1,32 @@
 import yargs from "yargs";
 import { hideBin } from "yargs/helpers";
-import { DefaultStore, toPrivateKey } from "../src";
-import { COMMON_DEPLOY_OPTIONS } from "./common";
+import { toPrivateKey } from "../src";
+import {
+  COMMON_DEPLOY_OPTIONS,
+  findEntropyContract,
+  findEvmChain,
+} from "./common";

 const parser = yargs(hideBin(process.argv))
   .usage(
     "Requests and reveals a random number from an entropy contract while measuring the\n" +
       "latency between request submission and availability of the provider revelation from fortuna.\n" +
-      "Usage: $0 --contract <entropy_contract_id> --private-key <private-key>"
+      "Usage: $0 --chain <chain-id> --private-key <private-key>"
   )
   .options({
-    contract: {
+    chain: {
       type: "string",
       demandOption: true,
-      desc: "Contract to test latency for",
+      desc: "test latency for the contract on this chain",
     },
     "private-key": COMMON_DEPLOY_OPTIONS["private-key"],
   });

 async function main() {
   const argv = await parser.argv;
-  const contract = DefaultStore.entropy_contracts[argv.contract];
-  if (!contract) {
-    throw new Error(
-      `Contract ${argv.contract} not found. Contracts found: ${Object.keys(
-        DefaultStore.entropy_contracts
-      )}`
-    );
-  }
+  const chain = findEvmChain(argv.chain);
+  const contract = findEntropyContract(chain);

   const provider = await contract.getDefaultProvider();
   const providerInfo = await contract.getProviderInfo(provider);
   const userRandomNumber = contract.generateUserRandomNumber();

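This hunk switches the script to the findEvmChain and findEntropyContract helpers from ./common; their implementations are not shown in this diff, but their behavior, inferred from how they are called here and from the findWormholeContract pattern earlier in the diff, is roughly:

// Inferred sketch only; the real implementations live in scripts/common.ts,
// and the "../src" import path for EvmChain is an assumption.
import { DefaultStore, EvmChain, EvmEntropyContract } from "../src";

export function findEvmChain(chainName: string): EvmChain {
  const chain = DefaultStore.chains[chainName];
  if (!chain) {
    throw new Error(`Chain ${chainName} not found`);
  }
  if (!(chain instanceof EvmChain)) {
    throw new Error(`Chain ${chainName} is not an EVM chain`);
  }
  return chain;
}

export function findEntropyContract(chain: EvmChain): EvmEntropyContract {
  for (const contract of Object.values(DefaultStore.entropy_contracts)) {
    if (contract.getChain().getId() === chain.getId()) {
      return contract;
    }
  }
  throw new Error(`Entropy contract not found for chain ${chain.getId()}`);
}
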
@@ -0,0 +1,118 @@
import yargs from "yargs";
import { hideBin } from "yargs/helpers";
import {
  DefaultStore,
  EvmEntropyContract,
  PrivateKey,
  toPrivateKey,
} from "../src";
import {
  COMMON_DEPLOY_OPTIONS,
  findEntropyContract,
  findEvmChain,
} from "./common";
import Web3 from "web3";

const parser = yargs(hideBin(process.argv))
  .usage(
    "Requests a random number from an entropy contract and measures the\n" +
      "latency between request submission and fulfillment by the Fortuna keeper service.\n" +
      "Usage: $0 --private-key <private-key> --chain <chain-id> | --all-chains <testnet|mainnet>"
  )
  .options({
    chain: {
      type: "string",
      desc: "test latency for the contract on this chain",
      conflicts: "all-chains",
    },
    "all-chains": {
      type: "string",
      conflicts: "chain",
      choices: ["testnet", "mainnet"],
      desc: "test latency for all entropy contracts deployed either on mainnet or testnet",
    },
    "private-key": COMMON_DEPLOY_OPTIONS["private-key"],
  });

async function testLatency(
  contract: EvmEntropyContract,
  privateKey: PrivateKey
) {
  const provider = await contract.getDefaultProvider();
  const userRandomNumber = contract.generateUserRandomNumber();
  const requestResponse = await contract.requestRandomness(
    userRandomNumber,
    provider,
    privateKey,
    true // with callback
  );
  console.log(`Request tx hash  : ${requestResponse.transactionHash}`);
  // Read the sequence number for the request from the transaction events.
  const sequenceNumber =
    requestResponse.events.RequestedWithCallback.returnValues.sequenceNumber;
  console.log(`sequence         : ${sequenceNumber}`);

  const startTime = Date.now();

  let fromBlock = requestResponse.blockNumber;
  const web3 = new Web3(contract.chain.getRpcUrl());
  const entropyContract = contract.getContract();

  // eslint-disable-next-line no-constant-condition
  while (true) {
    const currentBlock = await web3.eth.getBlockNumber();

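    // Editor's note (not part of the diff): when no new block has been mined
    // yet, the `continue` below skips the 300 ms sleep at the bottom of the
    // loop, so the script re-polls the RPC without backoff until the head
    // block advances.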
    if (fromBlock > currentBlock) {
      continue;
    }

    const events = await entropyContract.getPastEvents("RevealedWithCallback", {
      fromBlock: fromBlock,
      toBlock: currentBlock,
    });
    fromBlock = currentBlock + 1;

    const event = events.find(
      (event) => event.returnValues.request[1] == sequenceNumber
    );

    if (event !== undefined) {
      console.log(`Random number    : ${event.returnValues.randomNumber}`);
      const endTime = Date.now();
      console.log(`Fortuna Latency  : ${endTime - startTime}ms`);
      console.log(
        `Revealed after   : ${
          currentBlock - requestResponse.blockNumber
        } blocks`
      );
      break;
    }

    await new Promise((resolve) => setTimeout(resolve, 300));
  }
}

async function main() {
  const argv = await parser.argv;
  if (!argv.chain && !argv["all-chains"]) {
    throw new Error("Must specify either --chain or --all-chains");
  }
  const privateKey = toPrivateKey(argv.privateKey);
  if (argv["all-chains"]) {
    for (const contract of Object.values(DefaultStore.entropy_contracts)) {
      if (
        contract.getChain().isMainnet() ===
        (argv["all-chains"] === "mainnet")
      ) {
        console.log(`Testing latency for ${contract.getId()}...`);
        await testLatency(contract, privateKey);
      }
    }
  } else if (argv.chain) {
    const chain = findEvmChain(argv.chain);
    const contract = findEntropyContract(chain);
    await testLatency(contract, privateKey);
  }
}

main();