refactor(hermes): rename store to aggregate

commit 09e2b17d1c (parent cbeada6c6d)
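Note: this commit moves the aggregation logic from `crate::store` to a new `crate::aggregate` module and splits the shared `Store` struct out into `crate::state::State`. A minimal sketch of how a call site changes, using only names and signatures that appear in the hunks below (crate-internal paths, so it only compiles inside this crate; the `ingest` helper is illustrative):

```rust
// Before the rename, both the shared state and the aggregation entry points
// lived under `crate::store`:
//
//     let store = Store::new(update_tx, 1000, None);
//     crate::store::store_update(&store, update).await?;
//
// After the rename, the shared state is `crate::state::State`, and the free
// functions keep their names but live under `crate::aggregate`.
use crate::{
    aggregate::{self, types::Update},
    state::State,
};

async fn ingest(state: &State, update: Update) -> anyhow::Result<()> {
    // `store_update` now takes `&State`; its behaviour is unchanged.
    aggregate::store_update(state, update).await
}
```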
@@ -12,13 +12,6 @@ use std::time::{
 };
 use {
 self::{
-benchmarks::Benchmarks,
-cache::{
-Cache,
-CacheStore,
-MessageState,
-MessageStateFilter,
-},
 proof::wormhole_merkle::{
 construct_update_data,
 WormholeMerkleState,
@@ -32,16 +25,27 @@ use {
 },
 wormhole::GuardianSet,
 },
-crate::store::{
-proof::wormhole_merkle::{
-construct_message_states_proofs,
-store_wormhole_merkle_verified_message,
+crate::{
+aggregate::{
+proof::wormhole_merkle::{
+construct_message_states_proofs,
+store_wormhole_merkle_verified_message,
+},
+types::{
+ProofSet,
+UnixTimestamp,
+},
+wormhole::verify_vaa,
 },
-types::{
-ProofSet,
-UnixTimestamp,
+state::{
+benchmarks::Benchmarks,
+cache::{
+CacheStore,
+MessageState,
+MessageStateFilter,
+},
+State,
 },
-wormhole::verify_vaa,
 },
 anyhow::{
 anyhow,
@@ -66,25 +70,13 @@ use {
 },
 },
 },
-reqwest::Url,
 std::{
-collections::{
-BTreeMap,
-BTreeSet,
-HashSet,
-},
-sync::Arc,
+collections::HashSet,
 time::Duration,
 },
-tokio::sync::{
-mpsc::Sender,
-RwLock,
-},
 wormhole_sdk::Vaa,
 };

-pub mod benchmarks;
-pub mod cache;
 pub mod proof;
 pub mod types;
 pub mod wormhole;
@@ -92,54 +84,9 @@ pub mod wormhole;
 const OBSERVED_CACHE_SIZE: usize = 1000;
 const READINESS_STALENESS_THRESHOLD: Duration = Duration::from_secs(30);

-pub struct Store {
-/// Storage is a short-lived cache of the state of all the updates that have been passed to the
-/// store.
-pub cache: Cache,
-
-/// Sequence numbers of lately observed Vaas. Store uses this set
-/// to ignore the previously observed Vaas as a performance boost.
-pub observed_vaa_seqs: RwLock<BTreeSet<u64>>,
-
-/// Wormhole guardian sets. It is used to verify Vaas before using them.
-pub guardian_set: RwLock<BTreeMap<u32, GuardianSet>>,
-
-/// The sender to the channel between Store and Api to notify completed updates.
-pub update_tx: Sender<()>,
-
-/// Time of the last completed update. This is used for the health probes.
-pub last_completed_update_at: RwLock<Option<Instant>>,
-
-/// Benchmarks endpoint
-pub benchmarks_endpoint: Option<Url>,
-}
-
-// impl CacheStore for Store {
-// }
-//
-// impl Benchmarks for Store {
-// }
-
-impl Store {
-pub fn new(
-update_tx: Sender<()>,
-cache_size: u64,
-benchmarks_endpoint: Option<Url>,
-) -> Arc<Self> {
-Arc::new(Self {
-cache: Cache::new(cache_size),
-observed_vaa_seqs: RwLock::new(Default::default()),
-guardian_set: RwLock::new(Default::default()),
-update_tx,
-last_completed_update_at: RwLock::new(None),
-benchmarks_endpoint,
-})
-}
-}
-
 /// Stores the update data in the store
-#[tracing::instrument(skip(store, update))]
-pub async fn store_update(store: &Store, update: Update) -> Result<()> {
+#[tracing::instrument(skip(state, update))]
+pub async fn store_update(state: &State, update: Update) -> Result<()> {
 // The slot that the update is originating from. It should be available
 // in all the updates.
 let slot = match update {
@@ -147,11 +94,11 @@ pub async fn store_update(store: &Store, update: Update) -> Result<()> {
 // FIXME: Move to wormhole.rs
 let vaa = serde_wormhole::from_slice::<Vaa<&serde_wormhole::RawMessage>>(&update_vaa)?;

-if store.observed_vaa_seqs.read().await.contains(&vaa.sequence) {
+if state.observed_vaa_seqs.read().await.contains(&vaa.sequence) {
 return Ok(()); // Ignore VAA if we have already seen it
 }

-let vaa = verify_vaa(store, vaa).await;
+let vaa = verify_vaa(state, vaa).await;

 let vaa = match vaa {
 Ok(vaa) => vaa,
@@ -162,7 +109,7 @@ pub async fn store_update(store: &Store, update: Update) -> Result<()> {
 };

 {
-let mut observed_vaa_seqs = store.observed_vaa_seqs.write().await;
+let mut observed_vaa_seqs = state.observed_vaa_seqs.write().await;
 if observed_vaa_seqs.contains(&vaa.sequence) {
 return Ok(()); // Ignore VAA if we have already seen it
 }
@@ -177,7 +124,7 @@ pub async fn store_update(store: &Store, update: Update) -> Result<()> {
 tracing::info!(slot = proof.slot, "Storing VAA Merkle Proof.");

 store_wormhole_merkle_verified_message(
-store,
+state,
 proof.clone(),
 update_vaa.to_owned(),
 )
@@ -192,15 +139,15 @@ pub async fn store_update(store: &Store, update: Update) -> Result<()> {
 let slot = accumulator_messages.slot;
 tracing::info!(slot = slot, "Storing Accumulator Messages.");

-store
+state
 .store_accumulator_messages(accumulator_messages)
 .await?;
 slot
 }
 };

-let accumulator_messages = store.fetch_accumulator_messages(slot).await?;
-let wormhole_merkle_state = store.fetch_wormhole_merkle_state(slot).await?;
+let accumulator_messages = state.fetch_accumulator_messages(slot).await?;
+let wormhole_merkle_state = state.fetch_wormhole_merkle_state(slot).await?;

 let (accumulator_messages, wormhole_merkle_state) =
 match (accumulator_messages, wormhole_merkle_state) {
@@ -214,11 +161,11 @@ pub async fn store_update(store: &Store, update: Update) -> Result<()> {

 // Once the accumulator reaches a complete state for a specific slot
 // we can build the message states
-build_message_states(store, accumulator_messages, wormhole_merkle_state).await?;
+build_message_states(state, accumulator_messages, wormhole_merkle_state).await?;

-store.update_tx.send(()).await?;
+state.update_tx.send(()).await?;

-store
+state
 .last_completed_update_at
 .write()
 .await
@@ -227,9 +174,9 @@ pub async fn store_update(store: &Store, update: Update) -> Result<()> {
 Ok(())
 }

-#[tracing::instrument(skip(store, accumulator_messages, wormhole_merkle_state))]
+#[tracing::instrument(skip(state, accumulator_messages, wormhole_merkle_state))]
 async fn build_message_states(
-store: &Store,
+state: &State,
 accumulator_messages: AccumulatorMessages,
 wormhole_merkle_state: WormholeMerkleState,
 ) -> Result<()> {
@@ -261,25 +208,25 @@ async fn build_message_states(

 tracing::info!(len = message_states.len(), "Storing Message States.");

-store.store_message_states(message_states).await?;
+state.store_message_states(message_states).await?;

 Ok(())
 }

-pub async fn update_guardian_set(store: &Store, id: u32, guardian_set: GuardianSet) {
-let mut guardian_sets = store.guardian_set.write().await;
+pub async fn update_guardian_set(state: &State, id: u32, guardian_set: GuardianSet) {
+let mut guardian_sets = state.guardian_set.write().await;
 guardian_sets.insert(id, guardian_set);
 }

 async fn get_verified_price_feeds<S>(
-store: &S,
+state: &S,
 price_ids: Vec<PriceIdentifier>,
 request_time: RequestTime,
 ) -> Result<PriceFeedsWithUpdateData>
 where
 S: CacheStore,
 {
-let messages = store
+let messages = state
 .fetch_message_states(
 price_ids
 .iter()
@@ -331,7 +278,7 @@ where
 }

 pub async fn get_price_feeds_with_update_data<S>(
-store: &S,
+state: &S,
 price_ids: Vec<PriceIdentifier>,
 request_time: RequestTime,
 ) -> Result<PriceFeedsWithUpdateData>
@@ -339,22 +286,22 @@ where
 S: CacheStore,
 S: Benchmarks,
 {
-match get_verified_price_feeds(store, price_ids.clone(), request_time.clone()).await {
+match get_verified_price_feeds(state, price_ids.clone(), request_time.clone()).await {
 Ok(price_feeds_with_update_data) => Ok(price_feeds_with_update_data),
 Err(e) => {
 if let RequestTime::FirstAfter(publish_time) = request_time {
-return Benchmarks::get_verified_price_feeds(store, price_ids, publish_time).await;
+return Benchmarks::get_verified_price_feeds(state, price_ids, publish_time).await;
 }
 Err(e)
 }
 }
 }

-pub async fn get_price_feed_ids<S>(store: &S) -> HashSet<PriceIdentifier>
+pub async fn get_price_feed_ids<S>(state: &S) -> HashSet<PriceIdentifier>
 where
 S: CacheStore,
 {
-store
+state
 .message_state_keys()
 .await
 .iter()
@@ -362,8 +309,8 @@ where
 .collect()
 }

-pub async fn is_ready(store: &Store) -> bool {
-let last_completed_update_at = store.last_completed_update_at.read().await;
+pub async fn is_ready(state: &State) -> bool {
+let last_completed_update_at = state.last_completed_update_at.read().await;
 match last_completed_update_at.as_ref() {
 Some(last_completed_update_at) => {
 last_completed_update_at.elapsed() < READINESS_STALENESS_THRESHOLD
@@ -480,13 +427,13 @@ mod test {
 }
 }

-pub async fn setup_store(cache_size: u64) -> (Arc<Store>, Receiver<()>) {
+pub async fn setup_store(cache_size: u64) -> (Arc<State>, Receiver<()>) {
 let (update_tx, update_rx) = tokio::sync::mpsc::channel(1000);
-let store = Store::new(update_tx, cache_size, None);
+let state = State::new(update_tx, cache_size, None);

 // Add an initial guardian set with public key 0
 update_guardian_set(
-&store,
+&state,
 0,
 GuardianSet {
 keys: vec![[0; 20]],
@@ -494,24 +441,24 @@ mod test {
 )
 .await;

-(store, update_rx)
+(state, update_rx)
 }

-pub async fn store_multiple_concurrent_valid_updates(store: Arc<Store>, updates: Vec<Update>) {
-let res = join_all(updates.into_iter().map(|u| store_update(&store, u))).await;
+pub async fn store_multiple_concurrent_valid_updates(state: Arc<State>, updates: Vec<Update>) {
+let res = join_all(updates.into_iter().map(|u| store_update(&state, u))).await;
 // Check that all store_update calls succeeded
 assert!(res.into_iter().all(|r| r.is_ok()));
 }

 #[tokio::test]
 pub async fn test_store_works() {
-let (store, mut update_rx) = setup_store(10).await;
+let (state, mut update_rx) = setup_store(10).await;

 let price_feed_message = create_dummy_price_feed_message(100, 10, 9);

-// Populate the store
+// Populate the state
 store_multiple_concurrent_valid_updates(
-store.clone(),
+state.clone(),
 generate_update(vec![Message::PriceFeedMessage(price_feed_message)], 10, 20),
 )
 .await;
@@ -521,14 +468,14 @@ mod test {

 // Check the price ids are stored correctly
 assert_eq!(
-get_price_feed_ids(&*store).await,
+get_price_feed_ids(&*state).await,
 vec![PriceIdentifier::new([100; 32])].into_iter().collect()
 );

 // Check get_price_feeds_with_update_data retrieves the correct
 // price feed with correct update data.
 let price_feeds_with_update_data = get_price_feeds_with_update_data(
-&store,
+&state,
 vec![PriceIdentifier::new([100; 32])],
 RequestTime::Latest,
 )
@@ -616,10 +563,10 @@ mod test {

 #[tokio::test]
 pub async fn test_metadata_times_and_readiness_work() {
-// The receiver channel should stay open for the store to work
+// The receiver channel should stay open for the state to work
 // properly. That is why we don't use _ here as it drops the channel
 // immediately.
-let (store, _receiver_tx) = setup_store(10).await;
+let (state, _receiver_tx) = setup_store(10).await;

 let price_feed_message = create_dummy_price_feed_message(100, 10, 9);

@@ -635,9 +582,9 @@ mod test {
 .unwrap()
 .as_secs();

-// Populate the store
+// Populate the state
 store_multiple_concurrent_valid_updates(
-store.clone(),
+state.clone(),
 generate_update(vec![Message::PriceFeedMessage(price_feed_message)], 10, 20),
 )
 .await;
@@ -648,7 +595,7 @@ mod test {

 // Get the price feeds with update data
 let price_feeds_with_update_data = get_price_feeds_with_update_data(
-&store,
+&state,
 vec![PriceIdentifier::new([100; 32])],
 RequestTime::Latest,
 )
@@ -662,28 +609,28 @@ mod test {
 Some(unix_timestamp as i64)
 );

-// Check the store is ready
-assert!(is_ready(&store).await);
+// Check the state is ready
+assert!(is_ready(&state).await);

 // Advance the clock to make the prices stale
 MockClock::advance_system_time(READINESS_STALENESS_THRESHOLD);
 MockClock::advance(READINESS_STALENESS_THRESHOLD);
-// Check the store is not ready
-assert!(!is_ready(&store).await);
+// Check the state is not ready
+assert!(!is_ready(&state).await);
 }

-/// Test that the store retains the latest slots upon cache eviction.
+/// Test that the state retains the latest slots upon cache eviction.
 ///
-/// Store is set up with cache size of 100 and 1000 slot updates will
+/// state is set up with cache size of 100 and 1000 slot updates will
 /// be stored all at the same time with random order.
-/// After the cache eviction, the store should retain the latest 100
+/// After the cache eviction, the state should retain the latest 100
 /// slots regardless of the order.
 #[tokio::test]
 pub async fn test_store_retains_latest_slots_upon_cache_eviction() {
 // The receiver channel should stay open for the store to work
 // properly. That is why we don't use _ here as it drops the channel
 // immediately.
-let (store, _receiver_tx) = setup_store(100).await;
+let (state, _receiver_tx) = setup_store(100).await;

 let mut updates: Vec<Update> = (0..1000)
 .flat_map(|slot| {
@@ -708,12 +655,12 @@ mod test {
 updates.shuffle(&mut rng);

 // Store the updates
-store_multiple_concurrent_valid_updates(store.clone(), updates).await;
+store_multiple_concurrent_valid_updates(state.clone(), updates).await;

 // Check the last 100 slots are retained
 for slot in 900..1000 {
 let price_feeds_with_update_data = get_price_feeds_with_update_data(
-&store,
+&state,
 vec![
 PriceIdentifier::new([100; 32]),
 PriceIdentifier::new([200; 32]),
@@ -730,7 +677,7 @@ mod test {
 // Check nothing else is retained
 for slot in 0..900 {
 assert!(get_price_feeds_with_update_data(
-&store,
+&state,
 vec![
 PriceIdentifier::new([100; 32]),
 PriceIdentifier::new([200; 32]),
@@ -1,14 +1,14 @@
 use {
-crate::store::{
-cache::{
-CacheStore,
-MessageState,
-},
-types::{
+crate::{
+aggregate::types::{
 AccumulatorMessages,
 RawMessage,
 Slot,
 },
+state::cache::{
+CacheStore,
+MessageState,
+},
 },
 anyhow::{
 anyhow,
@@ -1,5 +1,5 @@
 use {
-super::Store,
+super::State,
 anyhow::{
 anyhow,
 Result,
@@ -74,12 +74,12 @@ pub struct GuardianSetData {

 /// Verifies a VAA to ensure it is signed by the Wormhole guardian set.
 pub async fn verify_vaa<'a>(
-store: &Store,
+state: &State,
 vaa: Vaa<&'a RawMessage>,
 ) -> Result<Vaa<&'a RawMessage>> {
 let (header, body): (Header, Body<&RawMessage>) = vaa.into();
 let digest = body.digest()?;
-let guardian_set = store.guardian_set.read().await;
+let guardian_set = state.guardian_set.read().await;
 let guardian_set = guardian_set
 .get(&header.guardian_set_index)
 .ok_or_else(|| {
@@ -2,7 +2,7 @@ use {
 self::ws::notify_updates,
 crate::{
 config::RunOptions,
-store::Store,
+state::State,
 },
 anyhow::Result,
 axum::{
@@ -29,15 +29,15 @@ mod types;
 mod ws;

 #[derive(Clone)]
-pub struct State {
-pub store: Arc<Store>,
+pub struct ApiState {
+pub state: Arc<State>,
 pub ws: Arc<ws::WsState>,
 }

-impl State {
-pub fn new(store: Arc<Store>) -> Self {
+impl ApiState {
+pub fn new(state: Arc<State>) -> Self {
 Self {
-store,
+state,
 ws: Arc::new(ws::WsState::new()),
 }
 }
@@ -47,8 +47,8 @@ impl State {
 ///
 /// Currently this is based on Axum due to the simplicity and strong ecosystem support for the
 /// packages they are based on (tokio & hyper).
-#[tracing::instrument(skip(opts, store, update_rx))]
-pub async fn run(opts: RunOptions, store: Arc<Store>, mut update_rx: Receiver<()>) -> Result<()> {
+#[tracing::instrument(skip(opts, state, update_rx))]
+pub async fn run(opts: RunOptions, state: Arc<State>, mut update_rx: Receiver<()>) -> Result<()> {
 tracing::info!(endpoint = %opts.api_addr, "Starting RPC Server.");

 #[derive(OpenApi)]
@@ -79,7 +79,7 @@ pub async fn run(opts: RunOptions, store: Arc<Store>, mut update_rx: Receiver<()
 )]
 struct ApiDoc;

-let state = State::new(store);
+let state = ApiState::new(state);

 // Initialize Axum Router. Note the type here is a `Router<State>` due to the use of the
 // `with_state` method which replaces `Body` with `State` in the type signature.
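Note: in the API layer the wrapper formerly called `State` becomes `ApiState`, which frees the name `State` for the shared application state and is why `ws.rs` further down aliases axum's extractor as `AxumState`. A hedged sketch of the resulting handler shape (the handler name is illustrative; the extractor and field accesses match the hunks in this commit):

```rust
use axum::extract::State;

// `ApiState` wraps the shared `crate::state::State` plus the websocket state;
// handlers reach the application state through `state.state`.
async fn example_handler(State(state): State<crate::api::ApiState>) {
    let ids = crate::aggregate::get_price_feed_ids(&*state.state).await;
    tracing::debug!(count = ids.len(), "price feeds currently available");
}
```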
@@ -1,5 +1,9 @@
 use {
 crate::{
+aggregate::types::{
+RequestTime,
+UnixTimestamp,
+},
 api::{
 rest::RestError,
 types::{
@@ -8,10 +12,6 @@ use {
 },
 },
 doc_examples,
-store::types::{
-RequestTime,
-UnixTimestamp,
-},
 },
 anyhow::Result,
 axum::{
@@ -60,13 +60,13 @@ pub struct GetPriceFeedQueryParams {
 )
 )]
 pub async fn get_price_feed(
-State(state): State<crate::api::State>,
+State(state): State<crate::api::ApiState>,
 QsQuery(params): QsQuery<GetPriceFeedQueryParams>,
 ) -> Result<Json<RpcPriceFeed>, RestError> {
 let price_id: PriceIdentifier = params.id.into();

-let price_feeds_with_update_data = crate::store::get_price_feeds_with_update_data(
-&*state.store,
+let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
+&*state.state,
 vec![price_id],
 RequestTime::FirstAfter(params.publish_time),
 )
@@ -1,17 +1,17 @@
 use {
 crate::{
-api::{
-rest::RestError,
-types::PriceIdInput,
-},
-doc_examples,
-store::{
+aggregate::{
 self,
 types::{
 RequestTime,
 UnixTimestamp,
 },
 },
+api::{
+rest::RestError,
+types::PriceIdInput,
+},
+doc_examples,
 },
 anyhow::Result,
 axum::{
@@ -70,13 +70,13 @@ pub struct GetVaaResponse {
 )
 )]
 pub async fn get_vaa(
-State(state): State<crate::api::State>,
+State(state): State<crate::api::ApiState>,
 QsQuery(params): QsQuery<GetVaaQueryParams>,
 ) -> Result<Json<GetVaaResponse>, RestError> {
 let price_id: PriceIdentifier = params.id.into();

-let price_feeds_with_update_data = store::get_price_feeds_with_update_data(
-&*state.store,
+let price_feeds_with_update_data = aggregate::get_price_feeds_with_update_data(
+&*state.state,
 vec![price_id],
 RequestTime::FirstAfter(params.publish_time),
 )
|
@ -1,11 +1,11 @@
|
||||||
use {
|
use {
|
||||||
crate::{
|
crate::{
|
||||||
api::rest::RestError,
|
aggregate::types::{
|
||||||
impl_deserialize_for_hex_string_wrapper,
|
|
||||||
store::types::{
|
|
||||||
RequestTime,
|
RequestTime,
|
||||||
UnixTimestamp,
|
UnixTimestamp,
|
||||||
},
|
},
|
||||||
|
api::rest::RestError,
|
||||||
|
impl_deserialize_for_hex_string_wrapper,
|
||||||
},
|
},
|
||||||
anyhow::Result,
|
anyhow::Result,
|
||||||
axum::{
|
axum::{
|
||||||
|
@ -54,7 +54,7 @@ pub struct GetVaaCcipResponse {
|
||||||
)
|
)
|
||||||
)]
|
)]
|
||||||
pub async fn get_vaa_ccip(
|
pub async fn get_vaa_ccip(
|
||||||
State(state): State<crate::api::State>,
|
State(state): State<crate::api::ApiState>,
|
||||||
QsQuery(params): QsQuery<GetVaaCcipQueryParams>,
|
QsQuery(params): QsQuery<GetVaaCcipQueryParams>,
|
||||||
) -> Result<Json<GetVaaCcipResponse>, RestError> {
|
) -> Result<Json<GetVaaCcipResponse>, RestError> {
|
||||||
let price_id: PriceIdentifier = PriceIdentifier::new(
|
let price_id: PriceIdentifier = PriceIdentifier::new(
|
||||||
|
@ -68,8 +68,8 @@ pub async fn get_vaa_ccip(
|
||||||
.map_err(|_| RestError::InvalidCCIPInput)?,
|
.map_err(|_| RestError::InvalidCCIPInput)?,
|
||||||
);
|
);
|
||||||
|
|
||||||
let price_feeds_with_update_data = crate::store::get_price_feeds_with_update_data(
|
let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
|
||||||
&*state.store,
|
&*state.state,
|
||||||
vec![price_id],
|
vec![price_id],
|
||||||
RequestTime::FirstAfter(publish_time),
|
RequestTime::FirstAfter(publish_time),
|
||||||
)
|
)
|
||||||
|
|
|
@ -1,5 +1,6 @@
|
||||||
use {
|
use {
|
||||||
crate::{
|
crate::{
|
||||||
|
aggregate::types::RequestTime,
|
||||||
api::{
|
api::{
|
||||||
rest::RestError,
|
rest::RestError,
|
||||||
types::{
|
types::{
|
||||||
|
@ -7,7 +8,6 @@ use {
|
||||||
RpcPriceFeed,
|
RpcPriceFeed,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
store::types::RequestTime,
|
|
||||||
},
|
},
|
||||||
anyhow::Result,
|
anyhow::Result,
|
||||||
axum::{
|
axum::{
|
||||||
|
@ -59,12 +59,12 @@ pub struct LatestPriceFeedsQueryParams {
|
||||||
)
|
)
|
||||||
)]
|
)]
|
||||||
pub async fn latest_price_feeds(
|
pub async fn latest_price_feeds(
|
||||||
State(state): State<crate::api::State>,
|
State(state): State<crate::api::ApiState>,
|
||||||
QsQuery(params): QsQuery<LatestPriceFeedsQueryParams>,
|
QsQuery(params): QsQuery<LatestPriceFeedsQueryParams>,
|
||||||
) -> Result<Json<Vec<RpcPriceFeed>>, RestError> {
|
) -> Result<Json<Vec<RpcPriceFeed>>, RestError> {
|
||||||
let price_ids: Vec<PriceIdentifier> = params.ids.into_iter().map(|id| id.into()).collect();
|
let price_ids: Vec<PriceIdentifier> = params.ids.into_iter().map(|id| id.into()).collect();
|
||||||
let price_feeds_with_update_data = crate::store::get_price_feeds_with_update_data(
|
let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
|
||||||
&*state.store,
|
&*state.state,
|
||||||
price_ids,
|
price_ids,
|
||||||
RequestTime::Latest,
|
RequestTime::Latest,
|
||||||
)
|
)
|
||||||
|
|
|
@ -1,11 +1,11 @@
|
||||||
use {
|
use {
|
||||||
crate::{
|
crate::{
|
||||||
|
aggregate::types::RequestTime,
|
||||||
api::{
|
api::{
|
||||||
rest::RestError,
|
rest::RestError,
|
||||||
types::PriceIdInput,
|
types::PriceIdInput,
|
||||||
},
|
},
|
||||||
doc_examples,
|
doc_examples,
|
||||||
store::types::RequestTime,
|
|
||||||
},
|
},
|
||||||
anyhow::Result,
|
anyhow::Result,
|
||||||
axum::{
|
axum::{
|
||||||
|
@ -54,12 +54,12 @@ pub struct LatestVaasQueryParams {
|
||||||
),
|
),
|
||||||
)]
|
)]
|
||||||
pub async fn latest_vaas(
|
pub async fn latest_vaas(
|
||||||
State(state): State<crate::api::State>,
|
State(state): State<crate::api::ApiState>,
|
||||||
QsQuery(params): QsQuery<LatestVaasQueryParams>,
|
QsQuery(params): QsQuery<LatestVaasQueryParams>,
|
||||||
) -> Result<Json<Vec<String>>, RestError> {
|
) -> Result<Json<Vec<String>>, RestError> {
|
||||||
let price_ids: Vec<PriceIdentifier> = params.ids.into_iter().map(|id| id.into()).collect();
|
let price_ids: Vec<PriceIdentifier> = params.ids.into_iter().map(|id| id.into()).collect();
|
||||||
let price_feeds_with_update_data = crate::store::get_price_feeds_with_update_data(
|
let price_feeds_with_update_data = crate::aggregate::get_price_feeds_with_update_data(
|
||||||
&*state.store,
|
&*state.state,
|
||||||
price_ids,
|
price_ids,
|
||||||
RequestTime::Latest,
|
RequestTime::Latest,
|
||||||
)
|
)
|
||||||
|
|
|
@ -22,9 +22,9 @@ use {
|
||||||
),
|
),
|
||||||
)]
|
)]
|
||||||
pub async fn price_feed_ids(
|
pub async fn price_feed_ids(
|
||||||
State(state): State<crate::api::State>,
|
State(state): State<crate::api::ApiState>,
|
||||||
) -> Result<Json<Vec<RpcPriceIdentifier>>, RestError> {
|
) -> Result<Json<Vec<RpcPriceIdentifier>>, RestError> {
|
||||||
let price_feed_ids = crate::store::get_price_feed_ids(&*state.store)
|
let price_feed_ids = crate::aggregate::get_price_feed_ids(&*state.state)
|
||||||
.await
|
.await
|
||||||
.iter()
|
.iter()
|
||||||
.map(RpcPriceIdentifier::from)
|
.map(RpcPriceIdentifier::from)
|
||||||
|
|
|
@ -7,8 +7,8 @@ use axum::{
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
pub async fn ready(State(state): State<crate::api::State>) -> Response {
|
pub async fn ready(State(state): State<crate::api::ApiState>) -> Response {
|
||||||
match crate::store::is_ready(&state.store).await {
|
match crate::aggregate::is_ready(&state.state).await {
|
||||||
true => (StatusCode::OK, "OK").into_response(),
|
true => (StatusCode::OK, "OK").into_response(),
|
||||||
false => (StatusCode::SERVICE_UNAVAILABLE, "Service Unavailable").into_response(),
|
false => (StatusCode::SERVICE_UNAVAILABLE, "Service Unavailable").into_response(),
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,12 +1,12 @@
|
||||||
use {
|
use {
|
||||||
crate::{
|
crate::{
|
||||||
doc_examples,
|
aggregate::types::{
|
||||||
impl_deserialize_for_hex_string_wrapper,
|
|
||||||
store::types::{
|
|
||||||
PriceFeedUpdate,
|
PriceFeedUpdate,
|
||||||
Slot,
|
Slot,
|
||||||
UnixTimestamp,
|
UnixTimestamp,
|
||||||
},
|
},
|
||||||
|
doc_examples,
|
||||||
|
impl_deserialize_for_hex_string_wrapper,
|
||||||
},
|
},
|
||||||
base64::{
|
base64::{
|
||||||
engine::general_purpose::STANDARD as base64_standard_engine,
|
engine::general_purpose::STANDARD as base64_standard_engine,
|
||||||
|
|
|
@@ -3,9 +3,9 @@ use {
 PriceIdInput,
 RpcPriceFeed,
 },
-crate::store::{
-types::RequestTime,
-Store,
+crate::{
+aggregate::types::RequestTime,
+state::State,
 },
 anyhow::{
 anyhow,
@@ -18,7 +18,7 @@ use {
 WebSocket,
 WebSocketUpgrade,
 },
-State,
+State as AxumState,
 },
 response::IntoResponse,
 },
@@ -56,13 +56,13 @@ pub const NOTIFICATIONS_CHAN_LEN: usize = 1000;

 pub async fn ws_route_handler(
 ws: WebSocketUpgrade,
-State(state): State<super::State>,
+AxumState(state): AxumState<super::ApiState>,
 ) -> impl IntoResponse {
 ws.on_upgrade(|socket| websocket_handler(socket, state))
 }

 #[tracing::instrument(skip(stream, state))]
-async fn websocket_handler(stream: WebSocket, state: super::State) {
+async fn websocket_handler(stream: WebSocket, state: super::ApiState) {
 let ws_state = state.ws.clone();
 let id = ws_state.subscriber_counter.fetch_add(1, Ordering::SeqCst);
 tracing::debug!(id, "New Websocket Connection");
@@ -70,7 +70,7 @@ async fn websocket_handler(stream: WebSocket, state: super::State) {
 let (notify_sender, notify_receiver) = mpsc::channel::<()>(NOTIFICATIONS_CHAN_LEN);
 let (sender, receiver) = stream.split();
 let mut subscriber =
-Subscriber::new(id, state.store.clone(), notify_receiver, receiver, sender);
+Subscriber::new(id, state.state.clone(), notify_receiver, receiver, sender);

 ws_state.subscribers.insert(id, notify_sender);
 subscriber.run().await;
@@ -83,7 +83,7 @@ pub type SubscriberId = usize;
 pub struct Subscriber {
 id: SubscriberId,
 closed: bool,
-store: Arc<Store>,
+store: Arc<State>,
 notify_receiver: mpsc::Receiver<()>,
 receiver: SplitStream<WebSocket>,
 sender: SplitSink<WebSocket, Message>,
@@ -95,7 +95,7 @@ pub struct Subscriber {
 impl Subscriber {
 pub fn new(
 id: SubscriberId,
-store: Arc<Store>,
+store: Arc<State>,
 notify_receiver: mpsc::Receiver<()>,
 receiver: SplitStream<WebSocket>,
 sender: SplitSink<WebSocket, Message>,
@@ -149,7 +149,7 @@ impl Subscriber {

 async fn handle_price_feeds_update(&mut self) -> Result<()> {
 let price_feed_ids = self.price_feeds_with_config.keys().cloned().collect();
-for update in crate::store::get_price_feeds_with_update_data(
+for update in crate::aggregate::get_price_feeds_with_update_data(
 &*self.store,
 price_feed_ids,
 RequestTime::Latest,
@@ -233,7 +233,7 @@ impl Subscriber {
 binary,
 }) => {
 let price_ids: Vec<PriceIdentifier> = ids.into_iter().map(|id| id.into()).collect();
-let available_price_ids = crate::store::get_price_feed_ids(&*self.store).await;
+let available_price_ids = crate::aggregate::get_price_feed_ids(&*self.store).await;

 let not_found_price_ids: Vec<&PriceIdentifier> = price_ids
 .iter()
@@ -1,4 +1,4 @@
-use crate::store::types::UnixTimestamp;
+use crate::aggregate::types::UnixTimestamp;

 // Example values for the utoipa API docs.
 // Note that each of these expressions is only evaluated once when the documentation is created,
@@ -2,7 +2,7 @@
 #![feature(btree_cursors)]

 use {
-crate::store::Store,
+crate::state::State,
 anyhow::Result,
 futures::future::join_all,
 std::{
@@ -13,12 +13,13 @@ use {
 tokio::spawn,
 };

+mod aggregate;
 mod api;
 mod config;
 mod doc_examples;
 mod macros;
 mod network;
-mod store;
+mod state;

 // A static exit flag to indicate to running threads that we're shutting down. This is used to
 // gracefully shutdown the application.
@@ -43,7 +44,7 @@ async fn init() -> Result<()> {
 let (update_tx, update_rx) = tokio::sync::mpsc::channel(1000);

 // Initialize a cache store with a 1000 element circular buffer.
-let store = Store::new(update_tx.clone(), 1000, opts.benchmarks_endpoint.clone());
+let store = State::new(update_tx.clone(), 1000, opts.benchmarks_endpoint.clone());

 // Listen for Ctrl+C so we can set the exit flag and wait for a graceful shutdown. We
 // also send off any notifications needed to close off any waiting tasks.
@@ -11,11 +11,9 @@

 use {
 crate::{
+aggregate::types::Update,
 config::RunOptions,
-store::{
-types::Update,
-Store,
-},
+state::State,
 },
 anyhow::Result,
 libp2p::Multiaddr,
@@ -176,7 +174,7 @@ pub fn bootstrap(

 // Spawn's the P2P layer as a separate thread via Go.
 #[tracing::instrument(skip(opts, store))]
-pub async fn spawn(opts: RunOptions, store: Arc<Store>) -> Result<()> {
+pub async fn spawn(opts: RunOptions, store: Arc<State>) -> Result<()> {
 tracing::info!(listeners = ?opts.wh_listen_addrs, "Starting P2P Server");

 std::thread::spawn(|| {
@@ -213,7 +211,7 @@ pub async fn spawn(opts: RunOptions, store: Arc<Store>) -> Result<()> {

 let store = store.clone();
 tokio::spawn(async move {
-if let Err(e) = crate::store::store_update(&store, Update::Vaa(vaa)).await {
+if let Err(e) = crate::aggregate::store_update(&store, Update::Vaa(vaa)).await {
 tracing::error!(error = ?e, "Failed to process VAA.");
 }
 });
@@ -4,8 +4,7 @@

 use {
 crate::{
-config::RunOptions,
-store::{
+aggregate::{
 types::{
 AccumulatorMessages,
 Update,
@@ -15,8 +14,9 @@ use {
 GuardianSet,
 GuardianSetData,
 },
-Store,
 },
+config::RunOptions,
+state::State,
 },
 anyhow::{
 anyhow,
@@ -128,7 +128,7 @@ async fn fetch_bridge_data(
 }
 }

-pub async fn run(store: Arc<Store>, pythnet_ws_endpoint: String) -> Result<()> {
+pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<()> {
 let client = PubsubClient::new(pythnet_ws_endpoint.as_ref()).await?;

 let config = RpcProgramAccountsConfig {
@@ -175,7 +175,7 @@ pub async fn run(store: Arc<Store>, pythnet_ws_endpoint: String) -> Result<()> {
 if candidate.to_string() == update.value.pubkey {
 let store = store.clone();
 tokio::spawn(async move {
-if let Err(err) = crate::store::store_update(
+if let Err(err) = crate::aggregate::store_update(
 &store,
 Update::AccumulatorMessages(accumulator_messages),
 )
@@ -213,7 +213,7 @@ pub async fn run(store: Arc<Store>, pythnet_ws_endpoint: String) -> Result<()> {
 /// sets from a deployed Wormhole contract. Note that we only fetch the last two accounts due to
 /// the fact that during a Wormhole upgrade, there will only be messages produces from those two.
 async fn fetch_existing_guardian_sets(
-store: Arc<Store>,
+store: Arc<State>,
 pythnet_http_endpoint: String,
 wormhole_contract_addr: Pubkey,
 ) -> Result<()> {
@@ -230,7 +230,7 @@ async fn fetch_existing_guardian_sets(
 "Retrieved Current GuardianSet.",
 );

-crate::store::update_guardian_set(&store, bridge.guardian_set_index, current).await;
+crate::aggregate::update_guardian_set(&store, bridge.guardian_set_index, current).await;

 // If there are more than one guardian set, we want to fetch the previous one as well as it
 // may still be in transition phase if a guardian upgrade has just occurred.
@@ -248,14 +248,15 @@ async fn fetch_existing_guardian_sets(
 "Retrieved Previous GuardianSet.",
 );

-crate::store::update_guardian_set(&store, bridge.guardian_set_index - 1, previous).await;
+crate::aggregate::update_guardian_set(&store, bridge.guardian_set_index - 1, previous)
+.await;
 }

 Ok(())
 }

 #[tracing::instrument(skip(opts, store))]
-pub async fn spawn(opts: RunOptions, store: Arc<Store>) -> Result<()> {
+pub async fn spawn(opts: RunOptions, store: Arc<State>) -> Result<()> {
 tracing::info!(
 endpoint = opts.pythnet_ws_endpoint,
 "Started Pythnet Listener."
@@ -0,0 +1,66 @@
+#[cfg(test)]
+use mock_instant::{
+Instant,
+SystemTime,
+UNIX_EPOCH,
+};
+#[cfg(not(test))]
+use std::time::Instant;
+use {
+self::cache::Cache,
+crate::aggregate::wormhole::GuardianSet,
+reqwest::Url,
+std::{
+collections::{
+BTreeMap,
+BTreeSet,
+},
+sync::Arc,
+},
+tokio::sync::{
+mpsc::Sender,
+RwLock,
+},
+};
+
+pub mod benchmarks;
+pub mod cache;
+
+pub struct State {
+/// Storage is a short-lived cache of the state of all the updates that have been passed to the
+/// store.
+pub cache: Cache,
+
+/// Sequence numbers of lately observed Vaas. Store uses this set
+/// to ignore the previously observed Vaas as a performance boost.
+pub observed_vaa_seqs: RwLock<BTreeSet<u64>>,
+
+/// Wormhole guardian sets. It is used to verify Vaas before using them.
+pub guardian_set: RwLock<BTreeMap<u32, GuardianSet>>,
+
+/// The sender to the channel between Store and Api to notify completed updates.
+pub update_tx: Sender<()>,
+
+/// Time of the last completed update. This is used for the health probes.
+pub last_completed_update_at: RwLock<Option<Instant>>,
+
+/// Benchmarks endpoint
+pub benchmarks_endpoint: Option<Url>,
+}
+
+impl State {
+pub fn new(
+update_tx: Sender<()>,
+cache_size: u64,
+benchmarks_endpoint: Option<Url>,
+) -> Arc<Self> {
+Arc::new(Self {
+cache: Cache::new(cache_size),
+observed_vaa_seqs: RwLock::new(Default::default()),
+guardian_set: RwLock::new(Default::default()),
+update_tx,
+last_completed_update_at: RwLock::new(None),
+benchmarks_endpoint,
+})
+}
+}
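Note: the new `state.rs` above is essentially the old `Store` struct under a new name. A hedged wiring sketch, mirroring what `main.rs` and the test helper `setup_store` do elsewhere in this commit (the 1000-element cache size and channel capacity are the values used there; the `build_state` helper name is illustrative):

```rust
use std::sync::Arc;

// Sketch only: create the update-notification channel and the shared state.
// The receiver half is later handed to the API server so completed updates
// can be fanned out to websocket subscribers.
fn build_state() -> (Arc<crate::state::State>, tokio::sync::mpsc::Receiver<()>) {
    let (update_tx, update_rx) = tokio::sync::mpsc::channel::<()>(1000);
    // 1000-element cache, no Benchmarks endpoint configured.
    let state = crate::state::State::new(update_tx, 1000, None);
    (state, update_rx)
}
```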
@@ -1,7 +1,7 @@
 //! This module communicates with Pyth Benchmarks, an API for historical price feeds and their updates.

 use {
-super::types::{
+crate::aggregate::types::{
 PriceFeedUpdate,
 PriceFeedsWithUpdateData,
 UnixTimestamp,
@@ -85,7 +85,7 @@ pub trait Benchmarks {
 }

 #[async_trait::async_trait]
-impl Benchmarks for crate::store::Store {
+impl Benchmarks for crate::state::State {
 async fn get_verified_price_feeds(
 &self,
 price_ids: Vec<PriceIdentifier>,
@@ -1,5 +1,5 @@
 use {
-super::{
+crate::aggregate::{
 proof::wormhole_merkle::WormholeMerkleState,
 types::{
 AccumulatorMessages,
@@ -178,7 +178,7 @@ impl Cache {
 }

 #[async_trait::async_trait]
-impl CacheStore for crate::store::Store {
+impl CacheStore for crate::state::State {
 async fn message_state_keys(&self) -> Vec<MessageStateKey> {
 self.cache
 .message_cache
@@ -272,7 +272,7 @@ impl CacheStore for crate::store::Store {
 mod test {
 use {
 super::*,
-crate::store::{
+crate::aggregate::{
 proof::wormhole_merkle::{
 WormholeMerkleMessageProof,
 WormholeMerkleState,