pythnet: replace pythnet_sdk with the right files (#803)

Reisen 2023-05-05 21:04:49 +01:00 committed by GitHub
parent 71bab7a9fd
commit 1902dbaa30
13 changed files with 715 additions and 931 deletions

View File

@@ -1,30 +1,40 @@
[package]
name = "solana-pyth"
name = "pythnet-sdk"
version = "1.13.6"
description = "Pyth Runtime for Solana"
authors = ["Pyth Data Association"]
repository = "https://github.com/pyth-network/pythnet"
edition = "2021"
[lib]
crate-type = ["lib"]
name = "solana_pyth"
[dependencies]
borsh = "0.9.1"
bincode = "1.3.1"
borsh = "0.9.1"
bytemuck = { version = "1.11.0", features = ["derive"] }
fast-math = "0.1"
hex = { version = "0.4.3", features = ["serde"] }
serde = { version = "1.0.144", features = ["derive"] }
serde_wormhole = { git = "https://github.com/wormhole-foundation/wormhole" }
sha3 = "0.10.4"
slow_primes = "0.1.14"
wormhole-sdk = { git = "https://github.com/wormhole-foundation/wormhole" }
[dev-dependencies]
base64 = "0.21.0"
rand = "0.7.0"
[lib]
crate-type = ["lib"]
name = "solana_pyth"
serde_json = "1.0.96"
solana-client = { path = "../client" }
solana-sdk = { path = "../sdk" }
proptest = "1.1.0"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[build-dependencies]
rustc_version = "0.4"
[patch.crates-io]
serde_wormhole = { git = "https://github.com/wormhole-foundation/wormhole" }

View File

@@ -0,0 +1,124 @@
// Use the Solana client library to pull the addresses of all relevant accounts from PythNet so we
// can test locally.
// #![feature(proc_macro_hygiene)]
use {
serde_json::json,
solana_client::rpc_client::RpcClient,
solana_pyth::PYTH_PID,
solana_sdk::pubkey::Pubkey,
std::str::FromStr,
std::io::Write,
};
fn main() {
let client = RpcClient::new("http://pythnet.rpcpool.com/".to_string());
let pythnet = Pubkey::from_str("FsJ3A3u2vn5cTVofAjvy6y5kwABJAqYWpe4975bi2epH").unwrap();
let wormhole = Pubkey::from_str("H3fxXJ86ADW2PNuDDmZJg6mzTtPxkYCpNuQUTgmJ7AjU").unwrap();
// Create a folder called `accounts` in the current directory; if it already exists that is
// OK (`create_dir_all` succeeds whether or not the folder is empty).
std::fs::create_dir_all("accounts").unwrap();
// Download all PythNet accounts into .json files in the current directory.
{
let pythnet_accounts = client.get_program_accounts(&pythnet).map_err(|e| {
println!("{e}");
e
});
pythnet_accounts
.unwrap()
.into_iter()
.for_each(|(pubkey, _account)| {
// This writes the account as JSON into a file that solana-test-validator can read into
// the ledger. Each account should be written into a file named `<pubkey>.json`
let account = client.get_account(&pubkey).unwrap();
// Now write to <pubkey>.json.
std::fs::write(
format!("accounts/{pubkey}.json"),
json!({
"pubkey": pubkey.to_string(),
"account": {
"lamports": account.lamports,
"data": [
base64::encode(&account.data),
"base64"
],
"owner": account.owner.to_string(),
"executable": account.executable,
"rentEpoch": account.rent_epoch,
}
})
.to_string(),
)
.unwrap();
});
}
// Download only the Wormhole program into a .json file in the current directory. Instead of
// getting the program's accounts we just want the Wormhole account itself.
{
let wormhole_account = client.get_account(&wormhole).unwrap();
// Now write to wormhole.json.
std::fs::write(
format!("accounts/{wormhole}.json"),
json!({
"pubkey": wormhole.to_string(),
"account": {
"lamports": wormhole_account.lamports,
"data": [
base64::encode(&wormhole_account.data),
"base64"
],
"owner": wormhole_account.owner.to_string(),
"executable": wormhole_account.executable,
"rentEpoch": wormhole_account.rent_epoch,
}
})
.to_string(),
)
.unwrap();
}
// Same for the Pyth program.
{
let pyth_account = client.get_account(&pythnet).unwrap();
// Now write to pyth.json.
std::fs::write(
format!("accounts/{pythnet}.json"),
json!({
"pubkey": pythnet.to_string(),
"account": {
"lamports": pyth_account.lamports,
"data": [
base64::encode(&pyth_account.data),
"base64"
],
"owner": pyth_account.owner.to_string(),
"executable": pyth_account.executable,
"rentEpoch": pyth_account.rent_epoch,
}
})
.to_string(),
)
.unwrap();
}
// Write names of AccumulatorState accounts to pdas.txt
{
let mut file = std::fs::File::create("pdas.txt").unwrap();
for i in 0..10_000u32 {
let (accumulator_account, _) = Pubkey::find_program_address(
&[b"AccumulatorState", &PYTH_PID, &i.to_be_bytes()],
&solana_sdk::system_program::id(),
);
file.write_all(format!("{}\n", accumulator_account).as_bytes())
.unwrap();
}
}
}
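// Illustrative usage note (not part of this commit): the dumped files use the JSON shape that
// `solana-test-validator` accepts, so assuming that binary is installed they can be loaded into
// a local ledger with one `--account` flag per file, e.g.:
//
//   solana-test-validator --account <PUBKEY> accounts/<PUBKEY>.json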

View File

@@ -1,9 +1,31 @@
pub mod merkle;
mod mul;
//! Accumulators
//!
//! This module defines the Accumulator abstraction as well as the implementation details for
//! several different accumulators. This library can be used for interacting with PythNet state
//! proofs for account content.
pub trait Accumulator<'a>: Sized {
type Proof: 'a;
fn from_set(items: impl Iterator<Item = &'a &'a [u8]>) -> Option<Self>;
pub mod merkle;
pub mod mul;
/// The Accumulator trait defines the interface for an accumulator.
///
/// This trait assumes an accumulator has an associated proof type that can be used to prove
/// membership of a specific item. The choice to return Proof makes this the most generic
/// implementation possible for any accumulator.
pub trait Accumulator<'a>
where
Self: Sized,
Self::Proof: 'a,
Self::Proof: Sized,
{
type Proof;
/// Prove an item is a member of the accumulator.
fn prove(&'a self, item: &[u8]) -> Option<Self::Proof>;
fn verify(&'a self, proof: Self::Proof, item: &[u8]) -> bool;
/// Verify an item is a member of the accumulator.
fn check(&'a self, proof: Self::Proof, item: &[u8]) -> bool;
/// Create an accumulator from a set of items.
fn from_set(items: impl Iterator<Item = &'a [u8]>) -> Option<Self>;
}
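// Illustrative only (not part of this commit): a minimal sketch of driving the trait
// generically. `prove_and_check` is a hypothetical helper that round-trips an item through
// `prove` and `check` for any `Accumulator` implementation.
fn prove_and_check<'a, A: Accumulator<'a>>(accumulator: &'a A, item: &[u8]) -> bool {
    match accumulator.prove(item) {
        // A genuine member should produce a proof that then checks out.
        Some(proof) => accumulator.check(proof, item),
        // `prove` returns None for non-members.
        None => false,
    }
}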

View File

@@ -1,13 +1,12 @@
// TODO: Go back to a reference based implementation ala Solana's original.
//! A MerkleTree based Accumulator.
use {
crate::{
accumulators::Accumulator,
hashers::{
keccak256::Keccak256Hasher,
keccak256::Keccak256,
Hasher,
},
PriceId,
},
borsh::{
BorshDeserialize,
@@ -17,240 +16,165 @@ use {
Deserialize,
Serialize,
},
std::collections::HashSet,
};
// We need to discern between leaf and intermediate nodes to prevent trivial second
// pre-image attacks.
// https://flawed.net.nz/2018/02/21/attacking-merkle-trees-with-a-second-preimage-attack
// We need to discern between leaf and intermediate nodes to prevent trivial second pre-image
// attacks. If we did not do this, it would be possible for an attacker to intentionally create
// non-leaf nodes that have the same hash as a leaf node, and then use that to prove the
// existence of a leaf node that does not exist.
//
// See:
//
// - https://flawed.net.nz/2018/02/21/attacking-merkle-trees-with-a-second-preimage-attack
// - https://en.wikipedia.org/wiki/Merkle_tree#Second_preimage_attack
//
// NOTE: We use a NULL prefix for leaf nodes to distinguish them from the empty message ("").
// While there is no code path that allows empty messages, this is a safety measure to prevent
// future vulnerabilities from being introduced.
const LEAF_PREFIX: &[u8] = &[0];
const INTERMEDIATE_PREFIX: &[u8] = &[1];
const NODE_PREFIX: &[u8] = &[1];
const NULL_PREFIX: &[u8] = &[2];
macro_rules! hash_leaf {
{$x:ty, $d:ident} => {
<$x as Hasher>::hashv(&[LEAF_PREFIX, $d])
fn hash_leaf<H: Hasher>(leaf: &[u8]) -> H::Hash {
H::hashv(&[LEAF_PREFIX, leaf])
}
fn hash_node<H: Hasher>(l: &H::Hash, r: &H::Hash) -> H::Hash {
H::hashv(&[
NODE_PREFIX,
(if l <= r { l } else { r }).as_ref(),
(if l <= r { r } else { l }).as_ref(),
])
}
fn hash_null<H: Hasher>() -> H::Hash {
H::hashv(&[NULL_PREFIX])
}
#[derive(Clone, Default, Debug, PartialEq, Eq, Serialize)]
pub struct MerklePath<H: Hasher>(Vec<H::Hash>);
impl<H: Hasher> MerklePath<H> {
pub fn new(path: Vec<H::Hash>) -> Self {
Self(path)
}
}
macro_rules! hash_intermediate {
{$x:ty, $l:ident, $r:ident} => {
<$x as Hasher>::hashv(&[INTERMEDIATE_PREFIX, $l.as_ref(), $r.as_ref()])
}
}
/// An implementation of a Sha3/Keccak256 based Merkle Tree based on the implementation provided by
/// solana-merkle-tree. This modifies the structure slightly to be serialization friendly, and to
/// make verification cheaper on EVM based networks.
/// A MerkleAccumulator maintains a Merkle Tree.
///
/// The implementation is based on Solana's Merkle Tree implementation. This structure also
/// stores the nodes of the tree, because creating a proof requires looking up the index of an
/// item's leaf hash within the tree.
#[derive(
Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, Serialize, Deserialize, Default,
)]
pub struct MerkleTree<H: Hasher = Keccak256Hasher> {
pub leaf_count: usize,
pub nodes: Vec<H::Hash>,
pub struct MerkleAccumulator<H: Hasher = Keccak256> {
pub root: H::Hash,
#[serde(skip)]
pub nodes: Vec<H::Hash>,
}
pub struct MerkleAccumulator<'a, H: Hasher = Keccak256Hasher> {
pub accumulator: MerkleTree<H>,
/// A list of the original items inserted into the tree.
///
/// The full list is kept because proofs require the index of each item in the tree, by
/// keeping the nodes we can look up the position in the original list for proof
/// verification.
pub items: Vec<&'a [u8]>,
// Layout:
//
// ```
// 4 bytes: magic number
// 1 byte: update type
// 4 bytes: storage id
// 32 bytes: root hash
// ```
//
// TODO: This code does not belong to MerkleAccumulator; we should be using the wire data types
// in calling code to wrap this value.
impl<'a, H: Hasher + 'a> MerkleAccumulator<H> {
pub fn serialize(&self, storage: u32) -> Vec<u8> {
let mut serialized = vec![];
serialized.extend_from_slice(0x41555756u32.to_be_bytes().as_ref());
serialized.extend_from_slice(0u8.to_be_bytes().as_ref());
serialized.extend_from_slice(storage.to_be_bytes().as_ref());
serialized.extend_from_slice(self.root.as_ref());
serialized
}
}
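// Illustrative only (not part of this commit): a hedged sketch of reading the layout documented
// above back out of the serialized bytes. `parse_serialized_root` is a hypothetical helper,
// assuming the 4 + 1 + 4 + 32 byte layout that `serialize` emits.
fn parse_serialized_root(bytes: &[u8]) -> Option<(u32, &[u8])> {
    let magic = u32::from_be_bytes(bytes.get(0..4)?.try_into().ok()?);
    if magic != 0x41555756 {
        return None;
    }
    let _update_type = *bytes.get(4)?; // Always 0 in this format.
    let storage = u32::from_be_bytes(bytes.get(5..9)?.try_into().ok()?);
    let root = bytes.get(9..41)?; // 32-byte root hash.
    Some((storage, root))
}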
impl<'a, H: Hasher + 'a> Accumulator<'a> for MerkleAccumulator<'a, H> {
impl<'a, H: Hasher + 'a> Accumulator<'a> for MerkleAccumulator<H> {
type Proof = MerklePath<H>;
fn from_set(items: impl Iterator<Item = &'a &'a [u8]>) -> Option<Self> {
let items: Vec<&[u8]> = items.copied().collect();
let tree = MerkleTree::new(&items);
Some(Self {
accumulator: tree,
items,
})
fn from_set(items: impl Iterator<Item = &'a [u8]>) -> Option<Self> {
let items: Vec<&[u8]> = items.collect();
Self::new(&items)
}
fn prove(&'a self, item: &[u8]) -> Option<Self::Proof> {
let index = self.items.iter().position(|i| i == &item)?;
self.accumulator.find_path(index)
let item = hash_leaf::<H>(item);
let index = self.nodes.iter().position(|i| i == &item)?;
Some(self.find_path(index))
}
fn verify(&'a self, proof: Self::Proof, item: &[u8]) -> bool {
let item = hash_leaf!(H, item);
proof.validate(item)
// NOTE: This `check` call is intended to be generic across accumulator implementations, but
// for a Merkle tree the proof does not use the `self` parameter, as the proof is standalone
// and doesn't need the original nodes. Normally a Merkle API would be something like:
//
// ```
// MerkleTree::check(proof)
// ```
//
// or even:
//
// ```
// proof.verify()
// ```
//
// But to stick to the Accumulator trait we do it via the trait method.
fn check(&'a self, proof: Self::Proof, item: &[u8]) -> bool {
let mut current = hash_leaf::<H>(item);
for hash in proof.0 {
current = hash_node::<H>(&current, &hash);
}
current == self.root
}
}
impl<H: Hasher> MerkleTree<H> {
#[inline]
fn next_level_len(level_len: usize) -> usize {
if level_len == 1 {
0
} else {
(level_len + 1) / 2
}
}
fn calculate_vec_capacity(leaf_count: usize) -> usize {
// the most nodes consuming case is when n-1 is full balanced binary tree
// then n will cause the previous tree add a left only path to the root
// this cause the total nodes number increased by tree height, we use this
// condition as the max nodes consuming case.
// n is current leaf nodes number
// assuming n-1 is a full balanced binary tree, n-1 tree nodes number will be
// 2(n-1) - 1, n tree height is closed to log2(n) + 1
// so the max nodes number is 2(n-1) - 1 + log2(n) + 1, finally we can use
// 2n + log2(n+1) as a safe capacity value.
// test results:
// 8192 leaf nodes(full balanced):
// computed cap is 16398, actually using is 16383
// 8193 leaf nodes:(full balanced plus 1 leaf):
// computed cap is 16400, actually using is 16398
// about performance: current used fast_math log2 code is constant algo time
if leaf_count > 0 {
fast_math::log2_raw(leaf_count as f32) as usize + 2 * leaf_count + 1
} else {
0
}
}
pub fn new<T: AsRef<[u8]>>(items: &[T]) -> Self {
let cap = MerkleTree::<H>::calculate_vec_capacity(items.len());
let mut mt = MerkleTree {
leaf_count: items.len(),
nodes: Vec::with_capacity(cap),
};
for item in items {
let item = item.as_ref();
let hash = hash_leaf!(H, item);
mt.nodes.push(hash);
}
let mut level_len = MerkleTree::<H>::next_level_len(items.len());
let mut level_start = items.len();
let mut prev_level_len = items.len();
let mut prev_level_start = 0;
while level_len > 0 {
for i in 0..level_len {
let prev_level_idx = 2 * i;
let lsib: &H::Hash = &mt.nodes[prev_level_start + prev_level_idx];
let rsib: &H::Hash = if prev_level_idx + 1 < prev_level_len {
&mt.nodes[prev_level_start + prev_level_idx + 1]
} else {
// Duplicate last entry if the level length is odd
&mt.nodes[prev_level_start + prev_level_idx]
};
let hash = hash_intermediate!(H, lsib, rsib);
mt.nodes.push(hash);
}
prev_level_start = level_start;
prev_level_len = level_len;
level_start += level_len;
level_len = MerkleTree::<H>::next_level_len(level_len);
}
mt
}
pub fn get_root(&self) -> Option<&H::Hash> {
self.nodes.iter().last()
}
pub fn find_path(&self, index: usize) -> Option<MerklePath<H>> {
if index >= self.leaf_count {
impl<H: Hasher> MerkleAccumulator<H> {
pub fn new(items: &[&[u8]]) -> Option<Self> {
if items.is_empty() {
return None;
}
let mut level_len = self.leaf_count;
let mut level_start = 0;
let mut path = MerklePath::<H>::default();
let mut node_index = index;
let mut lsib = None;
let mut rsib = None;
while level_len > 0 {
let level = &self.nodes[level_start..(level_start + level_len)];
let depth = items.len().next_power_of_two().trailing_zeros();
let mut tree: Vec<H::Hash> = vec![Default::default(); 1 << (depth + 1)];
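// The tree is stored as a 1-indexed implicit binary heap: tree[1] is the root, the
// children of node i live at 2i and 2i+1, and the leaves occupy tree[(1 << depth)..].
// Index 0 is unused.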
let target = level[node_index];
if lsib.is_some() || rsib.is_some() {
path.push(MerkleNode::new(target, lsib, rsib));
}
if node_index % 2 == 0 {
lsib = None;
rsib = if node_index + 1 < level.len() {
Some(level[node_index + 1])
} else {
Some(level[node_index])
};
// Filling the leaf hashes
for i in 0..(1 << depth) {
if i < items.len() {
tree[(1 << depth) + i] = hash_leaf::<H>(items[i].as_ref());
} else {
lsib = Some(level[node_index - 1]);
rsib = None;
tree[(1 << depth) + i] = hash_null::<H>();
}
node_index /= 2;
level_start += level_len;
level_len = MerkleTree::<H>::next_level_len(level_len);
}
Some(path)
}
}
#[derive(Clone, Default, Debug, PartialEq, Eq, Serialize)]
pub struct MerklePath<H: Hasher>(Vec<MerkleNode<H>>);
impl<H: Hasher> MerklePath<H> {
pub fn push(&mut self, entry: MerkleNode<H>) {
self.0.push(entry)
}
pub fn validate(&self, candidate: H::Hash) -> bool {
let result = self.0.iter().try_fold(candidate, |candidate, pe| {
let lsib = &pe.1.unwrap_or(candidate);
let rsib = &pe.2.unwrap_or(candidate);
let hash = hash_intermediate!(H, lsib, rsib);
if hash == pe.0 {
Some(hash)
} else {
None
// Filling the node hashes from bottom to top
for k in (1..=depth).rev() {
let level = k - 1;
let level_num_nodes = 1 << level;
for i in 0..level_num_nodes {
let id = (1 << level) + i;
tree[id] = hash_node::<H>(&tree[id * 2], &tree[id * 2 + 1]);
}
});
matches!(result, Some(_))
}
Some(Self {
root: tree[1],
nodes: tree,
})
}
}
#[derive(Clone, Default, Debug, PartialEq, Eq, Serialize)]
pub struct MerkleNode<H: Hasher>(H::Hash, Option<H::Hash>, Option<H::Hash>);
impl<'a, H: Hasher> MerkleNode<H> {
pub fn new(
target: H::Hash,
left_sibling: Option<H::Hash>,
right_sibling: Option<H::Hash>,
) -> Self {
assert!(left_sibling.is_none() ^ right_sibling.is_none());
Self(target, left_sibling, right_sibling)
}
}
//TODO: update this to correct value/type later
//
/** using `sdk/program/src/slot_hashes.rs` as a reference **/
//TODO: newtype or type alias?
// also double check alignment in conjunction with `AccumulatorPrice`
// #[repr(transparent)
#[derive(Serialize, PartialEq, Eq, Default)]
pub struct PriceProofs<H: Hasher>(Vec<(PriceId, MerklePath<H>)>);
impl<H: Hasher> PriceProofs<H> {
pub fn new(price_proofs: &[(PriceId, MerklePath<H>)]) -> Self {
let mut price_proofs = price_proofs.to_vec();
price_proofs.sort_by(|(a, _), (b, _)| a.cmp(b));
Self(price_proofs)
fn find_path(&self, mut index: usize) -> MerklePath<H> {
let mut path = Vec::new();
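// Walk from the leaf's position up toward the root, recording the sibling at each level;
// in the implicit heap the sibling of node `index` is `index ^ 1`. Because `hash_node`
// orders each pair by value, the path needs no left/right direction flags.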
while index > 1 {
path.push(self.nodes[index ^ 1].clone());
index /= 2;
}
MerklePath::new(path)
}
}
@@ -258,7 +182,11 @@ impl<H: Hasher> PriceProofs<H> {
mod test {
use {
super::*,
std::mem::size_of,
proptest::prelude::*,
std::{
collections::BTreeSet,
mem::size_of,
},
};
#[derive(Default, Clone, Debug, borsh::BorshSerialize)]
@@ -288,9 +216,56 @@ mod test {
}
}
#[derive(Debug)]
struct MerkleAccumulatorDataWrapper {
pub accumulator: MerkleAccumulator,
pub data: BTreeSet<Vec<u8>>,
}
impl Arbitrary for MerkleAccumulatorDataWrapper {
type Parameters = usize;
fn arbitrary_with(size: Self::Parameters) -> Self::Strategy {
let size = size.saturating_add(1);
prop::collection::vec(
prop::collection::vec(any::<u8>(), 1..=10),
size..=size.saturating_add(100),
)
.prop_map(|v| {
let data: BTreeSet<Vec<u8>> = v.into_iter().collect();
let accumulator =
MerkleAccumulator::<Keccak256>::from_set(data.iter().map(|i| i.as_ref()))
.unwrap();
MerkleAccumulatorDataWrapper { accumulator, data }
})
.boxed()
}
type Strategy = BoxedStrategy<Self>;
}
impl Arbitrary for MerklePath<Keccak256> {
type Parameters = usize;
fn arbitrary_with(size: Self::Parameters) -> Self::Strategy {
let size = size.saturating_add(1);
prop::collection::vec(
prop::collection::vec(any::<u8>(), 32),
size..=size.saturating_add(100),
)
.prop_map(|v| {
let v = v.into_iter().map(|i| i.try_into().unwrap()).collect();
MerklePath(v)
})
.boxed()
}
type Strategy = BoxedStrategy<Self>;
}
#[test]
fn test_merkle() {
let mut set: HashSet<&[u8]> = HashSet::new();
let mut set: BTreeSet<&[u8]> = BTreeSet::new();
// Create some random elements (converted to bytes). All accumulators store arbitrary bytes so
// that we can target any account (or subset of accounts).
@@ -314,35 +289,168 @@ mod test {
set.insert(&item_b);
set.insert(&item_c);
let accumulator = MerkleAccumulator::<'_, Keccak256Hasher>::from_set(set.iter()).unwrap();
let accumulator = MerkleAccumulator::<Keccak256>::from_set(set.into_iter()).unwrap();
let proof = accumulator.prove(&item_a).unwrap();
// println!("Proof: {:02X?}", proof);
assert!(accumulator.verify(proof, &item_a));
let proof = accumulator.prove(&item_a).unwrap();
println!(
"proof: {:#?}",
proof.0.iter().map(|x| format!("{x:?}")).collect::<Vec<_>>()
);
println!(
"accumulator root: {:?}",
accumulator.accumulator.get_root().unwrap()
);
println!(
r"
Sizes:
MerkleAccumulator::Proof {:?}
Keccak256Hasher::Hash {:?}
MerkleNode {:?}
MerklePath {:?}
",
size_of::<<MerkleAccumulator<'_> as Accumulator>::Proof>(),
size_of::<<Keccak256Hasher as Hasher>::Hash>(),
size_of::<MerkleNode<Keccak256Hasher>>(),
size_of::<MerklePath<Keccak256Hasher>>()
);
assert!(!accumulator.verify(proof, &item_d));
assert!(accumulator.check(proof, &item_a));
let proof = accumulator.prove(&item_a).unwrap();
assert_eq!(size_of::<<Keccak256 as Hasher>::Hash>(), 32);
assert!(!accumulator.check(proof, &item_d));
}
//TODO: more tests
#[test]
// Note that this tests proofs for trees of size 2 and greater; in a size-1 tree the root is
// its own proof and will always pass. This just checks the most obvious case, that an empty or
// default proof must not verify; see the proptest for a more thorough check.
fn test_merkle_default_proof_fails() {
let mut set: BTreeSet<&[u8]> = BTreeSet::new();
// Insert the bytes into the Accumulate type.
let item_a = 88usize.to_be_bytes();
let item_b = 99usize.to_be_bytes();
set.insert(&item_a);
set.insert(&item_b);
// Attempt to prove empty proofs that are not in the accumulator.
let accumulator = MerkleAccumulator::<Keccak256>::from_set(set.into_iter()).unwrap();
let proof = MerklePath::<Keccak256>::default();
assert!(!accumulator.check(proof, &item_a));
let proof = MerklePath::<Keccak256>(vec![Default::default()]);
assert!(!accumulator.check(proof, &item_a));
}
#[test]
fn test_corrupted_tree_proofs() {
let mut set: BTreeSet<&[u8]> = BTreeSet::new();
// Insert the bytes into the Accumulate type.
let item_a = 88usize.to_be_bytes();
let item_b = 99usize.to_be_bytes();
let item_c = 100usize.to_be_bytes();
let item_d = 101usize.to_be_bytes();
set.insert(&item_a);
set.insert(&item_b);
set.insert(&item_c);
set.insert(&item_d);
// Accumulate
let accumulator = MerkleAccumulator::<Keccak256>::from_set(set.into_iter()).unwrap();
// For each hash in the resulting proofs, corrupt one hash and confirm that the proof
// cannot pass check.
for item in [item_a, item_b, item_c, item_d].iter() {
let proof = accumulator.prove(item).unwrap();
for (i, _) in proof.0.iter().enumerate() {
let mut corrupted_proof = proof.clone();
corrupted_proof.0[i] = Default::default();
assert!(!accumulator.check(corrupted_proof, item));
}
}
}
#[test]
#[should_panic]
// Generates a tree with four leaves, then uses the first leaf of the right subtree as the
// sibling hash; this detects whether second pre-image attacks are possible.
fn test_merkle_second_preimage_attack() {
let mut set: BTreeSet<&[u8]> = BTreeSet::new();
// Insert the bytes into the Accumulate type.
let item_a = 81usize.to_be_bytes();
let item_b = 99usize.to_be_bytes();
let item_c = 100usize.to_be_bytes();
let item_d = 101usize.to_be_bytes();
set.insert(&item_a);
set.insert(&item_b);
set.insert(&item_c);
set.insert(&item_d);
// Accumulate into a 2 level tree.
let accumulator = MerkleAccumulator::<Keccak256>::from_set(set.into_iter()).unwrap();
let proof = accumulator.prove(&item_a).unwrap();
assert!(accumulator.check(proof.clone(), &item_a));
// We now have a 2 level tree with 4 nodes:
//
// root
// / \
// / \
// A B
// / \ / \
// a b c d
//
// Laid out as: [0, root, A, B, a, b, c, d]
//
// In order to test preimage resistance we will attack the tree by dropping its leaf nodes
// from the bottom level, this produces a new tree with 2 nodes:
//
// root
// / \
// / \
// A B
//
// Laid out as: [0, root, A, B]
//
// Here, rather than A/B being hashes of leaf nodes, they themselves ARE the leaves. If the
// implementation did not use a different hash for nodes and leaves, it would be possible to
// falsely prove `A` was in the original tree by tricking the implementation into performing
// H(a || b) at the leaf.
let faulty_accumulator = MerkleAccumulator::<Keccak256> {
root: accumulator.root,
nodes: vec![
accumulator.nodes[0].clone(),
accumulator.nodes[1].clone(), // Root Stays the Same
accumulator.nodes[2].clone(), // Left node hash becomes a leaf.
accumulator.nodes[3].clone(), // Right node hash becomes a leaf.
],
};
// `a || b` is the concatenation of a and b which, when hashed without the pre-image prefixes
// in place, generates A as a leaf rather than as a pair node.
let fake_leaf_A = &[
hash_leaf::<Keccak256>(&item_b),
hash_leaf::<Keccak256>(&item_a),
]
.concat();
// Confirm our combined hash existed as a node pair in the original tree.
assert_eq!(hash_leaf::<Keccak256>(&fake_leaf_A), accumulator.nodes[2]);
// Now we can try and prove leaf membership in the faulty accumulator. NOTE: this should
// fail but to confirm that the test is actually correct you can remove the PREFIXES from
// the hash functions and this test will erroneously pass.
let proof = faulty_accumulator.prove(&fake_leaf_A).unwrap();
assert!(faulty_accumulator.check(proof, &fake_leaf_A));
}
proptest! {
// Use proptest to generate arbitrary Merkle trees as part of our fuzzing strategy. This
// will help us identify any edge cases or unexpected behavior in the implementation.
#[test]
fn test_merkle_tree(v in any::<MerkleAccumulatorDataWrapper>()) {
for d in v.data {
let proof = v.accumulator.prove(&d).unwrap();
assert!(v.accumulator.check(proof, &d));
}
}
// Use proptest to generate arbitrary proofs for Merkle Trees trying to find a proof that
// passes which should not.
#[test]
fn test_fake_merkle_proofs(
v in any::<MerkleAccumulatorDataWrapper>(),
p in any::<MerklePath<Keccak256>>(),
) {
// Reject size-1 trees, as they will always pass: the root is the only element's own proof
// (i.e. the proof is []).
if v.data.len() == 1 {
return Ok(());
}
for d in v.data {
assert!(!v.accumulator.check(p.clone(), &d));
}
}
}
}

View File

@@ -1,3 +1,5 @@
//! A multiplication based Accumulator (do not use; example only).
use crate::{
accumulators::Accumulator,
hashers::{
@@ -6,6 +8,13 @@ use crate::{
},
};
/// A multiplication based Accumulator
///
/// This accumulator relies on the quasi-commutative nature of the multiplication operator. It's
/// here mostly as an example to gain intuition for how accumulators should function. This
/// implementation relies on the fact that `/` can be used to "remove" an element, but typically
/// an accumulator cannot rely on having such a shortcut and must re-accumulate sans the element
/// being proved to be a member.
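///
/// An illustrative (hypothetical) walk-through: if a set's elements hash to the primes
/// {3, 5, 7}, the accumulator is 3 * 5 * 7 = 105; the proof for the element hashing to 5 is
/// 105 / 5 = 21, and verification checks that 21 * 5 == 105.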
pub struct MulAccumulator<H: Hasher> {
pub accumulator: H::Hash,
pub items: Vec<H::Hash>,
@@ -20,13 +29,13 @@ impl<'a> Accumulator<'a> for MulAccumulator<PrimeHasher> {
Some((acc / bytes).to_be_bytes())
}
fn verify(&self, proof: Self::Proof, item: &[u8]) -> bool {
fn check(&self, proof: Self::Proof, item: &[u8]) -> bool {
let bytes = u128::from_be_bytes(PrimeHasher::hashv(&[item]));
let proof = u128::from_be_bytes(proof);
proof * bytes == u128::from_be_bytes(self.accumulator)
}
fn from_set(items: impl Iterator<Item = &'a &'a [u8]>) -> Option<Self> {
fn from_set(items: impl Iterator<Item = &'a [u8]>) -> Option<Self> {
let primes: Vec<[u8; 16]> = items.map(|i| PrimeHasher::hashv(&[i])).collect();
Some(Self {
items: primes.clone(),
@@ -48,8 +57,8 @@ mod test {
fn test_membership() {
let mut set: HashSet<&[u8]> = HashSet::new();
// Create some random elements (converted to bytes). All accumulators store arbitrary bytes so
// that we can target any account (or subset of accounts).
// Create some random elements (converted to bytes). All accumulators store arbitrary bytes
// so that we can target any account (or subset of accounts).
let item_a = 33usize.to_be_bytes();
let item_b = 54usize.to_be_bytes();
let item_c = 2usize.to_be_bytes();
@@ -64,16 +73,10 @@
// Create an Accumulator. Test Membership.
{
let accumulator = MulAccumulator::<PrimeHasher>::from_set(set.iter()).unwrap();
let accumulator = MulAccumulator::<PrimeHasher>::from_set(set.into_iter()).unwrap();
let proof = accumulator.prove(&item_a).unwrap();
// println!("Mul:");
// println!("Proof: {:?}", accumulator.verify(proof, &item_a));
// println!("Proof: {:?}", accumulator.verify(proof, &item_d));
assert!(accumulator.verify(proof, &item_a));
assert!(!accumulator.verify(proof, &item_d));
assert!(accumulator.check(proof, &item_a));
assert!(!accumulator.check(proof, &item_d));
}
}
//TODO: more tests
// MulAccumulator::<Keccack256Hasher>
}

View File

@@ -1,32 +1,37 @@
use std::fmt::Debug;
use {
serde::{
Deserialize,
Serialize,
},
std::fmt::Debug,
};
pub mod keccak256;
pub mod keccak256_160;
pub mod prime;
/// Hasher is a trait used to provide a hashing algorithm for the library.
pub trait Hasher: Clone + Default + Debug + serde::Serialize {
/// This type is used as a hash type in the library.
/// It is recommended to use fixed size u8 array as a hash type. For example,
/// for sha256 the type would be `[u8; 32]`, representing 32 bytes,
/// which is the size of the sha256 digest. Also, fixed sized arrays of `u8`
/// by default satisfy all trait bounds required by this type.
///
/// # Trait bounds
/// `Copy` is required as the hash needs to be copied to be concatenated/propagated
/// when constructing nodes.
/// `PartialEq` is required to compare equality when verifying proof
/// `Into<Vec<u8>>` is required to be able to serialize proof
/// `TryFrom<Vec<u8>>` is required to parse hashes from a serialized proof
/// `Default` is required to be able to create a default hash
// TODO: use Digest trait from digest crate?
/// We provide `Hasher` as a small hashing abstraction.
///
/// This trait allows us to use a more abstract idea of hashing than the `Digest` trait from the
/// `digest` crate provides. In particular, if we want to use non-cryptographic hashes or hashes
/// that only fit the mathematical definition of a hash, we can do so with this far more general
/// abstraction.
pub trait Hasher
where
Self: Clone,
Self: Debug,
Self: Default,
Self: Serialize,
{
type Hash: Copy
+ PartialEq
+ AsRef<[u8]>
+ Debug
+ Default
+ Eq
+ Default
+ Debug
+ AsRef<[u8]>
+ PartialOrd
+ PartialEq
+ serde::Serialize
+ for<'a> serde::de::Deserialize<'a>;
fn hashv<T: AsRef<[u8]>>(data: &[T]) -> Self::Hash;
+ for<'a> Deserialize<'a>;
fn hashv(data: &[impl AsRef<[u8]>]) -> Self::Hash;
}

View File

@@ -1,20 +1,40 @@
use crate::hashers::Hasher;
use {
crate::hashers::Hasher,
serde::Serialize,
sha3::{
Digest,
Keccak256 as Keccak256Digest,
},
};
#[derive(Clone, Default, Debug, serde::Serialize)]
pub struct Keccak256Hasher {}
#[derive(Clone, Default, Debug, Eq, PartialEq, Serialize)]
pub struct Keccak256 {}
impl Hasher for Keccak256Hasher {
impl Hasher for Keccak256 {
type Hash = [u8; 32];
fn hashv<T: AsRef<[u8]>>(data: &[T]) -> [u8; 32] {
use sha3::{
Digest,
Keccak256,
};
let mut hasher = Keccak256::new();
for d in data {
hasher.update(d);
}
fn hashv(data: &[impl AsRef<[u8]>]) -> [u8; 32] {
let mut hasher = Keccak256Digest::new();
data.iter().for_each(|d| hasher.update(d));
hasher.finalize().into()
}
}
#[cfg(test)]
mod tests {
use {
super::*,
crate::hashers::Hasher,
};
#[test]
fn test_keccak256() {
let data = b"helloworld";
let hash_a = Keccak256::hashv(&[data]);
let data = [b"hello", b"world"];
let hash_b = Keccak256::hashv(&data);
assert_eq!(hash_a, hash_b);
}
}

View File

@@ -0,0 +1,24 @@
use {
crate::hashers::Hasher,
serde::Serialize,
sha3::{
Digest,
Keccak256,
},
};
#[derive(Clone, Default, Debug, Eq, PartialEq, Serialize)]
pub struct Keccak160 {}
impl Hasher for Keccak160 {
type Hash = [u8; 20];
fn hashv(data: &[impl AsRef<[u8]>]) -> [u8; 20] {
let mut hasher = Keccak256::new();
data.iter().for_each(|d| hasher.update(d));
let bytes: [u8; 32] = hasher.finalize().into();
let mut hash = [0u8; 20];
hash.copy_from_slice(&bytes[0..20]);
hash
}
}
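// Illustrative only (not part of this commit): a minimal sanity-check sketch, assuming the
// intent is that `Keccak160` is the Keccak256 digest truncated to its first 20 bytes.
#[cfg(test)]
mod tests {
    use {
        super::*,
        crate::hashers::{
            keccak256::Keccak256 as Keccak256Hasher,
            Hasher,
        },
    };
    #[test]
    fn test_keccak160_truncates_keccak256() {
        let data = [b"hello", b"world"];
        let full = Keccak256Hasher::hashv(&data);
        let short = Keccak160::hashv(&data);
        // The 20-byte digest should match the prefix of the 32-byte digest.
        assert_eq!(&full[..20], &short[..]);
    }
}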

View File

@@ -1,18 +1,19 @@
use {
crate::hashers::Hasher,
serde::Serialize,
sha3::Digest,
slow_primes::is_prime_miller_rabin,
};
#[derive(Clone, Default, Debug, serde::Serialize)]
#[derive(Clone, Default, Debug, Eq, PartialEq, Serialize)]
pub struct PrimeHasher {}
impl Hasher for PrimeHasher {
// u128 in big endian bytes
type Hash = [u8; 16];
fn hashv<T: AsRef<[u8]>>(data: &[T]) -> [u8; 16] {
// Scan for prime's generated by hashing the bytes starting from 0. We use a number like
fn hashv(data: &[impl AsRef<[u8]>]) -> [u8; 16] {
// Scan for primes generated by hashing the bytes starting from 0. We use a number like
// this so once the prime is found we can directly compute the hash instead of scanning
// the range again.
let mut search = 0usize;
@@ -29,7 +30,7 @@ impl Hasher for PrimeHasher {
hasher.update(search.to_be_bytes());
let hash_bytes: [u8; 32] = hasher.finalize().into();
// Take only a u32 from the end, return if it's prime.
// Take only a u32 from the end, return if it's prime.
let prime = u32::from_be_bytes(hash_bytes[28..].try_into().unwrap()) | 1;
if is_prime_miller_rabin(prime as u64) {
return (prime as u128).to_be_bytes();

View File

@@ -1,343 +1,35 @@
//! A type to hold data for the [`Accumulator` sysvar][sv].
//!
//! TODO: replace this with an actual link if needed
//! [sv]: https://docs.pythnetwork.org/developing/runtime-facilities/sysvars#accumulator
//!
//! The sysvar ID is declared in [`sysvar::accumulator`].
//!
//! [`sysvar::accumulator`]: crate::sysvar::accumulator
use {
borsh::{
BorshDeserialize,
BorshSerialize,
},
hex::FromHexError,
pyth::{
PayloadId,
P2W_FORMAT_HDR_SIZE,
P2W_FORMAT_VER_MAJOR,
P2W_FORMAT_VER_MINOR,
PACC2W_MAGIC,
},
serde::{
Deserialize,
Serialize,
Serializer,
},
std::{
fmt,
io::{
Read,
Write,
},
mem,
},
};
pub mod accumulators;
pub mod hashers;
pub mod pyth;
pub mod payload;
pub mod wormhole;
pub(crate) type RawPubkey = [u8; 32];
pub(crate) type Hash = [u8; 32];
pub(crate) type PriceId = RawPubkey;
pub(crate) type Pubkey = [u8; 32];
pub(crate) type PriceId = Pubkey;
// TODO:
// 1. decide what will be pulled out into a "pythnet" crate and what needs to remain in here
// a. be careful of cyclic dependencies
// b. git submodules?
/// Pubkey::find_program_address(&[b"emitter"], &sysvar::accumulator::id());
/// pubkey!("G9LV2mp9ua1znRAfYwZz5cPiJMAbo1T6mbjdQsDZuMJg");
pub const ACCUMULATOR_EMITTER_ADDR: Pubkey = [
225, 1, 250, 237, 172, 88, 81, 227, 43, 155, 35, 181, 249, 65, 26, 140, 43, 172, 74, 174, 62,
212, 221, 123, 129, 29, 209, 167, 46, 164, 170, 113,
];
/*** Dummy Field(s) for now just to test updating the sysvar ***/
pub type Slot = u64;
/// Pubkey::find_program_address(&[b"Sequence", &emitter_pda_key.to_bytes()], &WORMHOLE_PID);
/// pubkey!("HiqU8jiyUoFbRjf4YFAKRFWq5NZykEYC6mWhXXnoszJR");
pub const ACCUMULATOR_SEQUENCE_ADDR: Pubkey = [
248, 114, 155, 82, 154, 159, 139, 78, 187, 144, 5, 110, 22, 123, 227, 191, 18, 224, 118, 212,
39, 87, 137, 86, 88, 211, 220, 104, 229, 255, 139, 70,
];
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct AccumulatorAttestation<P: serde::Serialize> {
pub accumulator: P,
/// Official Pyth Program Address
/// pubkey!("FsJ3A3u2vn5cTVofAjvy6y5kwABJAqYWpe4975bi2epH");
pub const PYTH_PID: Pubkey = [
220, 229, 235, 225, 228, 156, 59, 159, 17, 76, 181, 84, 76, 80, 169, 158, 192, 214, 146, 214,
63, 86, 121, 90, 224, 41, 172, 131, 217, 234, 139, 226,
];
#[serde(serialize_with = "use_to_string")]
pub ring_buffer_idx: u64,
#[serde(serialize_with = "use_to_string")]
pub height: u64,
// TODO: Go back to UnixTimestamp.
pub timestamp: i64,
}
pub type ErrBox = Box<dyn std::error::Error>;
// from pyth-crosschain/wormhole_attester/sdk/rust/src/lib.rs
impl<P: serde::Serialize + for<'a> serde::Deserialize<'a>> AccumulatorAttestation<P> {
pub fn serialize(&self) -> Result<Vec<u8>, ErrBox> {
// magic
let mut buf = PACC2W_MAGIC.to_vec();
// major_version
buf.extend_from_slice(&P2W_FORMAT_VER_MAJOR.to_be_bytes()[..]);
// minor_version
buf.extend_from_slice(&P2W_FORMAT_VER_MINOR.to_be_bytes()[..]);
// hdr_size
buf.extend_from_slice(&P2W_FORMAT_HDR_SIZE.to_be_bytes()[..]);
// // payload_id
buf.push(PayloadId::AccumulationAttestation as u8);
// Header is over. NOTE: If you need to append to the header,
// make sure that the number of bytes after hdr_size is
// reflected in the P2W_FORMAT_HDR_SIZE constant.
let AccumulatorAttestation {
// accumulator_root: accumulator_root,
accumulator,
ring_buffer_idx,
height,
timestamp,
} = self;
//TODO: decide on pyth-accumulator-over-wormhole serialization format.
let mut serialized_acc = bincode::serialize(&accumulator).unwrap();
// TODO: always 32? is u16 enough?
buf.extend_from_slice(&(serialized_acc.len() as u16).to_be_bytes()[..]);
buf.append(&mut serialized_acc);
buf.extend_from_slice(&ring_buffer_idx.to_be_bytes()[..]);
buf.extend_from_slice(&height.to_be_bytes()[..]);
buf.extend_from_slice(&timestamp.to_be_bytes()[..]);
Ok(buf)
}
//TODO: update this for accumulator attest
pub fn deserialize(mut bytes: impl Read) -> Result<Self, ErrBox> {
let mut magic_vec = vec![0u8; PACC2W_MAGIC.len()];
bytes.read_exact(magic_vec.as_mut_slice())?;
if magic_vec.as_slice() != PACC2W_MAGIC {
return Err(
format!("Invalid magic {magic_vec:02X?}, expected {PACC2W_MAGIC:02X?}",).into(),
);
}
let mut major_version_vec = vec![0u8; mem::size_of_val(&P2W_FORMAT_VER_MAJOR)];
bytes.read_exact(major_version_vec.as_mut_slice())?;
let major_version = u16::from_be_bytes(major_version_vec.as_slice().try_into()?);
// Major must match exactly
if major_version != P2W_FORMAT_VER_MAJOR {
return Err(format!(
"Unsupported format major_version {major_version}, expected {P2W_FORMAT_VER_MAJOR}"
)
.into());
}
let mut minor_version_vec = vec![0u8; mem::size_of_val(&P2W_FORMAT_VER_MINOR)];
bytes.read_exact(minor_version_vec.as_mut_slice())?;
let minor_version = u16::from_be_bytes(minor_version_vec.as_slice().try_into()?);
// Only older minors are not okay for this codebase
if minor_version < P2W_FORMAT_VER_MINOR {
return Err(format!(
"Unsupported format minor_version {minor_version}, expected {P2W_FORMAT_VER_MINOR} or more"
)
.into());
}
// Read header size value
let mut hdr_size_vec = vec![0u8; mem::size_of_val(&P2W_FORMAT_HDR_SIZE)];
bytes.read_exact(hdr_size_vec.as_mut_slice())?;
let hdr_size = u16::from_be_bytes(hdr_size_vec.as_slice().try_into()?);
// Consume the declared number of remaining header
// bytes. Remaining header fields must be read from hdr_buf
let mut hdr_buf = vec![0u8; hdr_size as usize];
bytes.read_exact(hdr_buf.as_mut_slice())?;
let mut payload_id_vec = vec![0u8; mem::size_of::<PayloadId>()];
hdr_buf
.as_slice()
.read_exact(payload_id_vec.as_mut_slice())?;
if payload_id_vec[0] != PayloadId::AccumulationAttestation as u8 {
return Err(format!(
"Invalid Payload ID {}, expected {}",
payload_id_vec[0],
PayloadId::AccumulationAttestation as u8,
)
.into());
}
// Header consumed, continue with remaining fields
let mut accum_len_vec = vec![0u8; mem::size_of::<u16>()];
bytes.read_exact(accum_len_vec.as_mut_slice())?;
let accum_len = u16::from_be_bytes(accum_len_vec.as_slice().try_into()?);
// let accum_vec = Vec::with_capacity(accum_len_vec as usize);
let mut accum_vec = vec![0u8; accum_len as usize];
bytes.read_exact(accum_vec.as_mut_slice())?;
let accumulator = match bincode::deserialize(accum_vec.as_slice()) {
Ok(acc) => acc,
Err(e) => return Err(format!("AccumulatorDeserialization failed: {e}").into()),
};
let mut ring_buff_idx_vec = vec![0u8; mem::size_of::<u64>()];
bytes.read_exact(ring_buff_idx_vec.as_mut_slice())?;
let ring_buffer_idx = u64::from_be_bytes(ring_buff_idx_vec.as_slice().try_into()?);
let mut height_vec = vec![0u8; mem::size_of::<u64>()];
bytes.read_exact(height_vec.as_mut_slice())?;
let height = u64::from_be_bytes(height_vec.as_slice().try_into()?);
let mut timestamp_vec = vec![0u8; mem::size_of::<i64>()];
bytes.read_exact(timestamp_vec.as_mut_slice())?;
let timestamp = i64::from_be_bytes(timestamp_vec.as_slice().try_into()?);
Ok(Self {
accumulator,
ring_buffer_idx,
height,
timestamp,
})
}
}
pub fn use_to_string<T, S>(val: &T, s: S) -> Result<S::Ok, S::Error>
where
T: ToString,
S: Serializer,
{
s.serialize_str(&val.to_string())
}
pub fn pubkey_to_hex<S>(val: &Identifier, s: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
s.serialize_str(&hex::encode(val.to_bytes()))
}
#[derive(
Copy,
Clone,
Default,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
BorshSerialize,
BorshDeserialize,
serde::Serialize,
serde::Deserialize,
)]
#[repr(C)]
pub struct Identifier(#[serde(with = "hex")] [u8; 32]);
impl Identifier {
pub fn new(bytes: [u8; 32]) -> Identifier {
Identifier(bytes)
}
pub fn to_bytes(&self) -> [u8; 32] {
self.0
}
pub fn to_hex(&self) -> String {
hex::encode(self.0)
}
pub fn from_hex<T: AsRef<[u8]>>(s: T) -> Result<Identifier, FromHexError> {
let mut bytes = [0u8; 32];
hex::decode_to_slice(s, &mut bytes)?;
Ok(Identifier::new(bytes))
}
}
impl fmt::Debug for Identifier {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "0x{}", self.to_hex())
}
}
impl fmt::Display for Identifier {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "0x{}", self.to_hex())
}
}
impl AsRef<[u8]> for Identifier {
fn as_ref(&self) -> &[u8] {
&self.0[..]
}
}
#[cfg(test)]
mod tests {
use {
super::*,
crate::{
accumulators::{
merkle::MerkleAccumulator,
Accumulator,
},
hashers::keccak256::Keccak256Hasher,
pyth::*,
},
};
pub fn new_unique_pubkey() -> RawPubkey {
use rand::Rng;
rand::thread_rng().gen::<[u8; 32]>()
}
impl AccountHeader {
fn new(account_type: u32) -> Self {
Self {
account_type,
..AccountHeader::default()
}
}
}
fn generate_price_account(price: i64) -> (RawPubkey, PriceAccount) {
(
new_unique_pubkey(),
PriceAccount {
price_type: 0,
header: AccountHeader::new(PC_ACCTYPE_PRICE),
agg_: PriceInfo {
price_: price,
..PriceInfo::default()
},
..PriceAccount::default()
},
)
}
#[test]
fn test_pa_default() {
println!("testing pa");
let acct_header = AccountHeader::default();
println!("acct_header.acct_type: {}", acct_header.account_type);
let pa = PriceAccount::default();
println!("price_account.price_type: {}", pa.price_type);
}
#[test]
fn test_new_accumulator() {
let price_accts_and_keys = (0..2)
.map(|i| generate_price_account(i * 2))
.collect::<Vec<_>>();
let set = price_accts_and_keys
.iter()
.map(|(_, pa)| bytemuck::bytes_of(pa))
.collect::<Vec<_>>();
let accumulator = MerkleAccumulator::<'_, Keccak256Hasher>::from_set(set.iter()).unwrap();
println!("acc: {:#?}", accumulator.accumulator.get_root());
}
}
/// Official Wormhole Program Address
/// pubkey!("worm2ZoG2kUd4vFXhvjh93UUH596ayRfgQ2MgjNMTth");
pub const WORMHOLE_PID: Pubkey = [
224, 165, 137, 164, 26, 85, 251, 214, 108, 82, 164, 117, 242, 217, 42, 109, 61, 201, 180, 116,
113, 20, 203, 154, 248, 37, 169, 139, 84, 93, 60, 224,
];

View File

@@ -0,0 +1,64 @@
//! Definition of the Accumulator Payload Formats.
//!
//! This module defines the data types that are injected into VAAs to be sent to other chains via
//! Wormhole. The wire format for these types must be backwards compatible and so all types in
//! this module are expected to be append-only (for minor changes) and versioned for breaking
//! changes.
use {
borsh::BorshSerialize,
serde::Serialize,
wormhole_sdk::Vaa,
};
// Transfer Format.
// --------------------------------------------------------------------------------
// This definition is what will be sent over the wire (i.e. pulled from PythNet and
// submitted to target chains).
#[derive(BorshSerialize, Serialize)]
pub struct AccumulatorProof<'a> {
magic: [u8; 4],
major_version: u8,
minor_version: u8,
trailing: &'a [u8],
proof: v1::Proof<'a>,
}
// Proof Format (V1)
// --------------------------------------------------------------------------------
// The definitions within each module can be updated with append-only data without
// requiring a new module to be defined. So for example, new accounts can be added
// to the end of `AccumulatorAccount` without moving to a `v2`.
pub mod v1 {
use super::*;
// A hash of some data.
pub type Hash = [u8; 32];
#[derive(Serialize)]
pub enum Proof<'a> {
WormholeMerkle {
proof: Vaa<VerifiedDigest>,
updates: &'a [MerkleProof<'a>],
},
}
#[derive(Serialize)]
pub struct VerifiedDigest {
magic: [u8; 4],
proof_type: u8,
len: u8,
storage_id: u64,
digest: Hash,
}
#[derive(Serialize)]
pub struct MerkleProof<'a> {
proof: &'a [Hash],
data: &'a [u8],
}
#[derive(Serialize)]
pub enum AccumulatorAccount {
Empty,
}
}

View File

@@ -1,269 +0,0 @@
use {
crate::RawPubkey,
borsh::BorshSerialize,
};
use {
bytemuck::{
try_from_bytes,
Pod,
Zeroable,
},
// solana_merkle_tree::MerkleTree,
std::mem::size_of,
};
#[repr(C)]
#[derive(Copy, Clone, Zeroable, Pod, Default, BorshSerialize)]
pub struct AccountHeader {
pub magic_number: u32,
pub version: u32,
pub account_type: u32,
pub size: u32,
}
pub const PC_MAP_TABLE_SIZE: u32 = 640;
pub const PC_MAGIC: u32 = 2712847316;
pub const PC_VERSION: u32 = 2;
#[repr(C)]
#[derive(Copy, Clone)]
pub struct MappingAccount {
pub header: AccountHeader,
pub number_of_products: u32,
pub unused_: u32,
pub next_mapping_account: RawPubkey,
pub products_list: [RawPubkey; PC_MAP_TABLE_SIZE as usize],
}
pub const PC_ACCTYPE_MAPPING: u32 = 1;
pub const PC_MAP_TABLE_T_PROD_OFFSET: size_t = 56;
impl PythAccount for MappingAccount {
const ACCOUNT_TYPE: u32 = PC_ACCTYPE_MAPPING;
/// Equal to the offset of `prod_` in `MappingAccount`, see the trait comment for more detail
const INITIAL_SIZE: u32 = PC_MAP_TABLE_T_PROD_OFFSET as u32;
}
// Unsafe impl because product_list is of size 640 and there's no derived trait for this size
unsafe impl Pod for MappingAccount {
}
unsafe impl Zeroable for MappingAccount {
}
#[repr(C)]
#[derive(Copy, Clone, Pod, Zeroable)]
pub struct ProductAccount {
pub header: AccountHeader,
pub first_price_account: RawPubkey,
}
pub const PC_ACCTYPE_PRODUCT: u32 = 2;
pub const PC_PROD_ACC_SIZE: u32 = 512;
impl PythAccount for ProductAccount {
const ACCOUNT_TYPE: u32 = PC_ACCTYPE_PRODUCT;
const INITIAL_SIZE: u32 = size_of::<ProductAccount>() as u32;
const MINIMUM_SIZE: usize = PC_PROD_ACC_SIZE as usize;
}
#[repr(C)]
#[cfg_attr(not(test), derive(Copy, Clone, Pod, Zeroable))]
#[cfg_attr(test, derive(Copy, Clone, Pod, Zeroable, Default))]
pub struct PriceAccount {
pub header: AccountHeader,
/// Type of the price account
pub price_type: u32,
/// Exponent for the published prices
pub exponent: i32,
/// Current number of authorized publishers
pub num_: u32,
/// Number of valid quotes for the last aggregation
pub num_qt_: u32,
/// Last slot with a succesful aggregation (status : TRADING)
pub last_slot_: u64,
/// Second to last slot where aggregation was attempted
pub valid_slot_: u64,
/// Ema for price
pub twap_: PriceEma,
/// Ema for confidence
pub twac_: PriceEma,
/// Last time aggregation was attempted
pub timestamp_: i64,
/// Minimum valid publisher quotes for a succesful aggregation
pub min_pub_: u8,
pub unused_1_: i8,
pub unused_2_: i16,
pub unused_3_: i32,
/// Corresponding product account
pub product_account: RawPubkey,
/// Next price account in the list
pub next_price_account: RawPubkey,
/// Second to last slot where aggregation was succesful (i.e. status : TRADING)
pub prev_slot_: u64,
/// Aggregate price at prev_slot_
pub prev_price_: i64,
/// Confidence interval at prev_slot_
pub prev_conf_: u64,
/// Timestamp of prev_slot_
pub prev_timestamp_: i64,
/// Last attempted aggregate results
pub agg_: PriceInfo,
/// Publishers' price components
pub comp_: [PriceComponent; PC_COMP_SIZE as usize],
}
pub const PC_COMP_SIZE: u32 = 32;
#[repr(C)]
// #[derive(Copy, Clone, Pod, Zeroable)]
#[cfg_attr(not(test), derive(Copy, Clone, Pod, Zeroable))]
#[cfg_attr(test, derive(Copy, Clone, Pod, Zeroable, Default))]
pub struct PriceComponent {
pub pub_: RawPubkey,
pub agg_: PriceInfo,
pub latest_: PriceInfo,
}
#[repr(C)]
// #[derive(Debug, Copy, Clone, Pod, Zeroable)]
#[cfg_attr(not(test), derive(Copy, Clone, Pod, Zeroable))]
#[cfg_attr(test, derive(Copy, Clone, Pod, Zeroable, Default))]
pub struct PriceInfo {
pub price_: i64,
pub conf_: u64,
pub status_: u32,
pub corp_act_status_: u32,
pub pub_slot_: u64,
}
#[repr(C)]
// #[derive(Debug, Copy, Clone, Pod, Zeroable)]
#[cfg_attr(not(test), derive(Copy, Clone, Pod, Zeroable))]
#[cfg_attr(test, derive(Copy, Clone, Pod, Zeroable, Default))]
pub struct PriceEma {
pub val_: i64,
pub numer_: i64,
pub denom_: i64,
}
pub const PC_ACCTYPE_PRICE: u32 = 3;
pub type size_t = ::std::os::raw::c_ulong;
pub const PC_PRICE_T_COMP_OFFSET: size_t = 240;
impl PythAccount for PriceAccount {
const ACCOUNT_TYPE: u32 = PC_ACCTYPE_PRICE;
/// Equal to the offset of `comp_` in `PriceAccount`, see the trait comment for more detail
const INITIAL_SIZE: u32 = PC_PRICE_T_COMP_OFFSET as u32;
}
/// The PythAccount trait's purpose is to attach constants to the 3 types of accounts that Pyth has
/// (mapping, price, product). This allows less duplicated code, because now we can create generic
/// functions to perform common checks on the accounts and to load and initialize the accounts.
pub trait PythAccount: Pod {
/// `ACCOUNT_TYPE` is just the account discriminator, it is different for mapping, product and
/// price
const ACCOUNT_TYPE: u32;
/// `INITIAL_SIZE` is the value that the field `size_` will take when the account is first
/// initialized this one is slightly tricky because for mapping (resp. price) `size_` won't
/// include the unpopulated entries of `prod_` (resp. `comp_`). At the beginning there are 0
/// products (resp. 0 components) therefore `INITIAL_SIZE` will be equal to the offset of
/// `prod_` (resp. `comp_`) Similarly the product account `INITIAL_SIZE` won't include any
/// key values.
const INITIAL_SIZE: u32;
/// `minimum_size()` is the minimum size that the solana account holding the struct needs to
/// have. `INITIAL_SIZE` <= `minimum_size()`
const MINIMUM_SIZE: usize = size_of::<Self>();
}
/// Interpret the bytes in `data` as a value of type `T`
/// This will fail if :
/// - `data` is too short
/// - `data` is not aligned for T
pub fn load<T: Pod>(data: &[u8]) -> &T {
try_from_bytes(data.get(0..size_of::<T>()).unwrap()).unwrap()
}
pub fn load_as_option<T: Pod>(data: &[u8]) -> Option<&T> {
data.get(0..size_of::<T>())
.map(|data| try_from_bytes(data).unwrap())
}
pub fn check<T: PythAccount>(account_data: &[u8]) -> bool {
if account_data.len() < T::MINIMUM_SIZE {
return false;
}
let account_header = load::<AccountHeader>(account_data);
if account_header.magic_number != PC_MAGIC
|| account_header.version != PC_VERSION
|| account_header.account_type != T::ACCOUNT_TYPE
{
return false;
}
true
}
pub fn load_account<'a, T: Pod>(data: &'a [u8]) -> Option<&'a T> {
// let data = account.try_borrow_mut_data()?;
bytemuck::try_from_bytes(&data[0..size_of::<T>()]).ok()
}
pub fn load_checked<'a, T: PythAccount>(account_data: &'a [u8], _version: u32) -> Option<&'a T> {
if !check::<T>(account_data) {
return None;
}
load_account::<T>(account_data)
}
/// Precedes every message implementing the p2w serialization format
pub const PACC2W_MAGIC: &[u8] = b"acc";
/// Format version used and understood by this codebase
pub const P2W_FORMAT_VER_MAJOR: u16 = 3;
/// Starting with v3, format introduces a minor version to mark
/// forward-compatible iterations.
/// IMPORTANT: Remember to reset this to 0 whenever major version is
/// bumped.
/// Changelog:
/// * v3.1 - last_attested_publish_time field added
pub const P2W_FORMAT_VER_MINOR: u16 = 1;
/// Starting with v3, format introduces append-only
/// forward-compatibility to the header. This is the current number of
/// bytes after the hdr_size field. After the specified bytes, inner
/// payload-specific fields begin.
pub const P2W_FORMAT_HDR_SIZE: u16 = 1;
pub const PUBKEY_LEN: usize = 32;
#[repr(u8)]
pub enum PayloadId {
PriceAttestation = 1,
// Not in use
PriceBatchAttestation = 2,
// Not in use
AccumulationAttestation = 3,
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_price_account_size() {
let price_account_size = size_of::<PriceAccount>();
// comp_ offset + (size_of::<PriceComp>() * PC_COMP_SIZE)
// = 240 + (96 * 32)
// = 3312
assert_eq!(price_account_size, 3312);
}
}

View File

@@ -1,5 +1,5 @@
use {
crate::RawPubkey,
crate::Pubkey,
borsh::{
BorshDeserialize,
BorshSerialize,
@@ -29,35 +29,16 @@ pub struct PostedMessageUnreliableData {
#[derive(Debug, Default, BorshSerialize, BorshDeserialize, Clone, Serialize, Deserialize)]
pub struct MessageData {
/// Header of the posted VAA
pub vaa_version: u8,
/// Level of consistency requested by the emitter
pub consistency_level: u8,
/// Time the vaa was submitted
pub vaa_time: u32,
/// Account where signatures are stored
pub vaa_signature_account: RawPubkey,
/// Time the posted message was created
pub submission_time: u32,
/// Unique nonce for this message
pub nonce: u32,
/// Sequence number of this message
pub sequence: u64,
/// Emitter of the message
pub emitter_chain: u16,
/// Emitter of the message
pub emitter_address: [u8; 32],
/// Message payload
pub payload: Vec<u8>,
pub vaa_version: u8,
pub consistency_level: u8,
pub vaa_time: u32,
pub vaa_signature_account: Pubkey,
pub submission_time: u32,
pub nonce: u32,
pub sequence: u64,
pub emitter_chain: u16,
pub emitter_address: [u8; 32],
pub payload: Vec<u8>,
}
impl BorshSerialize for PostedMessageUnreliableData {
@@ -90,7 +71,6 @@ impl BorshDeserialize for PostedMessageUnreliableData {
impl Deref for PostedMessageUnreliableData {
type Target = MessageData;
fn deref(&self) -> &Self::Target {
&self.message
}