pythnet: move `pyth/` from pythnet and colocate other pythnet libs (#802)

This commit is contained in:
Reisen 2023-05-04 18:47:35 +01:00 committed by GitHub
parent 2f0ff1235a
commit 677343c339
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
48 changed files with 1304 additions and 2 deletions

View File

@ -69,13 +69,13 @@ repos:
- id: cargo-fmt-message-buffer
name: Cargo format for message buffer contract
language: "rust"
entry: cargo +nightly fmt --manifest-path ./message_buffer/Cargo.toml --all -- --config-path rustfmt.toml
entry: cargo +nightly fmt --manifest-path ./pythnet/message_buffer/Cargo.toml --all -- --config-path rustfmt.toml
pass_filenames: false
files: message_buffer
- id: cargo-clippy-message-buffer
name: Cargo clippy for message buffer contract
language: "rust"
entry: cargo +nightly clippy --manifest-path ./message_buffer/Cargo.toml --tests --fix --allow-dirty --allow-staged -- -D warnings
entry: cargo +nightly clippy --manifest-path ./pythnet/message_buffer/Cargo.toml --tests --fix --allow-dirty --allow-staged -- -D warnings
pass_filenames: false
files: message_buffer
# Hooks for solana receiver contract

View File

@ -0,0 +1,30 @@
[package]
name = "solana-pyth"
version = "1.13.6"
description = "Pyth Runtime for Solana"
authors = ["Pyth Data Association"]
repository = "https://github.com/pyth-network/pythnet"
edition = "2021"
[dependencies]
borsh = "0.9.1"
bincode = "1.3.1"
bytemuck = { version = "1.11.0", features = ["derive"] }
fast-math = "0.1"
hex = { version = "0.4.3", features = ["serde"] }
serde = { version = "1.0.144", features = ["derive"] }
sha3 = "0.10.4"
slow_primes = "0.1.14"
[dev-dependencies]
rand = "0.7.0"
[lib]
crate-type = ["lib"]
name = "solana_pyth"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[build-dependencies]
rustc_version = "0.4"

View File

@ -0,0 +1,17 @@
# Merge all imports into a clean vertical list of module imports.
imports_granularity = "One"
group_imports = "One"
imports_layout = "Vertical"
# Better grep-ability.
empty_item_single_line = false
# Consistent pipe layout.
match_arm_leading_pipes = "Preserve"
# Align Fields
enum_discrim_align_threshold = 80
struct_field_align_threshold = 80
# Allow up to two blank lines for visual grouping.
blank_lines_upper_bound = 2

View File

@ -0,0 +1,9 @@
// Accumulator implementations: a Merkle-tree accumulator (public) and an
// experimental multiplicative prime accumulator (crate-private).
pub mod merkle;
mod mul;
/// A structure that can summarize a set of byte slices and later prove (and
/// verify) membership of individual items.
pub trait Accumulator<'a>: Sized {
    /// Proof object handed to [`Self::verify`]; tied to the accumulator's lifetime.
    type Proof: 'a;
    /// Build an accumulator over the given set, or `None` when construction
    /// is not possible (implementation-dependent, e.g. an empty input).
    fn from_set(items: impl Iterator<Item = &'a &'a [u8]>) -> Option<Self>;
    /// Produce a membership proof for `item`, or `None` if it is not present.
    fn prove(&'a self, item: &[u8]) -> Option<Self::Proof>;
    /// Check `proof` against `item`.
    fn verify(&'a self, proof: Self::Proof, item: &[u8]) -> bool;
}

View File

@ -0,0 +1,348 @@
// TODO: Go back to a reference based implementation ala Solana's original.
use {
crate::{
accumulators::Accumulator,
hashers::{
keccak256::Keccak256Hasher,
Hasher,
},
PriceId,
},
borsh::{
BorshDeserialize,
BorshSerialize,
},
serde::{
Deserialize,
Serialize,
},
std::collections::HashSet,
};
// We need to discern between leaf and intermediate nodes to prevent trivial second
// pre-image attacks.
// https://flawed.net.nz/2018/02/21/attacking-merkle-trees-with-a-second-preimage-attack
// Domain separation: leaves hash as (0x00 || data), interior nodes as
// (0x01 || left || right).
const LEAF_PREFIX: &[u8] = &[0];
const INTERMEDIATE_PREFIX: &[u8] = &[1];
// Hash a leaf node: digest of the leaf domain prefix followed by the raw item bytes.
macro_rules! hash_leaf {
    {$x:ty, $d:ident} => {
        <$x as Hasher>::hashv(&[LEAF_PREFIX, $d])
    }
}
// Hash an interior node: digest of the intermediate domain prefix and both child hashes.
macro_rules! hash_intermediate {
    {$x:ty, $l:ident, $r:ident} => {
        <$x as Hasher>::hashv(&[INTERMEDIATE_PREFIX, $l.as_ref(), $r.as_ref()])
    }
}
/// An implementation of a Sha3/Keccak256 based Merkle Tree based on the implementation provided by
/// solana-merkle-tree. This modifies the structure slightly to be serialization friendly, and to
/// make verification cheaper on EVM based networks.
#[derive(
    Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, Serialize, Deserialize, Default,
)]
pub struct MerkleTree<H: Hasher = Keccak256Hasher> {
    /// Number of leaves the tree was built from.
    pub leaf_count: usize,
    /// All levels stored contiguously: leaf hashes first, then each parent
    /// level in turn, ending with the root as the final element.
    pub nodes: Vec<H::Hash>,
}
/// A Merkle-tree accumulator paired with the original items inserted into it.
pub struct MerkleAccumulator<'a, H: Hasher = Keccak256Hasher> {
    pub accumulator: MerkleTree<H>,
    /// A list of the original items inserted into the tree.
    ///
    /// The full list is kept because proofs require the index of each item in the tree, by
    /// keeping the nodes we can look up the position in the original list for proof
    /// verification.
    pub items: Vec<&'a [u8]>,
}
impl<'a, H: Hasher + 'a> Accumulator<'a> for MerkleAccumulator<'a, H> {
    type Proof = MerklePath<H>;
    /// Materialize the set into a vector and build a Merkle tree over it.
    fn from_set(items: impl Iterator<Item = &'a &'a [u8]>) -> Option<Self> {
        let collected: Vec<&[u8]> = items.copied().collect();
        let accumulator = MerkleTree::new(&collected);
        Some(Self {
            accumulator,
            items: collected,
        })
    }
    /// Locate `item` among the inserted items and emit its sibling path;
    /// `None` when the item was never inserted.
    fn prove(&'a self, item: &[u8]) -> Option<Self::Proof> {
        let position = self.items.iter().position(|candidate| *candidate == item)?;
        self.accumulator.find_path(position)
    }
    /// Hash `item` as a leaf and replay the proof up to the recorded root.
    fn verify(&'a self, proof: Self::Proof, item: &[u8]) -> bool {
        let leaf = hash_leaf!(H, item);
        proof.validate(leaf)
    }
}
impl<H: Hasher> MerkleTree<H> {
    /// Number of nodes in the level above one of `level_len` nodes; the root
    /// level (length 1) has no parent, signalled by returning 0.
    #[inline]
    fn next_level_len(level_len: usize) -> usize {
        if level_len == 1 {
            0
        } else {
            (level_len + 1) / 2
        }
    }
    /// Upper bound on the total node count for `leaf_count` leaves, used to
    /// reserve the `nodes` vector in a single allocation.
    fn calculate_vec_capacity(leaf_count: usize) -> usize {
        // the most nodes consuming case is when n-1 is full balanced binary tree
        // then n will cause the previous tree add a left only path to the root
        // this cause the total nodes number increased by tree height, we use this
        // condition as the max nodes consuming case.
        // n is current leaf nodes number
        // assuming n-1 is a full balanced binary tree, n-1 tree nodes number will be
        // 2(n-1) - 1, n tree height is closed to log2(n) + 1
        // so the max nodes number is 2(n-1) - 1 + log2(n) + 1, finally we can use
        // 2n + log2(n+1) as a safe capacity value.
        // test results:
        // 8192 leaf nodes(full balanced):
        // computed cap is 16398, actually using is 16383
        // 8193 leaf nodes:(full balanced plus 1 leaf):
        // computed cap is 16400, actually using is 16398
        // about performance: current used fast_math log2 code is constant algo time
        if leaf_count > 0 {
            fast_math::log2_raw(leaf_count as f32) as usize + 2 * leaf_count + 1
        } else {
            0
        }
    }
    /// Build the tree bottom-up: hash every item as a leaf, then repeatedly
    /// hash adjacent pairs (duplicating a trailing odd node) until a single
    /// root remains. All levels are stored contiguously in `nodes`.
    pub fn new<T: AsRef<[u8]>>(items: &[T]) -> Self {
        let cap = MerkleTree::<H>::calculate_vec_capacity(items.len());
        let mut mt = MerkleTree {
            leaf_count: items.len(),
            nodes: Vec::with_capacity(cap),
        };
        for item in items {
            let item = item.as_ref();
            let hash = hash_leaf!(H, item);
            mt.nodes.push(hash);
        }
        let mut level_len = MerkleTree::<H>::next_level_len(items.len());
        let mut level_start = items.len();
        let mut prev_level_len = items.len();
        let mut prev_level_start = 0;
        while level_len > 0 {
            for i in 0..level_len {
                let prev_level_idx = 2 * i;
                let lsib: &H::Hash = &mt.nodes[prev_level_start + prev_level_idx];
                let rsib: &H::Hash = if prev_level_idx + 1 < prev_level_len {
                    &mt.nodes[prev_level_start + prev_level_idx + 1]
                } else {
                    // Duplicate last entry if the level length is odd
                    &mt.nodes[prev_level_start + prev_level_idx]
                };
                let hash = hash_intermediate!(H, lsib, rsib);
                mt.nodes.push(hash);
            }
            prev_level_start = level_start;
            prev_level_len = level_len;
            level_start += level_len;
            level_len = MerkleTree::<H>::next_level_len(level_len);
        }
        mt
    }
    /// The root hash of the tree, or `None` for an empty tree.
    pub fn get_root(&self) -> Option<&H::Hash> {
        // The root is always the final node pushed during construction;
        // `slice::last` is the direct form of the former `.iter().last()`.
        self.nodes.last()
    }
    /// Build the sibling path from leaf `index` up to the root, or `None`
    /// when the index is out of range.
    pub fn find_path(&self, index: usize) -> Option<MerklePath<H>> {
        if index >= self.leaf_count {
            return None;
        }
        let mut level_len = self.leaf_count;
        let mut level_start = 0;
        let mut path = MerklePath::<H>::default();
        let mut node_index = index;
        let mut lsib = None;
        let mut rsib = None;
        while level_len > 0 {
            let level = &self.nodes[level_start..(level_start + level_len)];
            let target = level[node_index];
            // The leaf level records no node; siblings gathered there feed
            // the first pushed entry one level up.
            if lsib.is_some() || rsib.is_some() {
                path.push(MerkleNode::new(target, lsib, rsib));
            }
            if node_index % 2 == 0 {
                lsib = None;
                rsib = if node_index + 1 < level.len() {
                    Some(level[node_index + 1])
                } else {
                    // Odd level length: the node is paired with itself.
                    Some(level[node_index])
                };
            } else {
                lsib = Some(level[node_index - 1]);
                rsib = None;
            }
            node_index /= 2;
            level_start += level_len;
            level_len = MerkleTree::<H>::next_level_len(level_len);
        }
        Some(path)
    }
}
/// A proof path from a leaf to the root: one node per tree level above the
/// leaf, each recording the expected hash and the sibling used to reach it.
#[derive(Clone, Default, Debug, PartialEq, Eq, Serialize)]
pub struct MerklePath<H: Hasher>(Vec<MerkleNode<H>>);
impl<H: Hasher> MerklePath<H> {
    /// Append one level's node to the path (levels closest to the leaf first).
    pub fn push(&mut self, entry: MerkleNode<H>) {
        self.0.push(entry)
    }
    /// Fold `candidate` up the path, re-hashing it with each recorded sibling
    /// and requiring the result to match that level's expected hash; any
    /// mismatch short-circuits to `false`.
    pub fn validate(&self, candidate: H::Hash) -> bool {
        let result = self.0.iter().try_fold(candidate, |candidate, pe| {
            let lsib = &pe.1.unwrap_or(candidate);
            let rsib = &pe.2.unwrap_or(candidate);
            let hash = hash_intermediate!(H, lsib, rsib);
            if hash == pe.0 {
                Some(hash)
            } else {
                None
            }
        });
        // `result.is_some()` replaces the redundant `matches!(result, Some(_))`.
        result.is_some()
    }
}
/// One step of a Merkle proof: (expected hash at this level, optional left
/// sibling, optional right sibling). Exactly one sibling is populated.
#[derive(Clone, Default, Debug, PartialEq, Eq, Serialize)]
pub struct MerkleNode<H: Hasher>(H::Hash, Option<H::Hash>, Option<H::Hash>);
// NOTE: the unused lifetime parameter `'a` on this impl served no purpose and
// has been removed.
impl<H: Hasher> MerkleNode<H> {
    /// Build a path node for `target`. Exactly one sibling must be supplied:
    /// a left-positioned node records its right sibling and vice versa.
    ///
    /// # Panics
    /// Panics when both or neither sibling is provided.
    pub fn new(
        target: H::Hash,
        left_sibling: Option<H::Hash>,
        right_sibling: Option<H::Hash>,
    ) -> Self {
        assert!(left_sibling.is_none() ^ right_sibling.is_none());
        Self(target, left_sibling, right_sibling)
    }
}
//TODO: update this to correct value/type later
//
/** using `sdk/program/src/slot_hashes.rs` as a reference **/
//TODO: newtype or type alias?
// also double check alignment in conjunction with `AccumulatorPrice`
// #[repr(transparent)
/// Price-id/proof pairs kept sorted by price id (see `PriceProofs::new`).
#[derive(Serialize, PartialEq, Eq, Default)]
pub struct PriceProofs<H: Hasher>(Vec<(PriceId, MerklePath<H>)>);
impl<H: Hasher> PriceProofs<H> {
    /// Copy the proofs and store them ordered by price id.
    pub fn new(price_proofs: &[(PriceId, MerklePath<H>)]) -> Self {
        let mut sorted = price_proofs.to_vec();
        sorted.sort_by_key(|(id, _)| *id);
        Self(sorted)
    }
}
#[cfg(test)]
mod test {
    use {
        super::*,
        std::mem::size_of,
    };
    // Minimal stand-in for an on-chain price account, serialized via Borsh.
    #[derive(Default, Clone, Debug, borsh::BorshSerialize)]
    struct PriceAccount {
        pub id: u64,
        pub price: u64,
        pub price_expo: u64,
        pub ema: u64,
        pub ema_expo: u64,
    }
    // A reordered projection of `PriceAccount` — demonstrates that the tree
    // stores raw bytes with no schema awareness.
    #[derive(Default, Debug, borsh::BorshSerialize)]
    struct PriceOnly {
        pub price_expo: u64,
        pub price: u64,
        pub id: u64,
    }
    impl From<PriceAccount> for PriceOnly {
        fn from(other: PriceAccount) -> Self {
            Self {
                id: other.id,
                price: other.price,
                price_expo: other.price_expo,
            }
        }
    }
    #[test]
    fn test_merkle() {
        let mut set: HashSet<&[u8]> = HashSet::new();
        // Create some random elements (converted to bytes). All accumulators store arbitrary bytes so
        // that we can target any account (or subset of accounts).
        let price_account_a = PriceAccount {
            id: 1,
            price: 100,
            price_expo: 2,
            ema: 50,
            ema_expo: 1,
        };
        let item_a = borsh::BorshSerialize::try_to_vec(&price_account_a).unwrap();
        let mut price_only_b = PriceOnly::from(price_account_a);
        price_only_b.price = 200;
        let item_b = BorshSerialize::try_to_vec(&price_only_b).unwrap();
        let item_c = 2usize.to_be_bytes();
        // item_d is deliberately NOT inserted; used for the negative case below.
        let item_d = 88usize.to_be_bytes();
        // Insert the bytes into the Accumulate type.
        set.insert(&item_a);
        set.insert(&item_b);
        set.insert(&item_c);
        let accumulator = MerkleAccumulator::<'_, Keccak256Hasher>::from_set(set.iter()).unwrap();
        let proof = accumulator.prove(&item_a).unwrap();
        // println!("Proof: {:02X?}", proof);
        assert!(accumulator.verify(proof, &item_a));
        let proof = accumulator.prove(&item_a).unwrap();
        println!(
            "proof: {:#?}",
            proof.0.iter().map(|x| format!("{x:?}")).collect::<Vec<_>>()
        );
        println!(
            "accumulator root: {:?}",
            accumulator.accumulator.get_root().unwrap()
        );
        println!(
            r"
        Sizes:
        MerkleAccumulator::Proof {:?}
        Keccak256Hasher::Hash {:?}
        MerkleNode {:?}
        MerklePath {:?}
        ",
            size_of::<<MerkleAccumulator<'_> as Accumulator>::Proof>(),
            size_of::<<Keccak256Hasher as Hasher>::Hash>(),
            size_of::<MerkleNode<Keccak256Hasher>>(),
            size_of::<MerklePath<Keccak256Hasher>>()
        );
        // A proof for item_a must not verify an item that was never inserted.
        assert!(!accumulator.verify(proof, &item_d));
    }
    //TODO: more tests
}

View File

@ -0,0 +1,79 @@
use crate::{
accumulators::Accumulator,
hashers::{
prime::PrimeHasher,
Hasher,
},
};
/// A multiplicative accumulator over prime representatives of the items.
pub struct MulAccumulator<H: Hasher> {
    /// Product of the prime representative of every inserted item.
    pub accumulator: H::Hash,
    /// Prime representative of each inserted item.
    pub items: Vec<H::Hash>,
}
impl<'a> Accumulator<'a> for MulAccumulator<PrimeHasher> {
    type Proof = <PrimeHasher as Hasher>::Hash;
    /// A proof is the product of every prime except the item's own, obtained
    /// by division. NOTE(review): there is no membership check here — for a
    /// non-member the integer division truncates and the resulting "proof"
    /// simply fails `verify`; confirm this is the intended contract.
    fn prove(&self, item: &[u8]) -> Option<Self::Proof> {
        let bytes = u128::from_be_bytes(PrimeHasher::hashv(&[item]));
        let acc = u128::from_be_bytes(self.accumulator);
        Some((acc / bytes).to_be_bytes())
    }
    /// Membership holds iff proof * prime(item) reconstructs the accumulator.
    fn verify(&self, proof: Self::Proof, item: &[u8]) -> bool {
        let bytes = u128::from_be_bytes(PrimeHasher::hashv(&[item]));
        let proof = u128::from_be_bytes(proof);
        proof * bytes == u128::from_be_bytes(self.accumulator)
    }
    /// Map every item to a prime and multiply the primes together; `None`
    /// for an empty input (`reduce` yields nothing).
    /// NOTE(review): the u128 product can overflow (panic in debug, wrap in
    /// release) once enough items accumulate — confirm intended set sizes.
    fn from_set(items: impl Iterator<Item = &'a &'a [u8]>) -> Option<Self> {
        let primes: Vec<[u8; 16]> = items.map(|i| PrimeHasher::hashv(&[i])).collect();
        Some(Self {
            items: primes.clone(),
            accumulator: primes.into_iter().reduce(|acc, v| {
                u128::to_be_bytes(u128::from_be_bytes(acc) * u128::from_be_bytes(v))
            })?,
        })
    }
}
#[cfg(test)]
mod test {
    use {
        super::*,
        std::collections::HashSet,
    };
    #[test]
    fn test_membership() {
        let mut set: HashSet<&[u8]> = HashSet::new();
        // Create some random elements (converted to bytes). All accumulators store arbitrary bytes so
        // that we can target any account (or subset of accounts).
        let item_a = 33usize.to_be_bytes();
        let item_b = 54usize.to_be_bytes();
        let item_c = 2usize.to_be_bytes();
        // item_d is intentionally left out of the set for the negative case.
        let item_d = 88usize.to_be_bytes();
        // Insert the bytes into the Accumulate type.
        set.insert(&item_a);
        set.insert(&item_b);
        set.insert(&item_c);
        println!();
        // Create an Accumulator. Test Membership.
        {
            let accumulator = MulAccumulator::<PrimeHasher>::from_set(set.iter()).unwrap();
            let proof = accumulator.prove(&item_a).unwrap();
            // println!("Mul:");
            // println!("Proof: {:?}", accumulator.verify(proof, &item_a));
            // println!("Proof: {:?}", accumulator.verify(proof, &item_d));
            assert!(accumulator.verify(proof, &item_a));
            assert!(!accumulator.verify(proof, &item_d));
        }
    }
    //TODO: more tests
    // MulAccumulator::<Keccack256Hasher>
}

View File

@ -0,0 +1,32 @@
use std::fmt::Debug;
pub mod keccak256;
pub mod prime;
/// Hasher is a trait used to provide a hashing algorithm for the library.
pub trait Hasher: Clone + Default + Debug + serde::Serialize {
    /// This type is used as a hash type in the library.
    /// It is recommended to use fixed size u8 array as a hash type. For example,
    /// for sha256 the type would be `[u8; 32]`, representing 32 bytes,
    /// which is the size of the sha256 digest. Also, fixed sized arrays of `u8`
    /// by default satisfy all trait bounds required by this type.
    ///
    /// # Trait bounds
    /// `Copy` is required as the hash needs to be copied to be concatenated/propagated
    /// when constructing nodes.
    /// `PartialEq` is required to compare equality when verifying proof
    /// `Into<Vec<u8>>` is required to be able to serialize proof
    /// `TryFrom<Vec<u8>>` is required to parse hashes from a serialized proof
    /// `Default` is required to be able to create a default hash
    // TODO: use Digest trait from digest crate?
    // (The original bound list spelled `Default` twice; the duplicate is removed.)
    type Hash: Copy
        + PartialEq
        + Eq
        + Default
        + Debug
        + AsRef<[u8]>
        + serde::Serialize
        + for<'a> serde::de::Deserialize<'a>;
    /// Hash the concatenation of all byte slices in `data` into one digest.
    fn hashv<T: AsRef<[u8]>>(data: &[T]) -> Self::Hash;
}

View File

@ -0,0 +1,20 @@
use crate::hashers::Hasher;
/// Stateless marker type implementing [`Hasher`] via Keccak-256 (sha3 crate).
#[derive(Clone, Default, Debug, serde::Serialize)]
pub struct Keccak256Hasher {}
impl Hasher for Keccak256Hasher {
    /// 32-byte Keccak-256 digest.
    type Hash = [u8; 32];
    /// Digest the concatenation of every slice in `data`.
    fn hashv<T: AsRef<[u8]>>(data: &[T]) -> [u8; 32] {
        use sha3::{
            Digest,
            Keccak256,
        };
        let mut state = Keccak256::new();
        data.iter().for_each(|chunk| state.update(chunk));
        state.finalize().into()
    }
}

View File

@ -0,0 +1,39 @@
use {
crate::hashers::Hasher,
sha3::Digest,
slow_primes::is_prime_miller_rabin,
};
/// Maps arbitrary bytes to a prime number, expressed as big-endian u128 bytes.
#[derive(Clone, Default, Debug, serde::Serialize)]
pub struct PrimeHasher {}
impl Hasher for PrimeHasher {
    // u128 in big endian bytes
    type Hash = [u8; 16];
    /// Derive a prime from `data` by hashing `(data || counter)` for
    /// counter = 1, 2, ... and returning the first candidate that passes the
    /// Miller-Rabin test.
    fn hashv<T: AsRef<[u8]>>(data: &[T]) -> [u8; 16] {
        // Scan for prime's generated by hashing the bytes starting from 0. We use a number like
        // this so once the prime is found we can directly compute the hash instead of scanning
        // the range again.
        let mut search = 0usize;
        loop {
            // Increment Search Counter.
            search += 1;
            // Hash Input.
            let mut hasher = sha3::Sha3_256::new();
            for d in data {
                hasher.update(d);
            }
            hasher.update(search.to_be_bytes());
            let hash_bytes: [u8; 32] = hasher.finalize().into();
            // Take only a u32 from the end, return if it's prime.
            // (`| 1` forces the candidate odd, so even values are never tested.)
            let prime = u32::from_be_bytes(hash_bytes[28..].try_into().unwrap()) | 1;
            if is_prime_miller_rabin(prime as u64) {
                return (prime as u128).to_be_bytes();
            }
        }
    }
}

View File

@ -0,0 +1,343 @@
//! A type to hold data for the [`Accumulator` sysvar][sv].
//!
//! TODO: replace this with an actual link if needed
//! [sv]: https://docs.pythnetwork.org/developing/runtime-facilities/sysvars#accumulator
//!
//! The sysvar ID is declared in [`sysvar::accumulator`].
//!
//! [`sysvar::accumulator`]: crate::sysvar::accumulator
use {
borsh::{
BorshDeserialize,
BorshSerialize,
},
hex::FromHexError,
pyth::{
PayloadId,
P2W_FORMAT_HDR_SIZE,
P2W_FORMAT_VER_MAJOR,
P2W_FORMAT_VER_MINOR,
PACC2W_MAGIC,
},
serde::{
Deserialize,
Serialize,
Serializer,
},
std::{
fmt,
io::{
Read,
Write,
},
mem,
},
};
pub mod accumulators;
pub mod hashers;
pub mod pyth;
pub mod wormhole;
// Crate-internal aliases for raw 32-byte Solana-style keys and hashes.
pub(crate) type RawPubkey = [u8; 32];
pub(crate) type Hash = [u8; 32];
pub(crate) type PriceId = RawPubkey;
// TODO:
// 1. decide what will be pulled out into a "pythnet" crate and what needs to remain in here
// a. be careful of cyclic dependencies
// b. git submodules?
/*** Dummy Field(s) for now just to test updating the sysvar ***/
pub type Slot = u64;
/// Payload attesting an accumulator state, carried over the p2w wire format
/// (see `serialize`/`deserialize` on the impl below).
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct AccumulatorAttestation<P: serde::Serialize> {
    /// The accumulator being attested; bincode-encoded on the wire.
    pub accumulator: P,
    /// Serialized as a decimal string (via `use_to_string`).
    #[serde(serialize_with = "use_to_string")]
    pub ring_buffer_idx: u64,
    /// Serialized as a decimal string (via `use_to_string`).
    #[serde(serialize_with = "use_to_string")]
    pub height: u64,
    // TODO: Go back to UnixTimestamp.
    pub timestamp: i64,
}
/// Boxed dynamic error used by the (de)serialization routines below.
pub type ErrBox = Box<dyn std::error::Error>;
// from pyth-crosschain/wormhole_attester/sdk/rust/src/lib.rs
impl<P: serde::Serialize + for<'a> serde::Deserialize<'a>> AccumulatorAttestation<P> {
    /// Encode as a p2w wire message: magic, major/minor version, header size,
    /// payload id, then the length-prefixed bincode accumulator and the
    /// trailing integer fields, all big-endian.
    pub fn serialize(&self) -> Result<Vec<u8>, ErrBox> {
        // magic
        let mut buf = PACC2W_MAGIC.to_vec();
        // major_version
        buf.extend_from_slice(&P2W_FORMAT_VER_MAJOR.to_be_bytes()[..]);
        // minor_version
        buf.extend_from_slice(&P2W_FORMAT_VER_MINOR.to_be_bytes()[..]);
        // hdr_size
        buf.extend_from_slice(&P2W_FORMAT_HDR_SIZE.to_be_bytes()[..]);
        // // payload_id
        buf.push(PayloadId::AccumulationAttestation as u8);
        // Header is over. NOTE: If you need to append to the header,
        // make sure that the number of bytes after hdr_size is
        // reflected in the P2W_FORMAT_HDR_SIZE constant.
        let AccumulatorAttestation {
            // accumulator_root: accumulator_root,
            accumulator,
            ring_buffer_idx,
            height,
            timestamp,
        } = self;
        //TODO: decide on pyth-accumulator-over-wormhole serialization format.
        let mut serialized_acc = bincode::serialize(&accumulator).unwrap();
        // TODO: always 32? is u16 enough?
        buf.extend_from_slice(&(serialized_acc.len() as u16).to_be_bytes()[..]);
        buf.append(&mut serialized_acc);
        buf.extend_from_slice(&ring_buffer_idx.to_be_bytes()[..]);
        buf.extend_from_slice(&height.to_be_bytes()[..]);
        buf.extend_from_slice(&timestamp.to_be_bytes()[..]);
        Ok(buf)
    }
    //TODO: update this for accumulator attest
    /// Parse a message produced by `serialize`: validates the magic, requires
    /// an exact major-version match and a minor version at least as new as
    /// this codebase, then reads the payload fields in wire order.
    pub fn deserialize(mut bytes: impl Read) -> Result<Self, ErrBox> {
        let mut magic_vec = vec![0u8; PACC2W_MAGIC.len()];
        bytes.read_exact(magic_vec.as_mut_slice())?;
        if magic_vec.as_slice() != PACC2W_MAGIC {
            return Err(
                format!("Invalid magic {magic_vec:02X?}, expected {PACC2W_MAGIC:02X?}",).into(),
            );
        }
        let mut major_version_vec = vec![0u8; mem::size_of_val(&P2W_FORMAT_VER_MAJOR)];
        bytes.read_exact(major_version_vec.as_mut_slice())?;
        let major_version = u16::from_be_bytes(major_version_vec.as_slice().try_into()?);
        // Major must match exactly
        if major_version != P2W_FORMAT_VER_MAJOR {
            return Err(format!(
                "Unsupported format major_version {major_version}, expected {P2W_FORMAT_VER_MAJOR}"
            )
            .into());
        }
        let mut minor_version_vec = vec![0u8; mem::size_of_val(&P2W_FORMAT_VER_MINOR)];
        bytes.read_exact(minor_version_vec.as_mut_slice())?;
        let minor_version = u16::from_be_bytes(minor_version_vec.as_slice().try_into()?);
        // Only older minors are not okay for this codebase
        if minor_version < P2W_FORMAT_VER_MINOR {
            return Err(format!(
                "Unsupported format minor_version {minor_version}, expected {P2W_FORMAT_VER_MINOR} or more"
            )
            .into());
        }
        // Read header size value
        let mut hdr_size_vec = vec![0u8; mem::size_of_val(&P2W_FORMAT_HDR_SIZE)];
        bytes.read_exact(hdr_size_vec.as_mut_slice())?;
        let hdr_size = u16::from_be_bytes(hdr_size_vec.as_slice().try_into()?);
        // Consume the declared number of remaining header
        // bytes. Remaining header fields must be read from hdr_buf
        let mut hdr_buf = vec![0u8; hdr_size as usize];
        bytes.read_exact(hdr_buf.as_mut_slice())?;
        let mut payload_id_vec = vec![0u8; mem::size_of::<PayloadId>()];
        hdr_buf
            .as_slice()
            .read_exact(payload_id_vec.as_mut_slice())?;
        if payload_id_vec[0] != PayloadId::AccumulationAttestation as u8 {
            return Err(format!(
                "Invalid Payload ID {}, expected {}",
                payload_id_vec[0],
                PayloadId::AccumulationAttestation as u8,
            )
            .into());
        }
        // Header consumed, continue with remaining fields
        let mut accum_len_vec = vec![0u8; mem::size_of::<u16>()];
        bytes.read_exact(accum_len_vec.as_mut_slice())?;
        let accum_len = u16::from_be_bytes(accum_len_vec.as_slice().try_into()?);
        // let accum_vec = Vec::with_capacity(accum_len_vec as usize);
        let mut accum_vec = vec![0u8; accum_len as usize];
        bytes.read_exact(accum_vec.as_mut_slice())?;
        let accumulator = match bincode::deserialize(accum_vec.as_slice()) {
            Ok(acc) => acc,
            Err(e) => return Err(format!("AccumulatorDeserialization failed: {e}").into()),
        };
        let mut ring_buff_idx_vec = vec![0u8; mem::size_of::<u64>()];
        bytes.read_exact(ring_buff_idx_vec.as_mut_slice())?;
        let ring_buffer_idx = u64::from_be_bytes(ring_buff_idx_vec.as_slice().try_into()?);
        let mut height_vec = vec![0u8; mem::size_of::<u64>()];
        bytes.read_exact(height_vec.as_mut_slice())?;
        let height = u64::from_be_bytes(height_vec.as_slice().try_into()?);
        let mut timestamp_vec = vec![0u8; mem::size_of::<i64>()];
        bytes.read_exact(timestamp_vec.as_mut_slice())?;
        let timestamp = i64::from_be_bytes(timestamp_vec.as_slice().try_into()?);
        Ok(Self {
            accumulator,
            ring_buffer_idx,
            height,
            timestamp,
        })
    }
}
/// Serde helper: render any `ToString` value as a string field.
pub fn use_to_string<T, S>(val: &T, s: S) -> Result<S::Ok, S::Error>
where
    T: ToString,
    S: Serializer,
{
    let text = val.to_string();
    s.serialize_str(&text)
}
/// Serde helper: render an [`Identifier`] as lowercase hex (no 0x prefix).
pub fn pubkey_to_hex<S>(val: &Identifier, s: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    let encoded = hex::encode(val.to_bytes());
    s.serialize_str(&encoded)
}
/// A 32-byte identifier, serialized as hex via serde.
#[derive(
    Copy,
    Clone,
    Default,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    BorshSerialize,
    BorshDeserialize,
    serde::Serialize,
    serde::Deserialize,
)]
#[repr(C)]
pub struct Identifier(#[serde(with = "hex")] [u8; 32]);
impl Identifier {
    /// Wrap a raw 32-byte value.
    pub fn new(bytes: [u8; 32]) -> Identifier {
        Identifier(bytes)
    }
    /// Copy of the underlying 32 bytes.
    pub fn to_bytes(&self) -> [u8; 32] {
        self.0
    }
    /// Lowercase hex rendering, without a 0x prefix.
    pub fn to_hex(&self) -> String {
        hex::encode(self.0)
    }
    /// Parse a 64-character hex string into an identifier.
    pub fn from_hex<T: AsRef<[u8]>>(s: T) -> Result<Identifier, FromHexError> {
        let mut buffer = [0u8; 32];
        hex::decode_to_slice(s, &mut buffer)?;
        Ok(Identifier::new(buffer))
    }
}
impl fmt::Debug for Identifier {
    /// Same rendering as `Display`: 0x-prefixed lowercase hex.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "0x{hex}", hex = self.to_hex())
    }
}
impl fmt::Display for Identifier {
    /// 0x-prefixed lowercase hex.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("0x")?;
        f.write_str(&self.to_hex())
    }
}
impl AsRef<[u8]> for Identifier {
    /// Borrow the raw 32 bytes as a slice.
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}
#[cfg(test)]
mod tests {
    use {
        super::*,
        crate::{
            accumulators::{
                merkle::MerkleAccumulator,
                Accumulator,
            },
            hashers::keccak256::Keccak256Hasher,
            pyth::*,
        },
    };
    /// Random 32-byte key for test fixtures.
    pub fn new_unique_pubkey() -> RawPubkey {
        use rand::Rng;
        rand::thread_rng().gen::<[u8; 32]>()
    }
    impl AccountHeader {
        /// Header with the given discriminator and defaulted remaining fields.
        fn new(account_type: u32) -> Self {
            Self {
                account_type,
                ..AccountHeader::default()
            }
        }
    }
    /// Price-account fixture with only the aggregate price populated.
    fn generate_price_account(price: i64) -> (RawPubkey, PriceAccount) {
        (
            new_unique_pubkey(),
            PriceAccount {
                price_type: 0,
                header: AccountHeader::new(PC_ACCTYPE_PRICE),
                agg_: PriceInfo {
                    price_: price,
                    ..PriceInfo::default()
                },
                ..PriceAccount::default()
            },
        )
    }
    #[test]
    fn test_pa_default() {
        println!("testing pa");
        let acct_header = AccountHeader::default();
        println!("acct_header.acct_type: {}", acct_header.account_type);
        let pa = PriceAccount::default();
        println!("price_account.price_type: {}", pa.price_type);
    }
    #[test]
    fn test_new_accumulator() {
        let price_accts_and_keys = (0..2)
            .map(|i| generate_price_account(i * 2))
            .collect::<Vec<_>>();
        let set = price_accts_and_keys
            .iter()
            .map(|(_, pa)| bytemuck::bytes_of(pa))
            .collect::<Vec<_>>();
        let accumulator = MerkleAccumulator::<'_, Keccak256Hasher>::from_set(set.iter()).unwrap();
        println!("acc: {:#?}", accumulator.accumulator.get_root());
    }
}

View File

@ -0,0 +1,269 @@
use {
crate::RawPubkey,
borsh::BorshSerialize,
};
use {
bytemuck::{
try_from_bytes,
Pod,
Zeroable,
},
// solana_merkle_tree::MerkleTree,
std::mem::size_of,
};
/// Common header prefix shared by all Pyth account types.
#[repr(C)]
#[derive(Copy, Clone, Zeroable, Pod, Default, BorshSerialize)]
pub struct AccountHeader {
    /// Must equal `PC_MAGIC` for a valid account.
    pub magic_number: u32,
    /// Format version; expected to be `PC_VERSION`.
    pub version: u32,
    /// Discriminator: one of the `PC_ACCTYPE_*` constants.
    pub account_type: u32,
    /// Account size field; see `PythAccount::INITIAL_SIZE` for initialization.
    pub size: u32,
}
/// Capacity of a mapping account's product list.
pub const PC_MAP_TABLE_SIZE: u32 = 640;
/// Magic number identifying Pyth accounts (0xA1B2C3D4).
pub const PC_MAGIC: u32 = 2712847316;
/// Current account-format version.
pub const PC_VERSION: u32 = 2;
/// Directory account listing product account keys; `next_mapping_account`
/// links to a further table when this one is full.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct MappingAccount {
    pub header: AccountHeader,
    /// Number of `products_list` entries in use.
    pub number_of_products: u32,
    pub unused_: u32,
    pub next_mapping_account: RawPubkey,
    pub products_list: [RawPubkey; PC_MAP_TABLE_SIZE as usize],
}
/// Account-type discriminator for mapping accounts.
pub const PC_ACCTYPE_MAPPING: u32 = 1;
/// Byte offset of the product list within the C `pc_map_table_t` layout.
pub const PC_MAP_TABLE_T_PROD_OFFSET: size_t = 56;
impl PythAccount for MappingAccount {
    const ACCOUNT_TYPE: u32 = PC_ACCTYPE_MAPPING;
    /// Equal to the offset of `prod_` in `MappingAccount`, see the trait comment for more detail
    const INITIAL_SIZE: u32 = PC_MAP_TABLE_T_PROD_OFFSET as u32;
}
// Unsafe impl because product_list is of size 640 and there's no derived trait for this size
// SAFETY: the struct is #[repr(C)] and each field type is itself Pod/Zeroable.
// NOTE(review): Pod additionally requires the layout to be free of padding —
// confirm the C layout has none before relying on byte reinterpretation.
unsafe impl Pod for MappingAccount {
}
unsafe impl Zeroable for MappingAccount {
}
/// Account describing one product; heads a linked list of price accounts.
#[repr(C)]
#[derive(Copy, Clone, Pod, Zeroable)]
pub struct ProductAccount {
    pub header: AccountHeader,
    pub first_price_account: RawPubkey,
}
/// Account-type discriminator for product accounts.
pub const PC_ACCTYPE_PRODUCT: u32 = 2;
/// Full on-chain size reserved for a product account (key/value attribute
/// data follows the fixed fields; see the `PythAccount` trait comment).
pub const PC_PROD_ACC_SIZE: u32 = 512;
impl PythAccount for ProductAccount {
    const ACCOUNT_TYPE: u32 = PC_ACCTYPE_PRODUCT;
    const INITIAL_SIZE: u32 = size_of::<ProductAccount>() as u32;
    const MINIMUM_SIZE: usize = PC_PROD_ACC_SIZE as usize;
}
/// On-chain price feed account data.
#[repr(C)]
#[cfg_attr(not(test), derive(Copy, Clone, Pod, Zeroable))]
#[cfg_attr(test, derive(Copy, Clone, Pod, Zeroable, Default))]
pub struct PriceAccount {
    pub header: AccountHeader,
    /// Type of the price account
    pub price_type: u32,
    /// Exponent for the published prices
    pub exponent: i32,
    /// Current number of authorized publishers
    pub num_: u32,
    /// Number of valid quotes for the last aggregation
    pub num_qt_: u32,
    /// Last slot with a successful aggregation (status : TRADING)
    pub last_slot_: u64,
    /// Second to last slot where aggregation was attempted
    pub valid_slot_: u64,
    /// Ema for price
    pub twap_: PriceEma,
    /// Ema for confidence
    pub twac_: PriceEma,
    /// Last time aggregation was attempted
    pub timestamp_: i64,
    /// Minimum valid publisher quotes for a successful aggregation
    pub min_pub_: u8,
    pub unused_1_: i8,
    pub unused_2_: i16,
    pub unused_3_: i32,
    /// Corresponding product account
    pub product_account: RawPubkey,
    /// Next price account in the list
    pub next_price_account: RawPubkey,
    /// Second to last slot where aggregation was successful (i.e. status : TRADING)
    pub prev_slot_: u64,
    /// Aggregate price at prev_slot_
    pub prev_price_: i64,
    /// Confidence interval at prev_slot_
    pub prev_conf_: u64,
    /// Timestamp of prev_slot_
    pub prev_timestamp_: i64,
    /// Last attempted aggregate results
    pub agg_: PriceInfo,
    /// Publishers' price components
    pub comp_: [PriceComponent; PC_COMP_SIZE as usize],
}
/// Maximum number of publisher components in a price account.
pub const PC_COMP_SIZE: u32 = 32;
/// One publisher's slot in a price account.
#[repr(C)]
// #[derive(Copy, Clone, Pod, Zeroable)]
#[cfg_attr(not(test), derive(Copy, Clone, Pod, Zeroable))]
#[cfg_attr(test, derive(Copy, Clone, Pod, Zeroable, Default))]
pub struct PriceComponent {
    /// Publisher's key.
    pub pub_: RawPubkey,
    // NOTE(review): by field name, agg_ is the value used in aggregation and
    // latest_ the most recent submission — confirm against the oracle program.
    pub agg_: PriceInfo,
    pub latest_: PriceInfo,
}
/// A single price observation.
#[repr(C)]
// #[derive(Debug, Copy, Clone, Pod, Zeroable)]
#[cfg_attr(not(test), derive(Copy, Clone, Pod, Zeroable))]
#[cfg_attr(test, derive(Copy, Clone, Pod, Zeroable, Default))]
pub struct PriceInfo {
    pub price_: i64,
    /// Confidence value accompanying `price_`.
    pub conf_: u64,
    pub status_: u32,
    pub corp_act_status_: u32,
    /// Slot associated with this observation.
    pub pub_slot_: u64,
}
/// Exponential moving average value with its numerator/denominator state.
#[repr(C)]
// #[derive(Debug, Copy, Clone, Pod, Zeroable)]
#[cfg_attr(not(test), derive(Copy, Clone, Pod, Zeroable))]
#[cfg_attr(test, derive(Copy, Clone, Pod, Zeroable, Default))]
pub struct PriceEma {
    pub val_: i64,
    pub numer_: i64,
    pub denom_: i64,
}
/// Account-type discriminator for price accounts.
pub const PC_ACCTYPE_PRICE: u32 = 3;
// C-style alias retained to mirror the original C headers' `size_t` usage.
pub type size_t = ::std::os::raw::c_ulong;
/// Byte offset of `comp_` within the C `pc_price_t` layout.
pub const PC_PRICE_T_COMP_OFFSET: size_t = 240;
impl PythAccount for PriceAccount {
    const ACCOUNT_TYPE: u32 = PC_ACCTYPE_PRICE;
    /// Equal to the offset of `comp_` in `PriceAccount`, see the trait comment for more detail
    const INITIAL_SIZE: u32 = PC_PRICE_T_COMP_OFFSET as u32;
}
/// The PythAccount trait's purpose is to attach constants to the 3 types of accounts that Pyth has
/// (mapping, price, product). This allows less duplicated code, because now we can create generic
/// functions to perform common checks on the accounts and to load and initialize the accounts.
pub trait PythAccount: Pod {
    /// `ACCOUNT_TYPE` is just the account discriminator, it is different for mapping, product and
    /// price
    const ACCOUNT_TYPE: u32;
    /// `INITIAL_SIZE` is the value that the field `size_` will take when the account is first
    /// initialized this one is slightly tricky because for mapping (resp. price) `size_` won't
    /// include the unpopulated entries of `prod_` (resp. `comp_`). At the beginning there are 0
    /// products (resp. 0 components) therefore `INITIAL_SIZE` will be equal to the offset of
    /// `prod_` (resp. `comp_`) Similarly the product account `INITIAL_SIZE` won't include any
    /// key values.
    const INITIAL_SIZE: u32;
    /// `minimum_size()` is the minimum size that the solana account holding the struct needs to
    /// have. `INITIAL_SIZE` <= `minimum_size()`
    const MINIMUM_SIZE: usize = size_of::<Self>();
}
/// Interpret the bytes in `data` as a value of type `T`
/// This will fail if :
/// - `data` is too short
/// - `data` is not aligned for T
///
/// # Panics
/// Panics (via `unwrap`) in both failure cases above; use `load_as_option`
/// for a non-panicking variant.
pub fn load<T: Pod>(data: &[u8]) -> &T {
    try_from_bytes(data.get(0..size_of::<T>()).unwrap()).unwrap()
}
/// Non-panicking variant of `load`: `None` when `data` is too short, panics
/// only on misalignment (unchanged from the original behavior).
pub fn load_as_option<T: Pod>(data: &[u8]) -> Option<&T> {
    let prefix = data.get(0..size_of::<T>())?;
    Some(try_from_bytes(prefix).unwrap())
}
/// Validate that `account_data` is large enough for `T` and carries the
/// expected magic, version and account-type discriminator.
pub fn check<T: PythAccount>(account_data: &[u8]) -> bool {
    if account_data.len() < T::MINIMUM_SIZE {
        return false;
    }
    let header = load::<AccountHeader>(account_data);
    header.magic_number == PC_MAGIC
        && header.version == PC_VERSION
        && header.account_type == T::ACCOUNT_TYPE
}
/// Interpret the leading bytes of `data` as a `T`, returning `None` when the
/// slice is too short or misaligned for `T`.
pub fn load_account<'a, T: Pod>(data: &'a [u8]) -> Option<&'a T> {
    // let data = account.try_borrow_mut_data()?;
    // Checked slicing: the previous `&data[0..size_of::<T>()]` panicked on a
    // short slice even though this function advertises fallibility via
    // `Option` (and `load_as_option` already returns `None` in that case).
    let prefix = data.get(0..size_of::<T>())?;
    bytemuck::try_from_bytes(prefix).ok()
}
/// Load a `T` only after `check` has validated the header fields; the
/// `_version` argument is currently unused.
pub fn load_checked<'a, T: PythAccount>(account_data: &'a [u8], _version: u32) -> Option<&'a T> {
    if check::<T>(account_data) {
        load_account::<T>(account_data)
    } else {
        None
    }
}
/// Precedes every message implementing the p2w serialization format
pub const PACC2W_MAGIC: &[u8] = b"acc";
/// Format version used and understood by this codebase
pub const P2W_FORMAT_VER_MAJOR: u16 = 3;
/// Starting with v3, format introduces a minor version to mark
/// forward-compatible iterations.
/// IMPORTANT: Remember to reset this to 0 whenever major version is
/// bumped.
/// Changelog:
/// * v3.1 - last_attested_publish_time field added
pub const P2W_FORMAT_VER_MINOR: u16 = 1;
/// Starting with v3, format introduces append-only
/// forward-compatibility to the header. This is the current number of
/// bytes after the hdr_size field. After the specified bytes, inner
/// payload-specific fields begin.
pub const P2W_FORMAT_HDR_SIZE: u16 = 1;
pub const PUBKEY_LEN: usize = 32;
/// Discriminates the inner payload carried after the p2w header.
#[repr(u8)]
pub enum PayloadId {
    PriceAttestation = 1,
    // Not in use
    PriceBatchAttestation = 2,
    // Used by `AccumulatorAttestation::serialize`/`deserialize` in this crate.
    AccumulationAttestation = 3,
}
#[cfg(test)]
mod test {
    use super::*;
    /// Pins the C-compatible layout size of `PriceAccount`.
    #[test]
    fn test_price_account_size() {
        let price_account_size = size_of::<PriceAccount>();
        // comp_ offset + (size_of::<PriceComp>() * PC_COMP_SIZE)
        // = 240 + (96 * 32)
        // = 3312
        assert_eq!(price_account_size, 3312);
    }
}

View File

@ -0,0 +1,116 @@
use {
crate::RawPubkey,
borsh::{
BorshDeserialize,
BorshSerialize,
},
serde::{
Deserialize,
Serialize,
},
std::{
io::{
Error,
ErrorKind::InvalidData,
Write,
},
ops::{
Deref,
DerefMut,
},
},
};
/// Message-account data identified on the wire by the 3-byte `b"msu"` magic
/// (see the Borsh impls for this type).
#[repr(transparent)]
#[derive(Default)]
pub struct PostedMessageUnreliableData {
    pub message: MessageData,
}
/// Body of a posted Wormhole message.
#[derive(Debug, Default, BorshSerialize, BorshDeserialize, Clone, Serialize, Deserialize)]
pub struct MessageData {
    /// Header of the posted VAA
    pub vaa_version: u8,
    /// Level of consistency requested by the emitter
    pub consistency_level: u8,
    /// Time the vaa was submitted
    pub vaa_time: u32,
    /// Account where signatures are stored
    pub vaa_signature_account: RawPubkey,
    /// Time the posted message was created
    pub submission_time: u32,
    /// Unique nonce for this message
    pub nonce: u32,
    /// Sequence number of this message
    pub sequence: u64,
    /// Emitter of the message
    pub emitter_chain: u16,
    /// Emitter of the message
    pub emitter_address: [u8; 32],
    /// Message payload
    pub payload: Vec<u8>,
}
impl BorshSerialize for PostedMessageUnreliableData {
    /// Prefix the Borsh-serialized `MessageData` with the `b"msu"` magic so
    /// readers can recognize this account type.
    fn serialize<W: Write>(&self, writer: &mut W) -> std::io::Result<()> {
        writer.write_all(b"msu")?;
        BorshSerialize::serialize(&self.message, writer)
    }
}
impl BorshDeserialize for PostedMessageUnreliableData {
    /// Validate and strip the 3-byte `b"msu"` magic, then deserialize the
    /// remaining bytes as `MessageData`.
    fn deserialize(buf: &mut &[u8]) -> std::io::Result<Self> {
        if buf.len() < 3 {
            return Err(Error::new(InvalidData, "Not enough bytes"));
        }
        let expected = b"msu";
        let magic: &[u8] = &buf[0..3];
        if magic != expected {
            return Err(Error::new(
                InvalidData,
                format!("Magic mismatch. Expected {expected:?} but got {magic:?}"),
            ));
        };
        // Advance the caller's slice past the magic before handing to Borsh.
        *buf = &buf[3..];
        Ok(PostedMessageUnreliableData {
            message: <MessageData as BorshDeserialize>::deserialize(buf)?,
        })
    }
}
impl Deref for PostedMessageUnreliableData {
    type Target = MessageData;
    /// Expose the inner `MessageData` directly.
    fn deref(&self) -> &Self::Target {
        let Self { message } = self;
        message
    }
}
impl DerefMut for PostedMessageUnreliableData {
    /// Mutable access to the inner `MessageData`.
    fn deref_mut(&mut self) -> &mut Self::Target {
        let Self { message } = self;
        message
    }
}
impl Clone for PostedMessageUnreliableData {
fn clone(&self) -> Self {
PostedMessageUnreliableData {
message: self.message.clone(),
}
}
}
/// Persisted message sequence counter.
#[derive(Default, Clone, Copy, BorshDeserialize, BorshSerialize)]
pub struct AccumulatorSequenceTracker {
    pub sequence: u64,
}