Add on-chain compression program that uses CMT (#3489)

Noah Gundotra 2022-08-25 08:51:35 -10:00 committed by GitHub
parent 9382641da2
commit 3fe92ea05f
25 changed files with 4204 additions and 6 deletions

View File

@ -9,6 +9,9 @@ members = [
"token/program",
"token/program-2022",
]
exclude = [
"account-compression/"
]
[provider]
cluster = "mainnet"

View File

@ -0,0 +1,19 @@
[programs.localnet]
spl-compression = "GRoLLzvxpxxu2PGNJMMeZPyMxjAUH9pKqxGXV9DGiceU"
[programs.testnet]
spl-compression = "GRoLLMza82AiYN7W9S9KCCtCyyPRAQP2ifBy4v4D5RMD"
[[test.genesis]]
address = "WRAPYChf58WFCnyjXKJHtrPgzKXgHp6MD9aVDqJBbGh"
program = "./target/deploy/wrapper.so"
[registry]
url = "https://anchor.projectserum.com"
[provider]
cluster = "testnet"
wallet = "~/.config/solana/id.json"
[scripts]
test = "yarn run ts-mocha -t 1000000 tests/**/*-test.ts"

account-compression/Cargo.lock (1426 lines, generated) Normal file

File diff suppressed because it is too large

View File

@ -0,0 +1,5 @@
[workspace]
members = [
"programs/account-compression",
"programs/wrapper"
]

View File

@ -0,0 +1,18 @@
# Account Compression
This on-chain program provides an interface for composing smart contracts to create and use
SPL ConcurrentMerkleTrees. The primary application of SPL ConcurrentMerkleTrees is
making edits to off-chain data with on-chain verification.
Using this program requires an indexer to parse transaction information and write relevant information to an off-chain database.
## SDK
The typescript SDK for this contract will be generated using Metaplex Foundation's [Solita](https://github.com/metaplex-foundation/solita/).
## Testing
Testing contracts locally requires the SDK to be built first. Then you can run: `anchor test`
Testing contracts against the indexer + API: run `anchor test --skip-build --skip-local-validator --skip-deploy` and limit the test script to only the continuous test.

View File

@ -0,0 +1,33 @@
{
"dependencies": {
"@metaplex-foundation/amman": "0.10.0",
"@metaplex-foundation/beet": "0.3.0",
"@metaplex-foundation/mpl-core": "0.6.1",
"@metaplex-foundation/mpl-token-metadata": "2.2.0",
"@project-serum/anchor": "0.24.2",
"@solana/spl-token": "^0.1.8",
"@solana/web3.js": "^1.50.1",
"collections": "^5.1.13",
"cors": "^2.8.5",
"express": "^4.18.1",
"pg-promise": "^10.11.1",
"react": "^18.1.0",
"retry-as-promised": "^6.0.0",
"sqlite": "^4.1.1",
"sqlite3": "^5.0.8",
"typescript-collections": "^1.3.3"
},
"devDependencies": {
"@types/chai": "^4.3.0",
"@types/mocha": "^9.0.0",
"@types/node": "^18.0.5",
"chai": "^4.3.4",
"mocha": "^9.0.3",
"ts-mocha": "^10.0.0",
"ts-node": "^10.9.1",
"typescript": "^4.7.4"
},
"scripts": {
"test": "anchor test"
}
}

View File

@ -0,0 +1,25 @@
[package]
name = "spl-compression"
version = "0.1.0"
description = "Created with Anchor"
edition = "2018"
[lib]
crate-type = ["cdylib", "lib"]
name = "spl_compression"
[features]
no-entrypoint = []
no-idl = []
no-log-ix-name = []
cpi = ["no-entrypoint"]
default = []
[dependencies]
anchor-lang = "0.25.0"
bytemuck = "1.8.0"
spl-concurrent-merkle-tree = { path="../../../libraries/concurrent-merkle-tree", features = [ "sol-log" ]}
wrapper = { path="../wrapper", features = [ "no-entrypoint" ]}
[profile.release]
overflow-checks = true

View File

@ -0,0 +1,2 @@
[target.bpfel-unknown-unknown.dependencies.std]
features = []

View File

@ -0,0 +1,127 @@
//! The canopy is a way to cache the upper `N` levels of an SPL ConcurrentMerkleTree.
//!
//! By caching the upper `N` levels of a depth `D` SPL ConcurrentMerkleTree,
//! proofs can be truncated to the first `D - N` nodes. This helps reduce the size of account
//! compression transactions, and makes it possible to
//! modify trees up to depth 31, which store more than 1 billion leaves.
//!
//! Note: this means that a tree of depth > 24 created without a canopy will be impossible to modify
//! on-chain until TransactionV2 is launched.
//!
//! To initialize a canopy on a ConcurrentMerkleTree account, you must initialize
//! the ConcurrentMerkleTree account with additional bytes. The number of additional bytes
//! needed is `(pow(2, N+1)-2) * 32`, where `N` is the number of levels of the merkle tree
//! you want the canopy to cache.
//!
//! The canopy will be updated every time the concurrent merkle tree is modified. No additional work
//! is needed.
use crate::error::AccountCompressionError;
use crate::events::ChangeLogEvent;
use anchor_lang::prelude::*;
use bytemuck::cast_slice_mut;
use spl_concurrent_merkle_tree::node::{empty_node_cached, Node, EMPTY};
use std::mem::size_of;
#[inline(always)]
pub fn check_canopy_bytes(canopy_bytes: &mut [u8]) -> Result<()> {
if canopy_bytes.len() % size_of::<Node>() != 0 {
msg!(
"Canopy byte length {} is not a multiple of {}",
canopy_bytes.len(),
size_of::<Node>()
);
err!(AccountCompressionError::CanopyLengthMismatch)
} else {
Ok(())
}
}
#[inline(always)]
fn get_cached_path_length(canopy: &mut [Node], max_depth: u32) -> Result<u32> {
// The offset of 2 is applied because the canopy is a full binary tree without the root node
// Size: (2^n - 2) -> Size + 2 must be a power of 2
let closest_power_of_2 = (canopy.len() + 2) as u32;
// This expression will return true if `closest_power_of_2` is actually a power of 2
if closest_power_of_2 & (closest_power_of_2 - 1) == 0 {
// (1 << max_depth) returns the number of leaves in the full merkle tree
// (1 << (max_depth + 1)) - 1 returns the number of nodes in the full tree
// The canopy size cannot exceed the size of the tree
if closest_power_of_2 > (1 << (max_depth + 1)) {
msg!(
"Canopy size is too large. Size: {}. Max size: {}",
closest_power_of_2 - 2,
(1 << (max_depth + 1)) - 2
);
return err!(AccountCompressionError::CanopyLengthMismatch);
}
} else {
msg!(
"Canopy length {} is not 2 less than a power of 2",
canopy.len()
);
return err!(AccountCompressionError::CanopyLengthMismatch);
}
// 1 is subtracted from the trailing zeros because the root is not stored in the canopy
Ok(closest_power_of_2.trailing_zeros() - 1)
}
pub fn update_canopy(
canopy_bytes: &mut [u8],
max_depth: u32,
change_log: Option<Box<ChangeLogEvent>>,
) -> Result<()> {
check_canopy_bytes(canopy_bytes)?;
let canopy = cast_slice_mut::<u8, Node>(canopy_bytes);
let path_len = get_cached_path_length(canopy, max_depth)?;
if let Some(cl) = change_log {
// Update the canopy from the newest change log
for path_node in cl.path.iter().rev().skip(1).take(path_len as usize) {
// node_idx - 2 maps to the canopy index
canopy[(path_node.index - 2) as usize] = path_node.node;
}
}
Ok(())
}
pub fn fill_in_proof_from_canopy(
canopy_bytes: &mut [u8],
max_depth: u32,
index: u32,
proof: &mut Vec<Node>,
) -> Result<()> {
// 30 is hard coded as it is the current max depth that SPL Compression supports
let mut empty_node_cache = Box::new([EMPTY; 30]);
check_canopy_bytes(canopy_bytes)?;
let canopy = cast_slice_mut::<u8, Node>(canopy_bytes);
let path_len = get_cached_path_length(canopy, max_depth)?;
// We want to compute the node index (w.r.t. the canopy) where the current path
// intersects the leaves of the canopy
let mut node_idx = ((1 << max_depth) + index) >> (max_depth - path_len);
let mut inferred_nodes = vec![];
while node_idx > 1 {
// node_idx - 2 maps to the canopy index
let shifted_index = node_idx as usize - 2;
let cached_idx = if shifted_index % 2 == 0 {
shifted_index + 1
} else {
shifted_index - 1
};
if canopy[cached_idx] == EMPTY {
let level = max_depth - (31 - node_idx.leading_zeros());
let empty_node = empty_node_cached::<30>(level, &mut empty_node_cache);
canopy[cached_idx] = empty_node;
inferred_nodes.push(empty_node);
} else {
inferred_nodes.push(canopy[cached_idx]);
}
node_idx >>= 1;
}
// We only want to add inferred canopy nodes such that the proof length
// is equal to the tree depth. If the length of the proof + the length of the canopy nodes is
// less than the tree depth, the instruction will fail.
let overlap = (proof.len() + inferred_nodes.len()).saturating_sub(max_depth as usize);
proof.extend(inferred_nodes.iter().skip(overlap));
Ok(())
}
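
To make the canopy arithmetic above concrete, here is a standalone sketch (illustrative only, not part of this commit) of the two formulas: the byte requirement for caching `N` levels, and the node index where a leaf's proof path enters the canopy. The helper names are invented for illustration; only the expressions mirror `canopy.rs`.

```rust
// Illustrative sketch, assuming Node = [u8; 32] as in spl-concurrent-merkle-tree.
const NODE_SIZE: usize = 32;

/// Bytes to append to the account so the canopy caches the top `n` levels:
/// a full binary tree of `n` levels minus the root, i.e. (2^(n+1) - 2) nodes.
fn canopy_bytes(n: u32) -> usize {
    ((1usize << (n + 1)) - 2) * NODE_SIZE
}

/// Node index (1-based, root = 1) where the path from `leaf_index` in a tree of
/// depth `max_depth` meets the lowest cached row of a `path_len`-level canopy,
/// mirroring the expression in fill_in_proof_from_canopy.
fn canopy_entry_node(max_depth: u32, leaf_index: u32, path_len: u32) -> u32 {
    ((1 << max_depth) + leaf_index) >> (max_depth - path_len)
}

fn main() {
    // Caching the top 10 levels of a depth-20 tree costs (2^11 - 2) * 32 bytes...
    assert_eq!(canopy_bytes(10), 65_472);
    // ...and shortens every proof from 20 nodes to 10.
    assert_eq!(canopy_entry_node(20, 0, 10), 1 << 10);
    println!("canopy(10) = {} bytes", canopy_bytes(10));
}
```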

View File

@ -0,0 +1,30 @@
//! # Data Wrapper
//! We use CPI calls to circumvent the 10KB log limit on Solana transactions.
//! Instead of logging events to the runtime, we execute a CPI to the `wrapper` program
//! where the log data is serialized into the instruction data.
//!
//! This works because CPI instruction data is never truncated. Logging information is
//! vital to the functioning of compression; when compression logs are truncated, indexers can fall back to
//! deserializing the CPI instruction data.
use anchor_lang::{prelude::*, solana_program::program::invoke};
#[derive(Clone)]
pub struct Wrapper;
impl anchor_lang::Id for Wrapper {
fn id() -> Pubkey {
wrapper::id()
}
}
pub fn wrap_event<'info>(
data: Vec<u8>,
log_wrapper_program: &Program<'info, Wrapper>,
) -> Result<()> {
invoke(
&wrapper::wrap_instruction(data),
&[log_wrapper_program.to_account_info()],
)?;
Ok(())
}

View File

@ -0,0 +1,63 @@
use anchor_lang::{
prelude::*,
solana_program::{msg, program_error::ProgramError},
};
use bytemuck::PodCastError;
use spl_concurrent_merkle_tree::error::ConcurrentMerkleTreeError;
use std::any::type_name;
use std::mem::size_of;
/// Errors related to misconfiguration or misuse of the Merkle tree
#[error_code]
pub enum AccountCompressionError {
/// This error is currently not used.
#[msg("Incorrect leaf length. Expected vec of 32 bytes")]
IncorrectLeafLength,
/// A modification to the tree was invalid and a changelog was not emitted.
/// The proof may be invalid or out-of-date, or the provided leaf hash was invalid.
#[msg("Concurrent merkle tree error")]
ConcurrentMerkleTreeError,
/// An issue was detected with loading the provided account data for this ConcurrentMerkleTree.
#[msg("Issue zero copying concurrent merkle tree data")]
ZeroCopyError,
/// See [ConcurrentMerkleTreeHeader](/spl_compression/state/struct.ConcurrentMerkleTreeHeader.html) for valid configuration options.
#[msg("An unsupported max depth or max buffer size constant was provided")]
ConcurrentMerkleTreeConstantsError,
/// When using Canopy, the stored byte length should be a multiple of the node's byte length (32 bytes)
#[msg("Expected a different byte length for the merkle tree canopy")]
CanopyLengthMismatch,
/// Incorrect authority account provided
#[msg("Provided authority does not match expected tree authority")]
IncorrectAuthority,
/// Incorrect account owner
#[msg("Account is owned by a different program, expected it to be owned by this program")]
IncorrectAccountOwner,
/// Incorrect account type
#[msg("Account provided has incorrect account type")]
IncorrectAccountType,
}
impl From<&ConcurrentMerkleTreeError> for AccountCompressionError {
fn from(_error: &ConcurrentMerkleTreeError) -> Self {
AccountCompressionError::ConcurrentMerkleTreeError
}
}
pub fn error_msg<T>(data_len: usize) -> impl Fn(PodCastError) -> ProgramError {
move |_: PodCastError| -> ProgramError {
msg!(
"Failed to load {}. Size is {}, expected {}",
type_name::<T>(),
data_len,
size_of::<T>(),
);
ProgramError::InvalidAccountData
}
}

View File

@ -0,0 +1,46 @@
use crate::state::PathNode;
use anchor_lang::prelude::*;
use spl_concurrent_merkle_tree::changelog::ChangeLog;
#[event]
pub struct ChangeLogEvent {
/// Public key of the ConcurrentMerkleTree
pub id: Pubkey,
/// Nodes of off-chain merkle tree needed by indexer
pub path: Vec<PathNode>,
/// Index corresponding to the number of successful operations on this tree.
/// Used by the off-chain indexer to figure out when there are gaps to be backfilled.
pub seq: u64,
/// Bitmap of node parity (used when hashing)
pub index: u32,
}
impl<const MAX_DEPTH: usize> From<(Box<ChangeLog<MAX_DEPTH>>, Pubkey, u64)>
for Box<ChangeLogEvent>
{
fn from(log_info: (Box<ChangeLog<MAX_DEPTH>>, Pubkey, u64)) -> Self {
let (changelog, tree_id, seq) = log_info;
let path_len = changelog.path.len() as u32;
let mut path: Vec<PathNode> = changelog
.path
.iter()
.enumerate()
.map(|(lvl, n)| {
PathNode::new(
*n,
(1 << (path_len - lvl as u32)) + (changelog.index >> lvl),
)
})
.collect();
path.push(PathNode::new(changelog.root, 1));
Box::new(ChangeLogEvent {
id: tree_id,
path,
seq,
index: changelog.index,
})
}
}
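
The index expression in the `From` impl above is the standard 1-based heap numbering of a binary tree: the root is node 1, and the leaves of a depth-`D` tree occupy indices `2^D .. 2^(D+1)`. A standalone check with illustrative values:

```rust
// Standalone check of the node numbering used in the From impl above.
fn main() {
    let path_len = 2u32; // depth-2 tree: leaves are nodes 4..8, root is node 1
    let leaf_index = 3u32; // rightmost leaf
    for lvl in 0..path_len {
        let node_index = (1 << (path_len - lvl)) + (leaf_index >> lvl);
        println!("level {}: node {}", lvl, node_index);
    }
    // Prints: level 0: node 7 (the leaf itself), level 1: node 3 (its parent).
    // The root is then appended separately as PathNode::new(root, 1).
}
```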

View File

@ -0,0 +1,7 @@
//! Anchor events are used to emit information necessary to
//! index changes made to a SPL ConcurrentMerkleTree
mod changelog_event;
mod new_leaf_event;
pub use changelog_event::ChangeLogEvent;
pub use new_leaf_event::NewLeafEvent;

View File

@ -0,0 +1,9 @@
use anchor_lang::prelude::*;
#[event]
pub struct NewLeafEvent {
/// Public key of the concurrent merkle tree account
pub id: Pubkey,
/// Needed by off-chain indexers to track new data
pub leaf: [u8; 32],
}

View File

@ -0,0 +1,498 @@
//! SPL Account Compression is an on-chain program that exposes an interface to manipulating SPL ConcurrentMerkleTrees
//!
//! A buffer of proof-like changelogs is stored on-chain that allows multiple proof-based writes to succeed within the same slot.
//! This is accomplished by fast-forwarding out-of-date (or possibly invalid) proofs based on information stored in the changelogs.
//! See a copy of the whitepaper [here](https://drive.google.com/file/d/1BOpa5OFmara50fTvL0VIVYjtg-qzHCVc/view)
//!
//! To circumvent proof size restrictions stemming from Solana transaction size restrictions,
//! SPL Account Compression also provides the ability to cache the uppermost levels of the
//! concurrent merkle tree. This cache is called the "canopy", and is stored at the end of the
//! ConcurrentMerkleTreeAccount. More information can be found in the initialization instruction
//! documentation.
//!
//! While SPL ConcurrentMerkleTrees can generically store arbitrary information,
//! one exemplified use-case is the [Bubblegum](https://github.com/metaplex-foundation/metaplex-program-library/tree/master/bubblegum) contract,
//! which uses SPL-Compression to store encoded information about NFTs.
//! The use of SPL-Compression within Bubblegum allows for:
//! - up to 1 billion NFTs to be stored in a single account on-chain (>10,000x decrease in on-chain cost)
//! - up to 2048 concurrent updates per slot
//!
//! Operationally, SPL ConcurrentMerkleTrees **must** be supplemented by off-chain indexers to cache information
//! about leaves and to power an API that can supply up-to-date proofs to allow updates to the tree.
//! All modifications to SPL ConcurrentMerkleTrees are settled on the Solana ledger via instructions against the SPL Compression contract.
//! A production-ready indexer (Plerkle) can be found in the [Metaplex program library](https://github.com/metaplex-foundation/digital-asset-validator-plugin).
use anchor_lang::{
emit,
prelude::*,
solana_program::sysvar::{clock::Clock, rent::Rent},
};
use borsh::{BorshDeserialize, BorshSerialize};
use std::mem::size_of;
pub mod canopy;
pub mod data_wrapper;
pub mod error;
pub mod events;
pub mod state;
pub mod zero_copy;
use crate::canopy::{fill_in_proof_from_canopy, update_canopy};
use crate::data_wrapper::{wrap_event, Wrapper};
use crate::error::AccountCompressionError;
use crate::events::ChangeLogEvent;
use crate::state::ConcurrentMerkleTreeHeader;
use crate::zero_copy::ZeroCopy;
/// Exported for Anchor / Solita
pub use spl_concurrent_merkle_tree::{
concurrent_merkle_tree::ConcurrentMerkleTree, error::ConcurrentMerkleTreeError, node::Node,
};
declare_id!("GRoLLzvxpxxu2PGNJMMeZPyMxjAUH9pKqxGXV9DGiceU");
/// Context for initializing a new SPL ConcurrentMerkleTree
#[derive(Accounts)]
pub struct Initialize<'info> {
#[account(zero)]
/// CHECK: This account will be zeroed out, and the size will be validated
pub merkle_tree: UncheckedAccount<'info>,
/// Authority that validates the content of the trees.
/// Typically a program, e.g., the Bubblegum contract validates that leaves are valid NFTs.
pub authority: Signer<'info>,
/// Program used to emit changelogs as instruction data.
/// See `WRAPYChf58WFCnyjXKJHtrPgzKXgHp6MD9aVDqJBbGh`
pub log_wrapper: Program<'info, Wrapper>,
}
/// Context for inserting, appending, or replacing a leaf in the tree
///
/// Modification instructions also require the proof to the leaf to be provided
/// as 32-byte nodes via "remaining accounts".
#[derive(Accounts)]
pub struct Modify<'info> {
#[account(mut)]
/// CHECK: This account is validated in the instruction
pub merkle_tree: UncheckedAccount<'info>,
/// Authority that validates the content of the trees.
/// Typically a program, e.g., the Bubblegum contract validates that leaves are valid NFTs.
pub authority: Signer<'info>,
/// Program used to emit changelogs as instruction data.
/// See `WRAPYChf58WFCnyjXKJHtrPgzKXgHp6MD9aVDqJBbGh`
pub log_wrapper: Program<'info, Wrapper>,
}
/// Context for validating a provided proof against the SPL ConcurrentMerkleTree.
/// Throws an error if provided proof is invalid.
#[derive(Accounts)]
pub struct VerifyLeaf<'info> {
/// CHECK: This account is validated in the instruction
pub merkle_tree: UncheckedAccount<'info>,
}
/// Context for transferring `authority`
#[derive(Accounts)]
pub struct TransferAuthority<'info> {
#[account(mut)]
/// CHECK: This account is validated in the instruction
pub merkle_tree: UncheckedAccount<'info>,
/// Authority that validates the content of the trees.
/// Typically a program, e.g., the Bubblegum contract validates that leaves are valid NFTs.
pub authority: Signer<'info>,
}
/// This macro applies functions on a ConcurrentMerkleTree and emits leaf information
/// needed to sync the merkle tree state with off-chain indexers.
macro_rules! merkle_tree_depth_size_apply_fn {
($max_depth:literal, $max_size:literal, $id:ident, $bytes:ident, $func:ident, $($arg:tt)*) => {
match ConcurrentMerkleTree::<$max_depth, $max_size>::load_mut_bytes($bytes) {
Ok(merkle_tree) => {
match merkle_tree.$func($($arg)*) {
Ok(_) => {
Ok(Box::<ChangeLogEvent>::from((merkle_tree.get_change_log(), $id, merkle_tree.sequence_number)))
}
Err(err) => {
msg!("Error using concurrent merkle tree: {}", err);
err!(AccountCompressionError::ConcurrentMerkleTreeError)
}
}
}
Err(err) => {
msg!("Error zero copying concurrent merkle tree: {}", err);
err!(AccountCompressionError::ZeroCopyError)
}
}
}
}
fn merkle_tree_get_size(header: &ConcurrentMerkleTreeHeader) -> Result<usize> {
// Note: max_buffer_size MUST be a power of 2
match (header.max_depth, header.max_buffer_size) {
(3, 8) => Ok(size_of::<ConcurrentMerkleTree<3, 8>>()),
(5, 8) => Ok(size_of::<ConcurrentMerkleTree<5, 8>>()),
(14, 64) => Ok(size_of::<ConcurrentMerkleTree<14, 64>>()),
(14, 256) => Ok(size_of::<ConcurrentMerkleTree<14, 256>>()),
(14, 1024) => Ok(size_of::<ConcurrentMerkleTree<14, 1024>>()),
(14, 2048) => Ok(size_of::<ConcurrentMerkleTree<14, 2048>>()),
(20, 64) => Ok(size_of::<ConcurrentMerkleTree<20, 64>>()),
(20, 256) => Ok(size_of::<ConcurrentMerkleTree<20, 256>>()),
(20, 1024) => Ok(size_of::<ConcurrentMerkleTree<20, 1024>>()),
(20, 2048) => Ok(size_of::<ConcurrentMerkleTree<20, 2048>>()),
(24, 64) => Ok(size_of::<ConcurrentMerkleTree<24, 64>>()),
(24, 256) => Ok(size_of::<ConcurrentMerkleTree<24, 256>>()),
(24, 512) => Ok(size_of::<ConcurrentMerkleTree<24, 512>>()),
(24, 1024) => Ok(size_of::<ConcurrentMerkleTree<24, 1024>>()),
(24, 2048) => Ok(size_of::<ConcurrentMerkleTree<24, 2048>>()),
(26, 512) => Ok(size_of::<ConcurrentMerkleTree<26, 512>>()),
(26, 1024) => Ok(size_of::<ConcurrentMerkleTree<26, 1024>>()),
(26, 2048) => Ok(size_of::<ConcurrentMerkleTree<26, 2048>>()),
(30, 512) => Ok(size_of::<ConcurrentMerkleTree<30, 512>>()),
(30, 1024) => Ok(size_of::<ConcurrentMerkleTree<30, 1024>>()),
(30, 2048) => Ok(size_of::<ConcurrentMerkleTree<30, 2048>>()),
_ => {
msg!(
"Failed to get size of max depth {} and max buffer size {}",
header.max_depth,
header.max_buffer_size
);
err!(AccountCompressionError::ConcurrentMerkleTreeConstantsError)
}
}
}
/// This applies a given function on a ConcurrentMerkleTree by
/// allowing the compiler to infer the size of the tree based
/// upon the header information stored on-chain
macro_rules! merkle_tree_apply_fn {
($header:ident, $id:ident, $bytes:ident, $func:ident, $($arg:tt)*) => {
// Note: max_buffer_size MUST be a power of 2
match ($header.max_depth, $header.max_buffer_size) {
(3, 8) => merkle_tree_depth_size_apply_fn!(3, 8, $id, $bytes, $func, $($arg)*),
(5, 8) => merkle_tree_depth_size_apply_fn!(5, 8, $id, $bytes, $func, $($arg)*),
(14, 64) => merkle_tree_depth_size_apply_fn!(14, 64, $id, $bytes, $func, $($arg)*),
(14, 256) => merkle_tree_depth_size_apply_fn!(14, 256, $id, $bytes, $func, $($arg)*),
(14, 1024) => merkle_tree_depth_size_apply_fn!(14, 1024, $id, $bytes, $func, $($arg)*),
(14, 2048) => merkle_tree_depth_size_apply_fn!(14, 2048, $id, $bytes, $func, $($arg)*),
(20, 64) => merkle_tree_depth_size_apply_fn!(20, 64, $id, $bytes, $func, $($arg)*),
(20, 256) => merkle_tree_depth_size_apply_fn!(20, 256, $id, $bytes, $func, $($arg)*),
(20, 1024) => merkle_tree_depth_size_apply_fn!(20, 1024, $id, $bytes, $func, $($arg)*),
(20, 2048) => merkle_tree_depth_size_apply_fn!(20, 2048, $id, $bytes, $func, $($arg)*),
(24, 64) => merkle_tree_depth_size_apply_fn!(24, 64, $id, $bytes, $func, $($arg)*),
(24, 256) => merkle_tree_depth_size_apply_fn!(24, 256, $id, $bytes, $func, $($arg)*),
(24, 512) => merkle_tree_depth_size_apply_fn!(24, 512, $id, $bytes, $func, $($arg)*),
(24, 1024) => merkle_tree_depth_size_apply_fn!(24, 1024, $id, $bytes, $func, $($arg)*),
(24, 2048) => merkle_tree_depth_size_apply_fn!(24, 2048, $id, $bytes, $func, $($arg)*),
(26, 512) => merkle_tree_depth_size_apply_fn!(26, 512, $id, $bytes, $func, $($arg)*),
(26, 1024) => merkle_tree_depth_size_apply_fn!(26, 1024, $id, $bytes, $func, $($arg)*),
(26, 2048) => merkle_tree_depth_size_apply_fn!(26, 2048, $id, $bytes, $func, $($arg)*),
(30, 512) => merkle_tree_depth_size_apply_fn!(30, 512, $id, $bytes, $func, $($arg)*),
(30, 1024) => merkle_tree_depth_size_apply_fn!(30, 1024, $id, $bytes, $func, $($arg)*),
(30, 2048) => merkle_tree_depth_size_apply_fn!(30, 2048, $id, $bytes, $func, $($arg)*),
_ => {
msg!("Failed to apply {} on concurrent merkle tree with max depth {} and max buffer size {}", stringify!($func), $header.max_depth, $header.max_buffer_size);
err!(AccountCompressionError::ConcurrentMerkleTreeConstantsError)
}
}
};
}
#[program]
pub mod spl_compression {
use super::*;
/// Creates a new merkle tree with maximum leaf capacity of `power(2, max_depth)`
/// and a minimum concurrency limit of `max_buffer_size`.
///
/// Concurrency limit represents the # of replace instructions that can be successfully
/// executed with proofs dated for the same root. For example, a maximum buffer size of 1024
/// means that at least 1024 replace instructions can be executed before a new proof must be
/// generated for the next replace instruction.
///
/// Concurrency limit should be determined by empirically testing the demand for
/// state built on top of SPL Compression.
///
/// For instructions on enabling the canopy, see [canopy].
pub fn init_empty_merkle_tree(
ctx: Context<Initialize>,
max_depth: u32,
max_buffer_size: u32,
) -> Result<()> {
require_eq!(
*ctx.accounts.merkle_tree.owner,
crate::id(),
AccountCompressionError::IncorrectAccountOwner
);
let mut merkle_tree_bytes = ctx.accounts.merkle_tree.try_borrow_mut_data()?;
let (mut header_bytes, rest) =
merkle_tree_bytes.split_at_mut(size_of::<ConcurrentMerkleTreeHeader>());
let mut header = ConcurrentMerkleTreeHeader::try_from_slice(header_bytes)?;
header.initialize(
max_depth,
max_buffer_size,
&ctx.accounts.authority.key(),
Clock::get()?.slot,
);
header.serialize(&mut header_bytes)?;
let merkle_tree_size = merkle_tree_get_size(&header)?;
let (tree_bytes, canopy_bytes) = rest.split_at_mut(merkle_tree_size);
let id = ctx.accounts.merkle_tree.key();
let change_log = merkle_tree_apply_fn!(header, id, tree_bytes, initialize,)?;
wrap_event(change_log.try_to_vec()?, &ctx.accounts.log_wrapper)?;
emit!(*change_log);
update_canopy(canopy_bytes, header.max_depth, None)
}
/// Note:
/// Supporting this instruction opens a security vulnerability for indexers.
/// This instruction has been deemed unusable for publicly indexed compressed NFTs.
/// Indexing batched data in this way requires indexers to read in the `uri`s onto physical storage
/// and then into their database. This opens up a DOS attack vector, whereby this instruction is
/// repeatedly invoked, causing indexers to fail.
///
/// Because this instruction was deemed insecure, this instruction has been removed
/// until secure usage is available on-chain.
// pub fn init_merkle_tree_with_root(
// ctx: Context<Initialize>,
// max_depth: u32,
// max_buffer_size: u32,
// root: [u8; 32],
// leaf: [u8; 32],
// index: u32,
// _changelog_db_uri: String,
// _metadata_db_uri: String,
// ) -> Result<()> {
// require_eq!(
// *ctx.accounts.merkle_tree.owner,
// crate::id(),
// AccountCompressionError::IncorrectAccountOwner
// );
// let mut merkle_tree_bytes = ctx.accounts.merkle_tree.try_borrow_mut_data()?;
// let (mut header_bytes, rest) =
// merkle_tree_bytes.split_at_mut(size_of::<ConcurrentMerkleTreeHeader>());
// let mut header = ConcurrentMerkleTreeHeader::try_from_slice(&header_bytes)?;
// header.initialize(
// max_depth,
// max_buffer_size,
// &ctx.accounts.authority.key(),
// Clock::get()?.slot,
// );
// header.serialize(&mut header_bytes)?;
// let merkle_tree_size = merkle_tree_get_size(&header)?;
// let (tree_bytes, canopy_bytes) = rest.split_at_mut(merkle_tree_size);
// // Get rightmost proof from accounts
// let mut proof = vec![];
// for node in ctx.remaining_accounts.iter() {
// proof.push(node.key().to_bytes());
// }
// fill_in_proof_from_canopy(canopy_bytes, header.max_depth, index, &mut proof)?;
// assert_eq!(proof.len(), max_depth as usize);
// let id = ctx.accounts.merkle_tree.key();
// // A call is made to ConcurrentMerkleTree::initialize_with_root(root, leaf, proof, index)
// let change_log = merkle_tree_apply_fn!(
// header,
// id,
// tree_bytes,
// initialize_with_root,
// root,
// leaf,
// &proof,
// index
// )?;
// wrap_event(change_log.try_to_vec()?, &ctx.accounts.log_wrapper)?;
// emit!(*change_log);
// update_canopy(canopy_bytes, header.max_depth, Some(change_log))
// }
/// Executes an instruction that overwrites a leaf node.
/// Composing programs should check that the data hashed into previous_leaf
/// matches the authority information necessary to execute this instruction.
pub fn replace_leaf(
ctx: Context<Modify>,
root: [u8; 32],
previous_leaf: [u8; 32],
new_leaf: [u8; 32],
index: u32,
) -> Result<()> {
require_eq!(
*ctx.accounts.merkle_tree.owner,
crate::id(),
AccountCompressionError::IncorrectAccountOwner
);
let mut merkle_tree_bytes = ctx.accounts.merkle_tree.try_borrow_mut_data()?;
let (header_bytes, rest) =
merkle_tree_bytes.split_at_mut(size_of::<ConcurrentMerkleTreeHeader>());
let header = ConcurrentMerkleTreeHeader::try_from_slice(header_bytes)?;
header.assert_valid_authority(&ctx.accounts.authority.key())?;
let merkle_tree_size = merkle_tree_get_size(&header)?;
let (tree_bytes, canopy_bytes) = rest.split_at_mut(merkle_tree_size);
let mut proof = vec![];
for node in ctx.remaining_accounts.iter() {
proof.push(node.key().to_bytes());
}
fill_in_proof_from_canopy(canopy_bytes, header.max_depth, index, &mut proof)?;
let id = ctx.accounts.merkle_tree.key();
// A call is made to ConcurrentMerkleTree::set_leaf(root, previous_leaf, new_leaf, proof, index)
let change_log = merkle_tree_apply_fn!(
header,
id,
tree_bytes,
set_leaf,
root,
previous_leaf,
new_leaf,
&proof,
index,
)?;
wrap_event(change_log.try_to_vec()?, &ctx.accounts.log_wrapper)?;
emit!(*change_log);
update_canopy(canopy_bytes, header.max_depth, Some(change_log))
}
/// Transfers `authority`.
/// Requires `authority` to sign
pub fn transfer_authority(
ctx: Context<TransferAuthority>,
new_authority: Pubkey,
) -> Result<()> {
require_eq!(
*ctx.accounts.merkle_tree.owner,
crate::id(),
AccountCompressionError::IncorrectAccountOwner
);
let mut merkle_tree_bytes = ctx.accounts.merkle_tree.try_borrow_mut_data()?;
let (mut header_bytes, _) =
merkle_tree_bytes.split_at_mut(size_of::<ConcurrentMerkleTreeHeader>());
let mut header = ConcurrentMerkleTreeHeader::try_from_slice(header_bytes)?;
header.assert_valid_authority(&ctx.accounts.authority.key())?;
header.authority = new_authority;
msg!("Authority transferred to: {:?}", header.authority);
header.serialize(&mut header_bytes)?;
Ok(())
}
/// Verifies a provided proof and leaf.
/// If invalid, throws an error.
pub fn verify_leaf(
ctx: Context<VerifyLeaf>,
root: [u8; 32],
leaf: [u8; 32],
index: u32,
) -> Result<()> {
require_eq!(
*ctx.accounts.merkle_tree.owner,
crate::id(),
AccountCompressionError::IncorrectAccountOwner
);
let mut merkle_tree_bytes = ctx.accounts.merkle_tree.try_borrow_mut_data()?;
let (header_bytes, rest) =
merkle_tree_bytes.split_at_mut(size_of::<ConcurrentMerkleTreeHeader>());
let header = ConcurrentMerkleTreeHeader::try_from_slice(header_bytes)?;
header.assert_valid()?;
let merkle_tree_size = merkle_tree_get_size(&header)?;
let (tree_bytes, canopy_bytes) = rest.split_at_mut(merkle_tree_size);
let mut proof = vec![];
for node in ctx.remaining_accounts.iter() {
proof.push(node.key().to_bytes());
}
fill_in_proof_from_canopy(canopy_bytes, header.max_depth, index, &mut proof)?;
let id = ctx.accounts.merkle_tree.key();
merkle_tree_apply_fn!(header, id, tree_bytes, prove_leaf, root, leaf, &proof, index)?;
Ok(())
}
/// This instruction allows the tree's `authority` to append a new leaf to the tree
/// without having to supply a proof.
///
/// Learn more about SPL
/// ConcurrentMerkleTree
/// [here](https://github.com/solana-labs/solana-program-library/tree/master/libraries/concurrent-merkle-tree)
pub fn append(ctx: Context<Modify>, leaf: [u8; 32]) -> Result<()> {
require_eq!(
*ctx.accounts.merkle_tree.owner,
crate::id(),
AccountCompressionError::IncorrectAccountOwner
);
let mut merkle_tree_bytes = ctx.accounts.merkle_tree.try_borrow_mut_data()?;
let (header_bytes, rest) =
merkle_tree_bytes.split_at_mut(size_of::<ConcurrentMerkleTreeHeader>());
let header = ConcurrentMerkleTreeHeader::try_from_slice(header_bytes)?;
header.assert_valid_authority(&ctx.accounts.authority.key())?;
let id = ctx.accounts.merkle_tree.key();
let merkle_tree_size = merkle_tree_get_size(&header)?;
let (tree_bytes, canopy_bytes) = rest.split_at_mut(merkle_tree_size);
let change_log = merkle_tree_apply_fn!(header, id, tree_bytes, append, leaf)?;
wrap_event(change_log.try_to_vec()?, &ctx.accounts.log_wrapper)?;
emit!(*change_log);
update_canopy(canopy_bytes, header.max_depth, Some(change_log))
}
/// This instruction takes a proof, and will attempt to write the given leaf
/// to the specified index in the tree. If the insert operation fails, the leaf will be `append`-ed
/// to the tree.
/// It is up to the indexer to parse the final location of the leaf from the emitted changelog.
pub fn insert_or_append(
ctx: Context<Modify>,
root: [u8; 32],
leaf: [u8; 32],
index: u32,
) -> Result<()> {
require_eq!(
*ctx.accounts.merkle_tree.owner,
crate::id(),
AccountCompressionError::IncorrectAccountOwner
);
let mut merkle_tree_bytes = ctx.accounts.merkle_tree.try_borrow_mut_data()?;
let (header_bytes, rest) =
merkle_tree_bytes.split_at_mut(size_of::<ConcurrentMerkleTreeHeader>());
let header = ConcurrentMerkleTreeHeader::try_from_slice(header_bytes)?;
header.assert_valid_authority(&ctx.accounts.authority.key())?;
let merkle_tree_size = merkle_tree_get_size(&header)?;
let (tree_bytes, canopy_bytes) = rest.split_at_mut(merkle_tree_size);
let mut proof = vec![];
for node in ctx.remaining_accounts.iter() {
proof.push(node.key().to_bytes());
}
fill_in_proof_from_canopy(canopy_bytes, header.max_depth, index, &mut proof)?;
// A call is made to ConcurrentMerkleTree::fill_empty_or_append
let id = ctx.accounts.merkle_tree.key();
let change_log = merkle_tree_apply_fn!(
header,
id,
tree_bytes,
fill_empty_or_append,
root,
leaf,
&proof,
index,
)?;
wrap_event(change_log.try_to_vec()?, &ctx.accounts.log_wrapper)?;
emit!(*change_log);
update_canopy(canopy_bytes, header.max_depth, Some(change_log))
}
}
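
The modification instructions above read their proofs from Anchor's `remaining_accounts`, one readonly non-signer account per 32-byte node, recovered on-chain via `node.key().to_bytes()`. A hedged client-side sketch of that packing (the helper name is invented; `AccountMeta` and `Pubkey` are real solana-program types):

```rust
// Illustrative client-side sketch: packing a merkle proof into the
// "remaining accounts" list, as replace_leaf / verify_leaf / insert_or_append
// expect (each handler reads the nodes back with node.key().to_bytes()).
use solana_program::{instruction::AccountMeta, pubkey::Pubkey};

fn proof_as_remaining_accounts(proof: &[[u8; 32]]) -> Vec<AccountMeta> {
    proof
        .iter()
        .map(|node| AccountMeta::new_readonly(Pubkey::new_from_array(*node), false))
        .collect()
}

fn main() {
    // A depth-14 tree holds 2^14 = 16,384 leaves; with a 4-level canopy,
    // clients only need to send 14 - 4 = 10 proof nodes.
    let proof = vec![[0u8; 32]; 10];
    let metas = proof_as_remaining_accounts(&proof);
    assert_eq!(metas.len(), 10);
    assert!(metas.iter().all(|m| !m.is_signer && !m.is_writable));
}
```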

View File

@ -0,0 +1,94 @@
use anchor_lang::prelude::*;
use borsh::{BorshDeserialize, BorshSerialize};
use crate::error::AccountCompressionError;
#[derive(Debug, Copy, Clone, PartialEq, BorshDeserialize, BorshSerialize)]
#[repr(u32)]
pub enum CompressionAccountType {
/// Uninitialized
Uninitialized,
/// SPL ConcurrentMerkleTree data structure, may include a Canopy
ConcurrentMerkleTree,
}
impl std::fmt::Display for CompressionAccountType {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{:?}", &self)
}
}
/// Initialization parameters for an SPL ConcurrentMerkleTree.
///
/// Only the following permutations are valid:
///
/// | max_depth | max_buffer_size            |
/// | --------- | -------------------------- |
/// | 3         | 8                          |
/// | 5         | 8                          |
/// | 14        | (64, 256, 1024, 2048)      |
/// | 20        | (64, 256, 1024, 2048)      |
/// | 24        | (64, 256, 512, 1024, 2048) |
/// | 26        | (512, 1024, 2048)          |
/// | 30        | (512, 1024, 2048)          |
///
#[derive(BorshDeserialize, BorshSerialize)]
#[repr(C)]
pub struct ConcurrentMerkleTreeHeader {
/// Account type
pub account_type: CompressionAccountType,
/// Size of the on-chain buffer of changelogs.
/// Must be a power of 2; see the table above for valid combinations.
pub max_buffer_size: u32,
/// Depth of the SPL ConcurrentMerkleTree to store.
/// Tree capacity can be calculated as power(2, max_depth).
/// See above table for valid options.
pub max_depth: u32,
/// Authority that validates the content of the trees.
/// Typically a program, e.g., the Bubblegum contract validates that leaves are valid NFTs.
pub authority: Pubkey,
/// Slot corresponding to when the Merkle tree was created.
/// Provides a lower-bound on what slot to start (re-)building a tree from.
pub creation_slot: u64,
}
impl ConcurrentMerkleTreeHeader {
pub fn initialize(
&mut self,
max_depth: u32,
max_buffer_size: u32,
authority: &Pubkey,
creation_slot: u64,
) {
// Check header is empty
assert_eq!(self.max_buffer_size, 0);
assert_eq!(self.max_depth, 0);
self.account_type = CompressionAccountType::ConcurrentMerkleTree;
self.max_buffer_size = max_buffer_size;
self.max_depth = max_depth;
self.authority = *authority;
self.creation_slot = creation_slot;
}
pub fn assert_valid(&self) -> Result<()> {
require_eq!(
self.account_type,
CompressionAccountType::ConcurrentMerkleTree,
AccountCompressionError::IncorrectAccountType,
);
Ok(())
}
pub fn assert_valid_authority(&self, expected_authority: &Pubkey) -> Result<()> {
self.assert_valid()?;
require_eq!(
self.authority,
*expected_authority,
AccountCompressionError::IncorrectAuthority,
);
Ok(())
}
}
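
The header is Borsh-(de)serialized in place at the front of the account, as `init_empty_merkle_tree` does above. A minimal standalone sketch of that round trip, using a trimmed stand-in struct (field set simplified for illustration; assumes the borsh crate):

```rust
// Minimal sketch of the in-place header round trip used by the program.
use borsh::{BorshDeserialize, BorshSerialize};

#[derive(BorshDeserialize, BorshSerialize)]
struct HeaderLike {
    account_type: u32, // stand-in for the CompressionAccountType discriminant
    max_buffer_size: u32,
    max_depth: u32,
}

fn main() {
    // A zeroed account deserializes to an all-zero header ("uninitialized").
    let mut header_bytes = vec![0u8; 12];
    let mut header = HeaderLike::try_from_slice(&header_bytes).unwrap();
    assert_eq!(header.max_depth, 0);

    // initialize() asserts the zeroed state, then fills in the fields...
    header.account_type = 1; // ConcurrentMerkleTree
    header.max_buffer_size = 64;
    header.max_depth = 14;

    // ...and serialize() writes them back over the same bytes.
    header.serialize(&mut header_bytes.as_mut_slice()).unwrap();
    assert_eq!(u32::from_le_bytes(header_bytes[8..12].try_into().unwrap()), 14);
}
```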

View File

@ -0,0 +1,6 @@
//! State needed to manipulate SPL ConcurrentMerkleTrees
mod concurrent_merkle_tree_header;
mod path_node;
pub use concurrent_merkle_tree_header::ConcurrentMerkleTreeHeader;
pub use path_node::PathNode;

View File

@ -0,0 +1,17 @@
use anchor_lang::prelude::*;
use spl_concurrent_merkle_tree::node::Node;
#[derive(AnchorDeserialize, AnchorSerialize, Clone, Copy, Debug)]
pub struct PathNode {
pub node: [u8; 32],
pub index: u32,
}
impl PathNode {
pub fn new(tree_node: Node, index: u32) -> Self {
Self {
node: tree_node,
index,
}
}
}

View File

@ -0,0 +1,22 @@
//! Implements ZeroCopy over ConcurrentMerkleTree generics
use crate::error::error_msg;
use anchor_lang::prelude::*;
use bytemuck::Pod;
use spl_concurrent_merkle_tree::concurrent_merkle_tree::ConcurrentMerkleTree;
use std::mem::size_of;
pub trait ZeroCopy: Pod {
fn load_mut_bytes<'a>(data: &'a mut [u8]) -> Result<&'a mut Self> {
let size = size_of::<Self>();
let data_len = data.len();
Ok(bytemuck::try_from_bytes_mut(&mut data[..size])
.map_err(error_msg::<Self>(data_len))
.unwrap())
}
}
impl<const MAX_DEPTH: usize, const MAX_BUFFER_SIZE: usize> ZeroCopy
for ConcurrentMerkleTree<MAX_DEPTH, MAX_BUFFER_SIZE>
{
}
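
A standalone sketch of the same bytemuck pattern on a toy `Pod` struct (assumes bytemuck with the `derive` feature), showing that the loaded reference aliases the account bytes directly, with no copy:

```rust
// Standalone sketch of zero-copy loading with bytemuck (derive feature).
use bytemuck::{Pod, Zeroable};
use std::mem::size_of;

#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct Counter {
    sequence_number: u64,
}

fn main() {
    // Back the "account" with u64s so the byte buffer is 8-byte aligned.
    let mut backing = [0u64; 2];
    let account_data: &mut [u8] = bytemuck::cast_slice_mut(&mut backing);
    let size = size_of::<Counter>();
    // Reinterpret the leading bytes as &mut Counter; errors on bad size/alignment.
    let counter: &mut Counter =
        bytemuck::try_from_bytes_mut(&mut account_data[..size]).unwrap();
    counter.sequence_number = 7;
    // The write landed directly in the underlying byte buffer.
    assert_eq!(account_data[0], 7);
}
```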

View File

@ -0,0 +1,19 @@
[package]
name = "wrapper"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
crate-type = ["cdylib", "lib"]
[features]
no-entrypoint = []
no-idl = []
no-log-ix-name = []
cpi = ["no-entrypoint"]
default = []
[dependencies]
solana-program = "1.10.35"

View File

@ -0,0 +1,28 @@
use solana_program::{
account_info::AccountInfo, declare_id, entrypoint::ProgramResult, instruction::Instruction,
pubkey::Pubkey,
};
declare_id!("WRAPYChf58WFCnyjXKJHtrPgzKXgHp6MD9aVDqJBbGh");
#[cfg(not(feature = "no-entrypoint"))]
use solana_program::entrypoint;
#[cfg(not(feature = "no-entrypoint"))]
entrypoint!(wrap);
pub fn wrap(
_program_id: &Pubkey,
_accounts: &[AccountInfo],
_instruction_data: &[u8],
) -> ProgramResult {
Ok(())
}
pub fn wrap_instruction(data: Vec<u8>) -> Instruction {
Instruction {
program_id: crate::id(),
accounts: vec![],
data,
}
}
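
Because the program is a no-op, the payload survives verbatim in the transaction: indexers can read the inner CPI's instruction data and Borsh-decode the event from it. A hedged off-chain sketch of building the same instruction by hand (the payload bytes are placeholders; `Instruction` and `Pubkey` are real solana-program types):

```rust
// Off-chain sketch: the wrapper instruction is just bytes under the WRAP...
// program id, with no accounts; indexers recover the payload verbatim from
// the inner instruction's data field.
use solana_program::{instruction::Instruction, pubkey::Pubkey};
use std::str::FromStr;

fn main() {
    let wrapper_id = Pubkey::from_str("WRAPYChf58WFCnyjXKJHtrPgzKXgHp6MD9aVDqJBbGh").unwrap();
    let payload = vec![1, 2, 3]; // e.g. a Borsh-serialized ChangeLogEvent
    let ix = Instruction {
        program_id: wrapper_id,
        accounts: vec![],
        data: payload.clone(),
    };
    // Nothing is transformed on the way in, so decoding is byte-for-byte.
    assert_eq!(ix.data, payload);
}
```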

View File

@ -0,0 +1,10 @@
{
"compilerOptions": {
"types": ["mocha", "chai"],
"typeRoots": ["./node_modules/@types"],
"lib": ["es2015", "dom"],
"module": "commonjs",
"target": "es6",
"esModuleInterop": true
}
}

File diff suppressed because it is too large

View File

@ -9,9 +9,6 @@ use bytemuck::{Pod, Zeroable};
use log_compute;
use solana_logging;
#[cfg(feature = "sol-log")]
use solana_program::{log::sol_log_compute_units, msg};
/// Enforce constraints on max depth and buffer size
#[inline(always)]
fn check_bounds(max_depth: usize, max_buffer_size: usize) {

View File

@ -1,17 +1,17 @@
macro_rules! solana_logging {
($message:literal, $($arg:tt)*) => {
#[cfg(feature = "log")]
msg!($message, $($arg)*);
::solana_program::msg!($message, $($arg)*);
};
($message:literal) => {
#[cfg(feature = "log")]
msg!($message);
::solana_program::msg!($message);
};
}
macro_rules! log_compute {
() => {
#[cfg(all(feature = "sol-log", feature = "log"))]
sol_log_compute_units();
::solana_program::log::sol_log_compute_units();
};
}