Merge pull request #69 from nuttycom/cap_cache

Add support for tracking legacy frontiers and witnesses in `ShardTree` instances.
This commit is contained in:
Kris Nuttycombe 2023-07-03 11:27:57 -06:00 committed by GitHub
commit 313c072afb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 2414 additions and 816 deletions

View File

@ -21,5 +21,10 @@ proptest = { version = "1.0.0", optional = true }
proptest = "1.0.0"
[features]
# The legacy-api feature guards types and functions that were previously
# part of the `zcash_primitives` crate. Those types were removed in the
# `zcash_primitives` 0.12 release and are now maintained here.
legacy-api = []
# The test-dependencies feature guards types and functions that are
# useful for testing incremental Merkle trees and Merkle tree frontiers.
test-dependencies = ["proptest"]

View File

@ -54,6 +54,11 @@ impl<H> NonEmptyFrontier<H> {
}
}
/// Decomposes the frontier into its constituent parts
pub fn into_parts(self) -> (Position, H, Vec<H>) {
(self.position, self.leaf, self.ommers)
}
/// Returns the position of the most recently appended leaf.
pub fn position(&self) -> Position {
self.position
@ -79,9 +84,9 @@ impl<H: Hashable + Clone> NonEmptyFrontier<H> {
let prior_leaf = self.leaf.clone();
self.position += 1;
self.leaf = leaf;
if self.position.is_odd() {
// if the new position is odd, the current leaf will directly become
// an ommer at level 0, and there is no other mutation made to the tree.
if self.position.is_right_child() {
// if the new position is a right-hand leaf, the current leaf will directly become an
// ommer at level 0, and there is no other mutation made to the tree.
self.ommers.insert(0, prior_leaf);
} else {
// if the new position is even, then the current leaf will be hashed
@ -198,21 +203,23 @@ impl<H, const DEPTH: u8> Frontier<H, DEPTH> {
/// Constructs a new frontier from its constituent parts.
///
/// Returns `None` if the new frontier would exceed the maximum
/// allowed depth or if the list of ommers provided is not consistent
/// with the position of the leaf.
/// Returns an error if the new frontier would exceed the maximum allowed depth or if the list
/// of ommers provided is not consistent with the position of the leaf.
pub fn from_parts(position: Position, leaf: H, ommers: Vec<H>) -> Result<Self, FrontierError> {
NonEmptyFrontier::from_parts(position, leaf, ommers).and_then(Self::try_from)
}
/// Return the wrapped NonEmptyFrontier reference, or None if
/// the frontier is empty.
/// Return the wrapped NonEmptyFrontier reference, or None if the frontier is empty.
pub fn value(&self) -> Option<&NonEmptyFrontier<H>> {
self.frontier.as_ref()
}
/// Returns the amount of memory dynamically allocated for ommer
/// values within the frontier.
/// Consumes this wrapper and returns the underlying `Option<NonEmptyFrontier>`
pub fn take(self) -> Option<NonEmptyFrontier<H>> {
self.frontier
}
/// Returns the amount of memory dynamically allocated for ommer values within the frontier.
pub fn dynamic_memory_usage(&self) -> usize {
self.frontier.as_ref().map_or(0, |f| {
size_of::<usize>() + (f.ommers.capacity() + 1) * size_of::<H>()
@ -334,6 +341,10 @@ impl<H, const DEPTH: u8> CommitmentTree<H, DEPTH> {
}
}
pub fn is_empty(&self) -> bool {
self.left.is_none() && self.right.is_none()
}
pub fn left(&self) -> &Option<H> {
&self.left
}
@ -346,6 +357,22 @@ impl<H, const DEPTH: u8> CommitmentTree<H, DEPTH> {
&self.parents
}
pub fn leaf(&self) -> Option<&H> {
self.right.as_ref().or(self.left.as_ref())
}
pub fn ommers_iter(&self) -> Box<dyn Iterator<Item = &'_ H> + '_> {
if self.right.is_some() {
Box::new(
self.left
.iter()
.chain(self.parents.iter().filter_map(|v| v.as_ref())),
)
} else {
Box::new(self.parents.iter().filter_map(|v| v.as_ref()))
}
}
/// Returns the number of leaf nodes in the tree.
pub fn size(&self) -> usize {
self.parents.iter().enumerate().fold(
@ -384,7 +411,7 @@ impl<H: Hashable + Clone, const DEPTH: u8> CommitmentTree<H, DEPTH> {
pub fn from_frontier(frontier: &Frontier<H, DEPTH>) -> Self {
frontier.value().map_or_else(Self::empty, |f| {
let mut ommers_iter = f.ommers().iter().cloned();
let (left, right) = if f.position().is_odd() {
let (left, right) = if f.position().is_right_child() {
(
ommers_iter
.next()
@ -515,11 +542,12 @@ impl<H: Hashable + Clone, const DEPTH: u8> CommitmentTree<H, DEPTH> {
#[cfg(feature = "test-dependencies")]
pub mod testing {
use core::fmt::Debug;
use proptest::collection::vec;
use proptest::prelude::*;
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
use crate::{Hashable, Level};
use crate::{frontier::Frontier, Hashable, Level};
impl<H: Hashable + Clone, const DEPTH: u8> crate::testing::Frontier<H>
for super::Frontier<H, DEPTH>
@ -550,14 +578,26 @@ pub mod testing {
}
}
prop_compose! {
pub fn arb_test_node()(i in any::<u64>()) -> TestNode {
TestNode(i)
}
pub fn arb_test_node() -> impl Strategy<Value = TestNode> + Clone {
any::<u64>().prop_map(TestNode)
}
pub fn arb_frontier<H: Hashable + Clone + Debug, T: Strategy<Value = H>, const DEPTH: u8>(
min_size: usize,
arb_node: T,
) -> impl Strategy<Value = Frontier<H, DEPTH>> {
assert!((1 << DEPTH) >= min_size + 100);
vec(arb_node, min_size..(min_size + 100)).prop_map(move |v| {
let mut frontier = Frontier::empty();
for node in v.into_iter() {
frontier.append(node);
}
frontier
})
}
#[cfg(feature = "legacy-api")]
use {crate::frontier::CommitmentTree, proptest::collection::vec};
use crate::frontier::CommitmentTree;
#[cfg(feature = "legacy-api")]
pub fn arb_commitment_tree<

View File

@ -137,8 +137,9 @@ impl Iterator for WitnessAddrsIter {
pub struct Position(u64);
impl Position {
/// Return whether the position is odd-valued.
pub fn is_odd(&self) -> bool {
/// Return whether the position refers to the right-hand child of a subtree with
/// its root at level 1.
pub fn is_right_child(&self) -> bool {
self.0 & 0x1 == 1
}
@ -236,6 +237,10 @@ impl TryFrom<Position> for usize {
pub struct Level(u8);
impl Level {
pub const fn new(value: u8) -> Self {
Self(value)
}
// TODO: replace with an instance for `Step<Level>` once `step_trait`
// is stabilized
pub fn iter_to(self, other: Level) -> impl Iterator<Item = Self> {
@ -262,9 +267,21 @@ impl From<Level> for u8 {
}
}
// Supporting sub-8-bit platforms isn't on our roadmap.
impl From<Level> for u32 {
fn from(level: Level) -> u32 {
level.0.into()
}
}
impl From<Level> for u64 {
fn from(level: Level) -> u64 {
level.0.into()
}
}
impl From<Level> for usize {
fn from(level: Level) -> usize {
// Supporting sub-8-bit platforms isn't on our roadmap.
level.0 as usize
}
}
@ -364,6 +381,27 @@ impl Address {
self.level > addr.level && { addr.index >> (self.level.0 - addr.level.0) == self.index }
}
/// Returns the common ancestor of `self` and `other` having the smallest level value.
pub fn common_ancestor(&self, other: &Self) -> Self {
if self.level >= other.level {
let other_ancestor_idx = other.index >> (self.level.0 - other.level.0);
let index_delta = self.index.abs_diff(other_ancestor_idx);
let level_delta = (u64::BITS - index_delta.leading_zeros()) as u8;
Address {
level: self.level + level_delta,
index: std::cmp::max(self.index, other_ancestor_idx) >> level_delta,
}
} else {
let self_ancestor_idx = self.index >> (other.level.0 - self.level.0);
let index_delta = other.index.abs_diff(self_ancestor_idx);
let level_delta = (u64::BITS - index_delta.leading_zeros()) as u8;
Address {
level: other.level + level_delta,
index: std::cmp::max(other.index, self_ancestor_idx) >> level_delta,
}
}
}
/// Returns whether this address is an ancestor of, or is equal to,
/// the specified address.
pub fn contains(&self, addr: &Self) -> bool {
@ -430,6 +468,11 @@ impl Address {
}
}
/// Returns whether this address is the left-hand child of its parent
pub fn is_left_child(&self) -> bool {
self.index & 0x1 == 0
}
/// Returns whether this address is the right-hand child of its parent
pub fn is_right_child(&self) -> bool {
self.index & 0x1 == 1
@ -780,4 +823,28 @@ pub(crate) mod tests {
assert_eq!(path.root("c".to_string()), "abcdefgh".to_string());
}
#[test]
fn addr_common_ancestor() {
assert_eq!(
Address::from_parts(Level(2), 1).common_ancestor(&Address::from_parts(Level(3), 2)),
Address::from_parts(Level(5), 0)
);
assert_eq!(
Address::from_parts(Level(2), 2).common_ancestor(&Address::from_parts(Level(1), 7)),
Address::from_parts(Level(3), 1)
);
assert_eq!(
Address::from_parts(Level(2), 2).common_ancestor(&Address::from_parts(Level(2), 2)),
Address::from_parts(Level(2), 2)
);
assert_eq!(
Address::from_parts(Level(2), 2).common_ancestor(&Address::from_parts(Level(0), 9)),
Address::from_parts(Level(2), 2)
);
assert_eq!(
Address::from_parts(Level(0), 9).common_ancestor(&Address::from_parts(Level(2), 2)),
Address::from_parts(Level(2), 2)
);
}
}

File diff suppressed because it is too large Load Diff

View File

@ -3,7 +3,7 @@ use std::iter::repeat;
use crate::{
frontier::{CommitmentTree, PathFiller},
Hashable, Level, MerklePath, Position,
Hashable, Level, MerklePath, Position, Source,
};
/// An updatable witness to a path from a position in a particular [`CommitmentTree`].
@ -26,7 +26,7 @@ use crate::{
/// tree.append(TestNode(0));
/// tree.append(TestNode(1));
/// let mut witness = IncrementalWitness::from_tree(tree.clone());
/// assert_eq!(witness.position(), Position::from(1));
/// assert_eq!(witness.witnessed_position(), Position::from(1));
/// assert_eq!(tree.root(), witness.root());
///
/// let next = TestNode(2);
@ -84,11 +84,34 @@ impl<H, const DEPTH: u8> IncrementalWitness<H, DEPTH> {
}
/// Returns the position of the witnessed leaf node in the commitment tree.
pub fn position(&self) -> Position {
pub fn witnessed_position(&self) -> Position {
Position::try_from(self.tree.size() - 1)
.expect("Commitment trees with more than 2^64 leaves are unsupported.")
}
/// Returns the position of the last leaf appended to the witness.
pub fn tip_position(&self) -> Position {
let leaves_to_cursor_start = self
.witnessed_position()
.witness_addrs(Level::from(DEPTH))
.filter_map(|(addr, source)| {
if source == Source::Future {
Some(addr)
} else {
None
}
})
.take(self.filled.len())
.fold(0u64, |acc, addr| acc + (1u64 << u8::from(addr.level())));
self.witnessed_position()
+ leaves_to_cursor_start
+ self.cursor.as_ref().map_or(0, |c| {
u64::try_from(c.size())
.expect("Note commitment trees with > 2^64 leaves are not supported.")
})
}
/// Finds the next "depth" of an unfilled subtree.
fn next_depth(&self) -> u8 {
let mut skip: u8 = self
@ -212,6 +235,24 @@ impl<H: Hashable + Clone, const DEPTH: u8> IncrementalWitness<H, DEPTH> {
assert_eq!(auth_path.len(), usize::from(depth));
MerklePath::from_parts(auth_path, self.position()).ok()
MerklePath::from_parts(auth_path, self.witnessed_position()).ok()
}
}
#[cfg(test)]
mod tests {
use crate::{frontier::CommitmentTree, witness::IncrementalWitness, Position};
#[test]
fn witness_tip_position() {
let mut base_tree = CommitmentTree::<String, 6>::empty();
for c in 'a'..'h' {
base_tree.append(c.to_string()).unwrap();
}
let mut witness = IncrementalWitness::from_tree(base_tree);
for c in 'h'..'z' {
witness.append(c.to_string()).unwrap();
}
assert_eq!(witness.tip_position(), Position::from(24));
}
}

View File

@ -12,6 +12,7 @@ repository = "https://github.com/zcash/incrementalmerkletree"
categories = ["algorithms", "data-structures"]
[dependencies]
assert_matches = { version = "1.5", optional = true }
bitflags = "1.3"
either = "1.8"
incrementalmerkletree = { version = "0.4", path = "../incrementalmerkletree" }
@ -24,7 +25,13 @@ incrementalmerkletree = { version = "0.4", path = "../incrementalmerkletree", fe
proptest = "1.0.0"
[features]
test-dependencies = ["proptest"]
# The legacy-api feature guards types and functions that are useful for
# migrating data previously managed using `incrementalmerkletree/legacy-api`
# types into the `ShardTree` data structure.
legacy-api = ["incrementalmerkletree/legacy-api"]
# The test-dependencies feature can be enabled to expose types and functions
# that are useful for testing `shardtree` functionality.
test-dependencies = ["proptest", "assert_matches"]
[target.'cfg(unix)'.dev-dependencies]
pprof = { version = "0.9", features = ["criterion", "flamegraph"] } # MSRV 1.56

File diff suppressed because it is too large Load Diff