Remove the `merkle_tree::incremental` module.

This consolidates all the serialization code for frontiers and
incremental witnesses in the `merkle_tree` module.
This commit is contained in:
Kris Nuttycombe 2023-03-17 15:20:04 -06:00
parent 6aaa96d377
commit 71f74d4ac1
3 changed files with 226 additions and 226 deletions

View File

@ -21,9 +21,13 @@ and this library adheres to Rust's notion of
methods for these types that do not exist for the `incrementalmerkletree`
replacement types have been replaced by new methods in the `merkle_tree` module.
- `merkle_tree::incremental::write_auth_fragment_v1` has been removed without replacement.
- The following have been removed from `merkle_tree::incremental`; these were
`zcashd`-specific serialization methods which have been moved into the
- The `merkle_tree::incremental` module has been removed; its former contents
were either moved to the `merkle_tree` module or were `zcashd`-specific
serialization methods which have been removed from this crate and moved into the
[zcashd](https://github.com/zcash/zcash) repository.
- The dependency on the `bridgetree` crate has been removed from
`zcash_primitives` and the following zcashd-specific serialization methods
have been moved to the [zcashd](https://github.com/zcash/zcash) repository:
- `read_auth_fragment_v1`
- `read_bridge_v1`
- `read_bridge_v2`
@ -35,6 +39,24 @@ and this library adheres to Rust's notion of
- `read_tree`
- `write_tree`
### Moved
- The following constants and methods have been moved from the
`merkle_tree::incremental` module into the `merkle_tree` module to
consolidate the serialization code for commitment tree frontiers:
- `SER_V1`
- `SER_V2`
- `write_usize_leu64`
- `read_leu64_usize`
- `write_position`
- `read_position`
- `write_address`
- `read_address`
- `read_frontier_v0`
- `write_nonempty_frontier_v1`
- `read_nonempty_frontier_v1`
- `write_frontier_v1`
- `read_frontier_v1`
### Added
- `merkle_tree::incremental::{read_address, write_address}`
- `merkle_tree::incremental::read_bridge_v2`

View File

@ -1,13 +1,19 @@
//! Implementation of a Merkle tree of commitments used to prove the existence of notes.
use byteorder::{LittleEndian, ReadBytesExt};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use incrementalmerkletree::{
frontier::CommitmentTree, witness::IncrementalWitness, MerklePath, Position,
frontier::{CommitmentTree, Frontier, NonEmptyFrontier},
witness::IncrementalWitness,
Address, Hashable, Level, MerklePath, Position,
};
use orchard::tree::MerkleHashOrchard;
use std::io::{self, Read, Write};
use zcash_encoding::{Optional, Vector};
pub mod incremental;
use crate::sapling;
pub const SER_V1: u8 = 1;
pub const SER_V2: u8 = 2;
/// A hashable node within a Merkle tree.
pub trait HashSer {
@ -20,7 +26,146 @@ pub trait HashSer {
fn write<W: Write>(&self, writer: W) -> io::Result<()>;
}
/// Reads a `CommitmentTree` from its serialized form.
impl HashSer for MerkleHashOrchard {
    /// Parses a node from its 32-byte serialized encoding, rejecting byte
    /// strings that are not canonical encodings of a Pallas base field value.
    fn read<R: Read>(mut reader: R) -> io::Result<Self>
    where
        Self: Sized,
    {
        let mut bytes = [0u8; 32];
        reader.read_exact(&mut bytes)?;
        // `from_bytes` yields a conditionally-present value; surface the
        // "absent" case as an `InvalidInput` error.
        match Option::from(Self::from_bytes(&bytes)) {
            Some(node) => Ok(node),
            None => Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Non-canonical encoding of Pallas base field value.",
            )),
        }
    }

    /// Writes the node's canonical 32-byte encoding.
    fn write<W: Write>(&self, mut writer: W) -> io::Result<()> {
        writer.write_all(&self.to_bytes())
    }
}
/// Writes a usize value encoded as a u64 in little-endian order. Since usize
/// is platform-dependent, we consistently represent it as u64 in serialized
/// formats.
///
/// # Panics
/// Panics if `value` cannot be represented as a `u64`.
pub fn write_usize_leu64<W: Write>(mut writer: W, value: usize) -> io::Result<()> {
    // Widen to u64 first so the serialized width is the same on every
    // platform; panic if we get a usize value that can't fit into a u64.
    let widened: u64 = value.try_into().unwrap();
    writer.write_all(&widened.to_le_bytes())
}
/// Reads a usize value encoded as a u64 in little-endian order. Since usize
/// is platform-dependent, we consistently represent it as u64 in serialized
/// formats.
///
/// # Errors
/// Returns `InvalidInput` if the decoded u64 does not fit in this platform's
/// `usize` (possible on 32-bit targets).
pub fn read_leu64_usize<R: Read>(mut reader: R) -> io::Result<usize> {
    // Always consume exactly 8 bytes, then narrow to the platform's usize.
    let mut buf = [0u8; 8];
    reader.read_exact(&mut buf)?;
    u64::from_le_bytes(buf).try_into().map_err(|e| {
        io::Error::new(
            io::ErrorKind::InvalidInput,
            format!(
                "usize could not be decoded from a 64-bit value on this platform: {:?}",
                e
            ),
        )
    })
}
/// Writes a `Position` as a little-endian u64.
pub fn write_position<W: Write>(writer: W, position: Position) -> io::Result<()> {
    let index: usize = position.into();
    write_usize_leu64(writer, index)
}
/// Reads a `Position` from its little-endian u64 serialized form.
pub fn read_position<R: Read>(reader: R) -> io::Result<Position> {
    read_leu64_usize(reader).map(Position::from)
}
/// Writes an `Address` as a single level byte followed by the index as a
/// little-endian u64.
pub fn write_address<W: Write>(mut writer: W, addr: Address) -> io::Result<()> {
    let level: u8 = addr.level().into();
    writer.write_all(&[level])?;
    write_usize_leu64(&mut writer, addr.index())
}
/// Reads an `Address` from its serialized form: a single level byte followed
/// by the index as a little-endian u64.
pub fn read_address<R: Read>(mut reader: R) -> io::Result<Address> {
    let level = reader.read_u8().map(Level::from)?;
    // `read_leu64_usize` already yields a `usize`; the former
    // `.map(usize::from)` was a no-op identity conversion (clippy
    // `useless_conversion`), so the result is used directly.
    let index = read_leu64_usize(&mut reader)?;
    Ok(Address::from_parts(level, index))
}
/// Reads a legacy (v0) frontier by parsing a full legacy `CommitmentTree`
/// and reducing it to its frontier representation.
pub fn read_frontier_v0<H: Hashable + HashSer + Clone, R: Read>(
    mut reader: R,
) -> io::Result<Frontier<H, { sapling::NOTE_COMMITMENT_TREE_DEPTH }>> {
    read_commitment_tree(&mut reader).map(|tree| tree.to_frontier())
}
/// Serializes a `NonEmptyFrontier` in the v1 format: the position, then the
/// leaf (and, for right-hand leaves, its bare sibling), then the remaining
/// ommers as a vector.
pub fn write_nonempty_frontier_v1<H: HashSer, W: Write>(
    mut writer: W,
    frontier: &NonEmptyFrontier<H>,
) -> io::Result<()> {
    write_position(&mut writer, frontier.position())?;
    if frontier.position().is_odd() {
        // The v1 serialization wrote the sibling of a right-hand leaf as a
        // non-optional value, rather than as part of the ommers vector.
        frontier
            .ommers()
            .get(0)
            .expect("ommers vector cannot be empty for right-hand nodes")
            .write(&mut writer)?;
        Optional::write(&mut writer, Some(frontier.leaf()), |w, n: &H| n.write(w))?;
        // The sibling was consumed above, so only ommers[1..] are written here.
        Vector::write(&mut writer, &frontier.ommers()[1..], |w, e| e.write(w))?;
    } else {
        frontier.leaf().write(&mut writer)?;
        Optional::write(&mut writer, None, |w, n: &H| n.write(w))?;
        Vector::write(&mut writer, frontier.ommers(), |w, e| e.write(w))?;
    }
    Ok(())
}
/// Parses a `NonEmptyFrontier` from the v1 serialized format.
#[allow(clippy::redundant_closure)]
pub fn read_nonempty_frontier_v1<H: HashSer + Clone, R: Read>(
    mut reader: R,
) -> io::Result<NonEmptyFrontier<H>> {
    let position = read_position(&mut reader)?;
    let left = H::read(&mut reader)?;
    let right = Optional::read(&mut reader, H::read)?;
    let mut ommers = Vector::read(&mut reader, |r| H::read(r))?;

    // When a right-hand leaf is present, `left` is really the first ommer
    // (the sibling) and the right-hand node is the frontier's leaf.
    let leaf = match right {
        Some(right_leaf) => {
            ommers.insert(0, left);
            right_leaf
        }
        None => left,
    };

    NonEmptyFrontier::from_parts(position, leaf, ommers).map_err(|err| {
        io::Error::new(
            io::ErrorKind::InvalidInput,
            format!("Parsing resulted in an invalid Merkle frontier: {:?}", err),
        )
    })
}
pub fn write_frontier_v1<H: HashSer, W: Write>(
writer: W,
frontier: &Frontier<H, 32>,
) -> io::Result<()> {
Optional::write(writer, frontier.value(), write_nonempty_frontier_v1)
}
/// Parses a `Frontier` from the v1 serialized format; an absent value decodes
/// to the empty frontier.
#[allow(clippy::redundant_closure)]
pub fn read_frontier_v1<H: HashSer + Clone, R: Read>(reader: R) -> io::Result<Frontier<H, 32>> {
    let parsed = Optional::read(reader, read_nonempty_frontier_v1)?;
    match parsed {
        Some(nonempty) => Frontier::try_from(nonempty).map_err(|err| {
            io::Error::new(
                io::ErrorKind::InvalidInput,
                format!("Parsing resulted in an invalid Merkle frontier: {:?}", err),
            )
        }),
        None => Ok(Frontier::empty()),
    }
}
/// Reads a legacy `CommitmentTree` from its serialized form.
pub fn read_commitment_tree<Node: HashSer, R: Read, const DEPTH: u8>(
mut reader: R,
) -> io::Result<CommitmentTree<Node, DEPTH>> {
@ -36,7 +181,7 @@ pub fn read_commitment_tree<Node: HashSer, R: Read, const DEPTH: u8>(
})
}
/// Serializes this tree as an array of bytes.
/// Serializes a legacy `CommitmentTree` as an array of bytes.
pub fn write_commitment_tree<Node: HashSer, W: Write, const DEPTH: u8>(
tree: &CommitmentTree<Node, DEPTH>,
mut writer: W,
@ -60,7 +205,7 @@ pub fn read_incremental_witness<Node: HashSer, R: Read, const DEPTH: u8>(
Ok(IncrementalWitness::from_parts(tree, filled, cursor))
}
/// Serializes this `IncrementalWitness` as an array of bytes.
/// Serializes an `IncrementalWitness` as an array of bytes.
pub fn write_incremental_witness<Node: HashSer, W: Write, const DEPTH: u8>(
witness: &IncrementalWitness<Node, DEPTH>,
mut writer: W,
@ -120,23 +265,67 @@ pub fn merkle_path_from_slice<Node: HashSer, const DEPTH: u8>(
}
}
#[cfg(any(test, feature = "test-dependencies"))]
pub mod testing {
    use incrementalmerkletree::frontier::testing::TestNode;
    use std::io::{self, Read, Write};

    use super::HashSer;

    /// `TestNode` wraps a `u64`; serialize it as 8 little-endian bytes so the
    /// generic serialization code can be exercised in tests.
    impl HashSer for TestNode {
        fn read<R: Read>(mut reader: R) -> io::Result<TestNode> {
            let mut buf = [0u8; 8];
            reader.read_exact(&mut buf)?;
            Ok(TestNode(u64::from_le_bytes(buf)))
        }

        fn write<W: Write>(&self, mut writer: W) -> io::Result<()> {
            writer.write_all(&self.0.to_le_bytes())
        }
    }
}
#[cfg(test)]
mod tests {
use assert_matches::assert_matches;
use incrementalmerkletree::Hashable;
use incrementalmerkletree::{
frontier::{testing::arb_commitment_tree, Frontier, PathFiller},
witness::IncrementalWitness,
Hashable,
};
use proptest::prelude::*;
use proptest::strategy::Strategy;
use crate::sapling::{self, testing::arb_node, Node};
use super::{
merkle_path_from_slice, read_commitment_tree, read_incremental_witness,
write_commitment_tree, write_incremental_witness, CommitmentTree, HashSer,
merkle_path_from_slice, read_commitment_tree, read_frontier_v0, read_frontier_v1,
read_incremental_witness, write_commitment_tree, write_frontier_v1,
write_incremental_witness, CommitmentTree, HashSer,
};
use crate::sapling::{self, Node};
proptest! {
#[test]
fn frontier_serialization_v0(t in arb_commitment_tree::<_, _, 32>(0, sapling::testing::arb_node()))
{
let mut buffer = vec![];
write_commitment_tree(&t, &mut buffer).unwrap();
let frontier: Frontier<Node, 32> = read_frontier_v0(&buffer[..]).unwrap();
let expected: Frontier<Node, 32> = t.to_frontier();
assert_eq!(frontier, expected);
}
#[test]
fn frontier_serialization_v1(t in arb_commitment_tree::<_, _, 32>(1, sapling::testing::arb_node()))
{
let original: Frontier<Node, 32> = t.to_frontier();
let mut buffer = vec![];
write_frontier_v1(&mut buffer, &original).unwrap();
let read: Frontier<Node, 32> = read_frontier_v1(&buffer[..]).unwrap();
assert_eq!(read, original);
}
}
const HEX_EMPTY_ROOTS: [&str; 33] = [
"0100000000000000000000000000000000000000000000000000000000000000",
@ -649,7 +838,7 @@ mod tests {
}
#[test]
fn prop_commitment_tree_roundtrip_node(ct in arb_commitment_tree::<_, _, 8>(32, arb_node())) {
fn prop_commitment_tree_roundtrip_node(ct in arb_commitment_tree::<_, _, 8>(32, sapling::testing::arb_node())) {
let frontier: Frontier<Node, 8> = ct.to_frontier();
let ct0 = CommitmentTree::from_frontier(&frontier);
assert_eq!(ct, ct0);
@ -658,29 +847,10 @@ mod tests {
}
#[test]
fn prop_commitment_tree_roundtrip_ser(ct in arb_commitment_tree::<_, _, 8>(32, arb_node())) {
fn prop_commitment_tree_roundtrip_ser(ct in arb_commitment_tree::<_, _, 8>(32, sapling::testing::arb_node())) {
let mut serialized = vec![];
assert_matches!(write_commitment_tree(&ct, &mut serialized), Ok(()));
assert_matches!(read_commitment_tree::<_, _, 8>(&serialized[..]), Ok(ct_out) if ct == ct_out);
}
}
}
#[cfg(any(test, feature = "test-dependencies"))]
pub mod testing {
    use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
    use incrementalmerkletree::frontier::testing::TestNode;
    use std::io::{self, Read, Write};

    use super::HashSer;

    // `TestNode` wraps a `u64`; it serializes as 8 little-endian bytes so the
    // generic serialization code can be exercised in tests.
    impl HashSer for TestNode {
        fn read<R: Read>(mut reader: R) -> io::Result<TestNode> {
            reader.read_u64::<LittleEndian>().map(TestNode)
        }

        fn write<W: Write>(&self, mut writer: W) -> io::Result<()> {
            writer.write_u64::<LittleEndian>(self.0)
        }
    }
}

View File

@ -1,192 +0,0 @@
//! Implementations of serialization and parsing for Orchard note commitment trees.
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::io::{self, Read, Write};
use incrementalmerkletree::{
frontier::{Frontier, NonEmptyFrontier},
Address, Hashable, Level, Position,
};
use orchard::tree::MerkleHashOrchard;
use zcash_encoding::{Optional, Vector};
use super::{read_commitment_tree, HashSer};
use crate::sapling;
pub const SER_V1: u8 = 1;
pub const SER_V2: u8 = 2;
/// Orchard note commitment tree nodes serialize as the 32-byte canonical
/// encoding of the underlying Pallas base field value.
impl HashSer for MerkleHashOrchard {
    fn read<R: Read>(mut reader: R) -> io::Result<Self>
    where
        Self: Sized,
    {
        let mut repr = [0u8; 32];
        reader.read_exact(&mut repr)?;
        // Reject byte strings that are not canonical field-element encodings.
        <Option<_>>::from(Self::from_bytes(&repr)).ok_or_else(|| {
            io::Error::new(
                io::ErrorKind::InvalidInput,
                "Non-canonical encoding of Pallas base field value.",
            )
        })
    }

    fn write<W: Write>(&self, mut writer: W) -> io::Result<()> {
        writer.write_all(&self.to_bytes())
    }
}
/// Writes a usize value encoded as a u64 in little-endian order. Since usize
/// is platform-dependent, we consistently represent it as u64 in serialized
/// formats.
///
/// # Panics
/// Panics if `value` cannot be represented as a `u64`.
pub fn write_usize_leu64<W: Write>(mut writer: W, value: usize) -> io::Result<()> {
    // Panic if we get a usize value that can't fit into a u64.
    writer.write_u64::<LittleEndian>(value.try_into().unwrap())
}
/// Reads a usize value encoded as a u64 in little-endian order. Since usize
/// is platform-dependent, we consistently represent it as u64 in serialized
/// formats.
///
/// # Errors
/// Returns `InvalidInput` if the decoded u64 does not fit in this platform's
/// `usize` (possible on 32-bit targets).
pub fn read_leu64_usize<R: Read>(mut reader: R) -> io::Result<usize> {
    reader.read_u64::<LittleEndian>()?.try_into().map_err(|e| {
        io::Error::new(
            io::ErrorKind::InvalidInput,
            format!(
                "usize could not be decoded from a 64-bit value on this platform: {:?}",
                e
            ),
        )
    })
}
/// Writes a `Position` as a little-endian u64.
pub fn write_position<W: Write>(mut writer: W, position: Position) -> io::Result<()> {
    write_usize_leu64(&mut writer, position.into())
}
/// Reads a `Position` from its little-endian u64 serialized form.
pub fn read_position<R: Read>(mut reader: R) -> io::Result<Position> {
    read_leu64_usize(&mut reader).map(Position::from)
}
/// Writes an `Address` as a single level byte followed by the index as a
/// little-endian u64.
pub fn write_address<W: Write>(mut writer: W, addr: Address) -> io::Result<()> {
    writer.write_u8(addr.level().into())?;
    write_usize_leu64(&mut writer, addr.index())
}
/// Reads an `Address` from its serialized form: a single level byte followed
/// by the index as a little-endian u64.
pub fn read_address<R: Read>(mut reader: R) -> io::Result<Address> {
    let level = reader.read_u8().map(Level::from)?;
    let index = read_leu64_usize(&mut reader)?;
    Ok(Address::from_parts(level, index))
}
/// Reads a legacy (v0) frontier by parsing a full legacy `CommitmentTree`
/// and reducing it to its frontier representation.
pub fn read_frontier_v0<H: Hashable + HashSer + Clone, R: Read>(
    mut reader: R,
) -> io::Result<Frontier<H, { sapling::NOTE_COMMITMENT_TREE_DEPTH }>> {
    let tree = read_commitment_tree(&mut reader)?;
    Ok(tree.to_frontier())
}
/// Serializes a `NonEmptyFrontier` in the v1 format: the position, then the
/// leaf (and, for right-hand leaves, its bare sibling), then the remaining
/// ommers as a vector.
pub fn write_nonempty_frontier_v1<H: HashSer, W: Write>(
    mut writer: W,
    frontier: &NonEmptyFrontier<H>,
) -> io::Result<()> {
    write_position(&mut writer, frontier.position())?;
    if frontier.position().is_odd() {
        // The v1 serialization wrote the sibling of a right-hand leaf as a non-optional value,
        // rather than as part of the ommers vector.
        frontier
            .ommers()
            .get(0)
            .expect("ommers vector cannot be empty for right-hand nodes")
            .write(&mut writer)?;
        Optional::write(&mut writer, Some(frontier.leaf()), |w, n: &H| n.write(w))?;
        // The sibling was consumed above, so only ommers[1..] are written here.
        Vector::write(&mut writer, &frontier.ommers()[1..], |w, e| e.write(w))?;
    } else {
        frontier.leaf().write(&mut writer)?;
        Optional::write(&mut writer, None, |w, n: &H| n.write(w))?;
        Vector::write(&mut writer, frontier.ommers(), |w, e| e.write(w))?;
    }
    Ok(())
}
/// Parses a `NonEmptyFrontier` from the v1 serialized format.
#[allow(clippy::redundant_closure)]
pub fn read_nonempty_frontier_v1<H: HashSer + Clone, R: Read>(
    mut reader: R,
) -> io::Result<NonEmptyFrontier<H>> {
    let position = read_position(&mut reader)?;
    let left = H::read(&mut reader)?;
    let right = Optional::read(&mut reader, H::read)?;
    let mut ommers = Vector::read(&mut reader, |r| H::read(r))?;
    let leaf = if let Some(right) = right {
        // if the frontier has a right leaf, then the left leaf is the first ommer
        ommers.insert(0, left);
        right
    } else {
        left
    };

    NonEmptyFrontier::from_parts(position, leaf, ommers).map_err(|err| {
        io::Error::new(
            io::ErrorKind::InvalidInput,
            format!("Parsing resulted in an invalid Merkle frontier: {:?}", err),
        )
    })
}
/// Serializes a `Frontier` in the v1 format as an optional
/// `NonEmptyFrontier`.
pub fn write_frontier_v1<H: HashSer, W: Write>(
    writer: W,
    frontier: &Frontier<H, 32>,
) -> io::Result<()> {
    Optional::write(writer, frontier.value(), write_nonempty_frontier_v1)
}
/// Parses a `Frontier` from the v1 serialized format; an absent value decodes
/// to the empty frontier.
#[allow(clippy::redundant_closure)]
pub fn read_frontier_v1<H: HashSer + Clone, R: Read>(reader: R) -> io::Result<Frontier<H, 32>> {
    match Optional::read(reader, read_nonempty_frontier_v1)? {
        None => Ok(Frontier::empty()),
        Some(f) => Frontier::try_from(f).map_err(|err| {
            io::Error::new(
                io::ErrorKind::InvalidInput,
                format!("Parsing resulted in an invalid Merkle frontier: {:?}", err),
            )
        }),
    }
}
#[cfg(test)]
mod tests {
    use proptest::prelude::*;

    use super::*;
    use crate::{
        merkle_tree::write_commitment_tree,
        sapling::{testing as sapling, Node},
    };
    use incrementalmerkletree::frontier::{testing::arb_commitment_tree, Frontier};

    proptest! {
        // Round trip: a legacy tree serialized with `write_commitment_tree`
        // must parse back as that tree's frontier via `read_frontier_v0`.
        #[test]
        fn frontier_serialization_v0(t in arb_commitment_tree::<_, _, 32>(0, sapling::arb_node()))
        {
            let mut buffer = vec![];
            write_commitment_tree(&t, &mut buffer).unwrap();
            let frontier: Frontier<Node, 32> = read_frontier_v0(&buffer[..]).unwrap();

            let expected: Frontier<Node, 32> = t.to_frontier();
            assert_eq!(frontier, expected);
        }

        // Round trip: v1 write followed by v1 read is the identity.
        #[test]
        fn frontier_serialization_v1(t in arb_commitment_tree::<_, _, 32>(1, sapling::arb_node()))
        {
            let original: Frontier<Node, 32> = t.to_frontier();

            let mut buffer = vec![];
            write_frontier_v1(&mut buffer, &original).unwrap();
            let read: Frontier<Node, 32> = read_frontier_v1(&buffer[..]).unwrap();

            assert_eq!(read, original);
        }
    }
}