Add TRACE-level logging to `shardtree` internals

This commit is contained in:
Jack Grigg 2023-07-05 19:51:28 +00:00
parent 8c927ce11e
commit 7643b16261
5 changed files with 30 additions and 2 deletions

View File

@@ -5,6 +5,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to Rust's notion of
[Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## Unreleased
### Changed
- The `incrementalmerkletree::Hashable` trait now has a `Debug` supertrait bound, so all implementations must also implement `core::fmt::Debug`.
## [0.4.0] - 2023-06-05
Release 0.4.0 represents a substantial rewrite of the `incrementalmerkletree`

View File

@@ -46,6 +46,7 @@
use either::Either;
use std::cmp::Ordering;
use std::convert::{TryFrom, TryInto};
use std::fmt;
use std::num::TryFromIntError;
use std::ops::{Add, AddAssign, Range, Sub};
@@ -602,7 +603,7 @@ impl<H: Hashable, const DEPTH: u8> MerklePath<H, DEPTH> {
/// A trait describing the operations that make a type suitable for use as
/// a leaf or node value in a merkle tree.
pub trait Hashable {
pub trait Hashable: fmt::Debug {
fn empty_leaf() -> Self;
fn combine(level: Level, a: &Self, b: &Self) -> Self;

View File

@@ -17,6 +17,7 @@ bitflags = "1.3"
either = "1.8"
incrementalmerkletree = { version = "0.4", path = "../incrementalmerkletree" }
proptest = { version = "1.0.0", optional = true }
tracing = "0.1"
[dev-dependencies]
assert_matches = "1.5"

View File

@@ -2,6 +2,7 @@ use core::fmt::{self, Debug, Display};
use either::Either;
use std::collections::{BTreeMap, BTreeSet};
use std::rc::Rc;
use tracing::trace;
use incrementalmerkletree::{
frontier::NonEmptyFrontier, Address, Hashable, Level, MerklePath, Position, Retention,
@@ -349,7 +350,7 @@ where
impl<
H: Hashable + Clone + PartialEq,
C: Clone + Ord,
C: Clone + Debug + Ord,
S: ShardStore<H = H, CheckpointId = C>,
const DEPTH: u8,
const SHARD_HEIGHT: u8,
@@ -534,6 +535,7 @@ impl<
) -> Result<(), ShardTreeError<S::Error>> {
let leaf_position = frontier.position();
let subtree_root_addr = Address::above_position(Self::subtree_level(), leaf_position);
trace!("Subtree containing nodes: {:?}", subtree_root_addr);
let (updated_subtree, supertree) = self
.store
@@ -559,6 +561,7 @@ impl<
}
if let Retention::Checkpoint { id, is_marked: _ } = leaf_retention {
trace!("Adding checkpoint {:?} at {:?}", id, leaf_position);
self.store
.add_checkpoint(id, Checkpoint::at_position(leaf_position))
.map_err(ShardTreeError::Storage)?;
@@ -801,6 +804,11 @@ impl<
.store
.checkpoint_count()
.map_err(ShardTreeError::Storage)?;
trace!(
"Tree has {} checkpoints, max is {}",
checkpoint_count,
self.max_checkpoints,
);
if checkpoint_count > self.max_checkpoints {
// Batch removals by subtree & create a list of the checkpoint identifiers that
// will be removed from the checkpoints map.
@@ -841,6 +849,12 @@ impl<
)
.map_err(ShardTreeError::Storage)?;
trace!(
"Removing checkpoints {:?}, pruning subtrees {:?}",
checkpoints_to_delete,
clear_positions,
);
// Prune each affected subtree
for (subtree_addr, positions) in clear_positions.into_iter() {
let cleared = self

View File

@@ -7,6 +7,7 @@ use bitflags::bitflags;
use incrementalmerkletree::{
frontier::NonEmptyFrontier, Address, Hashable, Level, Position, Retention,
};
use tracing::trace;
use crate::{LocatedTree, Node, Tree};
@@ -1358,6 +1359,12 @@ impl<H: Hashable + Clone + PartialEq> LocatedPrunableTree<H> {
let (l_addr, r_addr) = root_addr.children().unwrap();
let p = to_clear.partition_point(|(p, _)| p < &l_addr.position_range_end());
trace!(
"In {:?}, partitioned: {:?} {:?}",
root_addr,
&to_clear[0..p],
&to_clear[p..],
);
Tree::unite(
l_addr.level(),
ann.clone(),
@@ -1366,6 +1373,7 @@ impl<H: Hashable + Clone + PartialEq> LocatedPrunableTree<H> {
)
}
Tree(Node::Leaf { value: (h, r) }) => {
trace!("In {:?}, clearing {:?}", root_addr, to_clear);
// When we reach a leaf, we should be down to just a single position
// which should correspond to the last level-0 child of the address's
// subtree range; if it's a checkpoint this will always be the case for