change(state): Add note subtree indexes for new and existing blocks (#7437)

* Copy the add_subtrees upgrade from the original branch

* Copy the database write changes in shielded.rs from the original branch

* Copy the tree API changes from the original branch

* Simplify subtree APIs to avoid exposing frontiers

* Fix a dead code warning by re-using existing methods

* Use mpsc::Receiver<CancelFormatChange> in the subtree upgrade

* Run the subtree upgrade on startup

* Bump the database format version to 25.2.0

* Fix a confusing 'upgrade complete' log

* Clarify some comments and error messages

* Simplify prev_tree unwrap to avoid an (impossible?) concurrency bug

* Use separate subtree writing functions

* Use common note commitment list code

* Fix subtree completion condition and add asserts

* Simplify subtree API and avoid exposing Address

* Fix API compatibility when Arcs are removed

* Log when each subtree is added

* If a format change is cancelled, don't mark the database as upgraded or do format checks

* Log subtree progress about once every two minutes

* Add a state validity check for the subtrees upgrade

* Orchard is faster, so decrease the log interval

* Clarify subtree index docs

* Move a log to the correct location

* Refactor subtree upgrade to remove duplicate inverted loop conditions

* Update the subtree state validity check

* Add a subtree format check when there is no upgrade

* Fix an off-by-one error with the final subtree check

* Use error-level logs for database format checks

* Skip format checks in tests that create invalid formats

* Fix the state validity test

* Add a concurrency comment to subtree by height methods

* Add individual subtree state methods: this reverts their removal in an earlier PR

* fastmod "subtrees_by_index" "subtree_list_by_index_for_rpc"

---------

Co-authored-by: arya2 <aryasolhi@gmail.com>
teor 2023-09-06 02:52:06 +10:00 committed by GitHub
parent 97b43fb4a6
commit cc61bd50b9
14 changed files with 737 additions and 94 deletions


@ -2,6 +2,8 @@
use std::{collections::HashMap, fmt, ops::Neg, sync::Arc};
use halo2::pasta::pallas;
use crate::{
amount::NegativeAllowed,
block::merkle::AuthDataRoot,
@ -152,16 +154,30 @@ impl Block {
/// Access the [`orchard::Nullifier`]s from all transactions in this block.
pub fn orchard_nullifiers(&self) -> impl Iterator<Item = &orchard::Nullifier> {
// Work around a compiler panic (ICE) with flat_map():
// https://github.com/rust-lang/rust/issues/105044
#[allow(clippy::needless_collect)]
let nullifiers: Vec<_> = self
.transactions
self.transactions
.iter()
.flat_map(|transaction| transaction.orchard_nullifiers())
.collect();
nullifiers.into_iter()
}
/// Access the [`sprout::NoteCommitment`]s from all transactions in this block.
pub fn sprout_note_commitments(&self) -> impl Iterator<Item = &sprout::NoteCommitment> {
self.transactions
.iter()
.flat_map(|transaction| transaction.sprout_note_commitments())
}
/// Access the [sapling note commitments](jubjub::Fq) from all transactions in this block.
pub fn sapling_note_commitments(&self) -> impl Iterator<Item = &jubjub::Fq> {
self.transactions
.iter()
.flat_map(|transaction| transaction.sapling_note_commitments())
}
/// Access the [orchard note commitments](pallas::Base) from all transactions in this block.
pub fn orchard_note_commitments(&self) -> impl Iterator<Item = &pallas::Base> {
self.transactions
.iter()
.flat_map(|transaction| transaction.orchard_note_commitments())
}
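As a quick usage sketch (illustrative, not part of this PR; `block` is assumed to be a `Block` already in scope), the new accessors make per-block shielded output counts trivial:

    // Count the note commitments this block adds to each shielded pool.
    let sprout_count = block.sprout_note_commitments().count();
    let sapling_count = block.sapling_note_commitments().count();
    let orchard_count = block.orchard_note_commitments().count();
    println!("block adds {sprout_count} sprout, {sapling_count} sapling, {orchard_count} orchard note commitments");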
/// Count how many Sapling transactions exist in a block,


@ -32,7 +32,7 @@ use crate::{
serialization::{
serde_helpers, ReadZcashExt, SerializationError, ZcashDeserialize, ZcashSerialize,
},
subtree::TRACKED_SUBTREE_HEIGHT,
subtree::{NoteCommitmentSubtreeIndex, TRACKED_SUBTREE_HEIGHT},
};
pub mod legacy;
@ -389,28 +389,48 @@ impl NoteCommitmentTree {
}
}
/// Returns frontier of non-empty tree, or `None` if the tree is empty.
fn frontier(&self) -> Option<&NonEmptyFrontier<Node>> {
self.inner.value()
}
/// Returns true if the most recently appended leaf completes the subtree
pub fn is_complete_subtree(tree: &NonEmptyFrontier<Node>) -> bool {
pub fn is_complete_subtree(&self) -> bool {
let Some(tree) = self.frontier() else {
// An empty tree can't be a complete subtree.
return false;
};
tree.position()
.is_complete_subtree(TRACKED_SUBTREE_HEIGHT.into())
}
/// Returns subtree address at [`TRACKED_SUBTREE_HEIGHT`]
pub fn subtree_address(tree: &NonEmptyFrontier<Node>) -> incrementalmerkletree::Address {
incrementalmerkletree::Address::above_position(
/// Returns the subtree index at [`TRACKED_SUBTREE_HEIGHT`].
/// This is the number of complete or incomplete subtrees that are currently in the tree.
/// Returns `None` if the tree is empty.
#[allow(clippy::unwrap_in_result)]
pub fn subtree_index(&self) -> Option<NoteCommitmentSubtreeIndex> {
let tree = self.frontier()?;
let index = incrementalmerkletree::Address::above_position(
TRACKED_SUBTREE_HEIGHT.into(),
tree.position(),
)
.index()
.try_into()
.expect("fits in u16");
Some(index)
}
/// Returns subtree index and root if the most recently appended leaf completes the subtree
#[allow(clippy::unwrap_in_result)]
pub fn completed_subtree_index_and_root(&self) -> Option<(u16, Node)> {
let value = self.inner.value()?;
Self::is_complete_subtree(value).then_some(())?;
let address = Self::subtree_address(value);
let index = address.index().try_into().expect("should fit in u16");
let root = value.root(Some(TRACKED_SUBTREE_HEIGHT.into()));
pub fn completed_subtree_index_and_root(&self) -> Option<(NoteCommitmentSubtreeIndex, Node)> {
if !self.is_complete_subtree() {
return None;
}
let index = self.subtree_index()?;
let root = self.frontier()?.root(Some(TRACKED_SUBTREE_HEIGHT.into()));
Some((index, root))
}
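As a usage sketch of how these three accessors compose (a hypothetical helper, not part of this PR; `NoteCommitmentTree` and `Node` are the types defined in this module, and `NoteCommitmentSubtreeIndex` comes from `zebra_chain::subtree`):

    // Hypothetical helper: return the subtree completed by the most recently
    // appended leaf, if there is one.
    fn end_of_block_subtree(tree: &NoteCommitmentTree) -> Option<(NoteCommitmentSubtreeIndex, Node)> {
        if !tree.is_complete_subtree() {
            // The tree is empty, or its latest leaf is in the middle of a subtree.
            return None;
        }

        // For a non-empty tree both of these are `Some`, and the completed index
        // is the same value that `subtree_index()` reports.
        let index = tree.subtree_index()?;
        let (completed_index, root) = tree.completed_subtree_index_and_root()?;
        debug_assert_eq!(index, completed_index);

        Some((completed_index, root))
    }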


@ -4,7 +4,11 @@ use std::sync::Arc;
use thiserror::Error;
use crate::{block::Block, orchard, sapling, sprout, subtree::NoteCommitmentSubtree};
use crate::{
block::Block,
orchard, sapling, sprout,
subtree::{NoteCommitmentSubtree, NoteCommitmentSubtreeIndex},
};
/// An argument wrapper struct for note commitment trees.
#[derive(Clone, Debug)]
@ -65,24 +69,9 @@ impl NoteCommitmentTrees {
..
} = self.clone();
let sprout_note_commitments: Vec<_> = block
.transactions
.iter()
.flat_map(|tx| tx.sprout_note_commitments())
.cloned()
.collect();
let sapling_note_commitments: Vec<_> = block
.transactions
.iter()
.flat_map(|tx| tx.sapling_note_commitments())
.cloned()
.collect();
let orchard_note_commitments: Vec<_> = block
.transactions
.iter()
.flat_map(|tx| tx.orchard_note_commitments())
.cloned()
.collect();
let sprout_note_commitments: Vec<_> = block.sprout_note_commitments().cloned().collect();
let sapling_note_commitments: Vec<_> = block.sapling_note_commitments().cloned().collect();
let orchard_note_commitments: Vec<_> = block.orchard_note_commitments().cloned().collect();
let mut sprout_result = None;
let mut sapling_result = None;
@ -163,7 +152,7 @@ impl NoteCommitmentTrees {
) -> Result<
(
Arc<sapling::tree::NoteCommitmentTree>,
Option<(u16, sapling::tree::Node)>,
Option<(NoteCommitmentSubtreeIndex, sapling::tree::Node)>,
),
NoteCommitmentTreeError,
> {
@ -202,7 +191,7 @@ impl NoteCommitmentTrees {
) -> Result<
(
Arc<orchard::tree::NoteCommitmentTree>,
Option<(u16, orchard::tree::Node)>,
Option<(NoteCommitmentSubtreeIndex, orchard::tree::Node)>,
),
NoteCommitmentTreeError,
> {


@ -33,7 +33,7 @@ use crate::{
serialization::{
serde_helpers, ReadZcashExt, SerializationError, ZcashDeserialize, ZcashSerialize,
},
subtree::TRACKED_SUBTREE_HEIGHT,
subtree::{NoteCommitmentSubtreeIndex, TRACKED_SUBTREE_HEIGHT},
};
pub mod legacy;
@ -370,28 +370,48 @@ impl NoteCommitmentTree {
}
}
/// Returns frontier of non-empty tree, or None.
fn frontier(&self) -> Option<&NonEmptyFrontier<Node>> {
self.inner.value()
}
/// Returns true if the most recently appended leaf completes the subtree
pub fn is_complete_subtree(tree: &NonEmptyFrontier<Node>) -> bool {
pub fn is_complete_subtree(&self) -> bool {
let Some(tree) = self.frontier() else {
// An empty tree can't be a complete subtree.
return false;
};
tree.position()
.is_complete_subtree(TRACKED_SUBTREE_HEIGHT.into())
}
/// Returns subtree address at [`TRACKED_SUBTREE_HEIGHT`]
pub fn subtree_address(tree: &NonEmptyFrontier<Node>) -> incrementalmerkletree::Address {
incrementalmerkletree::Address::above_position(
/// Returns the subtree index at [`TRACKED_SUBTREE_HEIGHT`].
/// This is the number of complete or incomplete subtrees that are currently in the tree.
/// Returns `None` if the tree is empty.
#[allow(clippy::unwrap_in_result)]
pub fn subtree_index(&self) -> Option<NoteCommitmentSubtreeIndex> {
let tree = self.frontier()?;
let index = incrementalmerkletree::Address::above_position(
TRACKED_SUBTREE_HEIGHT.into(),
tree.position(),
)
.index()
.try_into()
.expect("fits in u16");
Some(index)
}
/// Returns subtree index and root if the most recently appended leaf completes the subtree
#[allow(clippy::unwrap_in_result)]
pub fn completed_subtree_index_and_root(&self) -> Option<(u16, Node)> {
let value = self.inner.value()?;
Self::is_complete_subtree(value).then_some(())?;
let address = Self::subtree_address(value);
let index = address.index().try_into().expect("should fit in u16");
let root = value.root(Some(TRACKED_SUBTREE_HEIGHT.into()));
pub fn completed_subtree_index_and_root(&self) -> Option<(NoteCommitmentSubtreeIndex, Node)> {
if !self.is_complete_subtree() {
return None;
}
let index = self.subtree_index()?;
let root = self.frontier()?.root(Some(TRACKED_SUBTREE_HEIGHT.into()));
Some((index, root))
}


@ -1,5 +1,7 @@
//! Struct representing Sapling/Orchard note commitment subtrees
use std::num::TryFromIntError;
use serde::{Deserialize, Serialize};
use crate::block::Height;
@ -23,6 +25,22 @@ impl From<u16> for NoteCommitmentSubtreeIndex {
}
}
impl TryFrom<u64> for NoteCommitmentSubtreeIndex {
type Error = TryFromIntError;
fn try_from(value: u64) -> Result<Self, Self::Error> {
u16::try_from(value).map(Self)
}
}
// If we want to automatically convert NoteCommitmentSubtreeIndex to the generic integer literal
// type, we can only implement conversion into u64. (Or u16, but not both.)
impl From<NoteCommitmentSubtreeIndex> for u64 {
fn from(value: NoteCommitmentSubtreeIndex) -> Self {
value.0.into()
}
}
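A brief usage sketch of these conversions (illustrative values, not part of this PR):

    let index = NoteCommitmentSubtreeIndex::from(5u16);

    // `From<NoteCommitmentSubtreeIndex> for u64` is the only infallible integer
    // conversion out of the index, so it can flow into generic u64 positions:
    let as_u64: u64 = index.into();
    assert_eq!(as_u64, 5);

    // The fallible direction rejects values that don't fit in a u16:
    assert!(NoteCommitmentSubtreeIndex::try_from(5_u64).is_ok());
    assert!(NoteCommitmentSubtreeIndex::try_from(u64::from(u16::MAX) + 1).is_err());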
// TODO:
// - consider defining sapling::SubtreeRoot and orchard::SubtreeRoot types or type wrappers,
// to avoid type confusion between the leaf Node and subtree root types.


@ -48,11 +48,11 @@ pub(crate) const DATABASE_FORMAT_VERSION: u64 = 25;
/// - adding new column families,
/// - changing the format of a column family in a compatible way, or
/// - breaking changes with compatibility code in all supported Zebra versions.
pub(crate) const DATABASE_FORMAT_MINOR_VERSION: u64 = 1;
pub(crate) const DATABASE_FORMAT_MINOR_VERSION: u64 = 2;
/// The database format patch version, incremented each time the on-disk database format has a
/// significant format compatibility fix.
pub(crate) const DATABASE_FORMAT_PATCH_VERSION: u64 = 1;
pub(crate) const DATABASE_FORMAT_PATCH_VERSION: u64 = 0;
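As a sketch of how these constants combine (an assumption based on the semver `Version` values used by the upgrade code, not shown in this diff), the running format version after this change is 25.2.0:

    let running_version = semver::Version::new(
        DATABASE_FORMAT_VERSION,       // 25
        DATABASE_FORMAT_MINOR_VERSION, // 2 after this change
        DATABASE_FORMAT_PATCH_VERSION, // 0 after this change
    );
    assert_eq!(running_version, semver::Version::parse("25.2.0").unwrap());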
/// The name of the file containing the minor and patch database versions.
///


@ -98,7 +98,7 @@ impl FinalizedState {
network: Network,
#[cfg(feature = "elasticsearch")] elastic_db: Option<elasticsearch::Elasticsearch>,
) -> Self {
let db = ZebraDb::new(config, network);
let db = ZebraDb::new(config, network, false);
#[cfg(feature = "elasticsearch")]
let new_state = Self {


@ -25,6 +25,8 @@ use crate::{
Config,
};
pub(crate) mod add_subtrees;
/// The kind of database format change we're performing.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum DbFormatChange {
@ -195,7 +197,7 @@ impl DbFormatChange {
network,
initial_tip_height,
upgrade_db.clone(),
cancel_receiver,
&cancel_receiver,
)?,
NewlyCreated { .. } => {
@ -218,12 +220,14 @@ impl DbFormatChange {
}
}
// This check should pass for all format changes:
// - upgrades should de-duplicate trees if needed (and they already do this check)
// - an empty state doesn't have any trees, so it can't have duplicate trees
// - since this Zebra code knows how to de-duplicate trees, downgrades using this code
// still know how to make sure trees are unique
Self::check_for_duplicate_trees(upgrade_db);
// These checks should pass for all format changes:
// - upgrades should produce a valid format (and they already do that check)
// - an empty state should pass all the format checks
// - since the running Zebra code knows how to upgrade the database to this format,
// downgrades using this running code still know how to create a valid database
// (unless a future upgrade breaks these format checks)
Self::check_for_duplicate_trees(upgrade_db.clone());
add_subtrees::check(&upgrade_db);
Ok(())
}
@ -245,7 +249,7 @@ impl DbFormatChange {
network: Network,
initial_tip_height: Option<Height>,
db: ZebraDb,
cancel_receiver: mpsc::Receiver<CancelFormatChange>,
cancel_receiver: &mpsc::Receiver<CancelFormatChange>,
) -> Result<(), CancelFormatChange> {
let Upgrade {
newer_running_version,
@ -277,7 +281,7 @@ impl DbFormatChange {
return Ok(());
};
// Start of a database upgrade task.
// Note commitment tree de-duplication database upgrade task.
let version_for_pruning_trees =
Version::parse("25.1.1").expect("Hardcoded version string should be valid.");
@ -339,13 +343,30 @@ impl DbFormatChange {
}
// Before marking the state as upgraded, check that the upgrade completed successfully.
Self::check_for_duplicate_trees(db);
Self::check_for_duplicate_trees(db.clone());
// Mark the database as upgraded. Zebra won't repeat the upgrade anymore once the
// database is marked, so the upgrade MUST be complete at this point.
Self::mark_as_upgraded_to(&version_for_pruning_trees, &config, network);
}
// Note commitment subtree creation database upgrade task.
let version_for_adding_subtrees =
Version::parse("25.2.0").expect("Hardcoded version string should be valid.");
// Check if we need to add note commitment subtrees to the database.
if older_disk_version < version_for_adding_subtrees {
add_subtrees::run(initial_tip_height, &db, cancel_receiver)?;
// Before marking the state as upgraded, check that the upgrade completed successfully.
add_subtrees::check(&db);
// Mark the database as upgraded. Zebra won't repeat the upgrade anymore once the
// database is marked, so the upgrade MUST be complete at this point.
Self::mark_as_upgraded_to(&version_for_adding_subtrees, &config, network);
}
// # New Upgrades Usually Go Here
//
// New code goes above this comment!
@ -353,6 +374,7 @@ impl DbFormatChange {
// Run the latest format upgrade code after the other upgrades are complete,
// then mark the format as upgraded. The code should check `cancel_receiver`
// every time it runs its inner update loop.
info!(
?newer_running_version,
"Zebra automatically upgraded the database format to:"


@ -0,0 +1,466 @@
//! Fully populate the Sapling and Orchard note commitment subtrees for existing blocks in the database.
use std::sync::{mpsc, Arc};
use zebra_chain::{
block::Height,
orchard, sapling,
subtree::{NoteCommitmentSubtree, NoteCommitmentSubtreeIndex},
};
use crate::service::finalized_state::{
disk_format::upgrade::CancelFormatChange, DiskWriteBatch, ZebraDb,
};
/// Runs disk format upgrade for adding Sapling and Orchard note commitment subtrees to database.
///
/// Returns `Ok` if the upgrade completed, and `Err` if it was cancelled.
#[allow(clippy::unwrap_in_result)]
pub fn run(
initial_tip_height: Height,
upgrade_db: &ZebraDb,
cancel_receiver: &mpsc::Receiver<CancelFormatChange>,
) -> Result<(), CancelFormatChange> {
let mut subtree_count = 0;
let mut prev_tree: Option<_> = None;
for (height, tree) in upgrade_db.sapling_tree_by_height_range(..=initial_tip_height) {
// Return early if there is a cancel signal.
if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) {
return Err(CancelFormatChange);
}
// Empty note commitment trees can't contain subtrees.
let Some(end_of_block_subtree_index) = tree.subtree_index() else {
prev_tree = Some(tree);
continue;
};
// Blocks cannot complete multiple level 16 subtrees,
// so the subtree index can increase by a maximum of 1 every ~20 blocks.
// If this block does complete a subtree, the subtree is either completed by a note before
// the final note (so the final note is in the next subtree), or by the final note
// (so the final note is the end of this subtree).
if let Some((index, node)) = tree.completed_subtree_index_and_root() {
// If the leaf at the end of the block is the final leaf in a subtree,
// we already have that subtree root available in the tree.
assert_eq!(
index.0, subtree_count,
"trees are inserted in order with no gaps"
);
write_sapling_subtree(upgrade_db, index, height, node);
subtree_count += 1;
} else if end_of_block_subtree_index.0 > subtree_count {
// If the leaf at the end of the block is in the next subtree,
// we need to calculate that subtree root based on the tree from the previous block.
let mut prev_tree = prev_tree
.take()
.expect("should have some previous sapling frontier");
let sapling_nct = Arc::make_mut(&mut prev_tree);
let block = upgrade_db
.block(height.into())
.expect("height with note commitment tree should have block");
for sapling_note_commitment in block.sapling_note_commitments() {
// Return early if there is a cancel signal.
if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) {
return Err(CancelFormatChange);
}
sapling_nct
.append(*sapling_note_commitment)
.expect("finalized notes should append successfully");
// The loop always breaks on this condition,
// because we checked the block has enough commitments,
// and that the final commitment in the block doesn't complete a subtree.
if sapling_nct.is_complete_subtree() {
break;
}
}
let (index, node) = sapling_nct.completed_subtree_index_and_root().expect(
"block should have completed a subtree before its final note commitment: \
already checked is_complete_subtree(), and that the block must complete a subtree",
);
assert_eq!(
index.0, subtree_count,
"trees are inserted in order with no gaps"
);
write_sapling_subtree(upgrade_db, index, height, node);
subtree_count += 1;
}
prev_tree = Some(tree);
}
let mut subtree_count = 0;
let mut prev_tree: Option<_> = None;
for (height, tree) in upgrade_db.orchard_tree_by_height_range(..=initial_tip_height) {
// Return early if there is a cancel signal.
if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) {
return Err(CancelFormatChange);
}
// Empty note commitment trees can't contain subtrees.
let Some(end_of_block_subtree_index) = tree.subtree_index() else {
prev_tree = Some(tree);
continue;
};
// Blocks cannot complete multiple level 16 subtrees,
// so the subtree index can increase by a maximum of 1 every ~20 blocks.
// If this block does complete a subtree, the subtree is either completed by a note before
// the final note (so the final note is in the next subtree), or by the final note
// (so the final note is the end of this subtree).
if let Some((index, node)) = tree.completed_subtree_index_and_root() {
// If the leaf at the end of the block is the final leaf in a subtree,
// we already have that subtree root available in the tree.
assert_eq!(
index.0, subtree_count,
"trees are inserted in order with no gaps"
);
write_orchard_subtree(upgrade_db, index, height, node);
subtree_count += 1;
} else if end_of_block_subtree_index.0 > subtree_count {
// If the leaf at the end of the block is in the next subtree,
// we need to calculate that subtree root based on the tree from the previous block.
let mut prev_tree = prev_tree
.take()
.expect("should have some previous orchard frontier");
let orchard_nct = Arc::make_mut(&mut prev_tree);
let block = upgrade_db
.block(height.into())
.expect("height with note commitment tree should have block");
for orchard_note_commitment in block.orchard_note_commitments() {
// Return early if there is a cancel signal.
if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) {
return Err(CancelFormatChange);
}
orchard_nct
.append(*orchard_note_commitment)
.expect("finalized notes should append successfully");
// The loop always breaks on this condition,
// because we checked the block has enough commitments,
// and that the final commitment in the block doesn't complete a subtree.
if orchard_nct.is_complete_subtree() {
break;
}
}
let (index, node) = orchard_nct.completed_subtree_index_and_root().expect(
"block should have completed a subtree before its final note commitment: \
already checked is_complete_subtree(), and that the block must complete a subtree",
);
assert_eq!(
index.0, subtree_count,
"trees are inserted in order with no gaps"
);
write_orchard_subtree(upgrade_db, index, height, node);
subtree_count += 1;
}
prev_tree = Some(tree);
}
Ok(())
}
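A minimal sketch of how `run` can be driven and cancelled (assumptions: `upgrade_db: ZebraDb` and `initial_tip_height: Height` are in scope; the real caller is the format-upgrade thread in `upgrade.rs`, so this wiring is illustrative only):

    use std::{sync::mpsc, thread};

    let (cancel_handle, cancel_receiver) = mpsc::channel();

    let db = upgrade_db.clone();
    let upgrade_thread = thread::spawn(move || run(initial_tip_height, &db, &cancel_receiver));

    // Sending a `CancelFormatChange` (or dropping the sender) makes the next
    // `try_recv()` check in the loops above return something other than
    // `Err(TryRecvError::Empty)`, so `run` exits with `Err(CancelFormatChange)`.
    let _ = cancel_handle.send(CancelFormatChange);
    assert!(upgrade_thread.join().expect("upgrade thread should not panic").is_err());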
/// Check that note commitment subtrees were correctly added.
///
/// # Panics
///
/// If a note commitment subtree is missing or incorrect.
pub fn check(db: &ZebraDb) {
let check_sapling_subtrees = check_sapling_subtrees(db);
let check_orchard_subtrees = check_orchard_subtrees(db);
if !check_sapling_subtrees || !check_orchard_subtrees {
panic!("missing or bad subtree(s)");
}
}
/// Check that Sapling note commitment subtrees were correctly added.
///
/// # Panics
///
/// If a note commitment subtree is missing or incorrect.
fn check_sapling_subtrees(db: &ZebraDb) -> bool {
let Some(NoteCommitmentSubtreeIndex(mut first_incomplete_subtree_index)) =
db.sapling_tree().subtree_index()
else {
return true;
};
// If there are no incomplete subtrees in the tree, also expect a subtree for the final index.
if db.sapling_tree().is_complete_subtree() {
first_incomplete_subtree_index += 1;
}
let mut is_valid = true;
for index in 0..first_incomplete_subtree_index {
// Check that there's a continuous range of subtrees from index [0, first_incomplete_subtree_index)
let Some(subtree) = db.sapling_subtree_by_index(index) else {
error!(index, "missing subtree");
is_valid = false;
continue;
};
// Check that there was a sapling note at the subtree's end height.
let Some(tree) = db.sapling_tree_by_height(&subtree.end) else {
error!(?subtree.end, "missing note commitment tree at subtree completion height");
is_valid = false;
continue;
};
// Check the index and root if the sapling note commitment tree at this height is a complete subtree.
if let Some((index, node)) = tree.completed_subtree_index_and_root() {
if subtree.index != index {
error!("completed subtree indexes should match");
is_valid = false;
}
if subtree.node != node {
error!("completed subtree roots should match");
is_valid = false;
}
}
// Check that the final note has a greater subtree index if it didn't complete a subtree.
else {
let Some(prev_tree) = db.sapling_tree_by_height(&subtree.end.previous()) else {
error!(?subtree.end, "missing note commitment tree at subtree completion height");
is_valid = false;
continue;
};
let prev_subtree_index = prev_tree.subtree_index();
let subtree_index = tree.subtree_index();
if subtree_index <= prev_subtree_index {
error!(
?subtree_index,
?prev_subtree_index,
"note commitment tree at end height should have incremented subtree index"
);
is_valid = false;
}
}
}
let mut subtree_count = 0;
for (index, height, tree) in db
.sapling_tree_by_height_range(..)
.filter_map(|(height, tree)| Some((tree.subtree_index()?, height, tree)))
.filter_map(|(subtree_index, height, tree)| {
if tree.is_complete_subtree() || subtree_index.0 > subtree_count {
let subtree_index = subtree_count;
subtree_count += 1;
Some((subtree_index, height, tree))
} else {
None
}
})
{
let Some(subtree) = db.sapling_subtree_by_index(index) else {
error!(?index, "missing subtree");
is_valid = false;
continue;
};
if subtree.index.0 != index {
error!("completed subtree indexes should match");
is_valid = false;
}
if subtree.end != height {
let is_complete = tree.is_complete_subtree();
error!(?subtree.end, ?height, ?index, ?is_complete, "bad sapling subtree end height");
is_valid = false;
}
if let Some((_index, node)) = tree.completed_subtree_index_and_root() {
if subtree.node != node {
error!("completed subtree roots should match");
is_valid = false;
}
}
}
if !is_valid {
error!(
?subtree_count,
first_incomplete_subtree_index, "missing or bad sapling subtrees"
);
}
is_valid
}
/// Check that Orchard note commitment subtrees were correctly added.
///
/// # Panics
///
/// If a note commitment subtree is missing or incorrect.
fn check_orchard_subtrees(db: &ZebraDb) -> bool {
let Some(NoteCommitmentSubtreeIndex(mut first_incomplete_subtree_index)) =
db.orchard_tree().subtree_index()
else {
return true;
};
// If there are no incomplete subtrees in the tree, also expect a subtree for the final index.
if db.orchard_tree().is_complete_subtree() {
first_incomplete_subtree_index += 1;
}
let mut is_valid = true;
for index in 0..first_incomplete_subtree_index {
// Check that there's a continuous range of subtrees from index [0, first_incomplete_subtree_index)
let Some(subtree) = db.orchard_subtree_by_index(index) else {
error!(index, "missing subtree");
is_valid = false;
continue;
};
// Check that there was an orchard note at the subtree's end height.
let Some(tree) = db.orchard_tree_by_height(&subtree.end) else {
error!(?subtree.end, "missing note commitment tree at subtree completion height");
is_valid = false;
continue;
};
// Check the index and root if the orchard note commitment tree at this height is a complete subtree.
if let Some((index, node)) = tree.completed_subtree_index_and_root() {
if subtree.index != index {
error!("completed subtree indexes should match");
is_valid = false;
}
if subtree.node != node {
error!("completed subtree roots should match");
is_valid = false;
}
}
// Check that the final note has a greater subtree index if it didn't complete a subtree.
else {
let Some(prev_tree) = db.orchard_tree_by_height(&subtree.end.previous()) else {
error!(?subtree.end, "missing note commitment tree at subtree completion height");
is_valid = false;
continue;
};
let prev_subtree_index = prev_tree.subtree_index();
let subtree_index = tree.subtree_index();
if subtree_index <= prev_subtree_index {
error!(
?subtree_index,
?prev_subtree_index,
"note commitment tree at end height should have incremented subtree index"
);
is_valid = false;
}
}
}
let mut subtree_count = 0;
for (index, height, tree) in db
.orchard_tree_by_height_range(..)
.filter_map(|(height, tree)| Some((tree.subtree_index()?, height, tree)))
.filter_map(|(subtree_index, height, tree)| {
if tree.is_complete_subtree() || subtree_index.0 > subtree_count {
let subtree_index = subtree_count;
subtree_count += 1;
Some((subtree_index, height, tree))
} else {
None
}
})
{
let Some(subtree) = db.orchard_subtree_by_index(index) else {
error!(?index, "missing subtree");
is_valid = false;
continue;
};
if subtree.index.0 != index {
error!("completed subtree indexes should match");
is_valid = false;
}
if subtree.end != height {
let is_complete = tree.is_complete_subtree();
error!(?subtree.end, ?height, ?index, ?is_complete, "bad orchard subtree end height");
is_valid = false;
}
if let Some((_index, node)) = tree.completed_subtree_index_and_root() {
if subtree.node != node {
error!("completed subtree roots should match");
is_valid = false;
}
}
}
if !is_valid {
error!(
?subtree_count,
first_incomplete_subtree_index, "missing or bad orchard subtrees"
);
}
is_valid
}
/// Writes a Sapling note commitment subtree to `upgrade_db`.
fn write_sapling_subtree(
upgrade_db: &ZebraDb,
index: NoteCommitmentSubtreeIndex,
height: Height,
node: sapling::tree::Node,
) {
let subtree = NoteCommitmentSubtree::new(index, height, node);
let mut batch = DiskWriteBatch::new();
batch.insert_sapling_subtree(upgrade_db, &subtree);
upgrade_db
.write_batch(batch)
.expect("writing sapling note commitment subtrees should always succeed.");
if index.0 % 100 == 0 {
info!(?height, index = ?index.0, "calculated and added sapling subtree");
}
// This log happens about once per second on recent machines with SSD disks.
debug!(?height, index = ?index.0, ?node, "calculated and added sapling subtree");
}
/// Writes an Orchard note commitment subtree to `upgrade_db`.
fn write_orchard_subtree(
upgrade_db: &ZebraDb,
index: NoteCommitmentSubtreeIndex,
height: Height,
node: orchard::tree::Node,
) {
let subtree = NoteCommitmentSubtree::new(index, height, node);
let mut batch = DiskWriteBatch::new();
batch.insert_orchard_subtree(upgrade_db, &subtree);
upgrade_db
.write_batch(batch)
.expect("writing orchard note commitment subtrees should always succeed.");
if index.0 % 300 == 0 {
info!(?height, index = ?index.0, "calculated and added orchard subtree");
}
// This log happens about 3 times per second on recent machines with SSD disks.
debug!(?height, index = ?index.0, ?node, "calculated and added orchard subtree");
}


@ -19,7 +19,7 @@ use crate::{
disk_db::DiskDb,
disk_format::{
block::MAX_ON_DISK_HEIGHT,
upgrade::{DbFormatChange, DbFormatChangeThreadHandle},
upgrade::{self, DbFormatChange, DbFormatChangeThreadHandle},
},
},
Config,
@ -60,7 +60,10 @@ pub struct ZebraDb {
impl ZebraDb {
/// Opens or creates the database at `config.path` for `network`,
/// and returns a shared high-level typed database wrapper.
pub fn new(config: &Config, network: Network) -> ZebraDb {
///
/// If `debug_skip_format_upgrades` is true, don't do any format upgrades or format checks.
/// This argument is only used when running tests; it is ignored in production code.
pub fn new(config: &Config, network: Network, debug_skip_format_upgrades: bool) -> ZebraDb {
let running_version = database_format_version_in_code();
let disk_version = database_format_version_on_disk(config, network)
.expect("unable to read database format version file");
@ -84,7 +87,12 @@ impl ZebraDb {
// a while to start, and new blocks can be committed as soon as we return from this method.
let initial_tip_height = db.finalized_tip_height();
// Start any required format changes.
// Always do format upgrades & checks in production code.
if cfg!(test) && debug_skip_format_upgrades {
return db;
}
// Start any required format changes, and do format checks.
//
// TODO: should debug_stop_at_height wait for these upgrades, or not?
if let Some(format_change) = format_change {
@ -108,9 +116,10 @@ impl ZebraDb {
db.format_change_handle = Some(format_change_handle);
} else {
// If we're re-opening a previously upgraded or newly created database,
// the trees should already be de-duplicated.
// the database format should be valid.
// (There's no format change here, so the format change checks won't run.)
DbFormatChange::check_for_duplicate_trees(db.clone());
upgrade::add_subtrees::check(&db.clone());
}
db


@ -26,7 +26,7 @@ use zebra_chain::{
use zebra_test::vectors::{MAINNET_BLOCKS, TESTNET_BLOCKS};
use crate::{
service::finalized_state::{disk_db::DiskWriteBatch, FinalizedState},
service::finalized_state::{disk_db::DiskWriteBatch, ZebraDb},
CheckpointVerifiedBlock, Config,
};
@ -77,11 +77,11 @@ fn test_block_db_round_trip_with(
) {
let _init_guard = zebra_test::init();
let state = FinalizedState::new(
let state = ZebraDb::new(
&Config::ephemeral(),
network,
#[cfg(feature = "elasticsearch")]
None,
// The raw database accesses in this test create invalid database formats.
true,
);
// Check that each block round-trips to the database


@ -180,17 +180,41 @@ impl ZebraDb {
self.db.zs_range_iter(&sapling_trees, range)
}
/// Returns the Sapling note commitment subtree at this `index`.
///
/// # Correctness
///
/// This method should not be used to get subtrees for RPC responses,
/// because those subtree lists require that the start subtree is present in the list.
/// Instead, use `sapling_subtree_list_by_index_for_rpc()`.
#[allow(clippy::unwrap_in_result)]
pub(in super::super) fn sapling_subtree_by_index(
&self,
index: impl Into<NoteCommitmentSubtreeIndex> + Copy,
) -> Option<NoteCommitmentSubtree<sapling::tree::Node>> {
let sapling_subtrees = self
.db
.cf_handle("sapling_note_commitment_subtree")
.unwrap();
let subtree_data: NoteCommitmentSubtreeData<sapling::tree::Node> =
self.db.zs_get(&sapling_subtrees, &index.into())?;
Some(subtree_data.with_index(index))
}
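As a usage sketch inside a state validity check (illustrative, not part of this PR; `db` is a `ZebraDb`), a single finalized subtree can be fetched directly by index:

    if let Some(subtree) = db.sapling_subtree_by_index(0_u16) {
        // `index`, `end`, and `node` describe the first completed subtree,
        // the height at which it was completed, and its root.
        assert_eq!(subtree.index, NoteCommitmentSubtreeIndex(0));
        debug!(?subtree.end, ?subtree.node, "first sapling subtree");
    }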
/// Returns a list of Sapling [`NoteCommitmentSubtree`]s starting at `start_index`.
/// If `limit` is provided, the list is limited to `limit` entries.
///
/// If there is no subtree at `start_index`, the returned list is empty.
/// Otherwise, subtrees are continuous up to the finalized tip.
///
/// There is no API for retrieving single subtrees by index, because it can accidentally be used
/// to create an inconsistent list of subtrees after concurrent non-finalized and finalized
/// updates.
/// # Correctness
///
/// This method is specifically designed for the `z_getsubtreesbyindex` state request.
/// It might not work for other RPCs or state checks.
#[allow(clippy::unwrap_in_result)]
pub fn sapling_subtrees_by_index(
pub fn sapling_subtree_list_by_index_for_rpc(
&self,
start_index: NoteCommitmentSubtreeIndex,
limit: Option<NoteCommitmentSubtreeIndex>,
@ -289,17 +313,41 @@ impl ZebraDb {
self.db.zs_range_iter(&orchard_trees, range)
}
/// Returns the Orchard note commitment subtree at this `index`.
///
/// # Correctness
///
/// This method should not be used to get subtrees for RPC responses,
/// because those subtree lists require that the start subtree is present in the list.
/// Instead, use `orchard_subtree_list_by_index_for_rpc()`.
#[allow(clippy::unwrap_in_result)]
pub(in super::super) fn orchard_subtree_by_index(
&self,
index: impl Into<NoteCommitmentSubtreeIndex> + Copy,
) -> Option<NoteCommitmentSubtree<orchard::tree::Node>> {
let orchard_subtrees = self
.db
.cf_handle("orchard_note_commitment_subtree")
.unwrap();
let subtree_data: NoteCommitmentSubtreeData<orchard::tree::Node> =
self.db.zs_get(&orchard_subtrees, &index.into())?;
Some(subtree_data.with_index(index))
}
/// Returns a list of Orchard [`NoteCommitmentSubtree`]s starting at `start_index`.
/// If `limit` is provided, the list is limited to `limit` entries.
///
/// If there is no subtree at `start_index`, the returned list is empty.
/// Otherwise, subtrees are continuous up to the finalized tip.
///
/// There is no API for retrieving single subtrees by index, because it can accidentally be used
/// to create an inconsistent list of subtrees after concurrent non-finalized and finalized
/// updates.
/// # Correctness
///
/// This method is specifically designed for the `z_getsubtreesbyindex` state request.
/// It might not work for other RPCs or state checks.
#[allow(clippy::unwrap_in_result)]
pub fn orchard_subtrees_by_index(
pub fn orchard_subtree_list_by_index_for_rpc(
&self,
start_index: NoteCommitmentSubtreeIndex,
limit: Option<NoteCommitmentSubtreeIndex>,
@ -437,9 +485,6 @@ impl DiskWriteBatch {
let sapling_tree_cf = db.cf_handle("sapling_note_commitment_tree").unwrap();
let orchard_tree_cf = db.cf_handle("orchard_note_commitment_tree").unwrap();
let _sapling_subtree_cf = db.cf_handle("sapling_note_commitment_subtree").unwrap();
let _orchard_subtree_cf = db.cf_handle("orchard_note_commitment_subtree").unwrap();
let height = finalized.verified.height;
let trees = finalized.treestate.note_commitment_trees.clone();
@ -485,19 +530,32 @@ impl DiskWriteBatch {
self.zs_insert(&orchard_tree_cf, height, trees.orchard);
}
// TODO: Increment DATABASE_FORMAT_MINOR_VERSION and uncomment these insertions
if let Some(subtree) = trees.sapling_subtree {
self.insert_sapling_subtree(zebra_db, &subtree);
}
// if let Some(subtree) = trees.sapling_subtree {
// self.zs_insert(&sapling_subtree_cf, subtree.index, subtree.into_data());
// }
// if let Some(subtree) = trees.orchard_subtree {
// self.zs_insert(&orchard_subtree_cf, subtree.index, subtree.into_data());
// }
if let Some(subtree) = trees.orchard_subtree {
self.insert_orchard_subtree(zebra_db, &subtree);
}
self.prepare_history_batch(db, finalized)
}
// Sapling tree methods
/// Inserts the Sapling note commitment subtree.
pub fn insert_sapling_subtree(
&mut self,
zebra_db: &ZebraDb,
subtree: &NoteCommitmentSubtree<sapling::tree::Node>,
) {
let sapling_subtree_cf = zebra_db
.db
.cf_handle("sapling_note_commitment_subtree")
.unwrap();
self.zs_insert(&sapling_subtree_cf, subtree.index, subtree.into_data());
}
/// Deletes the Sapling note commitment tree at the given [`Height`].
pub fn delete_sapling_tree(&mut self, zebra_db: &ZebraDb, height: &Height) {
let sapling_tree_cf = zebra_db
@ -519,6 +577,21 @@ impl DiskWriteBatch {
self.zs_delete_range(&sapling_tree_cf, from, to);
}
// Orchard tree methods
/// Inserts the Orchard note commitment subtree.
pub fn insert_orchard_subtree(
&mut self,
zebra_db: &ZebraDb,
subtree: &NoteCommitmentSubtree<orchard::tree::Node>,
) {
let orchard_subtree_cf = zebra_db
.db
.cf_handle("orchard_note_commitment_subtree")
.unwrap();
self.zs_insert(&orchard_subtree_cf, subtree.index, subtree.into_data());
}
/// Deletes the Orchard note commitment tree at the given [`Height`].
pub fn delete_orchard_tree(&mut self, zebra_db: &ZebraDb, height: &Height) {
let orchard_tree_cf = zebra_db


@ -682,6 +682,11 @@ impl Chain {
/// Returns the Sapling [`NoteCommitmentSubtree`] that was completed at a block with
/// [`HashOrHeight`], if it exists in the non-finalized [`Chain`].
///
/// # Concurrency
///
/// This method should not be used to get subtrees in concurrent code by height,
/// because the same heights in different chain forks can have different subtrees.
pub fn sapling_subtree(
&self,
hash_or_height: HashOrHeight,
@ -872,6 +877,11 @@ impl Chain {
/// Returns the Orchard [`NoteCommitmentSubtree`] that was completed at a block with
/// [`HashOrHeight`], if it exists in the non-finalized [`Chain`].
///
/// # Concurrency
///
/// This method should not be used to get subtrees in concurrent code by height,
/// because the same heights in different chain forks can have different subtrees.
pub fn orchard_subtree(
&self,
hash_or_height: HashOrHeight,


@ -74,7 +74,7 @@ where
// In that case, we ignore all the trees in `chain` after the first inconsistent tree,
// because we know they will be inconsistent as well. (It is cryptographically impossible
// for tree roots to be equal once the leaves have diverged.)
let mut db_list = db.sapling_subtrees_by_index(start_index, limit);
let mut db_list = db.sapling_subtree_list_by_index_for_rpc(start_index, limit);
// If there's no chain, then we have the complete list.
let Some(chain) = chain else {
@ -162,7 +162,7 @@ where
// In that case, we ignore all the trees in `chain` after the first inconsistent tree,
// because we know they will be inconsistent as well. (It is cryptographically impossible
// for tree roots to be equal once the leaves have diverged.)
let mut db_list = db.orchard_subtrees_by_index(start_index, limit);
let mut db_list = db.orchard_subtree_list_by_index_for_rpc(start_index, limit);
// If there's no chain, then we have the complete list.
let Some(chain) = chain else {