fix(state): Use correct end heights for end of block subtrees during the full sync (#7566)

* Avoid manual handling of previous sapling trees by using iterator windows instead

* Avoid manual sapling subtree index handling by comparing prev and current subtree indexes instead

* Simplify adding notes by using the exact number of remaining notes

* Simplify by skipping the first block, because it can't complete a subtree

* Re-use existing tree update code

* Apply the sapling changes to orchard subtree updates

* add a reverse database column family iterator function

* Make skipping the lowest tree independent of iteration order

* Move new subtree checks into the iterator, rename to end_height

* Split subtree calculation into a new method

* Split the calculate and write methods

* Quickly check the first subtree before running the full upgrade

* Do the quick checks every time Zebra runs, and refactor slow check error handling

* Do quick checks for orchard as well

* Make orchard tree upgrade match sapling upgrade code

* Upgrade subtrees in reverse height order

* Bump the database patch version so the upgrade runs again

* Reset previous subtree upgrade data before doing this one

* Add extra checks to subtree calculation to diagnose errors

* Use correct heights for subtrees completed at the end of a block

* Add even more checks to diagnose issues

* Instrument upgrade methods to improve diagnostics

* Prevent modification of re-used trees

* Debug with subtree positions as well

* Fix an off-by-one error with completed subtrees

* Fix typos and confusing comments

Co-authored-by: Marek <mail@marek.onl>

* Fix mistaken previous tree handling and end tree comments

* Remove unnecessary subtraction in remaining leaves calc

* Log heights when assertions fail

* Fix new subtree detection filter

* Move new subtree check into a method, cleanup unused code

* Remove redundant assertions

* Wait for subtree upgrade before testing RPCs

* Fix subtree search in quick check

* Temporarily upgrade subtrees in forward height order

* Clarify some comments

* Fix missing test imports

* Fix subtree logging

* Add a comment about a potential hang with future upgrades

* Fix zebrad var ownership

* Log more info when add_subtrees.rs fails

* cargo fmt --all

* Fix unrelated clippy::unnecessary_unwrap

* cargo clippy --fix --all-features --all-targets; cargo fmt --all

* Stop the quick check depending on tree de-duplication

* Refactor waiting for the upgrade into functions

* Wait for state upgrades whenever the cached state is updated

* Wait for the testnet upgrade in the right place

* Fix unused variable

* Fix a subtree detection bug and comments

* Remove an early reference to reverse direction

* Stop skipping subtrees completed at the end of blocks

* Actually fix new subtree code

---------

Co-authored-by: Marek <mail@marek.onl>
teor authored 2023-09-20 00:49:36 +10:00, committed by GitHub
parent 0ee8b95a01
commit 7a7d79dfaf
18 changed files with 1089 additions and 306 deletions


@@ -5740,6 +5740,7 @@ dependencies = [
"futures",
"halo2_proofs",
"hex",
"hex-literal",
"howudoin",
"indexmap 2.0.0",
"insta",


@@ -517,7 +517,16 @@ impl Block {
}
}
// update history tree for the next block
if history_tree.is_none() {
if let Some(history_tree) = history_tree.as_mut() {
history_tree
.push(
current.network,
Arc::new(block.clone()),
sapling_tree.root(),
orchard_tree.root(),
)
.unwrap();
} else {
history_tree = Some(
HistoryTree::from_block(
current.network,
@@ -527,17 +536,6 @@
)
.unwrap(),
);
} else {
history_tree
.as_mut()
.unwrap()
.push(
current.network,
Arc::new(block.clone()),
sapling_tree.root(),
orchard_tree.root(),
)
.unwrap();
}
}
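The restructure above replaces an `is_none()` check followed by `.as_mut().unwrap()` with a single `if let`, taking the mutable borrow only when the tree exists. A minimal sketch of the pattern, with a `Vec` standing in for `HistoryTree`:

```rust
fn main() {
    let mut history: Option<Vec<u32>> = None;
    for block in 1u32..=3 {
        if let Some(history) = history.as_mut() {
            // Existing tree: push the new block, no unwrap() needed.
            history.push(block);
        } else {
            // First block: create the tree.
            history = Some(vec![block]);
        }
    }
    assert_eq!(history, Some(vec![1, 2, 3]));
}
```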


@@ -394,6 +394,72 @@ impl NoteCommitmentTree {
self.inner.value()
}
/// Returns the position of the most recently appended leaf in the tree.
///
/// This method is used for debugging. Use `incrementalmerkletree::Address` for tree operations.
pub fn position(&self) -> Option<u64> {
let Some(tree) = self.frontier() else {
// An empty tree doesn't have a previous leaf.
return None;
};
Some(tree.position().into())
}
/// Returns true if this tree has at least one new subtree, when compared with `prev_tree`.
pub fn contains_new_subtree(&self, prev_tree: &Self) -> bool {
// Use -1 for the index of the subtree with no notes, so the comparisons are valid.
let index = self.subtree_index().map_or(-1, |index| i32::from(index.0));
let prev_index = prev_tree
.subtree_index()
.map_or(-1, |index| i32::from(index.0));
// This calculation can't overflow, because we're using i32 for u16 values.
let index_difference = index - prev_index;
// There are 4 cases we need to handle:
// - lower index: never a new subtree
// - equal index: sometimes a new subtree
// - next index: sometimes a new subtree
// - greater than the next index: always a new subtree
//
// To simplify the function, we deal with the simple cases first.
// There can't be any new subtrees if the current index is strictly lower.
if index < prev_index {
return false;
}
// There is at least one new subtree, even if there is a spurious index difference.
if index_difference > 1 {
return true;
}
// If the indexes are equal, there can only be a new subtree if `self` just completed it.
if index == prev_index {
return self.is_complete_subtree();
}
// If `self` is the next index, check for spurious index differences.
//
// There is one new subtree somewhere in the trees. It is either:
// - a new subtree at the end of the previous tree, or
// - a new subtree in this tree (but not at the end).
//
// Spurious index differences happen because the subtree index only increases when the
// first note is added to the new subtree. So we need to exclude subtrees completed by the
// last note commitment in the previous tree.
//
// We also need to exclude empty previous subtrees, because the index changes to zero when
// the first note is added, but a subtree wasn't completed.
if prev_tree.is_complete_subtree() || prev_index == -1 {
return false;
}
// A new subtree was completed by a note commitment that isn't in the previous tree.
true
}
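The four cases can be exercised in isolation. A standalone sketch with plain integers standing in for subtree indexes (`-1` marks an empty tree); `has_new_subtree` is a hypothetical helper mirroring the logic above, not Zebra API:

```rust
/// Mirrors the `contains_new_subtree` comparison, with `-1` for an empty tree.
fn has_new_subtree(prev_index: i32, index: i32, self_complete: bool, prev_complete: bool) -> bool {
    // Lower index: never a new subtree.
    if index < prev_index {
        return false;
    }
    // Skipped at least one index: always a new subtree.
    if index - prev_index > 1 {
        return true;
    }
    // Equal index: a new subtree only if `self` just completed it.
    if index == prev_index {
        return self_complete;
    }
    // Next index: exclude subtrees already completed by the previous tree's
    // final note, and the spurious empty-tree-to-first-note transition.
    !(prev_complete || prev_index == -1)
}

fn main() {
    assert!(!has_new_subtree(-1, 0, false, false)); // first note, nothing completed
    assert!(has_new_subtree(0, 0, true, false)); // completed by this tree's final note
    assert!(!has_new_subtree(0, 1, false, true)); // completed by the previous tree
    assert!(has_new_subtree(0, 1, false, false)); // completed mid-block
}
```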
/// Returns true if the most recently appended leaf completes the subtree
pub fn is_complete_subtree(&self) -> bool {
let Some(tree) = self.frontier() else {
@@ -423,6 +489,46 @@ impl NoteCommitmentTree {
Some(index)
}
/// Returns the number of leaf nodes required to complete the subtree at
/// [`TRACKED_SUBTREE_HEIGHT`].
///
/// Returns `2^TRACKED_SUBTREE_HEIGHT` if the tree is empty.
#[allow(clippy::unwrap_in_result)]
pub fn remaining_subtree_leaf_nodes(&self) -> usize {
let remaining = match self.frontier() {
// If the subtree has at least one leaf node, the remaining number of nodes can be
// calculated using the maximum subtree position and the current position.
Some(tree) => {
let max_position = incrementalmerkletree::Address::above_position(
TRACKED_SUBTREE_HEIGHT.into(),
tree.position(),
)
.max_position();
max_position - tree.position().into()
}
// If the subtree has no nodes, the remaining number of nodes is the number of nodes in
// a subtree.
None => {
let subtree_address = incrementalmerkletree::Address::above_position(
TRACKED_SUBTREE_HEIGHT.into(),
// This position is guaranteed to be in the first subtree.
0.into(),
);
assert_eq!(
subtree_address.position_range_start(),
0.into(),
"address is not in the first subtree"
);
subtree_address.position_range_end()
}
};
u64::from(remaining).try_into().expect("fits in usize")
}
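A worked example of the arithmetic above, with assumed positions: at `TRACKED_SUBTREE_HEIGHT = 16`, each subtree holds 2^16 = 65,536 leaves, and the remaining count is the distance from the last appended leaf to the end of its subtree.

```rust
fn main() {
    // A level-16 subtree holds 2^16 leaves.
    let leaves_per_subtree: u64 = 1 << 16; // 65_536
    // Suppose the most recently appended leaf sits at global position 70_000,
    // which falls in the second subtree (positions 65_536..=131_071).
    let position: u64 = 70_000;
    let subtree_start = (position / leaves_per_subtree) * leaves_per_subtree; // 65_536
    let max_position = subtree_start + leaves_per_subtree - 1; // 131_071
    // Leaves still needed to complete the subtree, as in the method above.
    assert_eq!(max_position - position, 61_071);
    // An empty tree needs a full subtree of leaves.
    assert_eq!(leaves_per_subtree, 65_536);
}
```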
/// Returns subtree index and root if the most recently appended leaf completes the subtree
pub fn completed_subtree_index_and_root(&self) -> Option<(NoteCommitmentSubtreeIndex, Node)> {
if !self.is_complete_subtree() {


@@ -128,6 +128,7 @@ impl NoteCommitmentTrees {
}
/// Update the sprout note commitment tree.
/// This method modifies the tree inside the `Arc`, if the `Arc` only has one reference.
fn update_sprout_note_commitment_tree(
mut sprout: Arc<sprout::tree::NoteCommitmentTree>,
sprout_note_commitments: Vec<sprout::NoteCommitment>,
@@ -145,8 +146,9 @@
}
/// Update the sapling note commitment tree.
/// This method modifies the tree inside the `Arc`, if the `Arc` only has one reference.
#[allow(clippy::unwrap_in_result)]
fn update_sapling_note_commitment_tree(
pub fn update_sapling_note_commitment_tree(
mut sapling: Arc<sapling::tree::NoteCommitmentTree>,
sapling_note_commitments: Vec<sapling::tree::NoteCommitmentUpdate>,
) -> Result<
@@ -170,11 +172,14 @@
let mut subtree_root = None;
for sapling_note_commitment in sapling_note_commitments {
sapling_nct.append(sapling_note_commitment)?;
// Subtrees end heights come from the blocks they are completed in,
// so we check for new subtrees after appending the note.
// (If we check before, subtrees at the end of blocks have the wrong heights.)
if let Some(index_and_node) = sapling_nct.completed_subtree_index_and_root() {
subtree_root = Some(index_and_node);
}
sapling_nct.append(sapling_note_commitment)?;
}
// Re-calculate and cache the tree root.
@@ -184,8 +189,9 @@
}
/// Update the orchard note commitment tree.
/// This method modifies the tree inside the `Arc`, if the `Arc` only has one reference.
#[allow(clippy::unwrap_in_result)]
fn update_orchard_note_commitment_tree(
pub fn update_orchard_note_commitment_tree(
mut orchard: Arc<orchard::tree::NoteCommitmentTree>,
orchard_note_commitments: Vec<orchard::tree::NoteCommitmentUpdate>,
) -> Result<
@@ -203,11 +209,14 @@
let mut subtree_root = None;
for orchard_note_commitment in orchard_note_commitments {
orchard_nct.append(orchard_note_commitment)?;
// Subtrees end heights come from the blocks they are completed in,
// so we check for new subtrees after appending the note.
// (If we check before, subtrees at the end of blocks have the wrong heights.)
if let Some(index_and_node) = orchard_nct.completed_subtree_index_and_root() {
subtree_root = Some(index_and_node);
}
orchard_nct.append(orchard_note_commitment)?;
}
// Re-calculate and cache the tree root.
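The append-then-check ordering in both loops is the heart of this fix: checking before appending attributed a subtree finished by a block's final note commitment to the following block. A toy sketch with a hypothetical 4-leaf subtree (the real subtrees hold 2^16 leaves):

```rust
fn main() {
    let mut leaves = 0u32;
    let mut completed_in_block = None;
    for (block_height, notes) in [(100u32, 3u32), (101, 1), (102, 2)] {
        for _ in 0..notes {
            leaves += 1;
            // Check *after* appending: block 101's single note completes the
            // subtree, so the subtree's end height is 101, not 102.
            if leaves % 4 == 0 {
                completed_in_block = Some(block_height);
            }
        }
    }
    assert_eq!(completed_in_block, Some(101));
}
```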


@@ -375,6 +375,72 @@ impl NoteCommitmentTree {
self.inner.value()
}
/// Returns the position of the most recently appended leaf in the tree.
///
/// This method is used for debugging. Use `incrementalmerkletree::Address` for tree operations.
pub fn position(&self) -> Option<u64> {
let Some(tree) = self.frontier() else {
// An empty tree doesn't have a previous leaf.
return None;
};
Some(tree.position().into())
}
/// Returns true if this tree has at least one new subtree, when compared with `prev_tree`.
pub fn contains_new_subtree(&self, prev_tree: &Self) -> bool {
// Use -1 for the index of the subtree with no notes, so the comparisons are valid.
let index = self.subtree_index().map_or(-1, |index| i32::from(index.0));
let prev_index = prev_tree
.subtree_index()
.map_or(-1, |index| i32::from(index.0));
// This calculation can't overflow, because we're using i32 for u16 values.
let index_difference = index - prev_index;
// There are 4 cases we need to handle:
// - lower index: never a new subtree
// - equal index: sometimes a new subtree
// - next index: sometimes a new subtree
// - greater than the next index: always a new subtree
//
// To simplify the function, we deal with the simple cases first.
// There can't be any new subtrees if the current index is strictly lower.
if index < prev_index {
return false;
}
// There is at least one new subtree, even if there is a spurious index difference.
if index_difference > 1 {
return true;
}
// If the indexes are equal, there can only be a new subtree if `self` just completed it.
if index == prev_index {
return self.is_complete_subtree();
}
// If `self` is the next index, check for spurious index differences.
//
// There is one new subtree somewhere in the trees. It is either:
// - a new subtree at the end of the previous tree, or
// - a new subtree in this tree (but not at the end).
//
// Spurious index differences happen because the subtree index only increases when the
// first note is added to the new subtree. So we need to exclude subtrees completed by the
// last note commitment in the previous tree.
//
// We also need to exclude empty previous subtrees, because the index changes to zero when
// the first note is added, but a subtree wasn't completed.
if prev_tree.is_complete_subtree() || prev_index == -1 {
return false;
}
// A new subtree was completed by a note commitment that isn't in the previous tree.
true
}
/// Returns true if the most recently appended leaf completes the subtree
pub fn is_complete_subtree(&self) -> bool {
let Some(tree) = self.frontier() else {
@@ -404,6 +470,46 @@ impl NoteCommitmentTree {
Some(index)
}
/// Returns the number of leaf nodes required to complete the subtree at
/// [`TRACKED_SUBTREE_HEIGHT`].
///
/// Returns `2^TRACKED_SUBTREE_HEIGHT` if the tree is empty.
#[allow(clippy::unwrap_in_result)]
pub fn remaining_subtree_leaf_nodes(&self) -> usize {
let remaining = match self.frontier() {
// If the subtree has at least one leaf node, the remaining number of nodes can be
// calculated using the maximum subtree position and the current position.
Some(tree) => {
let max_position = incrementalmerkletree::Address::above_position(
TRACKED_SUBTREE_HEIGHT.into(),
tree.position(),
)
.max_position();
max_position - tree.position().into()
}
// If the subtree has no nodes, the remaining number of nodes is the number of nodes in
// a subtree.
None => {
let subtree_address = incrementalmerkletree::Address::above_position(
TRACKED_SUBTREE_HEIGHT.into(),
// This position is guaranteed to be in the first subtree.
0.into(),
);
assert_eq!(
subtree_address.position_range_start(),
0.into(),
"address is not in the first subtree"
);
subtree_address.position_range_end()
}
};
u64::from(remaining).try_into().expect("fits in usize")
}
/// Returns subtree index and root if the most recently appended leaf completes the subtree
pub fn completed_subtree_index_and_root(&self) -> Option<(NoteCommitmentSubtreeIndex, Node)> {
if !self.is_complete_subtree() {


@@ -1,6 +1,6 @@
//! Struct representing Sapling/Orchard note commitment subtrees
use std::num::TryFromIntError;
use std::{fmt, num::TryFromIntError};
use serde::{Deserialize, Serialize};
@@ -19,6 +19,12 @@ pub const TRACKED_SUBTREE_HEIGHT: u8 = 16;
#[serde(transparent)]
pub struct NoteCommitmentSubtreeIndex(pub u16);
impl fmt::Display for NoteCommitmentSubtreeIndex {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(&self.0.to_string())
}
}
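A small sketch of what the new `Display` impl buys: subtree indexes render as bare numbers, matching the `%`-style tracing captures used elsewhere in this commit. A stand-in type is defined so the sketch is self-contained:

```rust
use std::fmt;

// A stand-in mirroring the new Display impl on NoteCommitmentSubtreeIndex.
struct SubtreeIndex(u16);

impl fmt::Display for SubtreeIndex {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.0.to_string())
    }
}

fn main() {
    // Logs can now record the index as a bare number, e.g. via `%index`.
    assert_eq!(SubtreeIndex(42).to_string(), "42");
}
```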
impl From<u16> for NoteCommitmentSubtreeIndex {
fn from(value: u16) -> Self {
Self(value)


@@ -48,6 +48,7 @@ chrono = { version = "0.4.30", default-features = false, features = ["clock", "s
dirs = "5.0.1"
futures = "0.3.28"
hex = "0.4.3"
hex-literal = "0.4.1"
indexmap = "2.0.0"
itertools = "0.11.0"
lazy_static = "1.4.0"
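The new `hex-literal` dependency lets the upgrade embed its test-vector subtree roots as compile-time byte arrays (see `first_sapling_mainnet_subtree` later in this diff). A minimal sketch, assuming hex-literal 0.4:

```rust
use hex_literal::hex;

// `hex!` expands to a fixed-size byte array at compile time, so a malformed
// test-vector string fails the build instead of failing the upgrade at runtime.
const SAPLING_SUBTREE_0_ROOT: [u8; 32] =
    hex!("754bb593ea42d231a7ddf367640f09bbf59dc00f2c1d2003cc340e0c016b5b13");

fn main() {
    assert_eq!(SAPLING_SUBTREE_0_ROOT.len(), 32);
}
```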


@@ -2,6 +2,7 @@
use lazy_static::lazy_static;
use regex::Regex;
use semver::Version;
// For doc comment links
#[allow(unused_imports)]
@@ -52,7 +53,14 @@ pub(crate) const DATABASE_FORMAT_MINOR_VERSION: u64 = 2;
/// The database format patch version, incremented each time the on-disk database format has a
/// significant format compatibility fix.
pub(crate) const DATABASE_FORMAT_PATCH_VERSION: u64 = 0;
pub(crate) const DATABASE_FORMAT_PATCH_VERSION: u64 = 1;
/// Returns the highest database version that modifies the subtree index format.
///
/// This version is used by tests to wait for the subtree upgrade to finish.
pub fn latest_version_for_adding_subtrees() -> Version {
Version::parse("25.2.1").expect("Hardcoded version string should be valid.")
}
/// The name of the file containing the minor and patch database versions.
///
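The bump from patch version 0 to 1 is what makes the fixed upgrade run again: the format-change logic compares semver values. A sketch of the comparisons driving the reset-then-upgrade path, using the `semver` crate:

```rust
use semver::Version;

fn main() {
    let older_disk_version = Version::parse("25.2.0").unwrap();
    let latest_for_subtrees = Version::parse("25.2.1").unwrap();
    // Below the latest subtree version: run the subtree upgrade.
    assert!(older_disk_version < latest_for_subtrees);
    // At or above the first subtree version (25.2.0): also reset the
    // earlier, incorrect subtree data before re-running the upgrade.
    assert!(older_disk_version >= Version::parse("25.2.0").unwrap());
}
```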


@@ -73,4 +73,7 @@ pub use service::{
#[cfg(any(test, feature = "proptest-impl"))]
pub use config::write_database_format_version_to_disk;
#[cfg(any(test, feature = "proptest-impl"))]
pub use constants::latest_version_for_adding_subtrees;
pub(crate) use request::ContextuallyVerifiedBlock;


@@ -64,6 +64,9 @@ pub struct DiskDb {
// This configuration cannot be modified after the database is initialized,
// because some clones would have different values.
//
/// The configured network for this database.
network: Network,
/// The configured temporary database setting.
///
/// If true, the database files are deleted on drop.
@@ -247,6 +250,10 @@ pub trait ReadDisk {
impl PartialEq for DiskDb {
fn eq(&self, other: &Self) -> bool {
if self.db.path() == other.db.path() {
assert_eq!(
self.network, other.network,
"database with same path but different network configs",
);
assert_eq!(
self.ephemeral, other.ephemeral,
"database with same path but different ephemeral configs",
@@ -569,6 +576,7 @@ impl DiskDb {
info!("Opened Zebra state cache at {}", path.display());
let db = DiskDb {
network,
ephemeral: config.ephemeral,
db: Arc::new(db),
};
@@ -589,6 +597,11 @@ impl DiskDb {
// Accessor methods
/// Returns the configured network for this database.
pub fn network(&self) -> Network {
self.network
}
/// Returns the `Path` where the files used by this database are located.
pub fn path(&self) -> &Path {
self.db.path()


@@ -19,7 +19,7 @@ use DbFormatChange::*;
use crate::{
config::write_database_format_version_to_disk,
constants::DATABASE_FORMAT_VERSION,
constants::{latest_version_for_adding_subtrees, DATABASE_FORMAT_VERSION},
database_format_version_in_code, database_format_version_on_disk,
service::finalized_state::{DiskWriteBatch, ZebraDb},
Config,
@@ -90,7 +90,7 @@ impl DbFormatChange {
pub fn new(running_version: Version, disk_version: Option<Version>) -> Option<Self> {
let Some(disk_version) = disk_version else {
info!(
?running_version,
%running_version,
"creating new database with the current format"
);
@@ -100,8 +100,8 @@
match disk_version.cmp(&running_version) {
Ordering::Less => {
info!(
?running_version,
?disk_version,
%running_version,
%disk_version,
"trying to open older database format: launching upgrade task"
);
@@ -112,8 +112,8 @@
}
Ordering::Greater => {
info!(
?running_version,
?disk_version,
%running_version,
%disk_version,
"trying to open newer database format: data should be compatible"
);
@@ -123,7 +123,7 @@
})
}
Ordering::Equal => {
info!(?running_version, "trying to open current database format");
info!(%running_version, "trying to open current database format");
None
}
@@ -190,6 +190,10 @@ impl DbFormatChange {
upgrade_db: ZebraDb,
cancel_receiver: mpsc::Receiver<CancelFormatChange>,
) -> Result<(), CancelFormatChange> {
// These quick checks should pass for all format changes.
// (See the detailed comment at the end of this method.)
add_subtrees::quick_check(&upgrade_db);
match self {
// Perform any required upgrades, then mark the state as upgraded.
Upgrade { .. } => self.apply_format_upgrade(
@@ -265,16 +269,16 @@
let Some(initial_tip_height) = initial_tip_height else {
// If the database is empty, then the RocksDb format doesn't need any changes.
info!(
?newer_running_version,
?older_disk_version,
%newer_running_version,
%older_disk_version,
"marking empty database as upgraded"
);
Self::mark_as_upgraded_to(&database_format_version_in_code(), &config, network);
info!(
?newer_running_version,
?older_disk_version,
%newer_running_version,
%older_disk_version,
"empty database is fully upgraded"
);
@@ -352,11 +356,17 @@
// Note commitment subtree creation database upgrade task.
let version_for_adding_subtrees =
let latest_version_for_adding_subtrees = latest_version_for_adding_subtrees();
let first_version_for_adding_subtrees =
Version::parse("25.2.0").expect("Hardcoded version string should be valid.");
// Check if we need to add note commitment subtrees to the database.
if older_disk_version < version_for_adding_subtrees {
// Check if we need to add or fix note commitment subtrees in the database.
if older_disk_version < latest_version_for_adding_subtrees {
if older_disk_version >= first_version_for_adding_subtrees {
// Clear previous upgrade data, because it was incorrect.
add_subtrees::reset(initial_tip_height, &db, cancel_receiver)?;
}
add_subtrees::run(initial_tip_height, &db, cancel_receiver)?;
// Before marking the state as upgraded, check that the upgrade completed successfully.
@@ -364,7 +374,7 @@
// Mark the database as upgraded. Zebra won't repeat the upgrade anymore once the
// database is marked, so the upgrade MUST be complete at this point.
Self::mark_as_upgraded_to(&version_for_adding_subtrees, &config, network);
Self::mark_as_upgraded_to(&latest_version_for_adding_subtrees, &config, network);
}
// # New Upgrades Usually Go Here
@@ -376,7 +386,7 @@
// every time it runs its inner update loop.
info!(
?newer_running_version,
%newer_running_version,
"Zebra automatically upgraded the database format to:"
);
@@ -474,8 +484,8 @@
.expect("unable to write database format version file to disk");
info!(
?running_version,
?disk_version,
%running_version,
disk_version = %disk_version.map_or("None".to_string(), |version| version.to_string()),
"marked database format as newly created"
);
}
@@ -535,9 +545,11 @@
.expect("unable to write database format version file to disk");
info!(
?running_version,
?format_upgrade_version,
?disk_version,
%running_version,
%disk_version,
// wait_for_state_version_upgrade() needs this to be the last field,
// so the regex matches correctly
%format_upgrade_version,
"marked database format as upgraded"
);
}
@@ -574,8 +586,8 @@
.expect("unable to write database format version file to disk");
info!(
?running_version,
?disk_version,
%running_version,
%disk_version,
"marked database format as downgraded"
);
}
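Most of this hunk swaps tracing's `?field` (Debug) captures for `%field` (Display), so versions log as `25.2.1` rather than a struct dump that the test regex can't match. A minimal sketch, assuming `tracing` and `tracing-subscriber` are available:

```rust
use semver::Version;
use tracing::info;

fn main() {
    tracing_subscriber::fmt().init();
    let format_upgrade_version = Version::parse("25.2.1").unwrap();
    // `%` records the field with Display, producing
    // `format_upgrade_version=25.2.1` at the end of the log line.
    info!(%format_upgrade_version, "marked database format as upgraded");
}
```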


@@ -2,9 +2,16 @@
use std::sync::{mpsc, Arc};
use hex_literal::hex;
use itertools::Itertools;
use tracing::instrument;
use zebra_chain::{
block::Height,
orchard, sapling,
orchard,
parallel::tree::NoteCommitmentTrees,
parameters::Network::*,
sapling,
subtree::{NoteCommitmentSubtree, NoteCommitmentSubtreeIndex},
};
@@ -16,159 +23,265 @@ use crate::service::finalized_state::{
///
/// Returns `Ok` if the upgrade completed, and `Err` if it was cancelled.
#[allow(clippy::unwrap_in_result)]
#[instrument(skip(upgrade_db, cancel_receiver))]
pub fn run(
initial_tip_height: Height,
upgrade_db: &ZebraDb,
cancel_receiver: &mpsc::Receiver<CancelFormatChange>,
) -> Result<(), CancelFormatChange> {
let mut subtree_count = 0;
let mut prev_tree: Option<_> = None;
for (height, tree) in upgrade_db.sapling_tree_by_height_range(..=initial_tip_height) {
// Return early if there is a cancel signal.
// # Consensus
//
// Zebra stores exactly one note commitment tree for every block with sapling notes.
// (It also stores the empty note commitment tree for the genesis block, but we skip that.)
//
// The consensus rules limit blocks to less than 2^16 sapling and 2^16 orchard outputs. So a
// block can't complete multiple level 16 subtrees (or complete an entire subtree by itself).
// Currently, with 2MB blocks and v4/v5 sapling and orchard output sizes, the subtree index can
// increase by at most 1 every ~20 blocks.
// Generate a list of sapling subtree inputs: previous and current trees, and their end heights.
let subtrees = upgrade_db
.sapling_tree_by_height_range(..=initial_tip_height)
// We need both the tree and its previous tree for each shielded block.
.tuple_windows()
.map(|((prev_end_height, prev_tree), (end_height, tree))| {
(prev_end_height, prev_tree, end_height, tree)
})
// Find new subtrees.
.filter(|(_prev_end_height, prev_tree, _end_height, tree)| {
tree.contains_new_subtree(prev_tree)
});
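`tuple_windows` replaces the old manual `prev_tree` bookkeeping: each tree is paired with its predecessor, and the first block drops out by construction (as the commit message notes, it can't complete a subtree). A standalone sketch with itertools:

```rust
use itertools::Itertools;

fn main() {
    // (end_height, tree) entries as the database iterator would yield them.
    let trees = [(419_200u32, "tree_a"), (419_201, "tree_b"), (419_202, "tree_c")];
    let pairs: Vec<_> = trees
        .iter()
        .tuple_windows()
        .map(|((prev_height, _prev), (height, tree))| (*prev_height, *height, *tree))
        .collect();
    // Every tree is paired with its predecessor, and the first entry never
    // appears as the "current" tree, so it is skipped by construction.
    assert_eq!(
        pairs,
        vec![(419_200, 419_201, "tree_b"), (419_201, 419_202, "tree_c")]
    );
}
```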
for (prev_end_height, prev_tree, end_height, tree) in subtrees {
// Return early if the upgrade is cancelled.
if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) {
return Err(CancelFormatChange);
}
// Empty note commitment trees can't contain subtrees.
let Some(end_of_block_subtree_index) = tree.subtree_index() else {
prev_tree = Some(tree);
continue;
};
// Blocks cannot complete multiple level 16 subtrees,
// so the subtree index can increase by a maximum of 1 every ~20 blocks.
// If this block does complete a subtree, the subtree is either completed by a note before
// the final note (so the final note is in the next subtree), or by the final note
// (so the final note is the end of this subtree).
if let Some((index, node)) = tree.completed_subtree_index_and_root() {
// If the leaf at the end of the block is the final leaf in a subtree,
// we already have that subtree root available in the tree.
assert_eq!(
index.0, subtree_count,
"trees are inserted in order with no gaps"
);
write_sapling_subtree(upgrade_db, index, height, node);
subtree_count += 1;
} else if end_of_block_subtree_index.0 > subtree_count {
// If the leaf at the end of the block is in the next subtree,
// we need to calculate that subtree root based on the tree from the previous block.
let mut prev_tree = prev_tree
.take()
.expect("should have some previous sapling frontier");
let sapling_nct = Arc::make_mut(&mut prev_tree);
let block = upgrade_db
.block(height.into())
.expect("height with note commitment tree should have block");
for sapling_note_commitment in block.sapling_note_commitments() {
// Return early if there is a cancel signal.
if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) {
return Err(CancelFormatChange);
}
sapling_nct
.append(*sapling_note_commitment)
.expect("finalized notes should append successfully");
// The loop always breaks on this condition,
// because we checked the block has enough commitments,
// and that the final commitment in the block doesn't complete a subtree.
if sapling_nct.is_complete_subtree() {
break;
}
}
let (index, node) = sapling_nct.completed_subtree_index_and_root().expect(
"block should have completed a subtree before its final note commitment: \
already checked is_complete_subtree(), and that the block must complete a subtree",
);
assert_eq!(
index.0, subtree_count,
"trees are inserted in order with no gaps"
);
write_sapling_subtree(upgrade_db, index, height, node);
subtree_count += 1;
}
prev_tree = Some(tree);
let subtree =
calculate_sapling_subtree(upgrade_db, prev_end_height, prev_tree, end_height, tree);
write_sapling_subtree(upgrade_db, subtree);
}
let mut subtree_count = 0;
let mut prev_tree: Option<_> = None;
for (height, tree) in upgrade_db.orchard_tree_by_height_range(..=initial_tip_height) {
// Return early if there is a cancel signal.
// Generate a list of orchard subtree inputs: previous and current trees, and their end heights.
let subtrees = upgrade_db
.orchard_tree_by_height_range(..=initial_tip_height)
// We need both the tree and its previous tree for each shielded block.
.tuple_windows()
.map(|((prev_end_height, prev_tree), (end_height, tree))| {
(prev_end_height, prev_tree, end_height, tree)
})
// Find new subtrees.
.filter(|(_prev_end_height, prev_tree, _end_height, tree)| {
tree.contains_new_subtree(prev_tree)
});
for (prev_end_height, prev_tree, end_height, tree) in subtrees {
// Return early if the upgrade is cancelled.
if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) {
return Err(CancelFormatChange);
}
// Empty note commitment trees can't contain subtrees.
let Some(end_of_block_subtree_index) = tree.subtree_index() else {
prev_tree = Some(tree);
continue;
};
let subtree =
calculate_orchard_subtree(upgrade_db, prev_end_height, prev_tree, end_height, tree);
write_orchard_subtree(upgrade_db, subtree);
}
// Blocks cannot complete multiple level 16 subtrees,
// so the subtree index can increase by a maximum of 1 every ~20 blocks.
// If this block does complete a subtree, the subtree is either completed by a note before
// the final note (so the final note is in the next subtree), or by the final note
// (so the final note is the end of this subtree).
Ok(())
}
if let Some((index, node)) = tree.completed_subtree_index_and_root() {
// If the leaf at the end of the block is the final leaf in a subtree,
// we already have that subtree root available in the tree.
assert_eq!(
index.0, subtree_count,
"trees are inserted in order with no gaps"
);
write_orchard_subtree(upgrade_db, index, height, node);
subtree_count += 1;
} else if end_of_block_subtree_index.0 > subtree_count {
// If the leaf at the end of the block is in the next subtree,
// we need to calculate that subtree root based on the tree from the previous block.
let mut prev_tree = prev_tree
.take()
.expect("should have some previous orchard frontier");
let orchard_nct = Arc::make_mut(&mut prev_tree);
/// Reset data from previous upgrades. This data can be complete or incomplete.
///
/// Returns `Ok` if the upgrade completed, and `Err` if it was cancelled.
#[allow(clippy::unwrap_in_result)]
#[instrument(skip(upgrade_db, cancel_receiver))]
pub fn reset(
_initial_tip_height: Height,
upgrade_db: &ZebraDb,
cancel_receiver: &mpsc::Receiver<CancelFormatChange>,
) -> Result<(), CancelFormatChange> {
// Return early if the upgrade is cancelled.
if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) {
return Err(CancelFormatChange);
}
let block = upgrade_db
.block(height.into())
.expect("height with note commitment tree should have block");
// This doesn't delete the maximum index, but the consensus rules make that subtree impossible.
// (Adding a note to a full note commitment tree is an error.)
//
// TODO: convert zs_delete_range() to take std::ops::RangeBounds, and delete the upper bound.
let mut batch = DiskWriteBatch::new();
batch.delete_range_sapling_subtree(upgrade_db, 0.into(), u16::MAX.into());
upgrade_db
.write_batch(batch)
.expect("deleting old sapling note commitment subtrees is a valid database operation");
for orchard_note_commitment in block.orchard_note_commitments() {
// Return early if there is a cancel signal.
if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) {
return Err(CancelFormatChange);
}
if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) {
return Err(CancelFormatChange);
}
orchard_nct
.append(*orchard_note_commitment)
.expect("finalized notes should append successfully");
let mut batch = DiskWriteBatch::new();
batch.delete_range_orchard_subtree(upgrade_db, 0.into(), u16::MAX.into());
upgrade_db
.write_batch(batch)
.expect("deleting old orchard note commitment subtrees is a valid database operation");
// The loop always breaks on this condition,
// because we checked the block has enough commitments,
// and that the final commitment in the block doesn't complete a subtree.
if orchard_nct.is_complete_subtree() {
break;
}
}
Ok(())
}
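`reset` clears both subtree column families with a half-open range delete: every index below `u16::MAX` is removed, and the one unreachable top index is tolerated because a full note commitment tree rejects further appends. A sketch of the half-open semantics, with an ordered map standing in for the column family:

```rust
use std::collections::BTreeMap;

fn main() {
    // Model a subtree column family as an ordered map keyed by subtree index.
    let mut subtrees: BTreeMap<u16, &str> = BTreeMap::new();
    subtrees.insert(0, "root_0");
    subtrees.insert(1, "root_1");
    // Half-open deletion, like delete_range_sapling_subtree(db, 0, u16::MAX):
    // every index up to, but not including, the upper bound is removed.
    let doomed: Vec<u16> = subtrees.range(0..u16::MAX).map(|(k, _)| *k).collect();
    for index in doomed {
        subtrees.remove(&index);
    }
    assert!(subtrees.is_empty());
}
```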
let (index, node) = orchard_nct.completed_subtree_index_and_root().expect(
"block should have completed a subtree before its final note commitment: \
already checked is_complete_subtree(), and that the block must complete a subtree",
);
/// Quickly check that the first calculated subtree is correct.
///
/// This allows us to fail the upgrade quickly in tests and during development,
/// rather than waiting ~20 minutes to see if it failed.
///
/// # Panics
///
/// If a note commitment subtree is missing or incorrect.
pub fn quick_check(db: &ZebraDb) {
let sapling_result = quick_check_sapling_subtrees(db);
let orchard_result = quick_check_orchard_subtrees(db);
assert_eq!(
index.0, subtree_count,
"trees are inserted in order with no gaps"
);
write_orchard_subtree(upgrade_db, index, height, node);
subtree_count += 1;
}
if sapling_result.is_err() || orchard_result.is_err() {
// TODO: when the check functions are refactored so they are called from a single function,
// move this panic into that function, but still log a detailed message here
panic!(
"missing or bad first subtree: sapling: {sapling_result:?}, orchard: {orchard_result:?}"
);
}
}
prev_tree = Some(tree);
/// A quick test vector that allows us to fail an incorrect upgrade within a few seconds.
fn first_sapling_mainnet_subtree() -> NoteCommitmentSubtree<sapling::tree::Node> {
// This test vector was generated using the command:
// ```sh
// zcash-cli z_getsubtreesbyindex sapling 0 1
// ```
NoteCommitmentSubtree {
index: 0.into(),
node: hex!("754bb593ea42d231a7ddf367640f09bbf59dc00f2c1d2003cc340e0c016b5b13")
.as_slice()
.try_into()
.expect("test vector is valid"),
end: Height(558822),
}
}
/// A quick test vector that allows us to fail an incorrect upgrade within a few seconds.
fn first_orchard_mainnet_subtree() -> NoteCommitmentSubtree<orchard::tree::Node> {
// This test vector was generated using the command:
// ```sh
// zcash-cli z_getsubtreesbyindex orchard 0 1
// ```
NoteCommitmentSubtree {
index: 0.into(),
node: hex!("d4e323b3ae0cabfb6be4087fec8c66d9a9bbfc354bf1d9588b6620448182063b")
.as_slice()
.try_into()
.expect("test vector is valid"),
end: Height(1707429),
}
}
/// Quickly check that the first calculated sapling subtree is correct.
///
/// This allows us to fail the upgrade quickly in tests and during development,
/// rather than waiting ~20 minutes to see if it failed.
///
/// Returns an error if a note commitment subtree is missing or incorrect.
fn quick_check_sapling_subtrees(db: &ZebraDb) -> Result<(), &'static str> {
// We check the first sapling subtree on mainnet, so skip this check if it isn't available.
if db.network() != Mainnet {
return Ok(());
}
let Some(NoteCommitmentSubtreeIndex(tip_subtree_index)) = db.sapling_tree().subtree_index()
else {
return Ok(());
};
if tip_subtree_index == 0 && !db.sapling_tree().is_complete_subtree() {
return Ok(());
}
// Find the first complete subtree: previous and current trees, and their end heights.
let first_complete_subtree = db
.sapling_tree_by_height_range(..)
// We need both the tree and its previous tree for each shielded block.
.tuple_windows()
.map(|((prev_end_height, prev_tree), (end_height, tree))| {
(prev_end_height, prev_tree, end_height, tree)
})
.find(|(_prev_end_height, prev_tree, _end_height, tree)| {
tree.contains_new_subtree(prev_tree)
});
let Some((prev_end_height, prev_tree, end_height, tree)) = first_complete_subtree else {
let result = Err("iterator did not find complete subtree, but the tree has it");
error!(?result);
return result;
};
// Creating this test vector involves a cryptographic check, so only do it once.
let expected_subtree = first_sapling_mainnet_subtree();
let db_subtree = calculate_sapling_subtree(db, prev_end_height, prev_tree, end_height, tree);
if db_subtree != expected_subtree {
let result = Err("first subtree did not match expected test vector");
error!(?result, ?db_subtree, ?expected_subtree);
return result;
}
Ok(())
}
/// Quickly check that the first calculated orchard subtree is correct.
///
/// This allows us to fail the upgrade quickly in tests and during development,
/// rather than waiting ~20 minutes to see if it failed.
///
/// Returns an error if a note commitment subtree is missing or incorrect.
fn quick_check_orchard_subtrees(db: &ZebraDb) -> Result<(), &'static str> {
// We check the first orchard subtree on mainnet, so skip this check if it isn't available.
if db.network() != Mainnet {
return Ok(());
}
let Some(NoteCommitmentSubtreeIndex(tip_subtree_index)) = db.orchard_tree().subtree_index()
else {
return Ok(());
};
if tip_subtree_index == 0 && !db.orchard_tree().is_complete_subtree() {
return Ok(());
}
// Find the first complete subtree: previous and current trees, and their end heights.
let first_complete_subtree = db
.orchard_tree_by_height_range(..)
// We need both the tree and its previous tree for each shielded block.
.tuple_windows()
.map(|((prev_end_height, prev_tree), (end_height, tree))| {
(prev_end_height, prev_tree, end_height, tree)
})
.find(|(_prev_end_height, prev_tree, _end_height, tree)| {
tree.contains_new_subtree(prev_tree)
});
let Some((prev_end_height, prev_tree, end_height, tree)) = first_complete_subtree else {
let result = Err("iterator did not find complete subtree, but the tree has it");
error!(?result);
return result;
};
// Creating this test vector involves a cryptographic check, so only do it once.
let expected_subtree = first_orchard_mainnet_subtree();
let db_subtree = calculate_orchard_subtree(db, prev_end_height, prev_tree, end_height, tree);
if db_subtree != expected_subtree {
let result = Err("first subtree did not match expected test vector");
error!(?result, ?db_subtree, ?expected_subtree);
return result;
}
Ok(())
@@ -180,24 +293,29 @@ pub fn run(
///
/// If a note commitment subtree is missing or incorrect.
pub fn check(db: &ZebraDb) {
let check_sapling_subtrees = check_sapling_subtrees(db);
let check_orchard_subtrees = check_orchard_subtrees(db);
if !check_sapling_subtrees || !check_orchard_subtrees {
// TODO: make this a panic before releasing the subtree change (#7532)
error!("missing or bad subtree(s)");
// This check is partly redundant, but we want to make sure it's never missed.
quick_check(db);
let sapling_result = check_sapling_subtrees(db);
let orchard_result = check_orchard_subtrees(db);
if sapling_result.is_err() || orchard_result.is_err() {
// TODO: when the check functions are refactored so they are called from a single function,
// move this panic into that function, but still log a detailed message here
panic!(
"missing or bad subtree(s): sapling: {sapling_result:?}, orchard: {orchard_result:?}"
);
}
}
/// Check that Sapling note commitment subtrees were correctly added.
///
/// # Panics
///
/// If a note commitment subtree is missing or incorrect.
fn check_sapling_subtrees(db: &ZebraDb) -> bool {
/// Returns an error if a note commitment subtree is missing or incorrect.
fn check_sapling_subtrees(db: &ZebraDb) -> Result<(), &'static str> {
let Some(NoteCommitmentSubtreeIndex(mut first_incomplete_subtree_index)) =
db.sapling_tree().subtree_index()
else {
return true;
return Ok(());
};
// If there are no incomplete subtrees in the tree, also expect a subtree for the final index.
@@ -205,51 +323,48 @@ fn check_sapling_subtrees(db: &ZebraDb) -> bool {
first_incomplete_subtree_index += 1;
}
let mut is_valid = true;
let mut result = Ok(());
for index in 0..first_incomplete_subtree_index {
// Check that there's a continuous range of subtrees from index [0, first_incomplete_subtree_index)
let Some(subtree) = db.sapling_subtree_by_index(index) else {
error!(index, "missing subtree");
is_valid = false;
result = Err("missing subtree");
error!(?result, index);
continue;
};
// Check that there was a sapling note at the subtree's end height.
let Some(tree) = db.sapling_tree_by_height(&subtree.end) else {
error!(?subtree.end, "missing note commitment tree at subtree completion height");
is_valid = false;
result = Err("missing note commitment tree at subtree completion height");
error!(?result, ?subtree.end);
continue;
};
// Check the index and root if the sapling note commitment tree at this height is a complete subtree.
if let Some((index, node)) = tree.completed_subtree_index_and_root() {
if subtree.index != index {
error!("completed subtree indexes should match");
is_valid = false;
result = Err("completed subtree indexes should match");
error!(?result);
}
if subtree.node != node {
error!("completed subtree roots should match");
is_valid = false;
result = Err("completed subtree roots should match");
error!(?result);
}
}
// Check that the final note has a greater subtree index if it didn't complete a subtree.
else {
let Some(prev_tree) = db.sapling_tree_by_height(&subtree.end.previous()) else {
error!(?subtree.end, "missing note commitment tree at subtree completion height");
is_valid = false;
result = Err("missing note commitment tree below subtree completion height");
error!(?result, ?subtree.end);
continue;
};
let prev_subtree_index = prev_tree.subtree_index();
let subtree_index = tree.subtree_index();
if subtree_index <= prev_subtree_index {
error!(
?subtree_index,
?prev_subtree_index,
"note commitment tree at end height should have incremented subtree index"
);
is_valid = false;
result =
Err("note commitment tree at end height should have incremented subtree index");
error!(?result, ?subtree_index, ?prev_subtree_index,);
}
}
}
@@ -272,47 +387,47 @@
{
// Check that there's an entry for every completed sapling subtree root in all sapling trees
let Some(subtree) = db.sapling_subtree_by_index(index) else {
error!(?index, "missing subtree");
is_valid = false;
result = Err("missing subtree");
error!(?result, index);
continue;
};
// Check that the subtree end height matches that in the sapling trees.
if subtree.end != height {
let is_complete = tree.is_complete_subtree();
error!(?subtree.end, ?height, ?index, ?is_complete, "bad sapling subtree end height");
is_valid = false;
result = Err("bad sapling subtree end height");
error!(?result, ?subtree.end, ?height, ?index, ?is_complete, );
}
// Check the root if the sapling note commitment tree at this height is a complete subtree.
if let Some((_index, node)) = tree.completed_subtree_index_and_root() {
if subtree.node != node {
error!("completed subtree roots should match");
is_valid = false;
result = Err("completed subtree roots should match");
error!(?result);
}
}
}
if !is_valid {
if result.is_err() {
error!(
?result,
?subtree_count,
first_incomplete_subtree_index, "missing or bad sapling subtrees"
first_incomplete_subtree_index,
"missing or bad sapling subtrees"
);
}
is_valid
result
}
/// Check that Orchard note commitment subtrees were correctly added.
///
/// # Panics
///
/// If a note commitment subtree is missing or incorrect.
fn check_orchard_subtrees(db: &ZebraDb) -> bool {
/// Returns an error if a note commitment subtree is missing or incorrect.
fn check_orchard_subtrees(db: &ZebraDb) -> Result<(), &'static str> {
let Some(NoteCommitmentSubtreeIndex(mut first_incomplete_subtree_index)) =
db.orchard_tree().subtree_index()
else {
return true;
return Ok(());
};
// If there are no incomplete subtrees in the tree, also expect a subtree for the final index.
@@ -320,51 +435,48 @@ fn check_orchard_subtrees(db: &ZebraDb) -> bool {
first_incomplete_subtree_index += 1;
}
let mut is_valid = true;
let mut result = Ok(());
for index in 0..first_incomplete_subtree_index {
// Check that there's a continuous range of subtrees from index [0, first_incomplete_subtree_index)
let Some(subtree) = db.orchard_subtree_by_index(index) else {
error!(index, "missing subtree");
is_valid = false;
result = Err("missing subtree");
error!(?result, index);
continue;
};
// Check that there was an orchard note at the subtree's end height.
let Some(tree) = db.orchard_tree_by_height(&subtree.end) else {
error!(?subtree.end, "missing note commitment tree at subtree completion height");
is_valid = false;
result = Err("missing note commitment tree at subtree completion height");
error!(?result, ?subtree.end);
continue;
};
// Check the index and root if the orchard note commitment tree at this height is a complete subtree.
if let Some((index, node)) = tree.completed_subtree_index_and_root() {
if subtree.index != index {
error!("completed subtree indexes should match");
is_valid = false;
result = Err("completed subtree indexes should match");
error!(?result);
}
if subtree.node != node {
error!("completed subtree roots should match");
is_valid = false;
result = Err("completed subtree roots should match");
error!(?result);
}
}
// Check that the final note has a greater subtree index if it didn't complete a subtree.
else {
let Some(prev_tree) = db.orchard_tree_by_height(&subtree.end.previous()) else {
error!(?subtree.end, "missing note commitment tree at subtree completion height");
is_valid = false;
result = Err("missing note commitment tree below subtree completion height");
error!(?result, ?subtree.end);
continue;
};
let prev_subtree_index = prev_tree.subtree_index();
let subtree_index = tree.subtree_index();
if subtree_index <= prev_subtree_index {
error!(
?subtree_index,
?prev_subtree_index,
"note commitment tree at end height should have incremented subtree index"
);
is_valid = false;
result =
Err("note commitment tree at end height should have incremented subtree index");
error!(?result, ?subtree_index, ?prev_subtree_index,);
}
}
}
@@ -374,7 +486,7 @@ fn check_orchard_subtrees(db: &ZebraDb) -> bool {
.orchard_tree_by_height_range(..)
// Exclude empty orchard tree and add subtree indexes
.filter_map(|(height, tree)| Some((tree.subtree_index()?, height, tree)))
// Exclude heights that don't complete a subtree and count completed subtree
// Exclude heights that don't complete a subtree and count completed subtrees
.filter_map(|(subtree_index, height, tree)| {
if tree.is_complete_subtree() || subtree_index.0 > subtree_count {
let subtree_index = subtree_count;
@@ -387,46 +499,294 @@ fn check_orchard_subtrees(db: &ZebraDb) -> bool {
{
// Check that there's an entry for every completed orchard subtree root in all orchard trees
let Some(subtree) = db.orchard_subtree_by_index(index) else {
error!(?index, "missing subtree");
is_valid = false;
result = Err("missing subtree");
error!(?result, index);
continue;
};
// Check that the subtree end height matches that in the orchard trees.
if subtree.end != height {
let is_complete = tree.is_complete_subtree();
error!(?subtree.end, ?height, ?index, ?is_complete, "bad orchard subtree end height");
is_valid = false;
result = Err("bad orchard subtree end height");
error!(?result, ?subtree.end, ?height, ?index, ?is_complete, );
}
// Check the root if the orchard note commitment tree at this height is a complete subtree.
if let Some((_index, node)) = tree.completed_subtree_index_and_root() {
if subtree.node != node {
error!("completed subtree roots should match");
is_valid = false;
result = Err("completed subtree roots should match");
error!(?result);
}
}
}
if !is_valid {
if result.is_err() {
error!(
?result,
?subtree_count,
first_incomplete_subtree_index, "missing or bad orchard subtrees"
first_incomplete_subtree_index,
"missing or bad orchard subtrees"
);
}
is_valid
result
}
/// Calculates a note commitment subtree for Sapling, reading blocks from `read_db` if needed.
///
/// The subtree must be completed by a note commitment in the block at `end_height`.
/// `tree` is the tree for that block, and `prev_tree` is the tree for the previous block.
///
/// `prev_tree` is only used to rebuild the subtree if it was completed without using the last
/// note commitment in the block at `end_height`.
///
/// # Panics
///
/// If a subtree is not completed by a note commitment in the block at `end_height`.
#[must_use = "subtree should be written to the database after it is calculated"]
#[instrument(skip(read_db, prev_tree, tree))]
fn calculate_sapling_subtree(
read_db: &ZebraDb,
prev_end_height: Height,
prev_tree: Arc<sapling::tree::NoteCommitmentTree>,
end_height: Height,
tree: Arc<sapling::tree::NoteCommitmentTree>,
) -> NoteCommitmentSubtree<sapling::tree::Node> {
// If a subtree is completed by a note commitment in the block at `end_height`,
// then that subtree can be completed in two different ways:
if let Some((index, node)) = tree.completed_subtree_index_and_root() {
// If the subtree is completed by the last note commitment in that block,
// we already have that subtree root available in the tree.
NoteCommitmentSubtree::new(index, end_height, node)
} else {
// If the subtree is completed without using the last note commitment in the block,
// we need to calculate the subtree root, starting with the tree from the previous block.
// TODO: move the assertion/panic log string formatting into a separate function?
let prev_position = prev_tree.position().unwrap_or_else(|| {
panic!(
"previous block must have a partial subtree:\n\
previous subtree:\n\
height: {prev_end_height:?}\n\
current subtree:\n\
height: {end_height:?}"
)
});
let prev_index = prev_tree
.subtree_index()
.expect("previous block must have a partial subtree");
let prev_remaining_notes = prev_tree.remaining_subtree_leaf_nodes();
let current_position = tree.position().unwrap_or_else(|| {
panic!(
"current block must have a subtree:\n\
previous subtree:\n\
height: {prev_end_height:?}\n\
index: {prev_index}\n\
position: {prev_position}\n\
remaining: {prev_remaining_notes}\n\
current subtree:\n\
height: {end_height:?}"
)
});
let current_index = tree
.subtree_index()
.expect("current block must have a subtree");
let current_remaining_notes = tree.remaining_subtree_leaf_nodes();
assert_eq!(
prev_index.0 + 1,
current_index.0,
"subtree must have been completed by the current block:\n\
previous subtree:\n\
height: {prev_end_height:?}\n\
index: {prev_index}\n\
position: {prev_position}\n\
remaining: {prev_remaining_notes}\n\
current subtree:\n\
height: {end_height:?}\n\
index: {current_index}\n\
position: {current_position}\n\
remaining: {current_remaining_notes}"
);
// Get the missing notes needed to complete the subtree.
//
// TODO: consider just reading the block's transactions from the database file,
// because we don't use the block header data at all.
let block = read_db
.block(end_height.into())
.expect("height with note commitment tree should have block");
let sapling_note_commitments = block
.sapling_note_commitments()
.take(prev_remaining_notes)
.cloned()
.collect();
// This takes less than 1 second per tree, so we don't need to make it cancellable.
let (sapling_nct, subtree) = NoteCommitmentTrees::update_sapling_note_commitment_tree(
prev_tree,
sapling_note_commitments,
)
.expect("finalized notes should append successfully");
let (index, node) = subtree.unwrap_or_else(|| {
panic!(
"already checked that the block completed a subtree:\n\
updated subtree:\n\
index: {:?}\n\
position: {:?}\n\
remaining notes: {}\n\
original previous subtree:\n\
height: {prev_end_height:?}\n\
index: {prev_index}\n\
position: {prev_position}\n\
remaining: {prev_remaining_notes}\n\
original current subtree:\n\
height: {end_height:?}\n\
index: {current_index}\n\
position: {current_position}\n\
remaining: {current_remaining_notes}",
sapling_nct.subtree_index(),
sapling_nct.position(),
sapling_nct.remaining_subtree_leaf_nodes(),
)
});
NoteCommitmentSubtree::new(index, end_height, node)
}
}
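When the rebuild path runs, it appends exactly `prev_remaining_notes` commitments from the completing block instead of looping until `is_complete_subtree()` as the old code did. A sketch of that slicing, with made-up commitment names:

```rust
fn main() {
    // The previous block's frontier needs 3 more leaves to finish its subtree,
    // and the completing block carries 5 note commitments.
    let prev_remaining_notes = 3;
    let block_commitments = ["cm0", "cm1", "cm2", "cm3", "cm4"];
    // Appending only the first `prev_remaining_notes` commitments lands the
    // rebuilt tree exactly on the subtree boundary, where its cached root
    // and index are read back.
    let appended: Vec<&str> = block_commitments
        .iter()
        .copied()
        .take(prev_remaining_notes)
        .collect();
    assert_eq!(appended, ["cm0", "cm1", "cm2"]);
}
```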
/// Calculates a note commitment subtree for Orchard, reading blocks from `read_db` if needed.
///
/// The subtree must be completed by a note commitment in the block at `end_height`.
/// `tree` is the tree for that block, and `prev_tree` is the tree for the previous block.
///
/// `prev_tree` is only used to rebuild the subtree if it was completed without using the last
/// note commitment in the block at `end_height`.
///
/// # Panics
///
/// If a subtree is not completed by a note commitment in the block at `end_height`.
#[must_use = "subtree should be written to the database after it is calculated"]
#[instrument(skip(read_db, prev_tree, tree))]
fn calculate_orchard_subtree(
read_db: &ZebraDb,
prev_end_height: Height,
prev_tree: Arc<orchard::tree::NoteCommitmentTree>,
end_height: Height,
tree: Arc<orchard::tree::NoteCommitmentTree>,
) -> NoteCommitmentSubtree<orchard::tree::Node> {
// If a subtree is completed by a note commitment in the block at `end_height`,
// then that subtree can be completed in two different ways:
if let Some((index, node)) = tree.completed_subtree_index_and_root() {
// If the subtree is completed by the last note commitment in that block,
// we already have that subtree root available in the tree.
NoteCommitmentSubtree::new(index, end_height, node)
} else {
// If the subtree is completed without using the last note commitment in the block,
// we need to calculate the subtree root, starting with the tree from the previous block.
// TODO: move the assertion/panic log string formatting into a separate function?
let prev_position = prev_tree.position().unwrap_or_else(|| {
panic!(
"previous block must have a partial subtree:\n\
previous subtree:\n\
height: {prev_end_height:?}\n\
current subtree:\n\
height: {end_height:?}"
)
});
let prev_index = prev_tree
.subtree_index()
.expect("previous block must have a partial subtree");
let prev_remaining_notes = prev_tree.remaining_subtree_leaf_nodes();
let current_position = tree.position().unwrap_or_else(|| {
panic!(
"current block must have a subtree:\n\
previous subtree:\n\
height: {prev_end_height:?}\n\
index: {prev_index}\n\
position: {prev_position}\n\
remaining: {prev_remaining_notes}\n\
current subtree:\n\
height: {end_height:?}"
)
});
let current_index = tree
.subtree_index()
.expect("current block must have a subtree");
let current_remaining_notes = tree.remaining_subtree_leaf_nodes();
assert_eq!(
prev_index.0 + 1,
current_index.0,
"subtree must have been completed by the current block:\n\
previous subtree:\n\
height: {prev_end_height:?}\n\
index: {prev_index}\n\
position: {prev_position}\n\
remaining: {prev_remaining_notes}\n\
current subtree:\n\
height: {end_height:?}\n\
index: {current_index}\n\
position: {current_position}\n\
remaining: {current_remaining_notes}"
);
// Get the missing notes needed to complete the subtree.
//
// TODO: consider just reading the block's transactions from the database file,
// because we don't use the block header data at all.
let block = read_db
.block(end_height.into())
.expect("height with note commitment tree should have block");
let orchard_note_commitments = block
.orchard_note_commitments()
.take(prev_remaining_notes)
.cloned()
.collect();
// This takes less than 1 second per tree, so we don't need to make it cancellable.
let (orchard_nct, subtree) = NoteCommitmentTrees::update_orchard_note_commitment_tree(
prev_tree,
orchard_note_commitments,
)
.expect("finalized notes should append successfully");
let (index, node) = subtree.unwrap_or_else(|| {
panic!(
"already checked that the block completed a subtree:\n\
updated subtree:\n\
index: {:?}\n\
position: {:?}\n\
remaining notes: {}\n\
original previous subtree:\n\
height: {prev_end_height:?}\n\
index: {prev_index}\n\
position: {prev_position}\n\
remaining: {prev_remaining_notes}\n\
original current subtree:\n\
height: {end_height:?}\n\
index: {current_index}\n\
position: {current_position}\n\
remaining: {current_remaining_notes}",
orchard_nct.subtree_index(),
orchard_nct.position(),
orchard_nct.remaining_subtree_leaf_nodes(),
)
});
NoteCommitmentSubtree::new(index, end_height, node)
}
}
/// Writes a Sapling note commitment subtree to `upgrade_db`.
fn write_sapling_subtree(
upgrade_db: &ZebraDb,
index: NoteCommitmentSubtreeIndex,
height: Height,
node: sapling::tree::Node,
subtree: NoteCommitmentSubtree<sapling::tree::Node>,
) {
let subtree = NoteCommitmentSubtree::new(index, height, node);
let mut batch = DiskWriteBatch::new();
batch.insert_sapling_subtree(upgrade_db, &subtree);
@@ -435,22 +795,18 @@ fn write_sapling_subtree(
.write_batch(batch)
.expect("writing sapling note commitment subtrees should always succeed.");
if index.0 % 100 == 0 {
info!(?height, index = ?index.0, "calculated and added sapling subtree");
if subtree.index.0 % 100 == 0 {
info!(end_height = ?subtree.end, index = ?subtree.index.0, "calculated and added sapling subtree");
}
// This log happens about once per second on recent machines with SSD disks.
debug!(?height, index = ?index.0, ?node, "calculated and added sapling subtree");
debug!(end_height = ?subtree.end, index = ?subtree.index.0, "calculated and added sapling subtree");
}
/// Writes a Orchard note commitment subtree to `upgrade_db`.
/// Writes an Orchard note commitment subtree to `upgrade_db`.
fn write_orchard_subtree(
upgrade_db: &ZebraDb,
index: NoteCommitmentSubtreeIndex,
height: Height,
node: orchard::tree::Node,
subtree: NoteCommitmentSubtree<orchard::tree::Node>,
) {
let subtree = NoteCommitmentSubtree::new(index, height, node);
let mut batch = DiskWriteBatch::new();
batch.insert_orchard_subtree(upgrade_db, &subtree);
@@ -459,9 +815,9 @@ fn write_orchard_subtree(
.write_batch(batch)
.expect("writing orchard note commitment subtrees should always succeed.");
if index.0 % 300 == 0 {
info!(?height, index = ?index.0, "calculated and added orchard subtree");
if subtree.index.0 % 100 == 0 {
info!(end_height = ?subtree.end, index = ?subtree.index.0, "calculated and added orchard subtree");
}
// This log happens about 3 times per second on recent machines with SSD disks.
debug!(?height, index = ?index.0, ?node, "calculated and added orchard subtree");
// This log happens about once per second on recent machines with SSD disks.
debug!(end_height = ?subtree.end, index = ?subtree.index.0, "calculated and added orchard subtree");
}


@@ -118,13 +118,22 @@ impl ZebraDb {
// If we're re-opening a previously upgraded or newly created database,
// the database format should be valid.
// (There's no format change here, so the format change checks won't run.)
//
// Do the quick checks first, then the slower checks.
upgrade::add_subtrees::quick_check(&db);
DbFormatChange::check_for_duplicate_trees(db.clone());
upgrade::add_subtrees::check(&db.clone());
upgrade::add_subtrees::check(&db);
}
db
}
/// Returns the configured network for this database.
pub fn network(&self) -> Network {
self.db.network()
}
/// Returns the `Path` where the files used by this database are located.
pub fn path(&self) -> &Path {
self.db.path()

View File

@ -167,7 +167,7 @@ impl ZebraDb {
Some(Arc::new(tree))
}
/// Returns the Sapling note commitment trees in the supplied range.
/// Returns the Sapling note commitment trees in the supplied range, in increasing height order.
#[allow(clippy::unwrap_in_result)]
pub fn sapling_tree_by_height_range<R>(
&self,
@ -300,7 +300,7 @@ impl ZebraDb {
Some(Arc::new(tree))
}
/// Returns the Orchard note commitment trees in the supplied range.
/// Returns the Orchard note commitment trees in the supplied range, in increasing height order.
#[allow(clippy::unwrap_in_result)]
pub fn orchard_tree_by_height_range<R>(
&self,
@ -577,6 +577,23 @@ impl DiskWriteBatch {
self.zs_delete_range(&sapling_tree_cf, from, to);
}
/// Deletes the range of Sapling subtrees at the given [`NoteCommitmentSubtreeIndex`]es.
/// Doesn't delete the upper bound.
pub fn delete_range_sapling_subtree(
&mut self,
zebra_db: &ZebraDb,
from: NoteCommitmentSubtreeIndex,
to: NoteCommitmentSubtreeIndex,
) {
let sapling_subtree_cf = zebra_db
.db
.cf_handle("sapling_note_commitment_subtree")
.unwrap();
// TODO: convert zs_delete_range() to take std::ops::RangeBounds
self.zs_delete_range(&sapling_subtree_cf, from, to);
}
// Orchard tree methods
/// Inserts the Orchard note commitment subtree.
@ -612,4 +629,21 @@ impl DiskWriteBatch {
// TODO: convert zs_delete_range() to take std::ops::RangeBounds
self.zs_delete_range(&orchard_tree_cf, from, to);
}
/// Deletes the range of Orchard subtrees at the given [`NoteCommitmentSubtreeIndex`]es.
/// Doesn't delete the upper bound.
pub fn delete_range_orchard_subtree(
&mut self,
zebra_db: &ZebraDb,
from: NoteCommitmentSubtreeIndex,
to: NoteCommitmentSubtreeIndex,
) {
let orchard_subtree_cf = zebra_db
.db
.cf_handle("orchard_note_commitment_subtree")
.unwrap();
// TODO: convert zs_delete_range() to take std::ops::RangeBounds
self.zs_delete_range(&orchard_subtree_cf, from, to);
}
}
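// Hedged usage sketch (hypothetical helper name): reset previously written
// subtrees before recalculating them. Both ranges are half-open, so subtree
// indexes 0..10 are deleted and index 10 is kept. Constructing the index from
// its raw integer and committing via `zebra_db.db.write_batch()` are
// assumptions based on the code above:
fn example_reset_subtrees(zebra_db: &ZebraDb) {
    let mut batch = DiskWriteBatch::new();
    batch.delete_range_sapling_subtree(
        zebra_db,
        NoteCommitmentSubtreeIndex(0),
        NoteCommitmentSubtreeIndex(10),
    );
    batch.delete_range_orchard_subtree(
        zebra_db,
        NoteCommitmentSubtreeIndex(0),
        NoteCommitmentSubtreeIndex(10),
    );
    zebra_db
        .db
        .write_batch(batch)
        .expect("deleting note commitment subtrees should always succeed");
}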

View File

@ -184,6 +184,8 @@ use common::{
test_type::TestType::{self, *},
};
use crate::common::cached_state::{wait_for_state_version_message, wait_for_state_version_upgrade};
/// The maximum amount of time that we allow the creation of a future to block the `tokio` executor.
///
/// This should be larger than the amount of time between thread time slices on a busy test VM.
@ -1780,6 +1782,9 @@ fn lightwalletd_integration_test(test_type: TestType) -> Result<()> {
return Ok(());
};
// Store the state version message so we can wait for the upgrade later if needed.
let state_version_message = wait_for_state_version_message(&mut zebrad)?;
if test_type.needs_zebra_cached_state() {
zebrad
.expect_stdout_line_matches(r"loaded Zebra state cache .*tip.*=.*Height\([0-9]{7}\)")?;
@ -1875,6 +1880,7 @@ fn lightwalletd_integration_test(test_type: TestType) -> Result<()> {
None
};
// Wait for zebrad and lightwalletd to sync, if needed.
let (mut zebrad, lightwalletd) = if test_type.needs_zebra_cached_state() {
if let Some((lightwalletd, lightwalletd_rpc_port)) = lightwalletd_and_port {
#[cfg(feature = "lightwalletd-grpc-tests")]
@ -1886,7 +1892,7 @@ fn lightwalletd_integration_test(test_type: TestType) -> Result<()> {
"waiting for zebrad and lightwalletd to sync...",
);
let (lightwalletd, zebrad) = wait_for_zebrad_and_lightwalletd_sync(
let (lightwalletd, mut zebrad) = wait_for_zebrad_and_lightwalletd_sync(
lightwalletd,
lightwalletd_rpc_port,
zebrad,
@ -1897,6 +1903,18 @@ fn lightwalletd_integration_test(test_type: TestType) -> Result<()> {
use_internet_connection,
)?;
// Before we write a cached state image, wait for a database upgrade.
//
// TODO: this line will hang if the state upgrade finishes before Zebra is synced.
// But that is unlikely with the 25.2 upgrade, because it takes 20+ minutes.
// If it happens for a later upgrade, this code can be moved earlier in the test,
// as long as all the cached states are version 25.2.2 or later.
wait_for_state_version_upgrade(
&mut zebrad,
&state_version_message,
database_format_version_in_code(),
)?;
(zebrad, Some(lightwalletd))
}
@ -1912,6 +1930,18 @@ fn lightwalletd_integration_test(test_type: TestType) -> Result<()> {
tracing::info!(?test_type, "waiting for zebrad to sync to the tip");
zebrad.expect_stdout_line_matches(SYNC_FINISHED_REGEX)?;
// Before we write a cached state image, wait for a database upgrade.
//
// TODO: this line will hang if the state upgrade finishes before Zebra is synced.
// But that is unlikely with the 25.2 upgrade, because it takes 20+ minutes.
// If it happens for a later upgrade, this code can be moved earlier in the test,
// as long as all the cached states are version 25.2.2 or later.
wait_for_state_version_upgrade(
&mut zebrad,
&state_version_message,
database_format_version_in_code(),
)?;
(zebrad, None)
}
} else {

View File

@ -11,6 +11,7 @@ use std::{
};
use color_eyre::eyre::{eyre, Result};
use semver::Version;
use tower::{util::BoxService, Service};
use zebra_chain::{
@ -21,6 +22,7 @@ use zebra_chain::{
};
use zebra_node_services::rpc_client::RpcRequestClient;
use zebra_state::{ChainTipChange, LatestChainTip, MAX_BLOCK_REORG_HEIGHT};
use zebra_test::command::TestChild;
use crate::common::{
launch::spawn_zebrad_for_rpc,
@ -35,6 +37,59 @@ pub const ZEBRA_CACHED_STATE_DIR: &str = "ZEBRA_CACHED_STATE_DIR";
pub type BoxStateService =
BoxService<zebra_state::Request, zebra_state::Response, zebra_state::BoxError>;
/// Waits for the startup logs generated by the cached state version checks.
/// Returns the state version log message.
///
/// This function should be called immediately after launching `zebrad`.
#[tracing::instrument(skip(zebrad))]
pub fn wait_for_state_version_message<T>(zebrad: &mut TestChild<T>) -> Result<String> {
tracing::info!(
zebrad = ?zebrad.cmd,
"launched zebrad, waiting for zebrad to open the state database..."
);
// Zebra logs one of these lines on startup, depending on the disk and running formats.
zebrad.expect_stdout_line_matches(
"(creating new database with the current format)|\
(trying to open older database format)|\
(trying to open newer database format)|\
(trying to open current database format)",
)
}
/// Waits for the `required_version` state upgrade to complete, if needed.
///
/// This function should be called with the output of [`wait_for_state_version_message()`].
#[tracing::instrument(skip(zebrad))]
pub fn wait_for_state_version_upgrade<T>(
zebrad: &mut TestChild<T>,
state_version_message: &str,
required_version: Version,
) -> Result<()> {
if state_version_message.contains("launching upgrade task") {
tracing::info!(
zebrad = ?zebrad.cmd,
%state_version_message,
%required_version,
"waiting for zebrad state upgrade..."
);
let upgrade_message = zebrad.expect_stdout_line_matches(&format!(
"marked database format as upgraded.*format_upgrade_version.*=.*{required_version}"
))?;
tracing::info!(
zebrad = ?zebrad.cmd,
%state_version_message,
%required_version,
%upgrade_message,
"zebrad state has been upgraded"
);
}
Ok(())
}
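// Hedged usage sketch (hypothetical helper name) of the two functions above
// together, mirroring the tests that call them; `zebrad` is assumed to have
// just been spawned:
fn example_wait_for_upgrade<T>(zebrad: &mut TestChild<T>) -> Result<()> {
    let state_version_message = wait_for_state_version_message(zebrad)?;
    // ... run the rest of the test while any upgrade task works in the background ...
    wait_for_state_version_upgrade(
        zebrad,
        &state_version_message,
        database_format_version_in_code(),
    )?;
    Ok(())
}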
/// Starts a state service using the provided `cache_dir` as the directory with the chain state.
#[tracing::instrument(skip(cache_dir))]
pub async fn start_state_service_with_cache_dir(

View File

@ -15,11 +15,12 @@ use tempfile::TempDir;
use zebra_chain::{
block::{Height, HeightDiff, TryIntoHeight},
parameters::Network,
parameters::Network::{self, *},
transparent::MIN_TRANSPARENT_COINBASE_MATURITY,
};
use zebra_consensus::MAX_CHECKPOINT_HEIGHT_GAP;
use zebra_node_services::rpc_client::RpcRequestClient;
use zebra_state::database_format_version_in_code;
use zebra_test::{
args,
command::{Arguments, TestDirExt, NO_MATCHES_REGEX_ITER},
@ -27,6 +28,7 @@ use zebra_test::{
};
use crate::common::{
cached_state::{wait_for_state_version_message, wait_for_state_version_upgrade},
launch::spawn_zebrad_for_rpc,
sync::{CHECKPOINT_VERIFIER_REGEX, SYNC_FINISHED_REGEX},
test_type::TestType::*,
@ -77,6 +79,23 @@ pub async fn run(network: Network) -> Result<()> {
return Ok(());
};
// Wait for the upgrade if needed.
// Currently we only write an image for testnet, which is quick.
// (Mainnet would need to wait at the end of this function, if the upgrade is long.)
if network == Testnet {
let state_version_message = wait_for_state_version_message(&mut zebrad)?;
// Before we write a cached state image, wait for a database upgrade.
//
// TODO: this line will hang if the state upgrade is slower than the RPC server spawn.
// But that is unlikely, because both 25.1 and 25.2 are quick on testnet.
wait_for_state_version_upgrade(
&mut zebrad,
&state_version_message,
database_format_version_in_code(),
)?;
}
let zebra_rpc_address = zebra_rpc_address.expect("zebra_checkpoints test must have RPC port");
tracing::info!(

View File

@ -43,8 +43,10 @@ use zebra_chain::{
parameters::NetworkUpgrade::{Nu5, Sapling},
serialization::ZcashDeserializeInto,
};
use zebra_state::latest_version_for_adding_subtrees;
use crate::common::{
cached_state::{wait_for_state_version_message, wait_for_state_version_upgrade},
launch::spawn_zebrad_for_rpc,
lightwalletd::{
can_spawn_lightwalletd_for_rpc, spawn_lightwalletd_for_rpc,
@ -97,6 +99,9 @@ pub async fn run() -> Result<()> {
let zebra_rpc_address = zebra_rpc_address.expect("lightwalletd test must have RPC port");
// Store the state version message so we can wait for the upgrade later if needed.
let state_version_message = wait_for_state_version_message(&mut zebrad)?;
tracing::info!(
?test_type,
?zebra_rpc_address,
@ -119,7 +124,7 @@ pub async fn run() -> Result<()> {
"spawned lightwalletd connected to zebrad, waiting for them both to sync...",
);
let (_lightwalletd, _zebrad) = wait_for_zebrad_and_lightwalletd_sync(
let (_lightwalletd, mut zebrad) = wait_for_zebrad_and_lightwalletd_sync(
lightwalletd,
lightwalletd_rpc_port,
zebrad,
@ -339,7 +344,59 @@ pub async fn run() -> Result<()> {
*zebra_test::vectors::SAPLING_TREESTATE_MAINNET_419201_STRING
);
// Call `z_getsubtreesbyindex` separately for
// Call `GetAddressUtxos` with the ZF funding stream address that will always have utxos
let utxos = rpc_client
.get_address_utxos(GetAddressUtxosArg {
addresses: vec!["t3dvVE3SQEi7kqNzwrfNePxZ1d4hUyztBA1".to_string()],
start_height: 1,
max_entries: 1,
})
.await?
.into_inner();
// As we requested one entry, we should get a response of length 1
assert_eq!(utxos.address_utxos.len(), 1);
// Call `GetAddressUtxosStream` with the ZF funding stream address that will always have utxos
let mut utxos_zf = rpc_client
.get_address_utxos_stream(GetAddressUtxosArg {
addresses: vec!["t3dvVE3SQEi7kqNzwrfNePxZ1d4hUyztBA1".to_string()],
start_height: 1,
max_entries: 2,
})
.await?
.into_inner();
let mut counter = 0;
while let Some(_utxos) = utxos_zf.message().await? {
counter += 1;
}
// As we are in a "in sync" chain we know there are more than 2 utxos for this address
// but we will receive the max of 2 from the stream response because we used a limit of 2 `max_entries`.
assert_eq!(2, counter);
// Call `GetLightdInfo`
let lightd_info = rpc_client.get_lightd_info(Empty {}).await?.into_inner();
// Make sure the subversion field is the Zebra user agent
assert_eq!(
lightd_info.zcashd_subversion,
zebrad::application::user_agent()
);
// Before we call `z_getsubtreesbyindex`, we might need to wait for a database upgrade.
//
// TODO: this line will hang if the state upgrade finishes before the subtree tests start.
// But that is unlikely with the 25.2 upgrade, because it takes 20+ minutes.
// If it happens for a later upgrade, this code can be moved earlier in the test,
// as long as all the cached states are version 25.2.2 or later.
wait_for_state_version_upgrade(
&mut zebrad,
&state_version_message,
latest_version_for_adding_subtrees(),
)?;
// Call `z_getsubtreesbyindex` separately for...
// ... Sapling.
let mut subtrees = rpc_client
@ -411,45 +468,5 @@ pub async fn run() -> Result<()> {
}
assert_eq!(counter, 2);
// Call `GetAddressUtxos` with the ZF funding stream address that will always have utxos
let utxos = rpc_client
.get_address_utxos(GetAddressUtxosArg {
addresses: vec!["t3dvVE3SQEi7kqNzwrfNePxZ1d4hUyztBA1".to_string()],
start_height: 1,
max_entries: 1,
})
.await?
.into_inner();
// As we requested one entry we should get a response of length 1
assert_eq!(utxos.address_utxos.len(), 1);
// Call `GetAddressUtxosStream` with the ZF funding stream address that will always have utxos
let mut utxos_zf = rpc_client
.get_address_utxos_stream(GetAddressUtxosArg {
addresses: vec!["t3dvVE3SQEi7kqNzwrfNePxZ1d4hUyztBA1".to_string()],
start_height: 1,
max_entries: 2,
})
.await?
.into_inner();
let mut counter = 0;
while let Some(_utxos) = utxos_zf.message().await? {
counter += 1;
}
// As we are in a "in sync" chain we know there are more than 2 utxos for this address
// but we will receive the max of 2 from the stream response because we used a limit of 2 `max_entries`.
assert_eq!(2, counter);
// Call `GetLightdInfo`
let lightd_info = rpc_client.get_lightd_info(Empty {}).await?.into_inner();
// Make sure the subversion field is zebra the user agent
assert_eq!(
lightd_info.zcashd_subversion,
zebrad::application::user_agent()
);
Ok(())
}