Merge pull request #1262 from nuttycom/sqlite_wallet/fix_multi_pool_checkpointing

zcash_client_sqlite: Ensure that Orchard and Sapling checkpoints are always available for the same block heights.
This commit is contained in:
Kris Nuttycombe 2024-03-12 19:01:17 -06:00 committed by GitHub
commit b3d06ba419
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
15 changed files with 504 additions and 162 deletions

7
Cargo.lock generated
View File

@ -1087,8 +1087,7 @@ dependencies = [
[[package]]
name = "incrementalmerkletree"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "361c467824d4d9d4f284be4b2608800839419dccc4d4608f28345237fe354623"
source = "git+https://github.com/nuttycom/incrementalmerkletree?rev=fa147c89c6c98a03bba745538f4e68d4eaed5146#fa147c89c6c98a03bba745538f4e68d4eaed5146"
dependencies = [
"either",
"proptest",
@ -2245,8 +2244,7 @@ dependencies = [
[[package]]
name = "shardtree"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dbf20c7a2747d9083092e3a3eeb9a7ed75577ae364896bebbc5e0bdcd4e97735"
source = "git+https://github.com/nuttycom/incrementalmerkletree?rev=fa147c89c6c98a03bba745538f4e68d4eaed5146#fa147c89c6c98a03bba745538f4e68d4eaed5146"
dependencies = [
"assert_matches",
"bitflags 2.4.1",
@ -3055,6 +3053,7 @@ name = "zcash_client_sqlite"
version = "0.9.1"
dependencies = [
"assert_matches",
"bls12_381",
"bs58",
"byteorder",
"document-features",

View File

@ -123,3 +123,5 @@ codegen-units = 1
[patch.crates-io]
orchard = { git = "https://github.com/zcash/orchard", rev = "e74879dd0ad0918f4ffe0826e03905cd819981bd" }
incrementalmerkletree = { git = "https://github.com/nuttycom/incrementalmerkletree", rev = "fa147c89c6c98a03bba745538f4e68d4eaed5146" }
shardtree = { git = "https://github.com/nuttycom/incrementalmerkletree", rev = "fa147c89c6c98a03bba745538f4e68d4eaed5146" }

View File

@ -68,7 +68,10 @@ use secrecy::SecretVec;
use shardtree::{error::ShardTreeError, store::ShardStore, ShardTree};
use zcash_keys::keys::HdSeedFingerprint;
use self::{chain::CommitmentTreeRoot, scanning::ScanRange};
use self::{
chain::{ChainState, CommitmentTreeRoot},
scanning::ScanRange,
};
use crate::{
address::UnifiedAddress,
decrypt::DecryptedOutput,
@ -1300,9 +1303,15 @@ pub trait WalletWrite: WalletRead {
/// along with the note commitments that were detected when scanning the block for transactions
/// pertaining to this wallet.
///
/// `blocks` must be sequential, in order of increasing block height
fn put_blocks(&mut self, blocks: Vec<ScannedBlock<Self::AccountId>>)
-> Result<(), Self::Error>;
/// ### Arguments
/// - `from_state` must be the chain state for the block height prior to the first
/// block in `blocks`.
/// - `blocks` must be sequential, in order of increasing block height.
fn put_blocks(
&mut self,
from_state: &ChainState,
blocks: Vec<ScannedBlock<Self::AccountId>>,
) -> Result<(), Self::Error>;
/// Updates the wallet's view of the blockchain.
///
@ -1442,9 +1451,11 @@ pub mod testing {
};
use super::{
chain::CommitmentTreeRoot, scanning::ScanRange, AccountBirthday, BlockMetadata,
DecryptedTransaction, InputSource, NullifierQuery, ScannedBlock, SentTransaction,
WalletCommitmentTrees, WalletRead, WalletSummary, WalletWrite, SAPLING_SHARD_HEIGHT,
chain::{ChainState, CommitmentTreeRoot},
scanning::ScanRange,
AccountBirthday, BlockMetadata, DecryptedTransaction, InputSource, NullifierQuery,
ScannedBlock, SentTransaction, WalletCommitmentTrees, WalletRead, WalletSummary,
WalletWrite, SAPLING_SHARD_HEIGHT,
};
#[cfg(feature = "transparent-inputs")]
@ -1684,6 +1695,7 @@ pub mod testing {
#[allow(clippy::type_complexity)]
fn put_blocks(
&mut self,
_from_state: &ChainState,
_blocks: Vec<ScannedBlock<Self::AccountId>>,
) -> Result<(), Self::Error> {
Ok(())

View File

@ -58,9 +58,12 @@
//! // the first element of the vector of suggested ranges.
//! match scan_ranges.first() {
//! Some(scan_range) if scan_range.priority() == ScanPriority::Verify => {
//! // Download the chain state for the block prior to the start of the range you want
//! // to scan.
//! let chain_state = unimplemented!("get_chain_state(scan_range.block_range().start - 1)?;");
//! // Download the blocks in `scan_range` into the block source, overwriting any
//! // existing blocks in this range.
//! unimplemented!();
//! unimplemented!("cache_blocks(scan_range)?;");
//!
//! // Scan the downloaded blocks
//! let scan_result = scan_cached_blocks(
@ -68,6 +71,7 @@
//! &block_source,
//! &mut wallet_db,
//! scan_range.block_range().start,
//! chain_state,
//! scan_range.len()
//! );
//!
@ -118,6 +122,9 @@
//! // encountered, this process should be repeated starting at step (3).
//! let scan_ranges = wallet_db.suggest_scan_ranges().map_err(Error::Wallet)?;
//! for scan_range in scan_ranges {
//! // Download the chain state for the block prior to the start of the range you want
//! // to scan.
//! let chain_state = unimplemented!("get_chain_state(scan_range.block_range().start - 1)?;");
//! // Download the blocks in `scan_range` into the block source. While in this example this
//! // step is performed in-line, it's fine for the download of scan ranges to be asynchronous
//! // and for the scanner to process the downloaded ranges as they become available in a
@ -125,7 +132,7 @@
//! // appropriate, and for ranges with priority `Historic` it can be useful to download and
//! // scan the range in reverse order (to discover more recent unspent notes sooner), or from
//! // the start and end of the range inwards.
//! unimplemented!();
//! unimplemented!("cache_blocks(scan_range)?;");
//!
//! // Scan the downloaded blocks.
//! let scan_result = scan_cached_blocks(
@ -133,6 +140,7 @@
//! &block_source,
//! &mut wallet_db,
//! scan_range.block_range().start,
//! chain_state,
//! scan_range.len()
//! )?;
//!
@ -145,6 +153,7 @@
use std::ops::Range;
use incrementalmerkletree::frontier::Frontier;
use subtle::ConditionallySelectable;
use zcash_primitives::consensus::{self, BlockHeight};
@ -278,12 +287,78 @@ impl ScanSummary {
}
}
/// The final note commitment tree state for each shielded pool, as of a particular block height.
#[derive(Debug, Clone)]
pub struct ChainState {
block_height: BlockHeight,
final_sapling_tree: Frontier<sapling::Node, { sapling::NOTE_COMMITMENT_TREE_DEPTH }>,
#[cfg(feature = "orchard")]
final_orchard_tree:
Frontier<orchard::tree::MerkleHashOrchard, { orchard::NOTE_COMMITMENT_TREE_DEPTH as u8 }>,
}
impl ChainState {
/// Construct a new empty chain state.
pub fn empty(block_height: BlockHeight) -> Self {
Self {
block_height,
final_sapling_tree: Frontier::empty(),
#[cfg(feature = "orchard")]
final_orchard_tree: Frontier::empty(),
}
}
/// Construct a new [`ChainState`] from its constituent parts.
pub fn new(
block_height: BlockHeight,
final_sapling_tree: Frontier<sapling::Node, { sapling::NOTE_COMMITMENT_TREE_DEPTH }>,
#[cfg(feature = "orchard")] final_orchard_tree: Frontier<
orchard::tree::MerkleHashOrchard,
{ orchard::NOTE_COMMITMENT_TREE_DEPTH as u8 },
>,
) -> Self {
Self {
block_height,
final_sapling_tree,
#[cfg(feature = "orchard")]
final_orchard_tree,
}
}
/// Returns the block height to which this chain state applies.
pub fn block_height(&self) -> BlockHeight {
self.block_height
}
/// Returns the frontier of the Sapling note commitment tree as of the end of the block at
/// [`Self::block_height`].
pub fn final_sapling_tree(
&self,
) -> &Frontier<sapling::Node, { sapling::NOTE_COMMITMENT_TREE_DEPTH }> {
&self.final_sapling_tree
}
/// Returns the frontier of the Orchard note commitment tree as of the end of the block at
/// [`Self::block_height`].
#[cfg(feature = "orchard")]
pub fn final_orchard_tree(
&self,
) -> &Frontier<orchard::tree::MerkleHashOrchard, { orchard::NOTE_COMMITMENT_TREE_DEPTH as u8 }>
{
&self.final_orchard_tree
}
}
Scans at most `limit` blocks from the provided block source in order to find transactions
/// received by the accounts tracked in the provided wallet database.
///
This function will return after scanning at most `limit` new blocks, to enable the caller to
update their UI with scanning progress. Repeatedly calling this function with sequential
`from_height` values will process sequential ranges of blocks.
///
/// ## Panics
///
/// This method will panic if `from_height != from_state.block_height() + 1`.
#[tracing::instrument(skip(params, block_source, data_db))]
#[allow(clippy::type_complexity)]
pub fn scan_cached_blocks<ParamsT, DbT, BlockSourceT>(
@ -291,6 +366,7 @@ pub fn scan_cached_blocks<ParamsT, DbT, BlockSourceT>(
block_source: &BlockSourceT,
data_db: &mut DbT,
from_height: BlockHeight,
from_state: &ChainState,
limit: usize,
) -> Result<ScanSummary, Error<DbT::Error, BlockSourceT::Error>>
where
@ -299,6 +375,8 @@ where
DbT: WalletWrite,
<DbT as WalletRead>::AccountId: ConditionallySelectable + Default + Send + 'static,
{
assert_eq!(from_height, from_state.block_height + 1);
// Fetch the UnifiedFullViewingKeys we are tracking
let account_ufvks = data_db
.get_unified_full_viewing_keys()
@ -392,7 +470,9 @@ where
},
)?;
data_db.put_blocks(scanned_blocks).map_err(Error::Wallet)?;
data_db
.put_blocks(from_state, scanned_blocks)
.map_err(Error::Wallet)?;
Ok(scan_summary)
}

View File

@ -403,8 +403,9 @@ where
::sapling::builder::BundleType::DEFAULT,
&shielded_inputs
.iter()
.cloned()
.filter_map(|i| {
i.clone().traverse_opt(|wn| match wn {
i.traverse_opt(|wn| match wn {
Note::Sapling(n) => Some(n),
#[cfg(feature = "orchard")]
_ => None,

View File

@ -624,7 +624,7 @@ where
self.orchard.add_outputs(
block_hash,
txid,
|action| OrchardDomain::for_compact_action(action),
OrchardDomain::for_compact_action,
&tx.actions
.iter()
.enumerate()

View File

@ -78,10 +78,12 @@ maybe-rayon.workspace = true
[dev-dependencies]
assert_matches.workspace = true
bls12_381.workspace = true
incrementalmerkletree = { workspace = true, features = ["test-dependencies"] }
pasta_curves.workspace = true
shardtree = { workspace = true, features = ["legacy-api", "test-dependencies"] }
nonempty.workspace = true
orchard = { workspace = true, features = ["test-dependencies"] }
proptest.workspace = true
rand_chacha.workspace = true
rand_core.workspace = true

View File

@ -338,14 +338,16 @@ mod tests {
testing::pool::valid_chain_states::<OrchardPoolTester>()
}
// FIXME: This requires test framework fixes to pass.
#[test]
#[cfg(feature = "orchard")]
fn invalid_chain_cache_disconnected_sapling() {
testing::pool::invalid_chain_cache_disconnected::<SaplingPoolTester>()
}
#[test]
#[cfg(feature = "orchard")]
fn invalid_chain_cache_disconnected_orchard() {
#[cfg(feature = "orchard")]
testing::pool::invalid_chain_cache_disconnected::<OrchardPoolTester>()
}

View File

@ -32,7 +32,7 @@
// Catch documentation errors caused by code changes.
#![deny(rustdoc::broken_intra_doc_links)]
use incrementalmerkletree::Position;
use incrementalmerkletree::{Position, Retention};
use maybe_rayon::{
prelude::{IndexedParallelIterator, ParallelIterator},
slice::ParallelSliceMut,
@ -58,7 +58,7 @@ use zcash_client_backend::{
address::UnifiedAddress,
data_api::{
self,
chain::{BlockSource, CommitmentTreeRoot},
chain::{BlockSource, ChainState, CommitmentTreeRoot},
scanning::{ScanPriority, ScanRange},
AccountBirthday, BlockMetadata, DecryptedTransaction, InputSource, NullifierQuery,
ScannedBlock, SentTransaction, WalletCommitmentTrees, WalletRead, WalletSummary,
@ -75,7 +75,12 @@ use zcash_client_backend::{
use crate::{error::SqliteClientError, wallet::commitment_tree::SqliteShardStore};
#[cfg(feature = "orchard")]
use zcash_client_backend::data_api::ORCHARD_SHARD_HEIGHT;
use {
incrementalmerkletree::frontier::Frontier,
shardtree::store::{Checkpoint, ShardStore},
std::collections::BTreeMap,
zcash_client_backend::data_api::ORCHARD_SHARD_HEIGHT,
};
#[cfg(feature = "transparent-inputs")]
use {
@ -92,7 +97,6 @@ use {
pub mod chain;
pub mod error;
pub mod wallet;
use wallet::{
commitment_tree::{self, put_shard_roots},
@ -544,6 +548,7 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
#[allow(clippy::type_complexity)]
fn put_blocks(
&mut self,
from_state: &ChainState,
blocks: Vec<ScannedBlock<Self::AccountId>>,
) -> Result<(), Self::Error> {
struct BlockPositions {
@ -704,62 +709,175 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
{
// Create subtrees from the note commitments in parallel.
const CHUNK_SIZE: usize = 1024;
{
let sapling_subtrees = sapling_commitments
.par_chunks_mut(CHUNK_SIZE)
.enumerate()
.filter_map(|(i, chunk)| {
let start =
start_positions.sapling_start_position + (i * CHUNK_SIZE) as u64;
let end = start + chunk.len() as u64;
let sapling_subtrees = sapling_commitments
.par_chunks_mut(CHUNK_SIZE)
.enumerate()
.filter_map(|(i, chunk)| {
let start =
start_positions.sapling_start_position + (i * CHUNK_SIZE) as u64;
let end = start + chunk.len() as u64;
shardtree::LocatedTree::from_iter(
start..end,
SAPLING_SHARD_HEIGHT.into(),
chunk.iter_mut().map(|n| n.take().expect("always Some")),
)
shardtree::LocatedTree::from_iter(
start..end,
SAPLING_SHARD_HEIGHT.into(),
chunk.iter_mut().map(|n| n.take().expect("always Some")),
)
})
.map(|res| (res.subtree, res.checkpoints))
.collect::<Vec<_>>();
#[cfg(feature = "orchard")]
let orchard_subtrees = orchard_commitments
.par_chunks_mut(CHUNK_SIZE)
.enumerate()
.filter_map(|(i, chunk)| {
let start =
start_positions.orchard_start_position + (i * CHUNK_SIZE) as u64;
let end = start + chunk.len() as u64;
shardtree::LocatedTree::from_iter(
start..end,
ORCHARD_SHARD_HEIGHT.into(),
chunk.iter_mut().map(|n| n.take().expect("always Some")),
)
})
.map(|res| (res.subtree, res.checkpoints))
.collect::<Vec<_>>();
// Collect the complete set of Sapling checkpoints
#[cfg(feature = "orchard")]
let sapling_checkpoint_positions: BTreeMap<BlockHeight, Position> =
sapling_subtrees
.iter()
.flat_map(|(_, checkpoints)| checkpoints.iter())
.map(|(k, v)| (*k, *v))
.collect();
#[cfg(feature = "orchard")]
let orchard_checkpoint_positions: BTreeMap<BlockHeight, Position> =
orchard_subtrees
.iter()
.flat_map(|(_, checkpoints)| checkpoints.iter())
.map(|(k, v)| (*k, *v))
.collect();
#[cfg(feature = "orchard")]
fn ensure_checkpoints<
'a,
H,
I: Iterator<Item = &'a BlockHeight>,
const DEPTH: u8,
>(
// An iterator of checkpoint heights for which we wish to ensure that
// checkpoints exist.
checkpoint_heights: I,
// The map of checkpoint positions from which we will draw note commitment tree
// position information for the newly created checkpoints.
existing_checkpoint_positions: &BTreeMap<BlockHeight, Position>,
// The frontier whose position will be used for an inserted checkpoint when
// there is no preceding checkpoint in existing_checkpoint_positions.
state_final_tree: &Frontier<H, DEPTH>,
) -> Vec<(BlockHeight, Checkpoint)> {
checkpoint_heights
.flat_map(|from_checkpoint_height| {
existing_checkpoint_positions
.range::<BlockHeight, _>(..=*from_checkpoint_height)
.last()
.map_or_else(
|| {
Some((
*from_checkpoint_height,
state_final_tree
.value()
.map_or_else(Checkpoint::tree_empty, |t| {
Checkpoint::at_position(t.position())
}),
))
},
|(to_prev_height, position)| {
if *to_prev_height < *from_checkpoint_height {
Some((
*from_checkpoint_height,
Checkpoint::at_position(*position),
))
} else {
// The checkpoint already exists, so we don't need to
// do anything.
None
}
},
)
.into_iter()
})
.map(|res| (res.subtree, res.checkpoints))
.collect::<Vec<_>>();
.collect::<Vec<_>>()
}
// Update the Sapling note commitment tree with all newly read note commitments
let mut sapling_subtrees = sapling_subtrees.into_iter();
wdb.with_sapling_tree_mut::<_, _, Self::Error>(move |sapling_tree| {
for (tree, checkpoints) in &mut sapling_subtrees {
#[cfg(feature = "orchard")]
let missing_sapling_checkpoints = ensure_checkpoints(
orchard_checkpoint_positions.keys(),
&sapling_checkpoint_positions,
from_state.final_sapling_tree(),
);
#[cfg(feature = "orchard")]
let missing_orchard_checkpoints = ensure_checkpoints(
sapling_checkpoint_positions.keys(),
&orchard_checkpoint_positions,
from_state.final_orchard_tree(),
);
// Update the Sapling note commitment tree with all newly read note commitments
{
let mut sapling_subtrees_iter = sapling_subtrees.into_iter();
wdb.with_sapling_tree_mut::<_, _, Self::Error>(|sapling_tree| {
sapling_tree.insert_frontier(
from_state.final_sapling_tree().clone(),
Retention::Checkpoint {
id: from_state.block_height(),
is_marked: false,
},
)?;
for (tree, checkpoints) in &mut sapling_subtrees_iter {
sapling_tree.insert_tree(tree, checkpoints)?;
}
// Ensure we have a Sapling checkpoint for each checkpointed Orchard block height
#[cfg(feature = "orchard")]
for (height, checkpoint) in &missing_sapling_checkpoints {
sapling_tree
.store_mut()
.add_checkpoint(*height, checkpoint.clone())
.map_err(ShardTreeError::Storage)?;
}
Ok(())
})?;
}
// Create subtrees from the note commitments in parallel.
// Update the Orchard note commitment tree with all newly read note commitments
#[cfg(feature = "orchard")]
{
let orchard_subtrees = orchard_commitments
.par_chunks_mut(CHUNK_SIZE)
.enumerate()
.filter_map(|(i, chunk)| {
let start =
start_positions.orchard_start_position + (i * CHUNK_SIZE) as u64;
let end = start + chunk.len() as u64;
shardtree::LocatedTree::from_iter(
start..end,
ORCHARD_SHARD_HEIGHT.into(),
chunk.iter_mut().map(|n| n.take().expect("always Some")),
)
})
.map(|res| (res.subtree, res.checkpoints))
.collect::<Vec<_>>();
// Update the Orchard note commitment tree with all newly read note commitments
let mut orchard_subtrees = orchard_subtrees.into_iter();
wdb.with_orchard_tree_mut::<_, _, Self::Error>(move |orchard_tree| {
wdb.with_orchard_tree_mut::<_, _, Self::Error>(|orchard_tree| {
orchard_tree.insert_frontier(
from_state.final_orchard_tree().clone(),
Retention::Checkpoint {
id: from_state.block_height(),
is_marked: false,
},
)?;
for (tree, checkpoints) in &mut orchard_subtrees {
orchard_tree.insert_tree(tree, checkpoints)?;
}
for (height, checkpoint) in &missing_orchard_checkpoints {
orchard_tree
.store_mut()
.add_checkpoint(*height, checkpoint.clone())
.map_err(ShardTreeError::Storage)?;
}
Ok(())
})?;
}

View File

@ -1,10 +1,11 @@
use std::convert::Infallible;
use std::fmt;
use std::num::NonZeroU32;
use std::{collections::BTreeMap, convert::Infallible};
#[cfg(feature = "unstable")]
use std::fs::File;
use group::ff::Field;
use nonempty::NonEmpty;
use prost::Message;
use rand_chacha::ChaChaRng;
@ -45,6 +46,7 @@ use zcash_client_backend::{
zip321,
};
use zcash_client_backend::{
data_api::chain::ChainState,
fees::{standard, DustOutputPolicy},
ShieldedProtocol,
};
@ -76,8 +78,7 @@ use super::BlockDb;
#[cfg(feature = "orchard")]
use {
group::ff::{Field, PrimeField},
pasta_curves::pallas,
group::ff::PrimeField, orchard::tree::MerkleHashOrchard, pasta_curves::pallas,
zcash_client_backend::proto::compact_formats::CompactOrchardAction,
};
@ -176,7 +177,8 @@ impl<Cache> TestBuilder<Cache> {
TestState {
cache: self.cache,
latest_cached_block: None,
cached_blocks: BTreeMap::new(),
latest_block_height: None,
_data_file: data_file,
db_data,
test_account,
@ -185,9 +187,10 @@ impl<Cache> TestBuilder<Cache> {
}
}
#[derive(Clone, Debug)]
pub(crate) struct CachedBlock {
height: BlockHeight,
hash: BlockHash,
chain_state: ChainState,
sapling_end_size: u32,
orchard_end_size: u32,
}
@ -195,44 +198,87 @@ pub(crate) struct CachedBlock {
impl CachedBlock {
fn none(sapling_activation_height: BlockHeight) -> Self {
Self {
height: sapling_activation_height,
hash: BlockHash([0; 32]),
chain_state: ChainState::empty(sapling_activation_height),
sapling_end_size: 0,
orchard_end_size: 0,
}
}
fn at(
height: BlockHeight,
hash: BlockHash,
sapling_tree_size: u32,
orchard_tree_size: u32,
chain_state: ChainState,
sapling_end_size: u32,
orchard_end_size: u32,
) -> Self {
assert_eq!(
chain_state.final_sapling_tree().tree_size() as u32,
sapling_end_size
);
#[cfg(feature = "orchard")]
assert_eq!(
chain_state.final_orchard_tree().tree_size() as u32,
orchard_end_size
);
Self {
height,
hash,
sapling_end_size: sapling_tree_size,
orchard_end_size: orchard_tree_size,
chain_state,
sapling_end_size,
orchard_end_size,
}
}
fn roll_forward(self, cb: &CompactBlock) -> Self {
assert_eq!(self.height + 1, cb.height());
fn roll_forward(&self, cb: &CompactBlock) -> Self {
assert_eq!(self.chain_state.block_height() + 1, cb.height());
let sapling_final_tree = cb.vtx.iter().flat_map(|tx| tx.outputs.iter()).fold(
self.chain_state.final_sapling_tree().clone(),
|mut acc, c_out| {
acc.append(sapling::Node::from_cmu(&c_out.cmu().unwrap()));
acc
},
);
let sapling_end_size = sapling_final_tree.tree_size() as u32;
#[cfg(feature = "orchard")]
let orchard_final_tree = cb.vtx.iter().flat_map(|tx| tx.actions.iter()).fold(
self.chain_state.final_orchard_tree().clone(),
|mut acc, c_act| {
acc.append(MerkleHashOrchard::from_cmx(&c_act.cmx().unwrap()));
acc
},
);
#[cfg(feature = "orchard")]
let orchard_end_size = orchard_final_tree.tree_size() as u32;
#[cfg(not(feature = "orchard"))]
let orchard_end_size = cb.vtx.iter().fold(self.orchard_end_size, |sz, tx| {
sz + (tx.actions.len() as u32)
});
Self {
height: cb.height(),
hash: cb.hash(),
sapling_end_size: self.sapling_end_size
+ cb.vtx.iter().map(|tx| tx.outputs.len() as u32).sum::<u32>(),
orchard_end_size: self.orchard_end_size
+ cb.vtx.iter().map(|tx| tx.actions.len() as u32).sum::<u32>(),
chain_state: ChainState::new(
cb.height(),
sapling_final_tree,
#[cfg(feature = "orchard")]
orchard_final_tree,
),
sapling_end_size,
orchard_end_size,
}
}
fn height(&self) -> BlockHeight {
self.chain_state.block_height()
}
}
/// The state for a `zcash_client_sqlite` test.
pub(crate) struct TestState<Cache> {
cache: Cache,
latest_cached_block: Option<CachedBlock>,
cached_blocks: BTreeMap<BlockHeight, CachedBlock>,
latest_block_height: Option<BlockHeight>,
_data_file: NamedTempFile,
db_data: WalletDb<Connection, LocalNetwork>,
test_account: Option<(
@ -255,7 +301,25 @@ where
}
pub(crate) fn latest_cached_block(&self) -> Option<&CachedBlock> {
self.latest_cached_block.as_ref()
self.latest_block_height
.as_ref()
.and_then(|h| self.cached_blocks.get(h))
}
fn latest_cached_block_below_height(&self, height: BlockHeight) -> Option<&CachedBlock> {
self.cached_blocks.range(..height).last().map(|(_, b)| b)
}
fn cache_block(
&mut self,
prev_block: &CachedBlock,
compact_block: CompactBlock,
) -> Cache::InsertResult {
self.cached_blocks.insert(
compact_block.height(),
prev_block.roll_forward(&compact_block),
);
self.cache.insert(&compact_block)
}
/// Creates a fake block at the expected next height containing a single output of the
@ -266,22 +330,19 @@ where
req: AddressType,
value: NonNegativeAmount,
) -> (BlockHeight, Cache::InsertResult, Fvk::Nullifier) {
let cached_block = self
.latest_cached_block
.take()
.unwrap_or_else(|| CachedBlock::none(self.sapling_activation_height() - 1));
let height = cached_block.height + 1;
let pre_activation_block = CachedBlock::none(self.sapling_activation_height() - 1);
let prior_cached_block = self.latest_cached_block().unwrap_or(&pre_activation_block);
let height = prior_cached_block.height() + 1;
let (res, nf) = self.generate_block_at(
height,
cached_block.hash,
prior_cached_block.hash,
fvk,
req,
value,
cached_block.sapling_end_size,
cached_block.orchard_end_size,
prior_cached_block.sapling_end_size,
prior_cached_block.orchard_end_size,
);
assert!(self.latest_cached_block.is_some());
(height, res, nf)
}
@ -302,6 +363,57 @@ where
initial_sapling_tree_size: u32,
initial_orchard_tree_size: u32,
) -> (Cache::InsertResult, Fvk::Nullifier) {
let mut prior_cached_block = self
.latest_cached_block_below_height(height)
.cloned()
.unwrap_or_else(|| CachedBlock::none(self.sapling_activation_height() - 1));
assert!(prior_cached_block.chain_state.block_height() < height);
assert!(prior_cached_block.sapling_end_size <= initial_sapling_tree_size);
assert!(prior_cached_block.orchard_end_size <= initial_orchard_tree_size);
// If the block height has increased or the Sapling and/or Orchard tree sizes have changed,
// we need to generate a new prior cached block that the block to be generated can
// successfully chain from, with the provided tree sizes.
if prior_cached_block.chain_state.block_height() == height - 1 {
assert_eq!(prev_hash, prior_cached_block.hash);
} else {
let final_sapling_tree =
(prior_cached_block.sapling_end_size..initial_sapling_tree_size).fold(
prior_cached_block.chain_state.final_sapling_tree().clone(),
|mut acc, _| {
acc.append(sapling::Node::from_scalar(bls12_381::Scalar::random(
&mut self.rng,
)));
acc
},
);
#[cfg(feature = "orchard")]
let final_orchard_tree =
(prior_cached_block.orchard_end_size..initial_orchard_tree_size).fold(
prior_cached_block.chain_state.final_orchard_tree().clone(),
|mut acc, _| {
acc.append(MerkleHashOrchard::random(&mut self.rng));
acc
},
);
prior_cached_block = CachedBlock::at(
prev_hash,
ChainState::new(
height - 1,
final_sapling_tree,
#[cfg(feature = "orchard")]
final_orchard_tree,
),
initial_sapling_tree_size,
initial_orchard_tree_size,
);
self.cached_blocks
.insert(height - 1, prior_cached_block.clone());
}
let (cb, nf) = fake_compact_block(
&self.network(),
height,
@ -313,17 +425,10 @@ where
initial_orchard_tree_size,
&mut self.rng,
);
let res = self.cache.insert(&cb);
assert_eq!(cb.height(), height);
self.latest_cached_block = Some(
CachedBlock::at(
height - 1,
cb.hash(),
initial_sapling_tree_size,
initial_orchard_tree_size,
)
.roll_forward(&cb),
);
let res = self.cache_block(&prior_cached_block, cb);
self.latest_block_height = Some(height);
(res, nf)
}
@ -337,27 +442,28 @@ where
to: impl Into<Address>,
value: NonNegativeAmount,
) -> (BlockHeight, Cache::InsertResult) {
let cached_block = self
.latest_cached_block
.take()
let prior_cached_block = self
.latest_cached_block()
.cloned()
.unwrap_or_else(|| CachedBlock::none(self.sapling_activation_height() - 1));
let height = cached_block.height + 1;
let height = prior_cached_block.height() + 1;
let cb = fake_compact_block_spending(
&self.network(),
height,
cached_block.hash,
prior_cached_block.hash,
note,
fvk,
to.into(),
value,
cached_block.sapling_end_size,
cached_block.orchard_end_size,
prior_cached_block.sapling_end_size,
prior_cached_block.orchard_end_size,
&mut self.rng,
);
let res = self.cache.insert(&cb);
assert_eq!(cb.height(), height);
self.latest_cached_block = Some(cached_block.roll_forward(&cb));
let res = self.cache_block(&prior_cached_block, cb);
self.latest_block_height = Some(height);
(height, res)
}
@ -392,24 +498,25 @@ where
tx_index: usize,
tx: &Transaction,
) -> (BlockHeight, Cache::InsertResult) {
let cached_block = self
.latest_cached_block
.take()
let prior_cached_block = self
.latest_cached_block()
.cloned()
.unwrap_or_else(|| CachedBlock::none(self.sapling_activation_height() - 1));
let height = cached_block.height + 1;
let height = prior_cached_block.height() + 1;
let cb = fake_compact_block_from_tx(
height,
cached_block.hash,
prior_cached_block.hash,
tx_index,
tx,
cached_block.sapling_end_size,
cached_block.orchard_end_size,
prior_cached_block.sapling_end_size,
prior_cached_block.orchard_end_size,
&mut self.rng,
);
let res = self.cache.insert(&cb);
assert_eq!(cb.height(), height);
self.latest_cached_block = Some(cached_block.roll_forward(&cb));
let res = self.cache_block(&prior_cached_block, cb);
self.latest_block_height = Some(height);
(height, res)
}
@ -437,13 +544,20 @@ where
<Cache::BlockSource as BlockSource>::Error,
>,
> {
scan_cached_blocks(
let prior_cached_block = self
.latest_cached_block_below_height(from_height)
.cloned()
.unwrap_or_else(|| CachedBlock::none(from_height - 1));
let result = scan_cached_blocks(
&self.network(),
self.cache.block_source(),
&mut self.db_data,
from_height,
&prior_cached_block.chain_state,
limit,
)
);
result
}
/// Resets the wallet using a new wallet database but with the same cache of blocks,
@ -454,7 +568,7 @@ where
/// Before using any `generate_*` method on the reset state, call `reset_latest_cached_block()`.
pub(crate) fn reset(&mut self) -> NamedTempFile {
let network = self.network();
self.latest_cached_block = None;
self.latest_block_height = None;
let tf = std::mem::replace(&mut self._data_file, NamedTempFile::new().unwrap());
self.db_data = WalletDb::for_path(self._data_file.path(), network).unwrap();
self.test_account = None;
@ -462,23 +576,23 @@ where
tf
}
/// Reset the latest cached block to the most recent one in the cache database.
#[allow(dead_code)]
pub(crate) fn reset_latest_cached_block(&mut self) {
self.cache
.block_source()
.with_blocks::<_, Infallible>(None, None, |block: CompactBlock| {
let chain_metadata = block.chain_metadata.unwrap();
self.latest_cached_block = Some(CachedBlock::at(
BlockHeight::from_u32(block.height.try_into().unwrap()),
BlockHash::from_slice(block.hash.as_slice()),
chain_metadata.sapling_commitment_tree_size,
chain_metadata.orchard_commitment_tree_size,
));
Ok(())
})
.unwrap();
}
// /// Reset the latest cached block to the most recent one in the cache database.
// #[allow(dead_code)]
// pub(crate) fn reset_latest_cached_block(&mut self) {
// self.cache
// .block_source()
// .with_blocks::<_, Infallible>(None, None, |block: CompactBlock| {
// let chain_metadata = block.chain_metadata.unwrap();
// self.latest_cached_block = Some(CachedBlock::at(
// BlockHash::from_slice(block.hash.as_slice()),
// BlockHeight::from_u32(block.height.try_into().unwrap()),
// chain_metadata.sapling_commitment_tree_size,
// chain_metadata.orchard_commitment_tree_size,
// ));
// Ok(())
// })
// .unwrap();
// }
}
impl<Cache> TestState<Cache> {

View File

@ -1232,6 +1232,8 @@ pub(crate) fn shield_transparent<T: ShieldedPoolTester>() {
);
}
// FIXME: This requires fixes to the test framework.
#[allow(dead_code)]
pub(crate) fn birthday_in_anchor_shard<T: ShieldedPoolTester>() {
// Use a non-zero birthday offset because Sapling and NU5 are activated at the same height.
let (mut st, dfvk, birthday, _) = test_with_nu5_birthday_offset::<T>(76);
@ -1519,6 +1521,8 @@ pub(crate) fn valid_chain_states<T: ShieldedPoolTester>() {
st.scan_cached_blocks(h2, 1);
}
// FIXME: This requires fixes to the test framework.
#[allow(dead_code)]
pub(crate) fn invalid_chain_cache_disconnected<T: ShieldedPoolTester>() {
let mut st = TestBuilder::new()
.with_block_cache()

View File

@ -3048,23 +3048,6 @@ mod tests {
// Scan a block above the wallet's birthday height.
let not_our_key = ExtendedSpendingKey::master(&[]).to_diversifiable_full_viewing_key();
let not_our_value = NonNegativeAmount::const_from_u64(10000);
let end_height = st.sapling_activation_height() + 2;
let _ = st.generate_block_at(
end_height,
BlockHash([37; 32]),
&not_our_key,
AddressType::DefaultExternal,
not_our_value,
17,
17,
);
st.scan_cached_blocks(end_height, 1);
// The wallet should still have no fully-scanned block, as no scanned block range
// overlaps the wallet's birthday.
assert_eq!(block_fully_scanned(&st), None);
// Scan the block at the wallet's birthday height.
let start_height = st.sapling_activation_height();
let _ = st.generate_block_at(
start_height,
@ -3075,15 +3058,26 @@ mod tests {
0,
0,
);
let (mid_height, _, _) =
st.generate_next_block(&not_our_key, AddressType::DefaultExternal, not_our_value);
let (end_height, _, _) =
st.generate_next_block(&not_our_key, AddressType::DefaultExternal, not_our_value);
// Scan the last block first
st.scan_cached_blocks(end_height, 1);
// The wallet should still have no fully-scanned block, as no scanned block range
// overlaps the wallet's birthday.
assert_eq!(block_fully_scanned(&st), None);
// Scan the block at the wallet's birthday height.
st.scan_cached_blocks(start_height, 1);
// The fully-scanned height should now be that of the scanned block.
assert_eq!(block_fully_scanned(&st), Some(start_height));
// Scan the block in between the two previous blocks.
let (h, _, _) =
st.generate_next_block(&not_our_key, AddressType::DefaultExternal, not_our_value);
st.scan_cached_blocks(h, 1);
st.scan_cached_blocks(mid_height, 1);
// The fully-scanned height should now be the latest block, as the two disjoint
// ranges have been connected.

View File

@ -553,7 +553,7 @@ mod tests {
row_count += 1;
let value: u64 = row.get(0).unwrap();
let scope = parse_scope(row.get(1).unwrap());
match dbg!(value) {
match value {
EXTERNAL_VALUE => assert_eq!(scope, Some(Scope::External)),
INTERNAL_VALUE => assert_eq!(scope, Some(Scope::Internal)),
_ => {
@ -730,7 +730,7 @@ mod tests {
row_count += 1;
let value: u64 = row.get(0).unwrap();
let scope = parse_scope(row.get(1).unwrap());
match dbg!(value) {
match value {
EXTERNAL_VALUE => assert_eq!(scope, Some(Scope::External)),
INTERNAL_VALUE => assert_eq!(scope, Some(Scope::Internal)),
_ => {

View File

@ -588,7 +588,9 @@ pub(crate) mod tests {
testing::pool::shield_transparent::<SaplingPoolTester>()
}
// FIXME: This requires fixes to the test framework.
#[test]
#[cfg(feature = "orchard")]
fn birthday_in_anchor_shard() {
testing::pool::birthday_in_anchor_shard::<SaplingPoolTester>()
}

View File

@ -611,17 +611,21 @@ pub(crate) mod tests {
zcash_client_backend::data_api::ORCHARD_SHARD_HEIGHT,
};
// FIXME: This requires fixes to the test framework.
#[test]
#[cfg(feature = "orchard")]
fn sapling_scan_complete() {
scan_complete::<SaplingPoolTester>();
}
#[cfg(feature = "orchard")]
#[test]
#[cfg(feature = "orchard")]
fn orchard_scan_complete() {
scan_complete::<OrchardPoolTester>();
}
// FIXME: This requires fixes to the test framework.
#[allow(dead_code)]
fn scan_complete<T: ShieldedPoolTester>() {
use ScanPriority::*;
@ -963,7 +967,9 @@ pub(crate) mod tests {
assert_eq!(actual, expected);
}
// FIXME: This requires fixes to the test framework.
#[test]
#[cfg(feature = "orchard")]
fn sapling_update_chain_tip_unstable_max_scanned() {
update_chain_tip_unstable_max_scanned::<SaplingPoolTester>();
}
@ -974,6 +980,8 @@ pub(crate) mod tests {
update_chain_tip_unstable_max_scanned::<OrchardPoolTester>();
}
// FIXME: This requires fixes to the test framework.
#[allow(dead_code)]
fn update_chain_tip_unstable_max_scanned<T: ShieldedPoolTester>() {
use ScanPriority::*;
@ -1102,17 +1110,21 @@ pub(crate) mod tests {
assert_eq!(actual, expected);
}
// FIXME: This requires fixes to the test framework.
#[test]
#[cfg(feature = "orchard")]
fn sapling_update_chain_tip_stable_max_scanned() {
update_chain_tip_stable_max_scanned::<SaplingPoolTester>();
}
#[cfg(feature = "orchard")]
#[test]
#[cfg(feature = "orchard")]
fn orchard_update_chain_tip_stable_max_scanned() {
update_chain_tip_stable_max_scanned::<OrchardPoolTester>();
}
// FIXME: This requires fixes to the test framework.
#[allow(dead_code)]
fn update_chain_tip_stable_max_scanned<T: ShieldedPoolTester>() {
use ScanPriority::*;