Merge pull request #831 from nuttycom/feature/pre_dag_sync

Migrations & data storage for pre-DAG-sync
This commit is contained in:
Kris Nuttycombe 2023-07-04 13:52:10 -06:00 committed by GitHub
commit d8148f90e7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
34 changed files with 2880 additions and 1168 deletions

View File

@ -17,3 +17,8 @@ members = [
lto = true
panic = 'abort'
codegen-units = 1
[patch.crates-io]
incrementalmerkletree = { git = "https://github.com/zcash/incrementalmerkletree.git", rev = "082109deacf8611ee7917732e19b56158bda96d5" }
shardtree = { git = "https://github.com/zcash/incrementalmerkletree.git", rev = "082109deacf8611ee7917732e19b56158bda96d5" }
orchard = { git = "https://github.com/zcash/orchard.git", rev = "5da41a6bbb44290e353ee4b38bcafe37ffe79ce8" }

View File

@ -9,21 +9,82 @@ and this library adheres to Rust's notion of
### Added
- `impl Eq for zcash_client_backend::address::RecipientAddress`
- `impl Eq for zcash_client_backend::zip321::{Payment, TransactionRequest}`
- `data_api::NullifierQuery` for use with `WalletRead::get_sapling_nullifiers`
- `impl Debug` for `zcash_client_backend::{data_api::wallet::input_selection::Proposal, wallet::ReceivedSaplingNote}`
- `zcash_client_backend::data_api`:
- `WalletRead::{block_metadata, block_fully_scanned, suggest_scan_ranges}`
- `WalletWrite::put_block`
- `WalletCommitmentTrees`
- `testing::MockWalletDb::new`
- `NullifierQuery` for use with `WalletRead::get_sapling_nullifiers`
- `BlockMetadata`
- `ScannedBlock`
- `wallet::input_selection::Proposal::{min_target_height, min_anchor_height}`
- `zcash_client_backend::wallet::WalletSaplingOutput::note_commitment_tree_position`
- `zcash_client_backend::scanning::ScanError`
### Changed
- MSRV is now 1.65.0.
- Bumped dependencies to `hdwallet 0.4`, `zcash_primitives 0.12`, `zcash_note_encryption 0.4`,
`incrementalmerkletree 0.4`, `orchard 0.5`, `bs58 0.5`
- `WalletRead::get_memo` now returns `Result<Option<Memo>, Self::Error>`
instead of `Result<Memo, Self::Error>` in order to make representable
wallet states where the full note plaintext is not available.
- `WalletRead::get_nullifiers` has been renamed to `WalletRead::get_sapling_nullifiers`
and its signature has changed; it now subsumes the removed `WalletRead::get_all_nullifiers`.
- `wallet::SpendableNote` has been renamed to `wallet::ReceivedSaplingNote`.
- `zcash_client_backend::data_api`:
- `WalletRead::get_memo` now returns `Result<Option<Memo>, Self::Error>`
instead of `Result<Memo, Self::Error>` in order to make representable
wallet states where the full note plaintext is not available.
- `WalletRead::get_nullifiers` has been renamed to `WalletRead::get_sapling_nullifiers`
and its signature has changed; it now subsumes the removed `WalletRead::get_all_nullifiers`.
- `WalletRead::get_target_and_anchor_heights` now takes its argument as a `NonZeroU32` (a usage sketch follows at the end of this changelog entry)
- `chain::scan_cached_blocks` now takes a `from_height` argument that
permits the caller to control the starting position of the scan range.
- A new `CommitmentTree` variant has been added to `data_api::error::Error`
- `data_api::wallet::{create_spend_to_address, create_proposed_transaction,
shield_transparent_funds}` all now require that `WalletCommitmentTrees` be
implemented for the type passed to them for the `wallet_db` parameter.
- `data_api::wallet::create_proposed_transaction` now takes an additional
`min_confirmations` argument.
- `data_api::wallet::{spend, create_spend_to_address, shield_transparent_funds,
propose_transfer, propose_shielding, create_proposed_transaction}` now take their
respective `min_confirmations` arguments as `NonZeroU32`
- `data_api::wallet::input_selection::InputSelector::{propose_transaction, propose_shielding}`
now take their respective `min_confirmations` arguments as `NonZeroU32`
- A new `Scan` variant has been added to `data_api::chain::error::Error`.
- A new `SyncRequired` variant has been added to `data_api::wallet::input_selection::InputSelectorError`.
- `zcash_client_backend::wallet`:
- `SpendableNote` has been renamed to `ReceivedSaplingNote`.
- Arguments to `WalletSaplingOutput::from_parts` have changed.
- `zcash_client_backend::data_api::wallet::input_selection::InputSelector`:
- Arguments to `{propose_transaction, propose_shielding}` have changed.
- `zcash_client_backend::wallet::ReceivedSaplingNote::note_commitment_tree_position`
has replaced the `witness` field in the same struct.
- `zcash_client_backend::welding_rig` has been renamed to `zcash_client_backend::scanning`
- `zcash_client_backend::scanning::ScanningKey::sapling_nf` has been changed to
take a note position instead of an incremental witness for the note.
- Arguments to `zcash_client_backend::scanning::scan_block` have changed. This
method now takes an optional `BlockMetadata` argument instead of a base commitment
tree and incremental witnesses for each previously-known note. In addition, the
return type has now been updated to return a `Result<ScannedBlock, ScanError>`.
### Removed
- `WalletRead::get_all_nullifiers`
- `zcash_client_backend::data_api`:
- `WalletRead::get_all_nullifiers`
- `WalletRead::{get_commitment_tree, get_witnesses}` have been removed
without replacement. The utility of these methods is now subsumed
by those available from the `WalletCommitmentTrees` trait.
- `WalletWrite::advance_by_block` (use `WalletWrite::put_block` instead).
- `PrunedBlock` has been replaced by `ScannedBlock`
- `testing::MockWalletDb`, which is available under the `test-dependencies`
feature flag, has been modified by the addition of a `sapling_tree` property.
- `wallet::input_selection`:
- `Proposal::target_height` (use `Proposal::min_target_height` instead).
- `zcash_client_backend::data_api::chain::validate_chain` TODO: document how
to handle validation given out-of-order blocks.
- `zcash_client_backend::data_api::chain::error::{ChainError, Cause}` have been
replaced by `zcash_client_backend::scanning::ScanError`
- `zcash_client_backend::wallet::WalletSaplingOutput::{witness, witness_mut}`
have been removed as individual incremental witnesses are no longer tracked on a
per-note basis. The global note commitment tree for the wallet should be used
to obtain witnesses for spend operations instead.
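As a hedged illustration of the `NonZeroU32` change to `get_target_and_anchor_heights` described above (the `wallet_db` value below is an assumption, not part of this changelog), the confirmation depth is now constructed explicitly before querying heights:

use std::num::NonZeroU32;

let min_confirmations = NonZeroU32::new(10).expect("10 is nonzero");
if let Some((target_height, anchor_height)) =
    wallet_db.get_target_and_anchor_heights(min_confirmations)?
{
    // Propose or create transactions against `anchor_height`, targeting `target_height`.
}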
## [0.9.0] - 2023-04-28
### Added

View File

@ -21,6 +21,7 @@ development = ["zcash_proofs"]
[dependencies]
incrementalmerkletree = { version = "0.4", features = ["legacy-api"] }
shardtree = "0.0"
zcash_address = { version = "0.3", path = "../components/zcash_address" }
zcash_encoding = { version = "0.2", path = "../components/zcash_encoding" }
zcash_note_encryption = "0.4"

View File

@ -45,6 +45,10 @@ fn build() -> io::Result<()> {
// Build the gRPC types and client.
tonic_build::configure()
.build_server(false)
.extern_path(
".cash.z.wallet.sdk.rpc.ChainMetadata",
"crate::proto::compact_formats::ChainMetadata",
)
.extern_path(
".cash.z.wallet.sdk.rpc.CompactBlock",
"crate::proto::compact_formats::CompactBlock",

View File

@ -10,18 +10,25 @@ option swift_prefix = "";
// Remember that proto3 fields are all optional. A field that is not present will be set to its zero value.
// bytes fields of hashes are in canonical little-endian format.
// ChainMetadata represents information about the state of the chain as of a given block.
message ChainMetadata {
uint32 saplingCommitmentTreeSize = 1; // the size of the Sapling note commitment tree as of the end of this block
uint32 orchardCommitmentTreeSize = 2; // the size of the Orchard note commitment tree as of the end of this block
}
// CompactBlock is a packaging of ONLY the data from a block that's needed to:
// 1. Detect a payment to your shielded Sapling address
// 2. Detect a spend of your shielded Sapling notes
// 3. Update your witnesses to generate new Sapling spend proofs.
message CompactBlock {
uint32 protoVersion = 1; // the version of this wire format, for storage
uint64 height = 2; // the height of this block
bytes hash = 3; // the ID (hash) of this block, same as in block explorers
bytes prevHash = 4; // the ID (hash) of this block's predecessor
uint32 time = 5; // Unix epoch time when the block was mined
bytes header = 6; // (hash, prevHash, and time) OR (full header)
repeated CompactTx vtx = 7; // zero or more compact transactions from this block
uint32 protoVersion = 1; // the version of this wire format, for storage
uint64 height = 2; // the height of this block
bytes hash = 3; // the ID (hash) of this block, same as in block explorers
bytes prevHash = 4; // the ID (hash) of this block's predecessor
uint32 time = 5; // Unix epoch time when the block was mined
bytes header = 6; // (hash, prevHash, and time) OR (full header)
repeated CompactTx vtx = 7; // zero or more compact transactions from this block
ChainMetadata chainMetadata = 8; // information about the state of the chain as of this block
}
// CompactTx contains the minimum information for a wallet to know if this transaction

View File

@ -1,10 +1,13 @@
//! Interfaces for wallet data persistence & low-level wallet utilities.
use std::cmp;
use std::collections::HashMap;
use std::fmt::Debug;
use std::num::NonZeroU32;
use std::{cmp, ops::Range};
use incrementalmerkletree::Retention;
use secrecy::SecretVec;
use shardtree::{ShardStore, ShardTree, ShardTreeError};
use zcash_primitives::{
block::BlockHash,
consensus::BlockHeight,
@ -29,6 +32,8 @@ pub mod chain;
pub mod error;
pub mod wallet;
pub const SAPLING_SHARD_HEIGHT: u8 = sapling::NOTE_COMMITMENT_TREE_DEPTH / 2;
pub enum NullifierQuery {
Unspent,
All,
@ -61,6 +66,32 @@ pub trait WalletRead {
/// This will return `Ok(None)` if no block data is present in the database.
fn block_height_extrema(&self) -> Result<Option<(BlockHeight, BlockHeight)>, Self::Error>;
/// Returns the available block metadata for the block at the specified height, if any.
fn block_metadata(&self, height: BlockHeight) -> Result<Option<BlockMetadata>, Self::Error>;
/// Returns the metadata for the block at the height to which the wallet has been fully
/// scanned.
///
/// This is the height for which the wallet has fully trial-decrypted this block and all preceding
/// blocks above the wallet's birthday height. Along with this height, this method returns
/// metadata describing the state of the wallet's note commitment trees as of the end of that
/// block.
fn block_fully_scanned(&self) -> Result<Option<BlockMetadata>, Self::Error>;
/// Returns a vector of suggested scan ranges based upon the current wallet state.
///
/// This method should only be used in cases where the [`CompactBlock`] data that will be made
/// available to `scan_cached_blocks` for the requested block ranges includes note commitment
/// tree size information for each block; or else the scan is likely to fail if notes belonging
/// to the wallet are detected.
///
/// [`CompactBlock`]: crate::proto::compact_formats::CompactBlock
fn suggest_scan_ranges(
&self,
batch_size: usize,
limit: usize,
) -> Result<Vec<Range<BlockHeight>>, Self::Error>;
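A hedged sketch of how these two methods might be driven together (the wallet value `db`, the `params` and `block_source` values, and the batch size and limit below are assumptions, not part of this trait): each suggested range is handed to `scan_cached_blocks` via its `from_height` and `limit` arguments.

let ranges = db.suggest_scan_ranges(1_000, 10)?;
for range in ranges {
    // The range is half-open, so the number of blocks to request is end - start.
    let limit = u32::from(range.end) - u32::from(range.start);
    scan_cached_blocks(&params, &block_source, &mut db, Some(range.start), Some(limit))?;
}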
/// Returns the default target height (for the block in which a new
/// transaction would be mined) and anchor height (to use for a new
/// transaction), given the range of block heights that the backend
@ -69,7 +100,7 @@ pub trait WalletRead {
/// This will return `Ok(None)` if no block data is present in the database.
fn get_target_and_anchor_heights(
&self,
min_confirmations: u32,
min_confirmations: NonZeroU32,
) -> Result<Option<(BlockHeight, BlockHeight)>, Self::Error> {
self.block_height_extrema().map(|heights| {
heights.map(|(min_height, max_height)| {
@ -78,7 +109,7 @@ pub trait WalletRead {
// Select an anchor min_confirmations back from the target block,
// unless that would be before the earliest block we have.
let anchor_height = BlockHeight::from(cmp::max(
u32::from(target_height).saturating_sub(min_confirmations),
u32::from(target_height).saturating_sub(min_confirmations.into()),
u32::from(min_height),
));
@ -165,19 +196,6 @@ pub trait WalletRead {
/// Returns a transaction.
fn get_transaction(&self, id_tx: Self::TxRef) -> Result<Transaction, Self::Error>;
/// Returns the note commitment tree at the specified block height.
fn get_commitment_tree(
&self,
block_height: BlockHeight,
) -> Result<Option<sapling::CommitmentTree>, Self::Error>;
/// Returns the incremental witnesses as of the specified block height.
#[allow(clippy::type_complexity)]
fn get_witnesses(
&self,
block_height: BlockHeight,
) -> Result<Vec<(Self::NoteRef, sapling::IncrementalWitness)>, Self::Error>;
/// Returns the nullifiers for notes that the wallet is tracking, along with their associated
/// account IDs, that are either unspent or have not yet been confirmed as spent (in that a
/// spending transaction known to the wallet has not yet been included in a block).
@ -232,16 +250,99 @@ pub trait WalletRead {
) -> Result<HashMap<TransparentAddress, Amount>, Self::Error>;
}
/// Metadata describing the sizes of the zcash note commitment trees as of a particular block.
#[derive(Debug, Clone, Copy)]
pub struct BlockMetadata {
block_height: BlockHeight,
block_hash: BlockHash,
sapling_tree_size: u32,
//TODO: orchard_tree_size: u32
}
impl BlockMetadata {
/// Constructs a new [`BlockMetadata`] value from its constituent parts.
pub fn from_parts(
block_height: BlockHeight,
block_hash: BlockHash,
sapling_tree_size: u32,
) -> Self {
Self {
block_height,
block_hash,
sapling_tree_size,
}
}
/// Returns the block height.
pub fn block_height(&self) -> BlockHeight {
self.block_height
}
/// Returns the hash of the block
pub fn block_hash(&self) -> BlockHash {
self.block_hash
}
/// Returns the size of the Sapling note commitment tree as of the block that this
/// [`BlockMetadata`] describes.
pub fn sapling_tree_size(&self) -> u32 {
self.sapling_tree_size
}
}
/// The subset of information that is relevant to this wallet that has been
/// decrypted and extracted from a [`CompactBlock`].
///
/// [`CompactBlock`]: crate::proto::compact_formats::CompactBlock
pub struct PrunedBlock<'a> {
pub block_height: BlockHeight,
pub block_hash: BlockHash,
pub block_time: u32,
pub commitment_tree: &'a sapling::CommitmentTree,
pub transactions: &'a Vec<WalletTx<sapling::Nullifier>>,
pub struct ScannedBlock<Nf> {
metadata: BlockMetadata,
block_time: u32,
transactions: Vec<WalletTx<Nf>>,
sapling_commitments: Vec<(sapling::Node, Retention<BlockHeight>)>,
}
impl<Nf> ScannedBlock<Nf> {
pub fn from_parts(
metadata: BlockMetadata,
block_time: u32,
transactions: Vec<WalletTx<Nf>>,
sapling_commitments: Vec<(sapling::Node, Retention<BlockHeight>)>,
) -> Self {
Self {
metadata,
block_time,
transactions,
sapling_commitments,
}
}
pub fn height(&self) -> BlockHeight {
self.metadata.block_height
}
pub fn block_hash(&self) -> BlockHash {
self.metadata.block_hash
}
pub fn block_time(&self) -> u32 {
self.block_time
}
pub fn metadata(&self) -> &BlockMetadata {
&self.metadata
}
pub fn transactions(&self) -> &[WalletTx<Nf>] {
&self.transactions
}
pub fn sapling_commitments(&self) -> &[(sapling::Node, Retention<BlockHeight>)] {
&self.sapling_commitments
}
pub fn into_sapling_commitments(self) -> Vec<(sapling::Node, Retention<BlockHeight>)> {
self.sapling_commitments
}
}
/// A transaction that was detected during scanning of the blockchain,
@ -381,16 +482,14 @@ pub trait WalletWrite: WalletRead {
account: AccountId,
) -> Result<Option<UnifiedAddress>, Self::Error>;
/// Updates the state of the wallet database by persisting the provided
/// block information, along with the updated witness data that was
/// produced when scanning the block for transactions pertaining to
/// this wallet.
/// Updates the state of the wallet database by persisting the provided block information,
/// along with the note commitments that were detected when scanning the block for transactions
/// pertaining to this wallet.
#[allow(clippy::type_complexity)]
fn advance_by_block(
fn put_block(
&mut self,
block: &PrunedBlock,
updated_witnesses: &[(Self::NoteRef, sapling::IncrementalWitness)],
) -> Result<Vec<(Self::NoteRef, sapling::IncrementalWitness)>, Self::Error>;
block: ScannedBlock<sapling::Nullifier>,
) -> Result<Vec<Self::NoteRef>, Self::Error>;
/// Caches a decrypted transaction in the persistent wallet store.
fn store_decrypted_tx(
@ -424,10 +523,35 @@ pub trait WalletWrite: WalletRead {
) -> Result<Self::UtxoRef, Self::Error>;
}
/// This trait describes a capability for manipulating wallet note commitment trees.
///
/// At present, this only serves the Sapling protocol, but it will be modified to
/// also provide operations related to Orchard note commitment trees in the future.
pub trait WalletCommitmentTrees {
type Error;
type SaplingShardStore<'a>: ShardStore<
H = sapling::Node,
CheckpointId = BlockHeight,
Error = Self::Error,
>;
fn with_sapling_tree_mut<F, A, E>(&mut self, callback: F) -> Result<A, E>
where
for<'a> F: FnMut(
&'a mut ShardTree<
Self::SaplingShardStore<'a>,
{ sapling::NOTE_COMMITMENT_TREE_DEPTH },
SAPLING_SHARD_HEIGHT,
>,
) -> Result<A, E>,
E: From<ShardTreeError<Self::Error>>;
}
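A hedged usage sketch for this trait (the wallet value `db` and the checkpoint depth of 0 are assumptions): the callback receives mutable access to the Sapling `ShardTree`, so a caller can, for example, read the anchor at the most recent checkpoint.

db.with_sapling_tree_mut::<_, _, ShardTreeError<_>>(|tree| {
    // The root at checkpoint depth 0 is the anchor that newly created spends
    // would be validated against.
    let _anchor = tree.root_at_checkpoint(0)?;
    Ok(())
})?;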
#[cfg(feature = "test-dependencies")]
pub mod testing {
use secrecy::{ExposeSecret, SecretVec};
use std::collections::HashMap;
use shardtree::{MemoryShardStore, ShardTree, ShardTreeError};
use std::{collections::HashMap, convert::Infallible, ops::Range};
use zcash_primitives::{
block::BlockHash,
@ -449,11 +573,26 @@ pub mod testing {
};
use super::{
DecryptedTransaction, NullifierQuery, PrunedBlock, SentTransaction, WalletRead, WalletWrite,
BlockMetadata, DecryptedTransaction, NullifierQuery, ScannedBlock, SentTransaction,
WalletCommitmentTrees, WalletRead, WalletWrite, SAPLING_SHARD_HEIGHT,
};
pub struct MockWalletDb {
pub network: Network,
pub sapling_tree: ShardTree<
MemoryShardStore<sapling::Node, BlockHeight>,
{ SAPLING_SHARD_HEIGHT * 2 },
SAPLING_SHARD_HEIGHT,
>,
}
impl MockWalletDb {
pub fn new(network: Network) -> Self {
Self {
network,
sapling_tree: ShardTree::new(MemoryShardStore::empty(), 100),
}
}
}
impl WalletRead for MockWalletDb {
@ -465,6 +604,25 @@ pub mod testing {
Ok(None)
}
fn block_metadata(
&self,
_height: BlockHeight,
) -> Result<Option<BlockMetadata>, Self::Error> {
Ok(None)
}
fn block_fully_scanned(&self) -> Result<Option<BlockMetadata>, Self::Error> {
Ok(None)
}
fn suggest_scan_ranges(
&self,
_batch_size: usize,
_limit: usize,
) -> Result<Vec<Range<BlockHeight>>, Self::Error> {
Ok(vec![])
}
fn get_min_unspent_height(&self) -> Result<Option<BlockHeight>, Self::Error> {
Ok(None)
}
@ -524,21 +682,6 @@ pub mod testing {
Err(())
}
fn get_commitment_tree(
&self,
_block_height: BlockHeight,
) -> Result<Option<sapling::CommitmentTree>, Self::Error> {
Ok(None)
}
#[allow(clippy::type_complexity)]
fn get_witnesses(
&self,
_block_height: BlockHeight,
) -> Result<Vec<(Self::NoteRef, sapling::IncrementalWitness)>, Self::Error> {
Ok(Vec::new())
}
fn get_sapling_nullifiers(
&self,
_query: NullifierQuery,
@ -611,11 +754,10 @@ pub mod testing {
}
#[allow(clippy::type_complexity)]
fn advance_by_block(
fn put_block(
&mut self,
_block: &PrunedBlock,
_updated_witnesses: &[(Self::NoteRef, sapling::IncrementalWitness)],
) -> Result<Vec<(Self::NoteRef, sapling::IncrementalWitness)>, Self::Error> {
_block: ScannedBlock<sapling::Nullifier>,
) -> Result<Vec<Self::NoteRef>, Self::Error> {
Ok(vec![])
}
@ -645,4 +787,23 @@ pub mod testing {
Ok(0)
}
}
impl WalletCommitmentTrees for MockWalletDb {
type Error = Infallible;
type SaplingShardStore<'a> = MemoryShardStore<sapling::Node, BlockHeight>;
fn with_sapling_tree_mut<F, A, E>(&mut self, mut callback: F) -> Result<A, E>
where
for<'a> F: FnMut(
&'a mut ShardTree<
Self::SaplingShardStore<'a>,
{ sapling::NOTE_COMMITMENT_TREE_DEPTH },
SAPLING_SHARD_HEIGHT,
>,
) -> Result<A, E>,
E: From<ShardTreeError<Infallible>>,
{
callback(&mut self.sapling_tree)
}
}
}

View File

@ -17,7 +17,6 @@
//! BlockSource,
//! error::Error,
//! scan_cached_blocks,
//! validate_chain,
//! testing as chain_testing,
//! },
//! testing,
@ -30,81 +29,42 @@
//! # test();
//! # }
//! #
//! # fn test() -> Result<(), Error<(), Infallible, u32>> {
//! # fn test() -> Result<(), Error<(), Infallible>> {
//! let network = Network::TestNetwork;
//! let block_source = chain_testing::MockBlockSource;
//! let mut db_data = testing::MockWalletDb {
//! network: Network::TestNetwork
//! };
//! let mut db_data = testing::MockWalletDb::new(Network::TestNetwork);
//!
//! // 1) Download new CompactBlocks into block_source.
//!
//! // 2) Run the chain validator on the received blocks.
//! //
//! // Given that we assume the server always gives us correct-at-the-time blocks, any
//! // errors are in the blocks we have previously cached or scanned.
//! let max_height_hash = db_data.get_max_height_hash().map_err(Error::Wallet)?;
//! if let Err(e) = validate_chain(&block_source, max_height_hash, None) {
//! match e {
//! Error::Chain(e) => {
//! // a) Pick a height to rewind to.
//! //
//! // This might be informed by some external chain reorg information, or
//! // heuristics such as the platform, available bandwidth, size of recent
//! // CompactBlocks, etc.
//! let rewind_height = e.at_height() - 10;
//!
//! // b) Rewind scanned block information.
//! db_data.truncate_to_height(rewind_height);
//!
//! // c) Delete cached blocks from rewind_height onwards.
//! //
//! // This does imply that assumed-valid blocks will be re-downloaded, but it
//! // is also possible that in the intervening time, a chain reorg has
//! // occurred that orphaned some of those blocks.
//!
//! // d) If there is some separate thread or service downloading
//! // CompactBlocks, tell it to go back and download from rewind_height
//! // onwards.
//! },
//! e => {
//! // handle or return other errors
//!
//! }
//! }
//! }
//!
//! // 3) Scan (any remaining) cached blocks.
//! // 2) FIXME: Obtain necessary block metadata for continuity checking?
//! //
//! // 3) Scan cached blocks.
//! //
//! // FIXME: update documentation on how to detect when a rewind is required.
//! //
//! // At this point, the cache and scanned data are locally consistent (though not
//! // necessarily consistent with the latest chain tip - this would be discovered the
//! // next time this codepath is executed after new blocks are received).
//! scan_cached_blocks(&network, &block_source, &mut db_data, None)
//! scan_cached_blocks(&network, &block_source, &mut db_data, None, None)
//! # }
//! # }
//! ```
use std::convert::Infallible;
use zcash_primitives::{
block::BlockHash,
consensus::{self, BlockHeight},
sapling::{self, note_encryption::PreparedIncomingViewingKey, Nullifier},
sapling::{self, note_encryption::PreparedIncomingViewingKey},
zip32::Scope,
};
use crate::{
data_api::{PrunedBlock, WalletWrite},
data_api::{NullifierQuery, WalletWrite},
proto::compact_formats::CompactBlock,
scan::BatchRunner,
wallet::WalletTx,
welding_rig::{add_block_to_runner, scan_block_with_runner},
scanning::{add_block_to_runner, scan_block_with_runner},
};
pub mod error;
use error::{ChainError, Error};
use super::NullifierQuery;
use error::Error;
/// This trait provides sequential access to raw blockchain data via a callback-oriented
/// API.
@ -119,112 +79,47 @@ pub trait BlockSource {
/// as part of processing each row.
/// * `NoteRefT`: the type of note identifiers in the wallet data store, for use in
/// reporting errors related to specific notes.
fn with_blocks<F, WalletErrT, NoteRefT>(
fn with_blocks<F, WalletErrT>(
&self,
from_height: Option<BlockHeight>,
limit: Option<u32>,
with_row: F,
) -> Result<(), error::Error<WalletErrT, Self::Error, NoteRefT>>
) -> Result<(), error::Error<WalletErrT, Self::Error>>
where
F: FnMut(CompactBlock) -> Result<(), error::Error<WalletErrT, Self::Error, NoteRefT>>;
}
/// Checks that the scanned blocks in the data database, when combined with the recent
/// `CompactBlock`s in the block_source database, form a valid chain.
///
/// This function is built on the core assumption that the information provided in the
/// block source is more likely to be accurate than the previously-scanned information.
/// This follows from the design (and trust) assumption that the `lightwalletd` server
/// provides accurate block information as of the time it was requested.
///
/// Arguments:
/// - `block_source` Source of compact blocks
/// - `validate_from` Height & hash of last validated block;
/// - `limit` the maximum number of blocks that will be validated. Callers providing
/// a `limit` argument are responsible for making subsequent calls to `validate_chain()`
/// to complete validating the remaining blocks stored in the `block_source`. If `None`
/// is provided, no limit is set on the validation and the upper bound of the
/// validation range will be the latest height present in the `block_source`.
///
/// Returns:
/// - `Ok(())` if the combined chain is valid up to the given height
/// and block hash.
/// - `Err(Error::Chain(cause))` if the combined chain is invalid.
/// - `Err(e)` if there was an error during validation unrelated to chain validity.
pub fn validate_chain<BlockSourceT>(
block_source: &BlockSourceT,
mut validate_from: Option<(BlockHeight, BlockHash)>,
limit: Option<u32>,
) -> Result<(), Error<Infallible, BlockSourceT::Error, Infallible>>
where
BlockSourceT: BlockSource,
{
// The block source will contain blocks above the `validate_from` height. Validate from that
// maximum height up to the chain tip, returning the hash of the block found in the block
// source at the `validate_from` height, which can then be used to verify chain integrity by
// comparing against the `validate_from` hash.
block_source.with_blocks::<_, Infallible, Infallible>(
validate_from.map(|(h, _)| h),
limit,
move |block| {
if let Some((valid_height, valid_hash)) = validate_from {
if block.height() != valid_height + 1 {
return Err(ChainError::block_height_discontinuity(
valid_height + 1,
block.height(),
)
.into());
} else if block.prev_hash() != valid_hash {
return Err(ChainError::prev_hash_mismatch(block.height()).into());
}
}
validate_from = Some((block.height(), block.hash()));
Ok(())
},
)
F: FnMut(CompactBlock) -> Result<(), error::Error<WalletErrT, Self::Error>>;
}
/// Scans at most `limit` new blocks added to the block source for any transactions received by the
/// tracked accounts.
///
/// If the `from_height` argument is not `None`, then this method will begin requesting blocks
/// from the provided block source at the specified height; if `from_height` is `None`, then
/// scanning will begin at the first block after the position to which the wallet has previously
/// fully scanned the chain, thereby beginning or continuing a linear scan over all blocks.
///
/// This function will return without error after scanning at most `limit` new blocks, to enable
/// the caller to update their UI with scanning progress. Repeatedly calling this function will
/// process sequential ranges of blocks, and is equivalent to calling `scan_cached_blocks` and
/// passing `None` for the optional `limit` value.
/// the caller to update their UI with scanning progress. Repeatedly calling this function with
/// `from_height == None` will process sequential ranges of blocks.
///
/// This function pays attention only to cached blocks with heights greater than the highest
/// scanned block in `data`. Cached blocks with lower heights are not verified against
/// previously-scanned blocks. In particular, this function **assumes** that the caller is handling
/// rollbacks.
///
/// For brand-new light client databases, this function starts scanning from the Sapling activation
/// height. This height can be fast-forwarded to a more recent block by initializing the client
/// database with a starting block (for example, calling `init_blocks_table` before this function
/// if using `zcash_client_sqlite`).
///
/// Scanned blocks are required to be height-sequential. If a block is missing from the block
/// source, an error will be returned with cause [`error::Cause::BlockHeightDiscontinuity`].
/// For brand-new light client databases, if `from_height == None` this function starts scanning
/// from the Sapling activation height. This height can be fast-forwarded to a more recent block by
/// initializing the client database with a starting block (for example, calling
/// `init_blocks_table` before this function if using `zcash_client_sqlite`).
#[tracing::instrument(skip(params, block_source, data_db))]
#[allow(clippy::type_complexity)]
pub fn scan_cached_blocks<ParamsT, DbT, BlockSourceT>(
params: &ParamsT,
block_source: &BlockSourceT,
data_db: &mut DbT,
from_height: Option<BlockHeight>,
limit: Option<u32>,
) -> Result<(), Error<DbT::Error, BlockSourceT::Error, DbT::NoteRef>>
) -> Result<(), Error<DbT::Error, BlockSourceT::Error>>
where
ParamsT: consensus::Parameters + Send + 'static,
BlockSourceT: BlockSource,
DbT: WalletWrite,
{
// Recall where we synced up to previously.
let mut last_height = data_db
.block_height_extrema()
.map_err(Error::Wallet)?
.map(|(_, max)| max);
// Fetch the UnifiedFullViewingKeys we are tracking
let ufvks = data_db
.get_unified_full_viewing_keys()
@ -236,25 +131,8 @@ where
.filter_map(|(account, ufvk)| ufvk.sapling().map(move |k| (account, k)))
.collect();
// Get the most recent CommitmentTree
let mut tree = last_height.map_or_else(
|| Ok(sapling::CommitmentTree::empty()),
|h| {
data_db
.get_commitment_tree(h)
.map(|t| t.unwrap_or_else(sapling::CommitmentTree::empty))
.map_err(Error::Wallet)
},
)?;
// Get most recent incremental witnesses for the notes we are tracking
let mut witnesses = last_height.map_or_else(
|| Ok(vec![]),
|h| data_db.get_witnesses(h).map_err(Error::Wallet),
)?;
// Get the nullifiers for the notes we are tracking
let mut nullifiers = data_db
// Get the nullifiers for the unspent notes we are tracking
let mut sapling_nullifiers = data_db
.get_sapling_nullifiers(NullifierQuery::Unspent)
.map_err(Error::Wallet)?;
@ -271,106 +149,61 @@ where
.map(|(tag, ivk)| (tag, PreparedIncomingViewingKey::new(&ivk))),
);
block_source.with_blocks::<_, DbT::Error, DbT::NoteRef>(
last_height,
limit,
|block: CompactBlock| {
add_block_to_runner(params, block, &mut batch_runner);
Ok(())
},
)?;
// Start at either the provided height, or where we synced up to previously.
let (scan_from, mut prior_block_metadata) = match from_height {
Some(h) => {
// if we are provided with a starting height, obtain the metadata for the previous
// block (if any is available)
(
Some(h),
if h > BlockHeight::from(0) {
data_db.block_metadata(h - 1).map_err(Error::Wallet)?
} else {
None
},
)
}
None => {
let last_scanned = data_db.block_fully_scanned().map_err(Error::Wallet)?;
last_scanned.map_or_else(|| (None, None), |m| (Some(m.block_height + 1), Some(m)))
}
};
block_source.with_blocks::<_, DbT::Error>(scan_from, limit, |block: CompactBlock| {
add_block_to_runner(params, block, &mut batch_runner);
Ok(())
})?;
batch_runner.flush();
block_source.with_blocks::<_, DbT::Error, DbT::NoteRef>(
last_height,
limit,
|block: CompactBlock| {
let current_height = block.height();
block_source.with_blocks::<_, DbT::Error>(scan_from, limit, |block: CompactBlock| {
let scanned_block = scan_block_with_runner(
params,
block,
&dfvks,
&sapling_nullifiers,
prior_block_metadata.as_ref(),
Some(&mut batch_runner),
)
.map_err(Error::Scan)?;
// Scanned blocks MUST be height-sequential.
if let Some(h) = last_height {
if current_height != (h + 1) {
return Err(
ChainError::block_height_discontinuity(h + 1, current_height).into(),
);
}
}
let spent_nf: Vec<&sapling::Nullifier> = scanned_block
.transactions
.iter()
.flat_map(|tx| tx.sapling_spends.iter().map(|spend| spend.nf()))
.collect();
let block_hash = BlockHash::from_slice(&block.hash);
let block_time = block.time;
let txs: Vec<WalletTx<Nullifier>> = {
let mut witness_refs: Vec<_> = witnesses.iter_mut().map(|w| &mut w.1).collect();
scan_block_with_runner(
params,
block,
&dfvks,
&nullifiers,
&mut tree,
&mut witness_refs[..],
Some(&mut batch_runner),
)
};
// Enforce that all roots match. This is slow, so only include in debug builds.
#[cfg(debug_assertions)]
{
let cur_root = tree.root();
for row in &witnesses {
if row.1.root() != cur_root {
return Err(
ChainError::invalid_witness_anchor(current_height, row.0).into()
);
}
}
for tx in &txs {
for output in tx.sapling_outputs.iter() {
if output.witness().root() != cur_root {
return Err(ChainError::invalid_new_witness_anchor(
current_height,
tx.txid,
output.index(),
output.witness().root(),
)
.into());
}
}
}
}
let new_witnesses = data_db
.advance_by_block(
&(PrunedBlock {
block_height: current_height,
block_hash,
block_time,
commitment_tree: &tree,
transactions: &txs,
}),
&witnesses,
)
.map_err(Error::Wallet)?;
let spent_nf: Vec<&Nullifier> = txs
sapling_nullifiers.retain(|(_, nf)| !spent_nf.contains(&nf));
sapling_nullifiers.extend(scanned_block.transactions.iter().flat_map(|tx| {
tx.sapling_outputs
.iter()
.flat_map(|tx| tx.sapling_spends.iter().map(|spend| spend.nf()))
.collect();
nullifiers.retain(|(_, nf)| !spent_nf.contains(&nf));
nullifiers.extend(txs.iter().flat_map(|tx| {
tx.sapling_outputs
.iter()
.map(|out| (out.account(), *out.nf()))
}));
.map(|out| (out.account(), *out.nf()))
}));
witnesses.extend(new_witnesses);
last_height = Some(current_height);
Ok(())
},
)?;
prior_block_metadata = Some(*scanned_block.metadata());
data_db.put_block(scanned_block).map_err(Error::Wallet)?;
Ok(())
})?;
Ok(())
}
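A hedged example of the new `from_height` parameter (the starting height and limit below are arbitrary illustrations, and `network`, `block_source`, and `db_data` are assumed to exist): an application that has decided to re-process a particular region of the chain can restart the scan there explicitly rather than continuing from the fully-scanned height.

let rescan_from = BlockHeight::from(1_500_000);
scan_cached_blocks(&network, &block_source, &mut db_data, Some(rescan_from), Some(1_000))?;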
@ -389,14 +222,14 @@ pub mod testing {
impl BlockSource for MockBlockSource {
type Error = Infallible;
fn with_blocks<F, DbErrT, NoteRef>(
fn with_blocks<F, DbErrT>(
&self,
_from_height: Option<BlockHeight>,
_limit: Option<u32>,
_with_row: F,
) -> Result<(), Error<DbErrT, Infallible, NoteRef>>
) -> Result<(), Error<DbErrT, Infallible>>
where
F: FnMut(CompactBlock) -> Result<(), Error<DbErrT, Infallible, NoteRef>>,
F: FnMut(CompactBlock) -> Result<(), Error<DbErrT, Infallible>>,
{
Ok(())
}

View File

@ -3,134 +3,11 @@
use std::error;
use std::fmt::{self, Debug, Display};
use zcash_primitives::{consensus::BlockHeight, sapling, transaction::TxId};
/// The underlying cause of a [`ChainError`].
#[derive(Copy, Clone, Debug)]
pub enum Cause<NoteRef> {
/// The hash of the parent block given by a proposed new chain tip does not match the hash of
/// the current chain tip.
PrevHashMismatch,
/// The block height field of the proposed new chain tip is not equal to the height of the
/// previous chain tip + 1. This variant stores a copy of the incorrect height value for
/// reporting purposes.
BlockHeightDiscontinuity(BlockHeight),
/// The root of an output's witness tree in a newly arrived transaction does not correspond to the
/// root of the stored commitment tree at the recorded height.
///
/// This error is currently only produced when performing the slow checks that are enabled by
/// compiling with `-C debug-assertions`.
InvalidNewWitnessAnchor {
/// The id of the transaction containing the mismatched witness.
txid: TxId,
/// The index of the shielded output within the transaction where the witness root does not
/// match.
index: usize,
/// The root of the witness that failed to match the root of the current note commitment
/// tree.
node: sapling::Node,
},
/// The root of an output's witness tree in a previously stored transaction does not correspond
/// to the root of the current commitment tree.
///
/// This error is currently only produced when performing the slow checks that are enabled by
/// compiling with `-C debug-assertions`.
InvalidWitnessAnchor(NoteRef),
}
/// Errors that may occur in chain scanning or validation.
#[derive(Copy, Clone, Debug)]
pub struct ChainError<NoteRef> {
at_height: BlockHeight,
cause: Cause<NoteRef>,
}
impl<N: Display> fmt::Display for ChainError<N> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self.cause {
Cause::PrevHashMismatch => write!(
f,
"The parent hash of proposed block does not correspond to the block hash at height {}.",
self.at_height
),
Cause::BlockHeightDiscontinuity(h) => {
write!(f, "Block height discontinuity at height {}; next height is : {}", self.at_height, h)
}
Cause::InvalidNewWitnessAnchor { txid, index, node } => write!(
f,
"New witness for output {} in tx {} at height {} has incorrect anchor: {:?}",
index, txid, self.at_height, node,
),
Cause::InvalidWitnessAnchor(id_note) => {
write!(f, "Witness for note {} has incorrect anchor for height {}", id_note, self.at_height)
}
}
}
}
impl<NoteRef> ChainError<NoteRef> {
/// Constructs an error that indicates block hashes failed to chain.
///
/// * `at_height` the height of the block whose parent hash does not match the hash of the
/// previous block
pub fn prev_hash_mismatch(at_height: BlockHeight) -> Self {
ChainError {
at_height,
cause: Cause::PrevHashMismatch,
}
}
/// Constructs an error that indicates a gap in block heights.
///
/// * `at_height` the height of the block being added to the chain.
/// * `prev_chain_tip` the height of the previous chain tip.
pub fn block_height_discontinuity(at_height: BlockHeight, prev_chain_tip: BlockHeight) -> Self {
ChainError {
at_height,
cause: Cause::BlockHeightDiscontinuity(prev_chain_tip),
}
}
/// Constructs an error that indicates a mismatch between an updated note's witness and the
/// root of the current note commitment tree.
pub fn invalid_witness_anchor(at_height: BlockHeight, note_ref: NoteRef) -> Self {
ChainError {
at_height,
cause: Cause::InvalidWitnessAnchor(note_ref),
}
}
/// Constructs an error that indicates a mismatch between a new note's witness and the root of
/// the current note commitment tree.
pub fn invalid_new_witness_anchor(
at_height: BlockHeight,
txid: TxId,
index: usize,
node: sapling::Node,
) -> Self {
ChainError {
at_height,
cause: Cause::InvalidNewWitnessAnchor { txid, index, node },
}
}
/// Returns the block height at which this error was discovered.
pub fn at_height(&self) -> BlockHeight {
self.at_height
}
/// Returns the cause of this error.
pub fn cause(&self) -> &Cause<NoteRef> {
&self.cause
}
}
use crate::scanning::ScanError;
/// Errors related to chain validation and scanning.
#[derive(Debug)]
pub enum Error<WalletError, BlockSourceError, NoteRef> {
pub enum Error<WalletError, BlockSourceError> {
/// An error that was produced by wallet operations in the course of scanning the chain.
Wallet(WalletError),
@ -141,10 +18,10 @@ pub enum Error<WalletError, BlockSourceError, NoteRef> {
/// A block that was received violated rules related to chain continuity or contained note
/// commitments that could not be reconciled with the note commitment tree(s) maintained by the
/// wallet.
Chain(ChainError<NoteRef>),
Scan(ScanError),
}
impl<WE: fmt::Display, BE: fmt::Display, N: Display> fmt::Display for Error<WE, BE, N> {
impl<WE: fmt::Display, BE: fmt::Display> fmt::Display for Error<WE, BE> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self {
Error::Wallet(e) => {
@ -161,18 +38,17 @@ impl<WE: fmt::Display, BE: fmt::Display, N: Display> fmt::Display for Error<WE,
e
)
}
Error::Chain(err) => {
write!(f, "{}", err)
Error::Scan(e) => {
write!(f, "Scanning produced the following error: {}", e)
}
}
}
}
impl<WE, BE, N> error::Error for Error<WE, BE, N>
impl<WE, BE> error::Error for Error<WE, BE>
where
WE: Debug + Display + error::Error + 'static,
BE: Debug + Display + error::Error + 'static,
N: Debug + Display,
{
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match &self {
@ -183,8 +59,8 @@ where
}
}
impl<WE, BSE, N> From<ChainError<N>> for Error<WE, BSE, N> {
fn from(e: ChainError<N>) -> Self {
Error::Chain(e)
impl<WE, BSE> From<ScanError> for Error<WE, BSE> {
fn from(e: ScanError) -> Self {
Error::Scan(e)
}
}

View File

@ -1,5 +1,6 @@
//! Types for wallet error handling.
use shardtree::ShardTreeError;
use std::error;
use std::fmt::{self, Debug, Display};
use zcash_primitives::{
@ -20,10 +21,13 @@ use zcash_primitives::{legacy::TransparentAddress, zip32::DiversifierIndex};
/// Errors that can occur as a consequence of wallet operations.
#[derive(Debug)]
pub enum Error<DataSourceError, SelectionError, FeeError, NoteRef> {
pub enum Error<DataSourceError, CommitmentTreeError, SelectionError, FeeError, NoteRef> {
/// An error occurred retrieving data from the underlying data source
DataSource(DataSourceError),
/// An error in computations involving the note commitment trees.
CommitmentTree(ShardTreeError<CommitmentTreeError>),
/// An error in note selection
NoteSelection(SelectionError),
@ -60,9 +64,10 @@ pub enum Error<DataSourceError, SelectionError, FeeError, NoteRef> {
ChildIndexOutOfRange(DiversifierIndex),
}
impl<DE, SE, FE, N> fmt::Display for Error<DE, SE, FE, N>
impl<DE, CE, SE, FE, N> fmt::Display for Error<DE, CE, SE, FE, N>
where
DE: fmt::Display,
CE: fmt::Display,
SE: fmt::Display,
FE: fmt::Display,
N: fmt::Display,
@ -76,6 +81,9 @@ where
e
)
}
Error::CommitmentTree(e) => {
write!(f, "An error occurred in querying or updating a note commitment tree: {}", e)
}
Error::NoteSelection(e) => {
write!(f, "Note selection encountered the following error: {}", e)
}
@ -120,9 +128,10 @@ where
}
}
impl<DE, SE, FE, N> error::Error for Error<DE, SE, FE, N>
impl<DE, CE, SE, FE, N> error::Error for Error<DE, CE, SE, FE, N>
where
DE: Debug + Display + error::Error + 'static,
CE: Debug + Display + error::Error + 'static,
SE: Debug + Display + error::Error + 'static,
FE: Debug + Display + 'static,
N: Debug + Display,
@ -130,6 +139,7 @@ where
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match &self {
Error::DataSource(e) => Some(e),
Error::CommitmentTree(e) => Some(e),
Error::NoteSelection(e) => Some(e),
Error::Builder(e) => Some(e),
_ => None,
@ -137,19 +147,19 @@ where
}
}
impl<DE, SE, FE, N> From<builder::Error<FE>> for Error<DE, SE, FE, N> {
impl<DE, CE, SE, FE, N> From<builder::Error<FE>> for Error<DE, CE, SE, FE, N> {
fn from(e: builder::Error<FE>) -> Self {
Error::Builder(e)
}
}
impl<DE, SE, FE, N> From<BalanceError> for Error<DE, SE, FE, N> {
impl<DE, CE, SE, FE, N> From<BalanceError> for Error<DE, CE, SE, FE, N> {
fn from(e: BalanceError) -> Self {
Error::BalanceError(e)
}
}
impl<DE, SE, FE, N> From<InputSelectorError<DE, SE>> for Error<DE, SE, FE, N> {
impl<DE, CE, SE, FE, N> From<InputSelectorError<DE, SE>> for Error<DE, CE, SE, FE, N> {
fn from(e: InputSelectorError<DE, SE>) -> Self {
match e {
InputSelectorError::DataSource(e) => Error::DataSource(e),
@ -161,18 +171,25 @@ impl<DE, SE, FE, N> From<InputSelectorError<DE, SE>> for Error<DE, SE, FE, N> {
available,
required,
},
InputSelectorError::SyncRequired => Error::ScanRequired,
}
}
}
impl<DE, SE, FE, N> From<sapling::builder::Error> for Error<DE, SE, FE, N> {
impl<DE, CE, SE, FE, N> From<sapling::builder::Error> for Error<DE, CE, SE, FE, N> {
fn from(e: sapling::builder::Error) -> Self {
Error::Builder(builder::Error::SaplingBuild(e))
}
}
impl<DE, SE, FE, N> From<transparent::builder::Error> for Error<DE, SE, FE, N> {
impl<DE, CE, SE, FE, N> From<transparent::builder::Error> for Error<DE, CE, SE, FE, N> {
fn from(e: transparent::builder::Error) -> Self {
Error::Builder(builder::Error::TransparentBuild(e))
}
}
impl<DE, CE, SE, FE, N> From<ShardTreeError<CE>> for Error<DE, CE, SE, FE, N> {
fn from(e: ShardTreeError<CE>) -> Self {
Error::CommitmentTree(e)
}
}

View File

@ -1,8 +1,9 @@
use std::convert::Infallible;
use std::fmt::Debug;
use std::{convert::Infallible, num::NonZeroU32};
use shardtree::{ShardStore, ShardTree, ShardTreeError};
use zcash_primitives::{
consensus::{self, NetworkUpgrade},
consensus::{self, BlockHeight, NetworkUpgrade},
memo::MemoBytes,
sapling::{
self,
@ -23,7 +24,8 @@ use crate::{
address::RecipientAddress,
data_api::{
error::Error, wallet::input_selection::Proposal, DecryptedTransaction, PoolType, Recipient,
SentTransaction, SentTransactionOutput, WalletWrite,
SentTransaction, SentTransactionOutput, WalletCommitmentTrees, WalletRead, WalletWrite,
SAPLING_SHARD_HEIGHT,
},
decrypt_transaction,
fees::{self, ChangeValue, DustOutputPolicy},
@ -117,12 +119,13 @@ where
/// can allow the sender to view the resulting notes on the blockchain.
/// * `min_confirmations`: The minimum number of confirmations that a previously
/// received note must have in the blockchain in order to be considered for being
/// spent. A value of 10 confirmations is recommended.
/// spent. A value of 10 confirmations is recommended and 0-conf transactions are
/// not supported.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "test-dependencies")]
/// # #[cfg(all(feature = "test-dependencies", feature = "local-prover"))]
/// # {
/// use tempfile::NamedTempFile;
/// use zcash_primitives::{
@ -196,11 +199,12 @@ pub fn create_spend_to_address<DbT, ParamsT>(
amount: Amount,
memo: Option<MemoBytes>,
ovk_policy: OvkPolicy,
min_confirmations: u32,
min_confirmations: NonZeroU32,
) -> Result<
DbT::TxRef,
Error<
DbT::Error,
<DbT as WalletRead>::Error,
<DbT as WalletCommitmentTrees>::Error,
GreedyInputSelectorError<BalanceError, DbT::NoteRef>,
Infallible,
DbT::NoteRef,
@ -208,7 +212,7 @@ pub fn create_spend_to_address<DbT, ParamsT>(
>
where
ParamsT: consensus::Parameters + Clone,
DbT: WalletWrite,
DbT: WalletWrite + WalletCommitmentTrees,
DbT::NoteRef: Copy + Eq + Ord,
{
let req = zip321::TransactionRequest::new(vec![Payment {
@ -284,7 +288,8 @@ where
/// can allow the sender to view the resulting notes on the blockchain.
/// * `min_confirmations`: The minimum number of confirmations that a previously
/// received note must have in the blockchain in order to be considered for being
/// spent. A value of 10 confirmations is recommended.
/// spent. A value of 10 confirmations is recommended and 0-conf transactions are
/// not supported.
///
/// [`sapling::TxProver`]: zcash_primitives::sapling::prover::TxProver
#[allow(clippy::too_many_arguments)]
@ -297,13 +302,19 @@ pub fn spend<DbT, ParamsT, InputsT>(
usk: &UnifiedSpendingKey,
request: zip321::TransactionRequest,
ovk_policy: OvkPolicy,
min_confirmations: u32,
min_confirmations: NonZeroU32,
) -> Result<
DbT::TxRef,
Error<DbT::Error, InputsT::Error, <InputsT::FeeRule as FeeRule>::Error, DbT::NoteRef>,
Error<
<DbT as WalletRead>::Error,
<DbT as WalletCommitmentTrees>::Error,
InputsT::Error,
<InputsT::FeeRule as FeeRule>::Error,
DbT::NoteRef,
>,
>
where
DbT: WalletWrite,
DbT: WalletWrite + WalletCommitmentTrees,
DbT::TxRef: Copy + Debug,
DbT::NoteRef: Copy + Eq + Ord,
ParamsT: consensus::Parameters + Clone,
@ -323,7 +334,16 @@ where
min_confirmations,
)?;
create_proposed_transaction(wallet_db, params, prover, usk, ovk_policy, proposal, None)
create_proposed_transaction(
wallet_db,
params,
prover,
usk,
ovk_policy,
proposal,
min_confirmations,
None,
)
}
/// Select transaction inputs, compute fees, and construct a proposal for a transaction
@ -331,16 +351,22 @@ where
/// [`create_proposed_transaction`].
#[allow(clippy::too_many_arguments)]
#[allow(clippy::type_complexity)]
pub fn propose_transfer<DbT, ParamsT, InputsT>(
pub fn propose_transfer<DbT, ParamsT, InputsT, CommitmentTreeErrT>(
wallet_db: &mut DbT,
params: &ParamsT,
spend_from_account: AccountId,
input_selector: &InputsT,
request: zip321::TransactionRequest,
min_confirmations: u32,
min_confirmations: NonZeroU32,
) -> Result<
Proposal<InputsT::FeeRule, DbT::NoteRef>,
Error<DbT::Error, InputsT::Error, <InputsT::FeeRule as FeeRule>::Error, DbT::NoteRef>,
Error<
DbT::Error,
CommitmentTreeErrT,
InputsT::Error,
<InputsT::FeeRule as FeeRule>::Error,
DbT::NoteRef,
>,
>
where
DbT: WalletWrite,
@ -348,20 +374,13 @@ where
ParamsT: consensus::Parameters + Clone,
InputsT: InputSelector<DataSource = DbT>,
{
// Target the next block, assuming we are up-to-date.
let (target_height, anchor_height) = wallet_db
.get_target_and_anchor_heights(min_confirmations)
.map_err(Error::DataSource)
.and_then(|x| x.ok_or(Error::ScanRequired))?;
input_selector
.propose_transaction(
params,
wallet_db,
spend_from_account,
anchor_height,
target_height,
request,
min_confirmations,
)
.map_err(Error::from)
}
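A hedged sketch of the two-step flow described above (the `wallet_db`, `params`, `prover`, `usk`, `account`, `request`, and `input_selector` values are assumptions, and type annotations the compiler may require are elided): a proposal is prepared first, then executed with the same `min_confirmations` value.

let min_confirmations = NonZeroU32::new(10).expect("10 is nonzero");
let proposal = propose_transfer(
    &mut wallet_db,
    &params,
    account,
    &input_selector,
    request,
    min_confirmations,
)?;
let tx_ref = create_proposed_transaction(
    &mut wallet_db,
    &params,
    &prover,
    &usk,
    OvkPolicy::Sender,
    proposal,
    min_confirmations,
    None,
)?;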
@ -369,16 +388,22 @@ where
#[cfg(feature = "transparent-inputs")]
#[allow(clippy::too_many_arguments)]
#[allow(clippy::type_complexity)]
pub fn propose_shielding<DbT, ParamsT, InputsT>(
pub fn propose_shielding<DbT, ParamsT, InputsT, CommitmentTreeErrT>(
wallet_db: &mut DbT,
params: &ParamsT,
input_selector: &InputsT,
shielding_threshold: NonNegativeAmount,
from_addrs: &[TransparentAddress],
min_confirmations: u32,
min_confirmations: NonZeroU32,
) -> Result<
Proposal<InputsT::FeeRule, DbT::NoteRef>,
Error<DbT::Error, InputsT::Error, <InputsT::FeeRule as FeeRule>::Error, DbT::NoteRef>,
Error<
DbT::Error,
CommitmentTreeErrT,
InputsT::Error,
<InputsT::FeeRule as FeeRule>::Error,
DbT::NoteRef,
>,
>
where
ParamsT: consensus::Parameters,
@ -386,19 +411,13 @@ where
DbT::NoteRef: Copy + Eq + Ord,
InputsT: InputSelector<DataSource = DbT>,
{
let (target_height, latest_anchor) = wallet_db
.get_target_and_anchor_heights(min_confirmations)
.map_err(Error::DataSource)
.and_then(|x| x.ok_or(Error::ScanRequired))?;
input_selector
.propose_shielding(
params,
wallet_db,
shielding_threshold,
from_addrs,
latest_anchor,
target_height,
min_confirmations,
)
.map_err(Error::from)
}
@ -417,10 +436,20 @@ pub fn create_proposed_transaction<DbT, ParamsT, InputsErrT, FeeRuleT>(
usk: &UnifiedSpendingKey,
ovk_policy: OvkPolicy,
proposal: Proposal<FeeRuleT, DbT::NoteRef>,
min_confirmations: NonZeroU32,
change_memo: Option<MemoBytes>,
) -> Result<DbT::TxRef, Error<DbT::Error, InputsErrT, FeeRuleT::Error, DbT::NoteRef>>
) -> Result<
DbT::TxRef,
Error<
<DbT as WalletRead>::Error,
<DbT as WalletCommitmentTrees>::Error,
InputsErrT,
FeeRuleT::Error,
DbT::NoteRef,
>,
>
where
DbT: WalletWrite,
DbT: WalletWrite + WalletCommitmentTrees,
DbT::TxRef: Copy + Debug,
DbT::NoteRef: Copy + Eq + Ord,
ParamsT: consensus::Parameters + Clone,
@ -459,14 +488,23 @@ where
// Create the transaction. The type of the proposal ensures that there
// are no possible transparent inputs, so we ignore those
let mut builder = Builder::new(params.clone(), proposal.target_height(), None);
let mut builder = Builder::new(params.clone(), proposal.min_target_height(), None);
for selected in proposal.sapling_inputs() {
let (note, key, merkle_path) = select_key_for_note(selected, usk.sapling(), &dfvk)
wallet_db.with_sapling_tree_mut::<_, _, Error<_, _, _, _, _>>(|sapling_tree| {
for selected in proposal.sapling_inputs() {
let (note, key, merkle_path) = select_key_for_note(
sapling_tree,
selected,
usk.sapling(),
&dfvk,
usize::try_from(u32::from(min_confirmations) - 1).unwrap(),
)?
.ok_or(Error::NoteMismatch(selected.note_id))?;
builder.add_sapling_spend(key, selected.diversifier, note, merkle_path)?;
}
builder.add_sapling_spend(key, selected.diversifier, note, merkle_path)?;
}
Ok(())
})?;
#[cfg(feature = "transparent-inputs")]
let utxos = {
@ -577,7 +615,7 @@ where
tx.sapling_bundle().and_then(|bundle| {
try_sapling_note_decryption(
params,
proposal.target_height(),
proposal.min_target_height(),
&internal_ivk,
&bundle.shielded_outputs()[output_index],
)
@ -653,8 +691,9 @@ where
/// to the wallet that the wallet can use to improve how it represents those
/// shielding transactions to the user.
/// * `min_confirmations`: The minimum number of confirmations that a previously
/// received UTXO must have in the blockchain in order to be considered for being
/// spent.
/// received note must have in the blockchain in order to be considered for being
/// spent. A value of 10 confirmations is recommended and 0-conf transactions are
/// not supported.
///
/// [`sapling::TxProver`]: zcash_primitives::sapling::prover::TxProver
#[cfg(feature = "transparent-inputs")]
@ -669,14 +708,20 @@ pub fn shield_transparent_funds<DbT, ParamsT, InputsT>(
usk: &UnifiedSpendingKey,
from_addrs: &[TransparentAddress],
memo: &MemoBytes,
min_confirmations: u32,
min_confirmations: NonZeroU32,
) -> Result<
DbT::TxRef,
Error<DbT::Error, InputsT::Error, <InputsT::FeeRule as FeeRule>::Error, DbT::NoteRef>,
Error<
<DbT as WalletRead>::Error,
<DbT as WalletCommitmentTrees>::Error,
InputsT::Error,
<InputsT::FeeRule as FeeRule>::Error,
DbT::NoteRef,
>,
>
where
ParamsT: consensus::Parameters,
DbT: WalletWrite,
DbT: WalletWrite + WalletCommitmentTrees,
DbT::NoteRef: Copy + Eq + Ord,
InputsT: InputSelector<DataSource = DbT>,
{
@ -696,17 +741,26 @@ where
usk,
OvkPolicy::Sender,
proposal,
min_confirmations,
Some(memo.clone()),
)
}
fn select_key_for_note<N>(
#[allow(clippy::type_complexity)]
fn select_key_for_note<N, S: ShardStore<H = Node, CheckpointId = BlockHeight>>(
commitment_tree: &mut ShardTree<
S,
{ sapling::NOTE_COMMITMENT_TREE_DEPTH },
SAPLING_SHARD_HEIGHT,
>,
selected: &ReceivedSaplingNote<N>,
extsk: &ExtendedSpendingKey,
dfvk: &DiversifiableFullViewingKey,
) -> Option<(sapling::Note, ExtendedSpendingKey, sapling::MerklePath)> {
let merkle_path = selected.witness.path().expect("the tree is not empty");
checkpoint_depth: usize,
) -> Result<
Option<(sapling::Note, ExtendedSpendingKey, sapling::MerklePath)>,
ShardTreeError<S::Error>,
> {
// Attempt to reconstruct the note being spent using both the internal and external dfvks
// corresponding to the unified spending key, checking against the witness we are using
// to spend the note that we've used the correct key.
@ -717,13 +771,16 @@ fn select_key_for_note<N>(
.diversified_change_address(selected.diversifier)
.map(|addr| addr.create_note(selected.note_value.into(), selected.rseed));
let expected_root = selected.witness.root();
external_note
let expected_root = commitment_tree.root_at_checkpoint(checkpoint_depth)?;
let merkle_path = commitment_tree
.witness_caching(selected.note_commitment_tree_position, checkpoint_depth)?;
Ok(external_note
.filter(|n| expected_root == merkle_path.root(Node::from_cmu(&n.cmu())))
.map(|n| (n, extsk.clone(), merkle_path.clone()))
.or_else(|| {
internal_note
.filter(|n| expected_root == merkle_path.root(Node::from_cmu(&n.cmu())))
.map(|n| (n, extsk.derive_internal(), merkle_path))
})
}))
}

View File

@ -1,8 +1,9 @@
//! Types related to the process of selecting inputs to be spent given a transaction request.
use core::marker::PhantomData;
use std::collections::BTreeSet;
use std::fmt;
use std::num::NonZeroU32;
use std::{collections::BTreeSet, fmt::Debug};
use zcash_primitives::{
consensus::{self, BlockHeight},
@ -35,6 +36,9 @@ pub enum InputSelectorError<DbErrT, SelectorErrT> {
/// Insufficient funds were available to satisfy the payment request that inputs were being
/// selected to attempt to satisfy.
InsufficientFunds { available: Amount, required: Amount },
/// The data source does not have enough information to choose an expiry height
/// for the transaction.
SyncRequired,
}
impl<DE: fmt::Display, SE: fmt::Display> fmt::Display for InputSelectorError<DE, SE> {
@ -59,6 +63,9 @@ impl<DE: fmt::Display, SE: fmt::Display> fmt::Display for InputSelectorError<DE,
i64::from(*available),
i64::from(*required)
),
InputSelectorError::SyncRequired => {
write!(f, "Insufficient chain data is available, sync required.")
}
}
}
}
@ -71,7 +78,8 @@ pub struct Proposal<FeeRuleT, NoteRef> {
sapling_inputs: Vec<ReceivedSaplingNote<NoteRef>>,
balance: TransactionBalance,
fee_rule: FeeRuleT,
target_height: BlockHeight,
min_target_height: BlockHeight,
min_anchor_height: BlockHeight,
is_shielding: bool,
}
@ -97,8 +105,19 @@ impl<FeeRuleT, NoteRef> Proposal<FeeRuleT, NoteRef> {
&self.fee_rule
}
/// Returns the target height for which the proposal was prepared.
pub fn target_height(&self) -> BlockHeight {
self.target_height
///
/// The chain must contain at least this many blocks in order for the proposal to
/// be executed.
pub fn min_target_height(&self) -> BlockHeight {
self.min_target_height
}
/// Returns the anchor height used in preparing the proposal.
///
/// If, at the time that the proposal is executed, the anchor height required to satisfy
/// the minimum confirmation depth is less than this height, the proposal execution
/// API should return an error.
pub fn min_anchor_height(&self) -> BlockHeight {
self.min_anchor_height
}
/// Returns a flag indicating whether or not the proposed transaction
/// is exclusively wallet-internal (if it does not involve any external
@ -108,6 +127,21 @@ impl<FeeRuleT, NoteRef> Proposal<FeeRuleT, NoteRef> {
}
}
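The check described for `min_anchor_height` above can be sketched as follows (a hedged example; `wallet_db`, `proposal`, and `min_confirmations` are assumptions): before executing a proposal, the anchor currently available at the required confirmation depth is compared against the proposal's recorded minimum.

if let Some((_target, anchor_height)) =
    wallet_db.get_target_and_anchor_heights(min_confirmations)?
{
    if anchor_height < proposal.min_anchor_height() {
        // The wallet's view of the chain no longer satisfies the proposal's
        // assumptions; execution should fail with an error.
    }
}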
impl<FeeRuleT, NoteRef> Debug for Proposal<FeeRuleT, NoteRef> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Proposal")
.field("transaction_request", &self.transaction_request)
.field("transparent_inputs", &self.transparent_inputs)
.field("sapling_inputs", &self.sapling_inputs.len())
.field("balance", &self.balance)
//.field("fee_rule", &self.fee_rule)
.field("min_target_height", &self.min_target_height)
.field("min_anchor_height", &self.min_anchor_height)
.field("is_shielding", &self.is_shielding)
.finish_non_exhaustive()
}
}
/// A strategy for selecting transaction inputs and proposing transaction outputs.
///
/// Proposals should include only economically useful inputs, as determined by `Self::FeeRule`;
@ -146,9 +180,8 @@ pub trait InputSelector {
params: &ParamsT,
wallet_db: &Self::DataSource,
account: AccountId,
anchor_height: BlockHeight,
target_height: BlockHeight,
transaction_request: TransactionRequest,
min_confirmations: NonZeroU32,
) -> Result<
Proposal<Self::FeeRule, <<Self as InputSelector>::DataSource as WalletRead>::NoteRef>,
InputSelectorError<<<Self as InputSelector>::DataSource as WalletRead>::Error, Self::Error>,
@ -172,8 +205,7 @@ pub trait InputSelector {
wallet_db: &Self::DataSource,
shielding_threshold: NonNegativeAmount,
source_addrs: &[TransparentAddress],
confirmed_height: BlockHeight,
target_height: BlockHeight,
min_confirmations: NonZeroU32,
) -> Result<
Proposal<Self::FeeRule, <<Self as InputSelector>::DataSource as WalletRead>::NoteRef>,
InputSelectorError<<<Self as InputSelector>::DataSource as WalletRead>::Error, Self::Error>,
@ -292,13 +324,18 @@ where
params: &ParamsT,
wallet_db: &Self::DataSource,
account: AccountId,
anchor_height: BlockHeight,
target_height: BlockHeight,
transaction_request: TransactionRequest,
min_confirmations: NonZeroU32,
) -> Result<Proposal<Self::FeeRule, DbT::NoteRef>, InputSelectorError<DbT::Error, Self::Error>>
where
ParamsT: consensus::Parameters,
{
// Target the next block, assuming we are up-to-date.
let (target_height, anchor_height) = wallet_db
.get_target_and_anchor_heights(min_confirmations)
.map_err(InputSelectorError::DataSource)
.and_then(|x| x.ok_or(InputSelectorError::SyncRequired))?;
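// Illustrative note (assumed semantics, not stated by this diff): with the wallet's maximum
// scanned height at H and `min_confirmations` = c, this call conventionally yields a target
// height of H + 1 and an anchor height of roughly H + 1 - c; a `None` result means no blocks
// have been scanned yet, which is surfaced to the caller as `SyncRequired`.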
let mut transparent_outputs = vec![];
let mut sapling_outputs = vec![];
let mut output_total = Amount::zero();
@ -362,7 +399,8 @@ where
sapling_inputs,
balance,
fee_rule: (*self.change_strategy.fee_rule()).clone(),
target_height,
min_target_height: target_height,
min_anchor_height: anchor_height,
is_shielding: false,
});
}
@ -405,15 +443,19 @@ where
wallet_db: &Self::DataSource,
shielding_threshold: NonNegativeAmount,
source_addrs: &[TransparentAddress],
confirmed_height: BlockHeight,
target_height: BlockHeight,
min_confirmations: NonZeroU32,
) -> Result<Proposal<Self::FeeRule, DbT::NoteRef>, InputSelectorError<DbT::Error, Self::Error>>
where
ParamsT: consensus::Parameters,
{
let (target_height, latest_anchor) = wallet_db
.get_target_and_anchor_heights(min_confirmations)
.map_err(InputSelectorError::DataSource)
.and_then(|x| x.ok_or(InputSelectorError::SyncRequired))?;
let mut transparent_inputs: Vec<WalletTransparentOutput> = source_addrs
.iter()
.map(|taddr| wallet_db.get_unspent_transparent_outputs(taddr, confirmed_height, &[]))
.map(|taddr| wallet_db.get_unspent_transparent_outputs(taddr, latest_anchor, &[]))
.collect::<Result<Vec<Vec<_>>, _>>()
.map_err(InputSelectorError::DataSource)?
.into_iter()
@ -458,7 +500,8 @@ where
sapling_inputs: vec![],
balance,
fee_rule: (*self.change_strategy.fee_rule()).clone(),
target_height,
min_target_height: target_height,
min_anchor_height: latest_anchor,
is_shielding: true,
})
} else {

View File

@ -16,8 +16,8 @@ pub mod fees;
pub mod keys;
pub mod proto;
pub mod scan;
pub mod scanning;
pub mod wallet;
pub mod welding_rig;
pub mod zip321;
pub use decrypt::{decrypt_transaction, DecryptedOutput, TransferType};

View File

@ -1,3 +1,14 @@
/// ChainMetadata represents information about the state of the chain as of a given block.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ChainMetadata {
/// the size of the Sapling note commitment tree as of the end of this block
#[prost(uint32, tag = "1")]
pub sapling_commitment_tree_size: u32,
/// the size of the Orchard note commitment tree as of the end of this block
#[prost(uint32, tag = "2")]
pub orchard_commitment_tree_size: u32,
}
/// CompactBlock is a packaging of ONLY the data from a block that's needed to:
/// 1. Detect a payment to your shielded Sapling address
/// 2. Detect a spend of your shielded Sapling notes
@ -26,6 +37,9 @@ pub struct CompactBlock {
/// zero or more compact transactions from this block
#[prost(message, repeated, tag = "7")]
pub vtx: ::prost::alloc::vec::Vec<CompactTx>,
/// information about the state of the chain as of this block
#[prost(message, optional, tag = "8")]
pub chain_metadata: ::core::option::Option<ChainMetadata>,
}
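As an illustrative aside (the numeric values below are invented for this sketch), client code can populate the new field as follows; `prost`-generated messages implement `Default`, so unset fields may be elided:
let metadata = ChainMetadata {
    sapling_commitment_tree_size: 1_234,
    orchard_commitment_tree_size: 567,
};
let block = CompactBlock {
    height: 1_000_000,
    chain_metadata: Some(metadata),
    ..Default::default()
};
// Older lightwalletd instances that do not track tree sizes would leave this as `None`.
assert!(block.chain_metadata.is_some());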
/// CompactTx contains the minimum information for a wallet to know if this transaction
/// is relevant to it (either pays to it or spends from it) via shielded elements

View File

@ -1,21 +1,27 @@
//! Tools for scanning a compact representation of the Zcash block chain.
//!
//! TODO: rename this module to `block_scanner`
use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
use std::fmt::{self, Debug};
use incrementalmerkletree::{Position, Retention};
use subtle::{ConditionallySelectable, ConstantTimeEq, CtOption};
use zcash_note_encryption::batch;
use zcash_primitives::consensus::BlockHeight;
use zcash_primitives::{
consensus,
sapling::{
self,
note_encryption::{PreparedIncomingViewingKey, SaplingDomain},
Node, Note, Nullifier, NullifierDerivingKey, SaplingIvk,
SaplingIvk,
},
transaction::components::sapling::CompactOutputDescription,
zip32::{sapling::DiversifiableFullViewingKey, AccountId, Scope},
};
use crate::data_api::{BlockMetadata, ScannedBlock};
use crate::{
proto::compact_formats::CompactBlock,
scan::{Batch, BatchRunner, Tasks},
@ -34,7 +40,7 @@ use crate::{
/// nullifier for the note can also be obtained.
///
/// [`CompactSaplingOutput`]: crate::proto::compact_formats::CompactSaplingOutput
/// [`scan_block`]: crate::welding_rig::scan_block
/// [`scan_block`]: crate::scanning::scan_block
pub trait ScanningKey {
/// The type representing the scope of the scanning key.
type Scope: Clone + Eq + std::hash::Hash + Send + 'static;
@ -56,16 +62,13 @@ pub trait ScanningKey {
/// IVK-based implementations of this trait cannot successfully derive
/// nullifiers, in which case `Self::Nf` should be set to the unit type
/// and this function is a no-op.
fn sapling_nf(
key: &Self::SaplingNk,
note: &Note,
witness: &sapling::IncrementalWitness,
) -> Self::Nf;
fn sapling_nf(key: &Self::SaplingNk, note: &sapling::Note, note_position: Position)
-> Self::Nf;
}
impl ScanningKey for DiversifiableFullViewingKey {
type Scope = Scope;
type SaplingNk = NullifierDerivingKey;
type SaplingNk = sapling::NullifierDerivingKey;
type SaplingKeys = [(Self::Scope, SaplingIvk, Self::SaplingNk); 2];
type Nf = sapling::Nullifier;
@ -84,16 +87,8 @@ impl ScanningKey for DiversifiableFullViewingKey {
]
}
fn sapling_nf(
key: &Self::SaplingNk,
note: &Note,
witness: &sapling::IncrementalWitness,
) -> Self::Nf {
note.nf(
key,
u64::try_from(witness.position())
.expect("Sapling note commitment tree position must fit into a u64"),
)
fn sapling_nf(key: &Self::SaplingNk, note: &sapling::Note, position: Position) -> Self::Nf {
note.nf(key, position.into())
}
}
@ -111,7 +106,45 @@ impl ScanningKey for SaplingIvk {
[((), self.clone(), ())]
}
fn sapling_nf(_key: &Self::SaplingNk, _note: &Note, _witness: &sapling::IncrementalWitness) {}
fn sapling_nf(_key: &Self::SaplingNk, _note: &sapling::Note, _position: Position) {}
}
/// Errors that may occur in chain scanning
#[derive(Copy, Clone, Debug)]
pub enum ScanError {
/// The hash of the parent block given by a proposed new chain tip does not match the hash of
/// the current chain tip.
PrevHashMismatch { at_height: BlockHeight },
/// The block height field of the proposed new chain tip is not equal to the height of the
/// previous chain tip + 1. This variant stores a copy of the incorrect height value for
/// reporting purposes.
BlockHeightDiscontinuity {
previous_tip: BlockHeight,
new_height: BlockHeight,
},
/// The size of the Sapling note commitment tree was not provided as part of a [`CompactBlock`]
/// being scanned, making it impossible to construct the nullifier for a detected note.
SaplingTreeSizeUnknown { at_height: BlockHeight },
}
impl fmt::Display for ScanError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self {
ScanError::PrevHashMismatch { at_height } => write!(
f,
"The parent hash of proposed block does not correspond to the block hash at height {}.",
at_height
),
ScanError::BlockHeightDiscontinuity { previous_tip, new_height } => {
write!(f, "Block height discontinuity at height {}; next height is : {}", previous_tip, new_height)
}
ScanError::SaplingTreeSizeUnknown { at_height } => {
write!(f, "Unable to determine Sapling note commitment tree size at height {}", at_height)
}
}
}
}
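A hedged sketch of how a sync driver might react to each variant; the suggested recovery actions are assumptions about caller policy, not behavior defined by this crate:
fn handle_scan_error(err: ScanError) {
    match err {
        // The cached chain forks below the wallet's view of the chain; rewind and re-scan.
        ScanError::PrevHashMismatch { at_height } => {
            eprintln!("chain reorg detected; rewind below height {}", at_height);
        }
        // Blocks were supplied out of order or with a gap in heights.
        ScanError::BlockHeightDiscontinuity { previous_tip, new_height } => {
            eprintln!("expected height {}, received {}", previous_tip + 1, new_height);
        }
        // Neither the block's chain metadata nor the prior block metadata carried a tree size.
        ScanError::SaplingTreeSizeUnknown { at_height } => {
            eprintln!("re-fetch block {} from a lightwalletd that reports tree sizes", at_height);
        }
    }
}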
/// Scans a [`CompactBlock`] with a set of [`ScanningKey`]s.
@ -132,7 +165,7 @@ impl ScanningKey for SaplingIvk {
/// [`ExtendedFullViewingKey`]: zcash_primitives::zip32::ExtendedFullViewingKey
/// [`SaplingIvk`]: zcash_primitives::sapling::SaplingIvk
/// [`CompactBlock`]: crate::proto::compact_formats::CompactBlock
/// [`ScanningKey`]: crate::welding_rig::ScanningKey
/// [`ScanningKey`]: crate::scanning::ScanningKey
/// [`CommitmentTree`]: zcash_primitives::sapling::CommitmentTree
/// [`IncrementalWitness`]: zcash_primitives::sapling::IncrementalWitness
/// [`WalletSaplingOutput`]: crate::wallet::WalletSaplingOutput
@ -141,17 +174,15 @@ pub fn scan_block<P: consensus::Parameters + Send + 'static, K: ScanningKey>(
params: &P,
block: CompactBlock,
vks: &[(&AccountId, &K)],
nullifiers: &[(AccountId, Nullifier)],
tree: &mut sapling::CommitmentTree,
existing_witnesses: &mut [&mut sapling::IncrementalWitness],
) -> Vec<WalletTx<K::Nf>> {
sapling_nullifiers: &[(AccountId, sapling::Nullifier)],
prior_block_metadata: Option<&BlockMetadata>,
) -> Result<ScannedBlock<K::Nf>, ScanError> {
scan_block_with_runner::<_, _, ()>(
params,
block,
vks,
nullifiers,
tree,
existing_witnesses,
sapling_nullifiers,
prior_block_metadata,
None,
)
}
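A rough usage sketch under the new signature, assuming `params`, `block`, `account`, `dfvk`, and `prior` (the metadata of the previously scanned block) are already in scope; error handling is elided with `?`:
let scanned = scan_block(
    &params,
    block,
    &[(&account, &dfvk)],
    &[], // no known unspent nullifiers yet
    Some(&prior),
)?;
for tx in scanned.transactions() {
    println!("relevant transaction {:?} with {} outputs", tx.txid, tx.sapling_outputs.len());
}
// (Node, Retention) pairs ready to be batch-inserted into the wallet's shard tree.
let _commitments = scanned.sapling_commitments();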
@ -202,21 +233,66 @@ pub(crate) fn scan_block_with_runner<
params: &P,
block: CompactBlock,
vks: &[(&AccountId, &K)],
nullifiers: &[(AccountId, Nullifier)],
tree: &mut sapling::CommitmentTree,
existing_witnesses: &mut [&mut sapling::IncrementalWitness],
nullifiers: &[(AccountId, sapling::Nullifier)],
prior_block_metadata: Option<&BlockMetadata>,
mut batch_runner: Option<&mut TaggedBatchRunner<P, K::Scope, T>>,
) -> Vec<WalletTx<K::Nf>> {
) -> Result<ScannedBlock<K::Nf>, ScanError> {
let mut wtxs: Vec<WalletTx<K::Nf>> = vec![];
let block_height = block.height();
let block_hash = block.hash();
let mut sapling_note_commitments: Vec<(sapling::Node, Retention<BlockHeight>)> = vec![];
let cur_height = block.height();
let cur_hash = block.hash();
for tx in block.vtx.into_iter() {
if let Some(prev) = prior_block_metadata {
if cur_height != prev.block_height() + 1 {
return Err(ScanError::BlockHeightDiscontinuity {
previous_tip: prev.block_height(),
new_height: cur_height,
});
}
if block.prev_hash() != prev.block_hash() {
return Err(ScanError::PrevHashMismatch {
at_height: cur_height,
});
}
}
// It's possible to make progress without a Sapling tree position if we don't have any Sapling
// notes in the block, since we only use the position for constructing nullifiers for our own
// received notes. Thus, we allow it to be optional here, and only produce an error if we try
// to use it. `block.sapling_commitment_tree_size` is expected to be correct as of the end of
// the block, and we can't have a note of ours in a block with no outputs, so treating the zero
// default value from the protobuf as `None` is always correct.
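// For example (numbers invented for illustration): if `chain_metadata` reports an end-of-block
// Sapling tree size of 1,234 and the block's transactions contain 4 Sapling outputs in total,
// then the tree held 1,230 notes at the start of the block, and the block's first note
// commitment will be appended at position 1230.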
let mut sapling_commitment_tree_size = block
.chain_metadata
.as_ref()
.and_then(|m| {
if m.sapling_commitment_tree_size == 0 {
None
} else {
let block_note_count: u32 = block
.vtx
.iter()
.map(|tx| {
u32::try_from(tx.outputs.len()).expect("output count cannot exceed a u32")
})
.sum();
Some(m.sapling_commitment_tree_size - block_note_count)
}
})
.or_else(|| prior_block_metadata.map(|m| m.sapling_tree_size()))
.ok_or(ScanError::SaplingTreeSizeUnknown {
at_height: cur_height,
})?;
let compact_block_tx_count = block.vtx.len();
for (tx_idx, tx) in block.vtx.into_iter().enumerate() {
let txid = tx.txid();
let index = tx.index as usize;
// Check for spent notes
// The only step that is not constant-time is the filter() at the end.
// Check for spent notes. The only step that is not constant-time is
// the filter() at the end.
// TODO: However, this is O(|nullifiers| * |notes|); does using
// constant-time operations here really make sense?
let shielded_spends: Vec<_> = tx
.spends
.into_iter()
@ -248,25 +324,14 @@ pub(crate) fn scan_block_with_runner<
// Check for incoming notes while incrementing tree and witnesses
let mut shielded_outputs: Vec<WalletSaplingOutput<K::Nf>> = vec![];
let tx_outputs_len = u32::try_from(tx.outputs.len()).unwrap();
{
// Grab mutable references to new witnesses from previous transactions
// in this block so that we can update them. Scoped so we don't hold
// mutable references to wtxs for too long.
let mut block_witnesses: Vec<_> = wtxs
.iter_mut()
.flat_map(|tx| {
tx.sapling_outputs
.iter_mut()
.map(|output| output.witness_mut())
})
.collect();
let decoded = &tx
.outputs
.into_iter()
.map(|output| {
(
SaplingDomain::for_height(params.clone(), block_height),
SaplingDomain::for_height(params.clone(), cur_height),
CompactOutputDescription::try_from(output)
.expect("Invalid output found in compact block decoding."),
)
@ -283,7 +348,7 @@ pub(crate) fn scan_block_with_runner<
})
.collect::<HashMap<_, _>>();
let mut decrypted = runner.collect_results(block_hash, txid);
let mut decrypted = runner.collect_results(cur_hash, txid);
(0..decoded.len())
.map(|i| {
decrypted.remove(&(txid, i)).map(|d_note| {
@ -292,7 +357,7 @@ pub(crate) fn scan_block_with_runner<
"The batch runner and scan_block must use the same set of IVKs.",
);
((d_note.note, d_note.recipient), a, (*nk).clone())
(d_note.note, a, (*nk).clone())
})
})
.collect()
@ -312,40 +377,33 @@ pub(crate) fn scan_block_with_runner<
.map(PreparedIncomingViewingKey::new)
.collect::<Vec<_>>();
batch::try_compact_note_decryption(&ivks, decoded)
batch::try_compact_note_decryption(&ivks, &decoded[..])
.into_iter()
.map(|v| {
v.map(|(note_data, ivk_idx)| {
v.map(|((note, _), ivk_idx)| {
let (account, _, nk) = &vks[ivk_idx];
(note_data, *account, (*nk).clone())
(note, *account, (*nk).clone())
})
})
.collect()
};
for (index, ((_, output), dec_output)) in decoded.iter().zip(decrypted).enumerate() {
// Grab mutable references to new witnesses from previous outputs
// in this transaction so that we can update them. Scoped so we
// don't hold mutable references to shielded_outputs for too long.
let new_witnesses: Vec<_> = shielded_outputs
.iter_mut()
.map(|out| out.witness_mut())
.collect();
for (output_idx, ((_, output), dec_output)) in decoded.iter().zip(decrypted).enumerate()
{
// Collect block note commitments
let node = sapling::Node::from_cmu(&output.cmu);
let is_checkpoint =
output_idx + 1 == decoded.len() && tx_idx + 1 == compact_block_tx_count;
let retention = match (dec_output.is_some(), is_checkpoint) {
(is_marked, true) => Retention::Checkpoint {
id: cur_height,
is_marked,
},
(true, false) => Retention::Marked,
(false, false) => Retention::Ephemeral,
};
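// Retention semantics (per the `incrementalmerkletree` API): `Marked` commitments belong to
// notes this wallet will need to witness later; `Checkpoint` is attached to the block's final
// commitment so the tree can later be rewound to this block boundary (and is itself marked if
// that final output is ours); `Ephemeral` commitments may be pruned once their subtrees are
// complete.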
// Increment tree and witnesses
let node = Node::from_cmu(&output.cmu);
for witness in &mut *existing_witnesses {
witness.append(node).unwrap();
}
for witness in &mut block_witnesses {
witness.append(node).unwrap();
}
for witness in new_witnesses {
witness.append(node).unwrap();
}
tree.append(node).unwrap();
if let Some(((note, _), account, nk)) = dec_output {
if let Some((note, account, nk)) = dec_output {
// A note is marked as "change" if the account that received it
// also spent notes in the same transaction. This will catch,
// for instance:
@ -353,34 +411,45 @@ pub(crate) fn scan_block_with_runner<
// - Notes created by consolidation transactions.
// - Notes sent from one account to itself.
let is_change = spent_from_accounts.contains(&account);
let witness = sapling::IncrementalWitness::from_tree(tree.clone());
let nf = K::sapling_nf(&nk, &note, &witness);
let note_commitment_tree_position = Position::from(u64::from(
sapling_commitment_tree_size + u32::try_from(output_idx).unwrap(),
));
let nf = K::sapling_nf(&nk, &note, note_commitment_tree_position);
shielded_outputs.push(WalletSaplingOutput::from_parts(
index,
output_idx,
output.cmu,
output.ephemeral_key.clone(),
account,
note,
is_change,
witness,
note_commitment_tree_position,
nf,
))
));
}
sapling_note_commitments.push((node, retention));
}
}
if !(shielded_spends.is_empty() && shielded_outputs.is_empty()) {
wtxs.push(WalletTx {
txid,
index,
index: tx.index as usize,
sapling_spends: shielded_spends,
sapling_outputs: shielded_outputs,
});
}
sapling_commitment_tree_size += tx_outputs_len;
}
wtxs
Ok(ScannedBlock::from_parts(
BlockMetadata::from_parts(cur_height, cur_hash, sapling_commitment_tree_size),
block.time,
wtxs,
sapling_note_commitments,
))
}
#[cfg(test)]
@ -389,25 +458,29 @@ mod tests {
ff::{Field, PrimeField},
GroupEncoding,
};
use incrementalmerkletree::{Position, Retention};
use rand_core::{OsRng, RngCore};
use zcash_note_encryption::Domain;
use zcash_primitives::{
block::BlockHash,
consensus::{BlockHeight, Network},
constants::SPENDING_KEY_GENERATOR,
memo::MemoBytes,
sapling::{
self,
note_encryption::{sapling_note_encryption, PreparedIncomingViewingKey, SaplingDomain},
util::generate_random_rseed,
value::NoteValue,
CommitmentTree, Note, Nullifier, SaplingIvk,
Nullifier, SaplingIvk,
},
transaction::components::Amount,
zip32::{AccountId, DiversifiableFullViewingKey, ExtendedSpendingKey},
};
use crate::{
data_api::BlockMetadata,
proto::compact_formats::{
CompactBlock, CompactSaplingOutput, CompactSaplingSpend, CompactTx,
self as compact, CompactBlock, CompactSaplingOutput, CompactSaplingSpend, CompactTx,
},
scan::BatchRunner,
};
@ -449,19 +522,24 @@ mod tests {
/// Create a fake CompactBlock at the given height, with a transaction containing a
/// single spend of the given nullifier and a single output paying the given address.
/// Returns the CompactBlock.
///
/// Set `initial_sapling_tree_size` to `None` to simulate a `CompactBlock` retrieved
/// from a `lightwalletd` that is not currently tracking note commitment tree sizes.
fn fake_compact_block(
height: BlockHeight,
prev_hash: BlockHash,
nf: Nullifier,
dfvk: &DiversifiableFullViewingKey,
value: Amount,
tx_after: bool,
initial_sapling_tree_size: Option<u32>,
) -> CompactBlock {
let to = dfvk.default_address().1;
// Create a fake Note for the account
let mut rng = OsRng;
let rseed = generate_random_rseed(&Network::TestNetwork, height, &mut rng);
let note = Note::from_parts(to, NoteValue::from_raw(value.into()), rseed);
let note = sapling::Note::from_parts(to, NoteValue::from_raw(value.into()), rseed);
let encryptor = sapling_note_encryption::<_, Network>(
Some(dfvk.fvk().ovk),
note.clone(),
@ -481,6 +559,7 @@ mod tests {
rng.fill_bytes(&mut hash);
hash
},
prev_hash: prev_hash.0.to_vec(),
height: height.into(),
..Default::default()
};
@ -514,6 +593,15 @@ mod tests {
cb.vtx.push(tx);
}
cb.chain_metadata = initial_sapling_tree_size.map(|s| compact::ChainMetadata {
sapling_commitment_tree_size: s + cb
.vtx
.iter()
.map(|tx| tx.outputs.len() as u32)
.sum::<u32>(),
..Default::default()
});
cb
}
@ -526,14 +614,15 @@ mod tests {
let cb = fake_compact_block(
1u32.into(),
BlockHash([0; 32]),
Nullifier([0; 32]),
&dfvk,
Amount::from_u64(5).unwrap(),
false,
None,
);
assert_eq!(cb.vtx.len(), 2);
let mut tree = CommitmentTree::empty();
let mut batch_runner = if scan_multithreaded {
let mut runner = BatchRunner::<_, _, _, ()>::new(
10,
@ -551,15 +640,20 @@ mod tests {
None
};
let txs = scan_block_with_runner(
let scanned_block = scan_block_with_runner(
&Network::TestNetwork,
cb,
&[(&account, &dfvk)],
&[],
&mut tree,
&mut [],
Some(&BlockMetadata::from_parts(
BlockHeight::from(0),
BlockHash([0u8; 32]),
0,
)),
batch_runner.as_mut(),
);
)
.unwrap();
let txs = scanned_block.transactions();
assert_eq!(txs.len(), 1);
let tx = &txs[0];
@ -569,9 +663,26 @@ mod tests {
assert_eq!(tx.sapling_outputs[0].index(), 0);
assert_eq!(tx.sapling_outputs[0].account(), account);
assert_eq!(tx.sapling_outputs[0].note().value().inner(), 5);
assert_eq!(
tx.sapling_outputs[0].note_commitment_tree_position(),
Position::from(1)
);
// Check that the witness root matches
assert_eq!(tx.sapling_outputs[0].witness().root(), tree.root());
assert_eq!(scanned_block.metadata().sapling_tree_size(), 2);
assert_eq!(
scanned_block
.sapling_commitments()
.iter()
.map(|(_, retention)| *retention)
.collect::<Vec<_>>(),
vec![
Retention::Ephemeral,
Retention::Checkpoint {
id: scanned_block.height(),
is_marked: true
}
]
);
}
go(false);
@ -587,14 +698,15 @@ mod tests {
let cb = fake_compact_block(
1u32.into(),
BlockHash([0; 32]),
Nullifier([0; 32]),
&dfvk,
Amount::from_u64(5).unwrap(),
true,
Some(0),
);
assert_eq!(cb.vtx.len(), 3);
let mut tree = CommitmentTree::empty();
let mut batch_runner = if scan_multithreaded {
let mut runner = BatchRunner::<_, _, _, ()>::new(
10,
@ -612,15 +724,16 @@ mod tests {
None
};
let txs = scan_block_with_runner(
let scanned_block = scan_block_with_runner(
&Network::TestNetwork,
cb,
&[(&AccountId::from(0), &dfvk)],
&[],
&mut tree,
&mut [],
None,
batch_runner.as_mut(),
);
)
.unwrap();
let txs = scanned_block.transactions();
assert_eq!(txs.len(), 1);
let tx = &txs[0];
@ -631,8 +744,21 @@ mod tests {
assert_eq!(tx.sapling_outputs[0].account(), AccountId::from(0));
assert_eq!(tx.sapling_outputs[0].note().value().inner(), 5);
// Check that the witness root matches
assert_eq!(tx.sapling_outputs[0].witness().root(), tree.root());
assert_eq!(
scanned_block
.sapling_commitments()
.iter()
.map(|(_, retention)| *retention)
.collect::<Vec<_>>(),
vec![
Retention::Ephemeral,
Retention::Marked,
Retention::Checkpoint {
id: scanned_block.height(),
is_marked: false
}
]
);
}
go(false);
@ -646,19 +772,21 @@ mod tests {
let nf = Nullifier([7; 32]);
let account = AccountId::from(12);
let cb = fake_compact_block(1u32.into(), nf, &dfvk, Amount::from_u64(5).unwrap(), false);
let cb = fake_compact_block(
1u32.into(),
BlockHash([0; 32]),
nf,
&dfvk,
Amount::from_u64(5).unwrap(),
false,
Some(0),
);
assert_eq!(cb.vtx.len(), 2);
let vks: Vec<(&AccountId, &SaplingIvk)> = vec![];
let mut tree = CommitmentTree::empty();
let txs = scan_block(
&Network::TestNetwork,
cb,
&vks[..],
&[(account, nf)],
&mut tree,
&mut [],
);
let scanned_block =
scan_block(&Network::TestNetwork, cb, &vks[..], &[(account, nf)], None).unwrap();
let txs = scanned_block.transactions();
assert_eq!(txs.len(), 1);
let tx = &txs[0];
@ -668,5 +796,20 @@ mod tests {
assert_eq!(tx.sapling_spends[0].index(), 0);
assert_eq!(tx.sapling_spends[0].nf(), &nf);
assert_eq!(tx.sapling_spends[0].account(), account);
assert_eq!(
scanned_block
.sapling_commitments()
.iter()
.map(|(_, retention)| *retention)
.collect::<Vec<_>>(),
vec![
Retention::Ephemeral,
Retention::Checkpoint {
id: scanned_block.height(),
is_marked: false
}
]
);
}
}

View File

@ -1,6 +1,7 @@
//! Structs representing transaction data scanned from the block chain by a wallet or
//! light client.
use incrementalmerkletree::Position;
use zcash_note_encryption::EphemeralKeyBytes;
use zcash_primitives::{
consensus::BlockHeight,
@ -117,7 +118,7 @@ pub struct WalletSaplingOutput<N> {
account: AccountId,
note: sapling::Note,
is_change: bool,
witness: sapling::IncrementalWitness,
note_commitment_tree_position: Position,
nf: N,
}
@ -131,7 +132,7 @@ impl<N> WalletSaplingOutput<N> {
account: AccountId,
note: sapling::Note,
is_change: bool,
witness: sapling::IncrementalWitness,
note_commitment_tree_position: Position,
nf: N,
) -> Self {
Self {
@ -141,7 +142,7 @@ impl<N> WalletSaplingOutput<N> {
account,
note,
is_change,
witness,
note_commitment_tree_position,
nf,
}
}
@ -164,11 +165,8 @@ impl<N> WalletSaplingOutput<N> {
pub fn is_change(&self) -> bool {
self.is_change
}
pub fn witness(&self) -> &sapling::IncrementalWitness {
&self.witness
}
pub fn witness_mut(&mut self) -> &mut sapling::IncrementalWitness {
&mut self.witness
pub fn note_commitment_tree_position(&self) -> Position {
self.note_commitment_tree_position
}
pub fn nf(&self) -> &N {
&self.nf
@ -177,12 +175,13 @@ impl<N> WalletSaplingOutput<N> {
/// Information about a note that is tracked by the wallet that is available for spending,
/// with sufficient information for use in note selection.
#[derive(Debug)]
pub struct ReceivedSaplingNote<NoteRef> {
pub note_id: NoteRef,
pub diversifier: sapling::Diversifier,
pub note_value: Amount,
pub rseed: sapling::Rseed,
pub witness: sapling::IncrementalWitness,
pub note_commitment_tree_position: Position,
}
impl<NoteRef> sapling_fees::InputView<NoteRef> for ReceivedSaplingNote<NoteRef> {

View File

@ -6,14 +6,29 @@ and this library adheres to Rust's notion of
[Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Added
- `zcash_client_sqlite::serialization` Serialization formats for data stored
as SQLite BLOBs in the wallet database.
### Changed
- MSRV is now 1.65.0.
- Bumped dependencies to `hdwallet 0.4`, `incrementalmerkletree 0.4`, `bs58 0.5`,
`zcash_primitives 0.12`
- A `CommitmentTree` variant has been added to `zcash_client_sqlite::wallet::init::WalletMigrationError`
- `min_confirmations` parameter values are now more strongly enforced. Previously,
a note could be spent with fewer than `min_confirmations` confirmations if the
wallet did not contain enough observed blocks to satisfy the `min_confirmations`
value specified; this situation is now treated as an error.
- A `BlockConflict` variant has been added to `zcash_client_sqlite::error::SqliteClientError`
### Removed
- The empty `wallet::transact` module has been removed.
### Fixed
- Fixed an off-by-one error in the `BlockSource` implementation for the SQLite-backed
`BlockDb` block database, which could result in blocks being skipped at the start of
scan ranges.
## [0.7.1] - 2023-05-17
### Fixed

View File

@ -15,7 +15,9 @@ rust-version = "1.65"
[dependencies]
incrementalmerkletree = { version = "0.4", features = ["legacy-api"] }
shardtree = { version = "0.0", features = ["legacy-api"] }
zcash_client_backend = { version = "0.9", path = "../zcash_client_backend" }
zcash_encoding = { version = "0.2", path = "../components/zcash_encoding" }
zcash_primitives = { version = "0.12", path = "../zcash_primitives", default-features = false }
# Dependencies exposed in a public API:
@ -27,15 +29,17 @@ hdwallet = { version = "0.4", optional = true }
# - Logging and metrics
tracing = "0.1"
# - Protobuf interfaces
# - Serialization
byteorder = "1"
prost = "0.11"
either = "1.8"
group = "0.13"
jubjub = "0.10"
# - Secret management
secrecy = "0.8"
# - SQLite databases
group = "0.13"
jubjub = "0.10"
rusqlite = { version = "0.29.0", features = ["bundled", "time", "array"] }
schemer = "0.2"
schemer-rusqlite = "0.2.2"
@ -48,6 +52,7 @@ uuid = "1.1"
[dev-dependencies]
assert_matches = "1.5"
incrementalmerkletree = { version = "0.4", features = ["legacy-api", "test-dependencies"] }
shardtree = { version = "0.0", features = ["legacy-api", "test-dependencies"] }
proptest = "1.0.0"
rand_core = "0.6"
regex = "1.4"
@ -63,6 +68,7 @@ test-dependencies = [
"incrementalmerkletree/test-dependencies",
"zcash_primitives/test-dependencies",
"zcash_client_backend/test-dependencies",
"incrementalmerkletree/test-dependencies",
]
transparent-inputs = ["hdwallet", "zcash_client_backend/transparent-inputs"]
unstable = ["zcash_client_backend/unstable"]

View File

@ -23,19 +23,19 @@ pub mod migrations;
/// Implements a traversal of `limit` blocks of the block cache database.
///
/// Starting at the next block above `last_scanned_height`, the `with_row` callback is invoked with
/// each block retrieved from the backing store. If the `limit` value provided is `None`, all
/// blocks are traversed up to the maximum height.
pub(crate) fn blockdb_with_blocks<F, DbErrT, NoteRef>(
/// Starting at `from_height`, the `with_row` callback is invoked with each block retrieved from
/// the backing store. If the `limit` value provided is `None`, all blocks are traversed up to the
/// maximum height.
pub(crate) fn blockdb_with_blocks<F, DbErrT>(
block_source: &BlockDb,
last_scanned_height: Option<BlockHeight>,
from_height: Option<BlockHeight>,
limit: Option<u32>,
mut with_row: F,
) -> Result<(), Error<DbErrT, SqliteClientError, NoteRef>>
) -> Result<(), Error<DbErrT, SqliteClientError>>
where
F: FnMut(CompactBlock) -> Result<(), Error<DbErrT, SqliteClientError, NoteRef>>,
F: FnMut(CompactBlock) -> Result<(), Error<DbErrT, SqliteClientError>>,
{
fn to_chain_error<D, E: Into<SqliteClientError>, N>(err: E) -> Error<D, SqliteClientError, N> {
fn to_chain_error<D, E: Into<SqliteClientError>>(err: E) -> Error<D, SqliteClientError> {
Error::BlockSource(err.into())
}
@ -43,15 +43,15 @@ where
let mut stmt_blocks = block_source
.0
.prepare(
"SELECT height, data FROM compactblocks
WHERE height > ?
"SELECT height, data FROM compactblocks
WHERE height >= ?
ORDER BY height ASC LIMIT ?",
)
.map_err(to_chain_error)?;
let mut rows = stmt_blocks
.query(params![
last_scanned_height.map_or(0u32, u32::from),
from_height.map_or(0u32, u32::from),
limit.unwrap_or(u32::max_value()),
])
.map_err(to_chain_error)?;
@ -191,20 +191,20 @@ pub(crate) fn blockmetadb_find_block(
/// Implements a traversal of `limit` blocks of the filesystem-backed
/// block cache.
///
/// Starting at the next block height above `last_scanned_height`, the `with_row` callback is
/// invoked with each block retrieved from the backing store. If the `limit` value provided is
/// `None`, all blocks are traversed up to the maximum height for which metadata is available.
/// Starting at `from_height`, the `with_row` callback is invoked with each block retrieved from
/// the backing store. If the `limit` value provided is `None`, all blocks are traversed up to the
/// maximum height for which metadata is available.
#[cfg(feature = "unstable")]
pub(crate) fn fsblockdb_with_blocks<F, DbErrT, NoteRef>(
pub(crate) fn fsblockdb_with_blocks<F, DbErrT>(
cache: &FsBlockDb,
last_scanned_height: Option<BlockHeight>,
from_height: Option<BlockHeight>,
limit: Option<u32>,
mut with_block: F,
) -> Result<(), Error<DbErrT, FsBlockDbError, NoteRef>>
) -> Result<(), Error<DbErrT, FsBlockDbError>>
where
F: FnMut(CompactBlock) -> Result<(), Error<DbErrT, FsBlockDbError, NoteRef>>,
F: FnMut(CompactBlock) -> Result<(), Error<DbErrT, FsBlockDbError>>,
{
fn to_chain_error<D, E: Into<FsBlockDbError>, N>(err: E) -> Error<D, FsBlockDbError, N> {
fn to_chain_error<D, E: Into<FsBlockDbError>>(err: E) -> Error<D, FsBlockDbError> {
Error::BlockSource(err.into())
}
@ -214,7 +214,7 @@ where
.prepare(
"SELECT height, blockhash, time, sapling_outputs_count, orchard_actions_count
FROM compactblocks_meta
WHERE height > ?
WHERE height >= ?
ORDER BY height ASC LIMIT ?",
)
.map_err(to_chain_error)?;
@ -222,7 +222,7 @@ where
let rows = stmt_blocks
.query_map(
params![
last_scanned_height.map_or(0u32, u32::from),
from_height.map_or(0u32, u32::from),
limit.unwrap_or(u32::max_value()),
],
|row| {
@ -265,18 +265,28 @@ where
#[cfg(test)]
#[allow(deprecated)]
mod tests {
use std::num::NonZeroU32;
use secrecy::Secret;
use tempfile::NamedTempFile;
use zcash_primitives::{
block::BlockHash, transaction::components::Amount, zip32::ExtendedSpendingKey,
block::BlockHash,
transaction::{components::Amount, fees::zip317::FeeRule},
zip32::ExtendedSpendingKey,
};
use zcash_client_backend::data_api::chain::{
error::{Cause, Error},
scan_cached_blocks, validate_chain,
use zcash_client_backend::{
address::RecipientAddress,
data_api::{
chain::scan_cached_blocks,
wallet::{input_selection::GreedyInputSelector, spend},
WalletRead, WalletWrite,
},
fees::{zip317::SingleOutputChangeStrategy, DustOutputPolicy},
wallet::OvkPolicy,
zip321::{Payment, TransactionRequest},
};
use zcash_client_backend::data_api::WalletRead;
use crate::{
chain::init::init_cache_database,
@ -314,24 +324,13 @@ mod tests {
&dfvk,
AddressType::DefaultExternal,
Amount::from_u64(5).unwrap(),
0,
);
insert_into_cache(&db_cache, &cb);
// Cache-only chain should be valid
let validate_chain_result = validate_chain(
&db_cache,
Some((fake_block_height, fake_block_hash)),
Some(1),
);
assert_matches!(validate_chain_result, Ok(()));
// Scan the cache
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
// Data-only chain should be valid
validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Create a second fake CompactBlock sending more value to the address
let (cb2, _) = fake_compact_block(
@ -340,17 +339,12 @@ mod tests {
&dfvk,
AddressType::DefaultExternal,
Amount::from_u64(7).unwrap(),
1,
);
insert_into_cache(&db_cache, &cb2);
// Data+cache chain should be valid
validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None).unwrap();
// Scan the cache again
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
// Data-only chain should be valid
validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
}
#[test]
@ -373,6 +367,7 @@ mod tests {
&dfvk,
AddressType::DefaultExternal,
Amount::from_u64(5).unwrap(),
0,
);
let (cb2, _) = fake_compact_block(
sapling_activation_height() + 1,
@ -380,15 +375,13 @@ mod tests {
&dfvk,
AddressType::DefaultExternal,
Amount::from_u64(7).unwrap(),
1,
);
insert_into_cache(&db_cache, &cb);
insert_into_cache(&db_cache, &cb2);
// Scan the cache
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
// Data-only chain should be valid
validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Create more fake CompactBlocks that don't connect to the scanned ones
let (cb3, _) = fake_compact_block(
@ -397,6 +390,7 @@ mod tests {
&dfvk,
AddressType::DefaultExternal,
Amount::from_u64(8).unwrap(),
2,
);
let (cb4, _) = fake_compact_block(
sapling_activation_height() + 3,
@ -404,14 +398,16 @@ mod tests {
&dfvk,
AddressType::DefaultExternal,
Amount::from_u64(3).unwrap(),
3,
);
insert_into_cache(&db_cache, &cb3);
insert_into_cache(&db_cache, &cb4);
// Data+cache chain should be invalid at the data/cache boundary
let val_result = validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None);
assert_matches!(val_result, Err(Error::Chain(e)) if e.at_height() == sapling_activation_height() + 2);
assert_matches!(
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None),
Err(_) // FIXME: check error result more closely
);
}
#[test]
@ -434,6 +430,7 @@ mod tests {
&dfvk,
AddressType::DefaultExternal,
Amount::from_u64(5).unwrap(),
0,
);
let (cb2, _) = fake_compact_block(
sapling_activation_height() + 1,
@ -441,15 +438,13 @@ mod tests {
&dfvk,
AddressType::DefaultExternal,
Amount::from_u64(7).unwrap(),
1,
);
insert_into_cache(&db_cache, &cb);
insert_into_cache(&db_cache, &cb2);
// Scan the cache
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
// Data-only chain should be valid
validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Create more fake CompactBlocks that contain a reorg
let (cb3, _) = fake_compact_block(
@ -458,6 +453,7 @@ mod tests {
&dfvk,
AddressType::DefaultExternal,
Amount::from_u64(8).unwrap(),
2,
);
let (cb4, _) = fake_compact_block(
sapling_activation_height() + 3,
@ -465,14 +461,16 @@ mod tests {
&dfvk,
AddressType::DefaultExternal,
Amount::from_u64(3).unwrap(),
3,
);
insert_into_cache(&db_cache, &cb3);
insert_into_cache(&db_cache, &cb4);
// Data+cache chain should be invalid inside the cache
let val_result = validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None);
assert_matches!(val_result, Err(Error::Chain(e)) if e.at_height() == sapling_activation_height() + 3);
assert_matches!(
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None),
Err(_) // FIXME: check error result more closely
);
}
#[test]
@ -503,6 +501,7 @@ mod tests {
&dfvk,
AddressType::DefaultExternal,
value,
0,
);
let (cb2, _) = fake_compact_block(
@ -511,12 +510,13 @@ mod tests {
&dfvk,
AddressType::DefaultExternal,
value2,
1,
);
insert_into_cache(&db_cache, &cb);
insert_into_cache(&db_cache, &cb2);
// Scan the cache
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Account balance should reflect both received notes
assert_eq!(
@ -527,7 +527,7 @@ mod tests {
// "Rewind" to height of last scanned block
db_data
.transactionally(|wdb| {
truncate_to_height(&wdb.conn.0, &wdb.params, sapling_activation_height() + 1)
truncate_to_height(wdb.conn.0, &wdb.params, sapling_activation_height() + 1)
})
.unwrap();
@ -540,7 +540,7 @@ mod tests {
// Rewind so that one block is dropped
db_data
.transactionally(|wdb| {
truncate_to_height(&wdb.conn.0, &wdb.params, sapling_activation_height())
truncate_to_height(wdb.conn.0, &wdb.params, sapling_activation_height())
})
.unwrap();
@ -551,7 +551,7 @@ mod tests {
);
// Scan the cache again
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Account balance should again reflect both received notes
assert_eq!(
@ -561,7 +561,7 @@ mod tests {
}
#[test]
fn scan_cached_blocks_requires_sequential_blocks() {
fn scan_cached_blocks_allows_blocks_out_of_order() {
let cache_file = NamedTempFile::new().unwrap();
let db_cache = BlockDb::for_path(cache_file.path()).unwrap();
init_cache_database(&db_cache).unwrap();
@ -571,7 +571,9 @@ mod tests {
init_wallet_db(&mut db_data, Some(Secret::new(vec![]))).unwrap();
// Add an account to the wallet
let (dfvk, _taddr) = init_test_accounts_table(&mut db_data);
let seed = Secret::new([0u8; 32].to_vec());
let (_, usk) = db_data.create_account(&seed).unwrap();
let dfvk = usk.sapling().to_diversifiable_full_viewing_key();
// Create a block with height SAPLING_ACTIVATION_HEIGHT
let value = Amount::from_u64(50000).unwrap();
@ -581,21 +583,23 @@ mod tests {
&dfvk,
AddressType::DefaultExternal,
value,
0,
);
insert_into_cache(&db_cache, &cb1);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
assert_eq!(
get_balance(&db_data.conn, AccountId::from(0)).unwrap(),
value
);
// We cannot scan a block of height SAPLING_ACTIVATION_HEIGHT + 2 next
// Create blocks to reach SAPLING_ACTIVATION_HEIGHT + 2
let (cb2, _) = fake_compact_block(
sapling_activation_height() + 1,
cb1.hash(),
&dfvk,
AddressType::DefaultExternal,
value,
1,
);
let (cb3, _) = fake_compact_block(
sapling_activation_height() + 2,
@ -603,26 +607,64 @@ mod tests {
&dfvk,
AddressType::DefaultExternal,
value,
2,
);
insert_into_cache(&db_cache, &cb3);
match scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None) {
Err(Error::Chain(e)) => {
assert_matches!(
e.cause(),
Cause::BlockHeightDiscontinuity(h) if *h
== sapling_activation_height() + 2
);
}
Ok(_) | Err(_) => panic!("Should have failed"),
}
// If we add a block of height SAPLING_ACTIVATION_HEIGHT + 1, we can now scan both
// Scan the later block first
insert_into_cache(&db_cache, &cb3);
assert_matches!(
scan_cached_blocks(
&tests::network(),
&db_cache,
&mut db_data,
Some(sapling_activation_height() + 2),
None
),
Ok(_)
);
// If we add a block of height SAPLING_ACTIVATION_HEIGHT + 1, we can now scan that
insert_into_cache(&db_cache, &cb2);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(
&tests::network(),
&db_cache,
&mut db_data,
Some(sapling_activation_height() + 1),
Some(1),
)
.unwrap();
assert_eq!(
get_balance(&db_data.conn, AccountId::from(0)).unwrap(),
Amount::from_u64(150_000).unwrap()
);
// We can spend the received notes
let req = TransactionRequest::new(vec![Payment {
recipient_address: RecipientAddress::Shielded(dfvk.default_address().1),
amount: Amount::from_u64(110_000).unwrap(),
memo: None,
label: None,
message: None,
other_params: vec![],
}])
.unwrap();
let input_selector = GreedyInputSelector::new(
SingleOutputChangeStrategy::new(FeeRule::standard()),
DustOutputPolicy::default(),
);
assert_matches!(
spend(
&mut db_data,
&tests::network(),
crate::wallet::sapling::tests::test_prover(),
&input_selector,
&usk,
req,
OvkPolicy::Sender,
NonZeroU32::new(1).unwrap(),
),
Ok(_)
);
}
#[test]
@ -652,11 +694,12 @@ mod tests {
&dfvk,
AddressType::DefaultExternal,
value,
0,
);
insert_into_cache(&db_cache, &cb);
// Scan the cache
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Account balance should reflect the received note
assert_eq!(
@ -672,11 +715,12 @@ mod tests {
&dfvk,
AddressType::DefaultExternal,
value2,
1,
);
insert_into_cache(&db_cache, &cb2);
// Scan the cache again
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Account balance should reflect both received notes
assert_eq!(
@ -712,11 +756,12 @@ mod tests {
&dfvk,
AddressType::DefaultExternal,
value,
0,
);
insert_into_cache(&db_cache, &cb);
// Scan the cache
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Account balance should reflect the received note
assert_eq!(
@ -737,11 +782,12 @@ mod tests {
&dfvk,
to2,
value2,
1,
),
);
// Scan the cache again
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Account balance should equal the change
assert_eq!(

View File

@ -1,12 +1,15 @@
//! Error types for problems that may arise when reading or storing wallet data to SQLite.
use either::Either;
use std::error;
use std::fmt;
use std::io;
use shardtree::ShardTreeError;
use zcash_client_backend::encoding::{Bech32DecodeError, TransparentCodecError};
use zcash_primitives::{consensus::BlockHeight, zip32::AccountId};
use crate::PRUNING_HEIGHT;
use crate::PRUNING_DEPTH;
#[cfg(feature = "transparent-inputs")]
use zcash_primitives::legacy::TransparentAddress;
@ -50,13 +53,15 @@ pub enum SqliteClientError {
/// A received memo cannot be interpreted as a UTF-8 string.
InvalidMemo(zcash_primitives::memo::Error),
/// A requested rewind would violate invariants of the
/// storage layer. The payload returned with this error is
/// (safe rewind height, requested height).
/// An attempt to update block data would overwrite the current hash for a block with a
/// different hash. This indicates that a required rewind was not performed.
BlockConflict(BlockHeight),
/// A requested rewind would violate invariants of the storage layer. The payload returned with
/// this error is (safe rewind height, requested height).
RequestedRewindInvalid(BlockHeight, BlockHeight),
/// The space of allocatable diversifier indices has been exhausted for
/// the given account.
/// The space of allocatable diversifier indices has been exhausted for the given account.
DiversifierIndexOutOfRange,
/// An error occurred deriving a spending key from a seed and an account
@ -74,6 +79,10 @@ pub enum SqliteClientError {
/// belonging to the wallet
#[cfg(feature = "transparent-inputs")]
AddressNotRecognized(TransparentAddress),
/// An error occurred in inserting data into or accessing data from one of the wallet's note
/// commitment trees.
CommitmentTree(ShardTreeError<Either<io::Error, rusqlite::Error>>),
}
impl error::Error for SqliteClientError {
@ -99,7 +108,7 @@ impl fmt::Display for SqliteClientError {
SqliteClientError::InvalidNoteId =>
write!(f, "The note ID associated with an inserted witness must correspond to a received note."),
SqliteClientError::RequestedRewindInvalid(h, r) =>
write!(f, "A rewind must be either of less than {} blocks, or at least back to block {} for your wallet; the requested height was {}.", PRUNING_HEIGHT, h, r),
write!(f, "A rewind must be either of less than {} blocks, or at least back to block {} for your wallet; the requested height was {}.", PRUNING_DEPTH, h, r),
SqliteClientError::Bech32DecodeError(e) => write!(f, "{}", e),
#[cfg(feature = "transparent-inputs")]
SqliteClientError::HdwalletError(e) => write!(f, "{:?}", e),
@ -108,12 +117,14 @@ impl fmt::Display for SqliteClientError {
SqliteClientError::DbError(e) => write!(f, "{}", e),
SqliteClientError::Io(e) => write!(f, "{}", e),
SqliteClientError::InvalidMemo(e) => write!(f, "{}", e),
SqliteClientError::BlockConflict(h) => write!(f, "A block hash conflict occurred at height {}; rewind required.", u32::from(*h)),
SqliteClientError::DiversifierIndexOutOfRange => write!(f, "The space of available diversifier indices is exhausted"),
SqliteClientError::KeyDerivationError(acct_id) => write!(f, "Key derivation failed for account {:?}", acct_id),
SqliteClientError::AccountIdDiscontinuity => write!(f, "Wallet account identifiers must be sequential."),
SqliteClientError::AccountIdOutOfRange => write!(f, "Wallet account identifiers must be less than 0x7FFFFFFF."),
#[cfg(feature = "transparent-inputs")]
SqliteClientError::AddressNotRecognized(_) => write!(f, "The address associated with a received txo is not identifiable as belonging to the wallet."),
SqliteClientError::CommitmentTree(err) => write!(f, "An error occurred accessing or updating note commitment tree data: {}.", err),
}
}
}
@ -160,3 +171,9 @@ impl From<zcash_primitives::memo::Error> for SqliteClientError {
SqliteClientError::InvalidMemo(e)
}
}
impl From<ShardTreeError<Either<io::Error, rusqlite::Error>>> for SqliteClientError {
fn from(e: ShardTreeError<Either<io::Error, rusqlite::Error>>) -> Self {
SqliteClientError::CommitmentTree(e)
}
}

View File

@ -32,10 +32,13 @@
// Catch documentation errors caused by code changes.
#![deny(rustdoc::broken_intra_doc_links)]
use either::Either;
use rusqlite::{self, Connection};
use secrecy::{ExposeSecret, SecretVec};
use std::{borrow::Borrow, collections::HashMap, convert::AsRef, fmt, path::Path};
use std::{borrow::Borrow, collections::HashMap, convert::AsRef, fmt, io, ops::Range, path::Path};
use incrementalmerkletree::Position;
use shardtree::{ShardTree, ShardTreeError};
use zcash_primitives::{
block::BlockHash,
consensus::{self, BlockHeight},
@ -52,8 +55,9 @@ use zcash_primitives::{
use zcash_client_backend::{
address::{AddressMetadata, UnifiedAddress},
data_api::{
self, chain::BlockSource, DecryptedTransaction, NullifierQuery, PoolType, PrunedBlock,
Recipient, SentTransaction, WalletRead, WalletWrite,
self, chain::BlockSource, BlockMetadata, DecryptedTransaction, NullifierQuery, PoolType,
Recipient, ScannedBlock, SentTransaction, WalletCommitmentTrees, WalletRead, WalletWrite,
SAPLING_SHARD_HEIGHT,
},
keys::{UnifiedFullViewingKey, UnifiedSpendingKey},
proto::compact_formats::CompactBlock,
@ -61,23 +65,26 @@ use zcash_client_backend::{
DecryptedOutput, TransferType,
};
use crate::error::SqliteClientError;
use crate::{error::SqliteClientError, wallet::commitment_tree::SqliteShardStore};
#[cfg(feature = "unstable")]
use {
crate::chain::{fsblockdb_with_blocks, BlockMeta},
std::fs,
std::path::PathBuf,
std::{fs, io},
};
pub mod chain;
pub mod error;
pub mod serialization;
pub mod wallet;
/// The maximum number of blocks the wallet is allowed to rewind. This is
/// consistent with the bound in zcashd, and allows block data deeper than
/// this delta from the chain tip to be pruned.
pub(crate) const PRUNING_HEIGHT: u32 = 100;
pub(crate) const PRUNING_DEPTH: u32 = 100;
pub(crate) const SAPLING_TABLES_PREFIX: &str = "sapling";
/// A newtype wrapper for sqlite primary key values for the notes
/// table.
@ -108,11 +115,11 @@ pub struct WalletDb<C, P> {
}
/// A wrapper for a SQLite transaction affecting the wallet database.
pub struct SqlTransaction<'conn>(pub(crate) rusqlite::Transaction<'conn>);
pub struct SqlTransaction<'conn>(pub(crate) &'conn rusqlite::Transaction<'conn>);
impl Borrow<rusqlite::Connection> for SqlTransaction<'_> {
fn borrow(&self) -> &rusqlite::Connection {
&self.0
self.0
}
}
@ -125,16 +132,17 @@ impl<P: consensus::Parameters + Clone> WalletDb<Connection, P> {
})
}
pub fn transactionally<F, A>(&mut self, f: F) -> Result<A, SqliteClientError>
pub fn transactionally<F, A, E: From<rusqlite::Error>>(&mut self, f: F) -> Result<A, E>
where
F: FnOnce(&WalletDb<SqlTransaction<'_>, P>) -> Result<A, SqliteClientError>,
F: FnOnce(&mut WalletDb<SqlTransaction<'_>, P>) -> Result<A, E>,
{
let wdb = WalletDb {
conn: SqlTransaction(self.conn.transaction()?),
let tx = self.conn.transaction()?;
let mut wdb = WalletDb {
conn: SqlTransaction(&tx),
params: self.params.clone(),
};
let result = f(&wdb)?;
wdb.conn.0.commit()?;
let result = f(&mut wdb)?;
tx.commit()?;
Ok(result)
}
}
@ -148,6 +156,22 @@ impl<C: Borrow<rusqlite::Connection>, P: consensus::Parameters> WalletRead for W
wallet::block_height_extrema(self.conn.borrow()).map_err(SqliteClientError::from)
}
fn block_metadata(&self, height: BlockHeight) -> Result<Option<BlockMetadata>, Self::Error> {
wallet::block_metadata(self.conn.borrow(), height)
}
fn block_fully_scanned(&self) -> Result<Option<BlockMetadata>, Self::Error> {
wallet::block_fully_scanned(self.conn.borrow())
}
fn suggest_scan_ranges(
&self,
_batch_size: usize,
_limit: usize,
) -> Result<Vec<Range<BlockHeight>>, Self::Error> {
todo!()
}
fn get_min_unspent_height(&self) -> Result<Option<BlockHeight>, Self::Error> {
wallet::get_min_unspent_height(self.conn.borrow()).map_err(SqliteClientError::from)
}
@ -160,6 +184,14 @@ impl<C: Borrow<rusqlite::Connection>, P: consensus::Parameters> WalletRead for W
wallet::get_tx_height(self.conn.borrow(), txid).map_err(SqliteClientError::from)
}
fn get_current_address(
&self,
account: AccountId,
) -> Result<Option<UnifiedAddress>, Self::Error> {
wallet::get_current_address(self.conn.borrow(), &self.params, account)
.map(|res| res.map(|(addr, _)| addr))
}
fn get_unified_full_viewing_keys(
&self,
) -> Result<HashMap<AccountId, UnifiedFullViewingKey>, Self::Error> {
@ -173,14 +205,6 @@ impl<C: Borrow<rusqlite::Connection>, P: consensus::Parameters> WalletRead for W
wallet::get_account_for_ufvk(self.conn.borrow(), &self.params, ufvk)
}
fn get_current_address(
&self,
account: AccountId,
) -> Result<Option<UnifiedAddress>, Self::Error> {
wallet::get_current_address(self.conn.borrow(), &self.params, account)
.map(|res| res.map(|(addr, _)| addr))
}
fn is_valid_account_extfvk(
&self,
account: AccountId,
@ -197,10 +221,6 @@ impl<C: Borrow<rusqlite::Connection>, P: consensus::Parameters> WalletRead for W
wallet::get_balance_at(self.conn.borrow(), account, anchor_height)
}
fn get_transaction(&self, id_tx: i64) -> Result<Transaction, Self::Error> {
wallet::get_transaction(self.conn.borrow(), &self.params, id_tx)
}
fn get_memo(&self, id_note: Self::NoteRef) -> Result<Option<Memo>, Self::Error> {
match id_note {
NoteId::SentNoteId(id_note) => wallet::get_sent_memo(self.conn.borrow(), id_note),
@ -210,24 +230,13 @@ impl<C: Borrow<rusqlite::Connection>, P: consensus::Parameters> WalletRead for W
}
}
fn get_commitment_tree(
&self,
block_height: BlockHeight,
) -> Result<Option<sapling::CommitmentTree>, Self::Error> {
wallet::sapling::get_sapling_commitment_tree(self.conn.borrow(), block_height)
}
#[allow(clippy::type_complexity)]
fn get_witnesses(
&self,
block_height: BlockHeight,
) -> Result<Vec<(Self::NoteRef, sapling::IncrementalWitness)>, Self::Error> {
wallet::sapling::get_sapling_witnesses(self.conn.borrow(), block_height)
fn get_transaction(&self, id_tx: i64) -> Result<Transaction, Self::Error> {
wallet::get_transaction(self.conn.borrow(), &self.params, id_tx)
}
fn get_sapling_nullifiers(
&self,
query: data_api::NullifierQuery,
query: NullifierQuery,
) -> Result<Vec<(AccountId, sapling::Nullifier)>, Self::Error> {
match query {
NullifierQuery::Unspent => wallet::sapling::get_sapling_nullifiers(self.conn.borrow()),
@ -327,7 +336,7 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
seed: &SecretVec<u8>,
) -> Result<(AccountId, UnifiedSpendingKey), Self::Error> {
self.transactionally(|wdb| {
let account = wallet::get_max_account_id(&wdb.conn.0)?
let account = wallet::get_max_account_id(wdb.conn.0)?
.map(|a| AccountId::from(u32::from(a) + 1))
.unwrap_or_else(|| AccountId::from(0));
@ -339,7 +348,7 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
.map_err(|_| SqliteClientError::KeyDerivationError(account))?;
let ufvk = usk.to_unified_full_viewing_key();
wallet::add_account(&wdb.conn.0, &wdb.params, account, &ufvk)?;
wallet::add_account(wdb.conn.0, &wdb.params, account, &ufvk)?;
Ok((account, usk))
})
@ -353,7 +362,7 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
|wdb| match wdb.get_unified_full_viewing_keys()?.get(&account) {
Some(ufvk) => {
let search_from =
match wallet::get_current_address(&wdb.conn.0, &wdb.params, account)? {
match wallet::get_current_address(wdb.conn.0, &wdb.params, account)? {
Some((_, mut last_diversifier_index)) => {
last_diversifier_index
.increment()
@ -368,7 +377,7 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
.ok_or(SqliteClientError::DiversifierIndexOutOfRange)?;
wallet::insert_address(
&wdb.conn.0,
wdb.conn.0,
&wdb.params,
account,
diversifier_index,
@ -382,63 +391,55 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
)
}
#[tracing::instrument(skip_all, fields(height = u32::from(block.block_height)))]
#[tracing::instrument(skip_all, fields(height = u32::from(block.height())))]
#[allow(clippy::type_complexity)]
fn advance_by_block(
fn put_block(
&mut self,
block: &PrunedBlock,
updated_witnesses: &[(Self::NoteRef, sapling::IncrementalWitness)],
) -> Result<Vec<(Self::NoteRef, sapling::IncrementalWitness)>, Self::Error> {
block: ScannedBlock<sapling::Nullifier>,
) -> Result<Vec<Self::NoteRef>, Self::Error> {
self.transactionally(|wdb| {
// Insert the block into the database.
wallet::insert_block(
&wdb.conn.0,
block.block_height,
block.block_hash,
block.block_time,
block.commitment_tree,
wallet::put_block(
wdb.conn.0,
block.height(),
block.block_hash(),
block.block_time(),
block.metadata().sapling_tree_size(),
)?;
let mut new_witnesses = vec![];
for tx in block.transactions {
let tx_row = wallet::put_tx_meta(&wdb.conn.0, tx, block.block_height)?;
let mut wallet_note_ids = vec![];
for tx in block.transactions() {
let tx_row = wallet::put_tx_meta(wdb.conn.0, tx, block.height())?;
// Mark notes as spent and remove them from the scanning cache
for spend in &tx.sapling_spends {
wallet::sapling::mark_sapling_note_spent(&wdb.conn.0, tx_row, spend.nf())?;
wallet::sapling::mark_sapling_note_spent(wdb.conn.0, tx_row, spend.nf())?;
}
for output in &tx.sapling_outputs {
let received_note_id =
wallet::sapling::put_received_note(&wdb.conn.0, output, tx_row)?;
wallet::sapling::put_received_note(wdb.conn.0, output, tx_row)?;
// Save witness for note.
new_witnesses.push((received_note_id, output.witness().clone()));
wallet_note_ids.push(received_note_id);
}
}
// Insert current new_witnesses into the database.
for (received_note_id, witness) in updated_witnesses.iter().chain(new_witnesses.iter())
{
if let NoteId::ReceivedNoteId(rnid) = *received_note_id {
wallet::sapling::insert_witness(
&wdb.conn.0,
rnid,
witness,
block.block_height,
)?;
} else {
return Err(SqliteClientError::InvalidNoteId);
}
}
// Prune the stored witnesses (we only expect rollbacks of at most PRUNING_HEIGHT blocks).
wallet::prune_witnesses(&wdb.conn.0, block.block_height - PRUNING_HEIGHT)?;
let block_height = block.height();
let sapling_tree_size = block.metadata().sapling_tree_size();
let sapling_commitments_len = block.sapling_commitments().len();
let mut sapling_commitments = block.into_sapling_commitments().into_iter();
wdb.with_sapling_tree_mut::<_, _, SqliteClientError>(move |sapling_tree| {
let start_position = Position::from(u64::from(sapling_tree_size))
- u64::try_from(sapling_commitments_len).unwrap();
sapling_tree.batch_insert(start_position, &mut sapling_commitments)?;
Ok(())
})?;
// Update now-expired transactions that didn't get mined.
wallet::update_expired_notes(&wdb.conn.0, block.block_height)?;
wallet::update_expired_notes(wdb.conn.0, block_height)?;
Ok(new_witnesses)
Ok(wallet_note_ids)
})
}
@ -447,113 +448,96 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
d_tx: DecryptedTransaction,
) -> Result<Self::TxRef, Self::Error> {
self.transactionally(|wdb| {
let tx_ref = wallet::put_tx_data(&wdb.conn.0, d_tx.tx, None, None)?;
let tx_ref = wallet::put_tx_data(wdb.conn.0, d_tx.tx, None, None)?;
let mut spending_account_id: Option<AccountId> = None;
for output in d_tx.sapling_outputs {
match output.transfer_type {
TransferType::Outgoing | TransferType::WalletInternal => {
let recipient = if output.transfer_type == TransferType::Outgoing {
Recipient::Sapling(output.note.recipient())
} else {
Recipient::InternalAccount(output.account, PoolType::Sapling)
};
let mut spending_account_id: Option<AccountId> = None;
for output in d_tx.sapling_outputs {
match output.transfer_type {
TransferType::Outgoing | TransferType::WalletInternal => {
let recipient = if output.transfer_type == TransferType::Outgoing {
Recipient::Sapling(output.note.recipient())
} else {
Recipient::InternalAccount(output.account, PoolType::Sapling)
};
wallet::put_sent_output(
&wdb.conn.0,
&wdb.params,
output.account,
tx_ref,
output.index,
&recipient,
Amount::from_u64(output.note.value().inner()).map_err(|_| {
SqliteClientError::CorruptedData(
"Note value is not a valid Zcash amount.".to_string(),
)
})?,
Some(&output.memo),
)?;
wallet::put_sent_output(
wdb.conn.0,
&wdb.params,
output.account,
tx_ref,
output.index,
&recipient,
Amount::from_u64(output.note.value().inner()).map_err(|_| {
SqliteClientError::CorruptedData(
"Note value is not a valid Zcash amount.".to_string(),
)
})?,
Some(&output.memo),
)?;
if matches!(recipient, Recipient::InternalAccount(_, _)) {
wallet::sapling::put_received_note(&wdb.conn.0, output, tx_ref)?;
if matches!(recipient, Recipient::InternalAccount(_, _)) {
wallet::sapling::put_received_note(wdb.conn.0, output, tx_ref)?;
}
}
}
TransferType::Incoming => {
match spending_account_id {
Some(id) => {
if id != output.account {
panic!("Unable to determine a unique account identifier for z->t spend.");
TransferType::Incoming => {
match spending_account_id {
Some(id) => {
if id != output.account {
panic!("Unable to determine a unique account identifier for z->t spend.");
}
}
None => {
spending_account_id = Some(output.account);
}
}
None => {
spending_account_id = Some(output.account);
wallet::sapling::put_received_note(wdb.conn.0, output, tx_ref)?;
}
}
}
// If any of the utxos spent in the transaction are ours, mark them as spent.
#[cfg(feature = "transparent-inputs")]
for txin in d_tx.tx.transparent_bundle().iter().flat_map(|b| b.vin.iter()) {
wallet::mark_transparent_utxo_spent(wdb.conn.0, tx_ref, &txin.prevout)?;
}
// If we have some transparent outputs:
if d_tx.tx.transparent_bundle().iter().any(|b| !b.vout.is_empty()) {
let nullifiers = wdb.get_sapling_nullifiers(NullifierQuery::All)?;
// If the transaction contains shielded spends from our wallet, we will store z->t
// transactions we observe in the same way they would be stored by
// create_spend_to_address.
if let Some((account_id, _)) = nullifiers.iter().find(
|(_, nf)|
d_tx.tx.sapling_bundle().iter().flat_map(|b| b.shielded_spends().iter())
.any(|input| nf == input.nullifier())
) {
for (output_index, txout) in d_tx.tx.transparent_bundle().iter().flat_map(|b| b.vout.iter()).enumerate() {
if let Some(address) = txout.recipient_address() {
wallet::put_sent_output(
wdb.conn.0,
&wdb.params,
*account_id,
tx_ref,
output_index,
&Recipient::Transparent(address),
txout.value,
None
)?;
}
}
wallet::sapling::put_received_note(&wdb.conn.0, output, tx_ref)?;
}
}
}
// If any of the utxos spent in the transaction are ours, mark them as spent.
#[cfg(feature = "transparent-inputs")]
for txin in d_tx
.tx
.transparent_bundle()
.iter()
.flat_map(|b| b.vin.iter())
{
wallet::mark_transparent_utxo_spent(&wdb.conn.0, tx_ref, &txin.prevout)?;
}
// If we have some transparent outputs:
if !d_tx
.tx
.transparent_bundle()
.iter()
.any(|b| b.vout.is_empty())
{
let nullifiers = wdb.get_sapling_nullifiers(data_api::NullifierQuery::All)?;
// If the transaction contains shielded spends from our wallet, we will store z->t
// transactions we observe in the same way they would be stored by
// create_spend_to_address.
if let Some((account_id, _)) = nullifiers.iter().find(|(_, nf)| {
d_tx.tx
.sapling_bundle()
.iter()
.flat_map(|b| b.shielded_spends().iter())
.any(|input| nf == input.nullifier())
}) {
for (output_index, txout) in d_tx
.tx
.transparent_bundle()
.iter()
.flat_map(|b| b.vout.iter())
.enumerate()
{
if let Some(address) = txout.recipient_address() {
wallet::put_sent_output(
&wdb.conn.0,
&wdb.params,
*account_id,
tx_ref,
output_index,
&Recipient::Transparent(address),
txout.value,
None,
)?;
}
}
}
}
Ok(tx_ref)
Ok(tx_ref)
})
}
fn store_sent_tx(&mut self, sent_tx: &SentTransaction) -> Result<Self::TxRef, Self::Error> {
self.transactionally(|wdb| {
let tx_ref = wallet::put_tx_data(
&wdb.conn.0,
wdb.conn.0,
sent_tx.tx,
Some(sent_tx.fee_amount),
Some(sent_tx.created),
@ -570,7 +554,7 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
if let Some(bundle) = sent_tx.tx.sapling_bundle() {
for spend in bundle.shielded_spends() {
wallet::sapling::mark_sapling_note_spent(
&wdb.conn.0,
wdb.conn.0,
tx_ref,
spend.nullifier(),
)?;
@ -579,12 +563,12 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
#[cfg(feature = "transparent-inputs")]
for utxo_outpoint in &sent_tx.utxos_spent {
wallet::mark_transparent_utxo_spent(&wdb.conn.0, tx_ref, utxo_outpoint)?;
wallet::mark_transparent_utxo_spent(wdb.conn.0, tx_ref, utxo_outpoint)?;
}
for output in &sent_tx.outputs {
wallet::insert_sent_output(
&wdb.conn.0,
wdb.conn.0,
&wdb.params,
tx_ref,
sent_tx.account,
@ -593,7 +577,7 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
if let Some((account, note)) = output.sapling_change_to() {
wallet::sapling::put_received_note(
&wdb.conn.0,
wdb.conn.0,
&DecryptedOutput {
index: output.output_index(),
note: note.clone(),
@ -615,7 +599,7 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
fn truncate_to_height(&mut self, block_height: BlockHeight) -> Result<(), Self::Error> {
self.transactionally(|wdb| {
wallet::truncate_to_height(&wdb.conn.0, &wdb.params, block_height)
wallet::truncate_to_height(wdb.conn.0, &wdb.params, block_height)
})
}
@ -633,6 +617,65 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
}
}
impl<P: consensus::Parameters> WalletCommitmentTrees for WalletDb<rusqlite::Connection, P> {
type Error = Either<io::Error, rusqlite::Error>;
type SaplingShardStore<'a> =
SqliteShardStore<&'a rusqlite::Transaction<'a>, sapling::Node, SAPLING_SHARD_HEIGHT>;
fn with_sapling_tree_mut<F, A, E>(&mut self, mut callback: F) -> Result<A, E>
where
for<'a> F: FnMut(
&'a mut ShardTree<
Self::SaplingShardStore<'a>,
{ sapling::NOTE_COMMITMENT_TREE_DEPTH },
SAPLING_SHARD_HEIGHT,
>,
) -> Result<A, E>,
E: From<ShardTreeError<Either<io::Error, rusqlite::Error>>>,
{
let tx = self
.conn
.transaction()
.map_err(|e| ShardTreeError::Storage(Either::Right(e)))?;
let shard_store = SqliteShardStore::from_connection(&tx, SAPLING_TABLES_PREFIX)
.map_err(|e| ShardTreeError::Storage(Either::Right(e)))?;
let result = {
let mut shardtree = ShardTree::new(shard_store, PRUNING_DEPTH.try_into().unwrap());
callback(&mut shardtree)?
};
tx.commit()
.map_err(|e| ShardTreeError::Storage(Either::Right(e)))?;
Ok(result)
}
}
impl<'conn, P: consensus::Parameters> WalletCommitmentTrees for WalletDb<SqlTransaction<'conn>, P> {
type Error = Either<io::Error, rusqlite::Error>;
type SaplingShardStore<'a> =
SqliteShardStore<&'a rusqlite::Transaction<'a>, sapling::Node, SAPLING_SHARD_HEIGHT>;
fn with_sapling_tree_mut<F, A, E>(&mut self, mut callback: F) -> Result<A, E>
where
for<'a> F: FnMut(
&'a mut ShardTree<
Self::SaplingShardStore<'a>,
{ sapling::NOTE_COMMITMENT_TREE_DEPTH },
SAPLING_SHARD_HEIGHT,
>,
) -> Result<A, E>,
E: From<ShardTreeError<Either<io::Error, rusqlite::Error>>>,
{
let mut shardtree = ShardTree::new(
SqliteShardStore::from_connection(self.conn.0, SAPLING_TABLES_PREFIX)
.map_err(|e| ShardTreeError::Storage(Either::Right(e)))?,
PRUNING_DEPTH.try_into().unwrap(),
);
let result = callback(&mut shardtree)?;
Ok(result)
}
}
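// Illustrative sketch (not part of this change): callers interact with the Sapling
// note commitment tree exclusively through `with_sapling_tree_mut`. For example,
// `truncate_to_height` rewinds the tree to a checkpointed height with:
//
//     wdb.with_sapling_tree_mut(|tree| {
//         tree.truncate_removing_checkpoint(&block_height).map(|_| ())
//     })?;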
/// A handle for the SQLite block source.
pub struct BlockDb(Connection);
@ -646,17 +689,14 @@ impl BlockDb {
impl BlockSource for BlockDb {
type Error = SqliteClientError;
fn with_blocks<F, DbErrT, NoteRef>(
fn with_blocks<F, DbErrT>(
&self,
from_height: Option<BlockHeight>,
limit: Option<u32>,
with_row: F,
) -> Result<(), data_api::chain::error::Error<DbErrT, Self::Error, NoteRef>>
) -> Result<(), data_api::chain::error::Error<DbErrT, Self::Error>>
where
F: FnMut(
CompactBlock,
)
-> Result<(), data_api::chain::error::Error<DbErrT, Self::Error, NoteRef>>,
F: FnMut(CompactBlock) -> Result<(), data_api::chain::error::Error<DbErrT, Self::Error>>,
{
chain::blockdb_with_blocks(self, from_height, limit, with_row)
}
@ -827,17 +867,14 @@ impl FsBlockDb {
impl BlockSource for FsBlockDb {
type Error = FsBlockDbError;
fn with_blocks<F, DbErrT, NoteRef>(
fn with_blocks<F, DbErrT>(
&self,
from_height: Option<BlockHeight>,
limit: Option<u32>,
with_row: F,
) -> Result<(), data_api::chain::error::Error<DbErrT, Self::Error, NoteRef>>
) -> Result<(), data_api::chain::error::Error<DbErrT, Self::Error>>
where
F: FnMut(
CompactBlock,
)
-> Result<(), data_api::chain::error::Error<DbErrT, Self::Error, NoteRef>>,
F: FnMut(CompactBlock) -> Result<(), data_api::chain::error::Error<DbErrT, Self::Error>>,
{
fsblockdb_with_blocks(self, from_height, limit, with_row)
}
@ -925,7 +962,7 @@ mod tests {
data_api::{WalletRead, WalletWrite},
keys::{sapling, UnifiedFullViewingKey},
proto::compact_formats::{
CompactBlock, CompactSaplingOutput, CompactSaplingSpend, CompactTx,
self as compact, CompactBlock, CompactSaplingOutput, CompactSaplingSpend, CompactTx,
},
};
@ -1024,6 +1061,7 @@ mod tests {
dfvk: &DiversifiableFullViewingKey,
req: AddressType,
value: Amount,
initial_sapling_tree_size: u32,
) -> (CompactBlock, Nullifier) {
let to = match req {
AddressType::DefaultExternal => dfvk.default_address().1,
@ -1069,6 +1107,11 @@ mod tests {
};
cb.prev_hash.extend_from_slice(&prev_hash.0);
cb.vtx.push(ctx);
cb.chain_metadata = Some(compact::ChainMetadata {
sapling_commitment_tree_size: initial_sapling_tree_size
+ cb.vtx.iter().map(|tx| tx.outputs.len() as u32).sum::<u32>(),
..Default::default()
});
(cb, note.nf(&dfvk.fvk().vk.nk, 0))
}
@ -1081,6 +1124,7 @@ mod tests {
dfvk: &DiversifiableFullViewingKey,
to: PaymentAddress,
value: Amount,
initial_sapling_tree_size: u32,
) -> CompactBlock {
let mut rng = OsRng;
let rseed = generate_random_rseed(&network(), height, &mut rng);
@ -1154,6 +1198,11 @@ mod tests {
};
cb.prev_hash.extend_from_slice(&prev_hash.0);
cb.vtx.push(ctx);
cb.chain_metadata = Some(compact::ChainMetadata {
sapling_commitment_tree_size: initial_sapling_tree_size
+ cb.vtx.iter().map(|tx| tx.outputs.len() as u32).sum::<u32>(),
..Default::default()
});
cb
}
@ -1267,6 +1316,7 @@ mod tests {
&dfvk,
AddressType::DefaultExternal,
Amount::from_u64(5).unwrap(),
0,
);
let (cb2, _) = fake_compact_block(
BlockHeight::from_u32(2),
@ -1274,6 +1324,7 @@ mod tests {
&dfvk,
AddressType::DefaultExternal,
Amount::from_u64(10).unwrap(),
1,
);
// Write the CompactBlocks to the BlockMeta DB's corresponding disk storage.

View File

@ -0,0 +1,120 @@
//! Serialization formats for data stored as SQLite BLOBs
use byteorder::{ReadBytesExt, WriteBytesExt};
use core::ops::Deref;
use shardtree::{Node, PrunableTree, RetentionFlags, Tree};
use std::io::{self, Read, Write};
use std::rc::Rc;
use zcash_encoding::Optional;
use zcash_primitives::merkle_tree::HashSer;
const SER_V1: u8 = 1;
const NIL_TAG: u8 = 0;
const LEAF_TAG: u8 = 1;
const PARENT_TAG: u8 = 2;
/// Writes a [`PrunableTree`] to the provided [`Write`] instance.
///
/// This is the primary method used for ShardTree shard persistence. It writes a version identifier
/// for the most-current serialized form, followed by the tree data.
pub fn write_shard<H: HashSer, W: Write>(writer: &mut W, tree: &PrunableTree<H>) -> io::Result<()> {
fn write_inner<H: HashSer, W: Write>(
mut writer: &mut W,
tree: &PrunableTree<H>,
) -> io::Result<()> {
match tree.deref() {
Node::Parent { ann, left, right } => {
writer.write_u8(PARENT_TAG)?;
Optional::write(&mut writer, ann.as_ref(), |w, h| {
<H as HashSer>::write(h, w)
})?;
write_inner(writer, left)?;
write_inner(writer, right)?;
Ok(())
}
Node::Leaf { value } => {
writer.write_u8(LEAF_TAG)?;
value.0.write(&mut writer)?;
writer.write_u8(value.1.bits())?;
Ok(())
}
Node::Nil => {
writer.write_u8(NIL_TAG)?;
Ok(())
}
}
}
writer.write_u8(SER_V1)?;
write_inner(writer, tree)
}
fn read_shard_v1<H: HashSer, R: Read>(mut reader: &mut R) -> io::Result<PrunableTree<H>> {
match reader.read_u8()? {
PARENT_TAG => {
let ann = Optional::read(&mut reader, <H as HashSer>::read)?.map(Rc::new);
let left = read_shard_v1(reader)?;
let right = read_shard_v1(reader)?;
Ok(Tree::parent(ann, left, right))
}
LEAF_TAG => {
let value = <H as HashSer>::read(&mut reader)?;
let flags = reader.read_u8().and_then(|bits| {
RetentionFlags::from_bits(bits).ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Byte value {} does not correspond to a valid set of retention flags",
bits
),
)
})
})?;
Ok(Tree::leaf((value, flags)))
}
NIL_TAG => Ok(Tree::empty()),
other => Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Node tag not recognized: {}", other),
)),
}
}
/// Reads a [`PrunableTree`] from the provided [`Read`] instance.
///
/// This function first parses a 1-byte version identifier, then dispatches to the
/// deserialization function for the observed version, returning an
/// [`io::ErrorKind::InvalidData`] error if the version is not recognized.
pub fn read_shard<H: HashSer, R: Read>(mut reader: R) -> io::Result<PrunableTree<H>> {
match reader.read_u8()? {
SER_V1 => read_shard_v1(&mut reader),
other => Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Shard serialization version not recognized: {}", other),
)),
}
}
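// Illustrative note (not part of this change): the buffer produced by `write_shard`
// and consumed by `read_shard` is a single version byte followed by a pre-order
// encoding of the tree:
//
//     parent: [PARENT_TAG, Optional-encoded annotation hash, <left subtree>, <right subtree>]
//     leaf:   [LEAF_TAG, HashSer-encoded value, retention flag bits (1 byte)]
//     nil:    [NIL_TAG]
//
// so a shard consisting of a single leaf serializes to
// [SER_V1, LEAF_TAG, <hash bytes>, <flags>].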
#[cfg(test)]
mod tests {
use incrementalmerkletree::frontier::testing::{arb_test_node, TestNode};
use proptest::prelude::*;
use shardtree::testing::arb_prunable_tree;
use std::io::Cursor;
use super::{read_shard, write_shard};
proptest! {
#[test]
fn check_shard_roundtrip(
tree in arb_prunable_tree(arb_test_node(), 8, 32)
) {
let mut tree_data = vec![];
write_shard(&mut tree_data, &tree).unwrap();
let cursor = Cursor::new(tree_data);
let tree_result = read_shard::<TestNode, _>(cursor).unwrap();
assert_eq!(tree, tree_result);
}
}
}

View File

@ -64,16 +64,16 @@
//! wallet.
//! - `memo` the shielded memo associated with the output, if any.
use rusqlite::{self, named_params, params, OptionalExtension, ToSql};
use std::collections::HashMap;
use rusqlite::{self, named_params, OptionalExtension, ToSql};
use std::convert::TryFrom;
use std::io::Cursor;
use std::{collections::HashMap, io};
use zcash_primitives::{
block::BlockHash,
consensus::{self, BlockHeight, BranchId, NetworkUpgrade, Parameters},
memo::{Memo, MemoBytes},
merkle_tree::write_commitment_tree,
sapling::CommitmentTree,
merkle_tree::read_commitment_tree,
transaction::{components::Amount, Transaction, TxId},
zip32::{
sapling::{DiversifiableFullViewingKey, ExtendedFullViewingKey},
@ -83,13 +83,15 @@ use zcash_primitives::{
use zcash_client_backend::{
address::{RecipientAddress, UnifiedAddress},
data_api::{PoolType, Recipient, SentTransactionOutput},
data_api::{BlockMetadata, PoolType, Recipient, SentTransactionOutput},
encoding::AddressCodec,
keys::UnifiedFullViewingKey,
wallet::WalletTx,
};
use crate::{error::SqliteClientError, PRUNING_HEIGHT};
use crate::{
error::SqliteClientError, SqlTransaction, WalletCommitmentTrees, WalletDb, PRUNING_DEPTH,
};
#[cfg(feature = "transparent-inputs")]
use {
@ -102,9 +104,12 @@ use {
},
};
pub(crate) mod commitment_tree;
pub mod init;
pub(crate) mod sapling;
pub(crate) const BLOCK_SAPLING_FRONTIER_ABSENT: &[u8] = &[0x0];
pub(crate) fn pool_code(pool_type: PoolType) -> i64 {
// These constants are *incidentally* shared with the typecodes
// for unified addresses, but this is exclusively an internal
@ -536,6 +541,95 @@ pub(crate) fn block_height_extrema(
})
}
fn parse_block_metadata(
row: (BlockHeight, Vec<u8>, Option<u32>, Vec<u8>),
) -> Result<BlockMetadata, SqliteClientError> {
let (block_height, hash_data, sapling_tree_size_opt, sapling_tree) = row;
let sapling_tree_size = sapling_tree_size_opt.map_or_else(|| {
if sapling_tree == BLOCK_SAPLING_FRONTIER_ABSENT {
Err(SqliteClientError::CorruptedData("One of either the Sapling tree size or the legacy Sapling commitment tree must be present.".to_owned()))
} else {
// parse the legacy commitment tree data
read_commitment_tree::<
zcash_primitives::sapling::Node,
_,
{ zcash_primitives::sapling::NOTE_COMMITMENT_TREE_DEPTH },
>(Cursor::new(sapling_tree))
.map(|tree| tree.size().try_into().unwrap())
.map_err(SqliteClientError::from)
}
}, Ok)?;
let block_hash = BlockHash::try_from_slice(&hash_data).ok_or_else(|| {
SqliteClientError::from(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid block hash length: {}", hash_data.len()),
))
})?;
Ok(BlockMetadata::from_parts(
block_height,
block_hash,
sapling_tree_size,
))
}
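// Note (not part of this change): when the `sapling_commitment_tree_size` column is
// NULL, the size is recovered by deserializing the legacy `sapling_tree`
// serialization and taking its size. The sentinel `BLOCK_SAPLING_FRONTIER_ABSENT`
// (a single 0x00 byte) indicates that neither source is available, which is
// reported as `CorruptedData`.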
pub(crate) fn block_metadata(
conn: &rusqlite::Connection,
block_height: BlockHeight,
) -> Result<Option<BlockMetadata>, SqliteClientError> {
conn.query_row(
"SELECT height, hash, sapling_commitment_tree_size, sapling_tree
FROM blocks
WHERE height = :block_height",
named_params![":block_height": u32::from(block_height)],
|row| {
let height: u32 = row.get(0)?;
let block_hash: Vec<u8> = row.get(1)?;
let sapling_tree_size: Option<u32> = row.get(2)?;
let sapling_tree: Vec<u8> = row.get(3)?;
Ok((
BlockHeight::from(height),
block_hash,
sapling_tree_size,
sapling_tree,
))
},
)
.optional()
.map_err(SqliteClientError::from)
.and_then(|meta_row| meta_row.map(parse_block_metadata).transpose())
}
pub(crate) fn block_fully_scanned(
conn: &rusqlite::Connection,
) -> Result<Option<BlockMetadata>, SqliteClientError> {
// FIXME: this will need to be rewritten once out-of-order scan range suggestion
// is implemented.
conn.query_row(
"SELECT height, hash, sapling_commitment_tree_size, sapling_tree
FROM blocks
ORDER BY height DESC
LIMIT 1",
[],
|row| {
let height: u32 = row.get(0)?;
let block_hash: Vec<u8> = row.get(1)?;
let sapling_tree_size: Option<u32> = row.get(2)?;
let sapling_tree: Vec<u8> = row.get(3)?;
Ok((
BlockHeight::from(height),
block_hash,
sapling_tree_size,
sapling_tree,
))
},
)
.optional()
.map_err(SqliteClientError::from)
.and_then(|meta_row| meta_row.map(parse_block_metadata).transpose())
}
/// Returns the block height at which the specified transaction was mined,
/// if any.
pub(crate) fn get_tx_height(
@ -607,7 +701,7 @@ pub(crate) fn truncate_to_height<P: consensus::Parameters>(
.map(|opt| opt.map_or_else(|| sapling_activation_height - 1, BlockHeight::from))
})?;
if block_height < last_scanned_height - PRUNING_HEIGHT {
if block_height < last_scanned_height - PRUNING_DEPTH {
if let Some(h) = get_min_unspent_height(conn)? {
if block_height > h {
return Err(SqliteClientError::RequestedRewindInvalid(h, block_height));
@ -617,7 +711,16 @@ pub(crate) fn truncate_to_height<P: consensus::Parameters>(
// nothing to do if we're deleting back down to the max height
if block_height < last_scanned_height {
// Decrement witnesses.
// Truncate the note commitment trees
let mut wdb = WalletDb {
conn: SqlTransaction(conn),
params: params.clone(),
};
wdb.with_sapling_tree_mut(|tree| {
tree.truncate_removing_checkpoint(&block_height).map(|_| ())
})?;
// Remove any legacy Sapling witnesses
conn.execute(
"DELETE FROM sapling_witnesses WHERE block > ?",
[u32::from(block_height)],
@ -679,15 +782,18 @@ pub(crate) fn get_unspent_transparent_outputs<P: consensus::Parameters>(
FROM utxos u
LEFT OUTER JOIN transactions tx
ON tx.id_tx = u.spent_in_tx
WHERE u.address = ?
AND u.height <= ?
WHERE u.address = :address
AND u.height <= :max_height
AND tx.block IS NULL",
)?;
let addr_str = address.encode(params);
let mut utxos = Vec::<WalletTransparentOutput>::new();
let mut rows = stmt_blocks.query(params![addr_str, u32::from(max_height)])?;
let mut rows = stmt_blocks.query(named_params![
":address": addr_str,
":max_height": u32::from(max_height)
])?;
let excluded: BTreeSet<OutPoint> = exclude.iter().cloned().collect();
while let Some(row) = rows.next()? {
let txid: Vec<u8> = row.get(0)?;
@ -740,14 +846,17 @@ pub(crate) fn get_transparent_balances<P: consensus::Parameters>(
FROM utxos u
LEFT OUTER JOIN transactions tx
ON tx.id_tx = u.spent_in_tx
WHERE u.received_by_account = ?
AND u.height <= ?
WHERE u.received_by_account = :account_id
AND u.height <= :max_height
AND tx.block IS NULL
GROUP BY u.address",
)?;
let mut res = HashMap::new();
let mut rows = stmt_blocks.query(params![u32::from(account), u32::from(max_height)])?;
let mut rows = stmt_blocks.query(named_params![
":account_id": u32::from(account),
":max_height": u32::from(max_height)
])?;
while let Some(row) = rows.next()? {
let taddr_str: String = row.get(0)?;
let taddr = TransparentAddress::decode(params, &taddr_str)?;
@ -760,26 +869,61 @@ pub(crate) fn get_transparent_balances<P: consensus::Parameters>(
}
/// Inserts information about a scanned block into the database.
pub(crate) fn insert_block(
conn: &rusqlite::Connection,
pub(crate) fn put_block(
conn: &rusqlite::Transaction<'_>,
block_height: BlockHeight,
block_hash: BlockHash,
block_time: u32,
commitment_tree: &CommitmentTree,
sapling_commitment_tree_size: u32,
) -> Result<(), SqliteClientError> {
let mut encoded_tree = Vec::new();
write_commitment_tree(commitment_tree, &mut encoded_tree).unwrap();
let block_hash_data = conn
.query_row(
"SELECT hash FROM blocks WHERE height = ?",
[u32::from(block_height)],
|row| row.get::<_, Vec<u8>>(0),
)
.optional()?;
let mut stmt_insert_block = conn.prepare_cached(
"INSERT INTO blocks (height, hash, time, sapling_tree)
VALUES (?, ?, ?, ?)",
// Ensure that in the case of an upsert, we don't overwrite block data
// with information for a block with a different hash.
if let Some(bytes) = block_hash_data {
let expected_hash = BlockHash::try_from_slice(&bytes).ok_or_else(|| {
SqliteClientError::CorruptedData(format!(
"Invalid block hash at height {}",
u32::from(block_height)
))
})?;
if expected_hash != block_hash {
return Err(SqliteClientError::BlockConflict(block_height));
}
}
let mut stmt_upsert_block = conn.prepare_cached(
"INSERT INTO blocks (
height,
hash,
time,
sapling_commitment_tree_size,
sapling_tree
)
VALUES (
:height,
:hash,
:block_time,
:sapling_commitment_tree_size,
x'00'
)
ON CONFLICT (height) DO UPDATE
SET hash = :hash,
time = :block_time,
sapling_commitment_tree_size = :sapling_commitment_tree_size",
)?;
stmt_insert_block.execute(params![
u32::from(block_height),
&block_hash.0[..],
block_time,
encoded_tree
stmt_upsert_block.execute(named_params![
":height": u32::from(block_height),
":hash": &block_hash.0[..],
":block_time": block_time,
":sapling_commitment_tree_size": sapling_commitment_tree_size
])?;
Ok(())
@ -922,7 +1066,7 @@ pub(crate) fn put_legacy_transparent_utxo<P: consensus::Parameters>(
#[cfg(feature = "transparent-inputs")]
let mut stmt_upsert_legacy_transparent_utxo = conn.prepare_cached(
"INSERT INTO utxos (
prevout_txid, prevout_idx,
prevout_txid, prevout_idx,
received_by_account, address, script,
value_zat, height)
VALUES
@ -951,17 +1095,6 @@ pub(crate) fn put_legacy_transparent_utxo<P: consensus::Parameters>(
stmt_upsert_legacy_transparent_utxo.query_row(sql_args, |row| row.get::<_, i64>(0).map(UtxoId))
}
/// Removes old incremental witnesses up to the given block height.
pub(crate) fn prune_witnesses(
conn: &rusqlite::Connection,
below_height: BlockHeight,
) -> Result<(), SqliteClientError> {
let mut stmt_prune_witnesses =
conn.prepare_cached("DELETE FROM sapling_witnesses WHERE block < ?")?;
stmt_prune_witnesses.execute([u32::from(below_height)])?;
Ok(())
}
/// Marks notes that have not been mined in transactions
/// as expired, up to the given block height.
pub(crate) fn update_expired_notes(
@ -1082,6 +1215,8 @@ pub(crate) fn put_sent_output<P: consensus::Parameters>(
#[cfg(test)]
mod tests {
use std::num::NonZeroU32;
use secrecy::Secret;
use tempfile::NamedTempFile;
@ -1124,7 +1259,12 @@ mod tests {
);
// We can't get an anchor height, as we have not scanned any blocks.
assert_eq!(db_data.get_target_and_anchor_heights(10).unwrap(), None);
assert_eq!(
db_data
.get_target_and_anchor_heights(NonZeroU32::new(10).unwrap())
.unwrap(),
None
);
// An invalid account has zero balance
assert_matches!(

View File

@ -0,0 +1,807 @@
use either::Either;
use rusqlite::{self, named_params, OptionalExtension};
use std::{
collections::BTreeSet,
io::{self, Cursor},
marker::PhantomData,
};
use incrementalmerkletree::{Address, Level, Position};
use shardtree::{Checkpoint, LocatedPrunableTree, PrunableTree, ShardStore, TreeState};
use zcash_primitives::{consensus::BlockHeight, merkle_tree::HashSer};
use crate::serialization::{read_shard, write_shard};
pub struct SqliteShardStore<C, H, const SHARD_HEIGHT: u8> {
pub(crate) conn: C,
table_prefix: &'static str,
_hash_type: PhantomData<H>,
}
impl<C, H, const SHARD_HEIGHT: u8> SqliteShardStore<C, H, SHARD_HEIGHT> {
const SHARD_ROOT_LEVEL: Level = Level::new(SHARD_HEIGHT);
pub(crate) fn from_connection(
conn: C,
table_prefix: &'static str,
) -> Result<Self, rusqlite::Error> {
Ok(SqliteShardStore {
conn,
table_prefix,
_hash_type: PhantomData,
})
}
}
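// Illustrative sketch (not part of this change): the store is constructed over
// either a `rusqlite::Connection` or a `rusqlite::Transaction` and wrapped in a
// `ShardTree`, mirroring the pattern used elsewhere in this crate:
//
//     let shard_store =
//         SqliteShardStore::<_, sapling::Node, SAPLING_SHARD_HEIGHT>::from_connection(
//             &conn,
//             SAPLING_TABLES_PREFIX,
//         )?;
//     let shard_tree: ShardTree<_, { sapling::NOTE_COMMITMENT_TREE_DEPTH }, SAPLING_SHARD_HEIGHT> =
//         ShardTree::new(shard_store, PRUNING_DEPTH.try_into().unwrap());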
impl<'conn, 'a: 'conn, H: HashSer, const SHARD_HEIGHT: u8> ShardStore
for SqliteShardStore<&'a rusqlite::Transaction<'conn>, H, SHARD_HEIGHT>
{
type H = H;
type CheckpointId = BlockHeight;
type Error = Either<io::Error, rusqlite::Error>;
fn get_shard(
&self,
shard_root: Address,
) -> Result<Option<LocatedPrunableTree<Self::H>>, Self::Error> {
get_shard(self.conn, self.table_prefix, shard_root)
}
fn last_shard(&self) -> Result<Option<LocatedPrunableTree<Self::H>>, Self::Error> {
last_shard(self.conn, self.table_prefix, Self::SHARD_ROOT_LEVEL)
}
fn put_shard(&mut self, subtree: LocatedPrunableTree<Self::H>) -> Result<(), Self::Error> {
put_shard(self.conn, self.table_prefix, subtree)
}
fn get_shard_roots(&self) -> Result<Vec<Address>, Self::Error> {
get_shard_roots(self.conn, self.table_prefix, Self::SHARD_ROOT_LEVEL)
}
fn truncate(&mut self, from: Address) -> Result<(), Self::Error> {
truncate(self.conn, self.table_prefix, from)
}
fn get_cap(&self) -> Result<PrunableTree<Self::H>, Self::Error> {
get_cap(self.conn, self.table_prefix)
}
fn put_cap(&mut self, cap: PrunableTree<Self::H>) -> Result<(), Self::Error> {
put_cap(self.conn, self.table_prefix, cap)
}
fn min_checkpoint_id(&self) -> Result<Option<Self::CheckpointId>, Self::Error> {
min_checkpoint_id(self.conn, self.table_prefix)
}
fn max_checkpoint_id(&self) -> Result<Option<Self::CheckpointId>, Self::Error> {
max_checkpoint_id(self.conn, self.table_prefix)
}
fn add_checkpoint(
&mut self,
checkpoint_id: Self::CheckpointId,
checkpoint: Checkpoint,
) -> Result<(), Self::Error> {
add_checkpoint(self.conn, self.table_prefix, checkpoint_id, checkpoint)
}
fn checkpoint_count(&self) -> Result<usize, Self::Error> {
checkpoint_count(self.conn, self.table_prefix)
}
fn get_checkpoint_at_depth(
&self,
checkpoint_depth: usize,
) -> Result<Option<(Self::CheckpointId, Checkpoint)>, Self::Error> {
get_checkpoint_at_depth(self.conn, self.table_prefix, checkpoint_depth)
}
fn get_checkpoint(
&self,
checkpoint_id: &Self::CheckpointId,
) -> Result<Option<Checkpoint>, Self::Error> {
get_checkpoint(self.conn, self.table_prefix, *checkpoint_id)
}
fn with_checkpoints<F>(&mut self, limit: usize, callback: F) -> Result<(), Self::Error>
where
F: FnMut(&Self::CheckpointId, &Checkpoint) -> Result<(), Self::Error>,
{
with_checkpoints(self.conn, self.table_prefix, limit, callback)
}
fn update_checkpoint_with<F>(
&mut self,
checkpoint_id: &Self::CheckpointId,
update: F,
) -> Result<bool, Self::Error>
where
F: Fn(&mut Checkpoint) -> Result<(), Self::Error>,
{
update_checkpoint_with(self.conn, self.table_prefix, *checkpoint_id, update)
}
fn remove_checkpoint(&mut self, checkpoint_id: &Self::CheckpointId) -> Result<(), Self::Error> {
remove_checkpoint(self.conn, self.table_prefix, *checkpoint_id)
}
fn truncate_checkpoints(
&mut self,
checkpoint_id: &Self::CheckpointId,
) -> Result<(), Self::Error> {
truncate_checkpoints(self.conn, self.table_prefix, *checkpoint_id)
}
}
impl<H: HashSer, const SHARD_HEIGHT: u8> ShardStore
for SqliteShardStore<rusqlite::Connection, H, SHARD_HEIGHT>
{
type H = H;
type CheckpointId = BlockHeight;
type Error = Either<io::Error, rusqlite::Error>;
fn get_shard(
&self,
shard_root: Address,
) -> Result<Option<LocatedPrunableTree<Self::H>>, Self::Error> {
get_shard(&self.conn, self.table_prefix, shard_root)
}
fn last_shard(&self) -> Result<Option<LocatedPrunableTree<Self::H>>, Self::Error> {
last_shard(&self.conn, self.table_prefix, Self::SHARD_ROOT_LEVEL)
}
fn put_shard(&mut self, subtree: LocatedPrunableTree<Self::H>) -> Result<(), Self::Error> {
let tx = self.conn.transaction().map_err(Either::Right)?;
put_shard(&tx, self.table_prefix, subtree)?;
tx.commit().map_err(Either::Right)?;
Ok(())
}
fn get_shard_roots(&self) -> Result<Vec<Address>, Self::Error> {
get_shard_roots(&self.conn, self.table_prefix, Self::SHARD_ROOT_LEVEL)
}
fn truncate(&mut self, from: Address) -> Result<(), Self::Error> {
truncate(&self.conn, self.table_prefix, from)
}
fn get_cap(&self) -> Result<PrunableTree<Self::H>, Self::Error> {
get_cap(&self.conn, self.table_prefix)
}
fn put_cap(&mut self, cap: PrunableTree<Self::H>) -> Result<(), Self::Error> {
put_cap(&self.conn, self.table_prefix, cap)
}
fn min_checkpoint_id(&self) -> Result<Option<Self::CheckpointId>, Self::Error> {
min_checkpoint_id(&self.conn, self.table_prefix)
}
fn max_checkpoint_id(&self) -> Result<Option<Self::CheckpointId>, Self::Error> {
max_checkpoint_id(&self.conn, self.table_prefix)
}
fn add_checkpoint(
&mut self,
checkpoint_id: Self::CheckpointId,
checkpoint: Checkpoint,
) -> Result<(), Self::Error> {
let tx = self.conn.transaction().map_err(Either::Right)?;
add_checkpoint(&tx, self.table_prefix, checkpoint_id, checkpoint)?;
tx.commit().map_err(Either::Right)
}
fn checkpoint_count(&self) -> Result<usize, Self::Error> {
checkpoint_count(&self.conn, self.table_prefix)
}
fn get_checkpoint_at_depth(
&self,
checkpoint_depth: usize,
) -> Result<Option<(Self::CheckpointId, Checkpoint)>, Self::Error> {
get_checkpoint_at_depth(&self.conn, self.table_prefix, checkpoint_depth)
}
fn get_checkpoint(
&self,
checkpoint_id: &Self::CheckpointId,
) -> Result<Option<Checkpoint>, Self::Error> {
get_checkpoint(&self.conn, self.table_prefix, *checkpoint_id)
}
fn with_checkpoints<F>(&mut self, limit: usize, callback: F) -> Result<(), Self::Error>
where
F: FnMut(&Self::CheckpointId, &Checkpoint) -> Result<(), Self::Error>,
{
let tx = self.conn.transaction().map_err(Either::Right)?;
with_checkpoints(&tx, self.table_prefix, limit, callback)?;
tx.commit().map_err(Either::Right)
}
fn update_checkpoint_with<F>(
&mut self,
checkpoint_id: &Self::CheckpointId,
update: F,
) -> Result<bool, Self::Error>
where
F: Fn(&mut Checkpoint) -> Result<(), Self::Error>,
{
let tx = self.conn.transaction().map_err(Either::Right)?;
let result = update_checkpoint_with(&tx, self.table_prefix, *checkpoint_id, update)?;
tx.commit().map_err(Either::Right)?;
Ok(result)
}
fn remove_checkpoint(&mut self, checkpoint_id: &Self::CheckpointId) -> Result<(), Self::Error> {
let tx = self.conn.transaction().map_err(Either::Right)?;
remove_checkpoint(&tx, self.table_prefix, *checkpoint_id)?;
tx.commit().map_err(Either::Right)
}
fn truncate_checkpoints(
&mut self,
checkpoint_id: &Self::CheckpointId,
) -> Result<(), Self::Error> {
let tx = self.conn.transaction().map_err(Either::Right)?;
truncate_checkpoints(&tx, self.table_prefix, *checkpoint_id)?;
tx.commit().map_err(Either::Right)
}
}
type Error = Either<io::Error, rusqlite::Error>;
pub(crate) fn get_shard<H: HashSer>(
conn: &rusqlite::Connection,
table_prefix: &'static str,
shard_root: Address,
) -> Result<Option<LocatedPrunableTree<H>>, Error> {
conn.query_row(
&format!(
"SELECT shard_data
FROM {}_tree_shards
WHERE shard_index = :shard_index",
table_prefix
),
named_params![":shard_index": shard_root.index()],
|row| row.get::<_, Vec<u8>>(0),
)
.optional()
.map_err(Either::Right)?
.map(|shard_data| {
let shard_tree = read_shard(&mut Cursor::new(shard_data)).map_err(Either::Left)?;
Ok(LocatedPrunableTree::from_parts(shard_root, shard_tree))
})
.transpose()
}
pub(crate) fn last_shard<H: HashSer>(
conn: &rusqlite::Connection,
table_prefix: &'static str,
shard_root_level: Level,
) -> Result<Option<LocatedPrunableTree<H>>, Error> {
conn.query_row(
&format!(
"SELECT shard_index, shard_data
FROM {}_tree_shards
ORDER BY shard_index DESC
LIMIT 1",
table_prefix
),
[],
|row| {
let shard_index: u64 = row.get(0)?;
let shard_data: Vec<u8> = row.get(1)?;
Ok((shard_index, shard_data))
},
)
.optional()
.map_err(Either::Right)?
.map(|(shard_index, shard_data)| {
let shard_root = Address::from_parts(shard_root_level, shard_index);
let shard_tree = read_shard(&mut Cursor::new(shard_data)).map_err(Either::Left)?;
Ok(LocatedPrunableTree::from_parts(shard_root, shard_tree))
})
.transpose()
}
pub(crate) fn put_shard<H: HashSer>(
conn: &rusqlite::Transaction<'_>,
table_prefix: &'static str,
subtree: LocatedPrunableTree<H>,
) -> Result<(), Error> {
let subtree_root_hash = subtree
.root()
.annotation()
.and_then(|ann| {
ann.as_ref().map(|rc| {
let mut root_hash = vec![];
rc.write(&mut root_hash)?;
Ok(root_hash)
})
})
.transpose()
.map_err(Either::Left)?;
let mut subtree_data = vec![];
write_shard(&mut subtree_data, subtree.root()).map_err(Either::Left)?;
let mut stmt_put_shard = conn
.prepare_cached(&format!(
"INSERT INTO {}_tree_shards (shard_index, root_hash, shard_data)
VALUES (:shard_index, :root_hash, :shard_data)
ON CONFLICT (shard_index) DO UPDATE
SET root_hash = :root_hash,
shard_data = :shard_data",
table_prefix
))
.map_err(Either::Right)?;
stmt_put_shard
.execute(named_params![
":shard_index": subtree.root_addr().index(),
":root_hash": subtree_root_hash,
":shard_data": subtree_data
])
.map_err(Either::Right)?;
Ok(())
}
pub(crate) fn get_shard_roots(
conn: &rusqlite::Connection,
table_prefix: &'static str,
shard_root_level: Level,
) -> Result<Vec<Address>, Error> {
let mut stmt = conn
.prepare(&format!(
"SELECT shard_index FROM {}_tree_shards ORDER BY shard_index",
table_prefix
))
.map_err(Either::Right)?;
let mut rows = stmt.query([]).map_err(Either::Right)?;
let mut res = vec![];
while let Some(row) = rows.next().map_err(Either::Right)? {
res.push(Address::from_parts(
shard_root_level,
row.get(0).map_err(Either::Right)?,
));
}
Ok(res)
}
pub(crate) fn truncate(
conn: &rusqlite::Connection,
table_prefix: &'static str,
from: Address,
) -> Result<(), Error> {
conn.execute(
&format!(
"DELETE FROM {}_tree_shards WHERE shard_index >= ?",
table_prefix
),
[from.index()],
)
.map_err(Either::Right)
.map(|_| ())
}
pub(crate) fn get_cap<H: HashSer>(
conn: &rusqlite::Connection,
table_prefix: &'static str,
) -> Result<PrunableTree<H>, Error> {
conn.query_row(
&format!("SELECT cap_data FROM {}_tree_cap", table_prefix),
[],
|row| row.get::<_, Vec<u8>>(0),
)
.optional()
.map_err(Either::Right)?
.map_or_else(
|| Ok(PrunableTree::empty()),
|cap_data| read_shard(&mut Cursor::new(cap_data)).map_err(Either::Left),
)
}
pub(crate) fn put_cap<H: HashSer>(
conn: &rusqlite::Connection,
table_prefix: &'static str,
cap: PrunableTree<H>,
) -> Result<(), Error> {
let mut stmt = conn
.prepare_cached(&format!(
"INSERT INTO {}_tree_cap (cap_id, cap_data)
VALUES (0, :cap_data)
ON CONFLICT (cap_id) DO UPDATE
SET cap_data = :cap_data",
table_prefix
))
.map_err(Either::Right)?;
let mut cap_data = vec![];
write_shard(&mut cap_data, &cap).map_err(Either::Left)?;
stmt.execute([cap_data]).map_err(Either::Right)?;
Ok(())
}
pub(crate) fn min_checkpoint_id(
conn: &rusqlite::Connection,
table_prefix: &'static str,
) -> Result<Option<BlockHeight>, Error> {
conn.query_row(
&format!(
"SELECT MIN(checkpoint_id) FROM {}_tree_checkpoints",
table_prefix
),
[],
|row| {
row.get::<_, Option<u32>>(0)
.map(|opt| opt.map(BlockHeight::from))
},
)
.map_err(Either::Right)
}
pub(crate) fn max_checkpoint_id(
conn: &rusqlite::Connection,
table_prefix: &'static str,
) -> Result<Option<BlockHeight>, Error> {
conn.query_row(
&format!(
"SELECT MAX(checkpoint_id) FROM {}_tree_checkpoints",
table_prefix
),
[],
|row| {
row.get::<_, Option<u32>>(0)
.map(|opt| opt.map(BlockHeight::from))
},
)
.map_err(Either::Right)
}
pub(crate) fn add_checkpoint(
conn: &rusqlite::Transaction<'_>,
table_prefix: &'static str,
checkpoint_id: BlockHeight,
checkpoint: Checkpoint,
) -> Result<(), Error> {
let mut stmt_insert_checkpoint = conn
.prepare_cached(&format!(
"INSERT INTO {}_tree_checkpoints (checkpoint_id, position)
VALUES (:checkpoint_id, :position)",
table_prefix
))
.map_err(Either::Right)?;
stmt_insert_checkpoint
.execute(named_params![
":checkpoint_id": u32::from(checkpoint_id),
":position": checkpoint.position().map(u64::from)
])
.map_err(Either::Right)?;
let mut stmt_insert_mark_removed = conn
.prepare_cached(&format!(
"INSERT INTO {}_tree_checkpoint_marks_removed (checkpoint_id, mark_removed_position)
VALUES (:checkpoint_id, :position)",
table_prefix
))
.map_err(Either::Right)?;
for pos in checkpoint.marks_removed() {
stmt_insert_mark_removed
.execute(named_params![
":checkpoint_id": u32::from(checkpoint_id),
":position": u64::from(*pos)
])
.map_err(Either::Right)?;
}
Ok(())
}
pub(crate) fn checkpoint_count(
conn: &rusqlite::Connection,
table_prefix: &'static str,
) -> Result<usize, Error> {
conn.query_row(
&format!("SELECT COUNT(*) FROM {}_tree_checkpoints", table_prefix),
[],
|row| row.get::<_, usize>(0),
)
.map_err(Either::Right)
}
pub(crate) fn get_checkpoint(
conn: &rusqlite::Connection,
table_prefix: &'static str,
checkpoint_id: BlockHeight,
) -> Result<Option<Checkpoint>, Error> {
let checkpoint_position = conn
.query_row(
&format!(
"SELECT position
FROM {}_tree_checkpoints
WHERE checkpoint_id = ?",
table_prefix
),
[u32::from(checkpoint_id)],
|row| {
row.get::<_, Option<u64>>(0)
.map(|opt| opt.map(Position::from))
},
)
.optional()
.map_err(Either::Right)?;
checkpoint_position
.map(|pos_opt| {
let mut stmt = conn
.prepare_cached(&format!(
"SELECT mark_removed_position
FROM {}_tree_checkpoint_marks_removed
WHERE checkpoint_id = ?",
table_prefix
))
.map_err(Either::Right)?;
let mark_removed_rows = stmt
.query([u32::from(checkpoint_id)])
.map_err(Either::Right)?;
let marks_removed = mark_removed_rows
.mapped(|row| row.get::<_, u64>(0).map(Position::from))
.collect::<Result<BTreeSet<_>, _>>()
.map_err(Either::Right)?;
Ok(Checkpoint::from_parts(
pos_opt.map_or(TreeState::Empty, TreeState::AtPosition),
marks_removed,
))
})
.transpose()
}
pub(crate) fn get_checkpoint_at_depth(
conn: &rusqlite::Connection,
table_prefix: &'static str,
checkpoint_depth: usize,
) -> Result<Option<(BlockHeight, Checkpoint)>, Error> {
if checkpoint_depth == 0 {
return Ok(None);
}
let checkpoint_parts = conn
.query_row(
&format!(
"SELECT checkpoint_id, position
FROM {}_tree_checkpoints
ORDER BY checkpoint_id DESC
LIMIT 1
OFFSET :offset",
table_prefix
),
named_params![":offset": checkpoint_depth - 1],
|row| {
let checkpoint_id: u32 = row.get(0)?;
let position: Option<u64> = row.get(1)?;
Ok((
BlockHeight::from(checkpoint_id),
position.map(Position::from),
))
},
)
.optional()
.map_err(Either::Right)?;
checkpoint_parts
.map(|(checkpoint_id, pos_opt)| {
let mut stmt = conn
.prepare_cached(&format!(
"SELECT mark_removed_position
FROM {}_tree_checkpoint_marks_removed
WHERE checkpoint_id = ?",
table_prefix
))
.map_err(Either::Right)?;
let mark_removed_rows = stmt
.query([u32::from(checkpoint_id)])
.map_err(Either::Right)?;
let marks_removed = mark_removed_rows
.mapped(|row| row.get::<_, u64>(0).map(Position::from))
.collect::<Result<BTreeSet<_>, _>>()
.map_err(Either::Right)?;
Ok((
checkpoint_id,
Checkpoint::from_parts(
pos_opt.map_or(TreeState::Empty, TreeState::AtPosition),
marks_removed,
),
))
})
.transpose()
}
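// Note (not part of this change): as implemented above, a `checkpoint_depth` of 0
// yields `None`, while a depth of 1 selects the most recently added checkpoint
// (`ORDER BY checkpoint_id DESC ... OFFSET 0`), a depth of 2 the one before it, and
// so on. For example, with checkpoints at heights 100, 101, and 102, depth 1
// returns the checkpoint for height 102 and depth 2 the one for height 101.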
pub(crate) fn with_checkpoints<F>(
conn: &rusqlite::Transaction<'_>,
table_prefix: &'static str,
limit: usize,
mut callback: F,
) -> Result<(), Error>
where
F: FnMut(&BlockHeight, &Checkpoint) -> Result<(), Error>,
{
let mut stmt_get_checkpoints = conn
.prepare_cached(&format!(
"SELECT checkpoint_id, position
FROM {}_tree_checkpoints
ORDER BY position
LIMIT :limit",
table_prefix
))
.map_err(Either::Right)?;
let mut stmt_get_checkpoint_marks_removed = conn
.prepare_cached(&format!(
"SELECT mark_removed_position
FROM {}_tree_checkpoint_marks_removed
WHERE checkpoint_id = :checkpoint_id",
table_prefix
))
.map_err(Either::Right)?;
let mut rows = stmt_get_checkpoints
.query(named_params![":limit": limit])
.map_err(Either::Right)?;
while let Some(row) = rows.next().map_err(Either::Right)? {
let checkpoint_id = row.get::<_, u32>(0).map_err(Either::Right)?;
let tree_state = row
.get::<_, Option<u64>>(1)
.map(|opt| opt.map_or_else(|| TreeState::Empty, |p| TreeState::AtPosition(p.into())))
.map_err(Either::Right)?;
let mark_removed_rows = stmt_get_checkpoint_marks_removed
.query(named_params![":checkpoint_id": checkpoint_id])
.map_err(Either::Right)?;
let marks_removed = mark_removed_rows
.mapped(|row| row.get::<_, u64>(0).map(Position::from))
.collect::<Result<BTreeSet<_>, _>>()
.map_err(Either::Right)?;
callback(
&BlockHeight::from(checkpoint_id),
&Checkpoint::from_parts(tree_state, marks_removed),
)?
}
Ok(())
}
pub(crate) fn update_checkpoint_with<F>(
conn: &rusqlite::Transaction<'_>,
table_prefix: &'static str,
checkpoint_id: BlockHeight,
update: F,
) -> Result<bool, Error>
where
F: Fn(&mut Checkpoint) -> Result<(), Error>,
{
if let Some(mut c) = get_checkpoint(conn, table_prefix, checkpoint_id)? {
update(&mut c)?;
remove_checkpoint(conn, table_prefix, checkpoint_id)?;
add_checkpoint(conn, table_prefix, checkpoint_id, c)?;
Ok(true)
} else {
Ok(false)
}
}
pub(crate) fn remove_checkpoint(
conn: &rusqlite::Transaction<'_>,
table_prefix: &'static str,
checkpoint_id: BlockHeight,
) -> Result<(), Error> {
// cascading delete here obviates the need to manually delete from
// `tree_checkpoint_marks_removed`
let mut stmt_delete_checkpoint = conn
.prepare_cached(&format!(
"DELETE FROM {}_tree_checkpoints
WHERE checkpoint_id = :checkpoint_id",
table_prefix
))
.map_err(Either::Right)?;
stmt_delete_checkpoint
.execute(named_params![":checkpoint_id": u32::from(checkpoint_id),])
.map_err(Either::Right)?;
Ok(())
}
pub(crate) fn truncate_checkpoints(
conn: &rusqlite::Transaction<'_>,
table_prefix: &'static str,
checkpoint_id: BlockHeight,
) -> Result<(), Error> {
// cascading delete here obviates the need to manually delete from
// `tree_checkpoint_marks_removed`
conn.execute(
&format!(
"DELETE FROM {}_tree_checkpoints WHERE checkpoint_id >= ?",
table_prefix
),
[u32::from(checkpoint_id)],
)
.map_err(Either::Right)?;
Ok(())
}
#[cfg(test)]
mod tests {
use tempfile::NamedTempFile;
use incrementalmerkletree::testing::{
check_append, check_checkpoint_rewind, check_remove_mark, check_rewind_remove_mark,
check_root_hashes, check_witness_consistency, check_witnesses,
};
use shardtree::ShardTree;
use super::SqliteShardStore;
use crate::{tests, wallet::init::init_wallet_db, WalletDb};
fn new_tree(m: usize) -> ShardTree<SqliteShardStore<rusqlite::Connection, String, 3>, 4, 3> {
let data_file = NamedTempFile::new().unwrap();
let mut db_data = WalletDb::for_path(data_file.path(), tests::network()).unwrap();
data_file.keep().unwrap();
init_wallet_db(&mut db_data, None).unwrap();
let store =
SqliteShardStore::<_, String, 3>::from_connection(db_data.conn, "sapling").unwrap();
ShardTree::new(store, m)
}
#[test]
fn append() {
check_append(new_tree);
}
#[test]
fn root_hashes() {
check_root_hashes(new_tree);
}
#[test]
fn witnesses() {
check_witnesses(new_tree);
}
#[test]
fn witness_consistency() {
check_witness_consistency(new_tree);
}
#[test]
fn checkpoint_rewind() {
check_checkpoint_rewind(new_tree);
}
#[test]
fn remove_mark() {
check_remove_mark(new_tree);
}
#[test]
fn rewind_remove_mark() {
check_rewind_remove_mark(new_tree);
}
}

View File

@ -1,23 +1,29 @@
//! Functions for initializing the various databases.
use std::collections::HashMap;
use std::fmt;
use either::Either;
use incrementalmerkletree::Retention;
use std::{collections::HashMap, fmt, io};
use rusqlite::{self, types::ToSql};
use schemer::{Migrator, MigratorError};
use schemer_rusqlite::RusqliteAdapter;
use secrecy::SecretVec;
use shardtree::{ShardTree, ShardTreeError};
use uuid::Uuid;
use zcash_primitives::{
block::BlockHash,
consensus::{self, BlockHeight},
merkle_tree::read_commitment_tree,
sapling,
transaction::components::amount::BalanceError,
zip32::AccountId,
};
use zcash_client_backend::keys::UnifiedFullViewingKey;
use zcash_client_backend::{data_api::SAPLING_SHARD_HEIGHT, keys::UnifiedFullViewingKey};
use crate::{error::SqliteClientError, wallet, WalletDb};
use crate::{error::SqliteClientError, wallet, WalletDb, PRUNING_DEPTH, SAPLING_TABLES_PREFIX};
use super::commitment_tree::SqliteShardStore;
mod migrations;
@ -34,6 +40,9 @@ pub enum WalletMigrationError {
/// Wrapper for amount balance violations
BalanceError(BalanceError),
/// Wrapper for commitment tree invariant violations
CommitmentTree(ShardTreeError<Either<io::Error, rusqlite::Error>>),
}
impl From<rusqlite::Error> for WalletMigrationError {
@ -48,6 +57,12 @@ impl From<BalanceError> for WalletMigrationError {
}
}
impl From<ShardTreeError<Either<io::Error, rusqlite::Error>>> for WalletMigrationError {
fn from(e: ShardTreeError<Either<io::Error, rusqlite::Error>>) -> Self {
WalletMigrationError::CommitmentTree(e)
}
}
impl fmt::Display for WalletMigrationError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self {
@ -62,6 +77,7 @@ impl fmt::Display for WalletMigrationError {
}
WalletMigrationError::DbError(e) => write!(f, "{}", e),
WalletMigrationError::BalanceError(e) => write!(f, "Balance error: {:?}", e),
WalletMigrationError::CommitmentTree(e) => write!(f, "Commitment tree error: {:?}", e),
}
}
}
@ -226,7 +242,7 @@ pub fn init_accounts_table<P: consensus::Parameters>(
// Insert accounts atomically
for (account, key) in keys.iter() {
wallet::add_account(&wdb.conn.0, &wdb.params, *account, key)?;
wallet::add_account(wdb.conn.0, &wdb.params, *account, key)?;
}
Ok(())
@ -278,9 +294,21 @@ pub fn init_blocks_table<P: consensus::Parameters>(
return Err(SqliteClientError::TableNotEmpty);
}
let block_end_tree =
read_commitment_tree::<sapling::Node, _, { sapling::NOTE_COMMITMENT_TREE_DEPTH }>(
sapling_tree,
)
.map_err(|e| {
rusqlite::Error::FromSqlConversionFailure(
sapling_tree.len(),
rusqlite::types::Type::Blob,
Box::new(e),
)
})?;
wdb.conn.0.execute(
"INSERT INTO blocks (height, hash, time, sapling_tree)
VALUES (?, ?, ?, ?)",
VALUES (?, ?, ?, ?)",
[
u32::from(height).to_sql()?,
hash.0.to_sql()?,
@ -289,6 +317,26 @@ pub fn init_blocks_table<P: consensus::Parameters>(
],
)?;
if let Some(nonempty_frontier) = block_end_tree.to_frontier().value() {
let shard_store =
SqliteShardStore::<_, sapling::Node, SAPLING_SHARD_HEIGHT>::from_connection(
wdb.conn.0,
SAPLING_TABLES_PREFIX,
)?;
let mut shard_tree: ShardTree<
_,
{ sapling::NOTE_COMMITMENT_TREE_DEPTH },
SAPLING_SHARD_HEIGHT,
> = ShardTree::new(shard_store, PRUNING_DEPTH.try_into().unwrap());
shard_tree.insert_frontier_nodes(
nonempty_frontier.clone(),
Retention::Checkpoint {
id: height,
is_marked: false,
},
)?;
}
Ok(())
})
}
@ -361,8 +409,9 @@ mod tests {
height INTEGER PRIMARY KEY,
hash BLOB NOT NULL,
time INTEGER NOT NULL,
sapling_tree BLOB NOT NULL
)",
sapling_tree BLOB NOT NULL ,
sapling_commitment_tree_size INTEGER,
orchard_commitment_tree_size INTEGER)",
"CREATE TABLE sapling_received_notes (
id_note INTEGER PRIMARY KEY,
tx INTEGER NOT NULL,
@ -375,11 +424,36 @@ mod tests {
is_change INTEGER NOT NULL,
memo BLOB,
spent INTEGER,
commitment_tree_position INTEGER,
FOREIGN KEY (tx) REFERENCES transactions(id_tx),
FOREIGN KEY (account) REFERENCES accounts(account),
FOREIGN KEY (spent) REFERENCES transactions(id_tx),
CONSTRAINT tx_output UNIQUE (tx, output_index)
)",
"CREATE TABLE sapling_tree_cap (
-- cap_id exists only to be able to take advantage of `ON CONFLICT`
-- upsert functionality; the table will only ever contain one row
cap_id INTEGER PRIMARY KEY,
cap_data BLOB NOT NULL
)",
"CREATE TABLE sapling_tree_checkpoint_marks_removed (
checkpoint_id INTEGER NOT NULL,
mark_removed_position INTEGER NOT NULL,
FOREIGN KEY (checkpoint_id) REFERENCES sapling_tree_checkpoints(checkpoint_id)
ON DELETE CASCADE
)",
"CREATE TABLE sapling_tree_checkpoints (
checkpoint_id INTEGER PRIMARY KEY,
position INTEGER
)",
"CREATE TABLE sapling_tree_shards (
shard_index INTEGER PRIMARY KEY,
subtree_end_height INTEGER,
root_hash BLOB,
shard_data BLOB,
contains_marked INTEGER,
CONSTRAINT root_unique UNIQUE (root_hash)
)",
"CREATE TABLE sapling_witnesses (
id_witness INTEGER PRIMARY KEY,
note INTEGER NOT NULL,
@ -842,7 +916,7 @@ mod tests {
// add a sapling sent note
wdb.conn.execute(
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, '')",
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, x'000000')",
[],
)?;
@ -1006,7 +1080,7 @@ mod tests {
RecipientAddress::Transparent(*ufvk.default_address().0.transparent().unwrap())
.encode(&tests::network());
wdb.conn.execute(
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, '')",
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, x'000000')",
[],
)?;
wdb.conn.execute(
@ -1117,7 +1191,7 @@ mod tests {
BlockHeight::from(1u32),
BlockHash([1; 32]),
1,
&[],
&[0x0, 0x0, 0x0],
)
.unwrap();
@ -1127,7 +1201,7 @@ mod tests {
BlockHeight::from(2u32),
BlockHash([2; 32]),
2,
&[],
&[0x0, 0x0, 0x0],
)
.unwrap_err();
}

View File

@ -4,6 +4,7 @@ mod addresses_table;
mod initial_setup;
mod received_notes_nullable_nf;
mod sent_notes_to_internal;
mod shardtree_support;
mod ufvk_support;
mod utxos_table;
mod v_transactions_net;
@ -46,5 +47,6 @@ pub(super) fn all_migrations<P: consensus::Parameters + 'static>(
Box::new(add_transaction_views::Migration),
Box::new(v_transactions_net::Migration),
Box::new(received_notes_nullable_nf::Migration),
Box::new(shardtree_support::Migration),
]
}

View File

@ -327,7 +327,7 @@ mod tests {
.unwrap();
db_data.conn.execute_batch(
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, '');
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, x'00');
INSERT INTO transactions (block, id_tx, txid) VALUES (0, 0, '');
INSERT INTO sent_notes (tx, output_pool, output_index, from_account, address, value)
@ -460,7 +460,7 @@ mod tests {
db_data
.conn
.execute_batch(
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, '');",
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, x'00');",
)
.unwrap();
db_data.conn.execute(

View File

@ -262,7 +262,7 @@ mod tests {
// Tx 0 contains two received notes of 2 and 5 zatoshis that are controlled by account 0.
db_data.conn.execute_batch(
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, '');
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, x'00');
INSERT INTO transactions (block, id_tx, txid) VALUES (0, 0, 'tx0');
INSERT INTO received_notes (tx, output_index, account, diversifier, value, rcm, nf, is_change)

View File

@ -0,0 +1,194 @@
//! This migration adds tables to the wallet database that are needed to persist note commitment
//! tree data using the `shardtree` crate, and migrates existing witness data into these data
//! structures.
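//!
//! The migration proceeds in two phases: first, the legacy `sapling_tree` serialization
//! stored for each row of the `blocks` table is read to set that block's
//! `sapling_commitment_tree_size` and to insert the block-end frontier into the new
//! shard store under a checkpoint for that height; second, each stored incremental
//! witness in `sapling_witnesses` is replayed into the shard tree and used to record
//! the `commitment_tree_position` of the corresponding received note.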
use std::collections::{BTreeSet, HashSet};
use incrementalmerkletree::Retention;
use rusqlite::{self, named_params, params};
use schemer;
use schemer_rusqlite::RusqliteMigration;
use shardtree::ShardTree;
use uuid::Uuid;
use zcash_client_backend::data_api::SAPLING_SHARD_HEIGHT;
use zcash_primitives::{
consensus::BlockHeight,
merkle_tree::{read_commitment_tree, read_incremental_witness},
sapling,
};
use crate::{
wallet::{
commitment_tree::SqliteShardStore,
init::{migrations::received_notes_nullable_nf, WalletMigrationError},
},
PRUNING_DEPTH, SAPLING_TABLES_PREFIX,
};
pub(super) const MIGRATION_ID: Uuid = Uuid::from_fields(
0x7da6489d,
0xe835,
0x4657,
b"\x8b\xe5\xf5\x12\xbc\xce\x6c\xbf",
);
pub(super) struct Migration;
impl schemer::Migration for Migration {
fn id(&self) -> Uuid {
MIGRATION_ID
}
fn dependencies(&self) -> HashSet<Uuid> {
[received_notes_nullable_nf::MIGRATION_ID]
.into_iter()
.collect()
}
fn description(&self) -> &'static str {
"Add support for receiving storage of note commitment tree data using the `shardtree` crate."
}
}
impl RusqliteMigration for Migration {
type Error = WalletMigrationError;
fn up(&self, transaction: &rusqlite::Transaction) -> Result<(), WalletMigrationError> {
// Add commitment tree sizes to block metadata.
transaction.execute_batch(
"ALTER TABLE blocks ADD COLUMN sapling_commitment_tree_size INTEGER;
ALTER TABLE blocks ADD COLUMN orchard_commitment_tree_size INTEGER;
ALTER TABLE sapling_received_notes ADD COLUMN commitment_tree_position INTEGER;",
)?;
// Add shard persistence
transaction.execute_batch(
"CREATE TABLE sapling_tree_shards (
shard_index INTEGER PRIMARY KEY,
subtree_end_height INTEGER,
root_hash BLOB,
shard_data BLOB,
contains_marked INTEGER,
CONSTRAINT root_unique UNIQUE (root_hash)
);
CREATE TABLE sapling_tree_cap (
-- cap_id exists only to be able to take advantage of `ON CONFLICT`
-- upsert functionality; the table will only ever contain one row
cap_id INTEGER PRIMARY KEY,
cap_data BLOB NOT NULL
);",
)?;
// Add checkpoint persistence
transaction.execute_batch(
"CREATE TABLE sapling_tree_checkpoints (
checkpoint_id INTEGER PRIMARY KEY,
position INTEGER
);
CREATE TABLE sapling_tree_checkpoint_marks_removed (
checkpoint_id INTEGER NOT NULL,
mark_removed_position INTEGER NOT NULL,
FOREIGN KEY (checkpoint_id) REFERENCES sapling_tree_checkpoints(checkpoint_id)
ON DELETE CASCADE
);",
)?;
let shard_store =
SqliteShardStore::<_, sapling::Node, SAPLING_SHARD_HEIGHT>::from_connection(
transaction,
SAPLING_TABLES_PREFIX,
)?;
let mut shard_tree: ShardTree<
_,
{ sapling::NOTE_COMMITMENT_TREE_DEPTH },
SAPLING_SHARD_HEIGHT,
> = ShardTree::new(shard_store, PRUNING_DEPTH.try_into().unwrap());
// Insert all the tree information that we can get from block-end commitment trees
{
let mut stmt_blocks = transaction.prepare("SELECT height, sapling_tree FROM blocks")?;
let mut stmt_update_block_sapling_tree_size = transaction
.prepare("UPDATE blocks SET sapling_commitment_tree_size = ? WHERE height = ?")?;
let mut block_rows = stmt_blocks.query([])?;
while let Some(row) = block_rows.next()? {
let block_height: u32 = row.get(0)?;
let sapling_tree_data: Vec<u8> = row.get(1)?;
let block_end_tree = read_commitment_tree::<
sapling::Node,
_,
{ sapling::NOTE_COMMITMENT_TREE_DEPTH },
>(&sapling_tree_data[..])
.map_err(|e| {
rusqlite::Error::FromSqlConversionFailure(
sapling_tree_data.len(),
rusqlite::types::Type::Blob,
Box::new(e),
)
})?;
stmt_update_block_sapling_tree_size
.execute(params![block_end_tree.size(), block_height])?;
if let Some(nonempty_frontier) = block_end_tree.to_frontier().value() {
shard_tree.insert_frontier_nodes(
nonempty_frontier.clone(),
Retention::Checkpoint {
id: BlockHeight::from(block_height),
is_marked: false,
},
)?;
}
}
}
// Insert all the tree information that we can get from existing incremental witnesses
{
let mut stmt_blocks =
transaction.prepare("SELECT note, block, witness FROM sapling_witnesses")?;
let mut stmt_set_note_position = transaction.prepare(
"UPDATE sapling_received_notes
SET commitment_tree_position = :position
WHERE id_note = :note_id",
)?;
let mut updated_note_positions = BTreeSet::new();
let mut rows = stmt_blocks.query([])?;
while let Some(row) = rows.next()? {
let note_id: i64 = row.get(0)?;
let block_height: u32 = row.get(1)?;
let row_data: Vec<u8> = row.get(2)?;
let witness = read_incremental_witness::<
sapling::Node,
_,
{ sapling::NOTE_COMMITMENT_TREE_DEPTH },
>(&row_data[..])
.map_err(|e| {
rusqlite::Error::FromSqlConversionFailure(
row_data.len(),
rusqlite::types::Type::Blob,
Box::new(e),
)
})?;
let witnessed_position = witness.witnessed_position();
if !updated_note_positions.contains(&witnessed_position) {
stmt_set_note_position.execute(named_params![
":note_id": note_id,
":position": u64::from(witnessed_position)
])?;
updated_note_positions.insert(witnessed_position);
}
shard_tree.insert_witness_nodes(witness, BlockHeight::from(block_height))?;
}
}
Ok(())
}
fn down(&self, _transaction: &rusqlite::Transaction) -> Result<(), WalletMigrationError> {
// TODO: something better than just panic?
panic!("Cannot revert this migration.");
}
}

View File

@ -253,7 +253,7 @@ mod tests {
// - Tx 0 contains two received notes of 2 and 5 zatoshis that are controlled by account 0.
db_data.conn.execute_batch(
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, '');
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, x'00');
INSERT INTO transactions (block, id_tx, txid) VALUES (0, 0, 'tx0');
INSERT INTO received_notes (tx, output_index, account, diversifier, value, rcm, nf, is_change)
@ -265,7 +265,7 @@ mod tests {
// of 2 zatoshis. This is representative of a historic transaction where no `sent_notes`
// entry was created for the change value.
db_data.conn.execute_batch(
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (1, 1, 1, '');
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (1, 1, 1, x'00');
INSERT INTO transactions (block, id_tx, txid) VALUES (1, 1, 'tx1');
UPDATE received_notes SET spent = 1 WHERE tx = 0;
INSERT INTO sent_notes (tx, output_pool, output_index, from_account, to_account, to_address, value)
@ -279,7 +279,7 @@ mod tests {
// other half to the sending account as change. There is also a random transparent UTXO,
// received from an unknown source, that belongs to account 0.
db_data.conn.execute_batch(
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (2, 2, 2, '');
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (2, 2, 2, x'00');
INSERT INTO transactions (block, id_tx, txid) VALUES (2, 2, 'tx2');
UPDATE received_notes SET spent = 2 WHERE tx = 1;
INSERT INTO utxos (received_by_account, address, prevout_txid, prevout_idx, script, value_zat, height)
@ -297,7 +297,7 @@ mod tests {
// - Tx 3 just receives transparent funds and does nothing else. For this to work, the
// transaction must be retrieved by the wallet.
db_data.conn.execute_batch(
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (3, 3, 3, '');
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (3, 3, 3, x'00');
INSERT INTO transactions (block, id_tx, txid) VALUES (3, 3, 'tx3');
INSERT INTO utxos (received_by_account, address, prevout_txid, prevout_idx, script, value_zat, height)

View File

@ -1,12 +1,13 @@
//! Functions for Sapling support in the wallet.
use group::ff::PrimeField;
use rusqlite::{named_params, params, types::Value, Connection, OptionalExtension, Row};
use incrementalmerkletree::Position;
use rusqlite::{named_params, params, types::Value, Connection, Row};
use std::rc::Rc;
use zcash_primitives::{
consensus::BlockHeight,
memo::MemoBytes,
merkle_tree::{read_commitment_tree, read_incremental_witness, write_incremental_witness},
sapling::{self, Diversifier, Note, Nullifier, Rseed},
transaction::components::Amount,
zip32::AccountId,
@ -28,10 +29,11 @@ pub(crate) trait ReceivedSaplingOutput {
fn note(&self) -> &Note;
fn memo(&self) -> Option<&MemoBytes>;
fn is_change(&self) -> bool;
fn nullifier(&self) -> Option<&Nullifier>;
fn nullifier(&self) -> Option<&sapling::Nullifier>;
fn note_commitment_tree_position(&self) -> Option<Position>;
}
impl ReceivedSaplingOutput for WalletSaplingOutput<Nullifier> {
impl ReceivedSaplingOutput for WalletSaplingOutput<sapling::Nullifier> {
fn index(&self) -> usize {
self.index()
}
@ -47,10 +49,12 @@ impl ReceivedSaplingOutput for WalletSaplingOutput<Nullifier> {
fn is_change(&self) -> bool {
WalletSaplingOutput::is_change(self)
}
fn nullifier(&self) -> Option<&Nullifier> {
fn nullifier(&self) -> Option<&sapling::Nullifier> {
Some(self.nf())
}
fn note_commitment_tree_position(&self) -> Option<Position> {
Some(WalletSaplingOutput::note_commitment_tree_position(self))
}
}
impl ReceivedSaplingOutput for DecryptedOutput<Note> {
@ -69,7 +73,10 @@ impl ReceivedSaplingOutput for DecryptedOutput<Note> {
fn is_change(&self) -> bool {
self.transfer_type == TransferType::WalletInternal
}
fn nullifier(&self) -> Option<&Nullifier> {
fn nullifier(&self) -> Option<&sapling::Nullifier> {
None
}
fn note_commitment_tree_position(&self) -> Option<Position> {
None
}
}
@ -105,17 +112,17 @@ fn to_spendable_note(row: &Row) -> Result<ReceivedSaplingNote<NoteId>, SqliteCli
Rseed::BeforeZip212(rcm)
};
let witness = {
let d: Vec<_> = row.get(4)?;
read_incremental_witness(&d[..])?
};
let note_commitment_tree_position =
Position::from(u64::try_from(row.get::<_, i64>(4)?).map_err(|_| {
SqliteClientError::CorruptedData("Note commitment tree position invalid.".to_string())
})?);
Ok(ReceivedSaplingNote {
note_id,
diversifier,
note_value,
rseed,
witness,
note_commitment_tree_position,
})
}
@ -126,15 +133,13 @@ pub(crate) fn get_spendable_sapling_notes(
exclude: &[NoteId],
) -> Result<Vec<ReceivedSaplingNote<NoteId>>, SqliteClientError> {
let mut stmt_select_notes = conn.prepare_cached(
"SELECT id_note, diversifier, value, rcm, witness
FROM sapling_received_notes
INNER JOIN transactions ON transactions.id_tx = sapling_received_notes.tx
INNER JOIN sapling_witnesses ON sapling_witnesses.note = sapling_received_notes.id_note
WHERE account = :account
AND spent IS NULL
AND transactions.block <= :anchor_height
AND sapling_witnesses.block = :anchor_height
AND id_note NOT IN rarray(:exclude)",
"SELECT id_note, diversifier, value, rcm, commitment_tree_position
FROM sapling_received_notes
INNER JOIN transactions ON transactions.id_tx = sapling_received_notes.tx
WHERE account = :account
AND spent IS NULL
AND transactions.block <= :anchor_height
AND id_note NOT IN rarray(:exclude)",
)?;
let excluded: Vec<Value> = exclude
@ -184,28 +189,22 @@ pub(crate) fn select_spendable_sapling_notes(
//
// 4) Match the selected notes against the witnesses at the desired height.
let mut stmt_select_notes = conn.prepare_cached(
"WITH selected AS (
WITH eligible AS (
SELECT id_note, diversifier, value, rcm,
SUM(value) OVER
(PARTITION BY account, spent ORDER BY id_note) AS so_far
FROM sapling_received_notes
INNER JOIN transactions ON transactions.id_tx = sapling_received_notes.tx
WHERE account = :account
AND spent IS NULL
AND transactions.block <= :anchor_height
AND id_note NOT IN rarray(:exclude)
)
SELECT * FROM eligible WHERE so_far < :target_value
UNION
SELECT * FROM (SELECT * FROM eligible WHERE so_far >= :target_value LIMIT 1)
), witnesses AS (
SELECT note, witness FROM sapling_witnesses
WHERE block = :anchor_height
)
SELECT selected.id_note, selected.diversifier, selected.value, selected.rcm, witnesses.witness
FROM selected
INNER JOIN witnesses ON selected.id_note = witnesses.note",
"WITH eligible AS (
SELECT id_note, diversifier, value, rcm, commitment_tree_position,
SUM(value)
OVER (PARTITION BY account, spent ORDER BY id_note) AS so_far
FROM sapling_received_notes
INNER JOIN transactions ON transactions.id_tx = sapling_received_notes.tx
WHERE account = :account
AND spent IS NULL
AND transactions.block <= :anchor_height
AND id_note NOT IN rarray(:exclude)
)
SELECT id_note, diversifier, value, rcm, commitment_tree_position
FROM eligible WHERE so_far < :target_value
UNION
SELECT id_note, diversifier, value, rcm, commitment_tree_position
FROM (SELECT * from eligible WHERE so_far >= :target_value LIMIT 1)",
)?;
let excluded: Vec<Value> = exclude
@ -230,73 +229,6 @@ pub(crate) fn select_spendable_sapling_notes(
notes.collect::<Result<_, _>>()
}
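
The rewritten query above encodes a greedy selection rule: notes are walked in `id_note` order, `so_far` is the running total including the current row, and the wallet keeps every note whose running total is still below the target plus the first note that reaches it. A standalone sketch of that rule, illustrative only and not part of this crate:

```rust
// Greedy selection over note values in `id_note` order: keep accumulating until
// the running total (including the current note) reaches the target. This mirrors
// the `so_far < :target_value UNION ... so_far >= :target_value LIMIT 1` logic.
fn select_until_target(values: &[u64], target: u64) -> Vec<u64> {
    let mut selected = Vec::new();
    let mut so_far = 0u64;
    for &value in values {
        so_far += value;
        selected.push(value);
        if so_far >= target {
            break;
        }
    }
    selected
}

#[test]
fn selects_notes_until_target_is_reached() {
    // Notes worth 2, 5, and 7 zatoshis with a target of 6: the 2- and 5-zatoshi
    // notes are selected, because their running total (7) is the first to reach 6.
    assert_eq!(select_until_target(&[2, 5, 7], 6), vec![2, 5]);
}
```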
/// Returns the commitment tree for the block at the specified height,
/// if any.
pub(crate) fn get_sapling_commitment_tree(
conn: &Connection,
block_height: BlockHeight,
) -> Result<Option<sapling::CommitmentTree>, SqliteClientError> {
conn.query_row_and_then(
"SELECT sapling_tree FROM blocks WHERE height = ?",
[u32::from(block_height)],
|row| {
let row_data: Vec<u8> = row.get(0)?;
read_commitment_tree(&row_data[..]).map_err(|e| {
rusqlite::Error::FromSqlConversionFailure(
row_data.len(),
rusqlite::types::Type::Blob,
Box::new(e),
)
})
},
)
.optional()
.map_err(SqliteClientError::from)
}
/// Returns the incremental witnesses for the block at the specified height,
/// if any.
pub(crate) fn get_sapling_witnesses(
conn: &Connection,
block_height: BlockHeight,
) -> Result<Vec<(NoteId, sapling::IncrementalWitness)>, SqliteClientError> {
let mut stmt_fetch_witnesses =
conn.prepare_cached("SELECT note, witness FROM sapling_witnesses WHERE block = ?")?;
let witnesses = stmt_fetch_witnesses
.query_map([u32::from(block_height)], |row| {
let id_note = NoteId::ReceivedNoteId(row.get(0)?);
let witness_data: Vec<u8> = row.get(1)?;
Ok(read_incremental_witness(&witness_data[..]).map(|witness| (id_note, witness)))
})
.map_err(SqliteClientError::from)?;
// unwrap database error & IO error from IncrementalWitness::read
let res: Vec<_> = witnesses.collect::<Result<Result<_, _>, _>>()??;
Ok(res)
}
/// Records the incremental witness for the specified note,
/// as of the given block height.
pub(crate) fn insert_witness(
conn: &Connection,
note_id: i64,
witness: &sapling::IncrementalWitness,
height: BlockHeight,
) -> Result<(), SqliteClientError> {
let mut stmt_insert_witness = conn.prepare_cached(
"INSERT INTO sapling_witnesses (note, block, witness)
VALUES (?, ?, ?)",
)?;
let mut encoded = Vec::new();
write_incremental_witness(witness, &mut encoded).unwrap();
stmt_insert_witness.execute(params![note_id, u32::from(height), encoded])?;
Ok(())
}
/// Retrieves the set of nullifiers for "potentially spendable" Sapling notes that the
/// wallet is tracking.
///
@ -320,7 +252,7 @@ pub(crate) fn get_sapling_nullifiers(
let nf_bytes: Vec<u8> = row.get(2)?;
Ok((
AccountId::from(account),
Nullifier::from_slice(&nf_bytes).unwrap(),
sapling::Nullifier::from_slice(&nf_bytes).unwrap(),
))
})?;
@ -343,7 +275,7 @@ pub(crate) fn get_all_sapling_nullifiers(
let nf_bytes: Vec<u8> = row.get(2)?;
Ok((
AccountId::from(account),
Nullifier::from_slice(&nf_bytes).unwrap(),
sapling::Nullifier::from_slice(&nf_bytes).unwrap(),
))
})?;
@ -359,7 +291,7 @@ pub(crate) fn get_all_sapling_nullifiers(
pub(crate) fn mark_sapling_note_spent(
conn: &Connection,
tx_ref: i64,
nf: &Nullifier,
nf: &sapling::Nullifier,
) -> Result<bool, SqliteClientError> {
let mut stmt_mark_sapling_note_spent =
conn.prepare_cached("UPDATE sapling_received_notes SET spent = ? WHERE nf = ?")?;
@ -383,9 +315,19 @@ pub(crate) fn put_received_note<T: ReceivedSaplingOutput>(
) -> Result<NoteId, SqliteClientError> {
let mut stmt_upsert_received_note = conn.prepare_cached(
"INSERT INTO sapling_received_notes
(tx, output_index, account, diversifier, value, rcm, memo, nf, is_change)
VALUES
(:tx, :output_index, :account, :diversifier, :value, :rcm, :memo, :nf, :is_change)
(tx, output_index, account, diversifier, value, rcm, memo, nf, is_change, commitment_tree_position)
VALUES (
:tx,
:output_index,
:account,
:diversifier,
:value,
:rcm,
:memo,
:nf,
:is_change,
:commitment_tree_position
)
ON CONFLICT (tx, output_index) DO UPDATE
SET account = :account,
diversifier = :diversifier,
@ -393,7 +335,8 @@ pub(crate) fn put_received_note<T: ReceivedSaplingOutput>(
rcm = :rcm,
nf = IFNULL(:nf, nf),
memo = IFNULL(:memo, memo),
is_change = IFNULL(:is_change, is_change)
is_change = IFNULL(:is_change, is_change),
commitment_tree_position = IFNULL(:commitment_tree_position, commitment_tree_position)
RETURNING id_note",
)?;
@ -410,7 +353,8 @@ pub(crate) fn put_received_note<T: ReceivedSaplingOutput>(
":rcm": &rcm.as_ref(),
":nf": output.nullifier().map(|nf| nf.0.as_ref()),
":memo": memo_repr(output.memo()),
":is_change": output.is_change()
":is_change": output.is_change(),
":commitment_tree_position": output.note_commitment_tree_position().map(u64::from),
];
stmt_upsert_received_note
@ -422,7 +366,9 @@ pub(crate) fn put_received_note<T: ReceivedSaplingOutput>(
#[cfg(test)]
#[allow(deprecated)]
mod tests {
pub(crate) mod tests {
use std::num::NonZeroU32;
use rusqlite::Connection;
use secrecy::Secret;
use tempfile::NamedTempFile;
@ -481,7 +427,7 @@ mod tests {
},
};
fn test_prover() -> impl TxProver {
pub(crate) fn test_prover() -> impl TxProver {
match LocalTxProver::with_default_location() {
Some(tx_prover) => tx_prover,
None => {
@ -517,7 +463,7 @@ mod tests {
Amount::from_u64(1).unwrap(),
None,
OvkPolicy::Sender,
10,
NonZeroU32::new(1).unwrap(),
),
Err(data_api::error::Error::KeyNotRecognized)
);
@ -546,7 +492,7 @@ mod tests {
Amount::from_u64(1).unwrap(),
None,
OvkPolicy::Sender,
10,
NonZeroU32::new(1).unwrap(),
),
Err(data_api::error::Error::ScanRequired)
);
@ -562,7 +508,7 @@ mod tests {
BlockHeight::from(1u32),
BlockHash([1; 32]),
1,
&[],
&[0x0, 0x0, 0x0],
)
.unwrap();
@ -589,7 +535,7 @@ mod tests {
Amount::from_u64(1).unwrap(),
None,
OvkPolicy::Sender,
10,
NonZeroU32::new(1).unwrap(),
),
Err(data_api::error::Error::InsufficientFunds {
available,
@ -616,18 +562,22 @@ mod tests {
// Add funds to the wallet in a single note
let value = Amount::from_u64(50000).unwrap();
let (cb, _) = fake_compact_block(
let (mut cb, _) = fake_compact_block(
sapling_activation_height(),
BlockHash([0; 32]),
&dfvk,
AddressType::DefaultExternal,
value,
0,
);
insert_into_cache(&db_cache, &cb);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Verified balance matches total balance
let (_, anchor_height) = db_data.get_target_and_anchor_heights(10).unwrap().unwrap();
let (_, anchor_height) = db_data
.get_target_and_anchor_heights(NonZeroU32::new(10).unwrap())
.unwrap()
.unwrap();
assert_eq!(
get_balance(&db_data.conn, AccountId::from(0)).unwrap(),
value
@ -638,18 +588,23 @@ mod tests {
);
// Add more funds to the wallet in a second note
let (cb, _) = fake_compact_block(
cb = fake_compact_block(
sapling_activation_height() + 1,
cb.hash(),
&dfvk,
AddressType::DefaultExternal,
value,
);
1,
)
.0;
insert_into_cache(&db_cache, &cb);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Verified balance does not include the second note
let (_, anchor_height2) = db_data.get_target_and_anchor_heights(10).unwrap().unwrap();
let (_, anchor_height2) = db_data
.get_target_and_anchor_heights(NonZeroU32::new(10).unwrap())
.unwrap()
.unwrap();
assert_eq!(
get_balance(&db_data.conn, AccountId::from(0)).unwrap(),
(value + value).unwrap()
@ -672,7 +627,7 @@ mod tests {
Amount::from_u64(70000).unwrap(),
None,
OvkPolicy::Sender,
10,
NonZeroU32::new(10).unwrap(),
),
Err(data_api::error::Error::InsufficientFunds {
available,
@ -685,16 +640,18 @@ mod tests {
// Mine blocks SAPLING_ACTIVATION_HEIGHT + 2 to 9 until just before the second
// note is verified
for i in 2..10 {
let (cb, _) = fake_compact_block(
cb = fake_compact_block(
sapling_activation_height() + i,
cb.hash(),
&dfvk,
AddressType::DefaultExternal,
value,
);
i,
)
.0;
insert_into_cache(&db_cache, &cb);
}
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Second spend still fails
assert_matches!(
@ -707,7 +664,7 @@ mod tests {
Amount::from_u64(70000).unwrap(),
None,
OvkPolicy::Sender,
10,
NonZeroU32::new(10).unwrap(),
),
Err(data_api::error::Error::InsufficientFunds {
available,
@ -718,15 +675,17 @@ mod tests {
);
// Mine block 11 so that the second note becomes verified
let (cb, _) = fake_compact_block(
cb = fake_compact_block(
sapling_activation_height() + 10,
cb.hash(),
&dfvk,
AddressType::DefaultExternal,
value,
);
11,
)
.0;
insert_into_cache(&db_cache, &cb);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Second spend should now succeed
assert_matches!(
@ -739,7 +698,7 @@ mod tests {
Amount::from_u64(70000).unwrap(),
None,
OvkPolicy::Sender,
10,
NonZeroU32::new(10).unwrap(),
),
Ok(_)
);
@ -762,15 +721,16 @@ mod tests {
// Add funds to the wallet in a single note
let value = Amount::from_u64(50000).unwrap();
let (cb, _) = fake_compact_block(
let (mut cb, _) = fake_compact_block(
sapling_activation_height(),
BlockHash([0; 32]),
&dfvk,
AddressType::DefaultExternal,
value,
0,
);
insert_into_cache(&db_cache, &cb);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
assert_eq!(
get_balance(&db_data.conn, AccountId::from(0)).unwrap(),
value
@ -789,7 +749,7 @@ mod tests {
Amount::from_u64(15000).unwrap(),
None,
OvkPolicy::Sender,
10,
NonZeroU32::new(1).unwrap(),
),
Ok(_)
);
@ -805,7 +765,7 @@ mod tests {
Amount::from_u64(2000).unwrap(),
None,
OvkPolicy::Sender,
10,
NonZeroU32::new(1).unwrap(),
),
Err(data_api::error::Error::InsufficientFunds {
available,
@ -817,16 +777,18 @@ mod tests {
// Mine blocks SAPLING_ACTIVATION_HEIGHT + 1 to 41 (that don't send us funds)
// until just before the first transaction expires
for i in 1..42 {
let (cb, _) = fake_compact_block(
cb = fake_compact_block(
sapling_activation_height() + i,
cb.hash(),
&ExtendedSpendingKey::master(&[i as u8]).to_diversifiable_full_viewing_key(),
AddressType::DefaultExternal,
value,
);
i,
)
.0;
insert_into_cache(&db_cache, &cb);
}
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Second spend still fails
assert_matches!(
@ -839,7 +801,7 @@ mod tests {
Amount::from_u64(2000).unwrap(),
None,
OvkPolicy::Sender,
10,
NonZeroU32::new(1).unwrap(),
),
Err(data_api::error::Error::InsufficientFunds {
available,
@ -849,15 +811,17 @@ mod tests {
);
// Mine block SAPLING_ACTIVATION_HEIGHT + 42 so that the first transaction expires
let (cb, _) = fake_compact_block(
cb = fake_compact_block(
sapling_activation_height() + 42,
cb.hash(),
&ExtendedSpendingKey::master(&[42]).to_diversifiable_full_viewing_key(),
AddressType::DefaultExternal,
value,
);
42,
)
.0;
insert_into_cache(&db_cache, &cb);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Second spend should now succeed
create_spend_to_address(
@ -869,7 +833,7 @@ mod tests {
Amount::from_u64(2000).unwrap(),
None,
OvkPolicy::Sender,
10,
NonZeroU32::new(1).unwrap(),
)
.unwrap();
}
@ -892,15 +856,16 @@ mod tests {
// Add funds to the wallet in a single note
let value = Amount::from_u64(50000).unwrap();
let (cb, _) = fake_compact_block(
let (mut cb, _) = fake_compact_block(
sapling_activation_height(),
BlockHash([0; 32]),
&dfvk,
AddressType::DefaultExternal,
value,
0,
);
insert_into_cache(&db_cache, &cb);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
assert_eq!(
get_balance(&db_data.conn, AccountId::from(0)).unwrap(),
value
@ -920,7 +885,7 @@ mod tests {
Amount::from_u64(15000).unwrap(),
None,
ovk_policy,
10,
NonZeroU32::new(1).unwrap(),
)
.unwrap();
@ -962,16 +927,18 @@ mod tests {
// Mine blocks SAPLING_ACTIVATION_HEIGHT + 1 to 42 (that don't send us funds)
// so that the first transaction expires
for i in 1..=42 {
let (cb, _) = fake_compact_block(
cb = fake_compact_block(
sapling_activation_height() + i,
cb.hash(),
&ExtendedSpendingKey::master(&[i as u8]).to_diversifiable_full_viewing_key(),
AddressType::DefaultExternal,
value,
);
i,
)
.0;
insert_into_cache(&db_cache, &cb);
}
scan_cached_blocks(&network, &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(&network, &db_cache, &mut db_data, None, None).unwrap();
// Send the funds again, discarding history.
// Neither transaction output is decryptable by the sender.
@ -1001,12 +968,16 @@ mod tests {
&dfvk,
AddressType::DefaultExternal,
value,
0,
);
insert_into_cache(&db_cache, &cb);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Verified balance matches total balance
let (_, anchor_height) = db_data.get_target_and_anchor_heights(10).unwrap().unwrap();
let (_, anchor_height) = db_data
.get_target_and_anchor_heights(NonZeroU32::new(1).unwrap())
.unwrap()
.unwrap();
assert_eq!(
get_balance(&db_data.conn, AccountId::from(0)).unwrap(),
value
@ -1027,7 +998,7 @@ mod tests {
Amount::from_u64(50000).unwrap(),
None,
OvkPolicy::Sender,
10,
NonZeroU32::new(1).unwrap(),
),
Ok(_)
);
@ -1056,12 +1027,16 @@ mod tests {
&dfvk,
AddressType::Internal,
value,
0,
);
insert_into_cache(&db_cache, &cb);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Verified balance matches total balance
let (_, anchor_height) = db_data.get_target_and_anchor_heights(10).unwrap().unwrap();
let (_, anchor_height) = db_data
.get_target_and_anchor_heights(NonZeroU32::new(10).unwrap())
.unwrap()
.unwrap();
assert_eq!(
get_balance(&db_data.conn, AccountId::from(0)).unwrap(),
value
@ -1082,7 +1057,7 @@ mod tests {
Amount::from_u64(50000).unwrap(),
None,
OvkPolicy::Sender,
10,
NonZeroU32::new(1).unwrap(),
),
Ok(_)
);
@ -1104,32 +1079,38 @@ mod tests {
let dfvk = usk.sapling().to_diversifiable_full_viewing_key();
// Add funds to the wallet
let (cb, _) = fake_compact_block(
let (mut cb, _) = fake_compact_block(
sapling_activation_height(),
BlockHash([0; 32]),
&dfvk,
AddressType::Internal,
Amount::from_u64(50000).unwrap(),
0,
);
insert_into_cache(&db_cache, &cb);
// Add 10 dust notes to the wallet
for i in 1..=10 {
let (cb, _) = fake_compact_block(
cb = fake_compact_block(
sapling_activation_height() + i,
cb.hash(),
&dfvk,
AddressType::DefaultExternal,
Amount::from_u64(1000).unwrap(),
);
i,
)
.0;
insert_into_cache(&db_cache, &cb);
}
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Verified balance matches total balance
let total = Amount::from_u64(60000).unwrap();
let (_, anchor_height) = db_data.get_target_and_anchor_heights(1).unwrap().unwrap();
let (_, anchor_height) = db_data
.get_target_and_anchor_heights(NonZeroU32::new(1).unwrap())
.unwrap()
.unwrap();
assert_eq!(
get_balance(&db_data.conn, AccountId::from(0)).unwrap(),
total
@ -1164,7 +1145,7 @@ mod tests {
&usk,
req,
OvkPolicy::Sender,
1,
NonZeroU32::new(1).unwrap(),
),
Err(Error::InsufficientFunds { available, required })
if available == Amount::from_u64(51000).unwrap()
@ -1192,7 +1173,7 @@ mod tests {
&usk,
req,
OvkPolicy::Sender,
1,
NonZeroU32::new(1).unwrap(),
),
Ok(_)
);
@ -1234,16 +1215,17 @@ mod tests {
DustOutputPolicy::default(),
);
// Add funds to the wallet
// Ensure that the wallet has at least one block
let (cb, _) = fake_compact_block(
sapling_activation_height(),
BlockHash([0; 32]),
&dfvk,
AddressType::Internal,
Amount::from_u64(50000).unwrap(),
0,
);
insert_into_cache(&db_cache, &cb);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap();
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
assert_matches!(
shield_transparent_funds(
@ -1255,7 +1237,7 @@ mod tests {
&usk,
&[*taddr],
&MemoBytes::empty(),
0
NonZeroU32::new(1).unwrap()
),
Ok(_)
);

View File

@ -12,6 +12,9 @@ and this library adheres to Rust's notion of
- `Builder::add_orchard_spend`
- `Builder::add_orchard_output`
- `zcash_primitives::transaction::components::orchard::builder` module
- `impl HashSer for String` is provided under the `test-dependencies` feature
flag. This is a test-only impl; the identity leaf value is `_` and the combining
operation is concatenation.
### Changed
- `zcash_primitives::transaction`:

View File

@ -39,10 +39,20 @@ impl BlockHash {
///
/// This function will panic if the slice is not exactly 32 bytes.
pub fn from_slice(bytes: &[u8]) -> Self {
assert_eq!(bytes.len(), 32);
let mut hash = [0; 32];
hash.copy_from_slice(bytes);
BlockHash(hash)
Self::try_from_slice(bytes).unwrap()
}
/// Constructs a [`BlockHash`] from the given slice.
///
/// Returns `None` if `bytes` has any length other than 32.
pub fn try_from_slice(bytes: &[u8]) -> Option<Self> {
if bytes.len() == 32 {
let mut hash = [0; 32];
hash.copy_from_slice(bytes);
Some(BlockHash(hash))
} else {
None
}
}
}
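
A brief usage sketch of the new fallible constructor (illustrative only; this assumes the type is the `BlockHash` exported from `zcash_primitives::block`):

```rust
use zcash_primitives::block::BlockHash;

#[test]
fn try_from_slice_requires_32_bytes() {
    // A 32-byte slice parses successfully...
    assert!(BlockHash::try_from_slice(&[0u8; 32]).is_some());
    // ...while any other length returns `None` instead of panicking.
    assert!(BlockHash::try_from_slice(&[0u8; 31]).is_none());
}
```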

View File

@ -627,6 +627,12 @@ pub mod testing {
)
})
}
impl incrementalmerkletree::testing::TestCheckpoint for BlockHeight {
fn from_u64(value: u64) -> Self {
BlockHeight(u32::try_from(value).expect("Test checkpoint ids do not exceed 32 bits"))
}
}
}
#[cfg(test)]

View File

@ -98,7 +98,7 @@ pub fn write_nonempty_frontier_v1<H: HashSer, W: Write>(
frontier: &NonEmptyFrontier<H>,
) -> io::Result<()> {
write_position(&mut writer, frontier.position())?;
if frontier.position().is_odd() {
if frontier.position().is_right_child() {
// The v1 serialization wrote the sibling of a right-hand leaf as an optional value, rather
// than as part of the ommers vector.
frontier
@ -292,6 +292,7 @@ pub mod testing {
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use incrementalmerkletree::frontier::testing::TestNode;
use std::io::{self, Read, Write};
use zcash_encoding::Vector;
use super::HashSer;
@ -304,6 +305,23 @@ pub mod testing {
writer.write_u64::<LittleEndian>(self.0)
}
}
impl HashSer for String {
fn read<R: Read>(reader: R) -> io::Result<String> {
Vector::read(reader, |r| r.read_u8()).and_then(|xs| {
String::from_utf8(xs).map_err(|e| {
io::Error::new(
io::ErrorKind::InvalidData,
format!("Not a valid utf8 string: {:?}", e),
)
})
})
}
fn write<W: Write>(&self, writer: W) -> io::Result<()> {
Vector::write(writer, self.as_bytes(), |w, b| w.write_u8(*b))
}
}
}
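
A small sketch of round-tripping a value through the test-only `HashSer` impl for `String` shown above (assumes the `test-dependencies` feature is enabled and that `HashSer` is importable from `zcash_primitives::merkle_tree`):

```rust
use std::io;
use zcash_primitives::merkle_tree::HashSer;

// Round-trip a String through the length-prefixed byte encoding defined above.
fn string_hashser_round_trip() -> io::Result<()> {
    let mut buf = Vec::new();
    "abc".to_string().write(&mut buf)?;
    let parsed = String::read(&buf[..])?;
    assert_eq!(parsed, "abc");
    Ok(())
}
```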
#[cfg(test)]