Merge pull request #831 from nuttycom/feature/pre_dag_sync

Migrations & data storage for pre-DAG-sync
This commit is contained in:
Kris Nuttycombe 2023-07-04 13:52:10 -06:00 committed by GitHub
commit d8148f90e7
34 changed files with 2880 additions and 1168 deletions

View File

@ -17,3 +17,8 @@ members = [
lto = true lto = true
panic = 'abort' panic = 'abort'
codegen-units = 1 codegen-units = 1
[patch.crates-io]
incrementalmerkletree = { git = "https://github.com/zcash/incrementalmerkletree.git", rev = "082109deacf8611ee7917732e19b56158bda96d5" }
shardtree = { git = "https://github.com/zcash/incrementalmerkletree.git", rev = "082109deacf8611ee7917732e19b56158bda96d5" }
orchard = { git = "https://github.com/zcash/orchard.git", rev = "5da41a6bbb44290e353ee4b38bcafe37ffe79ce8" }

View File

@ -9,21 +9,82 @@ and this library adheres to Rust's notion of
### Added ### Added
- `impl Eq for zcash_client_backend::address::RecipientAddress` - `impl Eq for zcash_client_backend::address::RecipientAddress`
- `impl Eq for zcash_client_backend::zip321::{Payment, TransactionRequest}` - `impl Eq for zcash_client_backend::zip321::{Payment, TransactionRequest}`
- `data_api::NullifierQuery` for use with `WalletRead::get_sapling_nullifiers` - `impl Debug` for `zcash_client_backend::{data_api::wallet::input_selection::Proposal, wallet::ReceivedSaplingNote}`
- `zcash_client_backend::data_api`:
- `WalletRead::{block_metadata, block_fully_scanned, suggest_scan_ranges}`
- `WalletWrite::put_block`
- `WalletCommitmentTrees`
- `testing::MockWalletDb::new`
- `NullifierQuery` for use with `WalletRead::get_sapling_nullifiers`
- `BlockMetadata`
- `ScannedBlock`
- `wallet::input_selection::Proposal::{min_target_height, min_anchor_height}`
- `zcash_client_backend::wallet::WalletSaplingOutput::note_commitment_tree_position`
- `zcash_client_backend::scanning::ScanError`
### Changed ### Changed
- MSRV is now 1.65.0. - MSRV is now 1.65.0.
- Bumped dependencies to `hdwallet 0.4`, `zcash_primitives 0.12`, `zcash_note_encryption 0.4`, - Bumped dependencies to `hdwallet 0.4`, `zcash_primitives 0.12`, `zcash_note_encryption 0.4`,
`incrementalmerkletree 0.4`, `orchard 0.5`, `bs58 0.5` `incrementalmerkletree 0.4`, `orchard 0.5`, `bs58 0.5`
- `WalletRead::get_memo` now returns `Result<Option<Memo>, Self::Error>` - `zcash_client_backend::data_api`:
instead of `Result<Memo, Self::Error>` in order to make representable - `WalletRead::get_memo` now returns `Result<Option<Memo>, Self::Error>`
wallet states where the full note plaintext is not available. instead of `Result<Memo, Self::Error>` in order to make representable
- `WalletRead::get_nullifiers` has been renamed to `WalletRead::get_sapling_nullifiers` wallet states where the full note plaintext is not available.
and its signature has changed; it now subsumes the removed `WalletRead::get_all_nullifiers`. - `WalletRead::get_nullifiers` has been renamed to `WalletRead::get_sapling_nullifiers`
- `wallet::SpendableNote` has been renamed to `wallet::ReceivedSaplingNote`. and its signature has changed; it now subsumes the removed `WalletRead::get_all_nullifiers`.
- `WalletRead::get_target_and_anchor_heights` now takes its argument as a `NonZeroU32`
- `chain::scan_cached_blocks` now takes a `from_height` argument that
permits the caller to control the starting position of the scan range
(see the usage sketch at the end of this section).
- A new `CommitmentTree` variant has been added to `data_api::error::Error`
- `data_api::wallet::{create_spend_to_address, create_proposed_transaction,
shield_transparent_funds}` all now require that `WalletCommitmentTrees` be
implemented for the type passed to them for the `wallet_db` parameter.
- `data_api::wallet::create_proposed_transaction` now takes an additional
`min_confirmations` argument.
- `data_api::wallet::{spend, create_spend_to_address, shield_transparent_funds,
propose_transfer, propose_shielding, create_proposed_transaction}` now take their
respective `min_confirmations` arguments as `NonZeroU32`
- `data_api::wallet::input_selection::InputSelector::{propose_transaction, propose_shielding}`
now take their respective `min_confirmations` arguments as `NonZeroU32`
- A new `Scan` variant has been added to `data_api::chain::error::Error`.
- A new `SyncRequired` variant has been added to `data_api::wallet::input_selection::InputSelectorError`.
- `zcash_client_backend::wallet`:
- `SpendableNote` has been renamed to `ReceivedSaplingNote`.
- Arguments to `WalletSaplingOutput::from_parts` have changed.
- `zcash_client_backend::data_api::wallet::input_selection::InputSelector`:
- Arguments to `{propose_transaction, propose_shielding}` have changed.
- `zcash_client_backend::wallet::ReceivedSaplingNote::note_commitment_tree_position`
has replaced the `witness` field in the same struct.
- `zcash_client_backend::welding_rig` has been renamed to `zcash_client_backend::scanning`
- `zcash_client_backend::scanning::ScanningKey::sapling_nf` has been changed to
take a note position instead of an incremental witness for the note.
- Arguments to `zcash_client_backend::scanning::scan_block` have changed. This
method now takes an optional `BlockMetadata` argument instead of a base commitment
tree and incremental witnesses for each previously-known note. In addition, the
return type has now been updated to return a `Result<ScannedBlock, ScanError>`.
### Removed ### Removed
- `WalletRead::get_all_nullifiers` - `zcash_client_backend::data_api`:
- `WalletRead::get_all_nullifiers`
- `WalletRead::{get_commitment_tree, get_witnesses}` have been removed
without replacement. The utility of these methods is now subsumed
by those available from the `WalletCommitmentTrees` trait.
- `WalletWrite::advance_by_block` (use `WalletWrite::put_block` instead).
- `PrunedBlock` has been replaced by `ScannedBlock`
- `testing::MockWalletDb`, which is available under the `test-dependencies`
feature flag, has been modified by the addition of a `sapling_tree` property.
- `wallet::input_selection`:
- `Proposal::target_height` (use `Proposal::min_target_height` instead).
- `zcash_client_backend::data_api::chain::validate_chain` (TODO: document how
to handle validation given out-of-order blocks).
- `zcash_client_backend::data_api::chain::error::{ChainError, Cause}` have been
replaced by `zcash_client_backend::scanning::ScanError`
- `zcash_client_backend::wallet::WalletSaplingOutput::{witness, witness_mut}`
have been removed as individual incremental witnesses are no longer tracked on a
per-note basis. The global note commitment tree for the wallet should be used
to obtain witnesses for spend operations instead.
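
A minimal sketch of the new calling conventions described above (the wallet database and block source types are whatever the downstream wallet already uses; only the trait bounds shown are required):

```rust
use std::num::NonZeroU32;

use zcash_client_backend::data_api::{
    chain::{error::Error, scan_cached_blocks, BlockSource},
    WalletWrite,
};
use zcash_primitives::consensus;

// Scan linearly from wherever the wallet last fully scanned, then build the
// NonZeroU32 confirmation policy that the spending APIs now require.
fn sync_once<P, BS, DB>(
    params: &P,
    block_source: &BS,
    db_data: &mut DB,
) -> Result<NonZeroU32, Error<DB::Error, BS::Error>>
where
    P: consensus::Parameters + Send + 'static,
    BS: BlockSource,
    DB: WalletWrite,
{
    // `from_height = None` continues from the last fully-scanned block;
    // `limit = None` scans everything currently present in the block source.
    scan_cached_blocks(params, block_source, db_data, None, None)?;

    // `min_confirmations` arguments are now `NonZeroU32`, so zero-confirmation
    // spends are unrepresentable at the type level.
    Ok(NonZeroU32::new(10).expect("10 is nonzero"))
}
```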
## [0.9.0] - 2023-04-28 ## [0.9.0] - 2023-04-28
### Added ### Added

View File

@ -21,6 +21,7 @@ development = ["zcash_proofs"]
[dependencies] [dependencies]
incrementalmerkletree = { version = "0.4", features = ["legacy-api"] } incrementalmerkletree = { version = "0.4", features = ["legacy-api"] }
shardtree = "0.0"
zcash_address = { version = "0.3", path = "../components/zcash_address" } zcash_address = { version = "0.3", path = "../components/zcash_address" }
zcash_encoding = { version = "0.2", path = "../components/zcash_encoding" } zcash_encoding = { version = "0.2", path = "../components/zcash_encoding" }
zcash_note_encryption = "0.4" zcash_note_encryption = "0.4"

View File

@ -45,6 +45,10 @@ fn build() -> io::Result<()> {
// Build the gRPC types and client. // Build the gRPC types and client.
tonic_build::configure() tonic_build::configure()
.build_server(false) .build_server(false)
.extern_path(
".cash.z.wallet.sdk.rpc.ChainMetadata",
"crate::proto::compact_formats::ChainMetadata",
)
.extern_path( .extern_path(
".cash.z.wallet.sdk.rpc.CompactBlock", ".cash.z.wallet.sdk.rpc.CompactBlock",
"crate::proto::compact_formats::CompactBlock", "crate::proto::compact_formats::CompactBlock",

View File

@ -10,18 +10,25 @@ option swift_prefix = "";
// Remember that proto3 fields are all optional. A field that is not present will be set to its zero value. // Remember that proto3 fields are all optional. A field that is not present will be set to its zero value.
// bytes fields of hashes are in canonical little-endian format. // bytes fields of hashes are in canonical little-endian format.
// ChainMetadata represents information about the state of the chain as of a given block.
message ChainMetadata {
uint32 saplingCommitmentTreeSize = 1; // the size of the Sapling note commitment tree as of the end of this block
uint32 orchardCommitmentTreeSize = 2; // the size of the Orchard note commitment tree as of the end of this block
}
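
These end-of-block tree sizes are what allow a light wallet to assign absolute note commitment tree positions to the outputs it decrypts, without maintaining per-note witnesses. A brief sketch of the arithmetic (in Rust, since the consuming code is Rust; the function names are purely illustrative):

```rust
// The tree size recorded for block N - 1 counts every leaf appended by blocks up to
// and including N - 1, so it is also the absolute position of the first Sapling
// output in block N.
fn first_sapling_position_in_block(prior_block_sapling_tree_size: u32) -> u64 {
    u64::from(prior_block_sapling_tree_size)
}

// The k-th Sapling output within block N (counting from zero across the whole block)
// then occupies the position `prior_size + k`.
fn sapling_output_position(prior_block_sapling_tree_size: u32, k: u64) -> u64 {
    u64::from(prior_block_sapling_tree_size) + k
}
```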
// CompactBlock is a packaging of ONLY the data from a block that's needed to: // CompactBlock is a packaging of ONLY the data from a block that's needed to:
// 1. Detect a payment to your shielded Sapling address // 1. Detect a payment to your shielded Sapling address
// 2. Detect a spend of your shielded Sapling notes // 2. Detect a spend of your shielded Sapling notes
// 3. Update your witnesses to generate new Sapling spend proofs. // 3. Update your witnesses to generate new Sapling spend proofs.
message CompactBlock { message CompactBlock {
uint32 protoVersion = 1; // the version of this wire format, for storage uint32 protoVersion = 1; // the version of this wire format, for storage
uint64 height = 2; // the height of this block uint64 height = 2; // the height of this block
bytes hash = 3; // the ID (hash) of this block, same as in block explorers bytes hash = 3; // the ID (hash) of this block, same as in block explorers
bytes prevHash = 4; // the ID (hash) of this block's predecessor bytes prevHash = 4; // the ID (hash) of this block's predecessor
uint32 time = 5; // Unix epoch time when the block was mined uint32 time = 5; // Unix epoch time when the block was mined
bytes header = 6; // (hash, prevHash, and time) OR (full header) bytes header = 6; // (hash, prevHash, and time) OR (full header)
repeated CompactTx vtx = 7; // zero or more compact transactions from this block repeated CompactTx vtx = 7; // zero or more compact transactions from this block
ChainMetadata chainMetadata = 8; // information about the state of the chain as of this block
} }
// CompactTx contains the minimum information for a wallet to know if this transaction // CompactTx contains the minimum information for a wallet to know if this transaction

View File

@ -1,10 +1,13 @@
//! Interfaces for wallet data persistence & low-level wallet utilities. //! Interfaces for wallet data persistence & low-level wallet utilities.
use std::cmp;
use std::collections::HashMap; use std::collections::HashMap;
use std::fmt::Debug; use std::fmt::Debug;
use std::num::NonZeroU32;
use std::{cmp, ops::Range};
use incrementalmerkletree::Retention;
use secrecy::SecretVec; use secrecy::SecretVec;
use shardtree::{ShardStore, ShardTree, ShardTreeError};
use zcash_primitives::{ use zcash_primitives::{
block::BlockHash, block::BlockHash,
consensus::BlockHeight, consensus::BlockHeight,
@ -29,6 +32,8 @@ pub mod chain;
pub mod error; pub mod error;
pub mod wallet; pub mod wallet;
pub const SAPLING_SHARD_HEIGHT: u8 = sapling::NOTE_COMMITMENT_TREE_DEPTH / 2;
pub enum NullifierQuery { pub enum NullifierQuery {
Unspent, Unspent,
All, All,
@ -61,6 +66,32 @@ pub trait WalletRead {
/// This will return `Ok(None)` if no block data is present in the database. /// This will return `Ok(None)` if no block data is present in the database.
fn block_height_extrema(&self) -> Result<Option<(BlockHeight, BlockHeight)>, Self::Error>; fn block_height_extrema(&self) -> Result<Option<(BlockHeight, BlockHeight)>, Self::Error>;
/// Returns the available block metadata for the block at the specified height, if any.
fn block_metadata(&self, height: BlockHeight) -> Result<Option<BlockMetadata>, Self::Error>;
/// Returns the metadata for the block at the height to which the wallet has been fully
/// scanned.
///
/// This is the greatest height for which the wallet has trial-decrypted that block and all
/// preceding blocks above the wallet's birthday height. Along with this height, this method returns
/// metadata describing the state of the wallet's note commitment trees as of the end of that
/// block.
fn block_fully_scanned(&self) -> Result<Option<BlockMetadata>, Self::Error>;
/// Returns a vector of suggested scan ranges based upon the current wallet state.
///
/// This method should only be used in cases where the [`CompactBlock`] data that will be made
/// available to `scan_cached_blocks` for the requested block ranges includes note commitment
/// tree size information for each block; otherwise, the scan is likely to fail if notes belonging
/// to the wallet are detected.
///
/// [`CompactBlock`]: crate::proto::compact_formats::CompactBlock
fn suggest_scan_ranges(
&self,
batch_size: usize,
limit: usize,
) -> Result<Vec<Range<BlockHeight>>, Self::Error>;
/// Returns the default target height (for the block in which a new /// Returns the default target height (for the block in which a new
/// transaction would be mined) and anchor height (to use for a new /// transaction would be mined) and anchor height (to use for a new
/// transaction), given the range of block heights that the backend /// transaction), given the range of block heights that the backend
@ -69,7 +100,7 @@ pub trait WalletRead {
/// This will return `Ok(None)` if no block data is present in the database. /// This will return `Ok(None)` if no block data is present in the database.
fn get_target_and_anchor_heights( fn get_target_and_anchor_heights(
&self, &self,
min_confirmations: u32, min_confirmations: NonZeroU32,
) -> Result<Option<(BlockHeight, BlockHeight)>, Self::Error> { ) -> Result<Option<(BlockHeight, BlockHeight)>, Self::Error> {
self.block_height_extrema().map(|heights| { self.block_height_extrema().map(|heights| {
heights.map(|(min_height, max_height)| { heights.map(|(min_height, max_height)| {
@ -78,7 +109,7 @@ pub trait WalletRead {
// Select an anchor min_confirmations back from the target block, // Select an anchor min_confirmations back from the target block,
// unless that would be before the earliest block we have. // unless that would be before the earliest block we have.
let anchor_height = BlockHeight::from(cmp::max( let anchor_height = BlockHeight::from(cmp::max(
u32::from(target_height).saturating_sub(min_confirmations), u32::from(target_height).saturating_sub(min_confirmations.into()),
u32::from(min_height), u32::from(min_height),
)); ));
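
A brief worked example of the computation above, with illustrative numbers:

```rust
// Given a target height of 1_000_001 and min_confirmations = 10, the anchor is
// max(1_000_001 - 10, min_height). A wallet whose earliest block is 999_995 therefore
// has its anchor clamped up to 999_995 rather than 999_991.
fn anchor_example() {
    let target: u32 = 1_000_001;
    let min_confirmations: u32 = 10;
    let min_height: u32 = 999_995;
    let anchor = core::cmp::max(target.saturating_sub(min_confirmations), min_height);
    assert_eq!(anchor, 999_995);
}
```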
@ -165,19 +196,6 @@ pub trait WalletRead {
/// Returns a transaction. /// Returns a transaction.
fn get_transaction(&self, id_tx: Self::TxRef) -> Result<Transaction, Self::Error>; fn get_transaction(&self, id_tx: Self::TxRef) -> Result<Transaction, Self::Error>;
/// Returns the note commitment tree at the specified block height.
fn get_commitment_tree(
&self,
block_height: BlockHeight,
) -> Result<Option<sapling::CommitmentTree>, Self::Error>;
/// Returns the incremental witnesses as of the specified block height.
#[allow(clippy::type_complexity)]
fn get_witnesses(
&self,
block_height: BlockHeight,
) -> Result<Vec<(Self::NoteRef, sapling::IncrementalWitness)>, Self::Error>;
/// Returns the nullifiers for notes that the wallet is tracking, along with their associated /// Returns the nullifiers for notes that the wallet is tracking, along with their associated
/// account IDs, that are either unspent or have not yet been confirmed as spent (in that a /// account IDs, that are either unspent or have not yet been confirmed as spent (in that a
/// spending transaction known to the wallet has not yet been included in a block). /// spending transaction known to the wallet has not yet been included in a block).
@ -232,16 +250,99 @@ pub trait WalletRead {
) -> Result<HashMap<TransparentAddress, Amount>, Self::Error>; ) -> Result<HashMap<TransparentAddress, Amount>, Self::Error>;
} }
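
A minimal sketch of how a sync driver might consume the new methods, assuming the `WalletRead` trait above is in scope and that `scan_range` stands in for downloading the blocks in a range and handing them to `scan_cached_blocks` with `from_height = Some(range.start)`:

```rust
use std::ops::Range;

use zcash_primitives::consensus::BlockHeight;

// Ask the wallet which ranges still need trial decryption, scan them, and then
// check how far contiguous coverage now extends.
fn drive_sync<DB: WalletRead>(
    db: &DB,
    mut scan_range: impl FnMut(Range<BlockHeight>),
) -> Result<(), DB::Error> {
    // Suggest ranges in 1000-block batches, at most 10 suggestions per pass
    // (both values are illustrative).
    for range in db.suggest_scan_ranges(1000, 10)? {
        scan_range(range);
    }
    // `block_fully_scanned` reports the height (and tree state) up to which the
    // wallet has trial-decrypted every block above its birthday.
    let _fully_scanned = db.block_fully_scanned()?;
    Ok(())
}
```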
/// Metadata describing the sizes of the zcash note commitment trees as of a particular block.
#[derive(Debug, Clone, Copy)]
pub struct BlockMetadata {
block_height: BlockHeight,
block_hash: BlockHash,
sapling_tree_size: u32,
//TODO: orchard_tree_size: u32
}
impl BlockMetadata {
/// Constructs a new [`BlockMetadata`] value from its constituent parts.
pub fn from_parts(
block_height: BlockHeight,
block_hash: BlockHash,
sapling_tree_size: u32,
) -> Self {
Self {
block_height,
block_hash,
sapling_tree_size,
}
}
/// Returns the block height.
pub fn block_height(&self) -> BlockHeight {
self.block_height
}
/// Returns the hash of the block.
pub fn block_hash(&self) -> BlockHash {
self.block_hash
}
/// Returns the size of the Sapling note commitment tree as of the block that this
/// [`BlockMetadata`] describes.
pub fn sapling_tree_size(&self) -> u32 {
self.sapling_tree_size
}
}
/// The subset of information that is relevant to this wallet that has been /// The subset of information that is relevant to this wallet that has been
/// decrypted and extracted from a [`CompactBlock`]. /// decrypted and extracted from a [`CompactBlock`].
/// ///
/// [`CompactBlock`]: crate::proto::compact_formats::CompactBlock /// [`CompactBlock`]: crate::proto::compact_formats::CompactBlock
pub struct PrunedBlock<'a> { pub struct ScannedBlock<Nf> {
pub block_height: BlockHeight, metadata: BlockMetadata,
pub block_hash: BlockHash, block_time: u32,
pub block_time: u32, transactions: Vec<WalletTx<Nf>>,
pub commitment_tree: &'a sapling::CommitmentTree, sapling_commitments: Vec<(sapling::Node, Retention<BlockHeight>)>,
pub transactions: &'a Vec<WalletTx<sapling::Nullifier>>, }
impl<Nf> ScannedBlock<Nf> {
pub fn from_parts(
metadata: BlockMetadata,
block_time: u32,
transactions: Vec<WalletTx<Nf>>,
sapling_commitments: Vec<(sapling::Node, Retention<BlockHeight>)>,
) -> Self {
Self {
metadata,
block_time,
transactions,
sapling_commitments,
}
}
pub fn height(&self) -> BlockHeight {
self.metadata.block_height
}
pub fn block_hash(&self) -> BlockHash {
self.metadata.block_hash
}
pub fn block_time(&self) -> u32 {
self.block_time
}
pub fn metadata(&self) -> &BlockMetadata {
&self.metadata
}
pub fn transactions(&self) -> &[WalletTx<Nf>] {
&self.transactions
}
pub fn sapling_commitments(&self) -> &[(sapling::Node, Retention<BlockHeight>)] {
&self.sapling_commitments
}
pub fn into_sapling_commitments(self) -> Vec<(sapling::Node, Retention<BlockHeight>)> {
self.sapling_commitments
}
} }
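
A minimal sketch of assembling a `ScannedBlock` from its parts, assuming the types above are in scope; in practice this construction happens inside `zcash_client_backend::scanning::scan_block`, and the empty transaction list here is purely illustrative:

```rust
use incrementalmerkletree::Retention;
use zcash_primitives::{block::BlockHash, consensus::BlockHeight, sapling};

// Build a ScannedBlock describing a block with no wallet-relevant transactions;
// `put_block` would still record its metadata and append `commitments` to the
// wallet's Sapling note commitment tree.
fn empty_scanned_block(
    height: BlockHeight,
    hash: BlockHash,
    sapling_tree_size: u32,
    block_time: u32,
    commitments: Vec<(sapling::Node, Retention<BlockHeight>)>,
) -> ScannedBlock<sapling::Nullifier> {
    let metadata = BlockMetadata::from_parts(height, hash, sapling_tree_size);
    ScannedBlock::from_parts(metadata, block_time, Vec::new(), commitments)
}
```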
/// A transaction that was detected during scanning of the blockchain, /// A transaction that was detected during scanning of the blockchain,
@ -381,16 +482,14 @@ pub trait WalletWrite: WalletRead {
account: AccountId, account: AccountId,
) -> Result<Option<UnifiedAddress>, Self::Error>; ) -> Result<Option<UnifiedAddress>, Self::Error>;
/// Updates the state of the wallet database by persisting the provided /// Updates the state of the wallet database by persisting the provided block information,
/// block information, along with the updated witness data that was /// along with the note commitments that were detected when scanning the block for transactions
/// produced when scanning the block for transactions pertaining to /// pertaining to this wallet.
/// this wallet.
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
fn advance_by_block( fn put_block(
&mut self, &mut self,
block: &PrunedBlock, block: ScannedBlock<sapling::Nullifier>,
updated_witnesses: &[(Self::NoteRef, sapling::IncrementalWitness)], ) -> Result<Vec<Self::NoteRef>, Self::Error>;
) -> Result<Vec<(Self::NoteRef, sapling::IncrementalWitness)>, Self::Error>;
/// Caches a decrypted transaction in the persistent wallet store. /// Caches a decrypted transaction in the persistent wallet store.
fn store_decrypted_tx( fn store_decrypted_tx(
@ -424,10 +523,35 @@ pub trait WalletWrite: WalletRead {
) -> Result<Self::UtxoRef, Self::Error>; ) -> Result<Self::UtxoRef, Self::Error>;
} }
/// This trait describes a capability for manipulating wallet note commitment trees.
///
/// At present, this only serves the Sapling protocol, but it will be modified to
/// also provide operations related to Orchard note commitment trees in the future.
pub trait WalletCommitmentTrees {
type Error;
type SaplingShardStore<'a>: ShardStore<
H = sapling::Node,
CheckpointId = BlockHeight,
Error = Self::Error,
>;
fn with_sapling_tree_mut<F, A, E>(&mut self, callback: F) -> Result<A, E>
where
for<'a> F: FnMut(
&'a mut ShardTree<
Self::SaplingShardStore<'a>,
{ sapling::NOTE_COMMITMENT_TREE_DEPTH },
SAPLING_SHARD_HEIGHT,
>,
) -> Result<A, E>,
E: From<ShardTreeError<Self::Error>>;
}
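
A minimal sketch of the callback pattern this trait provides, assuming the trait above is in scope; the particular tree operation used (taking a checkpoint at a block height via a `ShardTree::checkpoint` method) is an assumption about the `shardtree` API rather than something this trait mandates:

```rust
use shardtree::ShardTreeError;
use zcash_primitives::consensus::BlockHeight;

// Mutate the wallet's Sapling note commitment tree inside the callback; any
// ShardTreeError produced there converts into the caller's error type through the
// `E: From<ShardTreeError<Self::Error>>` bound.
fn checkpoint_sapling_tree<T: WalletCommitmentTrees>(
    wallet_db: &mut T,
    height: BlockHeight,
) -> Result<bool, ShardTreeError<T::Error>> {
    // `checkpoint` is assumed to report whether a new checkpoint was created.
    wallet_db.with_sapling_tree_mut(|tree| tree.checkpoint(height))
}
```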
#[cfg(feature = "test-dependencies")] #[cfg(feature = "test-dependencies")]
pub mod testing { pub mod testing {
use secrecy::{ExposeSecret, SecretVec}; use secrecy::{ExposeSecret, SecretVec};
use std::collections::HashMap; use shardtree::{MemoryShardStore, ShardTree, ShardTreeError};
use std::{collections::HashMap, convert::Infallible, ops::Range};
use zcash_primitives::{ use zcash_primitives::{
block::BlockHash, block::BlockHash,
@ -449,11 +573,26 @@ pub mod testing {
}; };
use super::{ use super::{
DecryptedTransaction, NullifierQuery, PrunedBlock, SentTransaction, WalletRead, WalletWrite, BlockMetadata, DecryptedTransaction, NullifierQuery, ScannedBlock, SentTransaction,
WalletCommitmentTrees, WalletRead, WalletWrite, SAPLING_SHARD_HEIGHT,
}; };
pub struct MockWalletDb { pub struct MockWalletDb {
pub network: Network, pub network: Network,
pub sapling_tree: ShardTree<
MemoryShardStore<sapling::Node, BlockHeight>,
{ SAPLING_SHARD_HEIGHT * 2 },
SAPLING_SHARD_HEIGHT,
>,
}
impl MockWalletDb {
pub fn new(network: Network) -> Self {
Self {
network,
sapling_tree: ShardTree::new(MemoryShardStore::empty(), 100),
}
}
} }
impl WalletRead for MockWalletDb { impl WalletRead for MockWalletDb {
@ -465,6 +604,25 @@ pub mod testing {
Ok(None) Ok(None)
} }
fn block_metadata(
&self,
_height: BlockHeight,
) -> Result<Option<BlockMetadata>, Self::Error> {
Ok(None)
}
fn block_fully_scanned(&self) -> Result<Option<BlockMetadata>, Self::Error> {
Ok(None)
}
fn suggest_scan_ranges(
&self,
_batch_size: usize,
_limit: usize,
) -> Result<Vec<Range<BlockHeight>>, Self::Error> {
Ok(vec![])
}
fn get_min_unspent_height(&self) -> Result<Option<BlockHeight>, Self::Error> { fn get_min_unspent_height(&self) -> Result<Option<BlockHeight>, Self::Error> {
Ok(None) Ok(None)
} }
@ -524,21 +682,6 @@ pub mod testing {
Err(()) Err(())
} }
fn get_commitment_tree(
&self,
_block_height: BlockHeight,
) -> Result<Option<sapling::CommitmentTree>, Self::Error> {
Ok(None)
}
#[allow(clippy::type_complexity)]
fn get_witnesses(
&self,
_block_height: BlockHeight,
) -> Result<Vec<(Self::NoteRef, sapling::IncrementalWitness)>, Self::Error> {
Ok(Vec::new())
}
fn get_sapling_nullifiers( fn get_sapling_nullifiers(
&self, &self,
_query: NullifierQuery, _query: NullifierQuery,
@ -611,11 +754,10 @@ pub mod testing {
} }
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
fn advance_by_block( fn put_block(
&mut self, &mut self,
_block: &PrunedBlock, _block: ScannedBlock<sapling::Nullifier>,
_updated_witnesses: &[(Self::NoteRef, sapling::IncrementalWitness)], ) -> Result<Vec<Self::NoteRef>, Self::Error> {
) -> Result<Vec<(Self::NoteRef, sapling::IncrementalWitness)>, Self::Error> {
Ok(vec![]) Ok(vec![])
} }
@ -645,4 +787,23 @@ pub mod testing {
Ok(0) Ok(0)
} }
} }
impl WalletCommitmentTrees for MockWalletDb {
type Error = Infallible;
type SaplingShardStore<'a> = MemoryShardStore<sapling::Node, BlockHeight>;
fn with_sapling_tree_mut<F, A, E>(&mut self, mut callback: F) -> Result<A, E>
where
for<'a> F: FnMut(
&'a mut ShardTree<
Self::SaplingShardStore<'a>,
{ sapling::NOTE_COMMITMENT_TREE_DEPTH },
SAPLING_SHARD_HEIGHT,
>,
) -> Result<A, E>,
E: From<ShardTreeError<Infallible>>,
{
callback(&mut self.sapling_tree)
}
}
} }

View File

@ -17,7 +17,6 @@
//! BlockSource, //! BlockSource,
//! error::Error, //! error::Error,
//! scan_cached_blocks, //! scan_cached_blocks,
//! validate_chain,
//! testing as chain_testing, //! testing as chain_testing,
//! }, //! },
//! testing, //! testing,
@ -30,81 +29,42 @@
//! # test(); //! # test();
//! # } //! # }
//! # //! #
//! # fn test() -> Result<(), Error<(), Infallible, u32>> { //! # fn test() -> Result<(), Error<(), Infallible>> {
//! let network = Network::TestNetwork; //! let network = Network::TestNetwork;
//! let block_source = chain_testing::MockBlockSource; //! let block_source = chain_testing::MockBlockSource;
//! let mut db_data = testing::MockWalletDb { //! let mut db_data = testing::MockWalletDb::new(Network::TestNetwork);
//! network: Network::TestNetwork
//! };
//! //!
//! // 1) Download new CompactBlocks into block_source. //! // 1) Download new CompactBlocks into block_source.
//!
//! // 2) Run the chain validator on the received blocks.
//! // //! //
//! // Given that we assume the server always gives us correct-at-the-time blocks, any //! // 2) FIXME: Obtain necessary block metadata for continuity checking?
//! // errors are in the blocks we have previously cached or scanned. //! //
//! let max_height_hash = db_data.get_max_height_hash().map_err(Error::Wallet)?; //! // 3) Scan cached blocks.
//! if let Err(e) = validate_chain(&block_source, max_height_hash, None) { //! //
//! match e { //! // FIXME: update documentation on how to detect when a rewind is required.
//! Error::Chain(e) => {
//! // a) Pick a height to rewind to.
//! //
//! // This might be informed by some external chain reorg information, or
//! // heuristics such as the platform, available bandwidth, size of recent
//! // CompactBlocks, etc.
//! let rewind_height = e.at_height() - 10;
//!
//! // b) Rewind scanned block information.
//! db_data.truncate_to_height(rewind_height);
//!
//! // c) Delete cached blocks from rewind_height onwards.
//! //
//! // This does imply that assumed-valid blocks will be re-downloaded, but it
//! // is also possible that in the intervening time, a chain reorg has
//! // occurred that orphaned some of those blocks.
//!
//! // d) If there is some separate thread or service downloading
//! // CompactBlocks, tell it to go back and download from rewind_height
//! // onwards.
//! },
//! e => {
//! // handle or return other errors
//!
//! }
//! }
//! }
//!
//! // 3) Scan (any remaining) cached blocks.
//! // //! //
//! // At this point, the cache and scanned data are locally consistent (though not //! // At this point, the cache and scanned data are locally consistent (though not
//! // necessarily consistent with the latest chain tip - this would be discovered the //! // necessarily consistent with the latest chain tip - this would be discovered the
//! // next time this codepath is executed after new blocks are received). //! // next time this codepath is executed after new blocks are received).
//! scan_cached_blocks(&network, &block_source, &mut db_data, None) //! scan_cached_blocks(&network, &block_source, &mut db_data, None, None)
//! # } //! # }
//! # } //! # }
//! ``` //! ```
use std::convert::Infallible;
use zcash_primitives::{ use zcash_primitives::{
block::BlockHash,
consensus::{self, BlockHeight}, consensus::{self, BlockHeight},
sapling::{self, note_encryption::PreparedIncomingViewingKey, Nullifier}, sapling::{self, note_encryption::PreparedIncomingViewingKey},
zip32::Scope, zip32::Scope,
}; };
use crate::{ use crate::{
data_api::{PrunedBlock, WalletWrite}, data_api::{NullifierQuery, WalletWrite},
proto::compact_formats::CompactBlock, proto::compact_formats::CompactBlock,
scan::BatchRunner, scan::BatchRunner,
wallet::WalletTx, scanning::{add_block_to_runner, scan_block_with_runner},
welding_rig::{add_block_to_runner, scan_block_with_runner},
}; };
pub mod error; pub mod error;
use error::{ChainError, Error}; use error::Error;
use super::NullifierQuery;
/// This trait provides sequential access to raw blockchain data via a callback-oriented /// This trait provides sequential access to raw blockchain data via a callback-oriented
/// API. /// API.
@ -119,112 +79,47 @@ pub trait BlockSource {
/// as part of processing each row. /// as part of processing each row.
/// * `NoteRefT`: the type of note identifiers in the wallet data store, for use in /// * `NoteRefT`: the type of note identifiers in the wallet data store, for use in
/// reporting errors related to specific notes. /// reporting errors related to specific notes.
fn with_blocks<F, WalletErrT, NoteRefT>( fn with_blocks<F, WalletErrT>(
&self, &self,
from_height: Option<BlockHeight>, from_height: Option<BlockHeight>,
limit: Option<u32>, limit: Option<u32>,
with_row: F, with_row: F,
) -> Result<(), error::Error<WalletErrT, Self::Error, NoteRefT>> ) -> Result<(), error::Error<WalletErrT, Self::Error>>
where where
F: FnMut(CompactBlock) -> Result<(), error::Error<WalletErrT, Self::Error, NoteRefT>>; F: FnMut(CompactBlock) -> Result<(), error::Error<WalletErrT, Self::Error>>;
}
/// Checks that the scanned blocks in the data database, when combined with the recent
/// `CompactBlock`s in the block_source database, form a valid chain.
///
/// This function is built on the core assumption that the information provided in the
/// block source is more likely to be accurate than the previously-scanned information.
/// This follows from the design (and trust) assumption that the `lightwalletd` server
/// provides accurate block information as of the time it was requested.
///
/// Arguments:
/// - `block_source` Source of compact blocks
/// - `validate_from` Height & hash of last validated block;
/// - `limit` specified number of blocks that will be valididated. Callers providing
/// a `limit` argument are responsible of making subsequent calls to `validate_chain()`
/// to complete validating the remaining blocks stored on the `block_source`. If `none`
/// is provided, there will be no limit set to the validation and upper bound of the
/// validation range will be the latest height present in the `block_source`.
///
/// Returns:
/// - `Ok(())` if the combined chain is valid up to the given height
/// and block hash.
/// - `Err(Error::Chain(cause))` if the combined chain is invalid.
/// - `Err(e)` if there was an error during validation unrelated to chain validity.
pub fn validate_chain<BlockSourceT>(
block_source: &BlockSourceT,
mut validate_from: Option<(BlockHeight, BlockHash)>,
limit: Option<u32>,
) -> Result<(), Error<Infallible, BlockSourceT::Error, Infallible>>
where
BlockSourceT: BlockSource,
{
// The block source will contain blocks above the `validate_from` height. Validate from that
// maximum height up to the chain tip, returning the hash of the block found in the block
// source at the `validate_from` height, which can then be used to verify chain integrity by
// comparing against the `validate_from` hash.
block_source.with_blocks::<_, Infallible, Infallible>(
validate_from.map(|(h, _)| h),
limit,
move |block| {
if let Some((valid_height, valid_hash)) = validate_from {
if block.height() != valid_height + 1 {
return Err(ChainError::block_height_discontinuity(
valid_height + 1,
block.height(),
)
.into());
} else if block.prev_hash() != valid_hash {
return Err(ChainError::prev_hash_mismatch(block.height()).into());
}
}
validate_from = Some((block.height(), block.hash()));
Ok(())
},
)
} }
/// Scans at most `limit` new blocks added to the block source for any transactions received by the /// Scans at most `limit` new blocks added to the block source for any transactions received by the
/// tracked accounts. /// tracked accounts.
/// ///
/// If the `from_height` argument is not `None`, then this method will begin requesting blocks
/// from the provided block source at the specified height; if `from_height` is `None`, then this
/// will begin scanning at the first block after the position to which the wallet has previously
/// fully scanned the chain, thereby beginning or continuing a linear scan over all blocks.
///
/// This function will return without error after scanning at most `limit` new blocks, to enable /// This function will return without error after scanning at most `limit` new blocks, to enable
/// the caller to update their UI with scanning progress. Repeatedly calling this function will /// the caller to update their UI with scanning progress. Repeatedly calling this function with
/// process sequential ranges of blocks, and is equivalent to calling `scan_cached_blocks` and /// `from_height == None` will process sequential ranges of blocks.
/// passing `None` for the optional `limit` value.
/// ///
/// This function pays attention only to cached blocks with heights greater than the highest /// For brand-new light client databases, if `from_height == None` this function starts scanning
/// scanned block in `data`. Cached blocks with lower heights are not verified against /// from the Sapling activation height. This height can be fast-forwarded to a more recent block by
/// previously-scanned blocks. In particular, this function **assumes** that the caller is handling /// initializing the client database with a starting block (for example, calling
/// rollbacks. /// `init_blocks_table` before this function if using `zcash_client_sqlite`).
///
/// For brand-new light client databases, this function starts scanning from the Sapling activation
/// height. This height can be fast-forwarded to a more recent block by initializing the client
/// database with a starting block (for example, calling `init_blocks_table` before this function
/// if using `zcash_client_sqlite`).
///
/// Scanned blocks are required to be height-sequential. If a block is missing from the block
/// source, an error will be returned with cause [`error::Cause::BlockHeightDiscontinuity`].
#[tracing::instrument(skip(params, block_source, data_db))] #[tracing::instrument(skip(params, block_source, data_db))]
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
pub fn scan_cached_blocks<ParamsT, DbT, BlockSourceT>( pub fn scan_cached_blocks<ParamsT, DbT, BlockSourceT>(
params: &ParamsT, params: &ParamsT,
block_source: &BlockSourceT, block_source: &BlockSourceT,
data_db: &mut DbT, data_db: &mut DbT,
from_height: Option<BlockHeight>,
limit: Option<u32>, limit: Option<u32>,
) -> Result<(), Error<DbT::Error, BlockSourceT::Error, DbT::NoteRef>> ) -> Result<(), Error<DbT::Error, BlockSourceT::Error>>
where where
ParamsT: consensus::Parameters + Send + 'static, ParamsT: consensus::Parameters + Send + 'static,
BlockSourceT: BlockSource, BlockSourceT: BlockSource,
DbT: WalletWrite, DbT: WalletWrite,
{ {
// Recall where we synced up to previously.
let mut last_height = data_db
.block_height_extrema()
.map_err(Error::Wallet)?
.map(|(_, max)| max);
// Fetch the UnifiedFullViewingKeys we are tracking // Fetch the UnifiedFullViewingKeys we are tracking
let ufvks = data_db let ufvks = data_db
.get_unified_full_viewing_keys() .get_unified_full_viewing_keys()
@ -236,25 +131,8 @@ where
.filter_map(|(account, ufvk)| ufvk.sapling().map(move |k| (account, k))) .filter_map(|(account, ufvk)| ufvk.sapling().map(move |k| (account, k)))
.collect(); .collect();
// Get the most recent CommitmentTree // Get the nullifiers for the unspent notes we are tracking
let mut tree = last_height.map_or_else( let mut sapling_nullifiers = data_db
|| Ok(sapling::CommitmentTree::empty()),
|h| {
data_db
.get_commitment_tree(h)
.map(|t| t.unwrap_or_else(sapling::CommitmentTree::empty))
.map_err(Error::Wallet)
},
)?;
// Get most recent incremental witnesses for the notes we are tracking
let mut witnesses = last_height.map_or_else(
|| Ok(vec![]),
|h| data_db.get_witnesses(h).map_err(Error::Wallet),
)?;
// Get the nullifiers for the notes we are tracking
let mut nullifiers = data_db
.get_sapling_nullifiers(NullifierQuery::Unspent) .get_sapling_nullifiers(NullifierQuery::Unspent)
.map_err(Error::Wallet)?; .map_err(Error::Wallet)?;
@ -271,106 +149,61 @@ where
.map(|(tag, ivk)| (tag, PreparedIncomingViewingKey::new(&ivk))), .map(|(tag, ivk)| (tag, PreparedIncomingViewingKey::new(&ivk))),
); );
block_source.with_blocks::<_, DbT::Error, DbT::NoteRef>( // Start at either the provided height, or where we synced up to previously.
last_height, let (scan_from, mut prior_block_metadata) = match from_height {
limit, Some(h) => {
|block: CompactBlock| { // if we are provided with a starting height, obtain the metadata for the previous
add_block_to_runner(params, block, &mut batch_runner); // block (if any is available)
Ok(()) (
}, Some(h),
)?; if h > BlockHeight::from(0) {
data_db.block_metadata(h - 1).map_err(Error::Wallet)?
} else {
None
},
)
}
None => {
let last_scanned = data_db.block_fully_scanned().map_err(Error::Wallet)?;
last_scanned.map_or_else(|| (None, None), |m| (Some(m.block_height + 1), Some(m)))
}
};
block_source.with_blocks::<_, DbT::Error>(scan_from, limit, |block: CompactBlock| {
add_block_to_runner(params, block, &mut batch_runner);
Ok(())
})?;
batch_runner.flush(); batch_runner.flush();
block_source.with_blocks::<_, DbT::Error, DbT::NoteRef>( block_source.with_blocks::<_, DbT::Error>(scan_from, limit, |block: CompactBlock| {
last_height, let scanned_block = scan_block_with_runner(
limit, params,
|block: CompactBlock| { block,
let current_height = block.height(); &dfvks,
&sapling_nullifiers,
prior_block_metadata.as_ref(),
Some(&mut batch_runner),
)
.map_err(Error::Scan)?;
// Scanned blocks MUST be height-sequential. let spent_nf: Vec<&sapling::Nullifier> = scanned_block
if let Some(h) = last_height { .transactions
if current_height != (h + 1) { .iter()
return Err( .flat_map(|tx| tx.sapling_spends.iter().map(|spend| spend.nf()))
ChainError::block_height_discontinuity(h + 1, current_height).into(), .collect();
);
}
}
let block_hash = BlockHash::from_slice(&block.hash); sapling_nullifiers.retain(|(_, nf)| !spent_nf.contains(&nf));
let block_time = block.time; sapling_nullifiers.extend(scanned_block.transactions.iter().flat_map(|tx| {
tx.sapling_outputs
let txs: Vec<WalletTx<Nullifier>> = {
let mut witness_refs: Vec<_> = witnesses.iter_mut().map(|w| &mut w.1).collect();
scan_block_with_runner(
params,
block,
&dfvks,
&nullifiers,
&mut tree,
&mut witness_refs[..],
Some(&mut batch_runner),
)
};
// Enforce that all roots match. This is slow, so only include in debug builds.
#[cfg(debug_assertions)]
{
let cur_root = tree.root();
for row in &witnesses {
if row.1.root() != cur_root {
return Err(
ChainError::invalid_witness_anchor(current_height, row.0).into()
);
}
}
for tx in &txs {
for output in tx.sapling_outputs.iter() {
if output.witness().root() != cur_root {
return Err(ChainError::invalid_new_witness_anchor(
current_height,
tx.txid,
output.index(),
output.witness().root(),
)
.into());
}
}
}
}
let new_witnesses = data_db
.advance_by_block(
&(PrunedBlock {
block_height: current_height,
block_hash,
block_time,
commitment_tree: &tree,
transactions: &txs,
}),
&witnesses,
)
.map_err(Error::Wallet)?;
let spent_nf: Vec<&Nullifier> = txs
.iter() .iter()
.flat_map(|tx| tx.sapling_spends.iter().map(|spend| spend.nf())) .map(|out| (out.account(), *out.nf()))
.collect(); }));
nullifiers.retain(|(_, nf)| !spent_nf.contains(&nf));
nullifiers.extend(txs.iter().flat_map(|tx| {
tx.sapling_outputs
.iter()
.map(|out| (out.account(), *out.nf()))
}));
witnesses.extend(new_witnesses); prior_block_metadata = Some(*scanned_block.metadata());
data_db.put_block(scanned_block).map_err(Error::Wallet)?;
last_height = Some(current_height); Ok(())
})?;
Ok(())
},
)?;
Ok(()) Ok(())
} }
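
A minimal sketch of the two calling modes described in the documentation above, written as if appended to this module so that `scan_cached_blocks`, `BlockSource`, `WalletWrite`, and the chain `Error` type are in scope; the batch sizes are illustrative:

```rust
// Linear mode continues from the last fully-scanned block; targeted mode starts at a
// caller-chosen height, in which case the wallet's `block_metadata` for the preceding
// block (if present) is used to check continuity with previously scanned data.
fn scan_examples<ParamsT, BlockSourceT, DbT>(
    params: &ParamsT,
    block_source: &BlockSourceT,
    data_db: &mut DbT,
    start: BlockHeight,
) -> Result<(), Error<DbT::Error, BlockSourceT::Error>>
where
    ParamsT: consensus::Parameters + Send + 'static,
    BlockSourceT: BlockSource,
    DbT: WalletWrite,
{
    // Continue a linear scan, at most 1000 blocks at a time.
    scan_cached_blocks(params, block_source, data_db, None, Some(1000))?;
    // Scan a specific range, e.g. one suggested by `WalletRead::suggest_scan_ranges`.
    scan_cached_blocks(params, block_source, data_db, Some(start), Some(100))
}
```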
@ -389,14 +222,14 @@ pub mod testing {
impl BlockSource for MockBlockSource { impl BlockSource for MockBlockSource {
type Error = Infallible; type Error = Infallible;
fn with_blocks<F, DbErrT, NoteRef>( fn with_blocks<F, DbErrT>(
&self, &self,
_from_height: Option<BlockHeight>, _from_height: Option<BlockHeight>,
_limit: Option<u32>, _limit: Option<u32>,
_with_row: F, _with_row: F,
) -> Result<(), Error<DbErrT, Infallible, NoteRef>> ) -> Result<(), Error<DbErrT, Infallible>>
where where
F: FnMut(CompactBlock) -> Result<(), Error<DbErrT, Infallible, NoteRef>>, F: FnMut(CompactBlock) -> Result<(), Error<DbErrT, Infallible>>,
{ {
Ok(()) Ok(())
} }

View File

@ -3,134 +3,11 @@
use std::error; use std::error;
use std::fmt::{self, Debug, Display}; use std::fmt::{self, Debug, Display};
use zcash_primitives::{consensus::BlockHeight, sapling, transaction::TxId}; use crate::scanning::ScanError;
/// The underlying cause of a [`ChainError`].
#[derive(Copy, Clone, Debug)]
pub enum Cause<NoteRef> {
/// The hash of the parent block given by a proposed new chain tip does not match the hash of
/// the current chain tip.
PrevHashMismatch,
/// The block height field of the proposed new chain tip is not equal to the height of the
/// previous chain tip + 1. This variant stores a copy of the incorrect height value for
/// reporting purposes.
BlockHeightDiscontinuity(BlockHeight),
/// The root of an output's witness tree in a newly arrived transaction does not correspond to
/// root of the stored commitment tree at the recorded height.
///
/// This error is currently only produced when performing the slow checks that are enabled by
/// compiling with `-C debug-assertions`.
InvalidNewWitnessAnchor {
/// The id of the transaction containing the mismatched witness.
txid: TxId,
/// The index of the shielded output within the transaction where the witness root does not
/// match.
index: usize,
/// The root of the witness that failed to match the root of the current note commitment
/// tree.
node: sapling::Node,
},
/// The root of an output's witness tree in a previously stored transaction does not correspond
/// to root of the current commitment tree.
///
/// This error is currently only produced when performing the slow checks that are enabled by
/// compiling with `-C debug-assertions`.
InvalidWitnessAnchor(NoteRef),
}
/// Errors that may occur in chain scanning or validation.
#[derive(Copy, Clone, Debug)]
pub struct ChainError<NoteRef> {
at_height: BlockHeight,
cause: Cause<NoteRef>,
}
impl<N: Display> fmt::Display for ChainError<N> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self.cause {
Cause::PrevHashMismatch => write!(
f,
"The parent hash of proposed block does not correspond to the block hash at height {}.",
self.at_height
),
Cause::BlockHeightDiscontinuity(h) => {
write!(f, "Block height discontinuity at height {}; next height is : {}", self.at_height, h)
}
Cause::InvalidNewWitnessAnchor { txid, index, node } => write!(
f,
"New witness for output {} in tx {} at height {} has incorrect anchor: {:?}",
index, txid, self.at_height, node,
),
Cause::InvalidWitnessAnchor(id_note) => {
write!(f, "Witness for note {} has incorrect anchor for height {}", id_note, self.at_height)
}
}
}
}
impl<NoteRef> ChainError<NoteRef> {
/// Constructs an error that indicates block hashes failed to chain.
///
/// * `at_height` the height of the block whose parent hash does not match the hash of the
/// previous block
pub fn prev_hash_mismatch(at_height: BlockHeight) -> Self {
ChainError {
at_height,
cause: Cause::PrevHashMismatch,
}
}
/// Constructs an error that indicates a gap in block heights.
///
/// * `at_height` the height of the block being added to the chain.
/// * `prev_chain_tip` the height of the previous chain tip.
pub fn block_height_discontinuity(at_height: BlockHeight, prev_chain_tip: BlockHeight) -> Self {
ChainError {
at_height,
cause: Cause::BlockHeightDiscontinuity(prev_chain_tip),
}
}
/// Constructs an error that indicates a mismatch between an updated note's witness and the
/// root of the current note commitment tree.
pub fn invalid_witness_anchor(at_height: BlockHeight, note_ref: NoteRef) -> Self {
ChainError {
at_height,
cause: Cause::InvalidWitnessAnchor(note_ref),
}
}
/// Constructs an error that indicates a mismatch between a new note's witness and the root of
/// the current note commitment tree.
pub fn invalid_new_witness_anchor(
at_height: BlockHeight,
txid: TxId,
index: usize,
node: sapling::Node,
) -> Self {
ChainError {
at_height,
cause: Cause::InvalidNewWitnessAnchor { txid, index, node },
}
}
/// Returns the block height at which this error was discovered.
pub fn at_height(&self) -> BlockHeight {
self.at_height
}
/// Returns the cause of this error.
pub fn cause(&self) -> &Cause<NoteRef> {
&self.cause
}
}
/// Errors related to chain validation and scanning. /// Errors related to chain validation and scanning.
#[derive(Debug)] #[derive(Debug)]
pub enum Error<WalletError, BlockSourceError, NoteRef> { pub enum Error<WalletError, BlockSourceError> {
/// An error that was produced by wallet operations in the course of scanning the chain. /// An error that was produced by wallet operations in the course of scanning the chain.
Wallet(WalletError), Wallet(WalletError),
@ -141,10 +18,10 @@ pub enum Error<WalletError, BlockSourceError, NoteRef> {
/// A block that was received violated rules related to chain continuity or contained note /// A block that was received violated rules related to chain continuity or contained note
/// commitments that could not be reconciled with the note commitment tree(s) maintained by the /// commitments that could not be reconciled with the note commitment tree(s) maintained by the
/// wallet. /// wallet.
Chain(ChainError<NoteRef>), Scan(ScanError),
} }
impl<WE: fmt::Display, BE: fmt::Display, N: Display> fmt::Display for Error<WE, BE, N> { impl<WE: fmt::Display, BE: fmt::Display> fmt::Display for Error<WE, BE> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self { match &self {
Error::Wallet(e) => { Error::Wallet(e) => {
@ -161,18 +38,17 @@ impl<WE: fmt::Display, BE: fmt::Display, N: Display> fmt::Display for Error<WE,
e e
) )
} }
Error::Chain(err) => { Error::Scan(e) => {
write!(f, "{}", err) write!(f, "Scanning produced the following error: {}", e)
} }
} }
} }
} }
impl<WE, BE, N> error::Error for Error<WE, BE, N> impl<WE, BE> error::Error for Error<WE, BE>
where where
WE: Debug + Display + error::Error + 'static, WE: Debug + Display + error::Error + 'static,
BE: Debug + Display + error::Error + 'static, BE: Debug + Display + error::Error + 'static,
N: Debug + Display,
{ {
fn source(&self) -> Option<&(dyn error::Error + 'static)> { fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match &self { match &self {
@ -183,8 +59,8 @@ where
} }
} }
impl<WE, BSE, N> From<ChainError<N>> for Error<WE, BSE, N> { impl<WE, BSE> From<ScanError> for Error<WE, BSE> {
fn from(e: ChainError<N>) -> Self { fn from(e: ScanError) -> Self {
Error::Chain(e) Error::Scan(e)
} }
} }
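
A minimal sketch of handling the reshaped error type at a call site, assuming this module's `Error` and its `std::fmt` imports are in scope; the response to a `ScanError` shown here (logging and leaving the rewind decision to the caller) is illustrative:

```rust
// Callers that previously matched on `Error::Chain(ChainError { .. })` now receive
// the scanning failure as `Error::Scan(ScanError)`.
fn report<WE: Display, BE: Display>(err: Error<WE, BE>) {
    match err {
        Error::Scan(scan_err) => {
            eprintln!("scan error (a rewind and re-scan may be required): {}", scan_err)
        }
        // Wallet and block-source errors are propagated unchanged.
        other => eprintln!("sync error: {}", other),
    }
}
```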

View File

@ -1,5 +1,6 @@
//! Types for wallet error handling. //! Types for wallet error handling.
use shardtree::ShardTreeError;
use std::error; use std::error;
use std::fmt::{self, Debug, Display}; use std::fmt::{self, Debug, Display};
use zcash_primitives::{ use zcash_primitives::{
@ -20,10 +21,13 @@ use zcash_primitives::{legacy::TransparentAddress, zip32::DiversifierIndex};
/// Errors that can occur as a consequence of wallet operations. /// Errors that can occur as a consequence of wallet operations.
#[derive(Debug)] #[derive(Debug)]
pub enum Error<DataSourceError, SelectionError, FeeError, NoteRef> { pub enum Error<DataSourceError, CommitmentTreeError, SelectionError, FeeError, NoteRef> {
/// An error occurred retrieving data from the underlying data source /// An error occurred retrieving data from the underlying data source
DataSource(DataSourceError), DataSource(DataSourceError),
/// An error in computations involving the note commitment trees.
CommitmentTree(ShardTreeError<CommitmentTreeError>),
/// An error in note selection /// An error in note selection
NoteSelection(SelectionError), NoteSelection(SelectionError),
@ -60,9 +64,10 @@ pub enum Error<DataSourceError, SelectionError, FeeError, NoteRef> {
ChildIndexOutOfRange(DiversifierIndex), ChildIndexOutOfRange(DiversifierIndex),
} }
impl<DE, SE, FE, N> fmt::Display for Error<DE, SE, FE, N> impl<DE, CE, SE, FE, N> fmt::Display for Error<DE, CE, SE, FE, N>
where where
DE: fmt::Display, DE: fmt::Display,
CE: fmt::Display,
SE: fmt::Display, SE: fmt::Display,
FE: fmt::Display, FE: fmt::Display,
N: fmt::Display, N: fmt::Display,
@ -76,6 +81,9 @@ where
e e
) )
} }
Error::CommitmentTree(e) => {
write!(f, "An error occurred in querying or updating a note commitment tree: {}", e)
}
Error::NoteSelection(e) => { Error::NoteSelection(e) => {
write!(f, "Note selection encountered the following error: {}", e) write!(f, "Note selection encountered the following error: {}", e)
} }
@ -120,9 +128,10 @@ where
} }
} }
impl<DE, SE, FE, N> error::Error for Error<DE, SE, FE, N> impl<DE, CE, SE, FE, N> error::Error for Error<DE, CE, SE, FE, N>
where where
DE: Debug + Display + error::Error + 'static, DE: Debug + Display + error::Error + 'static,
CE: Debug + Display + error::Error + 'static,
SE: Debug + Display + error::Error + 'static, SE: Debug + Display + error::Error + 'static,
FE: Debug + Display + 'static, FE: Debug + Display + 'static,
N: Debug + Display, N: Debug + Display,
@ -130,6 +139,7 @@ where
fn source(&self) -> Option<&(dyn error::Error + 'static)> { fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match &self { match &self {
Error::DataSource(e) => Some(e), Error::DataSource(e) => Some(e),
Error::CommitmentTree(e) => Some(e),
Error::NoteSelection(e) => Some(e), Error::NoteSelection(e) => Some(e),
Error::Builder(e) => Some(e), Error::Builder(e) => Some(e),
_ => None, _ => None,
@ -137,19 +147,19 @@ where
} }
} }
impl<DE, SE, FE, N> From<builder::Error<FE>> for Error<DE, SE, FE, N> { impl<DE, CE, SE, FE, N> From<builder::Error<FE>> for Error<DE, CE, SE, FE, N> {
fn from(e: builder::Error<FE>) -> Self { fn from(e: builder::Error<FE>) -> Self {
Error::Builder(e) Error::Builder(e)
} }
} }
impl<DE, SE, FE, N> From<BalanceError> for Error<DE, SE, FE, N> { impl<DE, CE, SE, FE, N> From<BalanceError> for Error<DE, CE, SE, FE, N> {
fn from(e: BalanceError) -> Self { fn from(e: BalanceError) -> Self {
Error::BalanceError(e) Error::BalanceError(e)
} }
} }
impl<DE, SE, FE, N> From<InputSelectorError<DE, SE>> for Error<DE, SE, FE, N> { impl<DE, CE, SE, FE, N> From<InputSelectorError<DE, SE>> for Error<DE, CE, SE, FE, N> {
fn from(e: InputSelectorError<DE, SE>) -> Self { fn from(e: InputSelectorError<DE, SE>) -> Self {
match e { match e {
InputSelectorError::DataSource(e) => Error::DataSource(e), InputSelectorError::DataSource(e) => Error::DataSource(e),
@ -161,18 +171,25 @@ impl<DE, SE, FE, N> From<InputSelectorError<DE, SE>> for Error<DE, SE, FE, N> {
available, available,
required, required,
}, },
InputSelectorError::SyncRequired => Error::ScanRequired,
} }
} }
} }
impl<DE, SE, FE, N> From<sapling::builder::Error> for Error<DE, SE, FE, N> { impl<DE, CE, SE, FE, N> From<sapling::builder::Error> for Error<DE, CE, SE, FE, N> {
fn from(e: sapling::builder::Error) -> Self { fn from(e: sapling::builder::Error) -> Self {
Error::Builder(builder::Error::SaplingBuild(e)) Error::Builder(builder::Error::SaplingBuild(e))
} }
} }
impl<DE, SE, FE, N> From<transparent::builder::Error> for Error<DE, SE, FE, N> { impl<DE, CE, SE, FE, N> From<transparent::builder::Error> for Error<DE, CE, SE, FE, N> {
fn from(e: transparent::builder::Error) -> Self { fn from(e: transparent::builder::Error) -> Self {
Error::Builder(builder::Error::TransparentBuild(e)) Error::Builder(builder::Error::TransparentBuild(e))
} }
} }
impl<DE, CE, SE, FE, N> From<ShardTreeError<CE>> for Error<DE, CE, SE, FE, N> {
fn from(e: ShardTreeError<CE>) -> Self {
Error::CommitmentTree(e)
}
}

View File

@ -1,8 +1,9 @@
use std::convert::Infallible;
use std::fmt::Debug; use std::fmt::Debug;
use std::{convert::Infallible, num::NonZeroU32};
use shardtree::{ShardStore, ShardTree, ShardTreeError};
use zcash_primitives::{ use zcash_primitives::{
consensus::{self, NetworkUpgrade}, consensus::{self, BlockHeight, NetworkUpgrade},
memo::MemoBytes, memo::MemoBytes,
sapling::{ sapling::{
self, self,
@ -23,7 +24,8 @@ use crate::{
address::RecipientAddress, address::RecipientAddress,
data_api::{ data_api::{
error::Error, wallet::input_selection::Proposal, DecryptedTransaction, PoolType, Recipient, error::Error, wallet::input_selection::Proposal, DecryptedTransaction, PoolType, Recipient,
SentTransaction, SentTransactionOutput, WalletWrite, SentTransaction, SentTransactionOutput, WalletCommitmentTrees, WalletRead, WalletWrite,
SAPLING_SHARD_HEIGHT,
}, },
decrypt_transaction, decrypt_transaction,
fees::{self, ChangeValue, DustOutputPolicy}, fees::{self, ChangeValue, DustOutputPolicy},
@ -117,12 +119,13 @@ where
/// can allow the sender to view the resulting notes on the blockchain. /// can allow the sender to view the resulting notes on the blockchain.
/// * `min_confirmations`: The minimum number of confirmations that a previously /// * `min_confirmations`: The minimum number of confirmations that a previously
/// received note must have in the blockchain in order to be considered for being /// received note must have in the blockchain in order to be considered for being
/// spent. A value of 10 confirmations is recommended. /// spent. A value of 10 confirmations is recommended and 0-conf transactions are
/// not supported.
/// ///
/// # Examples /// # Examples
/// ///
/// ``` /// ```
/// # #[cfg(feature = "test-dependencies")] /// # #[cfg(all(feature = "test-dependencies", feature = "local-prover"))]
/// # { /// # {
/// use tempfile::NamedTempFile; /// use tempfile::NamedTempFile;
/// use zcash_primitives::{ /// use zcash_primitives::{
@ -196,11 +199,12 @@ pub fn create_spend_to_address<DbT, ParamsT>(
amount: Amount, amount: Amount,
memo: Option<MemoBytes>, memo: Option<MemoBytes>,
ovk_policy: OvkPolicy, ovk_policy: OvkPolicy,
min_confirmations: u32, min_confirmations: NonZeroU32,
) -> Result< ) -> Result<
DbT::TxRef, DbT::TxRef,
Error< Error<
DbT::Error, <DbT as WalletRead>::Error,
<DbT as WalletCommitmentTrees>::Error,
GreedyInputSelectorError<BalanceError, DbT::NoteRef>, GreedyInputSelectorError<BalanceError, DbT::NoteRef>,
Infallible, Infallible,
DbT::NoteRef, DbT::NoteRef,
@ -208,7 +212,7 @@ pub fn create_spend_to_address<DbT, ParamsT>(
> >
where where
ParamsT: consensus::Parameters + Clone, ParamsT: consensus::Parameters + Clone,
DbT: WalletWrite, DbT: WalletWrite + WalletCommitmentTrees,
DbT::NoteRef: Copy + Eq + Ord, DbT::NoteRef: Copy + Eq + Ord,
{ {
let req = zip321::TransactionRequest::new(vec![Payment { let req = zip321::TransactionRequest::new(vec![Payment {
@ -284,7 +288,8 @@ where
/// can allow the sender to view the resulting notes on the blockchain. /// can allow the sender to view the resulting notes on the blockchain.
/// * `min_confirmations`: The minimum number of confirmations that a previously /// * `min_confirmations`: The minimum number of confirmations that a previously
/// received note must have in the blockchain in order to be considered for being /// received note must have in the blockchain in order to be considered for being
/// spent. A value of 10 confirmations is recommended. /// spent. A value of 10 confirmations is recommended and 0-conf transactions are
/// not supported.
/// ///
/// [`sapling::TxProver`]: zcash_primitives::sapling::prover::TxProver /// [`sapling::TxProver`]: zcash_primitives::sapling::prover::TxProver
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
@ -297,13 +302,19 @@ pub fn spend<DbT, ParamsT, InputsT>(
usk: &UnifiedSpendingKey, usk: &UnifiedSpendingKey,
request: zip321::TransactionRequest, request: zip321::TransactionRequest,
ovk_policy: OvkPolicy, ovk_policy: OvkPolicy,
min_confirmations: u32, min_confirmations: NonZeroU32,
) -> Result< ) -> Result<
DbT::TxRef, DbT::TxRef,
Error<DbT::Error, InputsT::Error, <InputsT::FeeRule as FeeRule>::Error, DbT::NoteRef>, Error<
<DbT as WalletRead>::Error,
<DbT as WalletCommitmentTrees>::Error,
InputsT::Error,
<InputsT::FeeRule as FeeRule>::Error,
DbT::NoteRef,
>,
> >
where where
DbT: WalletWrite, DbT: WalletWrite + WalletCommitmentTrees,
DbT::TxRef: Copy + Debug, DbT::TxRef: Copy + Debug,
DbT::NoteRef: Copy + Eq + Ord, DbT::NoteRef: Copy + Eq + Ord,
ParamsT: consensus::Parameters + Clone, ParamsT: consensus::Parameters + Clone,
@ -323,7 +334,16 @@ where
min_confirmations, min_confirmations,
)?; )?;
create_proposed_transaction(wallet_db, params, prover, usk, ovk_policy, proposal, None) create_proposed_transaction(
wallet_db,
params,
prover,
usk,
ovk_policy,
proposal,
min_confirmations,
None,
)
} }
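
A fragment sketching the new calling convention for `spend`; every binding other than the `min_confirmations` literal (the wallet database, network parameters, prover, input selector, spending key, and payment request) is assumed to already exist in the caller's scope:

```rust
use std::num::NonZeroU32;

// `wallet_db` must now implement WalletCommitmentTrees in addition to WalletWrite,
// and zero-confirmation spends are unrepresentable.
let min_confirmations = NonZeroU32::new(10).expect("10 is nonzero");
let txid = spend(
    &mut wallet_db,
    &params,
    prover,
    &input_selector,
    &usk,
    request,
    OvkPolicy::Sender,
    min_confirmations,
)?;
```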
/// Select transaction inputs, compute fees, and construct a proposal for a transaction /// Select transaction inputs, compute fees, and construct a proposal for a transaction
@ -331,16 +351,22 @@ where
/// [`create_proposed_transaction`]. /// [`create_proposed_transaction`].
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
pub fn propose_transfer<DbT, ParamsT, InputsT>( pub fn propose_transfer<DbT, ParamsT, InputsT, CommitmentTreeErrT>(
wallet_db: &mut DbT, wallet_db: &mut DbT,
params: &ParamsT, params: &ParamsT,
spend_from_account: AccountId, spend_from_account: AccountId,
input_selector: &InputsT, input_selector: &InputsT,
request: zip321::TransactionRequest, request: zip321::TransactionRequest,
min_confirmations: u32, min_confirmations: NonZeroU32,
) -> Result< ) -> Result<
Proposal<InputsT::FeeRule, DbT::NoteRef>, Proposal<InputsT::FeeRule, DbT::NoteRef>,
Error<DbT::Error, InputsT::Error, <InputsT::FeeRule as FeeRule>::Error, DbT::NoteRef>, Error<
DbT::Error,
CommitmentTreeErrT,
InputsT::Error,
<InputsT::FeeRule as FeeRule>::Error,
DbT::NoteRef,
>,
> >
where where
DbT: WalletWrite, DbT: WalletWrite,
@ -348,20 +374,13 @@ where
ParamsT: consensus::Parameters + Clone, ParamsT: consensus::Parameters + Clone,
InputsT: InputSelector<DataSource = DbT>, InputsT: InputSelector<DataSource = DbT>,
{ {
// Target the next block, assuming we are up-to-date.
let (target_height, anchor_height) = wallet_db
.get_target_and_anchor_heights(min_confirmations)
.map_err(Error::DataSource)
.and_then(|x| x.ok_or(Error::ScanRequired))?;
input_selector input_selector
.propose_transaction( .propose_transaction(
params, params,
wallet_db, wallet_db,
spend_from_account, spend_from_account,
anchor_height,
target_height,
request, request,
min_confirmations,
) )
.map_err(Error::from) .map_err(Error::from)
} }
@ -369,16 +388,22 @@ where
#[cfg(feature = "transparent-inputs")] #[cfg(feature = "transparent-inputs")]
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
pub fn propose_shielding<DbT, ParamsT, InputsT>( pub fn propose_shielding<DbT, ParamsT, InputsT, CommitmentTreeErrT>(
wallet_db: &mut DbT, wallet_db: &mut DbT,
params: &ParamsT, params: &ParamsT,
input_selector: &InputsT, input_selector: &InputsT,
shielding_threshold: NonNegativeAmount, shielding_threshold: NonNegativeAmount,
from_addrs: &[TransparentAddress], from_addrs: &[TransparentAddress],
min_confirmations: u32, min_confirmations: NonZeroU32,
) -> Result< ) -> Result<
Proposal<InputsT::FeeRule, DbT::NoteRef>, Proposal<InputsT::FeeRule, DbT::NoteRef>,
Error<DbT::Error, InputsT::Error, <InputsT::FeeRule as FeeRule>::Error, DbT::NoteRef>, Error<
DbT::Error,
CommitmentTreeErrT,
InputsT::Error,
<InputsT::FeeRule as FeeRule>::Error,
DbT::NoteRef,
>,
> >
where where
ParamsT: consensus::Parameters, ParamsT: consensus::Parameters,
@ -386,19 +411,13 @@ where
DbT::NoteRef: Copy + Eq + Ord, DbT::NoteRef: Copy + Eq + Ord,
InputsT: InputSelector<DataSource = DbT>, InputsT: InputSelector<DataSource = DbT>,
{ {
let (target_height, latest_anchor) = wallet_db
.get_target_and_anchor_heights(min_confirmations)
.map_err(Error::DataSource)
.and_then(|x| x.ok_or(Error::ScanRequired))?;
input_selector input_selector
.propose_shielding( .propose_shielding(
params, params,
wallet_db, wallet_db,
shielding_threshold, shielding_threshold,
from_addrs, from_addrs,
latest_anchor, min_confirmations,
target_height,
) )
.map_err(Error::from) .map_err(Error::from)
} }
@ -417,10 +436,20 @@ pub fn create_proposed_transaction<DbT, ParamsT, InputsErrT, FeeRuleT>(
usk: &UnifiedSpendingKey, usk: &UnifiedSpendingKey,
ovk_policy: OvkPolicy, ovk_policy: OvkPolicy,
proposal: Proposal<FeeRuleT, DbT::NoteRef>, proposal: Proposal<FeeRuleT, DbT::NoteRef>,
min_confirmations: NonZeroU32,
change_memo: Option<MemoBytes>, change_memo: Option<MemoBytes>,
) -> Result<DbT::TxRef, Error<DbT::Error, InputsErrT, FeeRuleT::Error, DbT::NoteRef>> ) -> Result<
DbT::TxRef,
Error<
<DbT as WalletRead>::Error,
<DbT as WalletCommitmentTrees>::Error,
InputsErrT,
FeeRuleT::Error,
DbT::NoteRef,
>,
>
where where
DbT: WalletWrite, DbT: WalletWrite + WalletCommitmentTrees,
DbT::TxRef: Copy + Debug, DbT::TxRef: Copy + Debug,
DbT::NoteRef: Copy + Eq + Ord, DbT::NoteRef: Copy + Eq + Ord,
ParamsT: consensus::Parameters + Clone, ParamsT: consensus::Parameters + Clone,
@ -459,14 +488,23 @@ where
// Create the transaction. The type of the proposal ensures that there // Create the transaction. The type of the proposal ensures that there
// are no possible transparent inputs, so we ignore those // are no possible transparent inputs, so we ignore those
let mut builder = Builder::new(params.clone(), proposal.target_height(), None); let mut builder = Builder::new(params.clone(), proposal.min_target_height(), None);
for selected in proposal.sapling_inputs() { wallet_db.with_sapling_tree_mut::<_, _, Error<_, _, _, _, _>>(|sapling_tree| {
let (note, key, merkle_path) = select_key_for_note(selected, usk.sapling(), &dfvk) for selected in proposal.sapling_inputs() {
let (note, key, merkle_path) = select_key_for_note(
sapling_tree,
selected,
usk.sapling(),
&dfvk,
usize::try_from(u32::from(min_confirmations) - 1).unwrap(),
)?
.ok_or(Error::NoteMismatch(selected.note_id))?; .ok_or(Error::NoteMismatch(selected.note_id))?;
builder.add_sapling_spend(key, selected.diversifier, note, merkle_path)?; builder.add_sapling_spend(key, selected.diversifier, note, merkle_path)?;
} }
Ok(())
})?;
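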
#[cfg(feature = "transparent-inputs")] #[cfg(feature = "transparent-inputs")]
let utxos = { let utxos = {
@ -577,7 +615,7 @@ where
tx.sapling_bundle().and_then(|bundle| { tx.sapling_bundle().and_then(|bundle| {
try_sapling_note_decryption( try_sapling_note_decryption(
params, params,
proposal.target_height(), proposal.min_target_height(),
&internal_ivk, &internal_ivk,
&bundle.shielded_outputs()[output_index], &bundle.shielded_outputs()[output_index],
) )
@ -653,8 +691,9 @@ where
/// to the wallet that the wallet can use to improve how it represents those /// to the wallet that the wallet can use to improve how it represents those
/// shielding transactions to the user. /// shielding transactions to the user.
/// * `min_confirmations`: The minimum number of confirmations that a previously /// * `min_confirmations`: The minimum number of confirmations that a previously
/// received UTXO must have in the blockchain in order to be considered for being /// received note must have in the blockchain in order to be considered for being
/// spent. /// spent. A value of 10 confirmations is recommended and 0-conf transactions are
/// not supported.
/// ///
/// [`sapling::TxProver`]: zcash_primitives::sapling::prover::TxProver /// [`sapling::TxProver`]: zcash_primitives::sapling::prover::TxProver
#[cfg(feature = "transparent-inputs")] #[cfg(feature = "transparent-inputs")]
@ -669,14 +708,20 @@ pub fn shield_transparent_funds<DbT, ParamsT, InputsT>(
usk: &UnifiedSpendingKey, usk: &UnifiedSpendingKey,
from_addrs: &[TransparentAddress], from_addrs: &[TransparentAddress],
memo: &MemoBytes, memo: &MemoBytes,
min_confirmations: u32, min_confirmations: NonZeroU32,
) -> Result< ) -> Result<
DbT::TxRef, DbT::TxRef,
Error<DbT::Error, InputsT::Error, <InputsT::FeeRule as FeeRule>::Error, DbT::NoteRef>, Error<
<DbT as WalletRead>::Error,
<DbT as WalletCommitmentTrees>::Error,
InputsT::Error,
<InputsT::FeeRule as FeeRule>::Error,
DbT::NoteRef,
>,
> >
where where
ParamsT: consensus::Parameters, ParamsT: consensus::Parameters,
DbT: WalletWrite, DbT: WalletWrite + WalletCommitmentTrees,
DbT::NoteRef: Copy + Eq + Ord, DbT::NoteRef: Copy + Eq + Ord,
InputsT: InputSelector<DataSource = DbT>, InputsT: InputSelector<DataSource = DbT>,
{ {
@ -696,17 +741,26 @@ where
usk, usk,
OvkPolicy::Sender, OvkPolicy::Sender,
proposal, proposal,
min_confirmations,
Some(memo.clone()), Some(memo.clone()),
) )
} }
fn select_key_for_note<N>( #[allow(clippy::type_complexity)]
fn select_key_for_note<N, S: ShardStore<H = Node, CheckpointId = BlockHeight>>(
commitment_tree: &mut ShardTree<
S,
{ sapling::NOTE_COMMITMENT_TREE_DEPTH },
SAPLING_SHARD_HEIGHT,
>,
selected: &ReceivedSaplingNote<N>, selected: &ReceivedSaplingNote<N>,
extsk: &ExtendedSpendingKey, extsk: &ExtendedSpendingKey,
dfvk: &DiversifiableFullViewingKey, dfvk: &DiversifiableFullViewingKey,
) -> Option<(sapling::Note, ExtendedSpendingKey, sapling::MerklePath)> { checkpoint_depth: usize,
let merkle_path = selected.witness.path().expect("the tree is not empty"); ) -> Result<
Option<(sapling::Note, ExtendedSpendingKey, sapling::MerklePath)>,
ShardTreeError<S::Error>,
> {
// Attempt to reconstruct the note being spent using both the internal and external dfvks // Attempt to reconstruct the note being spent using both the internal and external dfvks
// corresponding to the unified spending key, checking against the witness we are using // corresponding to the unified spending key, checking against the witness we are using
// to spend the note that we've used the correct key. // to spend the note that we've used the correct key.
@ -717,13 +771,16 @@ fn select_key_for_note<N>(
.diversified_change_address(selected.diversifier) .diversified_change_address(selected.diversifier)
.map(|addr| addr.create_note(selected.note_value.into(), selected.rseed)); .map(|addr| addr.create_note(selected.note_value.into(), selected.rseed));
let expected_root = selected.witness.root(); let expected_root = commitment_tree.root_at_checkpoint(checkpoint_depth)?;
external_note let merkle_path = commitment_tree
.witness_caching(selected.note_commitment_tree_position, checkpoint_depth)?;
Ok(external_note
.filter(|n| expected_root == merkle_path.root(Node::from_cmu(&n.cmu()))) .filter(|n| expected_root == merkle_path.root(Node::from_cmu(&n.cmu())))
.map(|n| (n, extsk.clone(), merkle_path.clone())) .map(|n| (n, extsk.clone(), merkle_path.clone()))
.or_else(|| { .or_else(|| {
internal_note internal_note
.filter(|n| expected_root == merkle_path.root(Node::from_cmu(&n.cmu()))) .filter(|n| expected_root == merkle_path.root(Node::from_cmu(&n.cmu())))
.map(|n| (n, extsk.derive_internal(), merkle_path)) .map(|n| (n, extsk.derive_internal(), merkle_path))
}) }))
} }


@ -1,8 +1,9 @@
//! Types related to the process of selecting inputs to be spent given a transaction request. //! Types related to the process of selecting inputs to be spent given a transaction request.
use core::marker::PhantomData; use core::marker::PhantomData;
use std::collections::BTreeSet;
use std::fmt; use std::fmt;
use std::num::NonZeroU32;
use std::{collections::BTreeSet, fmt::Debug};
use zcash_primitives::{ use zcash_primitives::{
consensus::{self, BlockHeight}, consensus::{self, BlockHeight},
@ -35,6 +36,9 @@ pub enum InputSelectorError<DbErrT, SelectorErrT> {
/// Insufficient funds were available to satisfy the payment request that inputs were being /// Insufficient funds were available to satisfy the payment request that inputs were being
/// selected to attempt to satisfy. /// selected to attempt to satisfy.
InsufficientFunds { available: Amount, required: Amount }, InsufficientFunds { available: Amount, required: Amount },
/// The data source does not have enough information to choose an expiry height
/// for the transaction.
SyncRequired,
} }
impl<DE: fmt::Display, SE: fmt::Display> fmt::Display for InputSelectorError<DE, SE> { impl<DE: fmt::Display, SE: fmt::Display> fmt::Display for InputSelectorError<DE, SE> {
@ -59,6 +63,9 @@ impl<DE: fmt::Display, SE: fmt::Display> fmt::Display for InputSelectorError<DE,
i64::from(*available), i64::from(*available),
i64::from(*required) i64::from(*required)
), ),
InputSelectorError::SyncRequired => {
write!(f, "Insufficient chain data is available, sync required.")
}
} }
} }
} }
@ -71,7 +78,8 @@ pub struct Proposal<FeeRuleT, NoteRef> {
sapling_inputs: Vec<ReceivedSaplingNote<NoteRef>>, sapling_inputs: Vec<ReceivedSaplingNote<NoteRef>>,
balance: TransactionBalance, balance: TransactionBalance,
fee_rule: FeeRuleT, fee_rule: FeeRuleT,
target_height: BlockHeight, min_target_height: BlockHeight,
min_anchor_height: BlockHeight,
is_shielding: bool, is_shielding: bool,
} }
@ -97,8 +105,19 @@ impl<FeeRuleT, NoteRef> Proposal<FeeRuleT, NoteRef> {
&self.fee_rule &self.fee_rule
} }
/// Returns the target height for which the proposal was prepared. /// Returns the target height for which the proposal was prepared.
pub fn target_height(&self) -> BlockHeight { ///
self.target_height /// The chain must contain at least this many blocks in order for the proposal to
/// be executed.
pub fn min_target_height(&self) -> BlockHeight {
self.min_target_height
}
/// Returns the anchor height used in preparing the proposal.
///
/// If, at the time that the proposal is executed, the anchor height required to satisfy
/// the minimum confirmation depth is less than this height, the proposal execution
/// API should return an error.
pub fn min_anchor_height(&self) -> BlockHeight {
self.min_anchor_height
} }
/// Returns a flag indicating whether or not the proposed transaction /// Returns a flag indicating whether or not the proposed transaction
/// is exclusively wallet-internal (if it does not involve any external /// is exclusively wallet-internal (if it does not involve any external
@ -108,6 +127,21 @@ impl<FeeRuleT, NoteRef> Proposal<FeeRuleT, NoteRef> {
} }
} }
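A small sketch of the contract documented for `min_anchor_height` above; the helper here is hypothetical and not part of this API, it only restates the rule that execution should be refused when the anchor actually available at execution time falls below the proposal's minimum.

```rust
use zcash_primitives::consensus::BlockHeight;

/// Hypothetical check mirroring the documented contract: if the anchor height
/// available when the proposal is executed is below `min_anchor_height`, the
/// execution API should return an error rather than proceed.
fn anchor_is_sufficient(available_anchor: BlockHeight, min_anchor_height: BlockHeight) -> bool {
    available_anchor >= min_anchor_height
}

fn main() {
    let min_anchor_height = BlockHeight::from_u32(1_000_000);
    assert!(anchor_is_sufficient(BlockHeight::from_u32(1_000_005), min_anchor_height));
    assert!(!anchor_is_sufficient(BlockHeight::from_u32(999_999), min_anchor_height));
}
```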
impl<FeeRuleT, NoteRef> Debug for Proposal<FeeRuleT, NoteRef> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Proposal")
.field("transaction_request", &self.transaction_request)
.field("transparent_inputs", &self.transparent_inputs)
.field("sapling_inputs", &self.sapling_inputs.len())
.field("balance", &self.balance)
//.field("fee_rule", &self.fee_rule)
.field("min_target_height", &self.min_target_height)
.field("min_anchor_height", &self.min_anchor_height)
.field("is_shielding", &self.is_shielding)
.finish_non_exhaustive()
}
}
/// A strategy for selecting transaction inputs and proposing transaction outputs. /// A strategy for selecting transaction inputs and proposing transaction outputs.
/// ///
/// Proposals should include only economically useful inputs, as determined by `Self::FeeRule`; /// Proposals should include only economically useful inputs, as determined by `Self::FeeRule`;
@ -146,9 +180,8 @@ pub trait InputSelector {
params: &ParamsT, params: &ParamsT,
wallet_db: &Self::DataSource, wallet_db: &Self::DataSource,
account: AccountId, account: AccountId,
anchor_height: BlockHeight,
target_height: BlockHeight,
transaction_request: TransactionRequest, transaction_request: TransactionRequest,
min_confirmations: NonZeroU32,
) -> Result< ) -> Result<
Proposal<Self::FeeRule, <<Self as InputSelector>::DataSource as WalletRead>::NoteRef>, Proposal<Self::FeeRule, <<Self as InputSelector>::DataSource as WalletRead>::NoteRef>,
InputSelectorError<<<Self as InputSelector>::DataSource as WalletRead>::Error, Self::Error>, InputSelectorError<<<Self as InputSelector>::DataSource as WalletRead>::Error, Self::Error>,
@ -172,8 +205,7 @@ pub trait InputSelector {
wallet_db: &Self::DataSource, wallet_db: &Self::DataSource,
shielding_threshold: NonNegativeAmount, shielding_threshold: NonNegativeAmount,
source_addrs: &[TransparentAddress], source_addrs: &[TransparentAddress],
confirmed_height: BlockHeight, min_confirmations: NonZeroU32,
target_height: BlockHeight,
) -> Result< ) -> Result<
Proposal<Self::FeeRule, <<Self as InputSelector>::DataSource as WalletRead>::NoteRef>, Proposal<Self::FeeRule, <<Self as InputSelector>::DataSource as WalletRead>::NoteRef>,
InputSelectorError<<<Self as InputSelector>::DataSource as WalletRead>::Error, Self::Error>, InputSelectorError<<<Self as InputSelector>::DataSource as WalletRead>::Error, Self::Error>,
@ -292,13 +324,18 @@ where
params: &ParamsT, params: &ParamsT,
wallet_db: &Self::DataSource, wallet_db: &Self::DataSource,
account: AccountId, account: AccountId,
anchor_height: BlockHeight,
target_height: BlockHeight,
transaction_request: TransactionRequest, transaction_request: TransactionRequest,
min_confirmations: NonZeroU32,
) -> Result<Proposal<Self::FeeRule, DbT::NoteRef>, InputSelectorError<DbT::Error, Self::Error>> ) -> Result<Proposal<Self::FeeRule, DbT::NoteRef>, InputSelectorError<DbT::Error, Self::Error>>
where where
ParamsT: consensus::Parameters, ParamsT: consensus::Parameters,
{ {
// Target the next block, assuming we are up-to-date.
let (target_height, anchor_height) = wallet_db
.get_target_and_anchor_heights(min_confirmations)
.map_err(InputSelectorError::DataSource)
.and_then(|x| x.ok_or(InputSelectorError::SyncRequired))?;
let mut transparent_outputs = vec![]; let mut transparent_outputs = vec![];
let mut sapling_outputs = vec![]; let mut sapling_outputs = vec![];
let mut output_total = Amount::zero(); let mut output_total = Amount::zero();
@ -362,7 +399,8 @@ where
sapling_inputs, sapling_inputs,
balance, balance,
fee_rule: (*self.change_strategy.fee_rule()).clone(), fee_rule: (*self.change_strategy.fee_rule()).clone(),
target_height, min_target_height: target_height,
min_anchor_height: anchor_height,
is_shielding: false, is_shielding: false,
}); });
} }
@ -405,15 +443,19 @@ where
wallet_db: &Self::DataSource, wallet_db: &Self::DataSource,
shielding_threshold: NonNegativeAmount, shielding_threshold: NonNegativeAmount,
source_addrs: &[TransparentAddress], source_addrs: &[TransparentAddress],
confirmed_height: BlockHeight, min_confirmations: NonZeroU32,
target_height: BlockHeight,
) -> Result<Proposal<Self::FeeRule, DbT::NoteRef>, InputSelectorError<DbT::Error, Self::Error>> ) -> Result<Proposal<Self::FeeRule, DbT::NoteRef>, InputSelectorError<DbT::Error, Self::Error>>
where where
ParamsT: consensus::Parameters, ParamsT: consensus::Parameters,
{ {
let (target_height, latest_anchor) = wallet_db
.get_target_and_anchor_heights(min_confirmations)
.map_err(InputSelectorError::DataSource)
.and_then(|x| x.ok_or(InputSelectorError::SyncRequired))?;
let mut transparent_inputs: Vec<WalletTransparentOutput> = source_addrs let mut transparent_inputs: Vec<WalletTransparentOutput> = source_addrs
.iter() .iter()
.map(|taddr| wallet_db.get_unspent_transparent_outputs(taddr, confirmed_height, &[])) .map(|taddr| wallet_db.get_unspent_transparent_outputs(taddr, latest_anchor, &[]))
.collect::<Result<Vec<Vec<_>>, _>>() .collect::<Result<Vec<Vec<_>>, _>>()
.map_err(InputSelectorError::DataSource)? .map_err(InputSelectorError::DataSource)?
.into_iter() .into_iter()
@ -458,7 +500,8 @@ where
sapling_inputs: vec![], sapling_inputs: vec![],
balance, balance,
fee_rule: (*self.change_strategy.fee_rule()).clone(), fee_rule: (*self.change_strategy.fee_rule()).clone(),
target_height, min_target_height: target_height,
min_anchor_height: latest_anchor,
is_shielding: true, is_shielding: true,
}) })
} else { } else {


@ -16,8 +16,8 @@ pub mod fees;
pub mod keys; pub mod keys;
pub mod proto; pub mod proto;
pub mod scan; pub mod scan;
pub mod scanning;
pub mod wallet; pub mod wallet;
pub mod welding_rig;
pub mod zip321; pub mod zip321;
pub use decrypt::{decrypt_transaction, DecryptedOutput, TransferType}; pub use decrypt::{decrypt_transaction, DecryptedOutput, TransferType};


@ -1,3 +1,14 @@
/// ChainMetadata represents information about the state of the chain as of a given block.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ChainMetadata {
/// the size of the Sapling note commitment tree as of the end of this block
#[prost(uint32, tag = "1")]
pub sapling_commitment_tree_size: u32,
/// the size of the Orchard note commitment tree as of the end of this block
#[prost(uint32, tag = "2")]
pub orchard_commitment_tree_size: u32,
}
/// CompactBlock is a packaging of ONLY the data from a block that's needed to: /// CompactBlock is a packaging of ONLY the data from a block that's needed to:
/// 1. Detect a payment to your shielded Sapling address /// 1. Detect a payment to your shielded Sapling address
/// 2. Detect a spend of your shielded Sapling notes /// 2. Detect a spend of your shielded Sapling notes
@ -26,6 +37,9 @@ pub struct CompactBlock {
/// zero or more compact transactions from this block /// zero or more compact transactions from this block
#[prost(message, repeated, tag = "7")] #[prost(message, repeated, tag = "7")]
pub vtx: ::prost::alloc::vec::Vec<CompactTx>, pub vtx: ::prost::alloc::vec::Vec<CompactTx>,
/// information about the state of the chain as of this block
#[prost(message, optional, tag = "8")]
pub chain_metadata: ::core::option::Option<ChainMetadata>,
} }
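A sketch of populating the new field, assuming the prost-generated types shown above; the scanner treats a zero or absent size as "unknown", since zero is the protobuf default.

```rust
use zcash_client_backend::proto::compact_formats::{ChainMetadata, CompactBlock};

fn main() {
    // A block whose serving node reports note commitment tree sizes as of the
    // end of the block.
    let block = CompactBlock {
        height: 1_000_000,
        chain_metadata: Some(ChainMetadata {
            sapling_commitment_tree_size: 1_234,
            orchard_commitment_tree_size: 567,
        }),
        ..Default::default()
    };

    // A zero (protobuf default) or missing value is treated as "size unknown".
    let sapling_size = block
        .chain_metadata
        .as_ref()
        .map(|m| m.sapling_commitment_tree_size)
        .filter(|s| *s != 0);
    assert_eq!(sapling_size, Some(1_234));
}
```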
/// CompactTx contains the minimum information for a wallet to know if this transaction /// CompactTx contains the minimum information for a wallet to know if this transaction
/// is relevant to it (either pays to it or spends from it) via shielded elements /// is relevant to it (either pays to it or spends from it) via shielded elements


@ -1,21 +1,27 @@
//! Tools for scanning a compact representation of the Zcash block chain. //! Tools for scanning a compact representation of the Zcash block chain.
//!
//! TODO: rename this module to `block_scanner`
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
use std::convert::TryFrom; use std::convert::TryFrom;
use std::fmt::{self, Debug};
use incrementalmerkletree::{Position, Retention};
use subtle::{ConditionallySelectable, ConstantTimeEq, CtOption}; use subtle::{ConditionallySelectable, ConstantTimeEq, CtOption};
use zcash_note_encryption::batch; use zcash_note_encryption::batch;
use zcash_primitives::consensus::BlockHeight;
use zcash_primitives::{ use zcash_primitives::{
consensus, consensus,
sapling::{ sapling::{
self, self,
note_encryption::{PreparedIncomingViewingKey, SaplingDomain}, note_encryption::{PreparedIncomingViewingKey, SaplingDomain},
Node, Note, Nullifier, NullifierDerivingKey, SaplingIvk, SaplingIvk,
}, },
transaction::components::sapling::CompactOutputDescription, transaction::components::sapling::CompactOutputDescription,
zip32::{sapling::DiversifiableFullViewingKey, AccountId, Scope}, zip32::{sapling::DiversifiableFullViewingKey, AccountId, Scope},
}; };
use crate::data_api::{BlockMetadata, ScannedBlock};
use crate::{ use crate::{
proto::compact_formats::CompactBlock, proto::compact_formats::CompactBlock,
scan::{Batch, BatchRunner, Tasks}, scan::{Batch, BatchRunner, Tasks},
@ -34,7 +40,7 @@ use crate::{
/// nullifier for the note can also be obtained. /// nullifier for the note can also be obtained.
/// ///
/// [`CompactSaplingOutput`]: crate::proto::compact_formats::CompactSaplingOutput /// [`CompactSaplingOutput`]: crate::proto::compact_formats::CompactSaplingOutput
/// [`scan_block`]: crate::welding_rig::scan_block /// [`scan_block`]: crate::scanning::scan_block
pub trait ScanningKey { pub trait ScanningKey {
/// The type representing the scope of the scanning key. /// The type representing the scope of the scanning key.
type Scope: Clone + Eq + std::hash::Hash + Send + 'static; type Scope: Clone + Eq + std::hash::Hash + Send + 'static;
@ -56,16 +62,13 @@ pub trait ScanningKey {
/// IVK-based implementations of this trait cannot successfully derive /// IVK-based implementations of this trait cannot successfully derive
/// nullifiers, in which case `Self::Nf` should be set to the unit type /// nullifiers, in which case `Self::Nf` should be set to the unit type
/// and this function is a no-op. /// and this function is a no-op.
fn sapling_nf( fn sapling_nf(key: &Self::SaplingNk, note: &sapling::Note, note_position: Position)
key: &Self::SaplingNk, -> Self::Nf;
note: &Note,
witness: &sapling::IncrementalWitness,
) -> Self::Nf;
} }
impl ScanningKey for DiversifiableFullViewingKey { impl ScanningKey for DiversifiableFullViewingKey {
type Scope = Scope; type Scope = Scope;
type SaplingNk = NullifierDerivingKey; type SaplingNk = sapling::NullifierDerivingKey;
type SaplingKeys = [(Self::Scope, SaplingIvk, Self::SaplingNk); 2]; type SaplingKeys = [(Self::Scope, SaplingIvk, Self::SaplingNk); 2];
type Nf = sapling::Nullifier; type Nf = sapling::Nullifier;
@ -84,16 +87,8 @@ impl ScanningKey for DiversifiableFullViewingKey {
] ]
} }
fn sapling_nf( fn sapling_nf(key: &Self::SaplingNk, note: &sapling::Note, position: Position) -> Self::Nf {
key: &Self::SaplingNk, note.nf(key, position.into())
note: &Note,
witness: &sapling::IncrementalWitness,
) -> Self::Nf {
note.nf(
key,
u64::try_from(witness.position())
.expect("Sapling note commitment tree position must fit into a u64"),
)
} }
} }
@ -111,7 +106,45 @@ impl ScanningKey for SaplingIvk {
[((), self.clone(), ())] [((), self.clone(), ())]
} }
fn sapling_nf(_key: &Self::SaplingNk, _note: &Note, _witness: &sapling::IncrementalWitness) {} fn sapling_nf(_key: &Self::SaplingNk, _note: &sapling::Note, _position: Position) {}
}
/// Errors that may occur in chain scanning
#[derive(Copy, Clone, Debug)]
pub enum ScanError {
/// The hash of the parent block given by a proposed new chain tip does not match the hash of
/// the current chain tip.
PrevHashMismatch { at_height: BlockHeight },
/// The block height field of the proposed new chain tip is not equal to the height of the
/// previous chain tip + 1. This variant stores a copy of the incorrect height value for
/// reporting purposes.
BlockHeightDiscontinuity {
previous_tip: BlockHeight,
new_height: BlockHeight,
},
/// The size of the Sapling note commitment tree was not provided as part of a [`CompactBlock`]
/// being scanned, making it impossible to construct the nullifier for a detected note.
SaplingTreeSizeUnknown { at_height: BlockHeight },
}
impl fmt::Display for ScanError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self {
ScanError::PrevHashMismatch { at_height } => write!(
f,
"The parent hash of proposed block does not correspond to the block hash at height {}.",
at_height
),
ScanError::BlockHeightDiscontinuity { previous_tip, new_height } => {
write!(f, "Block height discontinuity at height {}; next height is : {}", previous_tip, new_height)
}
ScanError::SaplingTreeSizeUnknown { at_height } => {
write!(f, "Unable to determine Sapling note commitment tree size at height {}", at_height)
}
}
}
} }
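A hedged sketch of how a caller might react to each `ScanError` variant; the suggested recovery actions are assumptions for illustration, not guarantees made by this API.

```rust
use zcash_client_backend::scanning::ScanError;

fn describe_recovery(err: &ScanError) -> &'static str {
    match err {
        // A parent-hash mismatch means the block does not extend the chain the
        // wallet has already seen, i.e. a reorg: rewind before rescanning.
        ScanError::PrevHashMismatch { .. } => "reorg detected; rewind and rescan",
        // A height gap means blocks were fed out of order or some were missed.
        ScanError::BlockHeightDiscontinuity { .. } => "non-contiguous blocks; refetch the range",
        // Nullifiers cannot be derived without the tree size, so the block
        // source must supply `ChainMetadata` (or prior block metadata).
        ScanError::SaplingTreeSizeUnknown { .. } => "chain metadata required from the block source",
    }
}

fn main() {
    let err = ScanError::SaplingTreeSizeUnknown {
        at_height: 1_000_000u32.into(),
    };
    println!("{}: {}", err, describe_recovery(&err));
}
```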
/// Scans a [`CompactBlock`] with a set of [`ScanningKey`]s. /// Scans a [`CompactBlock`] with a set of [`ScanningKey`]s.
@ -132,7 +165,7 @@ impl ScanningKey for SaplingIvk {
/// [`ExtendedFullViewingKey`]: zcash_primitives::zip32::ExtendedFullViewingKey /// [`ExtendedFullViewingKey`]: zcash_primitives::zip32::ExtendedFullViewingKey
/// [`SaplingIvk`]: zcash_primitives::sapling::SaplingIvk /// [`SaplingIvk`]: zcash_primitives::sapling::SaplingIvk
/// [`CompactBlock`]: crate::proto::compact_formats::CompactBlock /// [`CompactBlock`]: crate::proto::compact_formats::CompactBlock
/// [`ScanningKey`]: crate::welding_rig::ScanningKey /// [`ScanningKey`]: crate::scanning::ScanningKey
/// [`CommitmentTree`]: zcash_primitives::sapling::CommitmentTree /// [`CommitmentTree`]: zcash_primitives::sapling::CommitmentTree
/// [`IncrementalWitness`]: zcash_primitives::sapling::IncrementalWitness /// [`IncrementalWitness`]: zcash_primitives::sapling::IncrementalWitness
/// [`WalletSaplingOutput`]: crate::wallet::WalletSaplingOutput /// [`WalletSaplingOutput`]: crate::wallet::WalletSaplingOutput
@ -141,17 +174,15 @@ pub fn scan_block<P: consensus::Parameters + Send + 'static, K: ScanningKey>(
params: &P, params: &P,
block: CompactBlock, block: CompactBlock,
vks: &[(&AccountId, &K)], vks: &[(&AccountId, &K)],
nullifiers: &[(AccountId, Nullifier)], sapling_nullifiers: &[(AccountId, sapling::Nullifier)],
tree: &mut sapling::CommitmentTree, prior_block_metadata: Option<&BlockMetadata>,
existing_witnesses: &mut [&mut sapling::IncrementalWitness], ) -> Result<ScannedBlock<K::Nf>, ScanError> {
) -> Vec<WalletTx<K::Nf>> {
scan_block_with_runner::<_, _, ()>( scan_block_with_runner::<_, _, ()>(
params, params,
block, block,
vks, vks,
nullifiers, sapling_nullifiers,
tree, prior_block_metadata,
existing_witnesses,
None, None,
) )
} }
@ -202,21 +233,66 @@ pub(crate) fn scan_block_with_runner<
params: &P, params: &P,
block: CompactBlock, block: CompactBlock,
vks: &[(&AccountId, &K)], vks: &[(&AccountId, &K)],
nullifiers: &[(AccountId, Nullifier)], nullifiers: &[(AccountId, sapling::Nullifier)],
tree: &mut sapling::CommitmentTree, prior_block_metadata: Option<&BlockMetadata>,
existing_witnesses: &mut [&mut sapling::IncrementalWitness],
mut batch_runner: Option<&mut TaggedBatchRunner<P, K::Scope, T>>, mut batch_runner: Option<&mut TaggedBatchRunner<P, K::Scope, T>>,
) -> Vec<WalletTx<K::Nf>> { ) -> Result<ScannedBlock<K::Nf>, ScanError> {
let mut wtxs: Vec<WalletTx<K::Nf>> = vec![]; let mut wtxs: Vec<WalletTx<K::Nf>> = vec![];
let block_height = block.height(); let mut sapling_note_commitments: Vec<(sapling::Node, Retention<BlockHeight>)> = vec![];
let block_hash = block.hash(); let cur_height = block.height();
let cur_hash = block.hash();
for tx in block.vtx.into_iter() { if let Some(prev) = prior_block_metadata {
if cur_height != prev.block_height() + 1 {
return Err(ScanError::BlockHeightDiscontinuity {
previous_tip: prev.block_height(),
new_height: cur_height,
});
}
if block.prev_hash() != prev.block_hash() {
return Err(ScanError::PrevHashMismatch {
at_height: cur_height,
});
}
}
// It's possible to make progress without a Sapling tree position if we don't have any Sapling
// notes in the block, since we only use the position for constructing nullifiers for our own
// received notes. Thus, we allow it to be optional here, and only produce an error if we try
// to use it. `block.sapling_commitment_tree_size` is expected to be correct as of the end of
// the block, and we can't have a note of ours in a block with no outputs, so treating the zero
// default value from the protobuf as `None` is always correct.
let mut sapling_commitment_tree_size = block
.chain_metadata
.as_ref()
.and_then(|m| {
if m.sapling_commitment_tree_size == 0 {
None
} else {
let block_note_count: u32 = block
.vtx
.iter()
.map(|tx| {
u32::try_from(tx.outputs.len()).expect("output count cannot exceed a u32")
})
.sum();
Some(m.sapling_commitment_tree_size - block_note_count)
}
})
.or_else(|| prior_block_metadata.map(|m| m.sapling_tree_size()))
.ok_or(ScanError::SaplingTreeSizeUnknown {
at_height: cur_height,
})?;
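A toy restatement of the bookkeeping above, with made-up numbers: the protobuf reports the tree size as of the end of the block, so the scanner subtracts the block's own output count to recover the size at the start of the block; each output's absolute position is then the running size plus its index within its transaction.

```rust
fn main() {
    // Values are illustrative only.
    let end_of_block_tree_size: u32 = 1_234; // from `ChainMetadata`
    let outputs_in_block: u32 = 4; // total Sapling outputs across the block

    // Size of the Sapling note commitment tree just before this block.
    let mut running_tree_size = end_of_block_tree_size - outputs_in_block;
    assert_eq!(running_tree_size, 1_230);

    // Within a transaction, output `i` occupies position `running_tree_size + i`;
    // after the transaction, the running size advances by its output count.
    let tx_output_counts = [3u32, 1];
    let mut positions = Vec::new();
    for tx_outputs in tx_output_counts {
        for i in 0..tx_outputs {
            positions.push(u64::from(running_tree_size + i));
        }
        running_tree_size += tx_outputs;
    }
    assert_eq!(positions, vec![1_230u64, 1_231, 1_232, 1_233]);
    assert_eq!(running_tree_size, end_of_block_tree_size);
}
```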
let compact_block_tx_count = block.vtx.len();
for (tx_idx, tx) in block.vtx.into_iter().enumerate() {
let txid = tx.txid(); let txid = tx.txid();
let index = tx.index as usize;
// Check for spent notes // Check for spent notes. The only step that is not constant-time is
// The only step that is not constant-time is the filter() at the end. // the filter() at the end.
// TODO: However, this is O(|nullifiers| * |notes|); does using
// constant-time operations here really make sense?
let shielded_spends: Vec<_> = tx let shielded_spends: Vec<_> = tx
.spends .spends
.into_iter() .into_iter()
@ -248,25 +324,14 @@ pub(crate) fn scan_block_with_runner<
// Check for incoming notes while incrementing tree and witnesses // Check for incoming notes while incrementing tree and witnesses
let mut shielded_outputs: Vec<WalletSaplingOutput<K::Nf>> = vec![]; let mut shielded_outputs: Vec<WalletSaplingOutput<K::Nf>> = vec![];
let tx_outputs_len = u32::try_from(tx.outputs.len()).unwrap();
{ {
// Grab mutable references to new witnesses from previous transactions
// in this block so that we can update them. Scoped so we don't hold
// mutable references to wtxs for too long.
let mut block_witnesses: Vec<_> = wtxs
.iter_mut()
.flat_map(|tx| {
tx.sapling_outputs
.iter_mut()
.map(|output| output.witness_mut())
})
.collect();
let decoded = &tx let decoded = &tx
.outputs .outputs
.into_iter() .into_iter()
.map(|output| { .map(|output| {
( (
SaplingDomain::for_height(params.clone(), block_height), SaplingDomain::for_height(params.clone(), cur_height),
CompactOutputDescription::try_from(output) CompactOutputDescription::try_from(output)
.expect("Invalid output found in compact block decoding."), .expect("Invalid output found in compact block decoding."),
) )
@ -283,7 +348,7 @@ pub(crate) fn scan_block_with_runner<
}) })
.collect::<HashMap<_, _>>(); .collect::<HashMap<_, _>>();
let mut decrypted = runner.collect_results(block_hash, txid); let mut decrypted = runner.collect_results(cur_hash, txid);
(0..decoded.len()) (0..decoded.len())
.map(|i| { .map(|i| {
decrypted.remove(&(txid, i)).map(|d_note| { decrypted.remove(&(txid, i)).map(|d_note| {
@ -292,7 +357,7 @@ pub(crate) fn scan_block_with_runner<
"The batch runner and scan_block must use the same set of IVKs.", "The batch runner and scan_block must use the same set of IVKs.",
); );
((d_note.note, d_note.recipient), a, (*nk).clone()) (d_note.note, a, (*nk).clone())
}) })
}) })
.collect() .collect()
@ -312,40 +377,33 @@ pub(crate) fn scan_block_with_runner<
.map(PreparedIncomingViewingKey::new) .map(PreparedIncomingViewingKey::new)
.collect::<Vec<_>>(); .collect::<Vec<_>>();
batch::try_compact_note_decryption(&ivks, decoded) batch::try_compact_note_decryption(&ivks, &decoded[..])
.into_iter() .into_iter()
.map(|v| { .map(|v| {
v.map(|(note_data, ivk_idx)| { v.map(|((note, _), ivk_idx)| {
let (account, _, nk) = &vks[ivk_idx]; let (account, _, nk) = &vks[ivk_idx];
(note_data, *account, (*nk).clone()) (note, *account, (*nk).clone())
}) })
}) })
.collect() .collect()
}; };
for (index, ((_, output), dec_output)) in decoded.iter().zip(decrypted).enumerate() { for (output_idx, ((_, output), dec_output)) in decoded.iter().zip(decrypted).enumerate()
// Grab mutable references to new witnesses from previous outputs {
// in this transaction so that we can update them. Scoped so we // Collect block note commitments
// don't hold mutable references to shielded_outputs for too long. let node = sapling::Node::from_cmu(&output.cmu);
let new_witnesses: Vec<_> = shielded_outputs let is_checkpoint =
.iter_mut() output_idx + 1 == decoded.len() && tx_idx + 1 == compact_block_tx_count;
.map(|out| out.witness_mut()) let retention = match (dec_output.is_some(), is_checkpoint) {
.collect(); (is_marked, true) => Retention::Checkpoint {
id: cur_height,
is_marked,
},
(true, false) => Retention::Marked,
(false, false) => Retention::Ephemeral,
};
// Increment tree and witnesses if let Some((note, account, nk)) = dec_output {
let node = Node::from_cmu(&output.cmu);
for witness in &mut *existing_witnesses {
witness.append(node).unwrap();
}
for witness in &mut block_witnesses {
witness.append(node).unwrap();
}
for witness in new_witnesses {
witness.append(node).unwrap();
}
tree.append(node).unwrap();
if let Some(((note, _), account, nk)) = dec_output {
// A note is marked as "change" if the account that received it // A note is marked as "change" if the account that received it
// also spent notes in the same transaction. This will catch, // also spent notes in the same transaction. This will catch,
// for instance: // for instance:
@ -353,34 +411,45 @@ pub(crate) fn scan_block_with_runner<
// - Notes created by consolidation transactions. // - Notes created by consolidation transactions.
// - Notes sent from one account to itself. // - Notes sent from one account to itself.
let is_change = spent_from_accounts.contains(&account); let is_change = spent_from_accounts.contains(&account);
let witness = sapling::IncrementalWitness::from_tree(tree.clone()); let note_commitment_tree_position = Position::from(u64::from(
let nf = K::sapling_nf(&nk, &note, &witness); sapling_commitment_tree_size + u32::try_from(output_idx).unwrap(),
));
let nf = K::sapling_nf(&nk, &note, note_commitment_tree_position);
shielded_outputs.push(WalletSaplingOutput::from_parts( shielded_outputs.push(WalletSaplingOutput::from_parts(
index, output_idx,
output.cmu, output.cmu,
output.ephemeral_key.clone(), output.ephemeral_key.clone(),
account, account,
note, note,
is_change, is_change,
witness, note_commitment_tree_position,
nf, nf,
)) ));
} }
sapling_note_commitments.push((node, retention));
} }
} }
if !(shielded_spends.is_empty() && shielded_outputs.is_empty()) { if !(shielded_spends.is_empty() && shielded_outputs.is_empty()) {
wtxs.push(WalletTx { wtxs.push(WalletTx {
txid, txid,
index, index: tx.index as usize,
sapling_spends: shielded_spends, sapling_spends: shielded_spends,
sapling_outputs: shielded_outputs, sapling_outputs: shielded_outputs,
}); });
} }
sapling_commitment_tree_size += tx_outputs_len;
} }
wtxs Ok(ScannedBlock::from_parts(
BlockMetadata::from_parts(cur_height, cur_hash, sapling_commitment_tree_size),
block.time,
wtxs,
sapling_note_commitments,
))
} }
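The retention rule applied while scanning can be stated on its own. This standalone restatement (not part of the API) shows which commitments the wallet's note commitment tree needs to keep: decrypted notes are marked so witnesses can be computed for them, the final commitment in each block is a checkpoint so the wallet can rewind to block boundaries, and everything else is ephemeral.

```rust
use incrementalmerkletree::Retention;
use zcash_primitives::consensus::BlockHeight;

/// Restatement of the retention rule used in `scan_block_with_runner`.
fn retention_for(decrypted: bool, last_in_block: bool, height: BlockHeight) -> Retention<BlockHeight> {
    match (decrypted, last_in_block) {
        (is_marked, true) => Retention::Checkpoint { id: height, is_marked },
        (true, false) => Retention::Marked,
        (false, false) => Retention::Ephemeral,
    }
}

fn main() {
    let height = BlockHeight::from_u32(1_000);
    assert_eq!(retention_for(false, false, height), Retention::Ephemeral);
    assert_eq!(retention_for(true, false, height), Retention::Marked);
    assert_eq!(
        retention_for(true, true, height),
        Retention::Checkpoint { id: height, is_marked: true }
    );
}
```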
#[cfg(test)] #[cfg(test)]
@ -389,25 +458,29 @@ mod tests {
ff::{Field, PrimeField}, ff::{Field, PrimeField},
GroupEncoding, GroupEncoding,
}; };
use incrementalmerkletree::{Position, Retention};
use rand_core::{OsRng, RngCore}; use rand_core::{OsRng, RngCore};
use zcash_note_encryption::Domain; use zcash_note_encryption::Domain;
use zcash_primitives::{ use zcash_primitives::{
block::BlockHash,
consensus::{BlockHeight, Network}, consensus::{BlockHeight, Network},
constants::SPENDING_KEY_GENERATOR, constants::SPENDING_KEY_GENERATOR,
memo::MemoBytes, memo::MemoBytes,
sapling::{ sapling::{
self,
note_encryption::{sapling_note_encryption, PreparedIncomingViewingKey, SaplingDomain}, note_encryption::{sapling_note_encryption, PreparedIncomingViewingKey, SaplingDomain},
util::generate_random_rseed, util::generate_random_rseed,
value::NoteValue, value::NoteValue,
CommitmentTree, Note, Nullifier, SaplingIvk, Nullifier, SaplingIvk,
}, },
transaction::components::Amount, transaction::components::Amount,
zip32::{AccountId, DiversifiableFullViewingKey, ExtendedSpendingKey}, zip32::{AccountId, DiversifiableFullViewingKey, ExtendedSpendingKey},
}; };
use crate::{ use crate::{
data_api::BlockMetadata,
proto::compact_formats::{ proto::compact_formats::{
CompactBlock, CompactSaplingOutput, CompactSaplingSpend, CompactTx, self as compact, CompactBlock, CompactSaplingOutput, CompactSaplingSpend, CompactTx,
}, },
scan::BatchRunner, scan::BatchRunner,
}; };
@ -449,19 +522,24 @@ mod tests {
/// Create a fake CompactBlock at the given height, with a transaction containing a /// Create a fake CompactBlock at the given height, with a transaction containing a
/// single spend of the given nullifier and a single output paying the given address. /// single spend of the given nullifier and a single output paying the given address.
/// Returns the CompactBlock. /// Returns the CompactBlock.
///
/// Set `initial_sapling_tree_size` to `None` to simulate a `CompactBlock` retrieved
/// from a `lightwalletd` that is not currently tracking note commitment tree sizes.
fn fake_compact_block( fn fake_compact_block(
height: BlockHeight, height: BlockHeight,
prev_hash: BlockHash,
nf: Nullifier, nf: Nullifier,
dfvk: &DiversifiableFullViewingKey, dfvk: &DiversifiableFullViewingKey,
value: Amount, value: Amount,
tx_after: bool, tx_after: bool,
initial_sapling_tree_size: Option<u32>,
) -> CompactBlock { ) -> CompactBlock {
let to = dfvk.default_address().1; let to = dfvk.default_address().1;
// Create a fake Note for the account // Create a fake Note for the account
let mut rng = OsRng; let mut rng = OsRng;
let rseed = generate_random_rseed(&Network::TestNetwork, height, &mut rng); let rseed = generate_random_rseed(&Network::TestNetwork, height, &mut rng);
let note = Note::from_parts(to, NoteValue::from_raw(value.into()), rseed); let note = sapling::Note::from_parts(to, NoteValue::from_raw(value.into()), rseed);
let encryptor = sapling_note_encryption::<_, Network>( let encryptor = sapling_note_encryption::<_, Network>(
Some(dfvk.fvk().ovk), Some(dfvk.fvk().ovk),
note.clone(), note.clone(),
@ -481,6 +559,7 @@ mod tests {
rng.fill_bytes(&mut hash); rng.fill_bytes(&mut hash);
hash hash
}, },
prev_hash: prev_hash.0.to_vec(),
height: height.into(), height: height.into(),
..Default::default() ..Default::default()
}; };
@ -514,6 +593,15 @@ mod tests {
cb.vtx.push(tx); cb.vtx.push(tx);
} }
cb.chain_metadata = initial_sapling_tree_size.map(|s| compact::ChainMetadata {
sapling_commitment_tree_size: s + cb
.vtx
.iter()
.map(|tx| tx.outputs.len() as u32)
.sum::<u32>(),
..Default::default()
});
cb cb
} }
@ -526,14 +614,15 @@ mod tests {
let cb = fake_compact_block( let cb = fake_compact_block(
1u32.into(), 1u32.into(),
BlockHash([0; 32]),
Nullifier([0; 32]), Nullifier([0; 32]),
&dfvk, &dfvk,
Amount::from_u64(5).unwrap(), Amount::from_u64(5).unwrap(),
false, false,
None,
); );
assert_eq!(cb.vtx.len(), 2); assert_eq!(cb.vtx.len(), 2);
let mut tree = CommitmentTree::empty();
let mut batch_runner = if scan_multithreaded { let mut batch_runner = if scan_multithreaded {
let mut runner = BatchRunner::<_, _, _, ()>::new( let mut runner = BatchRunner::<_, _, _, ()>::new(
10, 10,
@ -551,15 +640,20 @@ mod tests {
None None
}; };
let txs = scan_block_with_runner( let scanned_block = scan_block_with_runner(
&Network::TestNetwork, &Network::TestNetwork,
cb, cb,
&[(&account, &dfvk)], &[(&account, &dfvk)],
&[], &[],
&mut tree, Some(&BlockMetadata::from_parts(
&mut [], BlockHeight::from(0),
BlockHash([0u8; 32]),
0,
)),
batch_runner.as_mut(), batch_runner.as_mut(),
); )
.unwrap();
let txs = scanned_block.transactions();
assert_eq!(txs.len(), 1); assert_eq!(txs.len(), 1);
let tx = &txs[0]; let tx = &txs[0];
@ -569,9 +663,26 @@ mod tests {
assert_eq!(tx.sapling_outputs[0].index(), 0); assert_eq!(tx.sapling_outputs[0].index(), 0);
assert_eq!(tx.sapling_outputs[0].account(), account); assert_eq!(tx.sapling_outputs[0].account(), account);
assert_eq!(tx.sapling_outputs[0].note().value().inner(), 5); assert_eq!(tx.sapling_outputs[0].note().value().inner(), 5);
assert_eq!(
tx.sapling_outputs[0].note_commitment_tree_position(),
Position::from(1)
);
// Check that the witness root matches assert_eq!(scanned_block.metadata().sapling_tree_size(), 2);
assert_eq!(tx.sapling_outputs[0].witness().root(), tree.root()); assert_eq!(
scanned_block
.sapling_commitments()
.iter()
.map(|(_, retention)| *retention)
.collect::<Vec<_>>(),
vec![
Retention::Ephemeral,
Retention::Checkpoint {
id: scanned_block.height(),
is_marked: true
}
]
);
} }
go(false); go(false);
@ -587,14 +698,15 @@ mod tests {
let cb = fake_compact_block( let cb = fake_compact_block(
1u32.into(), 1u32.into(),
BlockHash([0; 32]),
Nullifier([0; 32]), Nullifier([0; 32]),
&dfvk, &dfvk,
Amount::from_u64(5).unwrap(), Amount::from_u64(5).unwrap(),
true, true,
Some(0),
); );
assert_eq!(cb.vtx.len(), 3); assert_eq!(cb.vtx.len(), 3);
let mut tree = CommitmentTree::empty();
let mut batch_runner = if scan_multithreaded { let mut batch_runner = if scan_multithreaded {
let mut runner = BatchRunner::<_, _, _, ()>::new( let mut runner = BatchRunner::<_, _, _, ()>::new(
10, 10,
@ -612,15 +724,16 @@ mod tests {
None None
}; };
let txs = scan_block_with_runner( let scanned_block = scan_block_with_runner(
&Network::TestNetwork, &Network::TestNetwork,
cb, cb,
&[(&AccountId::from(0), &dfvk)], &[(&AccountId::from(0), &dfvk)],
&[], &[],
&mut tree, None,
&mut [],
batch_runner.as_mut(), batch_runner.as_mut(),
); )
.unwrap();
let txs = scanned_block.transactions();
assert_eq!(txs.len(), 1); assert_eq!(txs.len(), 1);
let tx = &txs[0]; let tx = &txs[0];
@ -631,8 +744,21 @@ mod tests {
assert_eq!(tx.sapling_outputs[0].account(), AccountId::from(0)); assert_eq!(tx.sapling_outputs[0].account(), AccountId::from(0));
assert_eq!(tx.sapling_outputs[0].note().value().inner(), 5); assert_eq!(tx.sapling_outputs[0].note().value().inner(), 5);
// Check that the witness root matches assert_eq!(
assert_eq!(tx.sapling_outputs[0].witness().root(), tree.root()); scanned_block
.sapling_commitments()
.iter()
.map(|(_, retention)| *retention)
.collect::<Vec<_>>(),
vec![
Retention::Ephemeral,
Retention::Marked,
Retention::Checkpoint {
id: scanned_block.height(),
is_marked: false
}
]
);
} }
go(false); go(false);
@ -646,19 +772,21 @@ mod tests {
let nf = Nullifier([7; 32]); let nf = Nullifier([7; 32]);
let account = AccountId::from(12); let account = AccountId::from(12);
let cb = fake_compact_block(1u32.into(), nf, &dfvk, Amount::from_u64(5).unwrap(), false); let cb = fake_compact_block(
1u32.into(),
BlockHash([0; 32]),
nf,
&dfvk,
Amount::from_u64(5).unwrap(),
false,
Some(0),
);
assert_eq!(cb.vtx.len(), 2); assert_eq!(cb.vtx.len(), 2);
let vks: Vec<(&AccountId, &SaplingIvk)> = vec![]; let vks: Vec<(&AccountId, &SaplingIvk)> = vec![];
let mut tree = CommitmentTree::empty(); let scanned_block =
let txs = scan_block( scan_block(&Network::TestNetwork, cb, &vks[..], &[(account, nf)], None).unwrap();
&Network::TestNetwork, let txs = scanned_block.transactions();
cb,
&vks[..],
&[(account, nf)],
&mut tree,
&mut [],
);
assert_eq!(txs.len(), 1); assert_eq!(txs.len(), 1);
let tx = &txs[0]; let tx = &txs[0];
@ -668,5 +796,20 @@ mod tests {
assert_eq!(tx.sapling_spends[0].index(), 0); assert_eq!(tx.sapling_spends[0].index(), 0);
assert_eq!(tx.sapling_spends[0].nf(), &nf); assert_eq!(tx.sapling_spends[0].nf(), &nf);
assert_eq!(tx.sapling_spends[0].account(), account); assert_eq!(tx.sapling_spends[0].account(), account);
assert_eq!(
scanned_block
.sapling_commitments()
.iter()
.map(|(_, retention)| *retention)
.collect::<Vec<_>>(),
vec![
Retention::Ephemeral,
Retention::Checkpoint {
id: scanned_block.height(),
is_marked: false
}
]
);
} }
} }


@ -1,6 +1,7 @@
//! Structs representing transaction data scanned from the block chain by a wallet or //! Structs representing transaction data scanned from the block chain by a wallet or
//! light client. //! light client.
use incrementalmerkletree::Position;
use zcash_note_encryption::EphemeralKeyBytes; use zcash_note_encryption::EphemeralKeyBytes;
use zcash_primitives::{ use zcash_primitives::{
consensus::BlockHeight, consensus::BlockHeight,
@ -117,7 +118,7 @@ pub struct WalletSaplingOutput<N> {
account: AccountId, account: AccountId,
note: sapling::Note, note: sapling::Note,
is_change: bool, is_change: bool,
witness: sapling::IncrementalWitness, note_commitment_tree_position: Position,
nf: N, nf: N,
} }
@ -131,7 +132,7 @@ impl<N> WalletSaplingOutput<N> {
account: AccountId, account: AccountId,
note: sapling::Note, note: sapling::Note,
is_change: bool, is_change: bool,
witness: sapling::IncrementalWitness, note_commitment_tree_position: Position,
nf: N, nf: N,
) -> Self { ) -> Self {
Self { Self {
@ -141,7 +142,7 @@ impl<N> WalletSaplingOutput<N> {
account, account,
note, note,
is_change, is_change,
witness, note_commitment_tree_position,
nf, nf,
} }
} }
@ -164,11 +165,8 @@ impl<N> WalletSaplingOutput<N> {
pub fn is_change(&self) -> bool { pub fn is_change(&self) -> bool {
self.is_change self.is_change
} }
pub fn witness(&self) -> &sapling::IncrementalWitness { pub fn note_commitment_tree_position(&self) -> Position {
&self.witness self.note_commitment_tree_position
}
pub fn witness_mut(&mut self) -> &mut sapling::IncrementalWitness {
&mut self.witness
} }
pub fn nf(&self) -> &N { pub fn nf(&self) -> &N {
&self.nf &self.nf
@ -177,12 +175,13 @@ impl<N> WalletSaplingOutput<N> {
/// Information about a note that is tracked by the wallet that is available for spending, /// Information about a note that is tracked by the wallet that is available for spending,
/// with sufficient information for use in note selection. /// with sufficient information for use in note selection.
#[derive(Debug)]
pub struct ReceivedSaplingNote<NoteRef> { pub struct ReceivedSaplingNote<NoteRef> {
pub note_id: NoteRef, pub note_id: NoteRef,
pub diversifier: sapling::Diversifier, pub diversifier: sapling::Diversifier,
pub note_value: Amount, pub note_value: Amount,
pub rseed: sapling::Rseed, pub rseed: sapling::Rseed,
pub witness: sapling::IncrementalWitness, pub note_commitment_tree_position: Position,
} }
impl<NoteRef> sapling_fees::InputView<NoteRef> for ReceivedSaplingNote<NoteRef> { impl<NoteRef> sapling_fees::InputView<NoteRef> for ReceivedSaplingNote<NoteRef> {


@ -6,14 +6,29 @@ and this library adheres to Rust's notion of
[Semantic Versioning](https://semver.org/spec/v2.0.0.html). [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased] ## [Unreleased]
### Added
- `zcash_client_sqlite::serialization`: serialization formats for data stored
  as SQLite BLOBs in the wallet database.
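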
### Changed ### Changed
- MSRV is now 1.65.0. - MSRV is now 1.65.0.
- Bumped dependencies to `hdwallet 0.4`, `incrementalmerkletree 0.4`, `bs58 0.5`, - Bumped dependencies to `hdwallet 0.4`, `incrementalmerkletree 0.4`, `bs58 0.5`,
`zcash_primitives 0.12` `zcash_primitives 0.12`
- A `CommitmentTree` variant has been added to `zcash_client_sqlite::wallet::init::WalletMigrationError`
- `min_confirmations` parameter values are now more strongly enforced. Previously,
a note could be spent with fewer than `min_confirmations` confirmations if the
wallet did not contain enough observed blocks to satisfy the `min_confirmations`
value specified; this situation is now treated as an error.
- A `BlockConflict` variant has been added to `zcash_client_sqlite::error::SqliteClientError`
### Removed ### Removed
- The empty `wallet::transact` module has been removed. - The empty `wallet::transact` module has been removed.
### Fixed
- Fixed an off-by-one error in the `BlockSource` implementation for the SQLite-backed
`BlockDb` block database which could result in blocks being skipped at the start of
scan ranges.
## [0.7.1] - 2023-05-17 ## [0.7.1] - 2023-05-17
### Fixed ### Fixed


@ -15,7 +15,9 @@ rust-version = "1.65"
[dependencies] [dependencies]
incrementalmerkletree = { version = "0.4", features = ["legacy-api"] } incrementalmerkletree = { version = "0.4", features = ["legacy-api"] }
shardtree = { version = "0.0", features = ["legacy-api"] }
zcash_client_backend = { version = "0.9", path = "../zcash_client_backend" } zcash_client_backend = { version = "0.9", path = "../zcash_client_backend" }
zcash_encoding = { version = "0.2", path = "../components/zcash_encoding" }
zcash_primitives = { version = "0.12", path = "../zcash_primitives", default-features = false } zcash_primitives = { version = "0.12", path = "../zcash_primitives", default-features = false }
# Dependencies exposed in a public API: # Dependencies exposed in a public API:
@ -27,15 +29,17 @@ hdwallet = { version = "0.4", optional = true }
# - Logging and metrics # - Logging and metrics
tracing = "0.1" tracing = "0.1"
# - Protobuf interfaces # - Serialization
byteorder = "1"
prost = "0.11" prost = "0.11"
either = "1.8"
group = "0.13"
jubjub = "0.10"
# - Secret management # - Secret management
secrecy = "0.8" secrecy = "0.8"
# - SQLite databases # - SQLite databases
group = "0.13"
jubjub = "0.10"
rusqlite = { version = "0.29.0", features = ["bundled", "time", "array"] } rusqlite = { version = "0.29.0", features = ["bundled", "time", "array"] }
schemer = "0.2" schemer = "0.2"
schemer-rusqlite = "0.2.2" schemer-rusqlite = "0.2.2"
@ -48,6 +52,7 @@ uuid = "1.1"
[dev-dependencies] [dev-dependencies]
assert_matches = "1.5" assert_matches = "1.5"
incrementalmerkletree = { version = "0.4", features = ["legacy-api", "test-dependencies"] } incrementalmerkletree = { version = "0.4", features = ["legacy-api", "test-dependencies"] }
shardtree = { version = "0.0", features = ["legacy-api", "test-dependencies"] }
proptest = "1.0.0" proptest = "1.0.0"
rand_core = "0.6" rand_core = "0.6"
regex = "1.4" regex = "1.4"
@ -63,6 +68,7 @@ test-dependencies = [
"incrementalmerkletree/test-dependencies", "incrementalmerkletree/test-dependencies",
"zcash_primitives/test-dependencies", "zcash_primitives/test-dependencies",
"zcash_client_backend/test-dependencies", "zcash_client_backend/test-dependencies",
"incrementalmerkletree/test-dependencies",
] ]
transparent-inputs = ["hdwallet", "zcash_client_backend/transparent-inputs"] transparent-inputs = ["hdwallet", "zcash_client_backend/transparent-inputs"]
unstable = ["zcash_client_backend/unstable"] unstable = ["zcash_client_backend/unstable"]


@ -23,19 +23,19 @@ pub mod migrations;
/// Implements a traversal of `limit` blocks of the block cache database. /// Implements a traversal of `limit` blocks of the block cache database.
/// ///
/// Starting at the next block above `last_scanned_height`, the `with_row` callback is invoked with /// Starting at `from_height`, the `with_row` callback is invoked with each block retrieved from
/// each block retrieved from the backing store. If the `limit` value provided is `None`, all /// the backing store. If the `limit` value provided is `None`, all blocks are traversed up to the
/// blocks are traversed up to the maximum height. /// maximum height.
pub(crate) fn blockdb_with_blocks<F, DbErrT, NoteRef>( pub(crate) fn blockdb_with_blocks<F, DbErrT>(
block_source: &BlockDb, block_source: &BlockDb,
last_scanned_height: Option<BlockHeight>, from_height: Option<BlockHeight>,
limit: Option<u32>, limit: Option<u32>,
mut with_row: F, mut with_row: F,
) -> Result<(), Error<DbErrT, SqliteClientError, NoteRef>> ) -> Result<(), Error<DbErrT, SqliteClientError>>
where where
F: FnMut(CompactBlock) -> Result<(), Error<DbErrT, SqliteClientError, NoteRef>>, F: FnMut(CompactBlock) -> Result<(), Error<DbErrT, SqliteClientError>>,
{ {
fn to_chain_error<D, E: Into<SqliteClientError>, N>(err: E) -> Error<D, SqliteClientError, N> { fn to_chain_error<D, E: Into<SqliteClientError>>(err: E) -> Error<D, SqliteClientError> {
Error::BlockSource(err.into()) Error::BlockSource(err.into())
} }
@ -44,14 +44,14 @@ where
.0 .0
.prepare( .prepare(
"SELECT height, data FROM compactblocks "SELECT height, data FROM compactblocks
WHERE height > ? WHERE height >= ?
ORDER BY height ASC LIMIT ?", ORDER BY height ASC LIMIT ?",
) )
.map_err(to_chain_error)?; .map_err(to_chain_error)?;
let mut rows = stmt_blocks let mut rows = stmt_blocks
.query(params![ .query(params![
last_scanned_height.map_or(0u32, u32::from), from_height.map_or(0u32, u32::from),
limit.unwrap_or(u32::max_value()), limit.unwrap_or(u32::max_value()),
]) ])
.map_err(to_chain_error)?; .map_err(to_chain_error)?;
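An illustration in plain Rust (not the SQLite query itself) of why the comparison above changed from `>` to `>=`: `from_height` is now an inclusive starting height, whereas comparing exclusively against a "last scanned" height would skip the first block of a scan range.

```rust
fn main() {
    let cached_heights = [100u32, 101, 102, 103];
    let from_height = 101u32;

    // Old behaviour (`height > ?`, with a "last scanned height" argument):
    let exclusive: Vec<u32> = cached_heights.iter().copied().filter(|h| *h > from_height).collect();
    // New behaviour (`height >= ?`, with an inclusive `from_height`):
    let inclusive: Vec<u32> = cached_heights.iter().copied().filter(|h| *h >= from_height).collect();

    assert_eq!(exclusive, vec![102, 103]); // block 101 would have been skipped
    assert_eq!(inclusive, vec![101, 102, 103]);
}
```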
@ -191,20 +191,20 @@ pub(crate) fn blockmetadb_find_block(
/// Implements a traversal of `limit` blocks of the filesystem-backed /// Implements a traversal of `limit` blocks of the filesystem-backed
/// block cache. /// block cache.
/// ///
/// Starting at the next block height above `last_scanned_height`, the `with_row` callback is /// Starting at `from_height`, the `with_row` callback is invoked with each block retrieved from
/// invoked with each block retrieved from the backing store. If the `limit` value provided is /// the backing store. If the `limit` value provided is `None`, all blocks are traversed up to the
/// `None`, all blocks are traversed up to the maximum height for which metadata is available. /// maximum height for which metadata is available.
#[cfg(feature = "unstable")] #[cfg(feature = "unstable")]
pub(crate) fn fsblockdb_with_blocks<F, DbErrT, NoteRef>( pub(crate) fn fsblockdb_with_blocks<F, DbErrT>(
cache: &FsBlockDb, cache: &FsBlockDb,
last_scanned_height: Option<BlockHeight>, from_height: Option<BlockHeight>,
limit: Option<u32>, limit: Option<u32>,
mut with_block: F, mut with_block: F,
) -> Result<(), Error<DbErrT, FsBlockDbError, NoteRef>> ) -> Result<(), Error<DbErrT, FsBlockDbError>>
where where
F: FnMut(CompactBlock) -> Result<(), Error<DbErrT, FsBlockDbError, NoteRef>>, F: FnMut(CompactBlock) -> Result<(), Error<DbErrT, FsBlockDbError>>,
{ {
fn to_chain_error<D, E: Into<FsBlockDbError>, N>(err: E) -> Error<D, FsBlockDbError, N> { fn to_chain_error<D, E: Into<FsBlockDbError>>(err: E) -> Error<D, FsBlockDbError> {
Error::BlockSource(err.into()) Error::BlockSource(err.into())
} }
@ -214,7 +214,7 @@ where
.prepare( .prepare(
"SELECT height, blockhash, time, sapling_outputs_count, orchard_actions_count "SELECT height, blockhash, time, sapling_outputs_count, orchard_actions_count
FROM compactblocks_meta FROM compactblocks_meta
WHERE height > ? WHERE height >= ?
ORDER BY height ASC LIMIT ?", ORDER BY height ASC LIMIT ?",
) )
.map_err(to_chain_error)?; .map_err(to_chain_error)?;
@ -222,7 +222,7 @@ where
let rows = stmt_blocks let rows = stmt_blocks
.query_map( .query_map(
params![ params![
last_scanned_height.map_or(0u32, u32::from), from_height.map_or(0u32, u32::from),
limit.unwrap_or(u32::max_value()), limit.unwrap_or(u32::max_value()),
], ],
|row| { |row| {
@ -265,18 +265,28 @@ where
#[cfg(test)] #[cfg(test)]
#[allow(deprecated)] #[allow(deprecated)]
mod tests { mod tests {
use std::num::NonZeroU32;
use secrecy::Secret; use secrecy::Secret;
use tempfile::NamedTempFile; use tempfile::NamedTempFile;
use zcash_primitives::{ use zcash_primitives::{
block::BlockHash, transaction::components::Amount, zip32::ExtendedSpendingKey, block::BlockHash,
transaction::{components::Amount, fees::zip317::FeeRule},
zip32::ExtendedSpendingKey,
}; };
use zcash_client_backend::data_api::chain::{ use zcash_client_backend::{
error::{Cause, Error}, address::RecipientAddress,
scan_cached_blocks, validate_chain, data_api::{
chain::scan_cached_blocks,
wallet::{input_selection::GreedyInputSelector, spend},
WalletRead, WalletWrite,
},
fees::{zip317::SingleOutputChangeStrategy, DustOutputPolicy},
wallet::OvkPolicy,
zip321::{Payment, TransactionRequest},
}; };
use zcash_client_backend::data_api::WalletRead;
use crate::{ use crate::{
chain::init::init_cache_database, chain::init::init_cache_database,
@ -314,24 +324,13 @@ mod tests {
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
Amount::from_u64(5).unwrap(), Amount::from_u64(5).unwrap(),
0,
); );
insert_into_cache(&db_cache, &cb); insert_into_cache(&db_cache, &cb);
// Cache-only chain should be valid
let validate_chain_result = validate_chain(
&db_cache,
Some((fake_block_height, fake_block_hash)),
Some(1),
);
assert_matches!(validate_chain_result, Ok(()));
// Scan the cache // Scan the cache
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Data-only chain should be valid
validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None).unwrap();
// Create a second fake CompactBlock sending more value to the address // Create a second fake CompactBlock sending more value to the address
let (cb2, _) = fake_compact_block( let (cb2, _) = fake_compact_block(
@ -340,17 +339,12 @@ mod tests {
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
Amount::from_u64(7).unwrap(), Amount::from_u64(7).unwrap(),
1,
); );
insert_into_cache(&db_cache, &cb2); insert_into_cache(&db_cache, &cb2);
// Data+cache chain should be valid
validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None).unwrap();
// Scan the cache again // Scan the cache again
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Data-only chain should be valid
validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None).unwrap();
} }
#[test] #[test]
@ -373,6 +367,7 @@ mod tests {
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
Amount::from_u64(5).unwrap(), Amount::from_u64(5).unwrap(),
0,
); );
let (cb2, _) = fake_compact_block( let (cb2, _) = fake_compact_block(
sapling_activation_height() + 1, sapling_activation_height() + 1,
@ -380,15 +375,13 @@ mod tests {
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
Amount::from_u64(7).unwrap(), Amount::from_u64(7).unwrap(),
1,
); );
insert_into_cache(&db_cache, &cb); insert_into_cache(&db_cache, &cb);
insert_into_cache(&db_cache, &cb2); insert_into_cache(&db_cache, &cb2);
// Scan the cache // Scan the cache
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Data-only chain should be valid
validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None).unwrap();
// Create more fake CompactBlocks that don't connect to the scanned ones // Create more fake CompactBlocks that don't connect to the scanned ones
let (cb3, _) = fake_compact_block( let (cb3, _) = fake_compact_block(
@ -397,6 +390,7 @@ mod tests {
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
Amount::from_u64(8).unwrap(), Amount::from_u64(8).unwrap(),
2,
); );
let (cb4, _) = fake_compact_block( let (cb4, _) = fake_compact_block(
sapling_activation_height() + 3, sapling_activation_height() + 3,
@ -404,14 +398,16 @@ mod tests {
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
Amount::from_u64(3).unwrap(), Amount::from_u64(3).unwrap(),
3,
); );
insert_into_cache(&db_cache, &cb3); insert_into_cache(&db_cache, &cb3);
insert_into_cache(&db_cache, &cb4); insert_into_cache(&db_cache, &cb4);
// Data+cache chain should be invalid at the data/cache boundary // Data+cache chain should be invalid at the data/cache boundary
let val_result = validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None); assert_matches!(
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None),
assert_matches!(val_result, Err(Error::Chain(e)) if e.at_height() == sapling_activation_height() + 2); Err(_) // FIXME: check error result more closely
);
} }
#[test] #[test]
@ -434,6 +430,7 @@ mod tests {
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
Amount::from_u64(5).unwrap(), Amount::from_u64(5).unwrap(),
0,
); );
let (cb2, _) = fake_compact_block( let (cb2, _) = fake_compact_block(
sapling_activation_height() + 1, sapling_activation_height() + 1,
@ -441,15 +438,13 @@ mod tests {
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
Amount::from_u64(7).unwrap(), Amount::from_u64(7).unwrap(),
1,
); );
insert_into_cache(&db_cache, &cb); insert_into_cache(&db_cache, &cb);
insert_into_cache(&db_cache, &cb2); insert_into_cache(&db_cache, &cb2);
// Scan the cache // Scan the cache
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Data-only chain should be valid
validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None).unwrap();
// Create more fake CompactBlocks that contain a reorg // Create more fake CompactBlocks that contain a reorg
let (cb3, _) = fake_compact_block( let (cb3, _) = fake_compact_block(
@ -458,6 +453,7 @@ mod tests {
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
Amount::from_u64(8).unwrap(), Amount::from_u64(8).unwrap(),
2,
); );
let (cb4, _) = fake_compact_block( let (cb4, _) = fake_compact_block(
sapling_activation_height() + 3, sapling_activation_height() + 3,
@ -465,14 +461,16 @@ mod tests {
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
Amount::from_u64(3).unwrap(), Amount::from_u64(3).unwrap(),
3,
); );
insert_into_cache(&db_cache, &cb3); insert_into_cache(&db_cache, &cb3);
insert_into_cache(&db_cache, &cb4); insert_into_cache(&db_cache, &cb4);
// Data+cache chain should be invalid inside the cache // Data+cache chain should be invalid inside the cache
let val_result = validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None); assert_matches!(
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None),
assert_matches!(val_result, Err(Error::Chain(e)) if e.at_height() == sapling_activation_height() + 3); Err(_) // FIXME: check error result more closely
);
} }
#[test] #[test]
@ -503,6 +501,7 @@ mod tests {
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
value, value,
0,
); );
let (cb2, _) = fake_compact_block( let (cb2, _) = fake_compact_block(
@ -511,12 +510,13 @@ mod tests {
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
value2, value2,
1,
); );
insert_into_cache(&db_cache, &cb); insert_into_cache(&db_cache, &cb);
insert_into_cache(&db_cache, &cb2); insert_into_cache(&db_cache, &cb2);
// Scan the cache // Scan the cache
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Account balance should reflect both received notes // Account balance should reflect both received notes
assert_eq!( assert_eq!(
@ -527,7 +527,7 @@ mod tests {
// "Rewind" to height of last scanned block // "Rewind" to height of last scanned block
db_data db_data
.transactionally(|wdb| { .transactionally(|wdb| {
truncate_to_height(&wdb.conn.0, &wdb.params, sapling_activation_height() + 1) truncate_to_height(wdb.conn.0, &wdb.params, sapling_activation_height() + 1)
}) })
.unwrap(); .unwrap();
@ -540,7 +540,7 @@ mod tests {
// Rewind so that one block is dropped // Rewind so that one block is dropped
db_data db_data
.transactionally(|wdb| { .transactionally(|wdb| {
truncate_to_height(&wdb.conn.0, &wdb.params, sapling_activation_height()) truncate_to_height(wdb.conn.0, &wdb.params, sapling_activation_height())
}) })
.unwrap(); .unwrap();
@ -551,7 +551,7 @@ mod tests {
); );
// Scan the cache again // Scan the cache again
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Account balance should again reflect both received notes // Account balance should again reflect both received notes
assert_eq!( assert_eq!(
@ -561,7 +561,7 @@ mod tests {
} }
#[test] #[test]
fn scan_cached_blocks_requires_sequential_blocks() { fn scan_cached_blocks_allows_blocks_out_of_order() {
let cache_file = NamedTempFile::new().unwrap(); let cache_file = NamedTempFile::new().unwrap();
let db_cache = BlockDb::for_path(cache_file.path()).unwrap(); let db_cache = BlockDb::for_path(cache_file.path()).unwrap();
init_cache_database(&db_cache).unwrap(); init_cache_database(&db_cache).unwrap();
@ -571,7 +571,9 @@ mod tests {
init_wallet_db(&mut db_data, Some(Secret::new(vec![]))).unwrap(); init_wallet_db(&mut db_data, Some(Secret::new(vec![]))).unwrap();
// Add an account to the wallet // Add an account to the wallet
let (dfvk, _taddr) = init_test_accounts_table(&mut db_data); let seed = Secret::new([0u8; 32].to_vec());
let (_, usk) = db_data.create_account(&seed).unwrap();
let dfvk = usk.sapling().to_diversifiable_full_viewing_key();
// Create a block with height SAPLING_ACTIVATION_HEIGHT // Create a block with height SAPLING_ACTIVATION_HEIGHT
let value = Amount::from_u64(50000).unwrap(); let value = Amount::from_u64(50000).unwrap();
@ -581,21 +583,23 @@ mod tests {
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
value, value,
0,
); );
insert_into_cache(&db_cache, &cb1); insert_into_cache(&db_cache, &cb1);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
assert_eq!( assert_eq!(
get_balance(&db_data.conn, AccountId::from(0)).unwrap(), get_balance(&db_data.conn, AccountId::from(0)).unwrap(),
value value
); );
// We cannot scan a block of height SAPLING_ACTIVATION_HEIGHT + 2 next // Create blocks to reach SAPLING_ACTIVATION_HEIGHT + 2
let (cb2, _) = fake_compact_block( let (cb2, _) = fake_compact_block(
sapling_activation_height() + 1, sapling_activation_height() + 1,
cb1.hash(), cb1.hash(),
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
value, value,
1,
); );
let (cb3, _) = fake_compact_block( let (cb3, _) = fake_compact_block(
sapling_activation_height() + 2, sapling_activation_height() + 2,
@ -603,26 +607,64 @@ mod tests {
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
value, value,
2,
); );
insert_into_cache(&db_cache, &cb3);
match scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None) {
Err(Error::Chain(e)) => {
assert_matches!(
e.cause(),
Cause::BlockHeightDiscontinuity(h) if *h
== sapling_activation_height() + 2
);
}
Ok(_) | Err(_) => panic!("Should have failed"),
}
// If we add a block of height SAPLING_ACTIVATION_HEIGHT + 1, we can now scan both // Scan the later block first
insert_into_cache(&db_cache, &cb3);
assert_matches!(
scan_cached_blocks(
&tests::network(),
&db_cache,
&mut db_data,
Some(sapling_activation_height() + 2),
None
),
Ok(_)
);
// If we add a block of height SAPLING_ACTIVATION_HEIGHT + 1, we can now scan that
insert_into_cache(&db_cache, &cb2); insert_into_cache(&db_cache, &cb2);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(
&tests::network(),
&db_cache,
&mut db_data,
Some(sapling_activation_height() + 1),
Some(1),
)
.unwrap();
assert_eq!( assert_eq!(
get_balance(&db_data.conn, AccountId::from(0)).unwrap(), get_balance(&db_data.conn, AccountId::from(0)).unwrap(),
Amount::from_u64(150_000).unwrap() Amount::from_u64(150_000).unwrap()
); );
// We can spend the received notes
let req = TransactionRequest::new(vec![Payment {
recipient_address: RecipientAddress::Shielded(dfvk.default_address().1),
amount: Amount::from_u64(110_000).unwrap(),
memo: None,
label: None,
message: None,
other_params: vec![],
}])
.unwrap();
let input_selector = GreedyInputSelector::new(
SingleOutputChangeStrategy::new(FeeRule::standard()),
DustOutputPolicy::default(),
);
assert_matches!(
spend(
&mut db_data,
&tests::network(),
crate::wallet::sapling::tests::test_prover(),
&input_selector,
&usk,
req,
OvkPolicy::Sender,
NonZeroU32::new(1).unwrap(),
),
Ok(_)
);
} }
#[test] #[test]
@ -652,11 +694,12 @@ mod tests {
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
value, value,
0,
); );
insert_into_cache(&db_cache, &cb); insert_into_cache(&db_cache, &cb);
// Scan the cache // Scan the cache
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Account balance should reflect the received note // Account balance should reflect the received note
assert_eq!( assert_eq!(
@ -672,11 +715,12 @@ mod tests {
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
value2, value2,
1,
); );
insert_into_cache(&db_cache, &cb2); insert_into_cache(&db_cache, &cb2);
// Scan the cache again // Scan the cache again
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Account balance should reflect both received notes // Account balance should reflect both received notes
assert_eq!( assert_eq!(
@ -712,11 +756,12 @@ mod tests {
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
value, value,
0,
); );
insert_into_cache(&db_cache, &cb); insert_into_cache(&db_cache, &cb);
// Scan the cache // Scan the cache
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Account balance should reflect the received note // Account balance should reflect the received note
assert_eq!( assert_eq!(
@ -737,11 +782,12 @@ mod tests {
&dfvk, &dfvk,
to2, to2,
value2, value2,
1,
), ),
); );
// Scan the cache again // Scan the cache again
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Account balance should equal the change // Account balance should equal the change
assert_eq!( assert_eq!(

View File

@ -1,12 +1,15 @@
//! Error types for problems that may arise when reading or storing wallet data to SQLite. //! Error types for problems that may arise when reading or storing wallet data to SQLite.
use either::Either;
use std::error; use std::error;
use std::fmt; use std::fmt;
use std::io;
use shardtree::ShardTreeError;
use zcash_client_backend::encoding::{Bech32DecodeError, TransparentCodecError}; use zcash_client_backend::encoding::{Bech32DecodeError, TransparentCodecError};
use zcash_primitives::{consensus::BlockHeight, zip32::AccountId}; use zcash_primitives::{consensus::BlockHeight, zip32::AccountId};
use crate::PRUNING_HEIGHT; use crate::PRUNING_DEPTH;
#[cfg(feature = "transparent-inputs")] #[cfg(feature = "transparent-inputs")]
use zcash_primitives::legacy::TransparentAddress; use zcash_primitives::legacy::TransparentAddress;
@ -50,13 +53,15 @@ pub enum SqliteClientError {
/// A received memo cannot be interpreted as a UTF-8 string. /// A received memo cannot be interpreted as a UTF-8 string.
InvalidMemo(zcash_primitives::memo::Error), InvalidMemo(zcash_primitives::memo::Error),
/// A requested rewind would violate invariants of the /// An attempt to update block data would overwrite the current hash for a block with a
/// storage layer. The payload returned with this error is /// different hash. This indicates that a required rewind was not performed.
/// (safe rewind height, requested height). BlockConflict(BlockHeight),
/// A requested rewind would violate invariants of the storage layer. The payload returned with
/// this error is (safe rewind height, requested height).
RequestedRewindInvalid(BlockHeight, BlockHeight), RequestedRewindInvalid(BlockHeight, BlockHeight),
/// The space of allocatable diversifier indices has been exhausted for /// The space of allocatable diversifier indices has been exhausted for the given account.
/// the given account.
DiversifierIndexOutOfRange, DiversifierIndexOutOfRange,
/// An error occurred deriving a spending key from a seed and an account /// An error occurred deriving a spending key from a seed and an account
@ -74,6 +79,10 @@ pub enum SqliteClientError {
/// belonging to the wallet /// belonging to the wallet
#[cfg(feature = "transparent-inputs")] #[cfg(feature = "transparent-inputs")]
AddressNotRecognized(TransparentAddress), AddressNotRecognized(TransparentAddress),
/// An error occurred in inserting data into or accessing data from one of the wallet's note
/// commitment trees.
CommitmentTree(ShardTreeError<Either<io::Error, rusqlite::Error>>),
} }
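
Both additions give callers a concrete signal that previously had to be inferred: `BlockConflict` reports a hash mismatch that can only be resolved by rewinding, and `CommitmentTree` wraps failures from the shard-backed note commitment tree storage. A hedged sketch of caller-side handling (the helper below is illustrative, not part of the crate):

// Hypothetical example of branching on the new variants; it relies only on the
// variants and Display impls shown in this file.
fn describe(err: &SqliteClientError) -> String {
    match err {
        // The stored hash for this height differs from the one being written;
        // the wallet must be truncated below this height before rescanning.
        SqliteClientError::BlockConflict(h) => {
            format!("block hash conflict at height {}; rewind required", u32::from(*h))
        }
        // An error surfaced by the SQLite-backed ShardTree storage layer.
        SqliteClientError::CommitmentTree(e) => {
            format!("note commitment tree error: {}", e)
        }
        other => format!("{}", other),
    }
}
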
impl error::Error for SqliteClientError { impl error::Error for SqliteClientError {
@ -99,7 +108,7 @@ impl fmt::Display for SqliteClientError {
SqliteClientError::InvalidNoteId => SqliteClientError::InvalidNoteId =>
write!(f, "The note ID associated with an inserted witness must correspond to a received note."), write!(f, "The note ID associated with an inserted witness must correspond to a received note."),
SqliteClientError::RequestedRewindInvalid(h, r) => SqliteClientError::RequestedRewindInvalid(h, r) =>
write!(f, "A rewind must be either of less than {} blocks, or at least back to block {} for your wallet; the requested height was {}.", PRUNING_HEIGHT, h, r), write!(f, "A rewind must be either of less than {} blocks, or at least back to block {} for your wallet; the requested height was {}.", PRUNING_DEPTH, h, r),
SqliteClientError::Bech32DecodeError(e) => write!(f, "{}", e), SqliteClientError::Bech32DecodeError(e) => write!(f, "{}", e),
#[cfg(feature = "transparent-inputs")] #[cfg(feature = "transparent-inputs")]
SqliteClientError::HdwalletError(e) => write!(f, "{:?}", e), SqliteClientError::HdwalletError(e) => write!(f, "{:?}", e),
@ -108,12 +117,14 @@ impl fmt::Display for SqliteClientError {
SqliteClientError::DbError(e) => write!(f, "{}", e), SqliteClientError::DbError(e) => write!(f, "{}", e),
SqliteClientError::Io(e) => write!(f, "{}", e), SqliteClientError::Io(e) => write!(f, "{}", e),
SqliteClientError::InvalidMemo(e) => write!(f, "{}", e), SqliteClientError::InvalidMemo(e) => write!(f, "{}", e),
SqliteClientError::BlockConflict(h) => write!(f, "A block hash conflict occurred at height {}; rewind required.", u32::from(*h)),
SqliteClientError::DiversifierIndexOutOfRange => write!(f, "The space of available diversifier indices is exhausted"), SqliteClientError::DiversifierIndexOutOfRange => write!(f, "The space of available diversifier indices is exhausted"),
SqliteClientError::KeyDerivationError(acct_id) => write!(f, "Key derivation failed for account {:?}", acct_id), SqliteClientError::KeyDerivationError(acct_id) => write!(f, "Key derivation failed for account {:?}", acct_id),
SqliteClientError::AccountIdDiscontinuity => write!(f, "Wallet account identifiers must be sequential."), SqliteClientError::AccountIdDiscontinuity => write!(f, "Wallet account identifiers must be sequential."),
SqliteClientError::AccountIdOutOfRange => write!(f, "Wallet account identifiers must be less than 0x7FFFFFFF."), SqliteClientError::AccountIdOutOfRange => write!(f, "Wallet account identifiers must be less than 0x7FFFFFFF."),
#[cfg(feature = "transparent-inputs")] #[cfg(feature = "transparent-inputs")]
SqliteClientError::AddressNotRecognized(_) => write!(f, "The address associated with a received txo is not identifiable as belonging to the wallet."), SqliteClientError::AddressNotRecognized(_) => write!(f, "The address associated with a received txo is not identifiable as belonging to the wallet."),
SqliteClientError::CommitmentTree(err) => write!(f, "An error occurred accessing or updating note commitment tree data: {}.", err),
} }
} }
} }
@ -160,3 +171,9 @@ impl From<zcash_primitives::memo::Error> for SqliteClientError {
SqliteClientError::InvalidMemo(e) SqliteClientError::InvalidMemo(e)
} }
} }
impl From<ShardTreeError<Either<io::Error, rusqlite::Error>>> for SqliteClientError {
fn from(e: ShardTreeError<Either<io::Error, rusqlite::Error>>) -> Self {
SqliteClientError::CommitmentTree(e)
}
}

View File

@ -32,10 +32,13 @@
// Catch documentation errors caused by code changes. // Catch documentation errors caused by code changes.
#![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::broken_intra_doc_links)]
use either::Either;
use rusqlite::{self, Connection}; use rusqlite::{self, Connection};
use secrecy::{ExposeSecret, SecretVec}; use secrecy::{ExposeSecret, SecretVec};
use std::{borrow::Borrow, collections::HashMap, convert::AsRef, fmt, path::Path}; use std::{borrow::Borrow, collections::HashMap, convert::AsRef, fmt, io, ops::Range, path::Path};
use incrementalmerkletree::Position;
use shardtree::{ShardTree, ShardTreeError};
use zcash_primitives::{ use zcash_primitives::{
block::BlockHash, block::BlockHash,
consensus::{self, BlockHeight}, consensus::{self, BlockHeight},
@ -52,8 +55,9 @@ use zcash_primitives::{
use zcash_client_backend::{ use zcash_client_backend::{
address::{AddressMetadata, UnifiedAddress}, address::{AddressMetadata, UnifiedAddress},
data_api::{ data_api::{
self, chain::BlockSource, DecryptedTransaction, NullifierQuery, PoolType, PrunedBlock, self, chain::BlockSource, BlockMetadata, DecryptedTransaction, NullifierQuery, PoolType,
Recipient, SentTransaction, WalletRead, WalletWrite, Recipient, ScannedBlock, SentTransaction, WalletCommitmentTrees, WalletRead, WalletWrite,
SAPLING_SHARD_HEIGHT,
}, },
keys::{UnifiedFullViewingKey, UnifiedSpendingKey}, keys::{UnifiedFullViewingKey, UnifiedSpendingKey},
proto::compact_formats::CompactBlock, proto::compact_formats::CompactBlock,
@ -61,23 +65,26 @@ use zcash_client_backend::{
DecryptedOutput, TransferType, DecryptedOutput, TransferType,
}; };
use crate::error::SqliteClientError; use crate::{error::SqliteClientError, wallet::commitment_tree::SqliteShardStore};
#[cfg(feature = "unstable")] #[cfg(feature = "unstable")]
use { use {
crate::chain::{fsblockdb_with_blocks, BlockMeta}, crate::chain::{fsblockdb_with_blocks, BlockMeta},
std::fs,
std::path::PathBuf, std::path::PathBuf,
std::{fs, io},
}; };
pub mod chain; pub mod chain;
pub mod error; pub mod error;
pub mod serialization;
pub mod wallet; pub mod wallet;
/// The maximum number of blocks the wallet is allowed to rewind. This is /// The maximum number of blocks the wallet is allowed to rewind. This is
/// consistent with the bound in zcashd, and allows block data deeper than /// consistent with the bound in zcashd, and allows block data deeper than
/// this delta from the chain tip to be pruned. /// this delta from the chain tip to be pruned.
pub(crate) const PRUNING_HEIGHT: u32 = 100; pub(crate) const PRUNING_DEPTH: u32 = 100;
pub(crate) const SAPLING_TABLES_PREFIX: &str = "sapling";
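
PRUNING_DEPTH is the same bound that `truncate_to_height` (further down in this diff) applies when deciding whether a rewind request can be honored. A sketch of that rule, with `rewind_is_allowed` as an illustrative helper rather than an actual crate function:

// Sketch of the rewind bound: a rewind is honored if it is shallow (fewer than
// PRUNING_DEPTH blocks below the last scanned height), or if it reaches at
// least as far back as the earliest block containing an unspent received note.
fn rewind_is_allowed(
    requested_height: u32,
    last_scanned_height: u32,
    min_unspent_height: Option<u32>,
) -> bool {
    const PRUNING_DEPTH: u32 = 100;
    requested_height >= last_scanned_height.saturating_sub(PRUNING_DEPTH)
        || min_unspent_height.map_or(true, |h| requested_height <= h)
}
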
/// A newtype wrapper for sqlite primary key values for the notes /// A newtype wrapper for sqlite primary key values for the notes
/// table. /// table.
@ -108,11 +115,11 @@ pub struct WalletDb<C, P> {
} }
/// A wrapper for a SQLite transaction affecting the wallet database. /// A wrapper for a SQLite transaction affecting the wallet database.
pub struct SqlTransaction<'conn>(pub(crate) rusqlite::Transaction<'conn>); pub struct SqlTransaction<'conn>(pub(crate) &'conn rusqlite::Transaction<'conn>);
impl Borrow<rusqlite::Connection> for SqlTransaction<'_> { impl Borrow<rusqlite::Connection> for SqlTransaction<'_> {
fn borrow(&self) -> &rusqlite::Connection { fn borrow(&self) -> &rusqlite::Connection {
&self.0 self.0
} }
} }
@ -125,16 +132,17 @@ impl<P: consensus::Parameters + Clone> WalletDb<Connection, P> {
}) })
} }
pub fn transactionally<F, A>(&mut self, f: F) -> Result<A, SqliteClientError> pub fn transactionally<F, A, E: From<rusqlite::Error>>(&mut self, f: F) -> Result<A, E>
where where
F: FnOnce(&WalletDb<SqlTransaction<'_>, P>) -> Result<A, SqliteClientError>, F: FnOnce(&mut WalletDb<SqlTransaction<'_>, P>) -> Result<A, E>,
{ {
let wdb = WalletDb { let tx = self.conn.transaction()?;
conn: SqlTransaction(self.conn.transaction()?), let mut wdb = WalletDb {
conn: SqlTransaction(&tx),
params: self.params.clone(), params: self.params.clone(),
}; };
let result = f(&wdb)?; let result = f(&mut wdb)?;
wdb.conn.0.commit()?; tx.commit()?;
Ok(result) Ok(result)
} }
} }
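
`transactionally` is now generic over the error type and hands the callback a `WalletDb` wrapping a borrowed transaction, so all statements issued inside the closure commit together or roll back together. A hypothetical usage sketch (the SQL here is a placeholder, not a real wallet query):

// Sketch only: everything executed via `wdb.conn.0` inside the closure runs on
// a single SQLite transaction, committed only if the closure returns Ok.
fn atomic_example<P: zcash_primitives::consensus::Parameters + Clone>(
    db: &mut WalletDb<rusqlite::Connection, P>,
) -> Result<(), SqliteClientError> {
    db.transactionally(|wdb| {
        let one: u32 = wdb.conn.0.query_row("SELECT 1", [], |row| row.get(0))?;
        debug_assert_eq!(one, 1);
        Ok(())
    })
}
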
@ -148,6 +156,22 @@ impl<C: Borrow<rusqlite::Connection>, P: consensus::Parameters> WalletRead for W
wallet::block_height_extrema(self.conn.borrow()).map_err(SqliteClientError::from) wallet::block_height_extrema(self.conn.borrow()).map_err(SqliteClientError::from)
} }
fn block_metadata(&self, height: BlockHeight) -> Result<Option<BlockMetadata>, Self::Error> {
wallet::block_metadata(self.conn.borrow(), height)
}
fn block_fully_scanned(&self) -> Result<Option<BlockMetadata>, Self::Error> {
wallet::block_fully_scanned(self.conn.borrow())
}
fn suggest_scan_ranges(
&self,
_batch_size: usize,
_limit: usize,
) -> Result<Vec<Range<BlockHeight>>, Self::Error> {
todo!()
}
fn get_min_unspent_height(&self) -> Result<Option<BlockHeight>, Self::Error> { fn get_min_unspent_height(&self) -> Result<Option<BlockHeight>, Self::Error> {
wallet::get_min_unspent_height(self.conn.borrow()).map_err(SqliteClientError::from) wallet::get_min_unspent_height(self.conn.borrow()).map_err(SqliteClientError::from)
} }
@ -160,6 +184,14 @@ impl<C: Borrow<rusqlite::Connection>, P: consensus::Parameters> WalletRead for W
wallet::get_tx_height(self.conn.borrow(), txid).map_err(SqliteClientError::from) wallet::get_tx_height(self.conn.borrow(), txid).map_err(SqliteClientError::from)
} }
fn get_current_address(
&self,
account: AccountId,
) -> Result<Option<UnifiedAddress>, Self::Error> {
wallet::get_current_address(self.conn.borrow(), &self.params, account)
.map(|res| res.map(|(addr, _)| addr))
}
fn get_unified_full_viewing_keys( fn get_unified_full_viewing_keys(
&self, &self,
) -> Result<HashMap<AccountId, UnifiedFullViewingKey>, Self::Error> { ) -> Result<HashMap<AccountId, UnifiedFullViewingKey>, Self::Error> {
@ -173,14 +205,6 @@ impl<C: Borrow<rusqlite::Connection>, P: consensus::Parameters> WalletRead for W
wallet::get_account_for_ufvk(self.conn.borrow(), &self.params, ufvk) wallet::get_account_for_ufvk(self.conn.borrow(), &self.params, ufvk)
} }
fn get_current_address(
&self,
account: AccountId,
) -> Result<Option<UnifiedAddress>, Self::Error> {
wallet::get_current_address(self.conn.borrow(), &self.params, account)
.map(|res| res.map(|(addr, _)| addr))
}
fn is_valid_account_extfvk( fn is_valid_account_extfvk(
&self, &self,
account: AccountId, account: AccountId,
@ -197,10 +221,6 @@ impl<C: Borrow<rusqlite::Connection>, P: consensus::Parameters> WalletRead for W
wallet::get_balance_at(self.conn.borrow(), account, anchor_height) wallet::get_balance_at(self.conn.borrow(), account, anchor_height)
} }
fn get_transaction(&self, id_tx: i64) -> Result<Transaction, Self::Error> {
wallet::get_transaction(self.conn.borrow(), &self.params, id_tx)
}
fn get_memo(&self, id_note: Self::NoteRef) -> Result<Option<Memo>, Self::Error> { fn get_memo(&self, id_note: Self::NoteRef) -> Result<Option<Memo>, Self::Error> {
match id_note { match id_note {
NoteId::SentNoteId(id_note) => wallet::get_sent_memo(self.conn.borrow(), id_note), NoteId::SentNoteId(id_note) => wallet::get_sent_memo(self.conn.borrow(), id_note),
@ -210,24 +230,13 @@ impl<C: Borrow<rusqlite::Connection>, P: consensus::Parameters> WalletRead for W
} }
} }
fn get_commitment_tree( fn get_transaction(&self, id_tx: i64) -> Result<Transaction, Self::Error> {
&self, wallet::get_transaction(self.conn.borrow(), &self.params, id_tx)
block_height: BlockHeight,
) -> Result<Option<sapling::CommitmentTree>, Self::Error> {
wallet::sapling::get_sapling_commitment_tree(self.conn.borrow(), block_height)
}
#[allow(clippy::type_complexity)]
fn get_witnesses(
&self,
block_height: BlockHeight,
) -> Result<Vec<(Self::NoteRef, sapling::IncrementalWitness)>, Self::Error> {
wallet::sapling::get_sapling_witnesses(self.conn.borrow(), block_height)
} }
fn get_sapling_nullifiers( fn get_sapling_nullifiers(
&self, &self,
query: data_api::NullifierQuery, query: NullifierQuery,
) -> Result<Vec<(AccountId, sapling::Nullifier)>, Self::Error> { ) -> Result<Vec<(AccountId, sapling::Nullifier)>, Self::Error> {
match query { match query {
NullifierQuery::Unspent => wallet::sapling::get_sapling_nullifiers(self.conn.borrow()), NullifierQuery::Unspent => wallet::sapling::get_sapling_nullifiers(self.conn.borrow()),
@ -327,7 +336,7 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
seed: &SecretVec<u8>, seed: &SecretVec<u8>,
) -> Result<(AccountId, UnifiedSpendingKey), Self::Error> { ) -> Result<(AccountId, UnifiedSpendingKey), Self::Error> {
self.transactionally(|wdb| { self.transactionally(|wdb| {
let account = wallet::get_max_account_id(&wdb.conn.0)? let account = wallet::get_max_account_id(wdb.conn.0)?
.map(|a| AccountId::from(u32::from(a) + 1)) .map(|a| AccountId::from(u32::from(a) + 1))
.unwrap_or_else(|| AccountId::from(0)); .unwrap_or_else(|| AccountId::from(0));
@ -339,7 +348,7 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
.map_err(|_| SqliteClientError::KeyDerivationError(account))?; .map_err(|_| SqliteClientError::KeyDerivationError(account))?;
let ufvk = usk.to_unified_full_viewing_key(); let ufvk = usk.to_unified_full_viewing_key();
wallet::add_account(&wdb.conn.0, &wdb.params, account, &ufvk)?; wallet::add_account(wdb.conn.0, &wdb.params, account, &ufvk)?;
Ok((account, usk)) Ok((account, usk))
}) })
@ -353,7 +362,7 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
|wdb| match wdb.get_unified_full_viewing_keys()?.get(&account) { |wdb| match wdb.get_unified_full_viewing_keys()?.get(&account) {
Some(ufvk) => { Some(ufvk) => {
let search_from = let search_from =
match wallet::get_current_address(&wdb.conn.0, &wdb.params, account)? { match wallet::get_current_address(wdb.conn.0, &wdb.params, account)? {
Some((_, mut last_diversifier_index)) => { Some((_, mut last_diversifier_index)) => {
last_diversifier_index last_diversifier_index
.increment() .increment()
@ -368,7 +377,7 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
.ok_or(SqliteClientError::DiversifierIndexOutOfRange)?; .ok_or(SqliteClientError::DiversifierIndexOutOfRange)?;
wallet::insert_address( wallet::insert_address(
&wdb.conn.0, wdb.conn.0,
&wdb.params, &wdb.params,
account, account,
diversifier_index, diversifier_index,
@ -382,63 +391,55 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
) )
} }
#[tracing::instrument(skip_all, fields(height = u32::from(block.block_height)))] #[tracing::instrument(skip_all, fields(height = u32::from(block.height())))]
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
fn advance_by_block( fn put_block(
&mut self, &mut self,
block: &PrunedBlock, block: ScannedBlock<sapling::Nullifier>,
updated_witnesses: &[(Self::NoteRef, sapling::IncrementalWitness)], ) -> Result<Vec<Self::NoteRef>, Self::Error> {
) -> Result<Vec<(Self::NoteRef, sapling::IncrementalWitness)>, Self::Error> {
self.transactionally(|wdb| { self.transactionally(|wdb| {
// Insert the block into the database. // Insert the block into the database.
wallet::insert_block( wallet::put_block(
&wdb.conn.0, wdb.conn.0,
block.block_height, block.height(),
block.block_hash, block.block_hash(),
block.block_time, block.block_time(),
block.commitment_tree, block.metadata().sapling_tree_size(),
)?; )?;
let mut new_witnesses = vec![]; let mut wallet_note_ids = vec![];
for tx in block.transactions { for tx in block.transactions() {
let tx_row = wallet::put_tx_meta(&wdb.conn.0, tx, block.block_height)?; let tx_row = wallet::put_tx_meta(wdb.conn.0, tx, block.height())?;
// Mark notes as spent and remove them from the scanning cache // Mark notes as spent and remove them from the scanning cache
for spend in &tx.sapling_spends { for spend in &tx.sapling_spends {
wallet::sapling::mark_sapling_note_spent(&wdb.conn.0, tx_row, spend.nf())?; wallet::sapling::mark_sapling_note_spent(wdb.conn.0, tx_row, spend.nf())?;
} }
for output in &tx.sapling_outputs { for output in &tx.sapling_outputs {
let received_note_id = let received_note_id =
wallet::sapling::put_received_note(&wdb.conn.0, output, tx_row)?; wallet::sapling::put_received_note(wdb.conn.0, output, tx_row)?;
// Save witness for note. // Save witness for note.
new_witnesses.push((received_note_id, output.witness().clone())); wallet_note_ids.push(received_note_id);
} }
} }
// Insert current new_witnesses into the database. let block_height = block.height();
for (received_note_id, witness) in updated_witnesses.iter().chain(new_witnesses.iter()) let sapling_tree_size = block.metadata().sapling_tree_size();
{ let sapling_commitments_len = block.sapling_commitments().len();
if let NoteId::ReceivedNoteId(rnid) = *received_note_id { let mut sapling_commitments = block.into_sapling_commitments().into_iter();
wallet::sapling::insert_witness( wdb.with_sapling_tree_mut::<_, _, SqliteClientError>(move |sapling_tree| {
&wdb.conn.0, let start_position = Position::from(u64::from(sapling_tree_size))
rnid, - u64::try_from(sapling_commitments_len).unwrap();
witness, sapling_tree.batch_insert(start_position, &mut sapling_commitments)?;
block.block_height, Ok(())
)?; })?;
} else {
return Err(SqliteClientError::InvalidNoteId);
}
}
// Prune the stored witnesses (we only expect rollbacks of at most PRUNING_HEIGHT blocks).
wallet::prune_witnesses(&wdb.conn.0, block.block_height - PRUNING_HEIGHT)?;
// Update now-expired transactions that didn't get mined. // Update now-expired transactions that didn't get mined.
wallet::update_expired_notes(&wdb.conn.0, block.block_height)?; wallet::update_expired_notes(wdb.conn.0, block_height)?;
Ok(new_witnesses) Ok(wallet_note_ids)
}) })
} }
@ -447,113 +448,96 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
d_tx: DecryptedTransaction, d_tx: DecryptedTransaction,
) -> Result<Self::TxRef, Self::Error> { ) -> Result<Self::TxRef, Self::Error> {
self.transactionally(|wdb| { self.transactionally(|wdb| {
let tx_ref = wallet::put_tx_data(&wdb.conn.0, d_tx.tx, None, None)?; let tx_ref = wallet::put_tx_data(wdb.conn.0, d_tx.tx, None, None)?;
let mut spending_account_id: Option<AccountId> = None; let mut spending_account_id: Option<AccountId> = None;
for output in d_tx.sapling_outputs { for output in d_tx.sapling_outputs {
match output.transfer_type { match output.transfer_type {
TransferType::Outgoing | TransferType::WalletInternal => { TransferType::Outgoing | TransferType::WalletInternal => {
let recipient = if output.transfer_type == TransferType::Outgoing { let recipient = if output.transfer_type == TransferType::Outgoing {
Recipient::Sapling(output.note.recipient()) Recipient::Sapling(output.note.recipient())
} else { } else {
Recipient::InternalAccount(output.account, PoolType::Sapling) Recipient::InternalAccount(output.account, PoolType::Sapling)
}; };
wallet::put_sent_output( wallet::put_sent_output(
&wdb.conn.0, wdb.conn.0,
&wdb.params, &wdb.params,
output.account, output.account,
tx_ref, tx_ref,
output.index, output.index,
&recipient, &recipient,
Amount::from_u64(output.note.value().inner()).map_err(|_| { Amount::from_u64(output.note.value().inner()).map_err(|_| {
SqliteClientError::CorruptedData( SqliteClientError::CorruptedData(
"Note value is not a valid Zcash amount.".to_string(), "Note value is not a valid Zcash amount.".to_string(),
) )
})?, })?,
Some(&output.memo), Some(&output.memo),
)?; )?;
if matches!(recipient, Recipient::InternalAccount(_, _)) { if matches!(recipient, Recipient::InternalAccount(_, _)) {
wallet::sapling::put_received_note(&wdb.conn.0, output, tx_ref)?; wallet::sapling::put_received_note(wdb.conn.0, output, tx_ref)?;
}
} }
} TransferType::Incoming => {
TransferType::Incoming => { match spending_account_id {
match spending_account_id { Some(id) => {
Some(id) => { if id != output.account {
if id != output.account { panic!("Unable to determine a unique account identifier for z->t spend.");
panic!("Unable to determine a unique account identifier for z->t spend."); }
}
None => {
spending_account_id = Some(output.account);
} }
} }
None => {
spending_account_id = Some(output.account); wallet::sapling::put_received_note(wdb.conn.0, output, tx_ref)?;
}
}
}
// If any of the utxos spent in the transaction are ours, mark them as spent.
#[cfg(feature = "transparent-inputs")]
for txin in d_tx.tx.transparent_bundle().iter().flat_map(|b| b.vin.iter()) {
wallet::mark_transparent_utxo_spent(wdb.conn.0, tx_ref, &txin.prevout)?;
}
// If we have some transparent outputs:
if d_tx.tx.transparent_bundle().iter().any(|b| !b.vout.is_empty()) {
let nullifiers = wdb.get_sapling_nullifiers(NullifierQuery::All)?;
// If the transaction contains shielded spends from our wallet, we will store z->t
// transactions we observe in the same way they would be stored by
// create_spend_to_address.
if let Some((account_id, _)) = nullifiers.iter().find(
|(_, nf)|
d_tx.tx.sapling_bundle().iter().flat_map(|b| b.shielded_spends().iter())
.any(|input| nf == input.nullifier())
) {
for (output_index, txout) in d_tx.tx.transparent_bundle().iter().flat_map(|b| b.vout.iter()).enumerate() {
if let Some(address) = txout.recipient_address() {
wallet::put_sent_output(
wdb.conn.0,
&wdb.params,
*account_id,
tx_ref,
output_index,
&Recipient::Transparent(address),
txout.value,
None
)?;
} }
} }
wallet::sapling::put_received_note(&wdb.conn.0, output, tx_ref)?;
} }
} }
}
// If any of the utxos spent in the transaction are ours, mark them as spent. Ok(tx_ref)
#[cfg(feature = "transparent-inputs")]
for txin in d_tx
.tx
.transparent_bundle()
.iter()
.flat_map(|b| b.vin.iter())
{
wallet::mark_transparent_utxo_spent(&wdb.conn.0, tx_ref, &txin.prevout)?;
}
// If we have some transparent outputs:
if !d_tx
.tx
.transparent_bundle()
.iter()
.any(|b| b.vout.is_empty())
{
let nullifiers = wdb.get_sapling_nullifiers(data_api::NullifierQuery::All)?;
// If the transaction contains shielded spends from our wallet, we will store z->t
// transactions we observe in the same way they would be stored by
// create_spend_to_address.
if let Some((account_id, _)) = nullifiers.iter().find(|(_, nf)| {
d_tx.tx
.sapling_bundle()
.iter()
.flat_map(|b| b.shielded_spends().iter())
.any(|input| nf == input.nullifier())
}) {
for (output_index, txout) in d_tx
.tx
.transparent_bundle()
.iter()
.flat_map(|b| b.vout.iter())
.enumerate()
{
if let Some(address) = txout.recipient_address() {
wallet::put_sent_output(
&wdb.conn.0,
&wdb.params,
*account_id,
tx_ref,
output_index,
&Recipient::Transparent(address),
txout.value,
None,
)?;
}
}
}
}
Ok(tx_ref)
}) })
} }
fn store_sent_tx(&mut self, sent_tx: &SentTransaction) -> Result<Self::TxRef, Self::Error> { fn store_sent_tx(&mut self, sent_tx: &SentTransaction) -> Result<Self::TxRef, Self::Error> {
self.transactionally(|wdb| { self.transactionally(|wdb| {
let tx_ref = wallet::put_tx_data( let tx_ref = wallet::put_tx_data(
&wdb.conn.0, wdb.conn.0,
sent_tx.tx, sent_tx.tx,
Some(sent_tx.fee_amount), Some(sent_tx.fee_amount),
Some(sent_tx.created), Some(sent_tx.created),
@ -570,7 +554,7 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
if let Some(bundle) = sent_tx.tx.sapling_bundle() { if let Some(bundle) = sent_tx.tx.sapling_bundle() {
for spend in bundle.shielded_spends() { for spend in bundle.shielded_spends() {
wallet::sapling::mark_sapling_note_spent( wallet::sapling::mark_sapling_note_spent(
&wdb.conn.0, wdb.conn.0,
tx_ref, tx_ref,
spend.nullifier(), spend.nullifier(),
)?; )?;
@ -579,12 +563,12 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
#[cfg(feature = "transparent-inputs")] #[cfg(feature = "transparent-inputs")]
for utxo_outpoint in &sent_tx.utxos_spent { for utxo_outpoint in &sent_tx.utxos_spent {
wallet::mark_transparent_utxo_spent(&wdb.conn.0, tx_ref, utxo_outpoint)?; wallet::mark_transparent_utxo_spent(wdb.conn.0, tx_ref, utxo_outpoint)?;
} }
for output in &sent_tx.outputs { for output in &sent_tx.outputs {
wallet::insert_sent_output( wallet::insert_sent_output(
&wdb.conn.0, wdb.conn.0,
&wdb.params, &wdb.params,
tx_ref, tx_ref,
sent_tx.account, sent_tx.account,
@ -593,7 +577,7 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
if let Some((account, note)) = output.sapling_change_to() { if let Some((account, note)) = output.sapling_change_to() {
wallet::sapling::put_received_note( wallet::sapling::put_received_note(
&wdb.conn.0, wdb.conn.0,
&DecryptedOutput { &DecryptedOutput {
index: output.output_index(), index: output.output_index(),
note: note.clone(), note: note.clone(),
@ -615,7 +599,7 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
fn truncate_to_height(&mut self, block_height: BlockHeight) -> Result<(), Self::Error> { fn truncate_to_height(&mut self, block_height: BlockHeight) -> Result<(), Self::Error> {
self.transactionally(|wdb| { self.transactionally(|wdb| {
wallet::truncate_to_height(&wdb.conn.0, &wdb.params, block_height) wallet::truncate_to_height(wdb.conn.0, &wdb.params, block_height)
}) })
} }
@ -633,6 +617,65 @@ impl<P: consensus::Parameters> WalletWrite for WalletDb<rusqlite::Connection, P>
} }
} }
impl<P: consensus::Parameters> WalletCommitmentTrees for WalletDb<rusqlite::Connection, P> {
type Error = Either<io::Error, rusqlite::Error>;
type SaplingShardStore<'a> =
SqliteShardStore<&'a rusqlite::Transaction<'a>, sapling::Node, SAPLING_SHARD_HEIGHT>;
fn with_sapling_tree_mut<F, A, E>(&mut self, mut callback: F) -> Result<A, E>
where
for<'a> F: FnMut(
&'a mut ShardTree<
Self::SaplingShardStore<'a>,
{ sapling::NOTE_COMMITMENT_TREE_DEPTH },
SAPLING_SHARD_HEIGHT,
>,
) -> Result<A, E>,
E: From<ShardTreeError<Either<io::Error, rusqlite::Error>>>,
{
let tx = self
.conn
.transaction()
.map_err(|e| ShardTreeError::Storage(Either::Right(e)))?;
let shard_store = SqliteShardStore::from_connection(&tx, SAPLING_TABLES_PREFIX)
.map_err(|e| ShardTreeError::Storage(Either::Right(e)))?;
let result = {
let mut shardtree = ShardTree::new(shard_store, PRUNING_DEPTH.try_into().unwrap());
callback(&mut shardtree)?
};
tx.commit()
.map_err(|e| ShardTreeError::Storage(Either::Right(e)))?;
Ok(result)
}
}
impl<'conn, P: consensus::Parameters> WalletCommitmentTrees for WalletDb<SqlTransaction<'conn>, P> {
type Error = Either<io::Error, rusqlite::Error>;
type SaplingShardStore<'a> =
SqliteShardStore<&'a rusqlite::Transaction<'a>, sapling::Node, SAPLING_SHARD_HEIGHT>;
fn with_sapling_tree_mut<F, A, E>(&mut self, mut callback: F) -> Result<A, E>
where
for<'a> F: FnMut(
&'a mut ShardTree<
Self::SaplingShardStore<'a>,
{ sapling::NOTE_COMMITMENT_TREE_DEPTH },
SAPLING_SHARD_HEIGHT,
>,
) -> Result<A, E>,
E: From<ShardTreeError<Either<io::Error, rusqlite::Error>>>,
{
let mut shardtree = ShardTree::new(
SqliteShardStore::from_connection(self.conn.0, SAPLING_TABLES_PREFIX)
.map_err(|e| ShardTreeError::Storage(Either::Right(e)))?,
PRUNING_DEPTH.try_into().unwrap(),
);
let result = callback(&mut shardtree)?;
Ok(result)
}
}
/// A handle for the SQLite block source. /// A handle for the SQLite block source.
pub struct BlockDb(Connection); pub struct BlockDb(Connection);
@ -646,17 +689,14 @@ impl BlockDb {
impl BlockSource for BlockDb { impl BlockSource for BlockDb {
type Error = SqliteClientError; type Error = SqliteClientError;
fn with_blocks<F, DbErrT, NoteRef>( fn with_blocks<F, DbErrT>(
&self, &self,
from_height: Option<BlockHeight>, from_height: Option<BlockHeight>,
limit: Option<u32>, limit: Option<u32>,
with_row: F, with_row: F,
) -> Result<(), data_api::chain::error::Error<DbErrT, Self::Error, NoteRef>> ) -> Result<(), data_api::chain::error::Error<DbErrT, Self::Error>>
where where
F: FnMut( F: FnMut(CompactBlock) -> Result<(), data_api::chain::error::Error<DbErrT, Self::Error>>,
CompactBlock,
)
-> Result<(), data_api::chain::error::Error<DbErrT, Self::Error, NoteRef>>,
{ {
chain::blockdb_with_blocks(self, from_height, limit, with_row) chain::blockdb_with_blocks(self, from_height, limit, with_row)
} }
@ -827,17 +867,14 @@ impl FsBlockDb {
impl BlockSource for FsBlockDb { impl BlockSource for FsBlockDb {
type Error = FsBlockDbError; type Error = FsBlockDbError;
fn with_blocks<F, DbErrT, NoteRef>( fn with_blocks<F, DbErrT>(
&self, &self,
from_height: Option<BlockHeight>, from_height: Option<BlockHeight>,
limit: Option<u32>, limit: Option<u32>,
with_row: F, with_row: F,
) -> Result<(), data_api::chain::error::Error<DbErrT, Self::Error, NoteRef>> ) -> Result<(), data_api::chain::error::Error<DbErrT, Self::Error>>
where where
F: FnMut( F: FnMut(CompactBlock) -> Result<(), data_api::chain::error::Error<DbErrT, Self::Error>>,
CompactBlock,
)
-> Result<(), data_api::chain::error::Error<DbErrT, Self::Error, NoteRef>>,
{ {
fsblockdb_with_blocks(self, from_height, limit, with_row) fsblockdb_with_blocks(self, from_height, limit, with_row)
} }
@ -925,7 +962,7 @@ mod tests {
data_api::{WalletRead, WalletWrite}, data_api::{WalletRead, WalletWrite},
keys::{sapling, UnifiedFullViewingKey}, keys::{sapling, UnifiedFullViewingKey},
proto::compact_formats::{ proto::compact_formats::{
CompactBlock, CompactSaplingOutput, CompactSaplingSpend, CompactTx, self as compact, CompactBlock, CompactSaplingOutput, CompactSaplingSpend, CompactTx,
}, },
}; };
@ -1024,6 +1061,7 @@ mod tests {
dfvk: &DiversifiableFullViewingKey, dfvk: &DiversifiableFullViewingKey,
req: AddressType, req: AddressType,
value: Amount, value: Amount,
initial_sapling_tree_size: u32,
) -> (CompactBlock, Nullifier) { ) -> (CompactBlock, Nullifier) {
let to = match req { let to = match req {
AddressType::DefaultExternal => dfvk.default_address().1, AddressType::DefaultExternal => dfvk.default_address().1,
@ -1069,6 +1107,11 @@ mod tests {
}; };
cb.prev_hash.extend_from_slice(&prev_hash.0); cb.prev_hash.extend_from_slice(&prev_hash.0);
cb.vtx.push(ctx); cb.vtx.push(ctx);
cb.chain_metadata = Some(compact::ChainMetadata {
sapling_commitment_tree_size: initial_sapling_tree_size
+ cb.vtx.iter().map(|tx| tx.outputs.len() as u32).sum::<u32>(),
..Default::default()
});
(cb, note.nf(&dfvk.fvk().vk.nk, 0)) (cb, note.nf(&dfvk.fvk().vk.nk, 0))
} }
@ -1081,6 +1124,7 @@ mod tests {
dfvk: &DiversifiableFullViewingKey, dfvk: &DiversifiableFullViewingKey,
to: PaymentAddress, to: PaymentAddress,
value: Amount, value: Amount,
initial_sapling_tree_size: u32,
) -> CompactBlock { ) -> CompactBlock {
let mut rng = OsRng; let mut rng = OsRng;
let rseed = generate_random_rseed(&network(), height, &mut rng); let rseed = generate_random_rseed(&network(), height, &mut rng);
@ -1154,6 +1198,11 @@ mod tests {
}; };
cb.prev_hash.extend_from_slice(&prev_hash.0); cb.prev_hash.extend_from_slice(&prev_hash.0);
cb.vtx.push(ctx); cb.vtx.push(ctx);
cb.chain_metadata = Some(compact::ChainMetadata {
sapling_commitment_tree_size: initial_sapling_tree_size
+ cb.vtx.iter().map(|tx| tx.outputs.len() as u32).sum::<u32>(),
..Default::default()
});
cb cb
} }
@ -1267,6 +1316,7 @@ mod tests {
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
Amount::from_u64(5).unwrap(), Amount::from_u64(5).unwrap(),
0,
); );
let (cb2, _) = fake_compact_block( let (cb2, _) = fake_compact_block(
BlockHeight::from_u32(2), BlockHeight::from_u32(2),
@ -1274,6 +1324,7 @@ mod tests {
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
Amount::from_u64(10).unwrap(), Amount::from_u64(10).unwrap(),
1,
); );
// Write the CompactBlocks to the BlockMeta DB's corresponding disk storage. // Write the CompactBlocks to the BlockMeta DB's corresponding disk storage.

View File

@ -0,0 +1,120 @@
//! Serialization formats for data stored as SQLite BLOBs
use byteorder::{ReadBytesExt, WriteBytesExt};
use core::ops::Deref;
use shardtree::{Node, PrunableTree, RetentionFlags, Tree};
use std::io::{self, Read, Write};
use std::rc::Rc;
use zcash_encoding::Optional;
use zcash_primitives::merkle_tree::HashSer;
const SER_V1: u8 = 1;
const NIL_TAG: u8 = 0;
const LEAF_TAG: u8 = 1;
const PARENT_TAG: u8 = 2;
/// Writes a [`PrunableTree`] to the provided [`Write`] instance.
///
/// This is the primary method used for ShardTree shard persistence. It writes a version identifier
/// for the most-current serialized form, followed by the tree data.
pub fn write_shard<H: HashSer, W: Write>(writer: &mut W, tree: &PrunableTree<H>) -> io::Result<()> {
fn write_inner<H: HashSer, W: Write>(
mut writer: &mut W,
tree: &PrunableTree<H>,
) -> io::Result<()> {
match tree.deref() {
Node::Parent { ann, left, right } => {
writer.write_u8(PARENT_TAG)?;
Optional::write(&mut writer, ann.as_ref(), |w, h| {
<H as HashSer>::write(h, w)
})?;
write_inner(writer, left)?;
write_inner(writer, right)?;
Ok(())
}
Node::Leaf { value } => {
writer.write_u8(LEAF_TAG)?;
value.0.write(&mut writer)?;
writer.write_u8(value.1.bits())?;
Ok(())
}
Node::Nil => {
writer.write_u8(NIL_TAG)?;
Ok(())
}
}
}
writer.write_u8(SER_V1)?;
write_inner(writer, tree)
}
fn read_shard_v1<H: HashSer, R: Read>(mut reader: &mut R) -> io::Result<PrunableTree<H>> {
match reader.read_u8()? {
PARENT_TAG => {
let ann = Optional::read(&mut reader, <H as HashSer>::read)?.map(Rc::new);
let left = read_shard_v1(reader)?;
let right = read_shard_v1(reader)?;
Ok(Tree::parent(ann, left, right))
}
LEAF_TAG => {
let value = <H as HashSer>::read(&mut reader)?;
let flags = reader.read_u8().and_then(|bits| {
RetentionFlags::from_bits(bits).ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Byte value {} does not correspond to a valid set of retention flags",
bits
),
)
})
})?;
Ok(Tree::leaf((value, flags)))
}
NIL_TAG => Ok(Tree::empty()),
other => Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Node tag not recognized: {}", other),
)),
}
}
/// Reads a [`PrunableTree`] from the provided [`Read`] instance.
///
/// This function operates by first parsing a 1-byte version identifier, and then dispatching to
/// the correct deserialization function for the observed version, or returns an
/// [`io::ErrorKind::InvalidData`] error in the case that the version is not recognized.
pub fn read_shard<H: HashSer, R: Read>(mut reader: R) -> io::Result<PrunableTree<H>> {
match reader.read_u8()? {
SER_V1 => read_shard_v1(&mut reader),
other => Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Shard serialization version not recognized: {}", other),
)),
}
}
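
Putting the two halves together: the on-disk form is one version byte followed by a pre-order walk of the tree, one tag byte per node (plus the annotation, hash, and retention-flag payloads for parents and leaves). A minimal roundtrip sketch; `sapling::Node` is used here only as a convenient `HashSer` implementation and is an assumption of the example, not a requirement of the format:

// Sketch: an empty shard serializes to exactly [SER_V1, NIL_TAG] and reads
// back as the same (empty) tree.
fn roundtrip_empty_shard() -> std::io::Result<()> {
    use std::io::Cursor;
    use zcash_primitives::sapling::Node;

    let tree: PrunableTree<Node> = Tree::empty();

    let mut bytes = Vec::new();
    write_shard(&mut bytes, &tree)?;
    assert_eq!(bytes, vec![SER_V1, NIL_TAG]);

    let parsed = read_shard::<Node, _>(Cursor::new(bytes))?;
    assert_eq!(parsed, tree);
    Ok(())
}
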
#[cfg(test)]
mod tests {
use incrementalmerkletree::frontier::testing::{arb_test_node, TestNode};
use proptest::prelude::*;
use shardtree::testing::arb_prunable_tree;
use std::io::Cursor;
use super::{read_shard, write_shard};
proptest! {
#[test]
fn check_shard_roundtrip(
tree in arb_prunable_tree(arb_test_node(), 8, 32)
) {
let mut tree_data = vec![];
write_shard(&mut tree_data, &tree).unwrap();
let cursor = Cursor::new(tree_data);
let tree_result = read_shard::<TestNode, _>(cursor).unwrap();
assert_eq!(tree, tree_result);
}
}
}

View File

@ -64,16 +64,16 @@
//! wallet. //! wallet.
//! - `memo` the shielded memo associated with the output, if any. //! - `memo` the shielded memo associated with the output, if any.
use rusqlite::{self, named_params, params, OptionalExtension, ToSql}; use rusqlite::{self, named_params, OptionalExtension, ToSql};
use std::collections::HashMap;
use std::convert::TryFrom; use std::convert::TryFrom;
use std::io::Cursor;
use std::{collections::HashMap, io};
use zcash_primitives::{ use zcash_primitives::{
block::BlockHash, block::BlockHash,
consensus::{self, BlockHeight, BranchId, NetworkUpgrade, Parameters}, consensus::{self, BlockHeight, BranchId, NetworkUpgrade, Parameters},
memo::{Memo, MemoBytes}, memo::{Memo, MemoBytes},
merkle_tree::write_commitment_tree, merkle_tree::read_commitment_tree,
sapling::CommitmentTree,
transaction::{components::Amount, Transaction, TxId}, transaction::{components::Amount, Transaction, TxId},
zip32::{ zip32::{
sapling::{DiversifiableFullViewingKey, ExtendedFullViewingKey}, sapling::{DiversifiableFullViewingKey, ExtendedFullViewingKey},
@ -83,13 +83,15 @@ use zcash_primitives::{
use zcash_client_backend::{ use zcash_client_backend::{
address::{RecipientAddress, UnifiedAddress}, address::{RecipientAddress, UnifiedAddress},
data_api::{PoolType, Recipient, SentTransactionOutput}, data_api::{BlockMetadata, PoolType, Recipient, SentTransactionOutput},
encoding::AddressCodec, encoding::AddressCodec,
keys::UnifiedFullViewingKey, keys::UnifiedFullViewingKey,
wallet::WalletTx, wallet::WalletTx,
}; };
use crate::{error::SqliteClientError, PRUNING_HEIGHT}; use crate::{
error::SqliteClientError, SqlTransaction, WalletCommitmentTrees, WalletDb, PRUNING_DEPTH,
};
#[cfg(feature = "transparent-inputs")] #[cfg(feature = "transparent-inputs")]
use { use {
@ -102,9 +104,12 @@ use {
}, },
}; };
pub(crate) mod commitment_tree;
pub mod init; pub mod init;
pub(crate) mod sapling; pub(crate) mod sapling;
pub(crate) const BLOCK_SAPLING_FRONTIER_ABSENT: &[u8] = &[0x0];
pub(crate) fn pool_code(pool_type: PoolType) -> i64 { pub(crate) fn pool_code(pool_type: PoolType) -> i64 {
// These constants are *incidentally* shared with the typecodes // These constants are *incidentally* shared with the typecodes
// for unified addresses, but this is exclusively an internal // for unified addresses, but this is exclusively an internal
@ -536,6 +541,95 @@ pub(crate) fn block_height_extrema(
}) })
} }
fn parse_block_metadata(
row: (BlockHeight, Vec<u8>, Option<u32>, Vec<u8>),
) -> Result<BlockMetadata, SqliteClientError> {
let (block_height, hash_data, sapling_tree_size_opt, sapling_tree) = row;
let sapling_tree_size = sapling_tree_size_opt.map_or_else(|| {
if sapling_tree == BLOCK_SAPLING_FRONTIER_ABSENT {
Err(SqliteClientError::CorruptedData("One of either the Sapling tree size or the legacy Sapling commitment tree must be present.".to_owned()))
} else {
// parse the legacy commitment tree data
read_commitment_tree::<
zcash_primitives::sapling::Node,
_,
{ zcash_primitives::sapling::NOTE_COMMITMENT_TREE_DEPTH },
>(Cursor::new(sapling_tree))
.map(|tree| tree.size().try_into().unwrap())
.map_err(SqliteClientError::from)
}
}, Ok)?;
let block_hash = BlockHash::try_from_slice(&hash_data).ok_or_else(|| {
SqliteClientError::from(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid block hash length: {}", hash_data.len()),
))
})?;
Ok(BlockMetadata::from_parts(
block_height,
block_hash,
sapling_tree_size,
))
}
pub(crate) fn block_metadata(
conn: &rusqlite::Connection,
block_height: BlockHeight,
) -> Result<Option<BlockMetadata>, SqliteClientError> {
conn.query_row(
"SELECT height, hash, sapling_commitment_tree_size, sapling_tree
FROM blocks
WHERE height = :block_height",
named_params![":block_height": u32::from(block_height)],
|row| {
let height: u32 = row.get(0)?;
let block_hash: Vec<u8> = row.get(1)?;
let sapling_tree_size: Option<u32> = row.get(2)?;
let sapling_tree: Vec<u8> = row.get(3)?;
Ok((
BlockHeight::from(height),
block_hash,
sapling_tree_size,
sapling_tree,
))
},
)
.optional()
.map_err(SqliteClientError::from)
.and_then(|meta_row| meta_row.map(parse_block_metadata).transpose())
}
pub(crate) fn block_fully_scanned(
conn: &rusqlite::Connection,
) -> Result<Option<BlockMetadata>, SqliteClientError> {
// FIXME: this will need to be rewritten once out-of-order scan range suggestion
// is implemented.
conn.query_row(
"SELECT height, hash, sapling_commitment_tree_size, sapling_tree
FROM blocks
ORDER BY height DESC
LIMIT 1",
[],
|row| {
let height: u32 = row.get(0)?;
let block_hash: Vec<u8> = row.get(1)?;
let sapling_tree_size: Option<u32> = row.get(2)?;
let sapling_tree: Vec<u8> = row.get(3)?;
Ok((
BlockHeight::from(height),
block_hash,
sapling_tree_size,
sapling_tree,
))
},
)
.optional()
.map_err(SqliteClientError::from)
.and_then(|meta_row| meta_row.map(parse_block_metadata).transpose())
}
/// Returns the block height at which the specified transaction was mined, /// Returns the block height at which the specified transaction was mined,
/// if any. /// if any.
pub(crate) fn get_tx_height( pub(crate) fn get_tx_height(
@ -607,7 +701,7 @@ pub(crate) fn truncate_to_height<P: consensus::Parameters>(
.map(|opt| opt.map_or_else(|| sapling_activation_height - 1, BlockHeight::from)) .map(|opt| opt.map_or_else(|| sapling_activation_height - 1, BlockHeight::from))
})?; })?;
if block_height < last_scanned_height - PRUNING_HEIGHT { if block_height < last_scanned_height - PRUNING_DEPTH {
if let Some(h) = get_min_unspent_height(conn)? { if let Some(h) = get_min_unspent_height(conn)? {
if block_height > h { if block_height > h {
return Err(SqliteClientError::RequestedRewindInvalid(h, block_height)); return Err(SqliteClientError::RequestedRewindInvalid(h, block_height));
@ -617,7 +711,16 @@ pub(crate) fn truncate_to_height<P: consensus::Parameters>(
// nothing to do if we're deleting back down to the max height // nothing to do if we're deleting back down to the max height
if block_height < last_scanned_height { if block_height < last_scanned_height {
// Decrement witnesses. // Truncate the note commitment trees
let mut wdb = WalletDb {
conn: SqlTransaction(conn),
params: params.clone(),
};
wdb.with_sapling_tree_mut(|tree| {
tree.truncate_removing_checkpoint(&block_height).map(|_| ())
})?;
// Remove any legacy Sapling witnesses
conn.execute( conn.execute(
"DELETE FROM sapling_witnesses WHERE block > ?", "DELETE FROM sapling_witnesses WHERE block > ?",
[u32::from(block_height)], [u32::from(block_height)],
@ -679,15 +782,18 @@ pub(crate) fn get_unspent_transparent_outputs<P: consensus::Parameters>(
FROM utxos u FROM utxos u
LEFT OUTER JOIN transactions tx LEFT OUTER JOIN transactions tx
ON tx.id_tx = u.spent_in_tx ON tx.id_tx = u.spent_in_tx
WHERE u.address = ? WHERE u.address = :address
AND u.height <= ? AND u.height <= :max_height
AND tx.block IS NULL", AND tx.block IS NULL",
)?; )?;
let addr_str = address.encode(params); let addr_str = address.encode(params);
let mut utxos = Vec::<WalletTransparentOutput>::new(); let mut utxos = Vec::<WalletTransparentOutput>::new();
let mut rows = stmt_blocks.query(params![addr_str, u32::from(max_height)])?; let mut rows = stmt_blocks.query(named_params![
":address": addr_str,
":max_height": u32::from(max_height)
])?;
let excluded: BTreeSet<OutPoint> = exclude.iter().cloned().collect(); let excluded: BTreeSet<OutPoint> = exclude.iter().cloned().collect();
while let Some(row) = rows.next()? { while let Some(row) = rows.next()? {
let txid: Vec<u8> = row.get(0)?; let txid: Vec<u8> = row.get(0)?;
@ -740,14 +846,17 @@ pub(crate) fn get_transparent_balances<P: consensus::Parameters>(
FROM utxos u FROM utxos u
LEFT OUTER JOIN transactions tx LEFT OUTER JOIN transactions tx
ON tx.id_tx = u.spent_in_tx ON tx.id_tx = u.spent_in_tx
WHERE u.received_by_account = ? WHERE u.received_by_account = :account_id
AND u.height <= ? AND u.height <= :max_height
AND tx.block IS NULL AND tx.block IS NULL
GROUP BY u.address", GROUP BY u.address",
)?; )?;
let mut res = HashMap::new(); let mut res = HashMap::new();
let mut rows = stmt_blocks.query(params![u32::from(account), u32::from(max_height)])?; let mut rows = stmt_blocks.query(named_params![
":account_id": u32::from(account),
":max_height": u32::from(max_height)
])?;
while let Some(row) = rows.next()? { while let Some(row) = rows.next()? {
let taddr_str: String = row.get(0)?; let taddr_str: String = row.get(0)?;
let taddr = TransparentAddress::decode(params, &taddr_str)?; let taddr = TransparentAddress::decode(params, &taddr_str)?;
@ -760,26 +869,61 @@ pub(crate) fn get_transparent_balances<P: consensus::Parameters>(
} }
/// Inserts information about a scanned block into the database. /// Inserts information about a scanned block into the database.
pub(crate) fn insert_block( pub(crate) fn put_block(
conn: &rusqlite::Connection, conn: &rusqlite::Transaction<'_>,
block_height: BlockHeight, block_height: BlockHeight,
block_hash: BlockHash, block_hash: BlockHash,
block_time: u32, block_time: u32,
commitment_tree: &CommitmentTree, sapling_commitment_tree_size: u32,
) -> Result<(), SqliteClientError> { ) -> Result<(), SqliteClientError> {
let mut encoded_tree = Vec::new(); let block_hash_data = conn
write_commitment_tree(commitment_tree, &mut encoded_tree).unwrap(); .query_row(
"SELECT hash FROM blocks WHERE height = ?",
[u32::from(block_height)],
|row| row.get::<_, Vec<u8>>(0),
)
.optional()?;
let mut stmt_insert_block = conn.prepare_cached( // Ensure that in the case of an upsert, we don't overwrite block data
"INSERT INTO blocks (height, hash, time, sapling_tree) // with information for a block with a different hash.
VALUES (?, ?, ?, ?)", if let Some(bytes) = block_hash_data {
let expected_hash = BlockHash::try_from_slice(&bytes).ok_or_else(|| {
SqliteClientError::CorruptedData(format!(
"Invalid block hash at height {}",
u32::from(block_height)
))
})?;
if expected_hash != block_hash {
return Err(SqliteClientError::BlockConflict(block_height));
}
}
let mut stmt_upsert_block = conn.prepare_cached(
"INSERT INTO blocks (
height,
hash,
time,
sapling_commitment_tree_size,
sapling_tree
)
VALUES (
:height,
:hash,
:block_time,
:sapling_commitment_tree_size,
x'00'
)
ON CONFLICT (height) DO UPDATE
SET hash = :hash,
time = :block_time,
sapling_commitment_tree_size = :sapling_commitment_tree_size",
)?; )?;
stmt_insert_block.execute(params![ stmt_upsert_block.execute(named_params![
u32::from(block_height), ":height": u32::from(block_height),
&block_hash.0[..], ":hash": &block_hash.0[..],
block_time, ":block_time": block_time,
encoded_tree ":sapling_commitment_tree_size": sapling_commitment_tree_size
])?; ])?;
Ok(()) Ok(())
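A short sketch (not part of the diff) of the upsert semantics, assuming `tx` is an open rusqlite::Transaction on the wallet database:

    // Inserting a block, then re-inserting the same height with the same hash,
    // is an idempotent upsert.
    put_block(&tx, BlockHeight::from(1u32), BlockHash([1; 32]), 1, 0)?;
    put_block(&tx, BlockHeight::from(1u32), BlockHash([1; 32]), 1, 0)?;
    // Re-inserting the same height with a *different* hash is rejected with
    // SqliteClientError::BlockConflict rather than silently overwriting.
    assert!(put_block(&tx, BlockHeight::from(1u32), BlockHash([2; 32]), 1, 0).is_err());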
@ -951,17 +1095,6 @@ pub(crate) fn put_legacy_transparent_utxo<P: consensus::Parameters>(
stmt_upsert_legacy_transparent_utxo.query_row(sql_args, |row| row.get::<_, i64>(0).map(UtxoId)) stmt_upsert_legacy_transparent_utxo.query_row(sql_args, |row| row.get::<_, i64>(0).map(UtxoId))
} }
/// Removes old incremental witnesses up to the given block height.
pub(crate) fn prune_witnesses(
conn: &rusqlite::Connection,
below_height: BlockHeight,
) -> Result<(), SqliteClientError> {
let mut stmt_prune_witnesses =
conn.prepare_cached("DELETE FROM sapling_witnesses WHERE block < ?")?;
stmt_prune_witnesses.execute([u32::from(below_height)])?;
Ok(())
}
/// Marks notes that have not been mined in transactions /// Marks notes that have not been mined in transactions
/// as expired, up to the given block height. /// as expired, up to the given block height.
pub(crate) fn update_expired_notes( pub(crate) fn update_expired_notes(
@ -1082,6 +1215,8 @@ pub(crate) fn put_sent_output<P: consensus::Parameters>(
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::num::NonZeroU32;
use secrecy::Secret; use secrecy::Secret;
use tempfile::NamedTempFile; use tempfile::NamedTempFile;
@ -1124,7 +1259,12 @@ mod tests {
); );
// We can't get an anchor height, as we have not scanned any blocks. // We can't get an anchor height, as we have not scanned any blocks.
assert_eq!(db_data.get_target_and_anchor_heights(10).unwrap(), None); assert_eq!(
db_data
.get_target_and_anchor_heights(NonZeroU32::new(10).unwrap())
.unwrap(),
None
);
// An invalid account has zero balance // An invalid account has zero balance
assert_matches!( assert_matches!(
View File

@ -0,0 +1,807 @@
use either::Either;
use rusqlite::{self, named_params, OptionalExtension};
use std::{
collections::BTreeSet,
io::{self, Cursor},
marker::PhantomData,
};
use incrementalmerkletree::{Address, Level, Position};
use shardtree::{Checkpoint, LocatedPrunableTree, PrunableTree, ShardStore, TreeState};
use zcash_primitives::{consensus::BlockHeight, merkle_tree::HashSer};
use crate::serialization::{read_shard, write_shard};
pub struct SqliteShardStore<C, H, const SHARD_HEIGHT: u8> {
pub(crate) conn: C,
table_prefix: &'static str,
_hash_type: PhantomData<H>,
}
impl<C, H, const SHARD_HEIGHT: u8> SqliteShardStore<C, H, SHARD_HEIGHT> {
const SHARD_ROOT_LEVEL: Level = Level::new(SHARD_HEIGHT);
pub(crate) fn from_connection(
conn: C,
table_prefix: &'static str,
) -> Result<Self, rusqlite::Error> {
Ok(SqliteShardStore {
conn,
table_prefix,
_hash_type: PhantomData,
})
}
}
impl<'conn, 'a: 'conn, H: HashSer, const SHARD_HEIGHT: u8> ShardStore
for SqliteShardStore<&'a rusqlite::Transaction<'conn>, H, SHARD_HEIGHT>
{
type H = H;
type CheckpointId = BlockHeight;
type Error = Either<io::Error, rusqlite::Error>;
fn get_shard(
&self,
shard_root: Address,
) -> Result<Option<LocatedPrunableTree<Self::H>>, Self::Error> {
get_shard(self.conn, self.table_prefix, shard_root)
}
fn last_shard(&self) -> Result<Option<LocatedPrunableTree<Self::H>>, Self::Error> {
last_shard(self.conn, self.table_prefix, Self::SHARD_ROOT_LEVEL)
}
fn put_shard(&mut self, subtree: LocatedPrunableTree<Self::H>) -> Result<(), Self::Error> {
put_shard(self.conn, self.table_prefix, subtree)
}
fn get_shard_roots(&self) -> Result<Vec<Address>, Self::Error> {
get_shard_roots(self.conn, self.table_prefix, Self::SHARD_ROOT_LEVEL)
}
fn truncate(&mut self, from: Address) -> Result<(), Self::Error> {
truncate(self.conn, self.table_prefix, from)
}
fn get_cap(&self) -> Result<PrunableTree<Self::H>, Self::Error> {
get_cap(self.conn, self.table_prefix)
}
fn put_cap(&mut self, cap: PrunableTree<Self::H>) -> Result<(), Self::Error> {
put_cap(self.conn, self.table_prefix, cap)
}
fn min_checkpoint_id(&self) -> Result<Option<Self::CheckpointId>, Self::Error> {
min_checkpoint_id(self.conn, self.table_prefix)
}
fn max_checkpoint_id(&self) -> Result<Option<Self::CheckpointId>, Self::Error> {
max_checkpoint_id(self.conn, self.table_prefix)
}
fn add_checkpoint(
&mut self,
checkpoint_id: Self::CheckpointId,
checkpoint: Checkpoint,
) -> Result<(), Self::Error> {
add_checkpoint(self.conn, self.table_prefix, checkpoint_id, checkpoint)
}
fn checkpoint_count(&self) -> Result<usize, Self::Error> {
checkpoint_count(self.conn, self.table_prefix)
}
fn get_checkpoint_at_depth(
&self,
checkpoint_depth: usize,
) -> Result<Option<(Self::CheckpointId, Checkpoint)>, Self::Error> {
get_checkpoint_at_depth(self.conn, self.table_prefix, checkpoint_depth)
}
fn get_checkpoint(
&self,
checkpoint_id: &Self::CheckpointId,
) -> Result<Option<Checkpoint>, Self::Error> {
get_checkpoint(self.conn, self.table_prefix, *checkpoint_id)
}
fn with_checkpoints<F>(&mut self, limit: usize, callback: F) -> Result<(), Self::Error>
where
F: FnMut(&Self::CheckpointId, &Checkpoint) -> Result<(), Self::Error>,
{
with_checkpoints(self.conn, self.table_prefix, limit, callback)
}
fn update_checkpoint_with<F>(
&mut self,
checkpoint_id: &Self::CheckpointId,
update: F,
) -> Result<bool, Self::Error>
where
F: Fn(&mut Checkpoint) -> Result<(), Self::Error>,
{
update_checkpoint_with(self.conn, self.table_prefix, *checkpoint_id, update)
}
fn remove_checkpoint(&mut self, checkpoint_id: &Self::CheckpointId) -> Result<(), Self::Error> {
remove_checkpoint(self.conn, self.table_prefix, *checkpoint_id)
}
fn truncate_checkpoints(
&mut self,
checkpoint_id: &Self::CheckpointId,
) -> Result<(), Self::Error> {
truncate_checkpoints(self.conn, self.table_prefix, *checkpoint_id)
}
}
impl<H: HashSer, const SHARD_HEIGHT: u8> ShardStore
for SqliteShardStore<rusqlite::Connection, H, SHARD_HEIGHT>
{
type H = H;
type CheckpointId = BlockHeight;
type Error = Either<io::Error, rusqlite::Error>;
fn get_shard(
&self,
shard_root: Address,
) -> Result<Option<LocatedPrunableTree<Self::H>>, Self::Error> {
get_shard(&self.conn, self.table_prefix, shard_root)
}
fn last_shard(&self) -> Result<Option<LocatedPrunableTree<Self::H>>, Self::Error> {
last_shard(&self.conn, self.table_prefix, Self::SHARD_ROOT_LEVEL)
}
fn put_shard(&mut self, subtree: LocatedPrunableTree<Self::H>) -> Result<(), Self::Error> {
let tx = self.conn.transaction().map_err(Either::Right)?;
put_shard(&tx, self.table_prefix, subtree)?;
tx.commit().map_err(Either::Right)?;
Ok(())
}
fn get_shard_roots(&self) -> Result<Vec<Address>, Self::Error> {
get_shard_roots(&self.conn, self.table_prefix, Self::SHARD_ROOT_LEVEL)
}
fn truncate(&mut self, from: Address) -> Result<(), Self::Error> {
truncate(&self.conn, self.table_prefix, from)
}
fn get_cap(&self) -> Result<PrunableTree<Self::H>, Self::Error> {
get_cap(&self.conn, self.table_prefix)
}
fn put_cap(&mut self, cap: PrunableTree<Self::H>) -> Result<(), Self::Error> {
put_cap(&self.conn, self.table_prefix, cap)
}
fn min_checkpoint_id(&self) -> Result<Option<Self::CheckpointId>, Self::Error> {
min_checkpoint_id(&self.conn, self.table_prefix)
}
fn max_checkpoint_id(&self) -> Result<Option<Self::CheckpointId>, Self::Error> {
max_checkpoint_id(&self.conn, self.table_prefix)
}
fn add_checkpoint(
&mut self,
checkpoint_id: Self::CheckpointId,
checkpoint: Checkpoint,
) -> Result<(), Self::Error> {
let tx = self.conn.transaction().map_err(Either::Right)?;
add_checkpoint(&tx, self.table_prefix, checkpoint_id, checkpoint)?;
tx.commit().map_err(Either::Right)
}
fn checkpoint_count(&self) -> Result<usize, Self::Error> {
checkpoint_count(&self.conn, self.table_prefix)
}
fn get_checkpoint_at_depth(
&self,
checkpoint_depth: usize,
) -> Result<Option<(Self::CheckpointId, Checkpoint)>, Self::Error> {
get_checkpoint_at_depth(&self.conn, self.table_prefix, checkpoint_depth)
}
fn get_checkpoint(
&self,
checkpoint_id: &Self::CheckpointId,
) -> Result<Option<Checkpoint>, Self::Error> {
get_checkpoint(&self.conn, self.table_prefix, *checkpoint_id)
}
fn with_checkpoints<F>(&mut self, limit: usize, callback: F) -> Result<(), Self::Error>
where
F: FnMut(&Self::CheckpointId, &Checkpoint) -> Result<(), Self::Error>,
{
let tx = self.conn.transaction().map_err(Either::Right)?;
with_checkpoints(&tx, self.table_prefix, limit, callback)?;
tx.commit().map_err(Either::Right)
}
fn update_checkpoint_with<F>(
&mut self,
checkpoint_id: &Self::CheckpointId,
update: F,
) -> Result<bool, Self::Error>
where
F: Fn(&mut Checkpoint) -> Result<(), Self::Error>,
{
let tx = self.conn.transaction().map_err(Either::Right)?;
let result = update_checkpoint_with(&tx, self.table_prefix, *checkpoint_id, update)?;
tx.commit().map_err(Either::Right)?;
Ok(result)
}
fn remove_checkpoint(&mut self, checkpoint_id: &Self::CheckpointId) -> Result<(), Self::Error> {
let tx = self.conn.transaction().map_err(Either::Right)?;
remove_checkpoint(&tx, self.table_prefix, *checkpoint_id)?;
tx.commit().map_err(Either::Right)
}
fn truncate_checkpoints(
&mut self,
checkpoint_id: &Self::CheckpointId,
) -> Result<(), Self::Error> {
let tx = self.conn.transaction().map_err(Either::Right)?;
truncate_checkpoints(&tx, self.table_prefix, *checkpoint_id)?;
tx.commit().map_err(Either::Right)
}
}
type Error = Either<io::Error, rusqlite::Error>;
pub(crate) fn get_shard<H: HashSer>(
conn: &rusqlite::Connection,
table_prefix: &'static str,
shard_root: Address,
) -> Result<Option<LocatedPrunableTree<H>>, Error> {
conn.query_row(
&format!(
"SELECT shard_data
FROM {}_tree_shards
WHERE shard_index = :shard_index",
table_prefix
),
named_params![":shard_index": shard_root.index()],
|row| row.get::<_, Vec<u8>>(0),
)
.optional()
.map_err(Either::Right)?
.map(|shard_data| {
let shard_tree = read_shard(&mut Cursor::new(shard_data)).map_err(Either::Left)?;
Ok(LocatedPrunableTree::from_parts(shard_root, shard_tree))
})
.transpose()
}
pub(crate) fn last_shard<H: HashSer>(
conn: &rusqlite::Connection,
table_prefix: &'static str,
shard_root_level: Level,
) -> Result<Option<LocatedPrunableTree<H>>, Error> {
conn.query_row(
&format!(
"SELECT shard_index, shard_data
FROM {}_tree_shards
ORDER BY shard_index DESC
LIMIT 1",
table_prefix
),
[],
|row| {
let shard_index: u64 = row.get(0)?;
let shard_data: Vec<u8> = row.get(1)?;
Ok((shard_index, shard_data))
},
)
.optional()
.map_err(Either::Right)?
.map(|(shard_index, shard_data)| {
let shard_root = Address::from_parts(shard_root_level, shard_index);
let shard_tree = read_shard(&mut Cursor::new(shard_data)).map_err(Either::Left)?;
Ok(LocatedPrunableTree::from_parts(shard_root, shard_tree))
})
.transpose()
}
pub(crate) fn put_shard<H: HashSer>(
conn: &rusqlite::Transaction<'_>,
table_prefix: &'static str,
subtree: LocatedPrunableTree<H>,
) -> Result<(), Error> {
let subtree_root_hash = subtree
.root()
.annotation()
.and_then(|ann| {
ann.as_ref().map(|rc| {
let mut root_hash = vec![];
rc.write(&mut root_hash)?;
Ok(root_hash)
})
})
.transpose()
.map_err(Either::Left)?;
let mut subtree_data = vec![];
write_shard(&mut subtree_data, subtree.root()).map_err(Either::Left)?;
let mut stmt_put_shard = conn
.prepare_cached(&format!(
"INSERT INTO {}_tree_shards (shard_index, root_hash, shard_data)
VALUES (:shard_index, :root_hash, :shard_data)
ON CONFLICT (shard_index) DO UPDATE
SET root_hash = :root_hash,
shard_data = :shard_data",
table_prefix
))
.map_err(Either::Right)?;
stmt_put_shard
.execute(named_params![
":shard_index": subtree.root_addr().index(),
":root_hash": subtree_root_hash,
":shard_data": subtree_data
])
.map_err(Either::Right)?;
Ok(())
}
pub(crate) fn get_shard_roots(
conn: &rusqlite::Connection,
table_prefix: &'static str,
shard_root_level: Level,
) -> Result<Vec<Address>, Error> {
let mut stmt = conn
.prepare(&format!(
"SELECT shard_index FROM {}_tree_shards ORDER BY shard_index",
table_prefix
))
.map_err(Either::Right)?;
let mut rows = stmt.query([]).map_err(Either::Right)?;
let mut res = vec![];
while let Some(row) = rows.next().map_err(Either::Right)? {
res.push(Address::from_parts(
shard_root_level,
row.get(0).map_err(Either::Right)?,
));
}
Ok(res)
}
pub(crate) fn truncate(
conn: &rusqlite::Connection,
table_prefix: &'static str,
from: Address,
) -> Result<(), Error> {
conn.execute(
&format!(
"DELETE FROM {}_tree_shards WHERE shard_index >= ?",
table_prefix
),
[from.index()],
)
.map_err(Either::Right)
.map(|_| ())
}
pub(crate) fn get_cap<H: HashSer>(
conn: &rusqlite::Connection,
table_prefix: &'static str,
) -> Result<PrunableTree<H>, Error> {
conn.query_row(
&format!("SELECT cap_data FROM {}_tree_cap", table_prefix),
[],
|row| row.get::<_, Vec<u8>>(0),
)
.optional()
.map_err(Either::Right)?
.map_or_else(
|| Ok(PrunableTree::empty()),
|cap_data| read_shard(&mut Cursor::new(cap_data)).map_err(Either::Left),
)
}
pub(crate) fn put_cap<H: HashSer>(
conn: &rusqlite::Connection,
table_prefix: &'static str,
cap: PrunableTree<H>,
) -> Result<(), Error> {
let mut stmt = conn
.prepare_cached(&format!(
"INSERT INTO {}_tree_cap (cap_id, cap_data)
VALUES (0, :cap_data)
ON CONFLICT (cap_id) DO UPDATE
SET cap_data = :cap_data",
table_prefix
))
.map_err(Either::Right)?;
let mut cap_data = vec![];
write_shard(&mut cap_data, &cap).map_err(Either::Left)?;
stmt.execute([cap_data]).map_err(Either::Right)?;
Ok(())
}
pub(crate) fn min_checkpoint_id(
conn: &rusqlite::Connection,
table_prefix: &'static str,
) -> Result<Option<BlockHeight>, Error> {
conn.query_row(
&format!(
"SELECT MIN(checkpoint_id) FROM {}_tree_checkpoints",
table_prefix
),
[],
|row| {
row.get::<_, Option<u32>>(0)
.map(|opt| opt.map(BlockHeight::from))
},
)
.map_err(Either::Right)
}
pub(crate) fn max_checkpoint_id(
conn: &rusqlite::Connection,
table_prefix: &'static str,
) -> Result<Option<BlockHeight>, Error> {
conn.query_row(
&format!(
"SELECT MAX(checkpoint_id) FROM {}_tree_checkpoints",
table_prefix
),
[],
|row| {
row.get::<_, Option<u32>>(0)
.map(|opt| opt.map(BlockHeight::from))
},
)
.map_err(Either::Right)
}
pub(crate) fn add_checkpoint(
conn: &rusqlite::Transaction<'_>,
table_prefix: &'static str,
checkpoint_id: BlockHeight,
checkpoint: Checkpoint,
) -> Result<(), Error> {
let mut stmt_insert_checkpoint = conn
.prepare_cached(&format!(
"INSERT INTO {}_tree_checkpoints (checkpoint_id, position)
VALUES (:checkpoint_id, :position)",
table_prefix
))
.map_err(Either::Right)?;
stmt_insert_checkpoint
.execute(named_params![
":checkpoint_id": u32::from(checkpoint_id),
":position": checkpoint.position().map(u64::from)
])
.map_err(Either::Right)?;
let mut stmt_insert_mark_removed = conn
.prepare_cached(&format!(
"INSERT INTO {}_tree_checkpoint_marks_removed (checkpoint_id, mark_removed_position)
VALUES (:checkpoint_id, :position)",
table_prefix
))
.map_err(Either::Right)?;
for pos in checkpoint.marks_removed() {
stmt_insert_mark_removed
.execute(named_params![
":checkpoint_id": u32::from(checkpoint_id),
":position": u64::from(*pos)
])
.map_err(Either::Right)?;
}
Ok(())
}
pub(crate) fn checkpoint_count(
conn: &rusqlite::Connection,
table_prefix: &'static str,
) -> Result<usize, Error> {
conn.query_row(
&format!("SELECT COUNT(*) FROM {}_tree_checkpoints", table_prefix),
[],
|row| row.get::<_, usize>(0),
)
.map_err(Either::Right)
}
pub(crate) fn get_checkpoint(
conn: &rusqlite::Connection,
table_prefix: &'static str,
checkpoint_id: BlockHeight,
) -> Result<Option<Checkpoint>, Error> {
let checkpoint_position = conn
.query_row(
&format!(
"SELECT position
FROM {}_tree_checkpoints
WHERE checkpoint_id = ?",
table_prefix
),
[u32::from(checkpoint_id)],
|row| {
row.get::<_, Option<u64>>(0)
.map(|opt| opt.map(Position::from))
},
)
.optional()
.map_err(Either::Right)?;
checkpoint_position
.map(|pos_opt| {
let mut stmt = conn
.prepare_cached(&format!(
"SELECT mark_removed_position
FROM {}_tree_checkpoint_marks_removed
WHERE checkpoint_id = ?",
table_prefix
))
.map_err(Either::Right)?;
let mark_removed_rows = stmt
.query([u32::from(checkpoint_id)])
.map_err(Either::Right)?;
let marks_removed = mark_removed_rows
.mapped(|row| row.get::<_, u64>(0).map(Position::from))
.collect::<Result<BTreeSet<_>, _>>()
.map_err(Either::Right)?;
Ok(Checkpoint::from_parts(
pos_opt.map_or(TreeState::Empty, TreeState::AtPosition),
marks_removed,
))
})
.transpose()
}
pub(crate) fn get_checkpoint_at_depth(
conn: &rusqlite::Connection,
table_prefix: &'static str,
checkpoint_depth: usize,
) -> Result<Option<(BlockHeight, Checkpoint)>, Error> {
if checkpoint_depth == 0 {
return Ok(None);
}
let checkpoint_parts = conn
.query_row(
&format!(
"SELECT checkpoint_id, position
FROM {}_tree_checkpoints
ORDER BY checkpoint_id DESC
LIMIT 1
OFFSET :offset",
table_prefix
),
named_params![":offset": checkpoint_depth - 1],
|row| {
let checkpoint_id: u32 = row.get(0)?;
let position: Option<u64> = row.get(1)?;
Ok((
BlockHeight::from(checkpoint_id),
position.map(Position::from),
))
},
)
.optional()
.map_err(Either::Right)?;
checkpoint_parts
.map(|(checkpoint_id, pos_opt)| {
let mut stmt = conn
.prepare_cached(&format!(
"SELECT mark_removed_position
FROM {}_tree_checkpoint_marks_removed
WHERE checkpoint_id = ?",
table_prefix
))
.map_err(Either::Right)?;
let mark_removed_rows = stmt
.query([u32::from(checkpoint_id)])
.map_err(Either::Right)?;
let marks_removed = mark_removed_rows
.mapped(|row| row.get::<_, u64>(0).map(Position::from))
.collect::<Result<BTreeSet<_>, _>>()
.map_err(Either::Right)?;
Ok((
checkpoint_id,
Checkpoint::from_parts(
pos_opt.map_or(TreeState::Empty, TreeState::AtPosition),
marks_removed,
),
))
})
.transpose()
}
pub(crate) fn with_checkpoints<F>(
conn: &rusqlite::Transaction<'_>,
table_prefix: &'static str,
limit: usize,
mut callback: F,
) -> Result<(), Error>
where
F: FnMut(&BlockHeight, &Checkpoint) -> Result<(), Error>,
{
let mut stmt_get_checkpoints = conn
.prepare_cached(&format!(
"SELECT checkpoint_id, position
FROM {}_tree_checkpoints
ORDER BY position
LIMIT :limit",
table_prefix
))
.map_err(Either::Right)?;
let mut stmt_get_checkpoint_marks_removed = conn
.prepare_cached(&format!(
"SELECT mark_removed_position
FROM {}_tree_checkpoint_marks_removed
WHERE checkpoint_id = :checkpoint_id",
table_prefix
))
.map_err(Either::Right)?;
let mut rows = stmt_get_checkpoints
.query(named_params![":limit": limit])
.map_err(Either::Right)?;
while let Some(row) = rows.next().map_err(Either::Right)? {
let checkpoint_id = row.get::<_, u32>(0).map_err(Either::Right)?;
let tree_state = row
.get::<_, Option<u64>>(1)
.map(|opt| opt.map_or_else(|| TreeState::Empty, |p| TreeState::AtPosition(p.into())))
.map_err(Either::Right)?;
let mark_removed_rows = stmt_get_checkpoint_marks_removed
.query(named_params![":checkpoint_id": checkpoint_id])
.map_err(Either::Right)?;
let marks_removed = mark_removed_rows
.mapped(|row| row.get::<_, u64>(0).map(Position::from))
.collect::<Result<BTreeSet<_>, _>>()
.map_err(Either::Right)?;
callback(
&BlockHeight::from(checkpoint_id),
&Checkpoint::from_parts(tree_state, marks_removed),
)?
}
Ok(())
}
pub(crate) fn update_checkpoint_with<F>(
conn: &rusqlite::Transaction<'_>,
table_prefix: &'static str,
checkpoint_id: BlockHeight,
update: F,
) -> Result<bool, Error>
where
F: Fn(&mut Checkpoint) -> Result<(), Error>,
{
if let Some(mut c) = get_checkpoint(conn, table_prefix, checkpoint_id)? {
update(&mut c)?;
remove_checkpoint(conn, table_prefix, checkpoint_id)?;
add_checkpoint(conn, table_prefix, checkpoint_id, c)?;
Ok(true)
} else {
Ok(false)
}
}
pub(crate) fn remove_checkpoint(
conn: &rusqlite::Transaction<'_>,
table_prefix: &'static str,
checkpoint_id: BlockHeight,
) -> Result<(), Error> {
// cascading delete here obviates the need to manually delete from
// `tree_checkpoint_marks_removed`
let mut stmt_delete_checkpoint = conn
.prepare_cached(&format!(
"DELETE FROM {}_tree_checkpoints
WHERE checkpoint_id = :checkpoint_id",
table_prefix
))
.map_err(Either::Right)?;
stmt_delete_checkpoint
.execute(named_params![":checkpoint_id": u32::from(checkpoint_id),])
.map_err(Either::Right)?;
Ok(())
}
pub(crate) fn truncate_checkpoints(
conn: &rusqlite::Transaction<'_>,
table_prefix: &'static str,
checkpoint_id: BlockHeight,
) -> Result<(), Error> {
// cascading delete here obviates the need to manually delete from
// `tree_checkpoint_marks_removed`
conn.execute(
&format!(
"DELETE FROM {}_tree_checkpoints WHERE checkpoint_id >= ?",
table_prefix
),
[u32::from(checkpoint_id)],
)
.map_err(Either::Right)?;
Ok(())
}
#[cfg(test)]
mod tests {
use tempfile::NamedTempFile;
use incrementalmerkletree::testing::{
check_append, check_checkpoint_rewind, check_remove_mark, check_rewind_remove_mark,
check_root_hashes, check_witness_consistency, check_witnesses,
};
use shardtree::ShardTree;
use super::SqliteShardStore;
use crate::{tests, wallet::init::init_wallet_db, WalletDb};
fn new_tree(m: usize) -> ShardTree<SqliteShardStore<rusqlite::Connection, String, 3>, 4, 3> {
let data_file = NamedTempFile::new().unwrap();
let mut db_data = WalletDb::for_path(data_file.path(), tests::network()).unwrap();
data_file.keep().unwrap();
init_wallet_db(&mut db_data, None).unwrap();
let store =
SqliteShardStore::<_, String, 3>::from_connection(db_data.conn, "sapling").unwrap();
ShardTree::new(store, m)
}
#[test]
fn append() {
check_append(new_tree);
}
#[test]
fn root_hashes() {
check_root_hashes(new_tree);
}
#[test]
fn witnesses() {
check_witnesses(new_tree);
}
#[test]
fn witness_consistency() {
check_witness_consistency(new_tree);
}
#[test]
fn checkpoint_rewind() {
check_checkpoint_rewind(new_tree);
}
#[test]
fn remove_mark() {
check_remove_mark(new_tree);
}
#[test]
fn rewind_remove_mark() {
check_rewind_remove_mark(new_tree);
}
}
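As a rough sketch (not part of this diff) of how the store is driven once wrapped in a `ShardTree`, reusing the `new_tree` helper from the test module above and the `Retention` type from `incrementalmerkletree`:

    use incrementalmerkletree::Retention;
    use zcash_primitives::consensus::BlockHeight;

    let mut tree = new_tree(100);
    // Append a leaf, checkpointing the tree at a (hypothetical) block height of 1
    // and marking the position so a witness can later be computed for it.
    tree.append(
        "note-commitment".to_string(),
        Retention::Checkpoint { id: BlockHeight::from(1u32), is_marked: true },
    )
    .unwrap();
    // Under the hood this lands in the `sapling_tree_shards`, `sapling_tree_cap`,
    // and `sapling_tree_checkpoints` tables created by the `shardtree_support`
    // migration below.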
View File
@ -1,23 +1,29 @@
//! Functions for initializing the various databases. //! Functions for initializing the various databases.
use std::collections::HashMap; use either::Either;
use std::fmt; use incrementalmerkletree::Retention;
use std::{collections::HashMap, fmt, io};
use rusqlite::{self, types::ToSql}; use rusqlite::{self, types::ToSql};
use schemer::{Migrator, MigratorError}; use schemer::{Migrator, MigratorError};
use schemer_rusqlite::RusqliteAdapter; use schemer_rusqlite::RusqliteAdapter;
use secrecy::SecretVec; use secrecy::SecretVec;
use shardtree::{ShardTree, ShardTreeError};
use uuid::Uuid; use uuid::Uuid;
use zcash_primitives::{ use zcash_primitives::{
block::BlockHash, block::BlockHash,
consensus::{self, BlockHeight}, consensus::{self, BlockHeight},
merkle_tree::read_commitment_tree,
sapling,
transaction::components::amount::BalanceError, transaction::components::amount::BalanceError,
zip32::AccountId, zip32::AccountId,
}; };
use zcash_client_backend::keys::UnifiedFullViewingKey; use zcash_client_backend::{data_api::SAPLING_SHARD_HEIGHT, keys::UnifiedFullViewingKey};
use crate::{error::SqliteClientError, wallet, WalletDb}; use crate::{error::SqliteClientError, wallet, WalletDb, PRUNING_DEPTH, SAPLING_TABLES_PREFIX};
use super::commitment_tree::SqliteShardStore;
mod migrations; mod migrations;
@ -34,6 +40,9 @@ pub enum WalletMigrationError {
/// Wrapper for amount balance violations /// Wrapper for amount balance violations
BalanceError(BalanceError), BalanceError(BalanceError),
/// Wrapper for commitment tree invariant violations
CommitmentTree(ShardTreeError<Either<io::Error, rusqlite::Error>>),
} }
impl From<rusqlite::Error> for WalletMigrationError { impl From<rusqlite::Error> for WalletMigrationError {
@ -48,6 +57,12 @@ impl From<BalanceError> for WalletMigrationError {
} }
} }
impl From<ShardTreeError<Either<io::Error, rusqlite::Error>>> for WalletMigrationError {
fn from(e: ShardTreeError<Either<io::Error, rusqlite::Error>>) -> Self {
WalletMigrationError::CommitmentTree(e)
}
}
impl fmt::Display for WalletMigrationError { impl fmt::Display for WalletMigrationError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self { match &self {
@ -62,6 +77,7 @@ impl fmt::Display for WalletMigrationError {
} }
WalletMigrationError::DbError(e) => write!(f, "{}", e), WalletMigrationError::DbError(e) => write!(f, "{}", e),
WalletMigrationError::BalanceError(e) => write!(f, "Balance error: {:?}", e), WalletMigrationError::BalanceError(e) => write!(f, "Balance error: {:?}", e),
WalletMigrationError::CommitmentTree(e) => write!(f, "Commitment tree error: {:?}", e),
} }
} }
} }
@ -226,7 +242,7 @@ pub fn init_accounts_table<P: consensus::Parameters>(
// Insert accounts atomically // Insert accounts atomically
for (account, key) in keys.iter() { for (account, key) in keys.iter() {
wallet::add_account(&wdb.conn.0, &wdb.params, *account, key)?; wallet::add_account(wdb.conn.0, &wdb.params, *account, key)?;
} }
Ok(()) Ok(())
@ -278,9 +294,21 @@ pub fn init_blocks_table<P: consensus::Parameters>(
return Err(SqliteClientError::TableNotEmpty); return Err(SqliteClientError::TableNotEmpty);
} }
let block_end_tree =
read_commitment_tree::<sapling::Node, _, { sapling::NOTE_COMMITMENT_TREE_DEPTH }>(
sapling_tree,
)
.map_err(|e| {
rusqlite::Error::FromSqlConversionFailure(
sapling_tree.len(),
rusqlite::types::Type::Blob,
Box::new(e),
)
})?;
wdb.conn.0.execute( wdb.conn.0.execute(
"INSERT INTO blocks (height, hash, time, sapling_tree) "INSERT INTO blocks (height, hash, time, sapling_tree)
VALUES (?, ?, ?, ?)", VALUES (?, ?, ?, ?)",
[ [
u32::from(height).to_sql()?, u32::from(height).to_sql()?,
hash.0.to_sql()?, hash.0.to_sql()?,
@ -289,6 +317,26 @@ pub fn init_blocks_table<P: consensus::Parameters>(
], ],
)?; )?;
if let Some(nonempty_frontier) = block_end_tree.to_frontier().value() {
let shard_store =
SqliteShardStore::<_, sapling::Node, SAPLING_SHARD_HEIGHT>::from_connection(
wdb.conn.0,
SAPLING_TABLES_PREFIX,
)?;
let mut shard_tree: ShardTree<
_,
{ sapling::NOTE_COMMITMENT_TREE_DEPTH },
SAPLING_SHARD_HEIGHT,
> = ShardTree::new(shard_store, PRUNING_DEPTH.try_into().unwrap());
shard_tree.insert_frontier_nodes(
nonempty_frontier.clone(),
Retention::Checkpoint {
id: height,
is_marked: false,
},
)?;
}
Ok(()) Ok(())
}) })
} }
@ -361,8 +409,9 @@ mod tests {
height INTEGER PRIMARY KEY, height INTEGER PRIMARY KEY,
hash BLOB NOT NULL, hash BLOB NOT NULL,
time INTEGER NOT NULL, time INTEGER NOT NULL,
sapling_tree BLOB NOT NULL sapling_tree BLOB NOT NULL ,
)", sapling_commitment_tree_size INTEGER,
orchard_commitment_tree_size INTEGER)",
"CREATE TABLE sapling_received_notes ( "CREATE TABLE sapling_received_notes (
id_note INTEGER PRIMARY KEY, id_note INTEGER PRIMARY KEY,
tx INTEGER NOT NULL, tx INTEGER NOT NULL,
@ -375,11 +424,36 @@ mod tests {
is_change INTEGER NOT NULL, is_change INTEGER NOT NULL,
memo BLOB, memo BLOB,
spent INTEGER, spent INTEGER,
commitment_tree_position INTEGER,
FOREIGN KEY (tx) REFERENCES transactions(id_tx), FOREIGN KEY (tx) REFERENCES transactions(id_tx),
FOREIGN KEY (account) REFERENCES accounts(account), FOREIGN KEY (account) REFERENCES accounts(account),
FOREIGN KEY (spent) REFERENCES transactions(id_tx), FOREIGN KEY (spent) REFERENCES transactions(id_tx),
CONSTRAINT tx_output UNIQUE (tx, output_index) CONSTRAINT tx_output UNIQUE (tx, output_index)
)", )",
"CREATE TABLE sapling_tree_cap (
-- cap_id exists only to be able to take advantage of `ON CONFLICT`
-- upsert functionality; the table will only ever contain one row
cap_id INTEGER PRIMARY KEY,
cap_data BLOB NOT NULL
)",
"CREATE TABLE sapling_tree_checkpoint_marks_removed (
checkpoint_id INTEGER NOT NULL,
mark_removed_position INTEGER NOT NULL,
FOREIGN KEY (checkpoint_id) REFERENCES sapling_tree_checkpoints(checkpoint_id)
ON DELETE CASCADE
)",
"CREATE TABLE sapling_tree_checkpoints (
checkpoint_id INTEGER PRIMARY KEY,
position INTEGER
)",
"CREATE TABLE sapling_tree_shards (
shard_index INTEGER PRIMARY KEY,
subtree_end_height INTEGER,
root_hash BLOB,
shard_data BLOB,
contains_marked INTEGER,
CONSTRAINT root_unique UNIQUE (root_hash)
)",
"CREATE TABLE sapling_witnesses ( "CREATE TABLE sapling_witnesses (
id_witness INTEGER PRIMARY KEY, id_witness INTEGER PRIMARY KEY,
note INTEGER NOT NULL, note INTEGER NOT NULL,
@ -842,7 +916,7 @@ mod tests {
// add a sapling sent note // add a sapling sent note
wdb.conn.execute( wdb.conn.execute(
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, '')", "INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, x'000000')",
[], [],
)?; )?;
@ -1006,7 +1080,7 @@ mod tests {
RecipientAddress::Transparent(*ufvk.default_address().0.transparent().unwrap()) RecipientAddress::Transparent(*ufvk.default_address().0.transparent().unwrap())
.encode(&tests::network()); .encode(&tests::network());
wdb.conn.execute( wdb.conn.execute(
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, '')", "INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, x'000000')",
[], [],
)?; )?;
wdb.conn.execute( wdb.conn.execute(
@ -1117,7 +1191,7 @@ mod tests {
BlockHeight::from(1u32), BlockHeight::from(1u32),
BlockHash([1; 32]), BlockHash([1; 32]),
1, 1,
&[], &[0x0, 0x0, 0x0],
) )
.unwrap(); .unwrap();
@ -1127,7 +1201,7 @@ mod tests {
BlockHeight::from(2u32), BlockHeight::from(2u32),
BlockHash([2; 32]), BlockHash([2; 32]),
2, 2,
&[], &[0x0, 0x0, 0x0],
) )
.unwrap_err(); .unwrap_err();
} }
View File
@ -4,6 +4,7 @@ mod addresses_table;
mod initial_setup; mod initial_setup;
mod received_notes_nullable_nf; mod received_notes_nullable_nf;
mod sent_notes_to_internal; mod sent_notes_to_internal;
mod shardtree_support;
mod ufvk_support; mod ufvk_support;
mod utxos_table; mod utxos_table;
mod v_transactions_net; mod v_transactions_net;
@ -46,5 +47,6 @@ pub(super) fn all_migrations<P: consensus::Parameters + 'static>(
Box::new(add_transaction_views::Migration), Box::new(add_transaction_views::Migration),
Box::new(v_transactions_net::Migration), Box::new(v_transactions_net::Migration),
Box::new(received_notes_nullable_nf::Migration), Box::new(received_notes_nullable_nf::Migration),
Box::new(shardtree_support::Migration),
] ]
} }
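For context, a rough sketch (not part of the diff) of how the new migration is applied along with the rest; `network` stands in for whatever consensus::Parameters the wallet uses:

    use zcash_client_sqlite::{wallet::init::init_wallet_db, WalletDb};

    let mut db_data = WalletDb::for_path("wallet.sqlite", network)?;
    // Runs every entry returned by all_migrations() in dependency order,
    // including the new shardtree_support migration.
    init_wallet_db(&mut db_data, None)?;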
View File
@ -327,7 +327,7 @@ mod tests {
.unwrap(); .unwrap();
db_data.conn.execute_batch( db_data.conn.execute_batch(
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, ''); "INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, x'00');
INSERT INTO transactions (block, id_tx, txid) VALUES (0, 0, ''); INSERT INTO transactions (block, id_tx, txid) VALUES (0, 0, '');
INSERT INTO sent_notes (tx, output_pool, output_index, from_account, address, value) INSERT INTO sent_notes (tx, output_pool, output_index, from_account, address, value)
@ -460,7 +460,7 @@ mod tests {
db_data db_data
.conn .conn
.execute_batch( .execute_batch(
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, '');", "INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, x'00');",
) )
.unwrap(); .unwrap();
db_data.conn.execute( db_data.conn.execute(
View File
@ -262,7 +262,7 @@ mod tests {
// Tx 0 contains two received notes of 2 and 5 zatoshis that are controlled by account 0. // Tx 0 contains two received notes of 2 and 5 zatoshis that are controlled by account 0.
db_data.conn.execute_batch( db_data.conn.execute_batch(
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, ''); "INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, x'00');
INSERT INTO transactions (block, id_tx, txid) VALUES (0, 0, 'tx0'); INSERT INTO transactions (block, id_tx, txid) VALUES (0, 0, 'tx0');
INSERT INTO received_notes (tx, output_index, account, diversifier, value, rcm, nf, is_change) INSERT INTO received_notes (tx, output_index, account, diversifier, value, rcm, nf, is_change)

View File
//! This migration adds tables to the wallet database that are needed to persist note commitment
//! tree data using the `shardtree` crate, and migrates existing witness data into these data
//! structures.
use std::collections::{BTreeSet, HashSet};
use incrementalmerkletree::Retention;
use rusqlite::{self, named_params, params};
use schemer;
use schemer_rusqlite::RusqliteMigration;
use shardtree::ShardTree;
use uuid::Uuid;
use zcash_client_backend::data_api::SAPLING_SHARD_HEIGHT;
use zcash_primitives::{
consensus::BlockHeight,
merkle_tree::{read_commitment_tree, read_incremental_witness},
sapling,
};
use crate::{
wallet::{
commitment_tree::SqliteShardStore,
init::{migrations::received_notes_nullable_nf, WalletMigrationError},
},
PRUNING_DEPTH, SAPLING_TABLES_PREFIX,
};
pub(super) const MIGRATION_ID: Uuid = Uuid::from_fields(
0x7da6489d,
0xe835,
0x4657,
b"\x8b\xe5\xf5\x12\xbc\xce\x6c\xbf",
);
pub(super) struct Migration;
impl schemer::Migration for Migration {
fn id(&self) -> Uuid {
MIGRATION_ID
}
fn dependencies(&self) -> HashSet<Uuid> {
[received_notes_nullable_nf::MIGRATION_ID]
.into_iter()
.collect()
}
fn description(&self) -> &'static str {
"Add support for receiving storage of note commitment tree data using the `shardtree` crate."
}
}
impl RusqliteMigration for Migration {
type Error = WalletMigrationError;
fn up(&self, transaction: &rusqlite::Transaction) -> Result<(), WalletMigrationError> {
// Add commitment tree sizes to block metadata.
transaction.execute_batch(
"ALTER TABLE blocks ADD COLUMN sapling_commitment_tree_size INTEGER;
ALTER TABLE blocks ADD COLUMN orchard_commitment_tree_size INTEGER;
ALTER TABLE sapling_received_notes ADD COLUMN commitment_tree_position INTEGER;",
)?;
// Add shard persistence
transaction.execute_batch(
"CREATE TABLE sapling_tree_shards (
shard_index INTEGER PRIMARY KEY,
subtree_end_height INTEGER,
root_hash BLOB,
shard_data BLOB,
contains_marked INTEGER,
CONSTRAINT root_unique UNIQUE (root_hash)
);
CREATE TABLE sapling_tree_cap (
-- cap_id exists only to be able to take advantage of `ON CONFLICT`
-- upsert functionality; the table will only ever contain one row
cap_id INTEGER PRIMARY KEY,
cap_data BLOB NOT NULL
);",
)?;
// Add checkpoint persistence
transaction.execute_batch(
"CREATE TABLE sapling_tree_checkpoints (
checkpoint_id INTEGER PRIMARY KEY,
position INTEGER
);
CREATE TABLE sapling_tree_checkpoint_marks_removed (
checkpoint_id INTEGER NOT NULL,
mark_removed_position INTEGER NOT NULL,
FOREIGN KEY (checkpoint_id) REFERENCES sapling_tree_checkpoints(checkpoint_id)
ON DELETE CASCADE
);",
)?;
let shard_store =
SqliteShardStore::<_, sapling::Node, SAPLING_SHARD_HEIGHT>::from_connection(
transaction,
SAPLING_TABLES_PREFIX,
)?;
let mut shard_tree: ShardTree<
_,
{ sapling::NOTE_COMMITMENT_TREE_DEPTH },
SAPLING_SHARD_HEIGHT,
> = ShardTree::new(shard_store, PRUNING_DEPTH.try_into().unwrap());
// Insert all the tree information that we can get from block-end commitment trees
{
let mut stmt_blocks = transaction.prepare("SELECT height, sapling_tree FROM blocks")?;
let mut stmt_update_block_sapling_tree_size = transaction
.prepare("UPDATE blocks SET sapling_commitment_tree_size = ? WHERE height = ?")?;
let mut block_rows = stmt_blocks.query([])?;
while let Some(row) = block_rows.next()? {
let block_height: u32 = row.get(0)?;
let sapling_tree_data: Vec<u8> = row.get(1)?;
let block_end_tree = read_commitment_tree::<
sapling::Node,
_,
{ sapling::NOTE_COMMITMENT_TREE_DEPTH },
>(&sapling_tree_data[..])
.map_err(|e| {
rusqlite::Error::FromSqlConversionFailure(
sapling_tree_data.len(),
rusqlite::types::Type::Blob,
Box::new(e),
)
})?;
stmt_update_block_sapling_tree_size
.execute(params![block_end_tree.size(), block_height])?;
if let Some(nonempty_frontier) = block_end_tree.to_frontier().value() {
shard_tree.insert_frontier_nodes(
nonempty_frontier.clone(),
Retention::Checkpoint {
id: BlockHeight::from(block_height),
is_marked: false,
},
)?;
}
}
}
// Insert all the tree information that we can get from existing incremental witnesses
{
let mut stmt_blocks =
transaction.prepare("SELECT note, block, witness FROM sapling_witnesses")?;
let mut stmt_set_note_position = transaction.prepare(
"UPDATE sapling_received_notes
SET commitment_tree_position = :position
WHERE id_note = :note_id",
)?;
let mut updated_note_positions = BTreeSet::new();
let mut rows = stmt_blocks.query([])?;
while let Some(row) = rows.next()? {
let note_id: i64 = row.get(0)?;
let block_height: u32 = row.get(1)?;
let row_data: Vec<u8> = row.get(2)?;
let witness = read_incremental_witness::<
sapling::Node,
_,
{ sapling::NOTE_COMMITMENT_TREE_DEPTH },
>(&row_data[..])
.map_err(|e| {
rusqlite::Error::FromSqlConversionFailure(
row_data.len(),
rusqlite::types::Type::Blob,
Box::new(e),
)
})?;
let witnessed_position = witness.witnessed_position();
if !updated_note_positions.contains(&witnessed_position) {
stmt_set_note_position.execute(named_params![
":note_id": note_id,
":position": u64::from(witnessed_position)
])?;
updated_note_positions.insert(witnessed_position);
}
shard_tree.insert_witness_nodes(witness, BlockHeight::from(block_height))?;
}
}
Ok(())
}
fn down(&self, _transaction: &rusqlite::Transaction) -> Result<(), WalletMigrationError> {
// TODO: something better than just panic?
panic!("Cannot revert this migration.");
}
}
View File
@ -253,7 +253,7 @@ mod tests {
// - Tx 0 contains two received notes of 2 and 5 zatoshis that are controlled by account 0. // - Tx 0 contains two received notes of 2 and 5 zatoshis that are controlled by account 0.
db_data.conn.execute_batch( db_data.conn.execute_batch(
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, ''); "INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (0, 0, 0, x'00');
INSERT INTO transactions (block, id_tx, txid) VALUES (0, 0, 'tx0'); INSERT INTO transactions (block, id_tx, txid) VALUES (0, 0, 'tx0');
INSERT INTO received_notes (tx, output_index, account, diversifier, value, rcm, nf, is_change) INSERT INTO received_notes (tx, output_index, account, diversifier, value, rcm, nf, is_change)
@ -265,7 +265,7 @@ mod tests {
// of 2 zatoshis. This is representative of a historic transaction where no `sent_notes` // of 2 zatoshis. This is representative of a historic transaction where no `sent_notes`
// entry was created for the change value. // entry was created for the change value.
db_data.conn.execute_batch( db_data.conn.execute_batch(
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (1, 1, 1, ''); "INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (1, 1, 1, x'00');
INSERT INTO transactions (block, id_tx, txid) VALUES (1, 1, 'tx1'); INSERT INTO transactions (block, id_tx, txid) VALUES (1, 1, 'tx1');
UPDATE received_notes SET spent = 1 WHERE tx = 0; UPDATE received_notes SET spent = 1 WHERE tx = 0;
INSERT INTO sent_notes (tx, output_pool, output_index, from_account, to_account, to_address, value) INSERT INTO sent_notes (tx, output_pool, output_index, from_account, to_account, to_address, value)
@ -279,7 +279,7 @@ mod tests {
// other half to the sending account as change. Also there's a random transparent utxo, // other half to the sending account as change. Also there's a random transparent utxo,
// received, who knows where it came from but it's for account 0. // received, who knows where it came from but it's for account 0.
db_data.conn.execute_batch( db_data.conn.execute_batch(
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (2, 2, 2, ''); "INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (2, 2, 2, x'00');
INSERT INTO transactions (block, id_tx, txid) VALUES (2, 2, 'tx2'); INSERT INTO transactions (block, id_tx, txid) VALUES (2, 2, 'tx2');
UPDATE received_notes SET spent = 2 WHERE tx = 1; UPDATE received_notes SET spent = 2 WHERE tx = 1;
INSERT INTO utxos (received_by_account, address, prevout_txid, prevout_idx, script, value_zat, height) INSERT INTO utxos (received_by_account, address, prevout_txid, prevout_idx, script, value_zat, height)
@ -297,7 +297,7 @@ mod tests {
// - Tx 3 just receives transparent funds and does nothing else. For this to work, the // - Tx 3 just receives transparent funds and does nothing else. For this to work, the
// transaction must be retrieved by the wallet. // transaction must be retrieved by the wallet.
db_data.conn.execute_batch( db_data.conn.execute_batch(
"INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (3, 3, 3, ''); "INSERT INTO blocks (height, hash, time, sapling_tree) VALUES (3, 3, 3, x'00');
INSERT INTO transactions (block, id_tx, txid) VALUES (3, 3, 'tx3'); INSERT INTO transactions (block, id_tx, txid) VALUES (3, 3, 'tx3');
INSERT INTO utxos (received_by_account, address, prevout_txid, prevout_idx, script, value_zat, height) INSERT INTO utxos (received_by_account, address, prevout_txid, prevout_idx, script, value_zat, height)
View File
@ -1,12 +1,13 @@
//! Functions for Sapling support in the wallet. //! Functions for Sapling support in the wallet.
use group::ff::PrimeField; use group::ff::PrimeField;
use rusqlite::{named_params, params, types::Value, Connection, OptionalExtension, Row}; use incrementalmerkletree::Position;
use rusqlite::{named_params, params, types::Value, Connection, Row};
use std::rc::Rc; use std::rc::Rc;
use zcash_primitives::{ use zcash_primitives::{
consensus::BlockHeight, consensus::BlockHeight,
memo::MemoBytes, memo::MemoBytes,
merkle_tree::{read_commitment_tree, read_incremental_witness, write_incremental_witness},
sapling::{self, Diversifier, Note, Nullifier, Rseed}, sapling::{self, Diversifier, Note, Nullifier, Rseed},
transaction::components::Amount, transaction::components::Amount,
zip32::AccountId, zip32::AccountId,
@ -28,10 +29,11 @@ pub(crate) trait ReceivedSaplingOutput {
fn note(&self) -> &Note; fn note(&self) -> &Note;
fn memo(&self) -> Option<&MemoBytes>; fn memo(&self) -> Option<&MemoBytes>;
fn is_change(&self) -> bool; fn is_change(&self) -> bool;
fn nullifier(&self) -> Option<&Nullifier>; fn nullifier(&self) -> Option<&sapling::Nullifier>;
fn note_commitment_tree_position(&self) -> Option<Position>;
} }
impl ReceivedSaplingOutput for WalletSaplingOutput<Nullifier> { impl ReceivedSaplingOutput for WalletSaplingOutput<sapling::Nullifier> {
fn index(&self) -> usize { fn index(&self) -> usize {
self.index() self.index()
} }
@ -47,10 +49,12 @@ impl ReceivedSaplingOutput for WalletSaplingOutput<Nullifier> {
fn is_change(&self) -> bool { fn is_change(&self) -> bool {
WalletSaplingOutput::is_change(self) WalletSaplingOutput::is_change(self)
} }
fn nullifier(&self) -> Option<&sapling::Nullifier> {
fn nullifier(&self) -> Option<&Nullifier> {
Some(self.nf()) Some(self.nf())
} }
fn note_commitment_tree_position(&self) -> Option<Position> {
Some(WalletSaplingOutput::note_commitment_tree_position(self))
}
} }
impl ReceivedSaplingOutput for DecryptedOutput<Note> { impl ReceivedSaplingOutput for DecryptedOutput<Note> {
@ -69,7 +73,10 @@ impl ReceivedSaplingOutput for DecryptedOutput<Note> {
fn is_change(&self) -> bool { fn is_change(&self) -> bool {
self.transfer_type == TransferType::WalletInternal self.transfer_type == TransferType::WalletInternal
} }
fn nullifier(&self) -> Option<&Nullifier> { fn nullifier(&self) -> Option<&sapling::Nullifier> {
None
}
fn note_commitment_tree_position(&self) -> Option<Position> {
None None
} }
} }
@ -105,17 +112,17 @@ fn to_spendable_note(row: &Row) -> Result<ReceivedSaplingNote<NoteId>, SqliteCli
Rseed::BeforeZip212(rcm) Rseed::BeforeZip212(rcm)
}; };
let witness = { let note_commitment_tree_position =
let d: Vec<_> = row.get(4)?; Position::from(u64::try_from(row.get::<_, i64>(4)?).map_err(|_| {
read_incremental_witness(&d[..])? SqliteClientError::CorruptedData("Note commitment tree position invalid.".to_string())
}; })?);
Ok(ReceivedSaplingNote { Ok(ReceivedSaplingNote {
note_id, note_id,
diversifier, diversifier,
note_value, note_value,
rseed, rseed,
witness, note_commitment_tree_position,
}) })
} }
@ -126,15 +133,13 @@ pub(crate) fn get_spendable_sapling_notes(
exclude: &[NoteId], exclude: &[NoteId],
) -> Result<Vec<ReceivedSaplingNote<NoteId>>, SqliteClientError> { ) -> Result<Vec<ReceivedSaplingNote<NoteId>>, SqliteClientError> {
let mut stmt_select_notes = conn.prepare_cached( let mut stmt_select_notes = conn.prepare_cached(
"SELECT id_note, diversifier, value, rcm, witness "SELECT id_note, diversifier, value, rcm, commitment_tree_position
FROM sapling_received_notes FROM sapling_received_notes
INNER JOIN transactions ON transactions.id_tx = sapling_received_notes.tx INNER JOIN transactions ON transactions.id_tx = sapling_received_notes.tx
INNER JOIN sapling_witnesses ON sapling_witnesses.note = sapling_received_notes.id_note WHERE account = :account
WHERE account = :account AND spent IS NULL
AND spent IS NULL AND transactions.block <= :anchor_height
AND transactions.block <= :anchor_height AND id_note NOT IN rarray(:exclude)",
AND sapling_witnesses.block = :anchor_height
AND id_note NOT IN rarray(:exclude)",
)?; )?;
let excluded: Vec<Value> = exclude let excluded: Vec<Value> = exclude
@ -184,28 +189,22 @@ pub(crate) fn select_spendable_sapling_notes(
// //
// 4) Match the selected notes against the witnesses at the desired height. // 4) Match the selected notes against the witnesses at the desired height.
let mut stmt_select_notes = conn.prepare_cached( let mut stmt_select_notes = conn.prepare_cached(
"WITH selected AS ( "WITH eligible AS (
WITH eligible AS ( SELECT id_note, diversifier, value, rcm, commitment_tree_position,
SELECT id_note, diversifier, value, rcm, SUM(value)
SUM(value) OVER OVER (PARTITION BY account, spent ORDER BY id_note) AS so_far
(PARTITION BY account, spent ORDER BY id_note) AS so_far FROM sapling_received_notes
FROM sapling_received_notes INNER JOIN transactions ON transactions.id_tx = sapling_received_notes.tx
INNER JOIN transactions ON transactions.id_tx = sapling_received_notes.tx WHERE account = :account
WHERE account = :account AND spent IS NULL
AND spent IS NULL AND transactions.block <= :anchor_height
AND transactions.block <= :anchor_height AND id_note NOT IN rarray(:exclude)
AND id_note NOT IN rarray(:exclude) )
) SELECT id_note, diversifier, value, rcm, commitment_tree_position
SELECT * FROM eligible WHERE so_far < :target_value FROM eligible WHERE so_far < :target_value
UNION UNION
SELECT * FROM (SELECT * FROM eligible WHERE so_far >= :target_value LIMIT 1) SELECT id_note, diversifier, value, rcm, commitment_tree_position
), witnesses AS ( FROM (SELECT * from eligible WHERE so_far >= :target_value LIMIT 1)",
SELECT note, witness FROM sapling_witnesses
WHERE block = :anchor_height
)
SELECT selected.id_note, selected.diversifier, selected.value, selected.rcm, witnesses.witness
FROM selected
INNER JOIN witnesses ON selected.id_note = witnesses.note",
)?; )?;
let excluded: Vec<Value> = exclude let excluded: Vec<Value> = exclude
@ -230,73 +229,6 @@ pub(crate) fn select_spendable_sapling_notes(
notes.collect::<Result<_, _>>() notes.collect::<Result<_, _>>()
} }
/// Returns the commitment tree for the block at the specified height,
/// if any.
pub(crate) fn get_sapling_commitment_tree(
conn: &Connection,
block_height: BlockHeight,
) -> Result<Option<sapling::CommitmentTree>, SqliteClientError> {
conn.query_row_and_then(
"SELECT sapling_tree FROM blocks WHERE height = ?",
[u32::from(block_height)],
|row| {
let row_data: Vec<u8> = row.get(0)?;
read_commitment_tree(&row_data[..]).map_err(|e| {
rusqlite::Error::FromSqlConversionFailure(
row_data.len(),
rusqlite::types::Type::Blob,
Box::new(e),
)
})
},
)
.optional()
.map_err(SqliteClientError::from)
}
/// Returns the incremental witnesses for the block at the specified height,
/// if any.
pub(crate) fn get_sapling_witnesses(
conn: &Connection,
block_height: BlockHeight,
) -> Result<Vec<(NoteId, sapling::IncrementalWitness)>, SqliteClientError> {
let mut stmt_fetch_witnesses =
conn.prepare_cached("SELECT note, witness FROM sapling_witnesses WHERE block = ?")?;
let witnesses = stmt_fetch_witnesses
.query_map([u32::from(block_height)], |row| {
let id_note = NoteId::ReceivedNoteId(row.get(0)?);
let witness_data: Vec<u8> = row.get(1)?;
Ok(read_incremental_witness(&witness_data[..]).map(|witness| (id_note, witness)))
})
.map_err(SqliteClientError::from)?;
// unwrap database error & IO error from IncrementalWitness::read
let res: Vec<_> = witnesses.collect::<Result<Result<_, _>, _>>()??;
Ok(res)
}
/// Records the incremental witness for the specified note,
/// as of the given block height.
pub(crate) fn insert_witness(
conn: &Connection,
note_id: i64,
witness: &sapling::IncrementalWitness,
height: BlockHeight,
) -> Result<(), SqliteClientError> {
let mut stmt_insert_witness = conn.prepare_cached(
"INSERT INTO sapling_witnesses (note, block, witness)
VALUES (?, ?, ?)",
)?;
let mut encoded = Vec::new();
write_incremental_witness(witness, &mut encoded).unwrap();
stmt_insert_witness.execute(params![note_id, u32::from(height), encoded])?;
Ok(())
}
/// Retrieves the set of nullifiers for "potentially spendable" Sapling notes that the /// Retrieves the set of nullifiers for "potentially spendable" Sapling notes that the
/// wallet is tracking. /// wallet is tracking.
/// ///
@ -320,7 +252,7 @@ pub(crate) fn get_sapling_nullifiers(
let nf_bytes: Vec<u8> = row.get(2)?; let nf_bytes: Vec<u8> = row.get(2)?;
Ok(( Ok((
AccountId::from(account), AccountId::from(account),
Nullifier::from_slice(&nf_bytes).unwrap(), sapling::Nullifier::from_slice(&nf_bytes).unwrap(),
)) ))
})?; })?;
@ -343,7 +275,7 @@ pub(crate) fn get_all_sapling_nullifiers(
let nf_bytes: Vec<u8> = row.get(2)?; let nf_bytes: Vec<u8> = row.get(2)?;
Ok(( Ok((
AccountId::from(account), AccountId::from(account),
Nullifier::from_slice(&nf_bytes).unwrap(), sapling::Nullifier::from_slice(&nf_bytes).unwrap(),
)) ))
})?; })?;
@ -359,7 +291,7 @@ pub(crate) fn get_all_sapling_nullifiers(
pub(crate) fn mark_sapling_note_spent( pub(crate) fn mark_sapling_note_spent(
conn: &Connection, conn: &Connection,
tx_ref: i64, tx_ref: i64,
nf: &Nullifier, nf: &sapling::Nullifier,
) -> Result<bool, SqliteClientError> { ) -> Result<bool, SqliteClientError> {
let mut stmt_mark_sapling_note_spent = let mut stmt_mark_sapling_note_spent =
conn.prepare_cached("UPDATE sapling_received_notes SET spent = ? WHERE nf = ?")?; conn.prepare_cached("UPDATE sapling_received_notes SET spent = ? WHERE nf = ?")?;
@ -383,9 +315,19 @@ pub(crate) fn put_received_note<T: ReceivedSaplingOutput>(
) -> Result<NoteId, SqliteClientError> { ) -> Result<NoteId, SqliteClientError> {
let mut stmt_upsert_received_note = conn.prepare_cached( let mut stmt_upsert_received_note = conn.prepare_cached(
"INSERT INTO sapling_received_notes "INSERT INTO sapling_received_notes
(tx, output_index, account, diversifier, value, rcm, memo, nf, is_change) (tx, output_index, account, diversifier, value, rcm, memo, nf, is_change, commitment_tree_position)
VALUES VALUES (
(:tx, :output_index, :account, :diversifier, :value, :rcm, :memo, :nf, :is_change) :tx,
:output_index,
:account,
:diversifier,
:value,
:rcm,
:memo,
:nf,
:is_change,
:commitment_tree_position
)
ON CONFLICT (tx, output_index) DO UPDATE ON CONFLICT (tx, output_index) DO UPDATE
SET account = :account, SET account = :account,
diversifier = :diversifier, diversifier = :diversifier,
@ -393,7 +335,8 @@ pub(crate) fn put_received_note<T: ReceivedSaplingOutput>(
rcm = :rcm, rcm = :rcm,
nf = IFNULL(:nf, nf), nf = IFNULL(:nf, nf),
memo = IFNULL(:memo, memo), memo = IFNULL(:memo, memo),
is_change = IFNULL(:is_change, is_change) is_change = IFNULL(:is_change, is_change),
commitment_tree_position = IFNULL(:commitment_tree_position, commitment_tree_position)
RETURNING id_note", RETURNING id_note",
)?; )?;
@ -410,7 +353,8 @@ pub(crate) fn put_received_note<T: ReceivedSaplingOutput>(
":rcm": &rcm.as_ref(), ":rcm": &rcm.as_ref(),
":nf": output.nullifier().map(|nf| nf.0.as_ref()), ":nf": output.nullifier().map(|nf| nf.0.as_ref()),
":memo": memo_repr(output.memo()), ":memo": memo_repr(output.memo()),
":is_change": output.is_change() ":is_change": output.is_change(),
":commitment_tree_position": output.note_commitment_tree_position().map(u64::from),
]; ];
stmt_upsert_received_note stmt_upsert_received_note
@ -422,7 +366,9 @@ pub(crate) fn put_received_note<T: ReceivedSaplingOutput>(
#[cfg(test)] #[cfg(test)]
#[allow(deprecated)] #[allow(deprecated)]
mod tests { pub(crate) mod tests {
use std::num::NonZeroU32;
use rusqlite::Connection; use rusqlite::Connection;
use secrecy::Secret; use secrecy::Secret;
use tempfile::NamedTempFile; use tempfile::NamedTempFile;
@ -481,7 +427,7 @@ mod tests {
}, },
}; };
fn test_prover() -> impl TxProver { pub(crate) fn test_prover() -> impl TxProver {
match LocalTxProver::with_default_location() { match LocalTxProver::with_default_location() {
Some(tx_prover) => tx_prover, Some(tx_prover) => tx_prover,
None => { None => {
@ -517,7 +463,7 @@ mod tests {
Amount::from_u64(1).unwrap(), Amount::from_u64(1).unwrap(),
None, None,
OvkPolicy::Sender, OvkPolicy::Sender,
10, NonZeroU32::new(1).unwrap(),
), ),
Err(data_api::error::Error::KeyNotRecognized) Err(data_api::error::Error::KeyNotRecognized)
); );
@ -546,7 +492,7 @@ mod tests {
Amount::from_u64(1).unwrap(), Amount::from_u64(1).unwrap(),
None, None,
OvkPolicy::Sender, OvkPolicy::Sender,
10, NonZeroU32::new(1).unwrap(),
), ),
Err(data_api::error::Error::ScanRequired) Err(data_api::error::Error::ScanRequired)
); );
@ -562,7 +508,7 @@ mod tests {
BlockHeight::from(1u32), BlockHeight::from(1u32),
BlockHash([1; 32]), BlockHash([1; 32]),
1, 1,
&[], &[0x0, 0x0, 0x0],
) )
.unwrap(); .unwrap();
@ -589,7 +535,7 @@ mod tests {
Amount::from_u64(1).unwrap(), Amount::from_u64(1).unwrap(),
None, None,
OvkPolicy::Sender, OvkPolicy::Sender,
10, NonZeroU32::new(1).unwrap(),
), ),
Err(data_api::error::Error::InsufficientFunds { Err(data_api::error::Error::InsufficientFunds {
available, available,
@ -616,18 +562,22 @@ mod tests {
// Add funds to the wallet in a single note // Add funds to the wallet in a single note
let value = Amount::from_u64(50000).unwrap(); let value = Amount::from_u64(50000).unwrap();
let (cb, _) = fake_compact_block( let (mut cb, _) = fake_compact_block(
sapling_activation_height(), sapling_activation_height(),
BlockHash([0; 32]), BlockHash([0; 32]),
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
value, value,
0,
); );
insert_into_cache(&db_cache, &cb); insert_into_cache(&db_cache, &cb);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Verified balance matches total balance // Verified balance matches total balance
let (_, anchor_height) = db_data.get_target_and_anchor_heights(10).unwrap().unwrap(); let (_, anchor_height) = db_data
.get_target_and_anchor_heights(NonZeroU32::new(10).unwrap())
.unwrap()
.unwrap();
assert_eq!( assert_eq!(
get_balance(&db_data.conn, AccountId::from(0)).unwrap(), get_balance(&db_data.conn, AccountId::from(0)).unwrap(),
value value
@ -638,18 +588,23 @@ mod tests {
); );
// Add more funds to the wallet in a second note // Add more funds to the wallet in a second note
let (cb, _) = fake_compact_block( cb = fake_compact_block(
sapling_activation_height() + 1, sapling_activation_height() + 1,
cb.hash(), cb.hash(),
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
value, value,
); 1,
)
.0;
insert_into_cache(&db_cache, &cb); insert_into_cache(&db_cache, &cb);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Verified balance does not include the second note // Verified balance does not include the second note
let (_, anchor_height2) = db_data.get_target_and_anchor_heights(10).unwrap().unwrap(); let (_, anchor_height2) = db_data
.get_target_and_anchor_heights(NonZeroU32::new(10).unwrap())
.unwrap()
.unwrap();
assert_eq!( assert_eq!(
get_balance(&db_data.conn, AccountId::from(0)).unwrap(), get_balance(&db_data.conn, AccountId::from(0)).unwrap(),
(value + value).unwrap() (value + value).unwrap()
@ -672,7 +627,7 @@ mod tests {
Amount::from_u64(70000).unwrap(), Amount::from_u64(70000).unwrap(),
None, None,
OvkPolicy::Sender, OvkPolicy::Sender,
10, NonZeroU32::new(10).unwrap(),
), ),
Err(data_api::error::Error::InsufficientFunds { Err(data_api::error::Error::InsufficientFunds {
available, available,
@ -685,16 +640,18 @@ mod tests {
// Mine blocks SAPLING_ACTIVATION_HEIGHT + 2 to 9 until just before the second // Mine blocks SAPLING_ACTIVATION_HEIGHT + 2 to 9 until just before the second
// note is verified // note is verified
for i in 2..10 { for i in 2..10 {
let (cb, _) = fake_compact_block( cb = fake_compact_block(
sapling_activation_height() + i, sapling_activation_height() + i,
cb.hash(), cb.hash(),
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
value, value,
); i,
)
.0;
insert_into_cache(&db_cache, &cb); insert_into_cache(&db_cache, &cb);
} }
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Second spend still fails // Second spend still fails
assert_matches!( assert_matches!(
@ -707,7 +664,7 @@ mod tests {
Amount::from_u64(70000).unwrap(), Amount::from_u64(70000).unwrap(),
None, None,
OvkPolicy::Sender, OvkPolicy::Sender,
10, NonZeroU32::new(10).unwrap(),
), ),
Err(data_api::error::Error::InsufficientFunds { Err(data_api::error::Error::InsufficientFunds {
available, available,
@ -718,15 +675,17 @@ mod tests {
); );
// Mine block 11 so that the second note becomes verified // Mine block 11 so that the second note becomes verified
let (cb, _) = fake_compact_block( cb = fake_compact_block(
sapling_activation_height() + 10, sapling_activation_height() + 10,
cb.hash(), cb.hash(),
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
value, value,
); 11,
)
.0;
insert_into_cache(&db_cache, &cb); insert_into_cache(&db_cache, &cb);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Second spend should now succeed // Second spend should now succeed
assert_matches!( assert_matches!(
@ -739,7 +698,7 @@ mod tests {
Amount::from_u64(70000).unwrap(), Amount::from_u64(70000).unwrap(),
None, None,
OvkPolicy::Sender, OvkPolicy::Sender,
10, NonZeroU32::new(10).unwrap(),
), ),
Ok(_) Ok(_)
); );
@ -762,15 +721,16 @@ mod tests {
// Add funds to the wallet in a single note // Add funds to the wallet in a single note
let value = Amount::from_u64(50000).unwrap(); let value = Amount::from_u64(50000).unwrap();
let (cb, _) = fake_compact_block( let (mut cb, _) = fake_compact_block(
sapling_activation_height(), sapling_activation_height(),
BlockHash([0; 32]), BlockHash([0; 32]),
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
value, value,
0,
); );
insert_into_cache(&db_cache, &cb); insert_into_cache(&db_cache, &cb);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
assert_eq!( assert_eq!(
get_balance(&db_data.conn, AccountId::from(0)).unwrap(), get_balance(&db_data.conn, AccountId::from(0)).unwrap(),
value value
@ -789,7 +749,7 @@ mod tests {
Amount::from_u64(15000).unwrap(), Amount::from_u64(15000).unwrap(),
None, None,
OvkPolicy::Sender, OvkPolicy::Sender,
10, NonZeroU32::new(1).unwrap(),
), ),
Ok(_) Ok(_)
); );
@ -805,7 +765,7 @@ mod tests {
Amount::from_u64(2000).unwrap(), Amount::from_u64(2000).unwrap(),
None, None,
OvkPolicy::Sender, OvkPolicy::Sender,
10, NonZeroU32::new(1).unwrap(),
), ),
Err(data_api::error::Error::InsufficientFunds { Err(data_api::error::Error::InsufficientFunds {
available, available,
@ -817,16 +777,18 @@ mod tests {
// Mine blocks SAPLING_ACTIVATION_HEIGHT + 1 to 41 (that don't send us funds) // Mine blocks SAPLING_ACTIVATION_HEIGHT + 1 to 41 (that don't send us funds)
// until just before the first transaction expires // until just before the first transaction expires
for i in 1..42 { for i in 1..42 {
let (cb, _) = fake_compact_block( cb = fake_compact_block(
sapling_activation_height() + i, sapling_activation_height() + i,
cb.hash(), cb.hash(),
&ExtendedSpendingKey::master(&[i as u8]).to_diversifiable_full_viewing_key(), &ExtendedSpendingKey::master(&[i as u8]).to_diversifiable_full_viewing_key(),
AddressType::DefaultExternal, AddressType::DefaultExternal,
value, value,
); i,
)
.0;
insert_into_cache(&db_cache, &cb); insert_into_cache(&db_cache, &cb);
} }
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Second spend still fails // Second spend still fails
assert_matches!( assert_matches!(
@ -839,7 +801,7 @@ mod tests {
Amount::from_u64(2000).unwrap(), Amount::from_u64(2000).unwrap(),
None, None,
OvkPolicy::Sender, OvkPolicy::Sender,
10, NonZeroU32::new(1).unwrap(),
), ),
Err(data_api::error::Error::InsufficientFunds { Err(data_api::error::Error::InsufficientFunds {
available, available,
@ -849,15 +811,17 @@ mod tests {
); );
// Mine block SAPLING_ACTIVATION_HEIGHT + 42 so that the first transaction expires // Mine block SAPLING_ACTIVATION_HEIGHT + 42 so that the first transaction expires
let (cb, _) = fake_compact_block( cb = fake_compact_block(
sapling_activation_height() + 42, sapling_activation_height() + 42,
cb.hash(), cb.hash(),
&ExtendedSpendingKey::master(&[42]).to_diversifiable_full_viewing_key(), &ExtendedSpendingKey::master(&[42]).to_diversifiable_full_viewing_key(),
AddressType::DefaultExternal, AddressType::DefaultExternal,
value, value,
); 42,
)
.0;
insert_into_cache(&db_cache, &cb); insert_into_cache(&db_cache, &cb);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Second spend should now succeed // Second spend should now succeed
create_spend_to_address( create_spend_to_address(
@ -869,7 +833,7 @@ mod tests {
Amount::from_u64(2000).unwrap(), Amount::from_u64(2000).unwrap(),
None, None,
OvkPolicy::Sender, OvkPolicy::Sender,
10, NonZeroU32::new(1).unwrap(),
) )
.unwrap(); .unwrap();
} }
@ -892,15 +856,16 @@ mod tests {
// Add funds to the wallet in a single note // Add funds to the wallet in a single note
let value = Amount::from_u64(50000).unwrap(); let value = Amount::from_u64(50000).unwrap();
let (cb, _) = fake_compact_block( let (mut cb, _) = fake_compact_block(
sapling_activation_height(), sapling_activation_height(),
BlockHash([0; 32]), BlockHash([0; 32]),
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
value, value,
0,
); );
insert_into_cache(&db_cache, &cb); insert_into_cache(&db_cache, &cb);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
assert_eq!( assert_eq!(
get_balance(&db_data.conn, AccountId::from(0)).unwrap(), get_balance(&db_data.conn, AccountId::from(0)).unwrap(),
value value
@ -920,7 +885,7 @@ mod tests {
Amount::from_u64(15000).unwrap(), Amount::from_u64(15000).unwrap(),
None, None,
ovk_policy, ovk_policy,
10, NonZeroU32::new(1).unwrap(),
) )
.unwrap(); .unwrap();
@ -962,16 +927,18 @@ mod tests {
// Mine blocks SAPLING_ACTIVATION_HEIGHT + 1 to 42 (that don't send us funds) // Mine blocks SAPLING_ACTIVATION_HEIGHT + 1 to 42 (that don't send us funds)
// so that the first transaction expires // so that the first transaction expires
for i in 1..=42 { for i in 1..=42 {
let (cb, _) = fake_compact_block( cb = fake_compact_block(
sapling_activation_height() + i, sapling_activation_height() + i,
cb.hash(), cb.hash(),
&ExtendedSpendingKey::master(&[i as u8]).to_diversifiable_full_viewing_key(), &ExtendedSpendingKey::master(&[i as u8]).to_diversifiable_full_viewing_key(),
AddressType::DefaultExternal, AddressType::DefaultExternal,
value, value,
); i,
)
.0;
insert_into_cache(&db_cache, &cb); insert_into_cache(&db_cache, &cb);
} }
scan_cached_blocks(&network, &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&network, &db_cache, &mut db_data, None, None).unwrap();
// Send the funds again, discarding history. // Send the funds again, discarding history.
// Neither transaction output is decryptable by the sender. // Neither transaction output is decryptable by the sender.
@ -1001,12 +968,16 @@ mod tests {
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
value, value,
0,
); );
insert_into_cache(&db_cache, &cb); insert_into_cache(&db_cache, &cb);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Verified balance matches total balance // Verified balance matches total balance
let (_, anchor_height) = db_data.get_target_and_anchor_heights(10).unwrap().unwrap(); let (_, anchor_height) = db_data
.get_target_and_anchor_heights(NonZeroU32::new(1).unwrap())
.unwrap()
.unwrap();
assert_eq!( assert_eq!(
get_balance(&db_data.conn, AccountId::from(0)).unwrap(), get_balance(&db_data.conn, AccountId::from(0)).unwrap(),
value value
@ -1027,7 +998,7 @@ mod tests {
Amount::from_u64(50000).unwrap(), Amount::from_u64(50000).unwrap(),
None, None,
OvkPolicy::Sender, OvkPolicy::Sender,
10, NonZeroU32::new(1).unwrap(),
), ),
Ok(_) Ok(_)
); );
@ -1056,12 +1027,16 @@ mod tests {
&dfvk, &dfvk,
AddressType::Internal, AddressType::Internal,
value, value,
0,
); );
insert_into_cache(&db_cache, &cb); insert_into_cache(&db_cache, &cb);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Verified balance matches total balance // Verified balance matches total balance
let (_, anchor_height) = db_data.get_target_and_anchor_heights(10).unwrap().unwrap(); let (_, anchor_height) = db_data
.get_target_and_anchor_heights(NonZeroU32::new(10).unwrap())
.unwrap()
.unwrap();
assert_eq!( assert_eq!(
get_balance(&db_data.conn, AccountId::from(0)).unwrap(), get_balance(&db_data.conn, AccountId::from(0)).unwrap(),
value value
@ -1082,7 +1057,7 @@ mod tests {
Amount::from_u64(50000).unwrap(), Amount::from_u64(50000).unwrap(),
None, None,
OvkPolicy::Sender, OvkPolicy::Sender,
10, NonZeroU32::new(1).unwrap(),
), ),
Ok(_) Ok(_)
); );
@ -1104,32 +1079,38 @@ mod tests {
let dfvk = usk.sapling().to_diversifiable_full_viewing_key(); let dfvk = usk.sapling().to_diversifiable_full_viewing_key();
// Add funds to the wallet // Add funds to the wallet
let (cb, _) = fake_compact_block( let (mut cb, _) = fake_compact_block(
sapling_activation_height(), sapling_activation_height(),
BlockHash([0; 32]), BlockHash([0; 32]),
&dfvk, &dfvk,
AddressType::Internal, AddressType::Internal,
Amount::from_u64(50000).unwrap(), Amount::from_u64(50000).unwrap(),
0,
); );
insert_into_cache(&db_cache, &cb); insert_into_cache(&db_cache, &cb);
// Add 10 dust notes to the wallet // Add 10 dust notes to the wallet
for i in 1..=10 { for i in 1..=10 {
let (cb, _) = fake_compact_block( cb = fake_compact_block(
sapling_activation_height() + i, sapling_activation_height() + i,
cb.hash(), cb.hash(),
&dfvk, &dfvk,
AddressType::DefaultExternal, AddressType::DefaultExternal,
Amount::from_u64(1000).unwrap(), Amount::from_u64(1000).unwrap(),
); i,
)
.0;
insert_into_cache(&db_cache, &cb); insert_into_cache(&db_cache, &cb);
} }
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
// Verified balance matches total balance // Verified balance matches total balance
let total = Amount::from_u64(60000).unwrap(); let total = Amount::from_u64(60000).unwrap();
let (_, anchor_height) = db_data.get_target_and_anchor_heights(1).unwrap().unwrap(); let (_, anchor_height) = db_data
.get_target_and_anchor_heights(NonZeroU32::new(1).unwrap())
.unwrap()
.unwrap();
assert_eq!( assert_eq!(
get_balance(&db_data.conn, AccountId::from(0)).unwrap(), get_balance(&db_data.conn, AccountId::from(0)).unwrap(),
total total
@ -1164,7 +1145,7 @@ mod tests {
&usk, &usk,
req, req,
OvkPolicy::Sender, OvkPolicy::Sender,
1, NonZeroU32::new(1).unwrap(),
), ),
Err(Error::InsufficientFunds { available, required }) Err(Error::InsufficientFunds { available, required })
if available == Amount::from_u64(51000).unwrap() if available == Amount::from_u64(51000).unwrap()
@ -1192,7 +1173,7 @@ mod tests {
&usk, &usk,
req, req,
OvkPolicy::Sender, OvkPolicy::Sender,
1, NonZeroU32::new(1).unwrap(),
), ),
Ok(_) Ok(_)
); );
@ -1234,16 +1215,17 @@ mod tests {
DustOutputPolicy::default(), DustOutputPolicy::default(),
); );
// Add funds to the wallet // Ensure that the wallet has at least one block
let (cb, _) = fake_compact_block( let (cb, _) = fake_compact_block(
sapling_activation_height(), sapling_activation_height(),
BlockHash([0; 32]), BlockHash([0; 32]),
&dfvk, &dfvk,
AddressType::Internal, AddressType::Internal,
Amount::from_u64(50000).unwrap(), Amount::from_u64(50000).unwrap(),
0,
); );
insert_into_cache(&db_cache, &cb); insert_into_cache(&db_cache, &cb);
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None).unwrap(); scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
assert_matches!( assert_matches!(
shield_transparent_funds( shield_transparent_funds(
@ -1255,7 +1237,7 @@ mod tests {
&usk, &usk,
&[*taddr], &[*taddr],
&MemoBytes::empty(), &MemoBytes::empty(),
0 NonZeroU32::new(1).unwrap()
), ),
Ok(_) Ok(_)
); );

View File

@ -12,6 +12,9 @@ and this library adheres to Rust's notion of
- `Builder::add_orchard_spend` - `Builder::add_orchard_spend`
- `Builder::add_orchard_output` - `Builder::add_orchard_output`
- `zcash_primitives::transaction::components::orchard::builder` module - `zcash_primitives::transaction::components::orchard::builder` module
- `impl HashSer for String` is provided under the `test-dependencies` feature
flag. This is a test-only impl; the identity leaf value is `_` and the combining
operation is concatenation.
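
A minimal sketch (not part of the diff itself) of how the test-only `impl HashSer for String` might be exercised, assuming the `test-dependencies` feature of `zcash_primitives` is enabled and that `HashSer` is exposed at `zcash_primitives::merkle_tree` at this revision; the impl itself appears later in this diff, in the `merkle_tree` testing module:

    use zcash_primitives::merkle_tree::HashSer;

    fn string_hashser_roundtrip() -> std::io::Result<()> {
        // `write` emits the string as a length-prefixed byte vector (via the
        // `Vector::write` call in the impl shown later in this diff); `read`
        // parses it back, rejecting non-UTF-8 data with an `InvalidData` error.
        let leaf = "ab_cd".to_string();
        let mut buf = Vec::new();
        leaf.write(&mut buf)?;
        let parsed = String::read(&buf[..])?;
        assert_eq!(parsed, leaf);
        Ok(())
    }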
### Changed ### Changed
- `zcash_primitives::transaction`: - `zcash_primitives::transaction`:

View File

@ -39,10 +39,20 @@ impl BlockHash {
/// ///
/// This function will panic if the slice is not exactly 32 bytes. /// This function will panic if the slice is not exactly 32 bytes.
pub fn from_slice(bytes: &[u8]) -> Self { pub fn from_slice(bytes: &[u8]) -> Self {
assert_eq!(bytes.len(), 32); Self::try_from_slice(bytes).unwrap()
let mut hash = [0; 32]; }
hash.copy_from_slice(bytes);
BlockHash(hash) /// Constructs a [`BlockHash`] from the given slice.
///
/// Returns `None` if `bytes` has any length other than 32
pub fn try_from_slice(bytes: &[u8]) -> Option<Self> {
if bytes.len() == 32 {
let mut hash = [0; 32];
hash.copy_from_slice(bytes);
Some(BlockHash(hash))
} else {
None
}
} }
} }
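
A minimal sketch (not part of the diff itself) of how the new fallible constructor might be used in place of the panicking `from_slice`, assuming `BlockHash` is imported from `zcash_primitives::block` at this revision:

    use zcash_primitives::block::BlockHash;

    fn parse_block_hash(bytes: &[u8]) -> Option<BlockHash> {
        // `try_from_slice` returns `None` for any slice whose length is not 32,
        // letting callers surface a recoverable error instead of panicking.
        BlockHash::try_from_slice(bytes)
    }

    fn main() {
        assert!(parse_block_hash(&[0u8; 32]).is_some());
        assert!(parse_block_hash(&[0u8; 31]).is_none());
    }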

View File

@ -627,6 +627,12 @@ pub mod testing {
) )
}) })
} }
impl incrementalmerkletree::testing::TestCheckpoint for BlockHeight {
fn from_u64(value: u64) -> Self {
BlockHeight(u32::try_from(value).expect("Test checkpoint ids do not exceed 32 bits"))
}
}
} }
#[cfg(test)] #[cfg(test)]

View File

@ -98,7 +98,7 @@ pub fn write_nonempty_frontier_v1<H: HashSer, W: Write>(
frontier: &NonEmptyFrontier<H>, frontier: &NonEmptyFrontier<H>,
) -> io::Result<()> { ) -> io::Result<()> {
write_position(&mut writer, frontier.position())?; write_position(&mut writer, frontier.position())?;
if frontier.position().is_odd() { if frontier.position().is_right_child() {
// The v1 serialization wrote the sibling of a right-hand leaf as an optional value, rather // The v1 serialization wrote the sibling of a right-hand leaf as an optional value, rather
// than as part of the ommers vector. // than as part of the ommers vector.
frontier frontier
@ -292,6 +292,7 @@ pub mod testing {
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use incrementalmerkletree::frontier::testing::TestNode; use incrementalmerkletree::frontier::testing::TestNode;
use std::io::{self, Read, Write}; use std::io::{self, Read, Write};
use zcash_encoding::Vector;
use super::HashSer; use super::HashSer;
@ -304,6 +305,23 @@ pub mod testing {
writer.write_u64::<LittleEndian>(self.0) writer.write_u64::<LittleEndian>(self.0)
} }
} }
impl HashSer for String {
fn read<R: Read>(reader: R) -> io::Result<String> {
Vector::read(reader, |r| r.read_u8()).and_then(|xs| {
String::from_utf8(xs).map_err(|e| {
io::Error::new(
io::ErrorKind::InvalidData,
format!("Not a valid utf8 string: {:?}", e),
)
})
})
}
fn write<W: Write>(&self, writer: W) -> io::Result<()> {
Vector::write(writer, self.as_bytes(), |w, b| w.write_u8(*b))
}
}
} }
#[cfg(test)] #[cfg(test)]