Move scan_cached_blocks out of sqlite crate.

This commit is contained in:
Kris Nuttycombe 2020-08-20 17:03:43 -06:00
parent d16c124ffe
commit 746c4c9a00
5 changed files with 219 additions and 213 deletions

View File

@ -33,6 +33,8 @@ protobuf-codegen-pure = "2.15"
[dev-dependencies]
rand_core = "0.5.1"
rand_xorshift = "0.2"
tempfile = "3.1.0"
zcash_client_sqlite = { version = "0.2", path = "../zcash_client_sqlite" }
[features]
test-dependencies = ["proptest", "zcash_primitives/test-dependencies"]

View File

@ -1,10 +1,19 @@
use std::cmp;
use zcash_primitives::consensus::{self, BlockHeight, NetworkUpgrade};
use zcash_primitives::{
block::BlockHash,
consensus::{self, BlockHeight, NetworkUpgrade},
merkle_tree::CommitmentTree,
};
use crate::data_api::{
error::{ChainInvalid, Error},
CacheOps, DBOps,
use crate::{
data_api::{
error::{ChainInvalid, Error},
CacheOps, DBOps, DBUpdate,
},
proto::compact_formats::CompactBlock,
wallet::{AccountId, WalletTx},
welding_rig::scan_block,
};
pub const ANCHOR_OFFSET: u32 = 10;
@ -109,3 +118,193 @@ where
}
})
}
/// Scans at most `limit` new blocks added to the cache for any transactions received by
/// the tracked accounts.
///
/// This function will return without error after scanning at most `limit` new blocks, to
/// enable the caller to update their UI with scanning progress. Repeatedly calling this
/// function will process sequential ranges of blocks, and is equivalent to calling
/// `scan_cached_blocks` and passing `None` for the optional `limit` value.
///
/// This function pays attention only to cached blocks with heights greater than the
/// highest scanned block in `db_data`. Cached blocks with lower heights are not verified
/// against previously-scanned blocks. In particular, this function **assumes** that the
/// caller is handling rollbacks.
///
/// For brand-new light client databases, this function starts scanning from the Sapling
/// activation height. This height can be fast-forwarded to a more recent block by calling
/// [`init_blocks_table`] before this function.
///
/// Scanned blocks are required to be height-sequential. If a block is missing from the
/// cache, an error will be returned with kind [`ChainInvalid::HeightMismatch`].
///
/// # Examples
///
/// ```
/// use tempfile::NamedTempFile;
/// use zcash_primitives::consensus::{
/// Network,
/// Parameters,
/// };
/// use zcash_client_backend::{
/// data_api::chain::scan_cached_blocks,
/// };
/// use zcash_client_sqlite::{
/// CacheConnection,
/// DataConnection,
/// };
///
/// let cache_file = NamedTempFile::new().unwrap();
/// let cache = CacheConnection::for_path(cache_file).unwrap();
/// let data_file = NamedTempFile::new().unwrap();
/// let data = DataConnection::for_path(data_file).unwrap();
/// scan_cached_blocks(&Network::TestNetwork, &cache, &data, None);
/// ```
///
/// [`init_blocks_table`]: crate::init::init_blocks_table
pub fn scan_cached_blocks<'db, E, E0, N, P, C, D>(
params: &P,
cache: &C,
data: &'db D,
limit: Option<u32>,
) -> Result<(), E>
where
P: consensus::Parameters,
C: CacheOps<Error = E>,
&'db D: DBOps<Error = E, NoteRef = N>,
N: Copy,
E: From<Error<E0, N>>,
{
let sapling_activation_height = params
.activation_height(NetworkUpgrade::Sapling)
.ok_or(Error::SaplingNotActive)?;
// Recall where we synced up to previously.
// If we have never synced, use sapling activation height to select all cached CompactBlocks.
let mut last_height = data.block_height_extrema().map(|opt| {
opt.map(|(_, max)| max)
.unwrap_or(sapling_activation_height - 1)
})?;
// Raise SQL errors from the query, IO errors from parsing, and incorrect HRP errors.
let extfvks = data.get_extended_full_viewing_keys(params)?;
// Get the most recent CommitmentTree
let mut tree = data
.get_commitment_tree(last_height)
.map(|t| t.unwrap_or(CommitmentTree::new()))?;
// Get most recent incremental witnesses for the notes we are tracking
let mut witnesses = data.get_witnesses(last_height)?;
// Get the nullifiers for the notes we are tracking
let mut nullifiers = data.get_nullifiers()?;
cache.with_cached_blocks(
last_height,
limit,
|height: BlockHeight, block: CompactBlock| {
// Scanned blocks MUST be height-sequential.
if height != (last_height + 1) {
return Err(ChainInvalid::block_height_mismatch(last_height + 1, height).into());
}
last_height = height;
let block_hash = BlockHash::from_slice(&block.hash);
let block_time = block.time;
let txs: Vec<WalletTx> = {
let nf_refs: Vec<_> = nullifiers
.iter()
.map(|(nf, acc)| (&nf[..], acc.0 as usize))
.collect();
let mut witness_refs: Vec<_> = witnesses.iter_mut().map(|w| &mut w.1).collect();
scan_block(
params,
block,
&extfvks[..],
&nf_refs,
&mut tree,
&mut witness_refs[..],
)
};
// Enforce that all roots match. This is slow, so only include in debug builds.
#[cfg(debug_assertions)]
{
let cur_root = tree.root();
for row in &witnesses {
if row.1.root() != cur_root {
return Err(Error::InvalidWitnessAnchor(row.0, last_height).into());
}
}
for tx in &txs {
for output in tx.shielded_outputs.iter() {
if output.witness.root() != cur_root {
return Err(Error::InvalidNewWitnessAnchor(
output.index,
tx.txid,
last_height,
output.witness.root(),
)
.into());
}
}
}
}
// database updates for each block are transactional
data.transactionally(&mut data.get_mutator()?, |mutator| {
// Insert the block into the database.
mutator.insert_block(height, block_hash, block_time, &tree)?;
for tx in txs {
let tx_row = mutator.put_tx(&tx, height)?;
// Mark notes as spent and remove them from the scanning cache
for spend in &tx.shielded_spends {
mutator.mark_spent(tx_row, &spend.nf)?;
}
nullifiers.retain(|(nf, _acc)| {
tx.shielded_spends
.iter()
.find(|spend| &spend.nf == nf)
.is_none()
});
for output in tx.shielded_outputs {
let nf = output.note.nf(
&extfvks[output.account].fvk.vk,
output.witness.position() as u64,
);
let note_id = mutator.put_note(&output, &nf, tx_row)?;
// Save witness for note.
witnesses.push((note_id, output.witness));
// Cache nullifier for note (to detect subsequent spends in this scan).
nullifiers.push((nf, AccountId(output.account as u32)));
}
}
// Insert current witnesses into the database.
for (note_id, witness) in witnesses.iter() {
mutator.insert_witness(*note_id, witness, last_height)?;
}
// Prune the stored witnesses (we only expect rollbacks of at most 100 blocks).
mutator.prune_witnesses(last_height - 100)?;
// Update now-expired transactions that didn't get mined.
mutator.update_expired_notes(last_height)?;
Ok(())
})
},
)?;
Ok(())
}

View File

@ -10,7 +10,10 @@
//!
//! use zcash_client_backend::{
//! data_api::{
//! chain::validate_combined_chain,
//! chain::{
//! validate_combined_chain,
//! scan_cached_blocks,
//! },
//! error::Error,
//! }
//! };
@ -19,7 +22,6 @@
//! DataConnection,
//! CacheConnection,
//! chain::{rewind_to_height},
//! scan::scan_cached_blocks,
//! };
//!
//! let network = Network::TestNetwork;
@ -245,12 +247,14 @@ mod tests {
zip32::{ExtendedFullViewingKey, ExtendedSpendingKey},
};
use zcash_client_backend::data_api::{chain::validate_combined_chain, error::Error};
use zcash_client_backend::data_api::{
chain::{scan_cached_blocks, validate_combined_chain},
error::Error,
};
use crate::{
init::{init_accounts_table, init_cache_database, init_data_database},
query::get_balance,
scan::scan_cached_blocks,
tests::{self, fake_compact_block, insert_into_cache, sapling_activation_height},
AccountId, CacheConnection, DataConnection,
};

View File

@ -6,220 +6,22 @@ use protobuf::parse_from_bytes;
use rusqlite::{types::ToSql, OptionalExtension, NO_PARAMS};
use zcash_primitives::{
block::BlockHash,
consensus::{self, BlockHeight, NetworkUpgrade},
merkle_tree::CommitmentTree,
transaction::Transaction,
};
use zcash_client_backend::{
address::RecipientAddress,
data_api::{
error::{ChainInvalid, Error},
CacheOps, DBOps, DBUpdate,
},
decrypt_transaction,
encoding::decode_extended_full_viewing_key,
proto::compact_formats::CompactBlock,
wallet::WalletTx,
welding_rig::scan_block,
address::RecipientAddress, data_api::error::Error, decrypt_transaction,
encoding::decode_extended_full_viewing_key, proto::compact_formats::CompactBlock,
};
use crate::{error::SqliteClientError, AccountId, CacheConnection, DataConnection};
use crate::{error::SqliteClientError, CacheConnection, DataConnection};
// A single row read from the block cache: a block height paired with the
// raw serialized block bytes.
// NOTE(review): `data` is presumably a serialized `CompactBlock` protobuf
// (`parse_from_bytes` is imported above) — confirm against the cache reader.
struct CompactBlockRow {
height: BlockHeight,
data: Vec<u8>,
}
/// Scans at most `limit` new blocks added to the cache for any transactions received by
/// the tracked accounts.
///
/// This function will return without error after scanning at most `limit` new blocks, to
/// enable the caller to update their UI with scanning progress. Repeatedly calling this
/// function will process sequential ranges of blocks, and is equivalent to calling
/// `scan_cached_blocks` and passing `None` for the optional `limit` value.
///
/// This function pays attention only to cached blocks with heights greater than the
/// highest scanned block in `db_data`. Cached blocks with lower heights are not verified
/// against previously-scanned blocks. In particular, this function **assumes** that the
/// caller is handling rollbacks.
///
/// For brand-new light client databases, this function starts scanning from the Sapling
/// activation height. This height can be fast-forwarded to a more recent block by calling
/// [`init_blocks_table`] before this function.
///
/// Scanned blocks are required to be height-sequential. If a block is missing from the
/// cache, an error will be returned with kind [`ChainInvalid::HeightMismatch`].
///
/// # Examples
///
/// ```
/// use tempfile::NamedTempFile;
/// use zcash_primitives::consensus::{
/// Network,
/// Parameters,
/// };
/// use zcash_client_sqlite::{
/// CacheConnection,
/// DataConnection,
/// scan::scan_cached_blocks,
/// };
///
/// let cache_file = NamedTempFile::new().unwrap();
/// let cache = CacheConnection::for_path(cache_file).unwrap();
/// let data_file = NamedTempFile::new().unwrap();
/// let data = DataConnection::for_path(data_file).unwrap();
/// scan_cached_blocks(&Network::TestNetwork, &cache, &data, None);
/// ```
///
/// [`init_blocks_table`]: crate::init::init_blocks_table
pub fn scan_cached_blocks<'db, E, E0, N, P, C, D>(
params: &P,
cache: &C,
data: &'db D,
limit: Option<u32>,
) -> Result<(), E>
where
P: consensus::Parameters,
C: CacheOps<Error = E>,
&'db D: DBOps<Error = E, NoteRef = N>,
N: Copy,
E: From<Error<E0, N>>,
{
// Scanning cannot begin before Sapling is active on this network.
let sapling_activation_height = params
.activation_height(NetworkUpgrade::Sapling)
.ok_or(Error::SaplingNotActive)?;
// Recall where we synced up to previously.
// If we have never synced, use sapling activation height to select all cached CompactBlocks.
let mut last_height = data.block_height_extrema().map(|opt| {
opt.map(|(_, max)| max)
.unwrap_or(sapling_activation_height - 1)
})?;
// Raise SQL errors from the query, IO errors from parsing, and incorrect HRP errors.
let extfvks = data.get_extended_full_viewing_keys(params)?;
// Get the most recent CommitmentTree
let mut tree = data
.get_commitment_tree(last_height)
.map(|t| t.unwrap_or(CommitmentTree::new()))?;
// Get most recent incremental witnesses for the notes we are tracking
let mut witnesses = data.get_witnesses(last_height)?;
// Get the nullifiers for the notes we are tracking
let mut nullifiers = data.get_nullifiers()?;
// Process at most `limit` cached blocks above `last_height`, in ascending
// height order.
cache.with_cached_blocks(
last_height,
limit,
|height: BlockHeight, block: CompactBlock| {
// Scanned blocks MUST be height-sequential.
if height != (last_height + 1) {
return Err(ChainInvalid::block_height_mismatch(last_height + 1, height).into());
}
last_height = height;
let block_hash = BlockHash::from_slice(&block.hash);
let block_time = block.time;
// Trial-decrypt the block against the tracked keys; `scan_block` also
// advances `tree` and the existing `witnesses` in place (it receives
// mutable references to both).
let txs: Vec<WalletTx> = {
let nf_refs: Vec<_> = nullifiers
.iter()
.map(|(nf, acc)| (&nf[..], acc.0 as usize))
.collect();
let mut witness_refs: Vec<_> = witnesses.iter_mut().map(|w| &mut w.1).collect();
scan_block(
params,
block,
&extfvks[..],
&nf_refs,
&mut tree,
&mut witness_refs[..],
)
};
// Enforce that all roots match. This is slow, so only include in debug builds.
#[cfg(debug_assertions)]
{
let cur_root = tree.root();
for row in &witnesses {
if row.1.root() != cur_root {
return Err(Error::InvalidWitnessAnchor(row.0, last_height).into());
}
}
for tx in &txs {
for output in tx.shielded_outputs.iter() {
if output.witness.root() != cur_root {
return Err(Error::InvalidNewWitnessAnchor(
output.index,
tx.txid,
last_height,
output.witness.root(),
)
.into());
}
}
}
}
// database updates for each block are transactional
data.transactionally(&mut data.get_mutator()?, |mutator| {
// Insert the block into the database.
mutator.insert_block(height, block_hash, block_time, &tree)?;
for tx in txs {
let tx_row = mutator.put_tx(&tx, height)?;
// Mark notes as spent and remove them from the scanning cache
for spend in &tx.shielded_spends {
mutator.mark_spent(tx_row, &spend.nf)?;
}
// Keep only nullifiers that did not appear among this
// transaction's spends.
nullifiers.retain(|(nf, _acc)| {
tx.shielded_spends
.iter()
.find(|spend| &spend.nf == nf)
.is_none()
});
for output in tx.shielded_outputs {
let nf = output.note.nf(
&extfvks[output.account].fvk.vk,
output.witness.position() as u64,
);
let note_id = mutator.put_note(&output, &nf, tx_row)?;
// Save witness for note.
witnesses.push((note_id, output.witness));
// Cache nullifier for note (to detect subsequent spends in this scan).
nullifiers.push((nf, AccountId(output.account as u32)));
}
}
// Insert current witnesses into the database.
for (note_id, witness) in witnesses.iter() {
mutator.insert_witness(*note_id, witness, last_height)?;
}
// Prune the stored witnesses (we only expect rollbacks of at most 100 blocks).
mutator.prune_witnesses(last_height - 100)?;
// Update now-expired transactions that didn't get mined.
mutator.update_expired_notes(last_height)?;
Ok(())
})
},
)?;
Ok(())
}
pub fn with_cached_blocks<F>(
cache: &CacheConnection,
from_height: BlockHeight,
@ -437,7 +239,7 @@ mod tests {
zip32::{ExtendedFullViewingKey, ExtendedSpendingKey},
};
use zcash_client_backend::data_api::error::ChainInvalid;
use zcash_client_backend::data_api::{chain::scan_cached_blocks, error::ChainInvalid};
use crate::{
init::{init_accounts_table, init_cache_database, init_data_database},
@ -449,8 +251,6 @@ mod tests {
AccountId, CacheConnection, DataConnection, NoteId,
};
use super::scan_cached_blocks;
#[test]
fn scan_cached_blocks_requires_sequential_blocks() {
let cache_file = NamedTempFile::new().unwrap();

View File

@ -386,10 +386,11 @@ mod tests {
use zcash_proofs::prover::LocalTxProver;
use zcash_client_backend::data_api::chain::scan_cached_blocks;
use crate::{
init::{init_accounts_table, init_blocks_table, init_cache_database, init_data_database},
query::{get_balance, get_verified_balance},
scan::scan_cached_blocks,
tests::{self, fake_compact_block, insert_into_cache, sapling_activation_height},
AccountId, CacheConnection, DataConnection,
};