Move traversal of cached blocks to CacheOps

parent 604294dd9f
commit 06c1772692
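In short: the inline SQL loop that walked the compactblocks table inside scan_cached_blocks moves behind a new CacheOps::with_cached_blocks trait method, so the scanner sees only (BlockHeight, CompactBlock) pairs and the rusqlite query lives in one place. A minimal sketch of how a consumer of the trait could drive the new method (the trait and method shape come from this diff; the helper function and import paths are assumptions):

    use zcash_client_backend::data_api::CacheOps;
    use zcash_primitives::consensus::BlockHeight;

    // Hypothetical helper: count the cached blocks above `from`, using only
    // the trait surface added in this commit.
    fn count_cached_blocks<C: CacheOps>(cache: &C, from: BlockHeight) -> Result<u32, C::Error> {
        let mut n = 0;
        cache.with_cached_blocks(from, None, |_height, _block| {
            n += 1; // the FnMut bound lets the closure mutate captured state
            Ok(())
        })?;
        Ok(n)
    }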
@@ -121,4 +121,13 @@ pub trait CacheOps {
     ) -> Result<Option<BlockHash>, Self::Error>
     where
         F: Fn(&CompactBlock, &CompactBlock) -> Result<(), Self::Error>;
+
+    fn with_cached_blocks<F>(
+        &self,
+        from_height: BlockHeight,
+        limit: Option<u32>,
+        with_row: F,
+    ) -> Result<(), Self::Error>
+    where
+        F: FnMut(BlockHeight, CompactBlock) -> Result<(), Self::Error>;
 }
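Note the FnMut bound, in contrast to the Fn bound on the validate_chain callback just above it: the closure that scan_cached_blocks passes in (below) mutates captured state such as last_height and the nullifier set, which a plain Fn bound would reject.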

@@ -195,6 +195,18 @@ impl CacheOps for CacheConnection {
     {
         chain::validate_chain(self, from_height, validate)
     }
+
+    fn with_cached_blocks<F>(
+        &self,
+        from_height: BlockHeight,
+        limit: Option<u32>,
+        with_row: F,
+    ) -> Result<(), Self::Error>
+    where
+        F: FnMut(BlockHeight, CompactBlock) -> Result<(), Self::Error>,
+    {
+        scan::with_cached_blocks(self, from_height, limit, with_row)
+    }
 }
 
 fn address_from_extfvk<P: consensus::Parameters>(
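The sqlite implementation is a thin forwarder to a new scan::with_cached_blocks free function (shown at the end of this diff), so the trait method stays a one-liner and other CacheOps implementations, say an in-memory cache in tests, can traverse their own storage however they like.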

@@ -9,7 +9,7 @@ use zcash_client_backend::{
     address::RecipientAddress,
     data_api::{
         error::{ChainInvalid, Error},
-        DBOps,
+        CacheOps, DBOps,
     },
     decrypt_transaction,
     encoding::decode_extended_full_viewing_key,

@@ -107,6 +107,7 @@ pub fn scan_cached_blocks<P: consensus::Parameters>(
     // Get most recent incremental witnesses for the notes we are tracking
     let mut witnesses = data.get_witnesses(last_height)?;
 
     // Get the nullifiers for the notes we are tracking
     let mut nullifiers = data.get_nullifiers()?;
+
     // Prepare per-block SQL statements

@@ -154,39 +155,22 @@ pub fn scan_cached_blocks<P: consensus::Parameters>(
         )",
     )?;
 
-    // Fetch the CompactBlocks we need to scan
-    let mut stmt_blocks = cache.0.prepare(
-        "SELECT height, data FROM compactblocks WHERE height > ? ORDER BY height ASC LIMIT ?",
-    )?;
-    let rows = stmt_blocks.query_map(
-        &[
-            u32::from(last_height).to_sql()?,
-            limit.unwrap_or(u32::max_value()).to_sql()?,
-        ],
-        |row| {
-            Ok(CompactBlockRow {
-                height: BlockHeight::from_u32(row.get(0)?),
-                data: row.get(1)?,
-            })
-        },
-    )?;
-
-    for row in rows {
-        let row = row?;
-
+    cache.with_cached_blocks(
+        last_height,
+        limit,
+        |height: BlockHeight, block: CompactBlock| {
         // Start an SQL transaction for this block.
         data.0.execute("BEGIN IMMEDIATE", NO_PARAMS)?;
 
         // Scanned blocks MUST be height-sequential.
-        if row.height != (last_height + 1) {
+        if height != (last_height + 1) {
             return Err(SqliteClientError(ChainInvalid::block_height_mismatch(
                 last_height + 1,
-                row.height,
+                height,
             )));
         }
-        last_height = row.height;
+        last_height = height;
 
-        let block: CompactBlock = parse_from_bytes(&row.data)?;
         let block_hash = block.hash.clone();
         let block_time = block.time;
 
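The body of the old for loop carries over as the closure body: each block is still wrapped in its own BEGIN IMMEDIATE ... COMMIT pair so that an interrupted scan leaves the data database at the last fully-scanned block, and the height-sequential check now uses the height argument handed in by with_cached_blocks rather than a row the loop fetched itself.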

@@ -238,7 +222,7 @@ pub fn scan_cached_blocks<P: consensus::Parameters>(
         tree.write(&mut encoded_tree)
             .expect("Should be able to write to a Vec");
         stmt_insert_block.execute(&[
-            u32::from(row.height).to_sql()?,
+            u32::from(height).to_sql()?,
             block_hash.to_sql()?,
             block_time.to_sql()?,
             encoded_tree.to_sql()?,

@@ -248,7 +232,7 @@ pub fn scan_cached_blocks<P: consensus::Parameters>(
         // First try update an existing transaction in the database.
         let txid = tx.txid.0.to_vec();
         let tx_row = if stmt_update_tx.execute(&[
-            u32::from(row.height).to_sql()?,
+            u32::from(height).to_sql()?,
             (tx.index as i64).to_sql()?,
             txid.to_sql()?,
         ])? == 0

@@ -256,7 +240,7 @@ pub fn scan_cached_blocks<P: consensus::Parameters>(
             // It isn't there, so insert our transaction into the database.
             stmt_insert_tx.execute(&[
                 txid.to_sql()?,
-                u32::from(row.height).to_sql()?,
+                u32::from(height).to_sql()?,
                 (tx.index as i64).to_sql()?,
             ])?;
             data.0.last_insert_rowid()

@@ -269,15 +253,13 @@ pub fn scan_cached_blocks<P: consensus::Parameters>(
         for spend in &tx.shielded_spends {
             stmt_mark_spent_note.execute(&[tx_row.to_sql()?, spend.nf.to_sql()?])?;
         }
-        nullifiers = nullifiers
-            .into_iter()
-            .filter(|(nf, _acc)| {
+
+        nullifiers.retain(|(nf, _acc)| {
             tx.shielded_spends
                 .iter()
                 .find(|spend| &spend.nf == nf)
                 .is_none()
-            })
-            .collect();
+        });
 
         for output in tx.shielded_outputs {
             let rcm = output.note.rcm().to_repr();
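Vec::retain performs the same filtering as the removed into_iter()/filter()/collect() chain, but in place: it keeps exactly the elements for which the predicate returns true. A standalone illustration with plain integers in place of nullifiers:

    let mut v = vec![1, 2, 3, 4, 5];
    v.retain(|n| n % 2 == 0); // drops elements failing the predicate
    assert_eq!(v, vec![2, 4]);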

@@ -353,6 +335,43 @@ pub fn scan_cached_blocks<P: consensus::Parameters>(
         // Commit the SQL transaction, writing this block's data atomically.
         data.0.execute("COMMIT", NO_PARAMS)?;
-    }
+
+        Ok(())
+        },
+    )?;
 
     Ok(())
 }
+
+pub fn with_cached_blocks<F>(
+    cache: &CacheConnection,
+    from_height: BlockHeight,
+    limit: Option<u32>,
+    mut with_row: F,
+) -> Result<(), SqliteClientError>
+where
+    F: FnMut(BlockHeight, CompactBlock) -> Result<(), SqliteClientError>,
+{
+    // Fetch the CompactBlocks we need to scan
+    let mut stmt_blocks = cache.0.prepare(
+        "SELECT height, data FROM compactblocks WHERE height > ? ORDER BY height ASC LIMIT ?",
+    )?;
+    let rows = stmt_blocks.query_map(
+        &[
+            u32::from(from_height).to_sql()?,
+            limit.unwrap_or(u32::max_value()).to_sql()?,
+        ],
+        |row| {
+            Ok(CompactBlockRow {
+                height: BlockHeight::from_u32(row.get(0)?),
+                data: row.get(1)?,
+            })
+        },
+    )?;
+
+    for row_result in rows {
+        let row = row_result?;
+        with_row(row.height, parse_from_bytes(&row.data)?)?;
+    }
+
+    Ok(())
+}
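For reference, a hypothetical direct call to the new free function; it assumes an existing CacheConnection named cache plus this module's imports, and elides the surrounding function that the ? operator needs:

    // Collect the heights of up to 10 cached blocks above height 419_200.
    let mut heights: Vec<BlockHeight> = Vec::new();
    with_cached_blocks(
        &cache,
        BlockHeight::from_u32(419_200),
        Some(10),
        |height, _block| {
            heights.push(height);
            Ok(())
        },
    )?;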