Remove `zcash_client_backend::data_api::chain::validate_chain`

Local chain validation will be performed internally to `scan_cached_blocks`, and since the handling of chain reorgs will need to change to support out-of-order scanning, the `validate_chain` method will be superfluous. It is removed now, ahead of those changes, to avoid having to update it to reflect behaviour that is about to be replaced.

commit 77b638012b (parent e225a54d2e)
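
The direction of the change is easier to see with the continuity rule written out: a block extends the chain only if it sits at the previous height plus one and its `prev_hash` matches the previous block's hash, and that is the check intended to move inside the scanning pass. Below is a minimal, hypothetical sketch of the rule in isolation; `BlockMeta`, `ContinuityError`, `check_continuity`, and the standalone `BlockHash` wrapper are illustrative names for this sketch, not items from the crate.

// Sketch only: the height/prev_hash continuity rule that a scanner could apply to
// each block as it walks the cache. None of these types are crate items.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct BlockHash([u8; 32]);

#[derive(Clone, Copy, Debug)]
struct BlockMeta {
    height: u64,
    hash: BlockHash,
    prev_hash: BlockHash,
}

#[derive(Debug)]
enum ContinuityError {
    HeightDiscontinuity { expected: u64, found: u64 },
    PrevHashMismatch { at_height: u64 },
}

/// Checks that `block` extends `prior` by exactly one height and links to its hash.
fn check_continuity(prior: Option<&BlockMeta>, block: &BlockMeta) -> Result<(), ContinuityError> {
    if let Some(prior) = prior {
        if block.height != prior.height + 1 {
            return Err(ContinuityError::HeightDiscontinuity {
                expected: prior.height + 1,
                found: block.height,
            });
        }
        if block.prev_hash != prior.hash {
            return Err(ContinuityError::PrevHashMismatch { at_height: block.height });
        }
    }
    Ok(())
}

Applied per block during scanning, a failed check marks the point at which previously scanned data and newly downloaded blocks diverge, which is where reorg handling takes over.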

@@ -75,6 +75,8 @@ and this library adheres to Rust's notion of
   feature flag, has been modified by the addition of a `sapling_tree` property.
 - `wallet::input_selection`:
   - `Proposal::target_height` (use `Proposal::min_target_height` instead).
+- `zcash_client_backend::data_api::chain::validate_chain` TODO: document how
+  to handle validation given out-of-order blocks.
 - `zcash_client_backend::wallet::WalletSaplingOutput::{witness, witness_mut}`
   have been removed as individual incremental witnesses are no longer tracked on a
   per-note basis. The global note commitment tree for the wallet should be used

@@ -36,43 +36,12 @@
 //! let mut db_data = testing::MockWalletDb::new(Network::TestNetwork);
 //!
 //! // 1) Download new CompactBlocks into block_source.
-//!
-//! // 2) Run the chain validator on the received blocks.
 //! //
-//! // Given that we assume the server always gives us correct-at-the-time blocks, any
-//! // errors are in the blocks we have previously cached or scanned.
-//! let max_height_hash = db_data.get_max_height_hash().map_err(Error::Wallet)?;
-//! if let Err(e) = validate_chain(&block_source, max_height_hash, None) {
-//!     match e {
-//!         Error::Chain(e) => {
-//!             // a) Pick a height to rewind to.
-//!             //
-//!             // This might be informed by some external chain reorg information, or
-//!             // heuristics such as the platform, available bandwidth, size of recent
-//!             // CompactBlocks, etc.
-//!             let rewind_height = e.at_height() - 10;
-//!
-//!             // b) Rewind scanned block information.
-//!             db_data.truncate_to_height(rewind_height);
-//!
-//!             // c) Delete cached blocks from rewind_height onwards.
-//!             //
-//!             // This does imply that assumed-valid blocks will be re-downloaded, but it
-//!             // is also possible that in the intervening time, a chain reorg has
-//!             // occurred that orphaned some of those blocks.
-//!
-//!             // d) If there is some separate thread or service downloading
-//!             //    CompactBlocks, tell it to go back and download from rewind_height
-//!             //    onwards.
-//!         },
-//!         e => {
-//!             // handle or return other errors
-//!
-//!         }
-//!     }
-//! }
-//!
-//! // 3) Scan (any remaining) cached blocks.
+//! // 2) FIXME: Obtain necessary block metadata for continuity checking?
+//! //
+//! // 3) Scan cached blocks.
+//! //
+//! // FIXME: update documentation on how to detect when a rewind is required.
 //! //
 //! // At this point, the cache and scanned data are locally consistent (though not
 //! // necessarily consistent with the latest chain tip - this would be discovered the
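
The deleted example above also spelled out the rewind procedure (pick a rewind height, truncate scanned wallet data, re-download and rescan from that height onward); its replacement only leaves a FIXME for how to detect that a rewind is required. Until that is documented, the same procedure can be sketched in the new shape, where the continuity failure surfaces from scanning rather than from a separate validation pass. This is a non-authoritative sketch: `WalletDb`, `ScanError`, and `handle_scan_result` are stand-ins invented here, and only the three-step rewind procedure is carried over from the removed documentation.

// Sketch only: reacting to a chain-continuity failure reported by the scanning step.
struct WalletDb;

#[derive(Debug)]
enum ScanError {
    /// Newly scanned blocks no longer connect to previously scanned data.
    Chain { at_height: u64 },
    /// Any other scanning failure.
    Other(String),
}

impl WalletDb {
    /// Drops scanned data above `height`, mirroring `truncate_to_height` in the
    /// removed example.
    fn truncate_to_height(&mut self, _height: u64) {}
}

fn handle_scan_result(db: &mut WalletDb, result: Result<(), ScanError>) -> Result<(), ScanError> {
    match result {
        Ok(()) => Ok(()),
        Err(ScanError::Chain { at_height }) => {
            // a) Pick a height to rewind to; a fixed offset below the failure is the
            //    heuristic used in the removed example.
            let rewind_height = at_height.saturating_sub(10);

            // b) Rewind scanned block information.
            db.truncate_to_height(rewind_height);

            // c) Delete (or mark for re-download) cached blocks from rewind_height
            //    onwards, then resume downloading and scanning from there.
            Ok(())
        }
        Err(other) => Err(other),
    }
}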

@@ -82,10 +51,7 @@
 //! # }
 //! ```
 
-use std::convert::Infallible;
-
 use zcash_primitives::{
-    block::BlockHash,
     consensus::{self, BlockHeight},
     sapling::{self, note_encryption::PreparedIncomingViewingKey},
     zip32::Scope,

@@ -143,63 +109,6 @@ pub trait BlockSource {
         F: FnMut(CompactBlock) -> Result<(), error::Error<WalletErrT, Self::Error, NoteRefT>>;
 }
 
-/// Checks that the scanned blocks in the data database, when combined with the recent
-/// `CompactBlock`s in the block_source database, form a valid chain.
-///
-/// This function is built on the core assumption that the information provided in the
-/// block source is more likely to be accurate than the previously-scanned information.
-/// This follows from the design (and trust) assumption that the `lightwalletd` server
-/// provides accurate block information as of the time it was requested.
-///
-/// Arguments:
-/// - `block_source` Source of compact blocks
-/// - `validate_from` Height & hash of last validated block;
-/// - `limit` specified number of blocks that will be valididated. Callers providing
-///   a `limit` argument are responsible of making subsequent calls to `validate_chain()`
-///   to complete validating the remaining blocks stored on the `block_source`. If `none`
-///   is provided, there will be no limit set to the validation and upper bound of the
-///   validation range will be the latest height present in the `block_source`.
-///
-/// Returns:
-/// - `Ok(())` if the combined chain is valid up to the given height
-///   and block hash.
-/// - `Err(Error::Chain(cause))` if the combined chain is invalid.
-/// - `Err(e)` if there was an error during validation unrelated to chain validity.
-pub fn validate_chain<BlockSourceT>(
-    block_source: &BlockSourceT,
-    mut validate_from: Option<(BlockHeight, BlockHash)>,
-    limit: Option<u32>,
-) -> Result<(), Error<Infallible, BlockSourceT::Error, Infallible>>
-where
-    BlockSourceT: BlockSource,
-{
-    // The block source will contain blocks above the `validate_from` height. Validate from that
-    // maximum height up to the chain tip, returning the hash of the block found in the block
-    // source at the `validate_from` height, which can then be used to verify chain integrity by
-    // comparing against the `validate_from` hash.
-
-    block_source.with_blocks::<_, Infallible, Infallible>(
-        validate_from.map(|(h, _)| h + 1),
-        limit,
-        move |block| {
-            if let Some((valid_height, valid_hash)) = validate_from {
-                if block.height() != valid_height + 1 {
-                    return Err(ChainError::block_height_discontinuity(
-                        valid_height + 1,
-                        block.height(),
-                    )
-                    .into());
-                } else if block.prev_hash() != valid_hash {
-                    return Err(ChainError::prev_hash_mismatch(block.height()).into());
-                }
-            }
-
-            validate_from = Some((block.height(), block.hash()));
-            Ok(())
-        },
-    )
-}
-
 /// Scans at most `limit` new blocks added to the block source for any transactions received by the
 /// tracked accounts.
 ///
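
The removed function above was also the only in-tree illustration of driving `BlockSource::with_blocks` directly. For reference, a comparable helper built on the same driver and the same `Infallible` instantiation could walk the cache and report the last block it saw. This is a hypothetical sketch, not crate API; it assumes only the `BlockSource` trait, the `CompactBlock` accessors, and the error type that appear in this diff.

// Sketch only: a `with_blocks`-based helper in the style of the removed function.
use std::convert::Infallible;

use zcash_client_backend::{
    data_api::chain::{error::Error, BlockSource},
    proto::compact_formats::CompactBlock,
};
use zcash_primitives::{block::BlockHash, consensus::BlockHeight};

/// Returns the height and hash of the last cached block at or above `from_height`,
/// if any. (Hypothetical helper; not part of the crate.)
fn tip_of_cache<BlockSourceT>(
    block_source: &BlockSourceT,
    from_height: Option<BlockHeight>,
) -> Result<Option<(BlockHeight, BlockHash)>, Error<Infallible, BlockSourceT::Error, Infallible>>
where
    BlockSourceT: BlockSource,
{
    let mut tip = None;
    block_source.with_blocks::<_, Infallible, Infallible>(from_height, None, |block: CompactBlock| {
        tip = Some((block.height(), block.hash()));
        Ok(())
    })?;
    Ok(tip)
}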

@@ -279,7 +279,7 @@ mod tests {
     use zcash_client_backend::{
         address::RecipientAddress,
         data_api::{
-            chain::{error::Error, scan_cached_blocks, validate_chain},
+            chain::scan_cached_blocks,
             wallet::{input_selection::GreedyInputSelector, spend},
             WalletRead, WalletWrite,
         },

@@ -329,21 +329,9 @@
         insert_into_cache(&db_cache, &cb);
 
-        // Cache-only chain should be valid
-        let validate_chain_result = validate_chain(
-            &db_cache,
-            Some((fake_block_height, fake_block_hash)),
-            Some(1),
-        );
-
-        assert_matches!(validate_chain_result, Ok(()));
-
         // Scan the cache
         scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
 
-        // Data-only chain should be valid
-        validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None).unwrap();
-
         // Create a second fake CompactBlock sending more value to the address
         let (cb2, _) = fake_compact_block(
             sapling_activation_height() + 1,
|
@ -355,14 +343,8 @@ mod tests {
|
||||||
);
|
);
|
||||||
insert_into_cache(&db_cache, &cb2);
|
insert_into_cache(&db_cache, &cb2);
|
||||||
|
|
||||||
// Data+cache chain should be valid
|
|
||||||
validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None).unwrap();
|
|
||||||
|
|
||||||
// Scan the cache again
|
// Scan the cache again
|
||||||
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
|
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
|
||||||
|
|
||||||
// Data-only chain should be valid
|
|
||||||
validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None).unwrap();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|

@@ -401,9 +383,6 @@
         // Scan the cache
         scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
 
-        // Data-only chain should be valid
-        validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None).unwrap();
-
         // Create more fake CompactBlocks that don't connect to the scanned ones
         let (cb3, _) = fake_compact_block(
             sapling_activation_height() + 2,

@@ -425,9 +404,10 @@
         insert_into_cache(&db_cache, &cb4);
 
         // Data+cache chain should be invalid at the data/cache boundary
-        let val_result = validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None);
-
-        assert_matches!(val_result, Err(Error::Chain(e)) if e.at_height() == sapling_activation_height() + 2);
+        assert_matches!(
+            scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None),
+            Err(_) // FIXME: check error result more closely
+        );
     }
 
     #[test]
|
@ -466,9 +446,6 @@ mod tests {
|
||||||
// Scan the cache
|
// Scan the cache
|
||||||
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
|
scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None).unwrap();
|
||||||
|
|
||||||
// Data-only chain should be valid
|
|
||||||
validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None).unwrap();
|
|
||||||
|
|
||||||
// Create more fake CompactBlocks that contain a reorg
|
// Create more fake CompactBlocks that contain a reorg
|
||||||
let (cb3, _) = fake_compact_block(
|
let (cb3, _) = fake_compact_block(
|
||||||
sapling_activation_height() + 2,
|
sapling_activation_height() + 2,
|
||||||
|

@@ -490,9 +467,10 @@
         insert_into_cache(&db_cache, &cb4);
 
         // Data+cache chain should be invalid inside the cache
-        let val_result = validate_chain(&db_cache, db_data.get_max_height_hash().unwrap(), None);
-
-        assert_matches!(val_result, Err(Error::Chain(e)) if e.at_height() == sapling_activation_height() + 3);
+        assert_matches!(
+            scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None),
+            Err(_) // FIXME: check error result more closely
+        );
     }
 
     #[test]
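
The two `FIXME: check error result more closely` placeholders above mark where the tests became weaker: the old assertions pinned the failing height, while the new ones only require that scanning fails. If `scan_cached_blocks` comes to report the discontinuity the way `validate_chain` did (an `Error::Chain` value whose `at_height()` names the first bad block), the earlier assertions could be restored in roughly the following shape. This is an assumption about future error plumbing, not current behaviour, and it would also need `error::Error` back in the test imports.

// Hypothetical tightening of the first FIXME'd assertion, assuming the scan error
// exposes the failing height as the removed chain error did.
assert_matches!(
    scan_cached_blocks(&tests::network(), &db_cache, &mut db_data, None, None),
    Err(Error::Chain(e)) if e.at_height() == sapling_activation_height() + 2
);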