Reject duplicate sprout nullifiers in the state (#2477)

* Reject duplicate sprout nullifiers in the state

* Improve docs and error messages

* Clarify "must be present" assert logs

* Move nullifier checks to their own module

Also:
* make non-finalized nullifier checks and errors generic over
  sprout, sapling, and orchard
* create and update module and function documentation

* Fix a block type name in docs

* Move state assertions or skip them during tests

These changes enable state testing, while still asserting in production.

* Add sprout duplicate nullifier tests

* Improve comments

* Set value balance to 0 to pass future chain value pool checks

* Test finalized state in sprout nullifier accept test

* Replace assert with expect

* Improve assertion messages
This commit is contained in:
teor 2021-07-14 22:06:43 +10:00 committed by GitHub
parent 6676eb96b3
commit 0f5eced5c7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 658 additions and 49 deletions

1
Cargo.lock generated
View File

@ -4629,6 +4629,7 @@ dependencies = [
"displaydoc",
"futures 0.3.15",
"hex",
"itertools 0.10.1",
"lazy_static",
"metrics",
"once_cell",

View File

@ -34,8 +34,10 @@ zebra-test = { path = "../zebra-test/" }
color-eyre = "0.5.11"
once_cell = "1.8"
itertools = "0.10.1"
spandoc = "0.2"
tempdir = "0.3.7"
tokio = { version = "0.3.6", features = ["full"] }
proptest = "0.10.1"
proptest-derive = "0.3"

View File

@ -0,0 +1,7 @@
# Seeds for failure cases proptest has generated in the past. It is
# automatically read and these particular cases re-run before any
# novel cases are generated.
#
# It is recommended to check this file in to source control so that
# everyone who runs the test benefits from these saved cases.
cc 3b0fa74b6a4ffeb31638d5a38f832ffc972be3a66e390a2a88d8bcef2519d67c # shrinks to mut joinsplit = zebra_chain::sprout::joinsplit::JoinSplit<zebra_chain::primitives::proofs::groth16::Groth16Proof>, mut joinsplit_data = zebra_chain::transaction::joinsplit::JoinSplitData<zebra_chain::primitives::proofs::groth16::Groth16Proof>

View File

@ -3,7 +3,7 @@ use std::sync::Arc;
use chrono::{DateTime, Utc};
use thiserror::Error;
use zebra_chain::{block, work::difficulty::CompactDifficulty};
use zebra_chain::{block, sprout, work::difficulty::CompactDifficulty};
/// A wrapper for type erased errors that is itself clonable and implements the
/// Error trait
@ -31,12 +31,12 @@ impl From<BoxError> for CloneError {
pub type BoxError = Box<dyn std::error::Error + Send + Sync + 'static>;
/// An error describing the reason a block could not be committed to the state.
#[derive(Debug, Error)]
#[derive(Debug, Error, PartialEq, Eq)]
#[error("block is not contextually valid")]
pub struct CommitBlockError(#[from] ValidateContextError);
/// An error describing why a block failed contextual validation.
#[derive(Debug, Error)]
#[derive(Debug, Error, PartialEq, Eq)]
#[non_exhaustive]
#[allow(missing_docs)]
pub enum ValidateContextError {
@ -74,4 +74,26 @@ pub enum ValidateContextError {
difficulty_threshold: CompactDifficulty,
expected_difficulty: CompactDifficulty,
},
#[error("sprout double-spend: duplicate nullifier: {nullifier:?}, in finalized state: {in_finalized_state:?}")]
#[non_exhaustive]
DuplicateSproutNullifier {
nullifier: sprout::Nullifier,
in_finalized_state: bool,
},
}
/// Trait for creating the corresponding duplicate nullifier error from a nullifier.
///
/// Allows the non-finalized nullifier checks to be generic over
/// sprout, sapling, and orchard nullifier types.
pub(crate) trait DuplicateNullifierError {
    /// Returns the corresponding duplicate nullifier error for `self`.
    fn duplicate_nullifier_error(&self, in_finalized_state: bool) -> ValidateContextError;
}

impl DuplicateNullifierError for sprout::Nullifier {
    // Wraps a duplicate sprout nullifier in `ValidateContextError::DuplicateSproutNullifier`,
    // recording whether the duplicate was found in the finalized state.
    fn duplicate_nullifier_error(&self, in_finalized_state: bool) -> ValidateContextError {
        ValidateContextError::DuplicateSproutNullifier {
            nullifier: *self,
            in_finalized_state,
        }
    }
}

View File

@ -174,13 +174,6 @@ impl StateService {
/// Run contextual validation on the prepared block and add it to the
/// non-finalized state if it is contextually valid.
fn validate_and_commit(&mut self, prepared: PreparedBlock) -> Result<(), CommitBlockError> {
let mandatory_checkpoint = self.network.mandatory_checkpoint_height();
if prepared.height <= mandatory_checkpoint {
panic!(
"invalid non-finalized block height: the canopy checkpoint is mandatory, pre-canopy blocks, and the canopy activation block, must be committed to the state as finalized blocks"
);
}
self.check_contextual_validity(&prepared)?;
let parent_hash = prepared.block.header.previous_block_hash;
@ -208,6 +201,23 @@ impl StateService {
let queued_children = self.queued_blocks.dequeue_children(parent_hash);
for (child, rsp_tx) in queued_children {
// required by validate_and_commit, moved here to make testing easier
assert!(
child.height > self.network.mandatory_checkpoint_height(),
"invalid non-finalized block height: the canopy checkpoint is mandatory, \
pre-canopy blocks, and the canopy activation block, \
must be committed to the state as finalized blocks"
);
// required by check_contextual_validity, moved here to make testing easier
let relevant_chain =
self.any_ancestor_blocks(child.block.header.previous_block_hash);
assert!(
relevant_chain.len() >= POW_AVERAGING_WINDOW + POW_MEDIAN_BLOCK_SPAN,
"contextual validation requires at least \
28 (POW_AVERAGING_WINDOW + POW_MEDIAN_BLOCK_SPAN) blocks"
);
let child_hash = child.hash;
let result;
@ -244,9 +254,8 @@ impl StateService {
prepared: &PreparedBlock,
) -> Result<(), ValidateContextError> {
let relevant_chain = self.any_ancestor_blocks(prepared.block.header.previous_block_hash);
assert!(relevant_chain.len() >= POW_AVERAGING_WINDOW + POW_MEDIAN_BLOCK_SPAN,
"contextual validation requires at least 28 (POW_AVERAGING_WINDOW + POW_MEDIAN_BLOCK_SPAN) blocks");
// Security: check proof of work before any other checks
check::block_is_contextually_valid(
prepared,
self.network,
@ -254,6 +263,8 @@ impl StateService {
relevant_chain,
)?;
check::nullifier::no_duplicates_in_finalized_chain(prepared, &self.disk)?;
Ok(())
}

View File

@ -3,6 +3,7 @@
use std::borrow::Borrow;
use chrono::Duration;
use zebra_chain::{
block::{self, Block},
parameters::POW_AVERAGING_WINDOW,
@ -17,6 +18,8 @@ use super::check;
use difficulty::{AdjustedDifficulty, POW_MEDIAN_BLOCK_SPAN};
pub(crate) mod difficulty;
pub(crate) mod nullifier;
#[cfg(test)]
mod tests;
@ -56,11 +59,6 @@ where
.into_iter()
.take(MAX_CONTEXT_BLOCKS)
.collect();
assert_eq!(
relevant_chain.len(),
POW_AVERAGING_WINDOW + POW_MEDIAN_BLOCK_SPAN,
"state must contain enough blocks to do contextual validation"
);
let parent_block = relevant_chain
.get(0)
@ -71,6 +69,20 @@ where
.expect("valid blocks have a coinbase height");
check::height_one_more_than_parent_height(parent_height, prepared.height)?;
// skip this check during tests if we don't have enough blocks in the chain
#[cfg(test)]
if relevant_chain.len() < POW_AVERAGING_WINDOW + POW_MEDIAN_BLOCK_SPAN {
return Ok(());
}
// process_queued also checks the chain length, so we can skip this assertion during testing
// (tests that want to check this code should use the correct number of blocks)
assert_eq!(
relevant_chain.len(),
POW_AVERAGING_WINDOW + POW_MEDIAN_BLOCK_SPAN,
"state must contain enough blocks to do proof of work contextual validation, \
and validation must receive the exact number of required blocks"
);
let relevant_data = relevant_chain.iter().map(|block| {
(
block.borrow().header.difficulty_threshold,
@ -84,7 +96,6 @@ where
difficulty_adjustment,
)?;
// TODO: other contextual validation design and implementation
Ok(())
}

View File

@ -0,0 +1,119 @@
//! Checks for nullifier uniqueness.
//!
//! "A nullifier MUST NOT repeat either within a transaction,
//! or across transactions in a valid blockchain.
//! Sprout and Sapling and Orchard nullifiers are considered disjoint,
//! even if they have the same bit pattern."
//!
//! https://zips.z.cash/protocol/protocol.pdf#nullifierset
use std::collections::HashSet;
use tracing::trace;
use crate::{
error::DuplicateNullifierError, service::finalized_state::FinalizedState, PreparedBlock,
ValidateContextError,
};
/// Reject double-spends of nullifiers:
/// - one from this [`PreparedBlock`], and the other already committed to the [`FinalizedState`].
///
/// (Duplicate non-finalized nullifiers are rejected during the chain update,
/// see [`add_to_non_finalized_chain_unique`] for details.)
///
/// "A transaction is not valid if it would have added a nullifier
/// to the nullifier set that already exists in the set"
///
/// https://zips.z.cash/protocol/protocol.pdf#commitmentsandnullifiers
#[tracing::instrument(skip(prepared, finalized_state))]
pub(crate) fn no_duplicates_in_finalized_chain(
    prepared: &PreparedBlock,
    finalized_state: &FinalizedState,
) -> Result<(), ValidateContextError> {
    // Check every sprout nullifier revealed by this block against the finalized state,
    // rejecting on the first duplicate found.
    for nullifier in prepared.block.sprout_nullifiers() {
        if finalized_state.contains_sprout_nullifier(nullifier) {
            return Err(nullifier.duplicate_nullifier_error(true));
        }
    }

    // TODO: sapling and orchard nullifiers (#2231)

    Ok(())
}
/// Reject double-spends of nullifiers:
/// - both within the same [`JoinSplit`] (sprout only),
/// - from different [`JoinSplit`]s, [`sapling::Spend`]s or [`Action`]s
///   in this [`Transaction`]'s shielded data, or
/// - one from this shielded data, and another from:
///   - a previous transaction in this [`Block`], or
///   - a previous block in this non-finalized [`Chain`].
///
/// (Duplicate finalized nullifiers are rejected during service contextual validation,
/// see [`no_duplicates_in_finalized_chain`] for details.)
///
/// "A transaction is not valid if it would have added a nullifier
/// to the nullifier set that already exists in the set"
///
/// https://zips.z.cash/protocol/protocol.pdf#commitmentsandnullifiers
#[tracing::instrument(skip(chain_nullifiers, shielded_data_nullifiers))]
pub(crate) fn add_to_non_finalized_chain_unique<'block, NullifierT>(
    chain_nullifiers: &mut HashSet<NullifierT>,
    shielded_data_nullifiers: impl IntoIterator<Item = &'block NullifierT>,
) -> Result<(), ValidateContextError>
where
    NullifierT: DuplicateNullifierError + Copy + std::fmt::Debug + Eq + std::hash::Hash + 'block,
{
    for nullifier in shielded_data_nullifiers {
        trace!(?nullifier, "adding nullifier");

        // `HashSet::insert` returns `false` when the value was already in the set:
        // that is a duplicate spend within this non-finalized chain.
        if !chain_nullifiers.insert(*nullifier) {
            return Err(nullifier.duplicate_nullifier_error(false));
        }
    }

    // TODO: test that the chain's nullifiers are not modified on error (this PR)

    Ok(())
}
/// Remove nullifiers that were previously added to this non-finalized [`Chain`]
/// by this shielded data.
///
/// "A note can change from being unspent to spent as a nodes view
/// of the best valid block chain is extended by new transactions.
///
/// Also, block chain reorganizations can cause a node to switch
/// to a different best valid block chain that does not contain
/// the transaction in which a note was output"
///
/// https://zips.z.cash/protocol/nu5.pdf#decryptivk
///
/// Note: reorganizations can also change the best chain to one
/// where a note was unspent, rather than spent.
///
/// # Panics
///
/// Panics if any nullifier is missing from the chain when we try to remove it.
///
/// Blocks with duplicate nullifiers are rejected by
/// [`add_to_non_finalized_chain_unique`], so this shielded data should
/// be the only shielded data that added this nullifier to this [`Chain`].
#[tracing::instrument(skip(chain_nullifiers, shielded_data_nullifiers))]
pub(crate) fn remove_from_non_finalized_chain<'block, NullifierT>(
    chain_nullifiers: &mut HashSet<NullifierT>,
    shielded_data_nullifiers: impl IntoIterator<Item = &'block NullifierT>,
) where
    NullifierT: std::fmt::Debug + Eq + std::hash::Hash + 'block,
{
    for nullifier in shielded_data_nullifiers {
        trace!(?nullifier, "removing nullifier");

        // This shielded data must have added the nullifier when the block
        // was committed, so removal must always succeed.
        let was_present = chain_nullifiers.remove(nullifier);
        assert!(
            was_present,
            "nullifier must be present if block was added to chain"
        );
    }
}

View File

@ -1,3 +1,4 @@
//! Tests for state contextual validation checks.
mod prop;
mod vectors;

View File

@ -0,0 +1,413 @@
//! Randomised property tests for state contextual validation nullifier checks.
use std::{convert::TryInto, sync::Arc};
use itertools::Itertools;
use proptest::prelude::*;
use zebra_chain::{
block::{Block, Height},
fmt::TypeNameToDebug,
parameters::Network::*,
primitives::Groth16Proof,
serialization::ZcashDeserializeInto,
sprout::{self, JoinSplit},
transaction::{JoinSplitData, LockTime, Transaction},
};
use crate::{
config::Config, service::StateService, tests::Prepare, FinalizedBlock,
ValidateContextError::DuplicateSproutNullifier,
};
// These tests use the `Arbitrary` trait to easily generate complex types,
// then modify those types to cause an error (or to ensure success).
//
// We could use mainnet or testnet blocks in these tests,
// but the differences shouldn't matter,
// because we're only interested in spend validation,
// (and passing various other state checks).
proptest! {
/// Make sure an arbitrary sprout nullifier is accepted by state contextual validation.
///
/// This test makes sure there are no spurious rejections that might hide bugs in the other tests.
/// (And that the test infrastructure generally works.)
#[test]
fn accept_distinct_arbitrary_sprout_nullifiers(
    mut joinsplit in TypeNameToDebug::<JoinSplit::<Groth16Proof>>::arbitrary(),
    mut joinsplit_data in TypeNameToDebug::<JoinSplitData::<Groth16Proof>>::arbitrary(),
    use_finalized_state in any::<bool>(),
) {
    zebra_test::init();

    // start from a real mainnet block, then splice the generated shielded data into it
    let mut block1 = zebra_test::vectors::BLOCK_MAINNET_1_BYTES
        .zcash_deserialize_into::<Block>()
        .expect("block should deserialize");

    // the generated nullifiers could collide, so force them to be distinct
    make_distinct_nullifiers(&mut joinsplit.nullifiers);

    // make sure there are no other nullifiers
    joinsplit_data.first = joinsplit.0;
    joinsplit_data.rest = Vec::new();

    let transaction = transaction_v4_with_joinsplit_data(joinsplit_data.0);

    // convert the coinbase transaction to a version that the non-finalized state will accept
    block1.transactions[0] = transaction_v4_from_coinbase(&block1.transactions[0]).into();

    block1
        .transactions
        .push(transaction.into());

    let (mut state, _genesis) = new_state_with_mainnet_genesis();

    // randomly choose to commit the block to the finalized or non-finalized state
    if use_finalized_state {
        let block1 = FinalizedBlock::from(Arc::new(block1));
        let commit_result = state.disk.commit_finalized_direct(block1.clone(), "test");

        // the block with distinct nullifiers should be accepted as the new best tip
        prop_assert_eq!(Some((Height(1), block1.hash)), state.best_tip());
        prop_assert!(commit_result.is_ok());
    } else {
        let block1 = Arc::new(block1).prepare();
        let commit_result =
            state.validate_and_commit(block1.clone());

        prop_assert_eq!(commit_result, Ok(()));
        prop_assert_eq!(Some((Height(1), block1.hash)), state.best_tip());
    }
}
/// Make sure duplicate sprout nullifiers are rejected by state contextual validation,
/// if they come from the same JoinSplit.
#[test]
fn reject_duplicate_sprout_nullifiers_in_joinsplit(
    mut joinsplit in TypeNameToDebug::<JoinSplit::<Groth16Proof>>::arbitrary(),
    mut joinsplit_data in TypeNameToDebug::<JoinSplitData::<Groth16Proof>>::arbitrary(),
) {
    zebra_test::init();

    let mut block1 = zebra_test::vectors::BLOCK_MAINNET_1_BYTES
        .zcash_deserialize_into::<Block>()
        .expect("block should deserialize");

    // create a double-spend within the same joinsplit
    // this might not actually be valid under the nullifier generation consensus rules
    let duplicate_nullifier = joinsplit.nullifiers[0];
    joinsplit.nullifiers[1] = duplicate_nullifier;

    // make sure the duplicate pair are the only nullifiers in the transaction
    joinsplit_data.first = joinsplit.0;
    joinsplit_data.rest = Vec::new();

    let transaction = transaction_v4_with_joinsplit_data(joinsplit_data.0);

    // convert the coinbase transaction to a version that the non-finalized state will accept
    block1.transactions[0] = transaction_v4_from_coinbase(&block1.transactions[0]).into();

    block1
        .transactions
        .push(transaction.into());

    let (mut state, genesis) = new_state_with_mainnet_genesis();

    let block1 = Arc::new(block1).prepare();
    let commit_result =
        state.validate_and_commit(block1);

    // if the random proptest data produces other errors,
    // we might need to just check `is_err()` here
    prop_assert_eq!(
        commit_result,
        Err(
            DuplicateSproutNullifier {
                nullifier: duplicate_nullifier,
                in_finalized_state: false,
            }.into()
        )
    );

    // block was rejected
    prop_assert_eq!(Some((Height(0), genesis.hash)), state.best_tip());
}
/// Make sure duplicate sprout nullifiers are rejected by state contextual validation,
/// if they come from different JoinSplits in the same JoinSplitData/Transaction.
#[test]
fn reject_duplicate_sprout_nullifiers_in_transaction(
    mut joinsplit1 in TypeNameToDebug::<JoinSplit::<Groth16Proof>>::arbitrary(),
    mut joinsplit2 in TypeNameToDebug::<JoinSplit::<Groth16Proof>>::arbitrary(),
    mut joinsplit_data in TypeNameToDebug::<JoinSplitData::<Groth16Proof>>::arbitrary(),
) {
    zebra_test::init();

    let mut block1 = zebra_test::vectors::BLOCK_MAINNET_1_BYTES
        .zcash_deserialize_into::<Block>()
        .expect("block should deserialize");

    // make all generated nullifiers distinct, so the only duplicate is the one we create below
    make_distinct_nullifiers(&mut joinsplit1.nullifiers.iter_mut().chain(joinsplit2.nullifiers.iter_mut()));

    // create a double-spend across two joinsplits
    let duplicate_nullifier = joinsplit1.nullifiers[0];
    joinsplit2.nullifiers[0] = duplicate_nullifier;

    // make sure there are no other nullifiers
    joinsplit_data.first = joinsplit1.0;
    joinsplit_data.rest = vec![joinsplit2.0];

    let transaction = transaction_v4_with_joinsplit_data(joinsplit_data.0);

    // convert the coinbase transaction to a version that the non-finalized state will accept
    block1.transactions[0] = transaction_v4_from_coinbase(&block1.transactions[0]).into();

    block1
        .transactions
        .push(transaction.into());

    let (mut state, genesis) = new_state_with_mainnet_genesis();

    let block1 = Arc::new(block1).prepare();
    let commit_result =
        state.validate_and_commit(block1);

    prop_assert_eq!(
        commit_result,
        Err(
            DuplicateSproutNullifier {
                nullifier: duplicate_nullifier,
                in_finalized_state: false,
            }.into()
        )
    );

    // block was rejected, so the genesis block is still the best tip
    prop_assert_eq!(Some((Height(0), genesis.hash)), state.best_tip());
}
/// Make sure duplicate sprout nullifiers are rejected by state contextual validation,
/// if they come from different transactions in the same block.
#[test]
fn reject_duplicate_sprout_nullifiers_in_block(
    mut joinsplit1 in TypeNameToDebug::<JoinSplit::<Groth16Proof>>::arbitrary(),
    mut joinsplit2 in TypeNameToDebug::<JoinSplit::<Groth16Proof>>::arbitrary(),
    mut joinsplit_data1 in TypeNameToDebug::<JoinSplitData::<Groth16Proof>>::arbitrary(),
    mut joinsplit_data2 in TypeNameToDebug::<JoinSplitData::<Groth16Proof>>::arbitrary(),
) {
    zebra_test::init();

    let mut block1 = zebra_test::vectors::BLOCK_MAINNET_1_BYTES
        .zcash_deserialize_into::<Block>()
        .expect("block should deserialize");

    // make all generated nullifiers distinct, so the only duplicate is the one we create below
    make_distinct_nullifiers(&mut joinsplit1.nullifiers.iter_mut().chain(joinsplit2.nullifiers.iter_mut()));

    // create a double-spend across two transactions
    let duplicate_nullifier = joinsplit1.nullifiers[0];
    joinsplit2.nullifiers[0] = duplicate_nullifier;

    // make sure there are no other nullifiers
    joinsplit_data1.first = joinsplit1.0;
    joinsplit_data1.rest = Vec::new();

    joinsplit_data2.first = joinsplit2.0;
    joinsplit_data2.rest = Vec::new();

    let transaction1 = transaction_v4_with_joinsplit_data(joinsplit_data1.0);
    let transaction2 = transaction_v4_with_joinsplit_data(joinsplit_data2.0);

    // convert the coinbase transaction to a version that the non-finalized state will accept
    block1.transactions[0] = transaction_v4_from_coinbase(&block1.transactions[0]).into();

    block1
        .transactions
        .push(transaction1.into());
    block1
        .transactions
        .push(transaction2.into());

    let (mut state, genesis) = new_state_with_mainnet_genesis();

    let block1 = Arc::new(block1).prepare();
    let commit_result =
        state.validate_and_commit(block1);

    prop_assert_eq!(
        commit_result,
        Err(
            DuplicateSproutNullifier {
                nullifier: duplicate_nullifier,
                in_finalized_state: false,
            }.into()
        )
    );

    // block was rejected, so the genesis block is still the best tip
    prop_assert_eq!(Some((Height(0), genesis.hash)), state.best_tip());
}
/// Make sure duplicate sprout nullifiers are rejected by state contextual validation,
/// if they come from different blocks in the same chain.
#[test]
fn reject_duplicate_sprout_nullifiers_in_chain(
    mut joinsplit1 in TypeNameToDebug::<JoinSplit::<Groth16Proof>>::arbitrary(),
    mut joinsplit2 in TypeNameToDebug::<JoinSplit::<Groth16Proof>>::arbitrary(),
    mut joinsplit_data1 in TypeNameToDebug::<JoinSplitData::<Groth16Proof>>::arbitrary(),
    mut joinsplit_data2 in TypeNameToDebug::<JoinSplitData::<Groth16Proof>>::arbitrary(),
    duplicate_in_finalized_state in any::<bool>(),
) {
    zebra_test::init();

    let mut block1 = zebra_test::vectors::BLOCK_MAINNET_1_BYTES
        .zcash_deserialize_into::<Block>()
        .expect("block should deserialize");
    let mut block2 = zebra_test::vectors::BLOCK_MAINNET_2_BYTES
        .zcash_deserialize_into::<Block>()
        .expect("block should deserialize");

    // make all generated nullifiers distinct, so the only duplicate is the one we create below
    make_distinct_nullifiers(&mut joinsplit1.nullifiers.iter_mut().chain(joinsplit2.nullifiers.iter_mut()));

    // create a double-spend across two blocks
    let duplicate_nullifier = joinsplit1.nullifiers[0];
    joinsplit2.nullifiers[0] = duplicate_nullifier;

    // make sure there are no other nullifiers
    joinsplit_data1.first = joinsplit1.0;
    joinsplit_data1.rest = Vec::new();

    joinsplit_data2.first = joinsplit2.0;
    joinsplit_data2.rest = Vec::new();

    let transaction1 = transaction_v4_with_joinsplit_data(joinsplit_data1.0);
    let transaction2 = transaction_v4_with_joinsplit_data(joinsplit_data2.0);

    // convert the coinbase transactions to versions that the non-finalized state will accept
    block1.transactions[0] = transaction_v4_from_coinbase(&block1.transactions[0]).into();
    block2.transactions[0] = transaction_v4_from_coinbase(&block2.transactions[0]).into();

    block1
        .transactions
        .push(transaction1.into());
    block2
        .transactions
        .push(transaction2.into());

    let (mut state, _genesis) = new_state_with_mainnet_genesis();

    let block1_hash;
    // randomly choose to commit the next block to the finalized or non-finalized state
    if duplicate_in_finalized_state {
        let block1 = FinalizedBlock::from(Arc::new(block1));
        let commit_result = state.disk.commit_finalized_direct(block1.clone(), "test");

        prop_assert_eq!(Some((Height(1), block1.hash)), state.best_tip());
        prop_assert!(commit_result.is_ok());

        block1_hash = block1.hash;
    } else {
        let block1 = Arc::new(block1).prepare();
        let commit_result =
            state.validate_and_commit(block1.clone());

        prop_assert_eq!(commit_result, Ok(()));
        prop_assert_eq!(Some((Height(1), block1.hash)), state.best_tip());

        block1_hash = block1.hash;
    }

    // block2 reuses block1's nullifier, so it must be rejected,
    // and the error records where the duplicate was found
    let block2 = Arc::new(block2).prepare();
    let commit_result =
        state.validate_and_commit(block2);

    prop_assert_eq!(
        commit_result,
        Err(
            DuplicateSproutNullifier {
                nullifier: duplicate_nullifier,
                in_finalized_state: duplicate_in_finalized_state,
            }.into()
        )
    );

    // block2 was rejected, so block1 is still the best tip
    prop_assert_eq!(Some((Height(1), block1_hash)), state.best_tip());
}
}
/// Return a new `StateService` containing the mainnet genesis block.
/// Also returns the finalized genesis block itself.
///
/// # Panics
///
/// Panics if the genesis test vector is invalid, or if committing it fails.
fn new_state_with_mainnet_genesis() -> (StateService, FinalizedBlock) {
    let genesis = zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES
        .zcash_deserialize_into::<Arc<Block>>()
        .expect("block should deserialize");

    // NOTE(review): an ephemeral config presumably avoids leaving state on disk — confirm
    let mut state = StateService::new(Config::ephemeral(), Mainnet);

    // the state must start empty
    assert_eq!(None, state.best_tip());

    let genesis = FinalizedBlock::from(genesis);
    state
        .disk
        .commit_finalized_direct(genesis.clone(), "test")
        .expect("unexpected invalid genesis block test vector");

    // after the commit, the genesis block is the best tip
    assert_eq!(Some((Height(0), genesis.hash)), state.best_tip());

    (state, genesis)
}
/// Make sure the supplied nullifiers are distinct, modifying them if necessary.
///
/// If any duplicates are found, every nullifier gets a unique first byte,
/// which is enough to make the whole list distinct.
///
/// # Panics
///
/// Panics if there are more nullifiers than distinct `u8` values.
fn make_distinct_nullifiers<'joinsplit>(
    nullifiers: impl IntoIterator<Item = &'joinsplit mut sprout::Nullifier>,
) {
    let nullifiers: Vec<_> = nullifiers.into_iter().collect();

    let has_duplicates = nullifiers.iter().unique().count() < nullifiers.len();

    if has_duplicates {
        let mut next_first_byte: u8 = 0x00;
        for nullifier in nullifiers {
            nullifier.0[0] = next_first_byte;
            next_first_byte = next_first_byte
                .checked_add(0x01)
                .expect("unexpectedly large nullifier list");
        }
    }
}
/// Return a `Transaction::V4` containing `joinsplit_data`.
///
/// Other fields have empty or default values.
fn transaction_v4_with_joinsplit_data(
    joinsplit_data: impl Into<Option<JoinSplitData<Groth16Proof>>>,
) -> Transaction {
    let mut joinsplit_data = joinsplit_data.into();

    // set value balance to 0 to pass the chain value pool checks
    if let Some(ref mut joinsplit_data) = joinsplit_data {
        let zero_amount = 0.try_into().expect("unexpected invalid zero amount");

        joinsplit_data.first.vpub_old = zero_amount;
        joinsplit_data.first.vpub_new = zero_amount;

        // `joinsplit` is already an `&mut` reference, so the extra `mut` binding
        // was redundant (it triggered the `unused_mut` lint)
        for joinsplit in &mut joinsplit_data.rest {
            joinsplit.vpub_old = zero_amount;
            joinsplit.vpub_new = zero_amount;
        }
    }

    Transaction::V4 {
        inputs: Vec::new(),
        outputs: Vec::new(),
        lock_time: LockTime::min_lock_time(),
        expiry_height: Height(0),
        joinsplit_data,
        sapling_shielded_data: None,
    }
}
/// Return a `Transaction::V4` with the coinbase data from `coinbase`.
///
/// Used to convert a coinbase transaction to a version that the non-finalized state will accept.
///
/// # Panics
///
/// Panics if `coinbase` has sapling shielded data, because the conversion discards it.
fn transaction_v4_from_coinbase(coinbase: &Transaction) -> Transaction {
    assert!(
        !coinbase.has_sapling_shielded_data(),
        "conversion assumes sapling shielded data is None"
    );

    Transaction::V4 {
        inputs: coinbase.inputs().to_vec(),
        outputs: coinbase.outputs().to_vec(),
        lock_time: coinbase.lock_time(),
        // `Height(0)` means that the expiry height is ignored
        expiry_height: coinbase.expiry_height().unwrap_or(Height(0)),
        // invalid for coinbase transactions
        joinsplit_data: None,
        sapling_shielded_data: None,
    }
}

View File

@ -7,11 +7,12 @@ mod tests;
use std::{collections::HashMap, convert::TryInto, path::Path, sync::Arc};
use zebra_chain::transparent;
use zebra_chain::{
block::{self, Block},
parameters::{Network, GENESIS_PREVIOUS_BLOCK_HASH},
sprout,
transaction::{self, Transaction},
transparent,
};
use crate::{BoxError, Config, FinalizedBlock, HashOrHeight};
@ -368,6 +369,12 @@ impl FinalizedState {
self.db.zs_get(utxo_by_outpoint, outpoint)
}
/// Returns `true` if the finalized state contains `sprout_nullifier`.
pub fn contains_sprout_nullifier(&self, sprout_nullifier: &sprout::Nullifier) -> bool {
    // NOTE(review): presumably the `sprout_nullifiers` column family is created
    // when the database is opened, so the handle must exist — confirm
    let sprout_nullifiers = self.db.cf_handle("sprout_nullifiers").unwrap();
    // existence check only: the stored value is never deserialized
    self.db.zs_contains(sprout_nullifiers, &sprout_nullifier)
}
/// Returns the finalized hash for a given `block::Height` if it is present.
pub fn hash(&self, height: block::Height) -> Option<block::Hash> {
let hash_by_height = self.db.cf_handle("hash_by_height").unwrap();

View File

@ -258,12 +258,16 @@ impl DiskSerialize for rocksdb::WriteBatch {
/// Helper trait for retrieving values from rocksdb column familys with a consistently
/// defined format
pub trait DiskDeserialize {
/// Serialize the given key and use that to get and deserialize the
/// corresponding value from a rocksdb column family, if it is present.
/// Returns the value for `key` in the rocksdb column family `cf`, if present.
fn zs_get<K, V>(&self, cf: &rocksdb::ColumnFamily, key: &K) -> Option<V>
where
K: IntoDisk,
V: FromDisk;
/// Check if a rocksdb column family `cf` contains the serialized form of `key`.
fn zs_contains<K>(&self, cf: &rocksdb::ColumnFamily, key: &K) -> bool
where
K: IntoDisk;
}
impl DiskDeserialize for rocksdb::DB {
@ -275,7 +279,7 @@ impl DiskDeserialize for rocksdb::DB {
let key_bytes = key.as_bytes();
// We use `get_pinned_cf` to avoid taking ownership of the serialized
// format because we're going to deserialize it anyways, which avoids an
// value, because we're going to deserialize it anyways, which avoids an
// extra copy
let value_bytes = self
.get_pinned_cf(cf, key_bytes)
@ -283,6 +287,19 @@ impl DiskDeserialize for rocksdb::DB {
value_bytes.map(V::from_bytes)
}
// Returns `true` if the column family `cf` contains the serialized form of `key`.
fn zs_contains<K>(&self, cf: &rocksdb::ColumnFamily, key: &K) -> bool
where
    K: IntoDisk,
{
    let key_bytes = key.as_bytes();

    // We use `get_pinned_cf` to avoid taking ownership of the serialized
    // value, because we don't use the value at all. This avoids an extra copy.
    self.get_pinned_cf(cf, key_bytes)
        .expect("expected that disk errors would not occur")
        .is_some()
}
}
#[cfg(test)]

View File

@ -4,13 +4,14 @@ use std::{
ops::Deref,
};
use tracing::{debug_span, instrument, trace};
use tracing::instrument;
use zebra_chain::{
block, orchard, primitives::Groth16Proof, sapling, sprout, transaction,
transaction::Transaction::*, transparent, work::difficulty::PartialCumulativeWork,
};
use crate::{PreparedBlock, ValidateContextError};
use crate::{service::check, PreparedBlock, ValidateContextError};
#[derive(Default, Clone)]
pub struct Chain {
@ -238,7 +239,7 @@ impl UpdateWith<PreparedBlock> for Chain {
// remove the blocks hash from `height_by_hash`
assert!(
self.height_by_hash.remove(&hash).is_some(),
"hash must be present if block was"
"hash must be present if block was added to chain"
);
// remove work from partial_cumulative_work
@ -286,7 +287,7 @@ impl UpdateWith<PreparedBlock> for Chain {
// remove `transaction.hash` from `tx_by_hash`
assert!(
self.tx_by_hash.remove(transaction_hash).is_some(),
"transactions must be present if block was"
"transactions must be present if block was added to chain"
);
// remove the utxos this produced
@ -344,7 +345,7 @@ impl UpdateWith<Vec<transparent::Input>> for Chain {
transparent::Input::PrevOut { outpoint, .. } => {
assert!(
self.spent_utxos.remove(outpoint),
"spent_utxos must be present if block was"
"spent_utxos must be present if block was added to chain"
);
}
transparent::Input::Coinbase { .. } => {}
@ -360,36 +361,29 @@ impl UpdateWith<Option<transaction::JoinSplitData<Groth16Proof>>> for Chain {
joinsplit_data: &Option<transaction::JoinSplitData<Groth16Proof>>,
) -> Result<(), ValidateContextError> {
if let Some(joinsplit_data) = joinsplit_data {
for sprout::JoinSplit { nullifiers, .. } in joinsplit_data.joinsplits() {
let span = debug_span!("revert_chain_state_with", ?nullifiers);
let _entered = span.enter();
trace!("Adding sprout nullifiers.");
self.sprout_nullifiers.insert(nullifiers[0]);
self.sprout_nullifiers.insert(nullifiers[1]);
}
check::nullifier::add_to_non_finalized_chain_unique(
&mut self.sprout_nullifiers,
joinsplit_data.nullifiers(),
)?;
}
Ok(())
}
/// # Panics
///
/// Panics if any nullifier is missing from the chain when we try to remove it.
///
/// See [`check::nullifier::remove_from_non_finalized_chain`] for details.
#[instrument(skip(self, joinsplit_data))]
fn revert_chain_state_with(
&mut self,
joinsplit_data: &Option<transaction::JoinSplitData<Groth16Proof>>,
) {
if let Some(joinsplit_data) = joinsplit_data {
for sprout::JoinSplit { nullifiers, .. } in joinsplit_data.joinsplits() {
let span = debug_span!("revert_chain_state_with", ?nullifiers);
let _entered = span.enter();
trace!("Removing sprout nullifiers.");
assert!(
self.sprout_nullifiers.remove(&nullifiers[0]),
"nullifiers must be present if block was"
);
assert!(
self.sprout_nullifiers.remove(&nullifiers[1]),
"nullifiers must be present if block was"
);
}
check::nullifier::remove_from_non_finalized_chain(
&mut self.sprout_nullifiers,
joinsplit_data.nullifiers(),
);
}
}
}
@ -404,6 +398,7 @@ where
) -> Result<(), ValidateContextError> {
if let Some(sapling_shielded_data) = sapling_shielded_data {
for nullifier in sapling_shielded_data.nullifiers() {
// TODO: check sapling nullifiers are unique (#2231)
self.sapling_nullifiers.insert(*nullifier);
}
}
@ -416,9 +411,10 @@ where
) {
if let Some(sapling_shielded_data) = sapling_shielded_data {
for nullifier in sapling_shielded_data.nullifiers() {
// TODO: refactor using generic assert function (#2231)
assert!(
self.sapling_nullifiers.remove(nullifier),
"nullifier must be present if block was"
"nullifier must be present if block was added to chain"
);
}
}
@ -431,6 +427,7 @@ impl UpdateWith<Option<orchard::ShieldedData>> for Chain {
orchard_shielded_data: &Option<orchard::ShieldedData>,
) -> Result<(), ValidateContextError> {
if let Some(orchard_shielded_data) = orchard_shielded_data {
// TODO: check orchard nullifiers are unique (#2231)
for nullifier in orchard_shielded_data.nullifiers() {
self.orchard_nullifiers.insert(*nullifier);
}
@ -441,9 +438,10 @@ impl UpdateWith<Option<orchard::ShieldedData>> for Chain {
fn revert_chain_state_with(&mut self, orchard_shielded_data: &Option<orchard::ShieldedData>) {
if let Some(orchard_shielded_data) = orchard_shielded_data {
for nullifier in orchard_shielded_data.nullifiers() {
// TODO: refactor using generic assert function (#2231)
assert!(
self.orchard_nullifiers.remove(nullifier),
"nullifier must be present if block was"
"nullifier must be present if block was added to chain"
);
}
}