test(db): add large transaction tests (#3759)

* refactor(test/block): rename large single transaction function

```sh
fastmod single_transaction_block single_transaction_block_many_inputs
```

* rustfmt

* test(block): add a test block with many transparent outputs

* doc(db): explain why we can't just get the UTXOs right before they are deleted

* refactor(db): split out a block data write method

* refactor(block): add a height argument to new_outputs

* test(db): add block and transaction round-trip tests

Including large blocks and transactions.

* test(db): fix large block serialization instability in the tests

* doc(block): add TODOs for generating correct blocks

* Make transparent output functions which take a height test-only

* make sure generated blocks are actually over/under-sized

* replace println!() with an error!() log (a sketch of this change follows below)
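
A minimal sketch of that logging change, assuming the `tracing` crate's `error!` macro (the helper name here is hypothetical, not part of this commit):

```rust
use tracing::error;

// Hypothetical helper: report a block mismatch as a structured log,
// instead of printing unconditionally to stdout.
fn report_mismatch(original_data: &str, stored_data: &str) {
    // Before: println!("block mismatch: {} vs {}", original_data, stored_data);
    // After: an error-level log that respects the configured log filters:
    error!(%original_data, %stored_data, "detailed block mismatch");
}
```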
teor 2022-03-10 09:34:50 +10:00 committed by GitHub
parent 3291db35c0
commit 7283b4bfd0
12 changed files with 542 additions and 102 deletions

View File

@@ -7,7 +7,9 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use zebra_chain::{
block::{
tests::generate::{large_multi_transaction_block, large_single_transaction_block},
tests::generate::{
large_multi_transaction_block, large_single_transaction_block_many_inputs,
},
Block,
},
serialization::{ZcashDeserialize, ZcashSerialize},
@@ -26,8 +28,8 @@ fn block_serialization(c: &mut Criterion) {
large_multi_transaction_block(),
),
(
"large_single_transaction_block",
large_single_transaction_block(),
"large_single_transaction_block_many_inputs",
large_single_transaction_block_many_inputs(),
),
];
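
For context, a sketch of how one of the renamed generators could feed a criterion benchmark (the benchmark id and function name are illustrative, not part of this diff):

```rust
use criterion::{BenchmarkId, Criterion};
use zebra_chain::{
    block::tests::generate::large_single_transaction_block_many_inputs,
    serialization::ZcashSerialize,
};

// Illustrative benchmark: measure canonical serialization of one large block.
fn bench_large_block_serialization(c: &mut Criterion) {
    let block = large_single_transaction_block_many_inputs();
    c.bench_with_input(
        BenchmarkId::new("block_serialization", "many_inputs"),
        &block,
        |bencher, block| bencher.iter(|| block.zcash_serialize_to_vec().unwrap()),
    );
}
```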

View File

@@ -1,106 +1,207 @@
//! Generate blockchain testing constructions
//! Generate large transparent blocks and transactions for testing.
use chrono::{DateTime, NaiveDateTime, Utc};
use std::sync::Arc;
use crate::{
block::{serialize::MAX_BLOCK_BYTES, Block, Header},
serialization::{ZcashDeserialize, ZcashSerialize},
transaction::{LockTime, Transaction},
transparent,
};
use super::super::{serialize::MAX_BLOCK_BYTES, Block, Header};
/// The minimum size of the blocks produced by this module.
pub const MIN_LARGE_BLOCK_BYTES: u64 = MAX_BLOCK_BYTES - 100;
/// Generate a block header
pub fn block_header() -> Header {
Header::zcash_deserialize(&zebra_test::vectors::DUMMY_HEADER[..]).unwrap()
/// The maximum number of bytes used to serialize a CompactSize,
/// for the transaction, input, and output counts generated by this module.
pub const MAX_COMPACT_SIZE_BYTES: usize = 4;
/// The number of bytes used to serialize a version 1 transaction header.
pub const TX_V1_HEADER_BYTES: usize = 4;
/// Returns a generated block header, and its canonical serialized bytes.
pub fn block_header() -> (Header, Vec<u8>) {
// Some of the test vectors are in a non-canonical format,
// so we have to round-trip serialize them.
let block_header = Header::zcash_deserialize(&zebra_test::vectors::DUMMY_HEADER[..]).unwrap();
let block_header_bytes = block_header.zcash_serialize_to_vec().unwrap();
(block_header, block_header_bytes)
}
/// Generate a block with multiple transactions just below limit
/// Returns a generated transparent transaction, and its canonical serialized bytes.
pub fn transaction() -> (Transaction, Vec<u8>) {
// Some of the test vectors are in a non-canonical format,
// so we have to round-trip serialize them.
let transaction = Transaction::zcash_deserialize(&zebra_test::vectors::DUMMY_TX1[..]).unwrap();
let transaction_bytes = transaction.zcash_serialize_to_vec().unwrap();
(transaction, transaction_bytes)
}
/// Returns a generated transparent lock time, and its canonical serialized bytes.
pub fn lock_time() -> (LockTime, Vec<u8>) {
let lock_time = LockTime::Time(DateTime::<Utc>::from_utc(
NaiveDateTime::from_timestamp(61, 0),
Utc,
));
let lock_time_bytes = lock_time.zcash_serialize_to_vec().unwrap();
(lock_time, lock_time_bytes)
}
/// Returns a generated transparent input, and its canonical serialized bytes.
pub fn input() -> (transparent::Input, Vec<u8>) {
// Some of the test vectors are in a non-canonical format,
// so we have to round-trip serialize them.
let input =
transparent::Input::zcash_deserialize(&zebra_test::vectors::DUMMY_INPUT1[..]).unwrap();
let input_bytes = input.zcash_serialize_to_vec().unwrap();
(input, input_bytes)
}
/// Returns a generated transparent output, and its canonical serialized bytes.
pub fn output() -> (transparent::Output, Vec<u8>) {
// Some of the test vectors are in a non-canonical format,
// so we have to round-trip serialize them.
let output =
transparent::Output::zcash_deserialize(&zebra_test::vectors::DUMMY_OUTPUT1[..]).unwrap();
let output_bytes = output.zcash_serialize_to_vec().unwrap();
(output, output_bytes)
}
/// Generate a block with multiple transparent transactions just below limit
///
/// TODO: add a coinbase height to the returned block
pub fn large_multi_transaction_block() -> Block {
multi_transaction_block(false)
}
/// Generate a block with one transaction and multiple inputs just below limit
pub fn large_single_transaction_block() -> Block {
single_transaction_block(false)
/// Generate a block with one transaction and multiple transparent inputs just below limit
///
/// TODO: add a coinbase height to the returned block
/// make the returned block stable under round-trip serialization
pub fn large_single_transaction_block_many_inputs() -> Block {
single_transaction_block_many_inputs(false)
}
/// Generate a block with multiple transactions just above limit
/// Generate a block with one transaction and multiple transparent outputs just below limit
///
/// TODO: add a coinbase height to the returned block
/// make the returned block stable under round-trip serialization
pub fn large_single_transaction_block_many_outputs() -> Block {
single_transaction_block_many_outputs(false)
}
/// Generate a block with multiple transparent transactions just above limit
///
/// TODO: add a coinbase height to the returned block
pub fn oversized_multi_transaction_block() -> Block {
multi_transaction_block(true)
}
/// Generate a block with one transaction and multiple inputs just above limit
pub fn oversized_single_transaction_block() -> Block {
single_transaction_block(true)
/// Generate a block with one transaction and multiple transparent inputs just above limit
///
/// TODO: add a coinbase height to the returned block
/// make the returned block stable under round-trip serialization
pub fn oversized_single_transaction_block_many_inputs() -> Block {
single_transaction_block_many_inputs(true)
}
// Implementation of block generation with multiple transactions
/// Generate a block with one transaction and multiple transparent outputs just above limit
///
/// TODO: add a coinbase height to the returned block
/// make the returned block stable under round-trip serialization
pub fn oversized_single_transaction_block_many_outputs() -> Block {
single_transaction_block_many_outputs(true)
}
/// Implementation of block generation with multiple transparent transactions
fn multi_transaction_block(oversized: bool) -> Block {
// A dummy transaction
let tx = Transaction::zcash_deserialize(&zebra_test::vectors::DUMMY_TX1[..]).unwrap();
let (transaction, transaction_bytes) = transaction();
// A block header
let header = block_header();
let (block_header, block_header_bytes) = block_header();
// Serialize header
let mut data_header = Vec::new();
header
.zcash_serialize(&mut data_header)
.expect("Block header should serialize");
// Calculate the number of transactions we need
let mut max_transactions_in_block =
(MAX_BLOCK_BYTES as usize - data_header.len()) / zebra_test::vectors::DUMMY_TX1[..].len();
// Calculate the number of transactions we need,
// subtracting the bytes used to serialize the expected transaction count.
let mut max_transactions_in_block = (usize::try_from(MAX_BLOCK_BYTES).unwrap()
- block_header_bytes.len()
- MAX_COMPACT_SIZE_BYTES)
/ transaction_bytes.len();
if oversized {
max_transactions_in_block += 1;
}
// Create transactions to be just below or just above the limit
let transactions = std::iter::repeat(Arc::new(tx))
let transactions = std::iter::repeat(Arc::new(transaction))
.take(max_transactions_in_block)
.collect::<Vec<_>>();
// Add the transactions into a block
Block {
header,
let block = Block {
header: block_header,
transactions,
}
};
let serialized_len = block.zcash_serialize_to_vec().unwrap().len();
assert_eq!(
oversized,
serialized_len > MAX_BLOCK_BYTES.try_into().unwrap(),
"block is over-sized if requested:\n\
oversized: {},\n\
serialized_len: {},\n\
MAX_BLOCK_BYTES: {},",
oversized,
serialized_len,
MAX_BLOCK_BYTES,
);
assert!(
serialized_len > MIN_LARGE_BLOCK_BYTES.try_into().unwrap(),
"block is large\n\
oversized: {},\n\
serialized_len: {},\n\
MIN_LARGE_BLOCK_BYTES: {},",
oversized,
serialized_len,
MIN_LARGE_BLOCK_BYTES,
);
block
}
// Implementation of block generation with one transaction and multiple inputs
fn single_transaction_block(oversized: bool) -> Block {
/// Implementation of block generation with one transaction and multiple transparent inputs
fn single_transaction_block_many_inputs(oversized: bool) -> Block {
// Dummy input and output
let input =
transparent::Input::zcash_deserialize(&zebra_test::vectors::DUMMY_INPUT1[..]).unwrap();
let output =
transparent::Output::zcash_deserialize(&zebra_test::vectors::DUMMY_OUTPUT1[..]).unwrap();
let (input, input_bytes) = input();
let (output, output_bytes) = output();
// A block header
let header = block_header();
let (block_header, block_header_bytes) = block_header();
// Serialize header
let mut data_header = Vec::new();
header
.zcash_serialize(&mut data_header)
.expect("Block header should serialize");
// A LockTime
let (lock_time, lock_time_bytes) = lock_time();
// Serialize a LockTime
let lock_time = LockTime::Time(DateTime::<Utc>::from_utc(
NaiveDateTime::from_timestamp(61, 0),
Utc,
));
let mut data_locktime = Vec::new();
lock_time
.zcash_serialize(&mut data_locktime)
.expect("LockTime should serialize");
// Calculate the number of inputs we need
let mut max_inputs_in_tx = (MAX_BLOCK_BYTES as usize
- data_header.len()
- zebra_test::vectors::DUMMY_OUTPUT1[..].len()
- data_locktime.len())
/ (zebra_test::vectors::DUMMY_INPUT1[..].len() - 1);
// Calculate the number of inputs we need,
// subtracting the bytes used to serialize the expected input count,
// transaction count, and output count.
let mut max_inputs_in_tx = (usize::try_from(MAX_BLOCK_BYTES).unwrap()
- block_header_bytes.len()
- 1
- TX_V1_HEADER_BYTES
- lock_time_bytes.len()
- MAX_COMPACT_SIZE_BYTES
- 1
- output_bytes.len())
/ input_bytes.len();
if oversized {
max_inputs_in_tx += 1;
@@ -125,8 +226,112 @@ fn single_transaction_block(oversized: bool) -> Block {
// Put the big transaction into a block
let transactions = vec![Arc::new(big_transaction)];
Block {
header,
let block = Block {
header: block_header,
transactions,
}
};
let serialized_len = block.zcash_serialize_to_vec().unwrap().len();
assert_eq!(
oversized,
serialized_len > MAX_BLOCK_BYTES.try_into().unwrap(),
"block is over-sized if requested:\n\
oversized: {},\n\
serialized_len: {},\n\
MAX_BLOCK_BYTES: {},",
oversized,
serialized_len,
MAX_BLOCK_BYTES,
);
assert!(
serialized_len > MIN_LARGE_BLOCK_BYTES.try_into().unwrap(),
"block is large\n\
oversized: {},\n\
serialized_len: {},\n\
MIN_LARGE_BLOCK_BYTES: {},",
oversized,
serialized_len,
MIN_LARGE_BLOCK_BYTES,
);
block
}
/// Implementation of block generation with one transaction and multiple transparent outputs
fn single_transaction_block_many_outputs(oversized: bool) -> Block {
// Dummy input and output
let (input, input_bytes) = input();
let (output, output_bytes) = output();
// A block header
let (block_header, block_header_bytes) = block_header();
// A LockTime
let (lock_time, lock_time_bytes) = lock_time();
// Calculate the number of outputs we need,
// subtracting the bytes used to serialize the expected output count,
// transaction count, and input count.
let mut max_outputs_in_tx = (usize::try_from(MAX_BLOCK_BYTES).unwrap()
- block_header_bytes.len()
- 1
- TX_V1_HEADER_BYTES
- lock_time_bytes.len()
- 1
- input_bytes.len()
- MAX_COMPACT_SIZE_BYTES)
/ output_bytes.len();
if oversized {
max_outputs_in_tx += 1;
}
// 1 single input
let inputs = vec![input];
// Create outputs to be just below the limit
let outputs = std::iter::repeat(output)
.take(max_outputs_in_tx)
.collect::<Vec<_>>();
// Create a big transaction
let big_transaction = Transaction::V1 {
inputs,
outputs,
lock_time,
};
// Put the big transaction into a block
let transactions = vec![Arc::new(big_transaction)];
let block = Block {
header: block_header,
transactions,
};
let serialized_len = block.zcash_serialize_to_vec().unwrap().len();
assert_eq!(
oversized,
serialized_len > MAX_BLOCK_BYTES.try_into().unwrap(),
"block is over-sized if requested:\n\
oversized: {},\n\
serialized_len: {},\n\
MAX_BLOCK_BYTES: {},",
oversized,
serialized_len,
MAX_BLOCK_BYTES,
);
assert!(
serialized_len > MIN_LARGE_BLOCK_BYTES.try_into().unwrap(),
"block is large\n\
oversized: {},\n\
serialized_len: {},\n\
MIN_LARGE_BLOCK_BYTES: {},",
oversized,
serialized_len,
MIN_LARGE_BLOCK_BYTES,
);
block
}
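
A usage sketch for these generators, as a hypothetical test (it assumes `MAX_BLOCK_BYTES` is re-exported from `zebra_chain::block`, and that `tests::generate` is visible, as in the benchmark above): each `large_*`/`oversized_*` pair should land on opposite sides of the block size limit:

```rust
use zebra_chain::{
    block::{tests::generate, MAX_BLOCK_BYTES},
    serialization::ZcashSerialize,
};

#[test]
fn generated_blocks_straddle_the_block_size_limit() {
    let under = generate::large_single_transaction_block_many_outputs();
    let over = generate::oversized_single_transaction_block_many_outputs();

    let under_len = under.zcash_serialize_to_vec().unwrap().len() as u64;
    let over_len = over.zcash_serialize_to_vec().unwrap().len() as u64;

    // The generators also assert this internally, so this is a double-check.
    assert!(under_len <= MAX_BLOCK_BYTES);
    assert!(over_len > MAX_BLOCK_BYTES);
}
```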

View File

@@ -39,7 +39,7 @@ fn blockheaderhash_debug() {
fn blockheaderhash_from_blockheader() {
zebra_test::init();
let blockheader = generate::block_header();
let (blockheader, _blockheader_bytes) = generate::block_header();
let hash = Hash::from(&blockheader);
@@ -290,7 +290,7 @@ fn block_limits_single_tx() {
// Test block limit with a big single transaction
// Create a block just below the limit
let mut block = generate::large_single_transaction_block();
let mut block = generate::large_single_transaction_block_many_inputs();
// Serialize the block
let mut data = Vec::new();
@@ -305,7 +305,7 @@ fn block_limits_single_tx() {
.expect("block should deserialize as we are just below limit");
// Add 1 more input to the transaction, limit will be reached
block = generate::oversized_single_transaction_block();
block = generate::oversized_single_transaction_block_many_inputs();
let mut data = Vec::new();
block
@@ -326,7 +326,7 @@ fn node_time_check(
block_header_time: DateTime<Utc>,
now: DateTime<Utc>,
) -> Result<(), BlockTimeError> {
let mut header = generate::block_header();
let (mut header, _header_bytes) = generate::block_header();
header.time = block_header_time;
// pass a zero height and hash - they are only used in the returned error
header.time_is_valid_at(now, &Height(0), &Hash([0; 32]))

View File

@@ -1,5 +1,4 @@
//! Transparent-related (Bitcoin-inherited) functionality.
#![allow(clippy::unit_arg)]
mod address;
mod keys;
@@ -11,20 +10,21 @@ pub use address::Address;
pub use script::Script;
pub use serialize::GENESIS_COINBASE_DATA;
pub use utxo::{
new_ordered_outputs, new_outputs, utxos_from_ordered_utxos, CoinbaseSpendRestriction,
OrderedUtxo, Utxo,
new_ordered_outputs, new_outputs, outputs_from_utxos, utxos_from_ordered_utxos,
CoinbaseSpendRestriction, OrderedUtxo, Utxo,
};
pub(crate) use utxo::outputs_from_utxos;
#[cfg(any(test, feature = "proptest-impl"))]
pub(crate) use utxo::new_transaction_ordered_outputs;
pub use utxo::{
new_ordered_outputs_with_height, new_outputs_with_height, new_transaction_ordered_outputs,
};
#[cfg(any(test, feature = "proptest-impl"))]
use proptest_derive::Arbitrary;
#[cfg(any(test, feature = "proptest-impl"))]
mod arbitrary;
#[cfg(test)]
mod tests;

View File

@@ -3,7 +3,7 @@
use std::{collections::HashMap, convert::TryInto};
use crate::{
block::{self, Block},
block::{self, Block, Height},
transaction::{self, Transaction},
transparent,
};
@@ -100,7 +100,7 @@ pub fn utxos_from_ordered_utxos(
}
/// Compute an index of [`Output`]s, given an index of [`Utxo`]s.
pub(crate) fn outputs_from_utxos(
pub fn outputs_from_utxos(
utxos: HashMap<transparent::OutPoint, Utxo>,
) -> HashMap<transparent::OutPoint, transparent::Output> {
utxos
@@ -118,15 +118,46 @@ pub fn new_outputs(
utxos_from_ordered_utxos(new_ordered_outputs(block, transaction_hashes))
}
/// Compute an index of newly created [`Utxo`]s, given a block and a
/// list of precomputed transaction hashes.
///
/// This is a test-only function, prefer [`new_outputs`].
#[cfg(any(test, feature = "proptest-impl"))]
pub fn new_outputs_with_height(
block: &Block,
height: Height,
transaction_hashes: &[transaction::Hash],
) -> HashMap<transparent::OutPoint, Utxo> {
utxos_from_ordered_utxos(new_ordered_outputs_with_height(
block,
height,
transaction_hashes,
))
}
/// Compute an index of newly created [`OrderedUtxo`]s, given a block and a
/// list of precomputed transaction hashes.
pub fn new_ordered_outputs(
block: &Block,
transaction_hashes: &[transaction::Hash],
) -> HashMap<transparent::OutPoint, OrderedUtxo> {
let mut new_ordered_outputs = HashMap::new();
let height = block.coinbase_height().expect("block has coinbase height");
new_ordered_outputs_with_height(block, height, transaction_hashes)
}
/// Compute an index of newly created [`OrderedUtxo`]s, given a block and a
/// list of precomputed transaction hashes.
///
/// This function is intended for use in this module, and in tests.
/// Prefer [`new_ordered_outputs`].
pub fn new_ordered_outputs_with_height(
block: &Block,
height: Height,
transaction_hashes: &[transaction::Hash],
) -> HashMap<transparent::OutPoint, OrderedUtxo> {
let mut new_ordered_outputs = HashMap::new();
for (tx_index_in_block, (transaction, hash)) in block
.transactions
.iter()
@@ -148,8 +179,8 @@ pub fn new_ordered_outputs(
/// its precomputed transaction hash, the transaction's index in its block,
/// and the block's height.
///
/// This function is only intended for use in tests.
pub(crate) fn new_transaction_ordered_outputs(
/// This function is only for use in this module, and in tests.
pub fn new_transaction_ordered_outputs(
transaction: &Transaction,
hash: transaction::Hash,
tx_index_in_block: usize,
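
A usage sketch for the `_with_height` variants (a hypothetical test helper; note these functions are gated on `test` or the `proptest-impl` feature): generated test blocks often lack a valid coinbase height, so the height is passed in explicitly instead of being read from the block:

```rust
use std::collections::HashMap;
use zebra_chain::{
    block::{Block, Height},
    transparent,
};

// Hypothetical test helper: index a block's newly created UTXOs,
// faking a zero height when the block has no coinbase height.
fn test_new_utxos(block: &Block) -> HashMap<transparent::OutPoint, transparent::Utxo> {
    let hashes: Vec<_> = block.transactions.iter().map(|tx| tx.hash()).collect();
    let height = block.coinbase_height().unwrap_or(Height(0));
    transparent::new_outputs_with_height(block, height, &hashes)
}
```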

View File

@@ -11,7 +11,9 @@ use zebra_chain::{
amount::{Amount, MAX_MONEY},
block::{
self,
tests::generate::{large_multi_transaction_block, large_single_transaction_block},
tests::generate::{
large_multi_transaction_block, large_single_transaction_block_many_inputs,
},
Block, Height,
},
parameters::{Network, NetworkUpgrade},
@@ -631,7 +633,7 @@ fn legacy_sigops_count_for_large_generated_blocks() {
// We can't test sigops using the transaction verifier, because it looks up UTXOs.
let block = large_single_transaction_block();
let block = large_single_transaction_block_many_inputs();
let mut legacy_sigop_count = 0;
for transaction in block.transactions {
let cached_ffi_transaction =

View File

@@ -8,7 +8,10 @@ use zebra_chain::{
value_balance::ValueBalance,
};
use crate::{request::ContextuallyValidBlock, service::chain_tip::ChainTipBlock, PreparedBlock};
use crate::{
request::ContextuallyValidBlock, service::chain_tip::ChainTipBlock, FinalizedBlock,
PreparedBlock,
};
/// Mocks computation done during semantic validation
pub trait Prepare {
@@ -21,7 +24,8 @@ impl Prepare for Arc<Block> {
let hash = block.hash();
let height = block.coinbase_height().unwrap();
let transaction_hashes: Arc<[_]> = block.transactions.iter().map(|tx| tx.hash()).collect();
let new_outputs = transparent::new_ordered_outputs(&block, &transaction_hashes);
let new_outputs =
transparent::new_ordered_outputs_with_height(&block, height, &transaction_hashes);
PreparedBlock {
block,
@@ -155,3 +159,27 @@ impl ContextuallyValidBlock {
Self::test_with_chain_pool_change(block, ValueBalance::zero())
}
}
impl FinalizedBlock {
/// Create a block that's ready to be committed to the finalized state,
/// using a precalculated [`block::Hash`] and [`block::Height`].
///
/// This is a test-only method, prefer [`FinalizedBlock::with_hash`].
#[cfg(any(test, feature = "proptest-impl"))]
pub fn with_hash_and_height(
block: Arc<Block>,
hash: block::Hash,
height: block::Height,
) -> Self {
let transaction_hashes: Arc<[_]> = block.transactions.iter().map(|tx| tx.hash()).collect();
let new_outputs = transparent::new_outputs_with_height(&block, height, &transaction_hashes);
Self {
block,
hash,
height,
new_outputs,
transaction_hashes,
}
}
}

View File

@@ -179,15 +179,14 @@ impl ContextuallyValidBlock {
impl FinalizedBlock {
/// Create a block that's ready to be committed to the finalized state,
/// using a precalculated [`block::Hash`] and [`block::Height`].
/// using a precalculated [`block::Hash`].
///
/// Note: a [`FinalizedBlock`] isn't actually finalized
/// until [`Request::CommitFinalizedBlock`] returns success.
pub fn with_hash_and_height(
block: Arc<Block>,
hash: block::Hash,
height: block::Height,
) -> Self {
pub fn with_hash(block: Arc<Block>, hash: block::Hash) -> Self {
let height = block
.coinbase_height()
.expect("coinbase height was already checked");
let transaction_hashes: Arc<[_]> = block.transactions.iter().map(|tx| tx.hash()).collect();
let new_outputs = transparent::new_outputs(&block, &transaction_hashes);
@@ -204,11 +203,8 @@ impl FinalizedBlock {
impl From<Arc<Block>> for FinalizedBlock {
fn from(block: Arc<Block>) -> Self {
let hash = block.hash();
let height = block
.coinbase_height()
.expect("finalized blocks must have a valid coinbase height");
FinalizedBlock::with_hash_and_height(block, hash, height)
FinalizedBlock::with_hash(block, hash)
}
}

View File

@@ -327,6 +327,7 @@ impl FinalizedState {
) -> Result<block::Hash, BoxError> {
let finalized_hash = finalized.hash;
// Get a list of the spent UTXOs, before we delete any from the database
let all_utxos_spent_by_block = finalized
.block
.transactions

View File

@@ -120,10 +120,6 @@ impl DiskWriteBatch {
history_tree: HistoryTree,
value_pool: ValueBalance<NonNegative>,
) -> Result<(), BoxError> {
let hash_by_height = db.cf_handle("hash_by_height").unwrap();
let height_by_hash = db.cf_handle("height_by_hash").unwrap();
let block_by_height = db.cf_handle("block_by_height").unwrap();
let FinalizedBlock {
block,
hash,
@@ -131,12 +127,9 @@ impl DiskWriteBatch {
..
} = &finalized;
// Index the block
self.zs_insert(hash_by_height, height, hash);
self.zs_insert(height_by_hash, hash, height);
// TODO: as part of ticket #3151, commit transaction data, but not UTXOs or address indexes
self.zs_insert(block_by_height, height, block);
// Commit block and transaction data,
// but not transaction indexes, note commitments, or UTXOs.
self.prepare_block_header_transactions_batch(db, &finalized)?;
// # Consensus
//
@@ -151,6 +144,7 @@ impl DiskWriteBatch {
return Ok(());
}
// Commit transaction indexes
self.prepare_transaction_index_batch(db, &finalized, &mut note_commitment_trees)?;
self.prepare_note_commitment_batch(
@@ -161,6 +155,7 @@ impl DiskWriteBatch {
history_tree,
)?;
// Commit UTXOs and value pools
self.prepare_chain_value_pools_batch(db, &finalized, all_utxos_spent_by_block, value_pool)?;
// The block has passed contextual validation, so update the metrics
@@ -169,6 +164,38 @@ impl DiskWriteBatch {
Ok(())
}
/// Prepare a database batch containing the block header and transactions
/// from `finalized.block`, and return it (without actually writing anything).
///
/// # Errors
///
/// - This method does not currently return any errors.
pub fn prepare_block_header_transactions_batch(
&mut self,
db: &DiskDb,
finalized: &FinalizedBlock,
) -> Result<(), BoxError> {
let hash_by_height = db.cf_handle("hash_by_height").unwrap();
let height_by_hash = db.cf_handle("height_by_hash").unwrap();
let block_by_height = db.cf_handle("block_by_height").unwrap();
let FinalizedBlock {
block,
hash,
height,
..
} = finalized;
// Index the block
self.zs_insert(hash_by_height, height, hash);
self.zs_insert(height_by_hash, hash, height);
// Commit block and transaction data, but not UTXOs or address indexes
self.zs_insert(block_by_height, height, block);
Ok(())
}
/// If `finalized.block` is a genesis block,
/// prepare a database batch that finishes initializing the database,
/// and return `true` (without actually writing anything).
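
A usage sketch of the split-out method, mirroring the round-trip test at the end of this diff (this assumes in-crate visibility of `FinalizedState.db`, which the test below relies on):

```rust
use crate::{
    service::finalized_state::{disk_db::DiskWriteBatch, FinalizedState},
    FinalizedBlock,
};

// Hypothetical in-crate test helper: write one block's header and
// transaction data directly, skipping contextual validation.
fn write_block_data_directly(state: &FinalizedState, finalized: &FinalizedBlock) {
    let mut batch = DiskWriteBatch::new();
    batch
        .prepare_block_header_transactions_batch(&state.db, finalized)
        .expect("block data is valid for the batch");
    state.db.write(batch).expect("unexpected database write failure");
}
```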

View File

@@ -1,3 +1,4 @@
//! Tests for finalized database blocks and transactions.
mod snapshot;
mod vectors;

View File

@@ -0,0 +1,147 @@
//! Fixed database test vectors for blocks and transactions.
//!
//! These tests check that the database correctly serializes
//! and deserializes large heights, blocks and transactions.
//!
//! # TODO
//!
//! Test large blocks and transactions with shielded data,
//! including data activated in Overwinter and later network upgrades.
//!
//! Check transparent address indexes, UTXOs, etc.
use std::{iter, sync::Arc};
use zebra_chain::{
block::{
tests::generate::{
large_multi_transaction_block, large_single_transaction_block_many_inputs,
large_single_transaction_block_many_outputs,
},
Block, Height,
},
parameters::Network::{self, *},
serialization::{ZcashDeserializeInto, ZcashSerialize},
};
use zebra_test::vectors::{MAINNET_BLOCKS, TESTNET_BLOCKS};
use crate::{
service::finalized_state::{disk_db::DiskWriteBatch, disk_format::IntoDisk, FinalizedState},
Config, FinalizedBlock,
};
/// Storage round-trip test for block and transaction data in the finalized state database.
#[test]
fn test_block_db_round_trip() {
let mainnet_test_cases = MAINNET_BLOCKS
.iter()
.map(|(_height, block)| block.zcash_deserialize_into().unwrap());
let testnet_test_cases = TESTNET_BLOCKS
.iter()
.map(|(_height, block)| block.zcash_deserialize_into().unwrap());
test_block_db_round_trip_with(Mainnet, mainnet_test_cases);
test_block_db_round_trip_with(Testnet, testnet_test_cases);
// It doesn't matter if these blocks are mainnet or testnet,
// because there is no validation at this level of the database.
//
// These blocks have the same height and header hash, so they each need a new state.
test_block_db_round_trip_with(Mainnet, iter::once(large_multi_transaction_block()));
// These blocks are unstable under serialization, so we apply a round-trip first.
//
// TODO: fix the bug in the generated test vectors.
let block = large_single_transaction_block_many_inputs();
let block_data = block
.zcash_serialize_to_vec()
.expect("serialization to vec never fails");
let block: Block = block_data
.zcash_deserialize_into()
.expect("deserialization of valid serialized block never fails");
test_block_db_round_trip_with(Mainnet, iter::once(block));
let block = large_single_transaction_block_many_outputs();
let block_data = block
.zcash_serialize_to_vec()
.expect("serialization to vec never fails");
let block: Block = block_data
.zcash_deserialize_into()
.expect("deserialization of valid serialized block never fails");
test_block_db_round_trip_with(Mainnet, iter::once(block));
}
fn test_block_db_round_trip_with(
network: Network,
block_test_cases: impl IntoIterator<Item = Block>,
) {
zebra_test::init();
let state = FinalizedState::new(&Config::ephemeral(), network);
// Check that each block round-trips to the database
for original_block in block_test_cases.into_iter() {
// First, check that the block round-trips without using the database
let block_data = original_block
.zcash_serialize_to_vec()
.expect("serialization to vec never fails");
let round_trip_block: Block = block_data
.zcash_deserialize_into()
.expect("deserialization of valid serialized block never fails");
let round_trip_data = round_trip_block
.zcash_serialize_to_vec()
.expect("serialization to vec never fails");
assert_eq!(
original_block, round_trip_block,
"test block structure must round-trip",
);
assert_eq!(
block_data, round_trip_data,
"test block data must round-trip",
);
// Now, use the database
let original_block = Arc::new(original_block);
let finalized = if original_block.coinbase_height().is_some() {
original_block.clone().into()
} else {
// Fake a zero height
FinalizedBlock::with_hash_and_height(
original_block.clone(),
original_block.hash(),
Height(0),
)
};
// Skip validation by writing the block directly to the database
let mut batch = DiskWriteBatch::new();
batch
.prepare_block_header_transactions_batch(&state.db, &finalized)
.expect("block is valid for batch");
state.db.write(batch).expect("block is valid for writing");
// Now read it back from the state
let stored_block = state
.block(finalized.height.into())
.expect("block was stored at height");
if stored_block != original_block {
error!(
"
detailed block mismatch report:
original: {:?}\n\
original data: {:?}\n\
stored: {:?}\n\
stored data: {:?}\n\
",
original_block,
hex::encode(original_block.as_bytes()),
stored_block,
hex::encode(stored_block.as_bytes()),
);
}
assert_eq!(stored_block, original_block);
}
}
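
To run just this round-trip test (assuming it lives in the `zebra-state` crate, as the `crate::service::finalized_state` imports suggest):

```sh
cargo test -p zebra-state test_block_db_round_trip
```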