Only fetch block headers from the database to answer headers requests (#4792)

teor 2022-07-22 09:15:22 +10:00 committed by GitHub
parent 71fe4c4c73
commit cbb3232769
18 changed files with 180 additions and 106 deletions

View File

@ -1,6 +1,6 @@
//! Blocks and block-related structures (heights, headers, etc.)
use std::{collections::HashMap, fmt, ops::Neg};
use std::{collections::HashMap, fmt, ops::Neg, sync::Arc};
use crate::{
amount::NegativeAllowed,
@ -46,9 +46,9 @@ pub use arbitrary::LedgerState;
#[cfg_attr(any(test, feature = "proptest-impl"), derive(Serialize))]
pub struct Block {
/// The block header, containing block metadata.
pub header: Header,
pub header: Arc<Header>,
/// The block transactions.
pub transactions: Vec<std::sync::Arc<Transaction>>,
pub transactions: Vec<Arc<Transaction>>,
}
impl fmt::Display for Block {
@ -219,7 +219,7 @@ impl Block {
impl<'a> From<&'a Block> for Hash {
fn from(block: &'a Block) -> Hash {
block.header.into()
block.header.as_ref().into()
}
}
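The core change in this file: `Block::header` becomes `Arc<Header>`, so a header can be shared or returned on its own without copying the rest of the block. A minimal sketch of the access pattern this enables, using hypothetical stand-in types rather than the real ones:

use std::sync::Arc;

// Stand-ins for the real types, for illustration only.
struct Header { version: u32 }
struct Block { header: Arc<Header> }

fn headers_only(blocks: &[Block]) -> Vec<Arc<Header>> {
    // Cloning an Arc copies a pointer and bumps a reference count;
    // the Header itself is never duplicated.
    blocks.iter().map(|block| block.header.clone()).collect()
}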

View File

@ -359,7 +359,7 @@ impl Arbitrary for Block {
(Header::arbitrary_with(ledger_state), transactions_strategy)
.prop_map(move |(header, transactions)| Self {
header,
header: header.into(),
transactions,
})
.boxed()
@ -431,7 +431,7 @@ impl Block {
for (height, block) in vec.iter_mut() {
// fixup the previous block hash
if let Some(previous_block_hash) = previous_block_hash {
block.header.previous_block_hash = previous_block_hash;
Arc::make_mut(&mut block.header).previous_block_hash = previous_block_hash;
}
let mut new_transactions = Vec::new();
@ -471,18 +471,21 @@ impl Block {
.activation_height(current.network)
.unwrap();
let nu5_height = NetworkUpgrade::Nu5.activation_height(current.network);
match current_height.cmp(&heartwood_height) {
std::cmp::Ordering::Less => {
// In pre-Heartwood blocks this is the Sapling note commitment tree root.
// We don't validate it since we checkpoint on Canopy, but it
// needs to be well-formed, i.e. smaller than 𝑞_J, so we
// arbitrarily set it to 1.
block.header.commitment_bytes = [0u8; 32];
block.header.commitment_bytes[0] = 1;
let block_header = Arc::make_mut(&mut block.header);
block_header.commitment_bytes = [0u8; 32];
block_header.commitment_bytes[0] = 1;
}
std::cmp::Ordering::Equal => {
// The Heartwood activation block has a hardcoded all-zeroes commitment.
block.header.commitment_bytes = [0u8; 32];
let block_header = Arc::make_mut(&mut block.header);
block_header.commitment_bytes = [0u8; 32];
}
std::cmp::Ordering::Greater => {
// Set the correct commitment bytes according to the network upgrade.
@ -498,9 +501,11 @@ impl Block {
&history_tree_root,
&auth_data_root,
);
block.header.commitment_bytes = hash_block_commitments.into();
let block_header = Arc::make_mut(&mut block.header);
block_header.commitment_bytes = hash_block_commitments.into();
} else {
block.header.commitment_bytes = history_tree_root.into();
let block_header = Arc::make_mut(&mut block.header);
block_header.commitment_bytes = history_tree_root.into();
}
}
}
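All of these fixups go through `Arc::make_mut`, the standard clone-on-write escape hatch: it clones the inner value only if the `Arc` is shared, then hands back a unique `&mut`. A self-contained sketch of that behaviour:

use std::sync::Arc;

fn main() {
    let original = Arc::new([0u8; 32]);
    let mut copy = original.clone();

    // `original` still holds a reference, so make_mut clones the array
    // and returns a mutable reference to the fresh, unshared copy.
    Arc::make_mut(&mut copy)[0] = 1;

    assert_eq!(original[0], 0);
    assert_eq!(copy[0], 1);
}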

View File

@ -1,4 +1,4 @@
use std::{fmt, io};
use std::{fmt, io, sync::Arc};
use hex::{FromHex, ToHex};
@ -105,6 +105,22 @@ impl From<Header> for Hash {
}
}
impl From<&Arc<Header>> for Hash {
// The borrow is actually needed to use From<&Header>
#[allow(clippy::needless_borrow)]
fn from(block_header: &Arc<Header>) -> Self {
block_header.as_ref().into()
}
}
impl From<Arc<Header>> for Hash {
// The borrow is actually needed to use From<&Header>
#[allow(clippy::needless_borrow)]
fn from(block_header: Arc<Header>) -> Self {
block_header.as_ref().into()
}
}
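With both impls in scope, a shared header converts to its hash the same way a plain `Header` does, whether borrowed or owned. A hedged sketch, assuming the `Header` and `Hash` types from this module:

// Hypothetical helper showing both conversion paths.
fn hash_both_ways(header: &Arc<Header>) -> (Hash, Hash) {
    let borrowed: Hash = header.into();        // via From<&Arc<Header>>
    let owned: Hash = header.clone().into();   // via From<Arc<Header>>
    (borrowed, owned)
}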
impl ZcashSerialize for Hash {
fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> {
writer.write_all(&self.0)?;

View File

@ -1,12 +1,12 @@
//! The block header.
use std::usize;
use std::sync::Arc;
use chrono::{DateTime, Duration, Utc};
use thiserror::Error;
use crate::{
serialization::{CompactSizeMessage, TrustedPreallocate, MAX_PROTOCOL_MESSAGE_LEN},
serialization::{TrustedPreallocate, MAX_PROTOCOL_MESSAGE_LEN},
work::{difficulty::CompactDifficulty, equihash::Solution},
};
@ -125,18 +125,14 @@ impl Header {
}
/// A header with a count of the number of transactions in its block.
///
/// This structure is used in the Bitcoin network protocol.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
///
/// The transaction count field is always zero, so we don't store it in the struct.
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))]
pub struct CountedHeader {
/// The header for a block
pub header: Header,
/// The number of transactions that come after the header
///
/// TODO: should this always be zero? (#1924)
pub transaction_count: CompactSizeMessage,
pub header: Arc<Header>,
}
/// The serialized size of a Zcash block header.

View File

@ -1,11 +1,14 @@
use std::{borrow::Borrow, convert::TryInto, io};
//! Serialization and deserialization for Zcash blocks.
use std::{borrow::Borrow, io};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use chrono::{TimeZone, Utc};
use crate::{
serialization::{
ReadZcashExt, SerializationError, ZcashDeserialize, ZcashDeserializeInto, ZcashSerialize,
CompactSizeMessage, ReadZcashExt, SerializationError, ZcashDeserialize,
ZcashDeserializeInto, ZcashSerialize,
},
work::{difficulty::CompactDifficulty, equihash},
};
@ -93,19 +96,30 @@ impl ZcashDeserialize for Header {
}
impl ZcashSerialize for CountedHeader {
#[allow(clippy::unwrap_in_result)]
fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> {
self.header.zcash_serialize(&mut writer)?;
self.transaction_count.zcash_serialize(&mut writer)?;
// A header-only message has zero transactions in it.
let transaction_count =
CompactSizeMessage::try_from(0).expect("0 is below the message size limit");
transaction_count.zcash_serialize(&mut writer)?;
Ok(())
}
}
impl ZcashDeserialize for CountedHeader {
fn zcash_deserialize<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> {
Ok(CountedHeader {
let header = CountedHeader {
header: (&mut reader).zcash_deserialize_into()?,
transaction_count: (&mut reader).zcash_deserialize_into()?,
})
};
// We ignore the number of transactions in a header-only message,
// because it should always be zero.
let _transaction_count: CompactSizeMessage = (&mut reader).zcash_deserialize_into()?;
Ok(header)
}
}
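The transaction count is now purely a wire-format detail: always written as zero, always discarded on read. A round-trip sketch, assuming some `header: Arc<Header>` and the `zcash_serialize_to_vec` helper used elsewhere in this codebase:

let counted = CountedHeader { header };

let bytes = counted
    .zcash_serialize_to_vec()
    .expect("serializing to a Vec never fails");
// The final byte is the CompactSize transaction count, always zero.
assert_eq!(bytes.last(), Some(&0));

// Deserializing reads and discards that trailing count.
let roundtrip = CountedHeader::zcash_deserialize(&bytes[..])
    .expect("roundtrip deserialization succeeds");
assert_eq!(roundtrip, counted);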

View File

@ -148,7 +148,7 @@ fn multi_transaction_block(oversized: bool) -> Block {
// Add the transactions into a block
let block = Block {
header: block_header,
header: block_header.into(),
transactions,
};
@ -228,7 +228,7 @@ fn single_transaction_block_many_inputs(oversized: bool) -> Block {
let transactions = vec![Arc::new(big_transaction)];
let block = Block {
header: block_header,
header: block_header.into(),
transactions,
};
@ -306,7 +306,7 @@ fn single_transaction_block_many_outputs(oversized: bool) -> Block {
let transactions = vec![Arc::new(big_transaction)];
let block = Block {
header: block_header,
header: block_header.into(),
transactions,
};

View File

@ -1,6 +1,6 @@
//! Tests for trusted preallocation during deserialization.
use std::convert::TryInto;
use std::sync::Arc;
use proptest::prelude::*;
@ -9,10 +9,7 @@ use crate::{
header::MIN_COUNTED_HEADER_LEN, CountedHeader, Hash, Header, BLOCK_HASH_SIZE,
MAX_PROTOCOL_MESSAGE_LEN,
},
serialization::{
arbitrary::max_allocation_is_big_enough, CompactSizeMessage, TrustedPreallocate,
ZcashSerialize,
},
serialization::{arbitrary::max_allocation_is_big_enough, TrustedPreallocate, ZcashSerialize},
};
proptest! {
@ -49,10 +46,9 @@ proptest! {
/// Confirm that each counted header takes at least MIN_COUNTED_HEADER_LEN bytes when serialized.
/// This verifies that our calculated [`TrustedPreallocate::max_allocation`] is indeed an upper bound.
#[test]
fn counted_header_min_length(header in any::<Header>(), transaction_count in any::<CompactSizeMessage>()) {
fn counted_header_min_length(header in any::<Arc<Header>>()) {
let header = CountedHeader {
header,
transaction_count,
};
let serialized_header = header.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
prop_assert!(serialized_header.len() >= MIN_COUNTED_HEADER_LEN)
@ -66,10 +62,9 @@ proptest! {
/// 1. The smallest disallowed vector of `CountedHeader`s is too large to send via the Zcash Wire Protocol
/// 2. The largest allowed vector is small enough to fit in a legal Zcash Wire Protocol message
#[test]
fn counted_header_max_allocation(header in any::<Header>()) {
fn counted_header_max_allocation(header in any::<Arc<Header>>()) {
let header = CountedHeader {
header,
transaction_count: 0.try_into().expect("zero is less than MAX_PROTOCOL_MESSAGE_LEN"),
};
let (

View File

@ -1,6 +1,8 @@
//! Randomised property tests for Proof of Work.
use proptest::{arbitrary::any, prelude::*, test_runner::Config};
use std::env;
use std::{env, sync::Arc};
use crate::block::{self, Block};
use crate::serialization::{ZcashDeserialize, ZcashDeserializeInto, ZcashSerialize};
@ -31,10 +33,12 @@ prop_compose! {
.prop_filter("solution must not be the actual solution", move |s| {
s != &real_header.solution
})
) -> block::Header {
) -> Arc<block::Header> {
let mut fake_header = real_header;
fake_header.solution = fake_solution;
fake_header
Arc::new(fake_header)
}
}
@ -53,7 +57,7 @@ fn equihash_prop_test_solution() -> color_eyre::eyre::Result<()> {
.ok()
.and_then(|v| v.parse().ok())
.unwrap_or(DEFAULT_TEST_INPUT_PROPTEST_CASES)),
|(fake_header in randomized_solutions(block.header))| {
|(fake_header in randomized_solutions(*block.header.as_ref()))| {
fake_header.solution
.check(&fake_header)
.expect_err("block header should not validate on randomized solution");
@ -69,10 +73,12 @@ prop_compose! {
.prop_filter("nonce must not be the actual nonce", move |fake_nonce| {
fake_nonce != &real_header.nonce
})
) -> block::Header {
let mut fake_header = real_header;
fake_header.nonce = fake_nonce;
fake_header
) -> Arc<block::Header> {
let mut fake_header = real_header;
fake_header.nonce = fake_nonce;
Arc::new(fake_header)
}
}
@ -85,7 +91,7 @@ fn equihash_prop_test_nonce() -> color_eyre::eyre::Result<()> {
.expect("block test vector should deserialize");
block.header.solution.check(&block.header)?;
proptest!(|(fake_header in randomized_nonce(block.header))| {
proptest!(|(fake_header in randomized_nonce(*block.header.as_ref()))| {
fake_header.solution
.check(&fake_header)
.expect_err("block header should not validate on randomized nonce");
@ -101,13 +107,14 @@ prop_compose! {
.prop_map(move |mut fake_header| {
fake_header.nonce = real_header.nonce;
fake_header.solution = real_header.solution;
fake_header
Arc::new(fake_header)
})
.prop_filter("input must not be the actual input", move |fake_header| {
fake_header != &real_header
fake_header.as_ref() != &real_header
})
) -> block::Header {
fake_header
) -> Arc<block::Header> {
fake_header
}
}
@ -124,7 +131,7 @@ fn equihash_prop_test_input() -> color_eyre::eyre::Result<()> {
.ok()
.and_then(|v| v.parse().ok())
.unwrap_or(DEFAULT_TEST_INPUT_PROPTEST_CASES)),
|(fake_header in randomized_input(block.header))| {
|(fake_header in randomized_input(*block.header.as_ref()))| {
fake_header.solution
.check(&fake_header)
.expect_err("equihash solution should not validate on randomized input");

View File

@ -1,6 +1,6 @@
//! Tests for block verification
use std::{convert::TryFrom, sync::Arc};
use std::sync::Arc;
use chrono::Utc;
use color_eyre::eyre::{eyre, Report};
@ -53,7 +53,7 @@ static INVALID_TIME_BLOCK_TRANSCRIPT: Lazy<
.checked_add_signed(chrono::Duration::hours(3))
.ok_or_else(|| eyre!("overflow when calculating 3 hours in the future"))
.unwrap();
block.header.time = three_hours_in_the_future;
Arc::make_mut(&mut block.header).time = three_hours_in_the_future;
vec![(Arc::new(block), Err(ExpectedTranscriptError::Any))]
});
@ -65,7 +65,7 @@ static INVALID_HEADER_SOLUTION_TRANSCRIPT: Lazy<
Block::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES[..]).unwrap();
// Change nonce to something invalid
block.header.nonce = [0; 32];
Arc::make_mut(&mut block.header).nonce = [0; 32];
vec![(Arc::new(block), Err(ExpectedTranscriptError::Any))]
});
@ -77,7 +77,7 @@ static INVALID_COINBASE_TRANSCRIPT: Lazy<
// Test 1: Empty transaction
let block1 = Block {
header,
header: header.into(),
transactions: Vec::new(),
};
@ -88,7 +88,7 @@ static INVALID_COINBASE_TRANSCRIPT: Lazy<
.unwrap();
transactions.push(tx);
let block2 = Block {
header,
header: header.into(),
transactions,
};
@ -205,7 +205,7 @@ fn difficulty_validation_failure() -> Result<(), Report> {
let hash = block.hash();
// Set the difficulty field to an invalid value
block.header.difficulty_threshold = INVALID_COMPACT_DIFFICULTY;
Arc::make_mut(&mut block.header).difficulty_threshold = INVALID_COMPACT_DIFFICULTY;
// Validate the block
let result =
@ -440,7 +440,7 @@ fn funding_stream_validation_failure() -> Result<(), Report> {
// Build new block
let transactions: Vec<Arc<zebra_chain::transaction::Transaction>> = vec![Arc::new(tx)];
let block = Block {
header: block.header,
header: block.header.clone(),
transactions,
};
@ -618,7 +618,7 @@ fn merkle_root_fake_v5_for_network(network: Network) -> Result<(), Report> {
// Replace the merkle root so that it matches the modified transactions.
// This test provides some transaction id and merkle root coverage,
// but we also need to test against zcashd test vectors.
block.header.merkle_root = transaction_hashes.iter().cloned().collect();
Arc::make_mut(&mut block.header).merkle_root = transaction_hashes.iter().cloned().collect();
check::merkle_root_validity(network, &block, &transaction_hashes)
.expect("merkle root should be valid for this block");

View File

@ -9,7 +9,7 @@ use tower::{layer::Layer, timeout::TimeoutLayer, Service};
use zebra_chain::{
block::{self, Block},
parameters::Network,
serialization::ZcashDeserialize,
serialization::{ZcashDeserialize, ZcashDeserializeInto},
};
use zebra_state as zs;
use zebra_test::transcript::{ExpectedTranscriptError, Transcript};
@ -36,7 +36,9 @@ const VERIFY_TIMEOUT_SECONDS: u64 = 10;
/// The generated block should fail validation.
pub fn block_no_transactions() -> Block {
Block {
header: block::Header::zcash_deserialize(&zebra_test::vectors::DUMMY_HEADER[..]).unwrap(),
header: zebra_test::vectors::DUMMY_HEADER[..]
.zcash_deserialize_into()
.unwrap(),
transactions: Vec::new(),
}
}
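`zcash_deserialize_into` is the inference-driven counterpart of calling `zcash_deserialize` on a named type: the target type is taken from context, so the field initializer no longer has to spell it out. A sketch of the two equivalent forms, with a hypothetical `data: Vec<u8>`:

// Explicit associated function on the target type:
let header = Arc::<block::Header>::zcash_deserialize(&data[..])
    .expect("header bytes are valid");

// Inference-driven extension method, as used above:
let header: Arc<block::Header> = data[..]
    .zcash_deserialize_into()
    .expect("header bytes are valid");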

View File

@ -1,16 +1,12 @@
//! Tests for checkpoint-based block verification
use super::*;
use super::types::Progress::*;
use super::types::TargetHeight::*;
use std::{cmp::min, mem::drop, time::Duration};
use color_eyre::eyre::{eyre, Report};
use futures::{
future::TryFutureExt,
stream::{FuturesUnordered, StreamExt},
};
use std::{cmp::min, convert::TryInto, mem::drop, time::Duration};
use tokio::time::timeout;
use tower::{Service, ServiceExt};
use tracing_futures::Instrument;
@ -18,6 +14,11 @@ use tracing_futures::Instrument;
use zebra_chain::parameters::Network::*;
use zebra_chain::serialization::ZcashDeserialize;
use super::{
types::{Progress::*, TargetHeight::*},
*,
};
/// The timeout we apply to each verify future during testing.
///
/// The checkpoint verifier uses `tokio::sync::oneshot` channels as futures.
@ -505,11 +506,11 @@ async fn wrong_checkpoint_hash_fail() -> Result<(), Report> {
let good_block0 =
Arc::<Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES[..])?;
let good_block0_hash = good_block0.hash();
// Change the header hash
let mut bad_block0 = good_block0.clone();
let mut bad_block0 = Arc::make_mut(&mut bad_block0);
bad_block0.header.version = 0;
let bad_block0: Arc<Block> = bad_block0.clone().into();
let bad_block0_mut = Arc::make_mut(&mut bad_block0);
Arc::make_mut(&mut bad_block0_mut.header).version = 0;
// Make a checkpoint list containing the genesis block checkpoint
let genesis_checkpoint_list: BTreeMap<block::Height, block::Hash> =

View File

@ -477,6 +477,12 @@ impl StateService {
read::block(self.mem.best_chain(), self.disk.db(), hash_or_height)
}
/// Returns the [`block::Header`] with [`Hash`](zebra_chain::block::Hash) or
/// [`Height`](zebra_chain::block::Height), if it exists in the current best chain.
pub fn best_block_header(&self, hash_or_height: HashOrHeight) -> Option<Arc<block::Header>> {
read::block_header(self.mem.best_chain(), self.disk.db(), hash_or_height)
}
/// Returns the [`Transaction`] with [`transaction::Hash`],
/// if it exists in the current best chain.
pub fn best_transaction(&self, hash: transaction::Hash) -> Option<Arc<Transaction>> {
@ -516,6 +522,8 @@ impl StateService {
/// - they are not in the best chain, or
/// - their block fails contextual validation.
pub fn any_utxo(&self, outpoint: &transparent::OutPoint) -> Option<transparent::Utxo> {
// We ignore any UTXOs in FinalizedState.queued_by_prev_hash,
// because it is only used during checkpoint verification.
self.mem
.any_utxo(outpoint)
.or_else(|| self.queued_blocks.utxo(outpoint))
@ -918,17 +926,10 @@ impl Service<Request> for StateService {
let res: Vec<_> = res
.iter()
.map(|&hash| {
let block = self
.best_block(hash.into())
.expect("block for found hash is in the best chain");
block::CountedHeader {
transaction_count: block
.transactions
.len()
.try_into()
.expect("transaction count has already been validated"),
header: block.header,
}
let header = self
.best_block_header(hash.into())
.expect("block header for found hash is in the best chain");
block::CountedHeader { header }
})
.collect();
async move { Ok(Response::BlockHeaders(res)) }.boxed()
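This hunk is the payoff of the whole change: the headers handler previously loaded each full block (header plus every transaction) just to copy out the header and count the transactions. A condensed sketch of the two versions side by side, drawn from the diff above:

// Before: deserialize the whole block, then discard the transactions.
let block = self
    .best_block(hash.into())
    .expect("block for found hash is in the best chain");
let counted = block::CountedHeader {
    header: block.header,
    transaction_count: block
        .transactions
        .len()
        .try_into()
        .expect("transaction count has already been validated"),
};

// After: read only the header column family.
let header = self
    .best_block_header(hash.into())
    .expect("block header for found hash is in the best chain");
let counted = block::CountedHeader { header };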

View File

@ -1,3 +1,5 @@
//! Randomised property tests for the Zebra chain tip.
use std::{collections::HashSet, env, sync::Arc};
use futures::FutureExt;
@ -50,7 +52,9 @@ proptest! {
// prepare the update
if connection.is_grow() {
if let (Some(mut block), Some(last_block_hash)) = (update.block(), last_block_hash) {
Arc::make_mut(&mut block).header.previous_block_hash = last_block_hash;
let block_mut = Arc::make_mut(&mut block);
Arc::make_mut(&mut block_mut.header).previous_block_hash = last_block_hash;
*update.block_mut() = Some(block);
}
}
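Because the block and its header are now each behind their own `Arc`, mutating a header field takes two clone-on-write steps: one `make_mut` to get a unique `Block`, and a second to get a unique `Header` inside it. A stand-alone sketch of the nested pattern, with hypothetical stand-in types:

use std::sync::Arc;

#[derive(Clone)]
struct Header { previous_block_hash: [u8; 32] }

#[derive(Clone)]
struct Block { header: Arc<Header> }

fn set_previous_hash(block: &mut Arc<Block>, hash: [u8; 32]) {
    // First make_mut: a unique Block (clones it if the Arc is shared).
    let block_mut = Arc::make_mut(block);
    // Second make_mut: a unique Header inside that Block.
    Arc::make_mut(&mut block_mut.header).previous_block_hash = hash;
}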

View File

@ -79,19 +79,30 @@ impl ZebraDb {
self.db.zs_get(&height_by_hash, &hash)
}
/// Returns the [`block::Header`](zebra_chain::block::Header) with [`block::Hash`](zebra_chain::block::Hash)
/// or [`Height`](zebra_chain::block::Height), if it exists in the finalized chain.
//
// TODO: move this method to the start of the section
#[allow(clippy::unwrap_in_result)]
pub fn block_header(&self, hash_or_height: HashOrHeight) -> Option<Arc<block::Header>> {
// Block Header
let block_header_by_height = self.db.cf_handle("block_header_by_height").unwrap();
let height = hash_or_height.height_or_else(|hash| self.height(hash))?;
let header = self.db.zs_get(&block_header_by_height, &height)?;
Some(header)
}
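Since `HashOrHeight` has `From` conversions for both `block::Hash` and `block::Height` (see the `.into()` calls here and below), either key works at the call site. A hedged usage sketch with hypothetical `db` and `hash` values:

// Look up the same header by height or by hash.
let by_height = db.block_header(block::Height(500_000).into());
let by_hash = db.block_header(hash.into());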
/// Returns the [`Block`] with [`block::Hash`](zebra_chain::block::Hash) or
/// [`Height`](zebra_chain::block::Height), if it exists in the finalized chain.
//
// TODO: move this method to the start of the section
#[allow(clippy::unwrap_in_result)]
pub fn block(&self, hash_or_height: HashOrHeight) -> Option<Arc<Block>> {
// Blocks
let block_header_by_height = self.db.cf_handle("block_header_by_height").unwrap();
let height_by_hash = self.db.cf_handle("height_by_hash").unwrap();
let height =
hash_or_height.height_or_else(|hash| self.db.zs_get(&height_by_hash, &hash))?;
let header = self.db.zs_get(&block_header_by_height, &height)?;
// Block
let height = hash_or_height.height_or_else(|hash| self.height(hash))?;
let header = self.block_header(height.into())?;
// Transactions
let tx_by_loc = self.db.cf_handle("tx_by_loc").unwrap();
@ -440,7 +451,7 @@ impl DiskWriteBatch {
} = finalized;
// Commit block header data
self.zs_insert(&block_header_by_height, height, block.header);
self.zs_insert(&block_header_by_height, height, &block.header);
// Index the block hash and height
self.zs_insert(&hash_by_height, height, hash);

View File

@ -511,7 +511,7 @@ fn rejection_restores_internal_state_genesis() -> Result<()> {
prop_assert_eq!(state.best_tip(), reject_state.best_tip());
prop_assert!(state.eq_internal_state(&reject_state));
bad_block.header.previous_block_hash = valid_tip_hash;
Arc::make_mut(&mut bad_block.header).previous_block_hash = valid_tip_hash;
let bad_block = Arc::new(bad_block.0).prepare();
let reject_result = reject_state.commit_block(bad_block, &finalized_state);

View File

@ -77,6 +77,33 @@ where
.or_else(|| db.block(hash_or_height))
}
/// Returns the [`block::Header`] with [`block::Hash`](zebra_chain::block::Hash) or
/// [`Height`](zebra_chain::block::Height), if it exists in the non-finalized
/// `chain` or finalized `db`.
pub(crate) fn block_header<C>(
chain: Option<C>,
db: &ZebraDb,
hash_or_height: HashOrHeight,
) -> Option<Arc<block::Header>>
where
C: AsRef<Chain>,
{
// # Correctness
//
// The StateService commits blocks to the finalized state before updating
// the latest chain, and it can commit additional blocks after we've cloned
// this `chain` variable.
//
// Since blocks are the same in the finalized and non-finalized state, we
// check the most efficient alternative first. (`chain` is always in memory,
// but `db` stores blocks on disk, with a memory cache.)
chain
.as_ref()
.and_then(|chain| chain.as_ref().block(hash_or_height))
.map(|contextual| contextual.block.header.clone())
.or_else(|| db.block_header(hash_or_height))
}
/// Returns the [`Transaction`] with [`transaction::Hash`], if it exists in the
/// non-finalized `chain` or finalized `db`.
pub(crate) fn transaction<C>(
@ -159,7 +186,6 @@ where
/// Returns the total transparent balance for the supplied [`transparent::Address`]es.
///
/// If the addresses do not exist in the non-finalized `chain` or finalized `db`, returns zero.
#[allow(dead_code)]
pub(crate) fn transparent_balance(
chain: Option<Arc<Chain>>,
db: &ZebraDb,
@ -282,7 +308,6 @@ fn apply_balance_change(
///
/// If the addresses do not exist in the non-finalized `chain` or finalized `db`,
/// returns an empty list.
#[allow(dead_code)]
pub(crate) fn transparent_utxos<C>(
network: Network,
chain: Option<C>,

View File

@ -2,7 +2,7 @@
//!
//! TODO: move these tests into tests::vectors and tests::prop modules.
use std::{convert::TryInto, env, sync::Arc};
use std::{env, sync::Arc};
use tower::{buffer::Buffer, util::BoxService};
@ -40,12 +40,7 @@ async fn test_populated_state_responds_correctly(
let block_headers: Vec<CountedHeader> = blocks
.iter()
.map(|block| CountedHeader {
header: block.header,
transaction_count: block
.transactions
.len()
.try_into()
.expect("test block transaction counts are valid"),
header: block.header.clone(),
})
.collect();

View File

@ -1,4 +1,6 @@
use std::{convert::TryFrom, mem, sync::Arc};
//! Tests for the Zebra state service.
use std::{mem, sync::Arc};
use zebra_chain::{
block::{self, Block},
@ -42,7 +44,7 @@ impl FakeChainHelper for Arc<Block> {
}
child.transactions.push(tx);
child.header.previous_block_hash = parent_hash;
Arc::make_mut(&mut child.header).previous_block_hash = parent_hash;
Arc::new(child)
}
@ -52,13 +54,13 @@ impl FakeChainHelper for Arc<Block> {
let expanded = work_to_expanded(work);
let block = Arc::make_mut(&mut self);
block.header.difficulty_threshold = expanded.into();
Arc::make_mut(&mut block.header).difficulty_threshold = expanded.into();
self
}
fn set_block_commitment(mut self, block_commitment: [u8; 32]) -> Arc<Block> {
let block = Arc::make_mut(&mut self);
block.header.commitment_bytes = block_commitment;
Arc::make_mut(&mut block.header).commitment_bytes = block_commitment;
self
}
}