Only fetch block headers from the database to answer headers requests (#4792)

This commit is contained in:
teor 2022-07-22 09:15:22 +10:00 committed by GitHub
parent 71fe4c4c73
commit cbb3232769
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
18 changed files with 180 additions and 106 deletions

View File

@ -1,6 +1,6 @@
//! Blocks and block-related structures (heights, headers, etc.) //! Blocks and block-related structures (heights, headers, etc.)
use std::{collections::HashMap, fmt, ops::Neg}; use std::{collections::HashMap, fmt, ops::Neg, sync::Arc};
use crate::{ use crate::{
amount::NegativeAllowed, amount::NegativeAllowed,
@ -46,9 +46,9 @@ pub use arbitrary::LedgerState;
#[cfg_attr(any(test, feature = "proptest-impl"), derive(Serialize))] #[cfg_attr(any(test, feature = "proptest-impl"), derive(Serialize))]
pub struct Block { pub struct Block {
/// The block header, containing block metadata. /// The block header, containing block metadata.
pub header: Header, pub header: Arc<Header>,
/// The block transactions. /// The block transactions.
pub transactions: Vec<std::sync::Arc<Transaction>>, pub transactions: Vec<Arc<Transaction>>,
} }
impl fmt::Display for Block { impl fmt::Display for Block {
@ -219,7 +219,7 @@ impl Block {
impl<'a> From<&'a Block> for Hash { impl<'a> From<&'a Block> for Hash {
fn from(block: &'a Block) -> Hash { fn from(block: &'a Block) -> Hash {
block.header.into() block.header.as_ref().into()
} }
} }

View File

@ -359,7 +359,7 @@ impl Arbitrary for Block {
(Header::arbitrary_with(ledger_state), transactions_strategy) (Header::arbitrary_with(ledger_state), transactions_strategy)
.prop_map(move |(header, transactions)| Self { .prop_map(move |(header, transactions)| Self {
header, header: header.into(),
transactions, transactions,
}) })
.boxed() .boxed()
@ -431,7 +431,7 @@ impl Block {
for (height, block) in vec.iter_mut() { for (height, block) in vec.iter_mut() {
// fixup the previous block hash // fixup the previous block hash
if let Some(previous_block_hash) = previous_block_hash { if let Some(previous_block_hash) = previous_block_hash {
block.header.previous_block_hash = previous_block_hash; Arc::make_mut(&mut block.header).previous_block_hash = previous_block_hash;
} }
let mut new_transactions = Vec::new(); let mut new_transactions = Vec::new();
@ -471,18 +471,21 @@ impl Block {
.activation_height(current.network) .activation_height(current.network)
.unwrap(); .unwrap();
let nu5_height = NetworkUpgrade::Nu5.activation_height(current.network); let nu5_height = NetworkUpgrade::Nu5.activation_height(current.network);
match current_height.cmp(&heartwood_height) { match current_height.cmp(&heartwood_height) {
std::cmp::Ordering::Less => { std::cmp::Ordering::Less => {
// In pre-Heartwood blocks this is the Sapling note commitment tree root. // In pre-Heartwood blocks this is the Sapling note commitment tree root.
// We don't validate it since we checkpoint on Canopy, but it // We don't validate it since we checkpoint on Canopy, but it
// needs to be well-formed, i.e. smaller than 𝑞_J, so we // needs to be well-formed, i.e. smaller than 𝑞_J, so we
// arbitrarily set it to 1. // arbitrarily set it to 1.
block.header.commitment_bytes = [0u8; 32]; let block_header = Arc::make_mut(&mut block.header);
block.header.commitment_bytes[0] = 1; block_header.commitment_bytes = [0u8; 32];
block_header.commitment_bytes[0] = 1;
} }
std::cmp::Ordering::Equal => { std::cmp::Ordering::Equal => {
// The Heartwood activation block has a hardcoded all-zeroes commitment. // The Heartwood activation block has a hardcoded all-zeroes commitment.
block.header.commitment_bytes = [0u8; 32]; let block_header = Arc::make_mut(&mut block.header);
block_header.commitment_bytes = [0u8; 32];
} }
std::cmp::Ordering::Greater => { std::cmp::Ordering::Greater => {
// Set the correct commitment bytes according to the network upgrade. // Set the correct commitment bytes according to the network upgrade.
@ -498,9 +501,11 @@ impl Block {
&history_tree_root, &history_tree_root,
&auth_data_root, &auth_data_root,
); );
block.header.commitment_bytes = hash_block_commitments.into(); let block_header = Arc::make_mut(&mut block.header);
block_header.commitment_bytes = hash_block_commitments.into();
} else { } else {
block.header.commitment_bytes = history_tree_root.into(); let block_header = Arc::make_mut(&mut block.header);
block_header.commitment_bytes = history_tree_root.into();
} }
} }
} }

View File

@ -1,4 +1,4 @@
use std::{fmt, io}; use std::{fmt, io, sync::Arc};
use hex::{FromHex, ToHex}; use hex::{FromHex, ToHex};
@ -105,6 +105,22 @@ impl From<Header> for Hash {
} }
} }
impl From<&Arc<Header>> for Hash {
// The borrow is actually needed to use From<&Header>
#[allow(clippy::needless_borrow)]
fn from(block_header: &Arc<Header>) -> Self {
block_header.as_ref().into()
}
}
impl From<Arc<Header>> for Hash {
// The borrow is actually needed to use From<&Header>
#[allow(clippy::needless_borrow)]
fn from(block_header: Arc<Header>) -> Self {
block_header.as_ref().into()
}
}
impl ZcashSerialize for Hash { impl ZcashSerialize for Hash {
fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> { fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> {
writer.write_all(&self.0)?; writer.write_all(&self.0)?;

View File

@ -1,12 +1,12 @@
//! The block header. //! The block header.
use std::usize; use std::sync::Arc;
use chrono::{DateTime, Duration, Utc}; use chrono::{DateTime, Duration, Utc};
use thiserror::Error; use thiserror::Error;
use crate::{ use crate::{
serialization::{CompactSizeMessage, TrustedPreallocate, MAX_PROTOCOL_MESSAGE_LEN}, serialization::{TrustedPreallocate, MAX_PROTOCOL_MESSAGE_LEN},
work::{difficulty::CompactDifficulty, equihash::Solution}, work::{difficulty::CompactDifficulty, equihash::Solution},
}; };
@ -125,18 +125,14 @@ impl Header {
} }
/// A header with a count of the number of transactions in its block. /// A header with a count of the number of transactions in its block.
///
/// This structure is used in the Bitcoin network protocol. /// This structure is used in the Bitcoin network protocol.
#[derive(Clone, Copy, Debug, Eq, PartialEq)] ///
/// The transaction count field is always zero, so we don't store it in the struct.
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))] #[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))]
pub struct CountedHeader { pub struct CountedHeader {
/// The header for a block /// The header for a block
pub header: Header, pub header: Arc<Header>,
/// The number of transactions that come after the header
///
/// TODO: should this always be zero? (#1924)
pub transaction_count: CompactSizeMessage,
} }
/// The serialized size of a Zcash block header. /// The serialized size of a Zcash block header.

View File

@ -1,11 +1,14 @@
use std::{borrow::Borrow, convert::TryInto, io}; //! Serialization and deserialization for Zcash blocks.
use std::{borrow::Borrow, io};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use chrono::{TimeZone, Utc}; use chrono::{TimeZone, Utc};
use crate::{ use crate::{
serialization::{ serialization::{
ReadZcashExt, SerializationError, ZcashDeserialize, ZcashDeserializeInto, ZcashSerialize, CompactSizeMessage, ReadZcashExt, SerializationError, ZcashDeserialize,
ZcashDeserializeInto, ZcashSerialize,
}, },
work::{difficulty::CompactDifficulty, equihash}, work::{difficulty::CompactDifficulty, equihash},
}; };
@ -93,19 +96,30 @@ impl ZcashDeserialize for Header {
} }
impl ZcashSerialize for CountedHeader { impl ZcashSerialize for CountedHeader {
#[allow(clippy::unwrap_in_result)]
fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> { fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> {
self.header.zcash_serialize(&mut writer)?; self.header.zcash_serialize(&mut writer)?;
self.transaction_count.zcash_serialize(&mut writer)?;
// A header-only message has zero transactions in it.
let transaction_count =
CompactSizeMessage::try_from(0).expect("0 is below the message size limit");
transaction_count.zcash_serialize(&mut writer)?;
Ok(()) Ok(())
} }
} }
impl ZcashDeserialize for CountedHeader { impl ZcashDeserialize for CountedHeader {
fn zcash_deserialize<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> { fn zcash_deserialize<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> {
Ok(CountedHeader { let header = CountedHeader {
header: (&mut reader).zcash_deserialize_into()?, header: (&mut reader).zcash_deserialize_into()?,
transaction_count: (&mut reader).zcash_deserialize_into()?, };
})
// We ignore the number of transactions in a header-only message,
// it should always be zero.
let _transaction_count: CompactSizeMessage = (&mut reader).zcash_deserialize_into()?;
Ok(header)
} }
} }

View File

@ -148,7 +148,7 @@ fn multi_transaction_block(oversized: bool) -> Block {
// Add the transactions into a block // Add the transactions into a block
let block = Block { let block = Block {
header: block_header, header: block_header.into(),
transactions, transactions,
}; };
@ -228,7 +228,7 @@ fn single_transaction_block_many_inputs(oversized: bool) -> Block {
let transactions = vec![Arc::new(big_transaction)]; let transactions = vec![Arc::new(big_transaction)];
let block = Block { let block = Block {
header: block_header, header: block_header.into(),
transactions, transactions,
}; };
@ -306,7 +306,7 @@ fn single_transaction_block_many_outputs(oversized: bool) -> Block {
let transactions = vec![Arc::new(big_transaction)]; let transactions = vec![Arc::new(big_transaction)];
let block = Block { let block = Block {
header: block_header, header: block_header.into(),
transactions, transactions,
}; };

View File

@ -1,6 +1,6 @@
//! Tests for trusted preallocation during deserialization. //! Tests for trusted preallocation during deserialization.
use std::convert::TryInto; use std::sync::Arc;
use proptest::prelude::*; use proptest::prelude::*;
@ -9,10 +9,7 @@ use crate::{
header::MIN_COUNTED_HEADER_LEN, CountedHeader, Hash, Header, BLOCK_HASH_SIZE, header::MIN_COUNTED_HEADER_LEN, CountedHeader, Hash, Header, BLOCK_HASH_SIZE,
MAX_PROTOCOL_MESSAGE_LEN, MAX_PROTOCOL_MESSAGE_LEN,
}, },
serialization::{ serialization::{arbitrary::max_allocation_is_big_enough, TrustedPreallocate, ZcashSerialize},
arbitrary::max_allocation_is_big_enough, CompactSizeMessage, TrustedPreallocate,
ZcashSerialize,
},
}; };
proptest! { proptest! {
@ -49,10 +46,9 @@ proptest! {
/// Confirm that each counted header takes at least COUNTED_HEADER_LEN bytes when serialized. /// Confirm that each counted header takes at least COUNTED_HEADER_LEN bytes when serialized.
/// This verifies that our calculated [`TrustedPreallocate::max_allocation`] is indeed an upper bound. /// This verifies that our calculated [`TrustedPreallocate::max_allocation`] is indeed an upper bound.
#[test] #[test]
fn counted_header_min_length(header in any::<Header>(), transaction_count in any::<CompactSizeMessage>()) { fn counted_header_min_length(header in any::<Arc<Header>>()) {
let header = CountedHeader { let header = CountedHeader {
header, header,
transaction_count,
}; };
let serialized_header = header.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); let serialized_header = header.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
prop_assert!(serialized_header.len() >= MIN_COUNTED_HEADER_LEN) prop_assert!(serialized_header.len() >= MIN_COUNTED_HEADER_LEN)
@ -66,10 +62,9 @@ proptest! {
/// 1. The smallest disallowed vector of `CountedHeaders`s is too large to send via the Zcash Wire Protocol /// 1. The smallest disallowed vector of `CountedHeaders`s is too large to send via the Zcash Wire Protocol
/// 2. The largest allowed vector is small enough to fit in a legal Zcash Wire Protocol message /// 2. The largest allowed vector is small enough to fit in a legal Zcash Wire Protocol message
#[test] #[test]
fn counted_header_max_allocation(header in any::<Header>()) { fn counted_header_max_allocation(header in any::<Arc<Header>>()) {
let header = CountedHeader { let header = CountedHeader {
header, header,
transaction_count: 0.try_into().expect("zero is less than MAX_PROTOCOL_MESSAGE_LEN"),
}; };
let ( let (

View File

@ -1,6 +1,8 @@
//! Randomised property tests for Proof of Work.
use proptest::{arbitrary::any, prelude::*, test_runner::Config}; use proptest::{arbitrary::any, prelude::*, test_runner::Config};
use std::env; use std::{env, sync::Arc};
use crate::block::{self, Block}; use crate::block::{self, Block};
use crate::serialization::{ZcashDeserialize, ZcashDeserializeInto, ZcashSerialize}; use crate::serialization::{ZcashDeserialize, ZcashDeserializeInto, ZcashSerialize};
@ -31,10 +33,12 @@ prop_compose! {
.prop_filter("solution must not be the actual solution", move |s| { .prop_filter("solution must not be the actual solution", move |s| {
s != &real_header.solution s != &real_header.solution
}) })
) -> block::Header { ) -> Arc<block::Header> {
let mut fake_header = real_header; let mut fake_header = real_header;
fake_header.solution = fake_solution; fake_header.solution = fake_solution;
fake_header
Arc::new(fake_header)
} }
} }
@ -53,7 +57,7 @@ fn equihash_prop_test_solution() -> color_eyre::eyre::Result<()> {
.ok() .ok()
.and_then(|v| v.parse().ok()) .and_then(|v| v.parse().ok())
.unwrap_or(DEFAULT_TEST_INPUT_PROPTEST_CASES)), .unwrap_or(DEFAULT_TEST_INPUT_PROPTEST_CASES)),
|(fake_header in randomized_solutions(block.header))| { |(fake_header in randomized_solutions(*block.header.as_ref()))| {
fake_header.solution fake_header.solution
.check(&fake_header) .check(&fake_header)
.expect_err("block header should not validate on randomized solution"); .expect_err("block header should not validate on randomized solution");
@ -69,10 +73,12 @@ prop_compose! {
.prop_filter("nonce must not be the actual nonce", move |fake_nonce| { .prop_filter("nonce must not be the actual nonce", move |fake_nonce| {
fake_nonce != &real_header.nonce fake_nonce != &real_header.nonce
}) })
) -> block::Header { ) -> Arc<block::Header> {
let mut fake_header = real_header;
fake_header.nonce = fake_nonce; let mut fake_header = real_header;
fake_header fake_header.nonce = fake_nonce;
Arc::new(fake_header)
} }
} }
@ -85,7 +91,7 @@ fn equihash_prop_test_nonce() -> color_eyre::eyre::Result<()> {
.expect("block test vector should deserialize"); .expect("block test vector should deserialize");
block.header.solution.check(&block.header)?; block.header.solution.check(&block.header)?;
proptest!(|(fake_header in randomized_nonce(block.header))| { proptest!(|(fake_header in randomized_nonce(*block.header.as_ref()))| {
fake_header.solution fake_header.solution
.check(&fake_header) .check(&fake_header)
.expect_err("block header should not validate on randomized nonce"); .expect_err("block header should not validate on randomized nonce");
@ -101,13 +107,14 @@ prop_compose! {
.prop_map(move |mut fake_header| { .prop_map(move |mut fake_header| {
fake_header.nonce = real_header.nonce; fake_header.nonce = real_header.nonce;
fake_header.solution = real_header.solution; fake_header.solution = real_header.solution;
fake_header Arc::new(fake_header)
}) })
.prop_filter("input must not be the actual input", move |fake_header| { .prop_filter("input must not be the actual input", move |fake_header| {
fake_header != &real_header fake_header.as_ref() != &real_header
}) })
) -> block::Header { ) -> Arc<block::Header> {
fake_header
fake_header
} }
} }
@ -124,7 +131,7 @@ fn equihash_prop_test_input() -> color_eyre::eyre::Result<()> {
.ok() .ok()
.and_then(|v| v.parse().ok()) .and_then(|v| v.parse().ok())
.unwrap_or(DEFAULT_TEST_INPUT_PROPTEST_CASES)), .unwrap_or(DEFAULT_TEST_INPUT_PROPTEST_CASES)),
|(fake_header in randomized_input(block.header))| { |(fake_header in randomized_input(*block.header.as_ref()))| {
fake_header.solution fake_header.solution
.check(&fake_header) .check(&fake_header)
.expect_err("equihash solution should not validate on randomized input"); .expect_err("equihash solution should not validate on randomized input");

View File

@ -1,6 +1,6 @@
//! Tests for block verification //! Tests for block verification
use std::{convert::TryFrom, sync::Arc}; use std::sync::Arc;
use chrono::Utc; use chrono::Utc;
use color_eyre::eyre::{eyre, Report}; use color_eyre::eyre::{eyre, Report};
@ -53,7 +53,7 @@ static INVALID_TIME_BLOCK_TRANSCRIPT: Lazy<
.checked_add_signed(chrono::Duration::hours(3)) .checked_add_signed(chrono::Duration::hours(3))
.ok_or_else(|| eyre!("overflow when calculating 3 hours in the future")) .ok_or_else(|| eyre!("overflow when calculating 3 hours in the future"))
.unwrap(); .unwrap();
block.header.time = three_hours_in_the_future; Arc::make_mut(&mut block.header).time = three_hours_in_the_future;
vec![(Arc::new(block), Err(ExpectedTranscriptError::Any))] vec![(Arc::new(block), Err(ExpectedTranscriptError::Any))]
}); });
@ -65,7 +65,7 @@ static INVALID_HEADER_SOLUTION_TRANSCRIPT: Lazy<
Block::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES[..]).unwrap(); Block::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES[..]).unwrap();
// Change nonce to something invalid // Change nonce to something invalid
block.header.nonce = [0; 32]; Arc::make_mut(&mut block.header).nonce = [0; 32];
vec![(Arc::new(block), Err(ExpectedTranscriptError::Any))] vec![(Arc::new(block), Err(ExpectedTranscriptError::Any))]
}); });
@ -77,7 +77,7 @@ static INVALID_COINBASE_TRANSCRIPT: Lazy<
// Test 1: Empty transaction // Test 1: Empty transaction
let block1 = Block { let block1 = Block {
header, header: header.into(),
transactions: Vec::new(), transactions: Vec::new(),
}; };
@ -88,7 +88,7 @@ static INVALID_COINBASE_TRANSCRIPT: Lazy<
.unwrap(); .unwrap();
transactions.push(tx); transactions.push(tx);
let block2 = Block { let block2 = Block {
header, header: header.into(),
transactions, transactions,
}; };
@ -205,7 +205,7 @@ fn difficulty_validation_failure() -> Result<(), Report> {
let hash = block.hash(); let hash = block.hash();
// Set the difficulty field to an invalid value // Set the difficulty field to an invalid value
block.header.difficulty_threshold = INVALID_COMPACT_DIFFICULTY; Arc::make_mut(&mut block.header).difficulty_threshold = INVALID_COMPACT_DIFFICULTY;
// Validate the block // Validate the block
let result = let result =
@ -440,7 +440,7 @@ fn funding_stream_validation_failure() -> Result<(), Report> {
// Build new block // Build new block
let transactions: Vec<Arc<zebra_chain::transaction::Transaction>> = vec![Arc::new(tx)]; let transactions: Vec<Arc<zebra_chain::transaction::Transaction>> = vec![Arc::new(tx)];
let block = Block { let block = Block {
header: block.header, header: block.header.clone(),
transactions, transactions,
}; };
@ -618,7 +618,7 @@ fn merkle_root_fake_v5_for_network(network: Network) -> Result<(), Report> {
// Replace the merkle root so that it matches the modified transactions. // Replace the merkle root so that it matches the modified transactions.
// This test provides some transaction id and merkle root coverage, // This test provides some transaction id and merkle root coverage,
// but we also need to test against zcashd test vectors. // but we also need to test against zcashd test vectors.
block.header.merkle_root = transaction_hashes.iter().cloned().collect(); Arc::make_mut(&mut block.header).merkle_root = transaction_hashes.iter().cloned().collect();
check::merkle_root_validity(network, &block, &transaction_hashes) check::merkle_root_validity(network, &block, &transaction_hashes)
.expect("merkle root should be valid for this block"); .expect("merkle root should be valid for this block");

View File

@ -9,7 +9,7 @@ use tower::{layer::Layer, timeout::TimeoutLayer, Service};
use zebra_chain::{ use zebra_chain::{
block::{self, Block}, block::{self, Block},
parameters::Network, parameters::Network,
serialization::ZcashDeserialize, serialization::{ZcashDeserialize, ZcashDeserializeInto},
}; };
use zebra_state as zs; use zebra_state as zs;
use zebra_test::transcript::{ExpectedTranscriptError, Transcript}; use zebra_test::transcript::{ExpectedTranscriptError, Transcript};
@ -36,7 +36,9 @@ const VERIFY_TIMEOUT_SECONDS: u64 = 10;
/// The generated block should fail validation. /// The generated block should fail validation.
pub fn block_no_transactions() -> Block { pub fn block_no_transactions() -> Block {
Block { Block {
header: block::Header::zcash_deserialize(&zebra_test::vectors::DUMMY_HEADER[..]).unwrap(), header: zebra_test::vectors::DUMMY_HEADER[..]
.zcash_deserialize_into()
.unwrap(),
transactions: Vec::new(), transactions: Vec::new(),
} }
} }

View File

@ -1,16 +1,12 @@
//! Tests for checkpoint-based block verification //! Tests for checkpoint-based block verification
use super::*; use std::{cmp::min, mem::drop, time::Duration};
use super::types::Progress::*;
use super::types::TargetHeight::*;
use color_eyre::eyre::{eyre, Report}; use color_eyre::eyre::{eyre, Report};
use futures::{ use futures::{
future::TryFutureExt, future::TryFutureExt,
stream::{FuturesUnordered, StreamExt}, stream::{FuturesUnordered, StreamExt},
}; };
use std::{cmp::min, convert::TryInto, mem::drop, time::Duration};
use tokio::time::timeout; use tokio::time::timeout;
use tower::{Service, ServiceExt}; use tower::{Service, ServiceExt};
use tracing_futures::Instrument; use tracing_futures::Instrument;
@ -18,6 +14,11 @@ use tracing_futures::Instrument;
use zebra_chain::parameters::Network::*; use zebra_chain::parameters::Network::*;
use zebra_chain::serialization::ZcashDeserialize; use zebra_chain::serialization::ZcashDeserialize;
use super::{
types::{Progress::*, TargetHeight::*},
*,
};
/// The timeout we apply to each verify future during testing. /// The timeout we apply to each verify future during testing.
/// ///
/// The checkpoint verifier uses `tokio::sync::oneshot` channels as futures. /// The checkpoint verifier uses `tokio::sync::oneshot` channels as futures.
@ -505,11 +506,11 @@ async fn wrong_checkpoint_hash_fail() -> Result<(), Report> {
let good_block0 = let good_block0 =
Arc::<Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES[..])?; Arc::<Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES[..])?;
let good_block0_hash = good_block0.hash(); let good_block0_hash = good_block0.hash();
// Change the header hash // Change the header hash
let mut bad_block0 = good_block0.clone(); let mut bad_block0 = good_block0.clone();
let mut bad_block0 = Arc::make_mut(&mut bad_block0); let bad_block0_mut = Arc::make_mut(&mut bad_block0);
bad_block0.header.version = 0; Arc::make_mut(&mut bad_block0_mut.header).version = 0;
let bad_block0: Arc<Block> = bad_block0.clone().into();
// Make a checkpoint list containing the genesis block checkpoint // Make a checkpoint list containing the genesis block checkpoint
let genesis_checkpoint_list: BTreeMap<block::Height, block::Hash> = let genesis_checkpoint_list: BTreeMap<block::Height, block::Hash> =

View File

@ -477,6 +477,12 @@ impl StateService {
read::block(self.mem.best_chain(), self.disk.db(), hash_or_height) read::block(self.mem.best_chain(), self.disk.db(), hash_or_height)
} }
/// Returns the [`block::Header`] with [`Hash`](zebra_chain::block::Hash) or
/// [`Height`](zebra_chain::block::Height), if it exists in the current best chain.
pub fn best_block_header(&self, hash_or_height: HashOrHeight) -> Option<Arc<block::Header>> {
read::block_header(self.mem.best_chain(), self.disk.db(), hash_or_height)
}
/// Returns the [`Transaction`] with [`transaction::Hash`], /// Returns the [`Transaction`] with [`transaction::Hash`],
/// if it exists in the current best chain. /// if it exists in the current best chain.
pub fn best_transaction(&self, hash: transaction::Hash) -> Option<Arc<Transaction>> { pub fn best_transaction(&self, hash: transaction::Hash) -> Option<Arc<Transaction>> {
@ -516,6 +522,8 @@ impl StateService {
/// - they are not in the best chain, or /// - they are not in the best chain, or
/// - their block fails contextual validation. /// - their block fails contextual validation.
pub fn any_utxo(&self, outpoint: &transparent::OutPoint) -> Option<transparent::Utxo> { pub fn any_utxo(&self, outpoint: &transparent::OutPoint) -> Option<transparent::Utxo> {
// We ignore any UTXOs in FinalizedState.queued_by_prev_hash,
// because it is only used during checkpoint verification.
self.mem self.mem
.any_utxo(outpoint) .any_utxo(outpoint)
.or_else(|| self.queued_blocks.utxo(outpoint)) .or_else(|| self.queued_blocks.utxo(outpoint))
@ -918,17 +926,10 @@ impl Service<Request> for StateService {
let res: Vec<_> = res let res: Vec<_> = res
.iter() .iter()
.map(|&hash| { .map(|&hash| {
let block = self let header = self
.best_block(hash.into()) .best_block_header(hash.into())
.expect("block for found hash is in the best chain"); .expect("block header for found hash is in the best chain");
block::CountedHeader { block::CountedHeader { header }
transaction_count: block
.transactions
.len()
.try_into()
.expect("transaction count has already been validated"),
header: block.header,
}
}) })
.collect(); .collect();
async move { Ok(Response::BlockHeaders(res)) }.boxed() async move { Ok(Response::BlockHeaders(res)) }.boxed()

View File

@ -1,3 +1,5 @@
//! Randomised property tests for the Zebra chain tip.
use std::{collections::HashSet, env, sync::Arc}; use std::{collections::HashSet, env, sync::Arc};
use futures::FutureExt; use futures::FutureExt;
@ -50,7 +52,9 @@ proptest! {
// prepare the update // prepare the update
if connection.is_grow() { if connection.is_grow() {
if let (Some(mut block), Some(last_block_hash)) = (update.block(), last_block_hash) { if let (Some(mut block), Some(last_block_hash)) = (update.block(), last_block_hash) {
Arc::make_mut(&mut block).header.previous_block_hash = last_block_hash; let block_mut = Arc::make_mut(&mut block);
Arc::make_mut(&mut block_mut.header).previous_block_hash = last_block_hash;
*update.block_mut() = Some(block); *update.block_mut() = Some(block);
} }
} }

View File

@ -79,19 +79,30 @@ impl ZebraDb {
self.db.zs_get(&height_by_hash, &hash) self.db.zs_get(&height_by_hash, &hash)
} }
/// Returns the [`block::Header`](zebra_chain::block::Header) with [`block::Hash`](zebra_chain::block::Hash)
/// or [`Height`](zebra_chain::block::Height), if it exists in the finalized chain.
//
// TODO: move this method to the start of the section
#[allow(clippy::unwrap_in_result)]
pub fn block_header(&self, hash_or_height: HashOrHeight) -> Option<Arc<block::Header>> {
// Block Header
let block_header_by_height = self.db.cf_handle("block_header_by_height").unwrap();
let height = hash_or_height.height_or_else(|hash| self.height(hash))?;
let header = self.db.zs_get(&block_header_by_height, &height)?;
Some(header)
}
/// Returns the [`Block`] with [`block::Hash`](zebra_chain::block::Hash) or /// Returns the [`Block`] with [`block::Hash`](zebra_chain::block::Hash) or
/// [`Height`](zebra_chain::block::Height), if it exists in the finalized chain. /// [`Height`](zebra_chain::block::Height), if it exists in the finalized chain.
// //
// TODO: move this method to the start of the section // TODO: move this method to the start of the section
#[allow(clippy::unwrap_in_result)] #[allow(clippy::unwrap_in_result)]
pub fn block(&self, hash_or_height: HashOrHeight) -> Option<Arc<Block>> { pub fn block(&self, hash_or_height: HashOrHeight) -> Option<Arc<Block>> {
// Blocks // Block
let block_header_by_height = self.db.cf_handle("block_header_by_height").unwrap(); let height = hash_or_height.height_or_else(|hash| self.height(hash))?;
let height_by_hash = self.db.cf_handle("height_by_hash").unwrap(); let header = self.block_header(height.into())?;
let height =
hash_or_height.height_or_else(|hash| self.db.zs_get(&height_by_hash, &hash))?;
let header = self.db.zs_get(&block_header_by_height, &height)?;
// Transactions // Transactions
let tx_by_loc = self.db.cf_handle("tx_by_loc").unwrap(); let tx_by_loc = self.db.cf_handle("tx_by_loc").unwrap();
@ -440,7 +451,7 @@ impl DiskWriteBatch {
} = finalized; } = finalized;
// Commit block header data // Commit block header data
self.zs_insert(&block_header_by_height, height, block.header); self.zs_insert(&block_header_by_height, height, &block.header);
// Index the block hash and height // Index the block hash and height
self.zs_insert(&hash_by_height, height, hash); self.zs_insert(&hash_by_height, height, hash);

View File

@ -511,7 +511,7 @@ fn rejection_restores_internal_state_genesis() -> Result<()> {
prop_assert_eq!(state.best_tip(), reject_state.best_tip()); prop_assert_eq!(state.best_tip(), reject_state.best_tip());
prop_assert!(state.eq_internal_state(&reject_state)); prop_assert!(state.eq_internal_state(&reject_state));
bad_block.header.previous_block_hash = valid_tip_hash; Arc::make_mut(&mut bad_block.header).previous_block_hash = valid_tip_hash;
let bad_block = Arc::new(bad_block.0).prepare(); let bad_block = Arc::new(bad_block.0).prepare();
let reject_result = reject_state.commit_block(bad_block, &finalized_state); let reject_result = reject_state.commit_block(bad_block, &finalized_state);

View File

@ -77,6 +77,33 @@ where
.or_else(|| db.block(hash_or_height)) .or_else(|| db.block(hash_or_height))
} }
/// Returns the [`block::Header`] with [`block::Hash`](zebra_chain::block::Hash) or
/// [`Height`](zebra_chain::block::Height), if it exists in the non-finalized
/// `chain` or finalized `db`.
pub(crate) fn block_header<C>(
chain: Option<C>,
db: &ZebraDb,
hash_or_height: HashOrHeight,
) -> Option<Arc<block::Header>>
where
C: AsRef<Chain>,
{
// # Correctness
//
// The StateService commits blocks to the finalized state before updating
// the latest chain, and it can commit additional blocks after we've cloned
// this `chain` variable.
//
// Since blocks are the same in the finalized and non-finalized state, we
// check the most efficient alternative first. (`chain` is always in memory,
// but `db` stores blocks on disk, with a memory cache.)
chain
.as_ref()
.and_then(|chain| chain.as_ref().block(hash_or_height))
.map(|contextual| contextual.block.header.clone())
.or_else(|| db.block_header(hash_or_height))
}
/// Returns the [`Transaction`] with [`transaction::Hash`], if it exists in the /// Returns the [`Transaction`] with [`transaction::Hash`], if it exists in the
/// non-finalized `chain` or finalized `db`. /// non-finalized `chain` or finalized `db`.
pub(crate) fn transaction<C>( pub(crate) fn transaction<C>(
@ -159,7 +186,6 @@ where
/// Returns the total transparent balance for the supplied [`transparent::Address`]es. /// Returns the total transparent balance for the supplied [`transparent::Address`]es.
/// ///
/// If the addresses do not exist in the non-finalized `chain` or finalized `db`, returns zero. /// If the addresses do not exist in the non-finalized `chain` or finalized `db`, returns zero.
#[allow(dead_code)]
pub(crate) fn transparent_balance( pub(crate) fn transparent_balance(
chain: Option<Arc<Chain>>, chain: Option<Arc<Chain>>,
db: &ZebraDb, db: &ZebraDb,
@ -282,7 +308,6 @@ fn apply_balance_change(
/// ///
/// If the addresses do not exist in the non-finalized `chain` or finalized `db`, /// If the addresses do not exist in the non-finalized `chain` or finalized `db`,
/// returns an empty list. /// returns an empty list.
#[allow(dead_code)]
pub(crate) fn transparent_utxos<C>( pub(crate) fn transparent_utxos<C>(
network: Network, network: Network,
chain: Option<C>, chain: Option<C>,

View File

@ -2,7 +2,7 @@
//! //!
//! TODO: move these tests into tests::vectors and tests::prop modules. //! TODO: move these tests into tests::vectors and tests::prop modules.
use std::{convert::TryInto, env, sync::Arc}; use std::{env, sync::Arc};
use tower::{buffer::Buffer, util::BoxService}; use tower::{buffer::Buffer, util::BoxService};
@ -40,12 +40,7 @@ async fn test_populated_state_responds_correctly(
let block_headers: Vec<CountedHeader> = blocks let block_headers: Vec<CountedHeader> = blocks
.iter() .iter()
.map(|block| CountedHeader { .map(|block| CountedHeader {
header: block.header, header: block.header.clone(),
transaction_count: block
.transactions
.len()
.try_into()
.expect("test block transaction counts are valid"),
}) })
.collect(); .collect();

View File

@ -1,4 +1,6 @@
use std::{convert::TryFrom, mem, sync::Arc}; //! Tests for the Zebra state service.
use std::{mem, sync::Arc};
use zebra_chain::{ use zebra_chain::{
block::{self, Block}, block::{self, Block},
@ -42,7 +44,7 @@ impl FakeChainHelper for Arc<Block> {
} }
child.transactions.push(tx); child.transactions.push(tx);
child.header.previous_block_hash = parent_hash; Arc::make_mut(&mut child.header).previous_block_hash = parent_hash;
Arc::new(child) Arc::new(child)
} }
@ -52,13 +54,13 @@ impl FakeChainHelper for Arc<Block> {
let expanded = work_to_expanded(work); let expanded = work_to_expanded(work);
let block = Arc::make_mut(&mut self); let block = Arc::make_mut(&mut self);
block.header.difficulty_threshold = expanded.into(); Arc::make_mut(&mut block.header).difficulty_threshold = expanded.into();
self self
} }
fn set_block_commitment(mut self, block_commitment: [u8; 32]) -> Arc<Block> { fn set_block_commitment(mut self, block_commitment: [u8; 32]) -> Arc<Block> {
let block = Arc::make_mut(&mut self); let block = Arc::make_mut(&mut self);
block.header.commitment_bytes = block_commitment; Arc::make_mut(&mut block.header).commitment_bytes = block_commitment;
self self
} }
} }