Consensus refactor (#629)
* Flatten consensus::verify::* to consensus::*
* Move consensus::*::tests into their own files
* Move CheckpointList into its own file
* Move Progress and Target into a types module

QueuedBlock and QueuedBlockList can stay in checkpoint.rs, because they are tightly coupled to CheckpointVerifier.

parent 5d6a5ca329
commit 8b5ec155f0
@ -0,0 +1,172 @@
//! Block verification and chain state updates for Zebra.
//!
//! Verification occurs in multiple stages:
//! - getting blocks (disk- or network-bound)
//! - context-free verification of signatures, proofs, and scripts (CPU-bound)
//! - context-dependent verification of the chain state (awaits a verified parent block)
//!
//! Verification is provided via a `tower::Service`, to support backpressure and batch
//! verification.

#[cfg(test)]
mod tests;

use chrono::{DateTime, Duration, Utc};
use futures_util::FutureExt;
use std::{
    error,
    future::Future,
    pin::Pin,
    sync::Arc,
    task::{Context, Poll},
};
use tower::{buffer::Buffer, Service, ServiceExt};

use zebra_chain::block::{Block, BlockHeaderHash};

/// Check if `block_header_time` is less than or equal to
/// 2 hours in the future, according to the node's local clock (`now`).
///
/// This is a non-deterministic rule, as clocks vary over time, and
/// between different nodes.
///
/// "In addition, a full validator MUST NOT accept blocks with nTime
/// more than two hours in the future according to its clock. This
/// is not strictly a consensus rule because it is nondeterministic,
/// and clock time varies between nodes. Also note that a block that
/// is rejected by this rule at a given point in time may later be
/// accepted."[S 7.5][7.5]
///
/// [7.5]: https://zips.z.cash/protocol/protocol.pdf#blockheader
pub(crate) fn node_time_check(
    block_header_time: DateTime<Utc>,
    now: DateTime<Utc>,
) -> Result<(), Error> {
    let two_hours_in_the_future = now
        .checked_add_signed(Duration::hours(2))
        .ok_or("overflow when calculating 2 hours in the future")?;

    if block_header_time <= two_hours_in_the_future {
        Ok(())
    } else {
        Err("block header time is more than 2 hours in the future".into())
    }
}

/// [3.10]: https://zips.z.cash/protocol/protocol.pdf#coinbasetransactions
pub(crate) fn coinbase_check(block: &Block) -> Result<(), Error> {
    if block.coinbase_height().is_some() {
        // No coinbase inputs in additional transactions allowed
        if block
            .transactions
            .iter()
            .skip(1)
            .any(|tx| tx.contains_coinbase_input())
        {
            Err("coinbase input found in additional transaction")?
        }
        Ok(())
    } else {
        Err("no coinbase transaction in block")?
    }
}

struct BlockVerifier<S> {
    /// The underlying `ZebraState`, possibly wrapped in other services.
    state_service: S,
}

/// The error type for the BlockVerifier Service.
// TODO(jlusby): Error = Report ?
type Error = Box<dyn error::Error + Send + Sync + 'static>;

/// The BlockVerifier service implementation.
///
/// After verification, blocks are added to the underlying state service.
impl<S> Service<Arc<Block>> for BlockVerifier<S>
where
    S: Service<zebra_state::Request, Response = zebra_state::Response, Error = Error>
        + Send
        + Clone
        + 'static,
    S::Future: Send + 'static,
{
    type Response = BlockHeaderHash;
    type Error = Error;
    type Future =
        Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // We don't expect the state to exert backpressure on verifier users,
        // so we don't need to call `state_service.poll_ready()` here.
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, block: Arc<Block>) -> Self::Future {
        // TODO(jlusby): Error = Report, handle errors from state_service.
        // TODO(teor):
        // - handle chain reorgs
        // - adjust state_service "unique block height" conditions
        let mut state_service = self.state_service.clone();

        async move {
            // Since errors cause an early exit, try to do the
            // quick checks first.

            let now = Utc::now();
            node_time_check(block.header.time, now)?;
            block.header.is_equihash_solution_valid()?;
            coinbase_check(block.as_ref())?;

            // `Tower::Buffer` requires a 1:1 relationship between `poll()`s
            // and `call()`s, because it reserves a buffer slot in each
            // `call()`.
            let add_block = state_service
                .ready_and()
                .await?
                .call(zebra_state::Request::AddBlock { block });

            match add_block.await? {
                zebra_state::Response::Added { hash } => Ok(hash),
                _ => Err("adding block to zebra-state failed".into()),
            }
        }
        .boxed()
    }
}

/// Return a block verification service, using the provided state service.
///
/// The block verifier holds a state service of type `S`, used as context for
/// block validation and to which newly verified blocks will be committed. This
/// state is pluggable to allow for testing or instrumentation.
///
/// The returned type is opaque to allow instrumentation or other wrappers, but
/// can be boxed for storage. It is also `Clone` to allow sharing of a
/// verification service.
///
/// This function should be called only once for a particular state service (and
/// the result be shared) rather than constructing multiple verification services
/// backed by the same state layer.
//
// Only used by tests and other modules
#[allow(dead_code)]
pub fn init<S>(
    state_service: S,
) -> impl Service<
    Arc<Block>,
    Response = BlockHeaderHash,
    Error = Error,
    Future = impl Future<Output = Result<BlockHeaderHash, Error>>,
> + Send
       + Clone
       + 'static
where
    S: Service<zebra_state::Request, Response = zebra_state::Response, Error = Error>
        + Send
        + Clone
        + 'static,
    S::Future: Send + 'static,
{
    Buffer::new(BlockVerifier { state_service }, 1)
}
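The two-hour rule above is easiest to see at its boundary. A minimal sketch (an editorial illustration, not part of this diff) of how `node_time_check` behaves there, using only `chrono`:

```rust
use chrono::{Duration, Utc};

let now = Utc::now();
// A header time exactly two hours ahead of the local clock is still accepted...
assert!(node_time_check(now + Duration::hours(2), now).is_ok());
// ...but one second past the two-hour window is rejected.
assert!(node_time_check(now + Duration::hours(2) + Duration::seconds(1), now).is_err());
```

The tests in the next file exercise exactly these boundaries.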
@ -0,0 +1,444 @@
//! Tests for block verification

use super::*;

use chrono::offset::{LocalResult, TimeZone};
use chrono::{Duration, Utc};
use color_eyre::eyre::Report;
use color_eyre::eyre::{bail, eyre};
use std::sync::Arc;
use tower::{util::ServiceExt, Service};

use zebra_chain::block::Block;
use zebra_chain::block::BlockHeader;
use zebra_chain::serialization::ZcashDeserialize;
use zebra_chain::transaction::Transaction;

#[test]
fn time_check_past_block() {
    // This block is also verified as part of the BlockVerifier service
    // tests.
    let block =
        Arc::<Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_415000_BYTES[..])
            .expect("block should deserialize");
    let now = Utc::now();

    // This check is non-deterministic, but BLOCK_MAINNET_415000 is
    // a long time in the past. So it's unlikely that the test machine
    // will have a clock that's far enough in the past for the test to
    // fail.
    node_time_check(block.header.time, now)
        .expect("the header time from a mainnet block should be valid");
}

#[test]
fn time_check_now() {
    // These checks are deterministic, because all the times are offset
    // from the current time.
    let now = Utc::now();
    let three_hours_in_the_past = now - Duration::hours(3);
    let two_hours_in_the_future = now + Duration::hours(2);
    let two_hours_and_one_second_in_the_future = now + Duration::hours(2) + Duration::seconds(1);

    node_time_check(now, now).expect("the current time should be valid as a block header time");
    node_time_check(three_hours_in_the_past, now)
        .expect("a past time should be valid as a block header time");
    node_time_check(two_hours_in_the_future, now)
        .expect("2 hours in the future should be valid as a block header time");
    node_time_check(two_hours_and_one_second_in_the_future, now)
        .expect_err("2 hours and 1 second in the future should be invalid as a block header time");

    // Now invert the tests
    // 3 hours in the future should fail
    node_time_check(now, three_hours_in_the_past)
        .expect_err("3 hours in the future should be invalid as a block header time");
    // The past should succeed
    node_time_check(now, two_hours_in_the_future)
        .expect("2 hours in the past should be valid as a block header time");
    node_time_check(now, two_hours_and_one_second_in_the_future)
        .expect("2 hours and 1 second in the past should be valid as a block header time");
}

/// Valid unix epoch timestamps for blocks, in seconds
static BLOCK_HEADER_VALID_TIMESTAMPS: &[i64] = &[
    // These times are currently invalid DateTimes, but they could
    // become valid in future chrono versions
    i64::MIN,
    i64::MIN + 1,
    // These times are valid DateTimes
    (u32::MIN as i64) - 1,
    (u32::MIN as i64),
    (u32::MIN as i64) + 1,
    (i32::MIN as i64) - 1,
    (i32::MIN as i64),
    (i32::MIN as i64) + 1,
    -1,
    0,
    1,
    // maximum nExpiryHeight or lock_time, in blocks
    499_999_999,
    // minimum lock_time, in seconds
    500_000_000,
    500_000_001,
];

/// Invalid unix epoch timestamps for blocks, in seconds
static BLOCK_HEADER_INVALID_TIMESTAMPS: &[i64] = &[
    (i32::MAX as i64) - 1,
    (i32::MAX as i64),
    (i32::MAX as i64) + 1,
    (u32::MAX as i64) - 1,
    (u32::MAX as i64),
    (u32::MAX as i64) + 1,
    // These times are currently invalid DateTimes, but they could
    // become valid in future chrono versions
    i64::MAX - 1,
    i64::MAX,
];

#[test]
fn time_check_fixed() {
    // These checks are non-deterministic, but the times are all in the
    // distant past or far future. So it's unlikely that the test
    // machine will have a clock that makes these tests fail.
    let now = Utc::now();

    for valid_timestamp in BLOCK_HEADER_VALID_TIMESTAMPS {
        let block_header_time = match Utc.timestamp_opt(*valid_timestamp, 0) {
            LocalResult::Single(time) => time,
            LocalResult::None => {
                // Skip the test if the timestamp is invalid
                continue;
            }
            LocalResult::Ambiguous(_, _) => {
                // Utc doesn't have ambiguous times
                unreachable!();
            }
        };
        node_time_check(block_header_time, now)
            .expect("the time should be valid as a block header time");
        // Invert the check, leading to an invalid time
        node_time_check(now, block_header_time)
            .expect_err("the inverse comparison should be invalid");
    }

    for invalid_timestamp in BLOCK_HEADER_INVALID_TIMESTAMPS {
        let block_header_time = match Utc.timestamp_opt(*invalid_timestamp, 0) {
            LocalResult::Single(time) => time,
            LocalResult::None => {
                // Skip the test if the timestamp is invalid
                continue;
            }
            LocalResult::Ambiguous(_, _) => {
                // Utc doesn't have ambiguous times
                unreachable!();
            }
        };
        node_time_check(block_header_time, now)
            .expect_err("the time should be invalid as a block header time");
        // Invert the check, leading to a valid time
        node_time_check(now, block_header_time).expect("the inverse comparison should be valid");
    }
}

#[tokio::test]
async fn verify_test() -> Result<(), Report> {
    verify().await
}

#[spandoc::spandoc]
async fn verify() -> Result<(), Report> {
    zebra_test::init();

    let block =
        Arc::<Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_415000_BYTES[..])?;
    let hash: BlockHeaderHash = block.as_ref().into();

    let state_service = Box::new(zebra_state::in_memory::init());
    let mut block_verifier = super::init(state_service);

    /// SPANDOC: Make sure the verifier service is ready
    let ready_verifier_service = block_verifier.ready_and().await.map_err(|e| eyre!(e))?;
    /// SPANDOC: Verify the block
    let verify_response = ready_verifier_service
        .call(block.clone())
        .await
        .map_err(|e| eyre!(e))?;

    assert_eq!(verify_response, hash);

    Ok(())
}

#[tokio::test]
async fn round_trip_test() -> Result<(), Report> {
    round_trip().await
}

#[spandoc::spandoc]
async fn round_trip() -> Result<(), Report> {
    zebra_test::init();

    let block =
        Arc::<Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_415000_BYTES[..])?;
    let hash: BlockHeaderHash = block.as_ref().into();

    let mut state_service = zebra_state::in_memory::init();
    let mut block_verifier = super::init(state_service.clone());

    /// SPANDOC: Make sure the verifier service is ready
    let ready_verifier_service = block_verifier.ready_and().await.map_err(|e| eyre!(e))?;
    /// SPANDOC: Verify the block
    let verify_response = ready_verifier_service
        .call(block.clone())
        .await
        .map_err(|e| eyre!(e))?;

    assert_eq!(verify_response, hash);

    /// SPANDOC: Make sure the state service is ready
    let ready_state_service = state_service.ready_and().await.map_err(|e| eyre!(e))?;
    /// SPANDOC: Make sure the block was added to the state
    let state_response = ready_state_service
        .call(zebra_state::Request::GetBlock { hash })
        .await
        .map_err(|e| eyre!(e))?;

    if let zebra_state::Response::Block {
        block: returned_block,
    } = state_response
    {
        assert_eq!(block, returned_block);
    } else {
        bail!("unexpected response kind: {:?}", state_response);
    }

    Ok(())
}

#[tokio::test]
async fn verify_fail_add_block_test() -> Result<(), Report> {
    verify_fail_add_block().await
}

#[spandoc::spandoc]
async fn verify_fail_add_block() -> Result<(), Report> {
    zebra_test::init();

    let block =
        Arc::<Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_415000_BYTES[..])?;
    let hash: BlockHeaderHash = block.as_ref().into();

    let mut state_service = zebra_state::in_memory::init();
    let mut block_verifier = super::init(state_service.clone());

    /// SPANDOC: Make sure the verifier service is ready (1/2)
    let ready_verifier_service = block_verifier.ready_and().await.map_err(|e| eyre!(e))?;
    /// SPANDOC: Verify the block for the first time
    let verify_response = ready_verifier_service
        .call(block.clone())
        .await
        .map_err(|e| eyre!(e))?;

    assert_eq!(verify_response, hash);

    /// SPANDOC: Make sure the state service is ready (1/2)
    let ready_state_service = state_service.ready_and().await.map_err(|e| eyre!(e))?;
    /// SPANDOC: Make sure the block was added to the state
    let state_response = ready_state_service
        .call(zebra_state::Request::GetBlock { hash })
        .await
        .map_err(|e| eyre!(e))?;

    if let zebra_state::Response::Block {
        block: returned_block,
    } = state_response
    {
        assert_eq!(block, returned_block);
    } else {
        bail!("unexpected response kind: {:?}", state_response);
    }

    /// SPANDOC: Make sure the verifier service is ready (2/2)
    let ready_verifier_service = block_verifier.ready_and().await.map_err(|e| eyre!(e))?;
    /// SPANDOC: Now try to add the block again, verify should fail
    // TODO(teor): ignore duplicate block verifies?
    // TODO(teor || jlusby): check error kind
    ready_verifier_service
        .call(block.clone())
        .await
        .unwrap_err();

    /// SPANDOC: Make sure the state service is ready (2/2)
    let ready_state_service = state_service.ready_and().await.map_err(|e| eyre!(e))?;
    /// SPANDOC: But the state should still return the original block we added
    let state_response = ready_state_service
        .call(zebra_state::Request::GetBlock { hash })
        .await
        .map_err(|e| eyre!(e))?;

    if let zebra_state::Response::Block {
        block: returned_block,
    } = state_response
    {
        assert_eq!(block, returned_block);
    } else {
        bail!("unexpected response kind: {:?}", state_response);
    }

    Ok(())
}

#[tokio::test]
async fn verify_fail_future_time_test() -> Result<(), Report> {
    verify_fail_future_time().await
}

#[spandoc::spandoc]
async fn verify_fail_future_time() -> Result<(), Report> {
    zebra_test::init();

    let mut block =
        <Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_415000_BYTES[..])?;

    let mut state_service = zebra_state::in_memory::init();
    let mut block_verifier = super::init(state_service.clone());

    // Modify the block's time
    // Changing the block header also invalidates the header hashes, but
    // those checks should be performed later in validation, because they
    // are more expensive.
    let three_hours_in_the_future = Utc::now()
        .checked_add_signed(Duration::hours(3))
        .ok_or("overflow when calculating 3 hours in the future")
        .map_err(|e| eyre!(e))?;
    block.header.time = three_hours_in_the_future;

    let arc_block: Arc<Block> = block.into();

    /// SPANDOC: Make sure the verifier service is ready
    let ready_verifier_service = block_verifier.ready_and().await.map_err(|e| eyre!(e))?;
    /// SPANDOC: Try to add the block, and expect failure
    // TODO(teor || jlusby): check error kind
    ready_verifier_service
        .call(arc_block.clone())
        .await
        .unwrap_err();

    /// SPANDOC: Make sure the state service is ready (2/2)
    let ready_state_service = state_service.ready_and().await.map_err(|e| eyre!(e))?;
    /// SPANDOC: Now make sure the block isn't in the state
    // TODO(teor || jlusby): check error kind
    ready_state_service
        .call(zebra_state::Request::GetBlock {
            hash: arc_block.as_ref().into(),
        })
        .await
        .unwrap_err();

    Ok(())
}

#[tokio::test]
async fn header_solution_test() -> Result<(), Report> {
    header_solution().await
}

#[spandoc::spandoc]
async fn header_solution() -> Result<(), Report> {
    zebra_test::init();

    // Service variables
    let state_service = Box::new(zebra_state::in_memory::init());
    let mut block_verifier = super::init(state_service.clone());

    let ready_verifier_service = block_verifier.ready_and().await.map_err(|e| eyre!(e))?;

    // Get a valid block
    let mut block = Block::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_415000_BYTES[..])
        .expect("block test vector should deserialize");

    // This should be ok
    ready_verifier_service
        .call(Arc::new(block.clone()))
        .await
        .map_err(|e| eyre!(e))?;

    // Change nonce to something invalid
    block.header.nonce = [0; 32];

    let ready_verifier_service = block_verifier.ready_and().await.map_err(|e| eyre!(e))?;

    // Error: invalid equihash solution for BlockHeader
    ready_verifier_service
        .call(Arc::new(block.clone()))
        .await
        .expect_err("expected the equihash solution to be invalid");

    Ok(())
}

#[tokio::test]
#[spandoc::spandoc]
async fn coinbase() -> Result<(), Report> {
    zebra_test::init();

    // Service variables
    let state_service = Box::new(zebra_state::in_memory::init());
    let mut block_verifier = super::init(state_service.clone());

    // Get a header of a block
    let header = BlockHeader::zcash_deserialize(&zebra_test::vectors::DUMMY_HEADER[..]).unwrap();

    let ready_verifier_service = block_verifier.ready_and().await.map_err(|e| eyre!(e))?;

    // Test 1: Empty transaction list
    let block = Block {
        header,
        transactions: Vec::new(),
    };

    // Error: no coinbase transaction in block
    ready_verifier_service
        .call(Arc::new(block.clone()))
        .await
        .expect_err("fail with no coinbase transaction in block");

    let ready_verifier_service = block_verifier.ready_and().await.map_err(|e| eyre!(e))?;

    // Test 2: Transaction at first position is not coinbase
    let mut transactions = Vec::new();
    let tx = Transaction::zcash_deserialize(&zebra_test::vectors::DUMMY_TX1[..]).unwrap();
    transactions.push(Arc::new(tx));
    let block = Block {
        header,
        transactions,
    };

    // Error: no coinbase transaction in block
    ready_verifier_service
        .call(Arc::new(block))
        .await
        .expect_err("fail with no coinbase transaction in block");

    let ready_verifier_service = block_verifier.ready_and().await.map_err(|e| eyre!(e))?;

    // Test 3: Invalid coinbase position
    let mut block = Block::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_415000_BYTES[..])?;
    assert_eq!(block.transactions.len(), 1);

    // Extract the coinbase transaction from the block
    let coinbase_transaction = block.transactions.get(0).unwrap().clone();

    // Add another coinbase transaction to block
    block.transactions.push(coinbase_transaction);
    assert_eq!(block.transactions.len(), 2);

    // Error: coinbase input found in additional transaction
    ready_verifier_service
        .call(Arc::new(block))
        .await
        .expect_err("fail with coinbase input found in additional transaction");

    Ok(())
}
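Every test above pairs `ready_and()` with exactly one `call(...)`. That is not just style: as the comment in `BlockVerifier::call` notes, `tower::Buffer` reserves a buffer slot per readiness check, and each reservation must be spent on exactly one call. A hedged sketch of the contract, for some `tower::Service` `svc` and request `request` (hypothetical names):

```rust
// Reserve a slot, then immediately consume the reservation with one call.
// Skipping `ready_and()`, or reserving twice per call, breaks `Buffer`'s
// 1:1 poll/call bookkeeping.
let response = svc.ready_and().await?.call(request).await?;
```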
@ -13,13 +13,22 @@
//! Verification is provided via a `tower::Service`, to support backpressure and batch
//! verification.

mod list;
mod types;

#[cfg(test)]
mod tests;

use list::CheckpointList;
use types::{Progress, Progress::*};
use types::{Target, Target::*};

use futures_util::FutureExt;
use std::{
    cmp::Ordering,
    collections::BTreeMap,
    error,
    future::Future,
-   ops::{Bound, Bound::*, RangeBounds},
+   ops::{Bound, Bound::*},
    pin::Pin,
    sync::Arc,
    task::{Context, Poll},
@ -51,157 +60,6 @@ struct QueuedBlock {
/// has an old chain fork. (Or sends us a bad block.)
type QueuedBlockList = Vec<QueuedBlock>;

/// A `CheckpointVerifier`'s current progress verifying the chain.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum Progress<HeightOrHash> {
    /// We have not verified any blocks yet.
    BeforeGenesis,
    /// We have verified up to and including this checkpoint.
    PreviousCheckpoint(HeightOrHash),
    /// We have finished verifying.
    ///
    /// The final checkpoint is not included in this variant. The verifier has
    /// finished, so the checkpoints aren't particularly useful.
    /// To get the value of the final checkpoint, use `checkpoint_list.max_height()`.
    FinalCheckpoint,
}

/// Block height progress, in chain order.
impl Ord for Progress<BlockHeight> {
    fn cmp(&self, other: &Self) -> Ordering {
        if self == other {
            return Ordering::Equal;
        }
        match (self, other) {
            (BeforeGenesis, _) => Ordering::Less,
            (_, BeforeGenesis) => Ordering::Greater,
            (FinalCheckpoint, _) => Ordering::Greater,
            (_, FinalCheckpoint) => Ordering::Less,
            (PreviousCheckpoint(self_height), PreviousCheckpoint(other_height)) => {
                self_height.cmp(other_height)
            }
        }
    }
}

/// Partial order for block height progress.
///
/// The partial order must match the total order.
impl PartialOrd for Progress<BlockHeight> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
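A short sketch (illustrative only, not part of the diff) of the total order these two impls define — progress only moves forward, from `BeforeGenesis` through increasing checkpoint heights to `FinalCheckpoint`:

```rust
assert!(BeforeGenesis < PreviousCheckpoint(BlockHeight(0)));
assert!(PreviousCheckpoint(BlockHeight(0)) < PreviousCheckpoint(BlockHeight(1)));
assert!(PreviousCheckpoint(BlockHeight(1)) < FinalCheckpoint);
```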

/// A `CheckpointVerifier`'s target checkpoint, based on the current queue.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum Target<HeightOrHash> {
    /// We need more blocks before we can choose a target checkpoint.
    WaitingForBlocks,
    /// We want to verify this checkpoint.
    ///
    /// The target checkpoint can be multiple checkpoints ahead of the previous
    /// checkpoint.
    Checkpoint(HeightOrHash),
    /// We have finished verifying, there will be no more targets.
    FinishedVerifying,
}

/// Block height target, in chain order.
///
/// `WaitingForBlocks` is incomparable with `Checkpoint(_)`, but equal to itself.
impl PartialOrd for Target<BlockHeight> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        match (self, other) {
            // FinishedVerifying is the final state
            (FinishedVerifying, FinishedVerifying) => Some(Ordering::Equal),
            (FinishedVerifying, _) => Some(Ordering::Greater),
            (_, FinishedVerifying) => Some(Ordering::Less),
            // Checkpoints are comparable with each other by height
            (Checkpoint(self_height), Checkpoint(other_height)) => {
                self_height.partial_cmp(other_height)
            }
            // We can wait for blocks before or after any target checkpoint,
            // so there is no ordering between checkpoint and waiting.
            (WaitingForBlocks, Checkpoint(_)) => None,
            (Checkpoint(_), WaitingForBlocks) => None,
            // However, we consider waiting equal to itself.
            (WaitingForBlocks, WaitingForBlocks) => Some(Ordering::Equal),
        }
    }
}
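Unlike `Progress`, `Target` has only a partial order. A sketch (illustrative only) of the consequences:

```rust
use std::cmp::Ordering;

let waiting: Target<BlockHeight> = WaitingForBlocks;

// Checkpoint targets compare by height, and waiting equals itself...
assert_eq!(
    Checkpoint(BlockHeight(10)).partial_cmp(&Checkpoint(BlockHeight(20))),
    Some(Ordering::Less)
);
assert_eq!(waiting.partial_cmp(&waiting), Some(Ordering::Equal));
// ...but waiting and a concrete checkpoint are incomparable.
assert_eq!(waiting.partial_cmp(&Checkpoint(BlockHeight(10))), None);
```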

use Progress::*;
use Target::*;

/// Each checkpoint consists of a coinbase height and block header hash.
///
/// Checkpoints should be chosen to avoid forks or chain reorganizations,
/// which only happen in the last few hundred blocks in the chain.
/// (zcashd allows chain reorganizations up to 99 blocks, and prunes
/// orphaned side-chains after 288 blocks.)
///
/// There must be a checkpoint for the genesis block at BlockHeight 0.
/// (All other checkpoints are optional.)
#[derive(Debug)]
struct CheckpointList(BTreeMap<BlockHeight, BlockHeaderHash>);

impl CheckpointList {
    /// Create a new checkpoint list from `checkpoint_list`.
    fn new(
        checkpoint_list: impl IntoIterator<Item = (BlockHeight, BlockHeaderHash)>,
    ) -> Result<Self, Error> {
        let checkpoints: BTreeMap<BlockHeight, BlockHeaderHash> =
            checkpoint_list.into_iter().collect();

        // An empty checkpoint list can't actually verify any blocks.
        match checkpoints.keys().next() {
            Some(BlockHeight(0)) => {}
            None => Err("there must be at least one checkpoint, for the genesis block")?,
            _ => Err("checkpoints must start at the genesis block height 0")?,
        };

        Ok(CheckpointList(checkpoints))
    }

    /// Is there a checkpoint at `height`?
    ///
    /// See `BTreeMap::contains_key()` for details.
    fn contains(&self, height: BlockHeight) -> bool {
        self.0.contains_key(&height)
    }

    /// Returns the hash corresponding to the checkpoint at `height`,
    /// or None if there is no checkpoint at that height.
    ///
    /// See `BTreeMap::get()` for details.
    fn hash(&self, height: BlockHeight) -> Option<BlockHeaderHash> {
        self.0.get(&height).cloned()
    }

    /// Return the block height of the highest checkpoint in the checkpoint list.
    ///
    /// If there is only a single checkpoint, then the maximum height will be
    /// zero. (The genesis block.)
    ///
    /// The maximum height is constant for each checkpoint list.
    fn max_height(&self) -> BlockHeight {
        self.0
            .keys()
            .cloned()
            .next_back()
            .expect("checkpoint lists must have at least one checkpoint")
    }

    /// Return the block height of the highest checkpoint in a sub-range.
    fn max_height_in_range<R>(&self, range: R) -> Option<BlockHeight>
    where
        R: RangeBounds<BlockHeight>,
    {
        self.0.range(range).map(|(height, _)| *height).next_back()
    }
}

/// A checkpointing block verifier.
///
/// Verifies blocks using a supplied list of checkpoints. There must be at
@ -726,552 +584,3 @@ impl Service<Arc<Block>> for CheckpointVerifier {
        .boxed()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    use color_eyre::eyre::{eyre, Report};
    use futures::future::TryFutureExt;
    use std::{cmp::min, mem::drop, time::Duration};
    use tokio::time::timeout;
    use tower::{Service, ServiceExt};

    use zebra_chain::serialization::ZcashDeserialize;

    /// The timeout we apply to each verify future during testing.
    ///
    /// The checkpoint verifier uses `tokio::sync::oneshot` channels as futures.
    /// If the verifier doesn't send a message on the channel, any tests that
    /// await the channel future will hang.
    ///
    /// This value is set to a large value, to avoid spurious failures due to
    /// high system load.
    const VERIFY_TIMEOUT_SECONDS: u64 = 10;

    #[tokio::test]
    async fn single_item_checkpoint_list_test() -> Result<(), Report> {
        single_item_checkpoint_list().await
    }

    #[spandoc::spandoc]
    async fn single_item_checkpoint_list() -> Result<(), Report> {
        zebra_test::init();

        let block0 =
            Arc::<Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES[..])?;
        let hash0: BlockHeaderHash = block0.as_ref().into();

        // Make a checkpoint list containing only the genesis block
        let genesis_checkpoint_list: BTreeMap<BlockHeight, BlockHeaderHash> =
            [(block0.coinbase_height().unwrap(), hash0)]
                .iter()
                .cloned()
                .collect();

        let mut checkpoint_verifier =
            CheckpointVerifier::new(genesis_checkpoint_list).map_err(|e| eyre!(e))?;

        assert_eq!(
            checkpoint_verifier.previous_checkpoint_height(),
            BeforeGenesis
        );
        assert_eq!(
            checkpoint_verifier.target_checkpoint_height(),
            WaitingForBlocks
        );
        assert_eq!(
            checkpoint_verifier.checkpoint_list.max_height(),
            BlockHeight(0)
        );

        /// SPANDOC: Make sure the verifier service is ready
        let ready_verifier_service = checkpoint_verifier
            .ready_and()
            .map_err(|e| eyre!(e))
            .await?;
        /// SPANDOC: Set up the future for block 0
        let verify_future = timeout(
            Duration::from_secs(VERIFY_TIMEOUT_SECONDS),
            ready_verifier_service.call(block0.clone()),
        );
        /// SPANDOC: Wait for the response for block 0
        // TODO(teor || jlusby): check error kind
        let verify_response = verify_future
            .map_err(|e| eyre!(e))
            .await
            .expect("timeout should not happen")
            .expect("block should verify");

        assert_eq!(verify_response, hash0);

        assert_eq!(
            checkpoint_verifier.previous_checkpoint_height(),
            FinalCheckpoint
        );
        assert_eq!(
            checkpoint_verifier.target_checkpoint_height(),
            FinishedVerifying
        );
        assert_eq!(
            checkpoint_verifier.checkpoint_list.max_height(),
            BlockHeight(0)
        );

        Ok(())
    }

    #[tokio::test]
    async fn multi_item_checkpoint_list_test() -> Result<(), Report> {
        multi_item_checkpoint_list().await
    }

    #[spandoc::spandoc]
    async fn multi_item_checkpoint_list() -> Result<(), Report> {
        zebra_test::init();

        // Parse all the blocks
        let mut checkpoint_data = Vec::new();
        for b in &[
            &zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES[..],
            &zebra_test::vectors::BLOCK_MAINNET_1_BYTES[..],
            // TODO(teor): not continuous, so they hang
            //&zebra_test::vectors::BLOCK_MAINNET_415000_BYTES[..],
            //&zebra_test::vectors::BLOCK_MAINNET_434873_BYTES[..],
        ] {
            let block = Arc::<Block>::zcash_deserialize(*b)?;
            let hash: BlockHeaderHash = block.as_ref().into();
            checkpoint_data.push((block.clone(), block.coinbase_height().unwrap(), hash));
        }

        // Make a checkpoint list containing all the blocks
        let checkpoint_list: BTreeMap<BlockHeight, BlockHeaderHash> = checkpoint_data
            .iter()
            .map(|(_block, height, hash)| (*height, *hash))
            .collect();

        let mut checkpoint_verifier =
            CheckpointVerifier::new(checkpoint_list).map_err(|e| eyre!(e))?;

        assert_eq!(
            checkpoint_verifier.previous_checkpoint_height(),
            BeforeGenesis
        );
        assert_eq!(
            checkpoint_verifier.target_checkpoint_height(),
            WaitingForBlocks
        );
        assert_eq!(
            checkpoint_verifier.checkpoint_list.max_height(),
            BlockHeight(1)
        );

        // Now verify each block
        for (block, height, hash) in checkpoint_data {
            /// SPANDOC: Make sure the verifier service is ready
            let ready_verifier_service = checkpoint_verifier
                .ready_and()
                .map_err(|e| eyre!(e))
                .await?;

            /// SPANDOC: Set up the future for block {?height}
            let verify_future = timeout(
                Duration::from_secs(VERIFY_TIMEOUT_SECONDS),
                ready_verifier_service.call(block.clone()),
            );
            /// SPANDOC: Wait for the response for block {?height}
            // TODO(teor || jlusby): check error kind
            let verify_response = verify_future
                .map_err(|e| eyre!(e))
                .await
                .expect("timeout should not happen")
                .expect("future should succeed");

            assert_eq!(verify_response, hash);

            if height < checkpoint_verifier.checkpoint_list.max_height() {
                assert_eq!(
                    checkpoint_verifier.previous_checkpoint_height(),
                    PreviousCheckpoint(height)
                );
                assert_eq!(
                    checkpoint_verifier.target_checkpoint_height(),
                    WaitingForBlocks
                );
            } else {
                assert_eq!(
                    checkpoint_verifier.previous_checkpoint_height(),
                    FinalCheckpoint
                );
                assert_eq!(
                    checkpoint_verifier.target_checkpoint_height(),
                    FinishedVerifying
                );
            }
            assert_eq!(
                checkpoint_verifier.checkpoint_list.max_height(),
                BlockHeight(1)
            );
        }

        assert_eq!(
            checkpoint_verifier.previous_checkpoint_height(),
            FinalCheckpoint
        );
        assert_eq!(
            checkpoint_verifier.target_checkpoint_height(),
            FinishedVerifying
        );
        assert_eq!(
            checkpoint_verifier.checkpoint_list.max_height(),
            BlockHeight(1)
        );

        Ok(())
    }

    #[tokio::test]
    async fn block_higher_than_max_checkpoint_fail_test() -> Result<(), Report> {
        block_higher_than_max_checkpoint_fail().await
    }

    #[spandoc::spandoc]
    async fn block_higher_than_max_checkpoint_fail() -> Result<(), Report> {
        zebra_test::init();

        let block0 =
            Arc::<Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES[..])?;
        let block415000 =
            Arc::<Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_415000_BYTES[..])?;

        // Make a checkpoint list containing only the genesis block
        let genesis_checkpoint_list: BTreeMap<BlockHeight, BlockHeaderHash> =
            [(block0.coinbase_height().unwrap(), block0.as_ref().into())]
                .iter()
                .cloned()
                .collect();

        let mut checkpoint_verifier =
            CheckpointVerifier::new(genesis_checkpoint_list).map_err(|e| eyre!(e))?;

        assert_eq!(
            checkpoint_verifier.previous_checkpoint_height(),
            BeforeGenesis
        );
        assert_eq!(
            checkpoint_verifier.target_checkpoint_height(),
            WaitingForBlocks
        );
        assert_eq!(
            checkpoint_verifier.checkpoint_list.max_height(),
            BlockHeight(0)
        );

        /// SPANDOC: Make sure the verifier service is ready
        let ready_verifier_service = checkpoint_verifier
            .ready_and()
            .map_err(|e| eyre!(e))
            .await?;
        /// SPANDOC: Set up the future for block 415000
        let verify_future = timeout(
            Duration::from_secs(VERIFY_TIMEOUT_SECONDS),
            ready_verifier_service.call(block415000.clone()),
        );
        /// SPANDOC: Wait for the response for block 415000, and expect failure
        // TODO(teor || jlusby): check error kind
        let _ = verify_future
            .map_err(|e| eyre!(e))
            .await
            .expect("timeout should not happen")
            .expect_err("bad block hash should fail");

        assert_eq!(
            checkpoint_verifier.previous_checkpoint_height(),
            BeforeGenesis
        );
        assert_eq!(
            checkpoint_verifier.target_checkpoint_height(),
            WaitingForBlocks
        );
        assert_eq!(
            checkpoint_verifier.checkpoint_list.max_height(),
            BlockHeight(0)
        );

        Ok(())
    }

    #[tokio::test]
    async fn wrong_checkpoint_hash_fail_test() -> Result<(), Report> {
        wrong_checkpoint_hash_fail().await
    }

    #[spandoc::spandoc]
    async fn wrong_checkpoint_hash_fail() -> Result<(), Report> {
        zebra_test::init();

        let good_block0 =
            Arc::<Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES[..])?;
        let good_block0_hash: BlockHeaderHash = good_block0.as_ref().into();
        // Change the header hash
        let mut bad_block0 = good_block0.clone();
        let mut bad_block0 = Arc::make_mut(&mut bad_block0);
        bad_block0.header.version = 0;
        let bad_block0: Arc<Block> = bad_block0.clone().into();

        // Make a checkpoint list containing the genesis block checkpoint
        let genesis_checkpoint_list: BTreeMap<BlockHeight, BlockHeaderHash> =
            [(good_block0.coinbase_height().unwrap(), good_block0_hash)]
                .iter()
                .cloned()
                .collect();

        let mut checkpoint_verifier =
            CheckpointVerifier::new(genesis_checkpoint_list).map_err(|e| eyre!(e))?;

        assert_eq!(
            checkpoint_verifier.previous_checkpoint_height(),
            BeforeGenesis
        );
        assert_eq!(
            checkpoint_verifier.target_checkpoint_height(),
            WaitingForBlocks
        );
        assert_eq!(
            checkpoint_verifier.checkpoint_list.max_height(),
            BlockHeight(0)
        );

        /// SPANDOC: Make sure the verifier service is ready (1/3)
        let ready_verifier_service = checkpoint_verifier
            .ready_and()
            .map_err(|e| eyre!(e))
            .await?;
        /// SPANDOC: Set up the future for bad block 0 (1/3)
        // TODO(teor || jlusby): check error kind
        let bad_verify_future_1 = timeout(
            Duration::from_secs(VERIFY_TIMEOUT_SECONDS),
            ready_verifier_service.call(bad_block0.clone()),
        );
        // We can't await the future yet, because bad blocks aren't cleared
        // until the chain is verified

        assert_eq!(
            checkpoint_verifier.previous_checkpoint_height(),
            BeforeGenesis
        );
        assert_eq!(
            checkpoint_verifier.target_checkpoint_height(),
            WaitingForBlocks
        );
        assert_eq!(
            checkpoint_verifier.checkpoint_list.max_height(),
            BlockHeight(0)
        );

        /// SPANDOC: Make sure the verifier service is ready (2/3)
        let ready_verifier_service = checkpoint_verifier
            .ready_and()
            .map_err(|e| eyre!(e))
            .await?;
        /// SPANDOC: Set up the future for bad block 0 again (2/3)
        // TODO(teor || jlusby): check error kind
        let bad_verify_future_2 = timeout(
            Duration::from_secs(VERIFY_TIMEOUT_SECONDS),
            ready_verifier_service.call(bad_block0.clone()),
        );
        // We can't await the future yet, because bad blocks aren't cleared
        // until the chain is verified

        assert_eq!(
            checkpoint_verifier.previous_checkpoint_height(),
            BeforeGenesis
        );
        assert_eq!(
            checkpoint_verifier.target_checkpoint_height(),
            WaitingForBlocks
        );
        assert_eq!(
            checkpoint_verifier.checkpoint_list.max_height(),
            BlockHeight(0)
        );

        /// SPANDOC: Make sure the verifier service is ready (3/3)
        let ready_verifier_service = checkpoint_verifier
            .ready_and()
            .map_err(|e| eyre!(e))
            .await?;
        /// SPANDOC: Set up the future for good block 0 (3/3)
        let good_verify_future = timeout(
            Duration::from_secs(VERIFY_TIMEOUT_SECONDS),
            ready_verifier_service.call(good_block0.clone()),
        );
        /// SPANDOC: Wait for the response for good block 0, and expect success (3/3)
        // TODO(teor || jlusby): check error kind
        let verify_response = good_verify_future
            .map_err(|e| eyre!(e))
            .await
            .expect("timeout should not happen")
            .expect("future should succeed");

        assert_eq!(verify_response, good_block0_hash);

        assert_eq!(
            checkpoint_verifier.previous_checkpoint_height(),
            FinalCheckpoint
        );
        assert_eq!(
            checkpoint_verifier.target_checkpoint_height(),
            FinishedVerifying
        );
        assert_eq!(
            checkpoint_verifier.checkpoint_list.max_height(),
            BlockHeight(0)
        );

        // Now, await the bad futures, which should have completed

        /// SPANDOC: Wait for the response for block 0, and expect failure (1/3)
        // TODO(teor || jlusby): check error kind
        let _ = bad_verify_future_1
            .map_err(|e| eyre!(e))
            .await
            .expect("timeout should not happen")
            .expect_err("bad block hash should fail");

        assert_eq!(
            checkpoint_verifier.previous_checkpoint_height(),
            FinalCheckpoint
        );
        assert_eq!(
            checkpoint_verifier.target_checkpoint_height(),
            FinishedVerifying
        );
        assert_eq!(
            checkpoint_verifier.checkpoint_list.max_height(),
            BlockHeight(0)
        );

        /// SPANDOC: Wait for the response for block 0, and expect failure again (2/3)
        // TODO(teor || jlusby): check error kind
        let _ = bad_verify_future_2
            .map_err(|e| eyre!(e))
            .await
            .expect("timeout should not happen")
            .expect_err("bad block hash should fail");

        assert_eq!(
            checkpoint_verifier.previous_checkpoint_height(),
            FinalCheckpoint
        );
        assert_eq!(
            checkpoint_verifier.target_checkpoint_height(),
            FinishedVerifying
        );
        assert_eq!(
            checkpoint_verifier.checkpoint_list.max_height(),
            BlockHeight(0)
        );

        Ok(())
    }

    #[tokio::test]
    async fn checkpoint_drop_cancel_test() -> Result<(), Report> {
        checkpoint_drop_cancel().await
    }

    #[spandoc::spandoc]
    async fn checkpoint_drop_cancel() -> Result<(), Report> {
        zebra_test::init();

        // Parse all the blocks
        let mut checkpoint_data = Vec::new();
        for b in &[
            &zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES[..],
            &zebra_test::vectors::BLOCK_MAINNET_1_BYTES[..],
            &zebra_test::vectors::BLOCK_MAINNET_415000_BYTES[..],
            &zebra_test::vectors::BLOCK_MAINNET_434873_BYTES[..],
        ] {
            let block = Arc::<Block>::zcash_deserialize(*b)?;
            let hash: BlockHeaderHash = block.as_ref().into();
            checkpoint_data.push((block.clone(), block.coinbase_height().unwrap(), hash));
        }

        // Make a checkpoint list containing all the blocks
        let checkpoint_list: BTreeMap<BlockHeight, BlockHeaderHash> = checkpoint_data
            .iter()
            .map(|(_block, height, hash)| (*height, *hash))
            .collect();

        let mut checkpoint_verifier =
            CheckpointVerifier::new(checkpoint_list).map_err(|e| eyre!(e))?;

        assert_eq!(
            checkpoint_verifier.previous_checkpoint_height(),
            BeforeGenesis
        );
        assert_eq!(
            checkpoint_verifier.target_checkpoint_height(),
            WaitingForBlocks
        );
        assert_eq!(
            checkpoint_verifier.checkpoint_list.max_height(),
            BlockHeight(434873)
        );

        let mut futures = Vec::new();
        // Now collect verify futures for each block
        for (block, height, hash) in checkpoint_data {
            /// SPANDOC: Make sure the verifier service is ready
            let ready_verifier_service = checkpoint_verifier
                .ready_and()
                .map_err(|e| eyre!(e))
                .await?;

            /// SPANDOC: Set up the future for block {?height}
            let verify_future = timeout(
                Duration::from_secs(VERIFY_TIMEOUT_SECONDS),
                ready_verifier_service.call(block.clone()),
            );

            futures.push((verify_future, height, hash));

            // Only continuous checkpoints verify
            assert_eq!(
                checkpoint_verifier.previous_checkpoint_height(),
                PreviousCheckpoint(BlockHeight(min(height.0, 1)))
            );
            assert_eq!(
                checkpoint_verifier.target_checkpoint_height(),
                WaitingForBlocks
            );
            assert_eq!(
                checkpoint_verifier.checkpoint_list.max_height(),
                BlockHeight(434873)
            );
        }

        // Now drop the verifier, to cancel the futures
        drop(checkpoint_verifier);

        for (verify_future, height, hash) in futures {
            /// SPANDOC: Check the response for block {?height}
            let verify_response = verify_future
                .map_err(|e| eyre!(e))
                .await
                .expect("timeout should not happen");

            if height <= BlockHeight(1) {
                let verify_hash = verify_response
                    .expect("Continuous checkpoints should have succeeded before drop");
                assert_eq!(verify_hash, hash);
            } else {
                // TODO(teor || jlusby): check error kind
                verify_response.expect_err("Pending futures should fail on drop");
            }
        }

        Ok(())
    }
}
@ -0,0 +1,83 @@
//! Checkpoint lists for checkpoint-based block verification
//!
//! Each checkpoint consists of a coinbase height and block header hash.
//!
//! Checkpoints can be used to verify their ancestors, by chaining backwards
//! to another checkpoint, via each block's parent block hash.

use std::{collections::BTreeMap, error, ops::RangeBounds};

use zebra_chain::block::BlockHeaderHash;
use zebra_chain::types::BlockHeight;

/// The inner error type for CheckpointVerifier.
// TODO(jlusby): Error = Report ?
type Error = Box<dyn error::Error + Send + Sync + 'static>;

/// Checkpoint lists are implemented using a BTreeMap.
///
/// Checkpoints should be chosen to avoid forks or chain reorganizations,
/// which only happen in the last few hundred blocks in the chain.
/// (zcashd allows chain reorganizations up to 99 blocks, and prunes
/// orphaned side-chains after 288 blocks.)
///
/// There must be a checkpoint for the genesis block at BlockHeight 0.
/// (All other checkpoints are optional.)
#[derive(Debug)]
pub struct CheckpointList(BTreeMap<BlockHeight, BlockHeaderHash>);

impl CheckpointList {
    /// Create a new checkpoint list from `checkpoint_list`.
    pub fn new(
        checkpoint_list: impl IntoIterator<Item = (BlockHeight, BlockHeaderHash)>,
    ) -> Result<Self, Error> {
        let checkpoints: BTreeMap<BlockHeight, BlockHeaderHash> =
            checkpoint_list.into_iter().collect();

        // An empty checkpoint list can't actually verify any blocks.
        match checkpoints.keys().next() {
            Some(BlockHeight(0)) => {}
            None => Err("there must be at least one checkpoint, for the genesis block")?,
            _ => Err("checkpoints must start at the genesis block height 0")?,
        };

        Ok(CheckpointList(checkpoints))
    }

    /// Is there a checkpoint at `height`?
    ///
    /// See `BTreeMap::contains_key()` for details.
    pub fn contains(&self, height: BlockHeight) -> bool {
        self.0.contains_key(&height)
    }

    /// Returns the hash corresponding to the checkpoint at `height`,
    /// or None if there is no checkpoint at that height.
    ///
    /// See `BTreeMap::get()` for details.
    pub fn hash(&self, height: BlockHeight) -> Option<BlockHeaderHash> {
        self.0.get(&height).cloned()
    }

    /// Return the block height of the highest checkpoint in the checkpoint list.
    ///
    /// If there is only a single checkpoint, then the maximum height will be
    /// zero. (The genesis block.)
    ///
    /// The maximum height is constant for each checkpoint list.
    pub fn max_height(&self) -> BlockHeight {
        self.0
            .keys()
            .cloned()
            .next_back()
            .expect("checkpoint lists must have at least one checkpoint")
    }

    /// Return the block height of the highest checkpoint in a sub-range.
    pub fn max_height_in_range<R>(&self, range: R) -> Option<BlockHeight>
    where
        R: RangeBounds<BlockHeight>,
    {
        self.0.range(range).map(|(height, _)| *height).next_back()
    }
}
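To make the genesis rule concrete, a sketch (not part of this diff) of building and querying a small list. It assumes `BlockHeaderHash` wraps a public 32-byte array, as elsewhere in zebra-chain at this point; the zero hashes are placeholders, not real block hashes:

```rust
use std::collections::BTreeMap;

let mut checkpoints = BTreeMap::new();
// The checkpoint for the genesis block at height 0 is mandatory...
checkpoints.insert(BlockHeight(0), BlockHeaderHash([0; 32]));
// ...any later checkpoints are optional, and may be sparse.
checkpoints.insert(BlockHeight(429_000), BlockHeaderHash([0; 32]));

let list = CheckpointList::new(checkpoints).expect("starts at genesis, so it is valid");
assert!(list.contains(BlockHeight(0)));
assert_eq!(list.max_height(), BlockHeight(429_000));
assert_eq!(list.max_height_in_range(..=BlockHeight(0)), Some(BlockHeight(0)));
```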
@ -0,0 +1,548 @@
|
|||
//! Tests for checkpoint-based block verification
|
||||
|
||||
use super::*;
|
||||
|
||||
use super::types::Progress::*;
|
||||
use super::types::Target::*;
|
||||
|
||||
use color_eyre::eyre::{eyre, Report};
|
||||
use futures::future::TryFutureExt;
|
||||
use std::{cmp::min, mem::drop, time::Duration};
|
||||
use tokio::time::timeout;
|
||||
use tower::{Service, ServiceExt};
|
||||
|
||||
use zebra_chain::serialization::ZcashDeserialize;
|
||||
|
||||
/// The timeout we apply to each verify future during testing.
|
||||
///
|
||||
/// The checkpoint verifier uses `tokio::sync::oneshot` channels as futures.
|
||||
/// If the verifier doesn't send a message on the channel, any tests that
|
||||
/// await the channel future will hang.
|
||||
///
|
||||
/// This value is set to a large value, to avoid spurious failures due to
|
||||
/// high system load.
|
||||
const VERIFY_TIMEOUT_SECONDS: u64 = 10;
|
||||
|
||||
#[tokio::test]
|
||||
async fn single_item_checkpoint_list_test() -> Result<(), Report> {
|
||||
single_item_checkpoint_list().await
|
||||
}
|
||||
|
||||
#[spandoc::spandoc]
|
||||
async fn single_item_checkpoint_list() -> Result<(), Report> {
|
||||
zebra_test::init();
|
||||
|
||||
let block0 =
|
||||
Arc::<Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES[..])?;
|
||||
let hash0: BlockHeaderHash = block0.as_ref().into();
|
||||
|
||||
// Make a checkpoint list containing only the genesis block
|
||||
let genesis_checkpoint_list: BTreeMap<BlockHeight, BlockHeaderHash> =
|
||||
[(block0.coinbase_height().unwrap(), hash0)]
|
||||
.iter()
|
||||
.cloned()
|
||||
.collect();
|
||||
|
||||
let mut checkpoint_verifier =
|
||||
CheckpointVerifier::new(genesis_checkpoint_list).map_err(|e| eyre!(e))?;
|
||||
|
||||
assert_eq!(
|
||||
checkpoint_verifier.previous_checkpoint_height(),
|
||||
BeforeGenesis
|
||||
);
|
||||
assert_eq!(
|
||||
checkpoint_verifier.target_checkpoint_height(),
|
||||
WaitingForBlocks
|
||||
);
|
||||
assert_eq!(
|
||||
checkpoint_verifier.checkpoint_list.max_height(),
|
||||
BlockHeight(0)
|
||||
);
|
||||
|
||||
/// SPANDOC: Make sure the verifier service is ready
|
||||
let ready_verifier_service = checkpoint_verifier
|
||||
.ready_and()
|
||||
.map_err(|e| eyre!(e))
|
||||
.await?;
|
||||
/// SPANDOC: Set up the future for block 0
|
||||
let verify_future = timeout(
|
||||
Duration::from_secs(VERIFY_TIMEOUT_SECONDS),
|
||||
ready_verifier_service.call(block0.clone()),
|
||||
);
|
||||
/// SPANDOC: Wait for the response for block 0
|
||||
// TODO(teor || jlusby): check error kind
|
||||
let verify_response = verify_future
|
||||
.map_err(|e| eyre!(e))
|
||||
.await
|
||||
.expect("timeout should not happen")
|
||||
.expect("block should verify");
|
||||
|
||||
assert_eq!(verify_response, hash0);
|
||||
|
||||
assert_eq!(
|
||||
checkpoint_verifier.previous_checkpoint_height(),
|
||||
FinalCheckpoint
|
||||
);
|
||||
assert_eq!(
|
||||
checkpoint_verifier.target_checkpoint_height(),
|
||||
FinishedVerifying
|
||||
);
|
||||
assert_eq!(
|
||||
checkpoint_verifier.checkpoint_list.max_height(),
|
||||
BlockHeight(0)
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}

#[tokio::test]
async fn multi_item_checkpoint_list_test() -> Result<(), Report> {
    multi_item_checkpoint_list().await
}

#[spandoc::spandoc]
async fn multi_item_checkpoint_list() -> Result<(), Report> {
    zebra_test::init();

    // Parse all the blocks
    let mut checkpoint_data = Vec::new();
    for b in &[
        &zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES[..],
        &zebra_test::vectors::BLOCK_MAINNET_1_BYTES[..],
        // TODO(teor): not contiguous, so they hang
        //&zebra_test::vectors::BLOCK_MAINNET_415000_BYTES[..],
        //&zebra_test::vectors::BLOCK_MAINNET_434873_BYTES[..],
    ] {
        let block = Arc::<Block>::zcash_deserialize(*b)?;
        let hash: BlockHeaderHash = block.as_ref().into();
        checkpoint_data.push((block.clone(), block.coinbase_height().unwrap(), hash));
    }

    // Make a checkpoint list containing all the blocks
    let checkpoint_list: BTreeMap<BlockHeight, BlockHeaderHash> = checkpoint_data
        .iter()
        .map(|(_block, height, hash)| (*height, *hash))
        .collect();

    let mut checkpoint_verifier = CheckpointVerifier::new(checkpoint_list).map_err(|e| eyre!(e))?;

    assert_eq!(
        checkpoint_verifier.previous_checkpoint_height(),
        BeforeGenesis
    );
    assert_eq!(
        checkpoint_verifier.target_checkpoint_height(),
        WaitingForBlocks
    );
    assert_eq!(
        checkpoint_verifier.checkpoint_list.max_height(),
        BlockHeight(1)
    );

    // Now verify each block
    for (block, height, hash) in checkpoint_data {
        /// SPANDOC: Make sure the verifier service is ready
        let ready_verifier_service = checkpoint_verifier
            .ready_and()
            .map_err(|e| eyre!(e))
            .await?;

        /// SPANDOC: Set up the future for block {?height}
        let verify_future = timeout(
            Duration::from_secs(VERIFY_TIMEOUT_SECONDS),
            ready_verifier_service.call(block.clone()),
        );
        /// SPANDOC: Wait for the response for block {?height}
        // TODO(teor || jlusby): check error kind
        let verify_response = verify_future
            .map_err(|e| eyre!(e))
            .await
            .expect("timeout should not happen")
            .expect("future should succeed");

        assert_eq!(verify_response, hash);

        if height < checkpoint_verifier.checkpoint_list.max_height() {
            assert_eq!(
                checkpoint_verifier.previous_checkpoint_height(),
                PreviousCheckpoint(height)
            );
            assert_eq!(
                checkpoint_verifier.target_checkpoint_height(),
                WaitingForBlocks
            );
        } else {
            assert_eq!(
                checkpoint_verifier.previous_checkpoint_height(),
                FinalCheckpoint
            );
            assert_eq!(
                checkpoint_verifier.target_checkpoint_height(),
                FinishedVerifying
            );
        }
        assert_eq!(
            checkpoint_verifier.checkpoint_list.max_height(),
            BlockHeight(1)
        );
    }

    assert_eq!(
        checkpoint_verifier.previous_checkpoint_height(),
        FinalCheckpoint
    );
    assert_eq!(
        checkpoint_verifier.target_checkpoint_height(),
        FinishedVerifying
    );
    assert_eq!(
        checkpoint_verifier.checkpoint_list.max_height(),
        BlockHeight(1)
    );

    Ok(())
}
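
// Illustrative sketch, not part of this commit: the TODO in
// `multi_item_checkpoint_list` notes that non-contiguous blocks hang,
// because a checkpoint only becomes the target once every height between
// the previous checkpoint and that checkpoint has been queued. A
// hypothetical gap check over the queued heights (the function name and
// signature are assumptions, not the verifier's actual internals):
#[allow(dead_code)]
fn first_unqueued_height(
    queued: &std::collections::BTreeSet<BlockHeight>,
    previous: BlockHeight,
) -> BlockHeight {
    // Scan upwards from the block after the previous checkpoint, and stop
    // at the first height that has not been queued yet.
    (previous.0 + 1..)
        .map(BlockHeight)
        .find(|height| !queued.contains(height))
        .expect("an unbounded range always contains an unqueued height")
}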

#[tokio::test]
async fn block_higher_than_max_checkpoint_fail_test() -> Result<(), Report> {
    block_higher_than_max_checkpoint_fail().await
}

#[spandoc::spandoc]
async fn block_higher_than_max_checkpoint_fail() -> Result<(), Report> {
    zebra_test::init();

    let block0 =
        Arc::<Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES[..])?;
    let block415000 =
        Arc::<Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_415000_BYTES[..])?;

    // Make a checkpoint list containing only the genesis block
    let genesis_checkpoint_list: BTreeMap<BlockHeight, BlockHeaderHash> =
        [(block0.coinbase_height().unwrap(), block0.as_ref().into())]
            .iter()
            .cloned()
            .collect();

    let mut checkpoint_verifier =
        CheckpointVerifier::new(genesis_checkpoint_list).map_err(|e| eyre!(e))?;

    assert_eq!(
        checkpoint_verifier.previous_checkpoint_height(),
        BeforeGenesis
    );
    assert_eq!(
        checkpoint_verifier.target_checkpoint_height(),
        WaitingForBlocks
    );
    assert_eq!(
        checkpoint_verifier.checkpoint_list.max_height(),
        BlockHeight(0)
    );

    /// SPANDOC: Make sure the verifier service is ready
    let ready_verifier_service = checkpoint_verifier
        .ready_and()
        .map_err(|e| eyre!(e))
        .await?;
    /// SPANDOC: Set up the future for block 415000
    let verify_future = timeout(
        Duration::from_secs(VERIFY_TIMEOUT_SECONDS),
        ready_verifier_service.call(block415000.clone()),
    );
    /// SPANDOC: Wait for the response for block 415000, and expect failure
    // TODO(teor || jlusby): check error kind
    let _ = verify_future
        .map_err(|e| eyre!(e))
        .await
        .expect("timeout should not happen")
        .expect_err("block height above the maximum checkpoint should fail");

    assert_eq!(
        checkpoint_verifier.previous_checkpoint_height(),
        BeforeGenesis
    );
    assert_eq!(
        checkpoint_verifier.target_checkpoint_height(),
        WaitingForBlocks
    );
    assert_eq!(
        checkpoint_verifier.checkpoint_list.max_height(),
        BlockHeight(0)
    );

    Ok(())
}

#[tokio::test]
async fn wrong_checkpoint_hash_fail_test() -> Result<(), Report> {
    wrong_checkpoint_hash_fail().await
}

#[spandoc::spandoc]
async fn wrong_checkpoint_hash_fail() -> Result<(), Report> {
    zebra_test::init();

    let good_block0 =
        Arc::<Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES[..])?;
    let good_block0_hash: BlockHeaderHash = good_block0.as_ref().into();
    // Change the header version, which also changes the block's hash
    let mut bad_block0 = good_block0.clone();
    let bad_block0 = Arc::make_mut(&mut bad_block0);
    bad_block0.header.version = 0;
    let bad_block0: Arc<Block> = bad_block0.clone().into();

    // Make a checkpoint list containing the genesis block checkpoint
    let genesis_checkpoint_list: BTreeMap<BlockHeight, BlockHeaderHash> =
        [(good_block0.coinbase_height().unwrap(), good_block0_hash)]
            .iter()
            .cloned()
            .collect();

    let mut checkpoint_verifier =
        CheckpointVerifier::new(genesis_checkpoint_list).map_err(|e| eyre!(e))?;

    assert_eq!(
        checkpoint_verifier.previous_checkpoint_height(),
        BeforeGenesis
    );
    assert_eq!(
        checkpoint_verifier.target_checkpoint_height(),
        WaitingForBlocks
    );
    assert_eq!(
        checkpoint_verifier.checkpoint_list.max_height(),
        BlockHeight(0)
    );

    /// SPANDOC: Make sure the verifier service is ready (1/3)
    let ready_verifier_service = checkpoint_verifier
        .ready_and()
        .map_err(|e| eyre!(e))
        .await?;
    /// SPANDOC: Set up the future for bad block 0 (1/3)
    // TODO(teor || jlusby): check error kind
    let bad_verify_future_1 = timeout(
        Duration::from_secs(VERIFY_TIMEOUT_SECONDS),
        ready_verifier_service.call(bad_block0.clone()),
    );
    // We can't await the future yet, because bad blocks aren't cleared
    // until the chain is verified

    assert_eq!(
        checkpoint_verifier.previous_checkpoint_height(),
        BeforeGenesis
    );
    assert_eq!(
        checkpoint_verifier.target_checkpoint_height(),
        WaitingForBlocks
    );
    assert_eq!(
        checkpoint_verifier.checkpoint_list.max_height(),
        BlockHeight(0)
    );

    /// SPANDOC: Make sure the verifier service is ready (2/3)
    let ready_verifier_service = checkpoint_verifier
        .ready_and()
        .map_err(|e| eyre!(e))
        .await?;
    /// SPANDOC: Set up the future for bad block 0 again (2/3)
    // TODO(teor || jlusby): check error kind
    let bad_verify_future_2 = timeout(
        Duration::from_secs(VERIFY_TIMEOUT_SECONDS),
        ready_verifier_service.call(bad_block0.clone()),
    );
    // We can't await the future yet, because bad blocks aren't cleared
    // until the chain is verified

    assert_eq!(
        checkpoint_verifier.previous_checkpoint_height(),
        BeforeGenesis
    );
    assert_eq!(
        checkpoint_verifier.target_checkpoint_height(),
        WaitingForBlocks
    );
    assert_eq!(
        checkpoint_verifier.checkpoint_list.max_height(),
        BlockHeight(0)
    );

    /// SPANDOC: Make sure the verifier service is ready (3/3)
    let ready_verifier_service = checkpoint_verifier
        .ready_and()
        .map_err(|e| eyre!(e))
        .await?;
    /// SPANDOC: Set up the future for good block 0 (3/3)
    let good_verify_future = timeout(
        Duration::from_secs(VERIFY_TIMEOUT_SECONDS),
        ready_verifier_service.call(good_block0.clone()),
    );
    /// SPANDOC: Wait for the response for good block 0, and expect success (3/3)
    // TODO(teor || jlusby): check error kind
    let verify_response = good_verify_future
        .map_err(|e| eyre!(e))
        .await
        .expect("timeout should not happen")
        .expect("future should succeed");

    assert_eq!(verify_response, good_block0_hash);

    assert_eq!(
        checkpoint_verifier.previous_checkpoint_height(),
        FinalCheckpoint
    );
    assert_eq!(
        checkpoint_verifier.target_checkpoint_height(),
        FinishedVerifying
    );
    assert_eq!(
        checkpoint_verifier.checkpoint_list.max_height(),
        BlockHeight(0)
    );

    // Now, await the bad futures, which should have completed

    /// SPANDOC: Wait for the response for block 0, and expect failure (1/3)
    // TODO(teor || jlusby): check error kind
    let _ = bad_verify_future_1
        .map_err(|e| eyre!(e))
        .await
        .expect("timeout should not happen")
        .expect_err("bad block hash should fail");

    assert_eq!(
        checkpoint_verifier.previous_checkpoint_height(),
        FinalCheckpoint
    );
    assert_eq!(
        checkpoint_verifier.target_checkpoint_height(),
        FinishedVerifying
    );
    assert_eq!(
        checkpoint_verifier.checkpoint_list.max_height(),
        BlockHeight(0)
    );

    /// SPANDOC: Wait for the response for block 0, and expect failure again (2/3)
    // TODO(teor || jlusby): check error kind
    let _ = bad_verify_future_2
        .map_err(|e| eyre!(e))
        .await
        .expect("timeout should not happen")
        .expect_err("bad block hash should fail");

    assert_eq!(
        checkpoint_verifier.previous_checkpoint_height(),
        FinalCheckpoint
    );
    assert_eq!(
        checkpoint_verifier.target_checkpoint_height(),
        FinishedVerifying
    );
    assert_eq!(
        checkpoint_verifier.checkpoint_list.max_height(),
        BlockHeight(0)
    );

    Ok(())
}

#[tokio::test]
async fn checkpoint_drop_cancel_test() -> Result<(), Report> {
    checkpoint_drop_cancel().await
}

#[spandoc::spandoc]
async fn checkpoint_drop_cancel() -> Result<(), Report> {
    zebra_test::init();

    // Parse all the blocks
    let mut checkpoint_data = Vec::new();
    for b in &[
        &zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES[..],
        &zebra_test::vectors::BLOCK_MAINNET_1_BYTES[..],
        &zebra_test::vectors::BLOCK_MAINNET_415000_BYTES[..],
        &zebra_test::vectors::BLOCK_MAINNET_434873_BYTES[..],
    ] {
        let block = Arc::<Block>::zcash_deserialize(*b)?;
        let hash: BlockHeaderHash = block.as_ref().into();
        checkpoint_data.push((block.clone(), block.coinbase_height().unwrap(), hash));
    }

    // Make a checkpoint list containing all the blocks
    let checkpoint_list: BTreeMap<BlockHeight, BlockHeaderHash> = checkpoint_data
        .iter()
        .map(|(_block, height, hash)| (*height, *hash))
        .collect();

    let mut checkpoint_verifier = CheckpointVerifier::new(checkpoint_list).map_err(|e| eyre!(e))?;

    assert_eq!(
        checkpoint_verifier.previous_checkpoint_height(),
        BeforeGenesis
    );
    assert_eq!(
        checkpoint_verifier.target_checkpoint_height(),
        WaitingForBlocks
    );
    assert_eq!(
        checkpoint_verifier.checkpoint_list.max_height(),
        BlockHeight(434873)
    );

    let mut futures = Vec::new();
    // Now collect verify futures for each block
    for (block, height, hash) in checkpoint_data {
        /// SPANDOC: Make sure the verifier service is ready
        let ready_verifier_service = checkpoint_verifier
            .ready_and()
            .map_err(|e| eyre!(e))
            .await?;

        /// SPANDOC: Set up the future for block {?height}
        let verify_future = timeout(
            Duration::from_secs(VERIFY_TIMEOUT_SECONDS),
            ready_verifier_service.call(block.clone()),
        );

        futures.push((verify_future, height, hash));

        // Only contiguous checkpoints verify
        assert_eq!(
            checkpoint_verifier.previous_checkpoint_height(),
            PreviousCheckpoint(BlockHeight(min(height.0, 1)))
        );
        assert_eq!(
            checkpoint_verifier.target_checkpoint_height(),
            WaitingForBlocks
        );
        assert_eq!(
            checkpoint_verifier.checkpoint_list.max_height(),
            BlockHeight(434873)
        );
    }

    // Now drop the verifier, to cancel the futures
    drop(checkpoint_verifier);

    for (verify_future, height, hash) in futures {
        /// SPANDOC: Check the response for block {?height}
        let verify_response = verify_future
            .map_err(|e| eyre!(e))
            .await
            .expect("timeout should not happen");

        if height <= BlockHeight(1) {
            let verify_hash =
                verify_response.expect("Contiguous checkpoints should have succeeded before drop");
            assert_eq!(verify_hash, hash);
        } else {
            // TODO(teor || jlusby): check error kind
            verify_response.expect_err("Pending futures should fail on drop");
        }
    }

    Ok(())
}
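
The last loop above relies on drop-cancellation: dropping the `CheckpointVerifier` drops whatever completion channels it holds for queued blocks, so every still-pending future resolves with an error instead of hanging. A minimal standalone sketch of that pattern, using `tokio::sync::oneshot` rather than Zebra's `QueuedBlock` internals (the per-block-channel framing is an assumption here, not confirmed by this diff):

use tokio::sync::oneshot;

struct Queue {
    // One sender per queued request; dropping the queue drops them all.
    pending: Vec<oneshot::Sender<u32>>,
}

impl Queue {
    fn enqueue(&mut self) -> oneshot::Receiver<u32> {
        let (tx, rx) = oneshot::channel();
        self.pending.push(tx);
        rx
    }
}

#[tokio::main]
async fn main() {
    let mut queue = Queue { pending: Vec::new() };
    let waiting = queue.enqueue();

    // Dropping the queue drops the sender, so the receiver resolves
    // immediately with a RecvError instead of waiting forever.
    drop(queue);
    waiting.await.expect_err("pending futures should fail on drop");
}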

@@ -0,0 +1,88 @@
//! Supporting types for checkpoint-based block verification

use std::cmp::Ordering;

use zebra_chain::types::BlockHeight;

use Progress::*;
use Target::*;

/// A `CheckpointVerifier`'s current progress verifying the chain.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Progress<HeightOrHash> {
    /// We have not verified any blocks yet.
    BeforeGenesis,
    /// We have verified up to and including this checkpoint.
    PreviousCheckpoint(HeightOrHash),
    /// We have finished verifying.
    ///
    /// The final checkpoint is not included in this variant. The verifier has
    /// finished, so the checkpoints aren't particularly useful.
    /// To get the value of the final checkpoint, use `checkpoint_list.max_height()`.
    FinalCheckpoint,
}

/// Block height progress, in chain order.
impl Ord for Progress<BlockHeight> {
    fn cmp(&self, other: &Self) -> Ordering {
        if self == other {
            return Ordering::Equal;
        }
        match (self, other) {
            (BeforeGenesis, _) => Ordering::Less,
            (_, BeforeGenesis) => Ordering::Greater,
            (FinalCheckpoint, _) => Ordering::Greater,
            (_, FinalCheckpoint) => Ordering::Less,
            (PreviousCheckpoint(self_height), PreviousCheckpoint(other_height)) => {
                self_height.cmp(other_height)
            }
        }
    }
}

/// Partial order for block height progress.
///
/// The partial order must match the total order.
impl PartialOrd for Progress<BlockHeight> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
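
// Illustrative sketch, not part of this commit: under this ordering,
// progress strictly increases from genesis to the final checkpoint.
#[cfg(test)]
mod progress_order_sketch {
    use super::Progress;
    use zebra_chain::types::BlockHeight;

    #[test]
    fn progress_increases_in_chain_order() {
        assert!(Progress::BeforeGenesis < Progress::PreviousCheckpoint(BlockHeight(0)));
        assert!(
            Progress::PreviousCheckpoint(BlockHeight(0))
                < Progress::PreviousCheckpoint(BlockHeight(1))
        );
        assert!(Progress::PreviousCheckpoint(BlockHeight(1)) < Progress::FinalCheckpoint);
    }
}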

/// A `CheckpointVerifier`'s target checkpoint, based on the current queue.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Target<HeightOrHash> {
    /// We need more blocks before we can choose a target checkpoint.
    WaitingForBlocks,
    /// We want to verify this checkpoint.
    ///
    /// The target checkpoint can be multiple checkpoints ahead of the previous
    /// checkpoint.
    Checkpoint(HeightOrHash),
    /// We have finished verifying, there will be no more targets.
    FinishedVerifying,
}

/// Block height target, in chain order.
///
/// `WaitingForBlocks` is incomparable with `Checkpoint(_)`, but compares
/// equal to itself.
impl PartialOrd for Target<BlockHeight> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        match (self, other) {
            // FinishedVerifying is the final state
            (FinishedVerifying, FinishedVerifying) => Some(Ordering::Equal),
            (FinishedVerifying, _) => Some(Ordering::Greater),
            (_, FinishedVerifying) => Some(Ordering::Less),
            // Checkpoints are comparable with each other by height
            (Checkpoint(self_height), Checkpoint(other_height)) => {
                self_height.partial_cmp(other_height)
            }
            // We can wait for blocks before or after any target checkpoint,
            // so there is no ordering between checkpoint and waiting.
            (WaitingForBlocks, Checkpoint(_)) => None,
            (Checkpoint(_), WaitingForBlocks) => None,
            // However, we consider waiting equal to itself.
            (WaitingForBlocks, WaitingForBlocks) => Some(Ordering::Equal),
        }
    }
}
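
// Illustrative sketch, not part of this commit: `WaitingForBlocks` has no
// defined order against any checkpoint target, so those comparisons
// return `None`.
#[cfg(test)]
mod target_order_sketch {
    use super::Target;
    use zebra_chain::types::BlockHeight;

    #[test]
    fn waiting_is_incomparable_with_checkpoints() {
        let waiting: Target<BlockHeight> = Target::WaitingForBlocks;
        assert_eq!(waiting.partial_cmp(&Target::Checkpoint(BlockHeight(0))), None);
        assert!(Target::Checkpoint(BlockHeight(0)) < Target::FinishedVerifying);
    }
}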

@@ -14,6 +14,9 @@
#![deny(missing_docs)]
#![allow(clippy::try_err)]

pub mod block;
pub mod checkpoint;
pub mod mempool;
pub mod verify;
pub mod redjubjub;
mod script;
mod transaction;

@@ -1,5 +1,8 @@
//! Async RedJubjub batch verifier service

#[cfg(test)]
mod tests;

use std::{
    future::Future,
    mem,

@@ -83,73 +86,3 @@ impl Drop for Verifier {
        let _ = self.tx.send(batch.verify(thread_rng()));
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    use std::time::Duration;

    use color_eyre::eyre::Result;
    use futures::stream::{FuturesUnordered, StreamExt};
    use tower::ServiceExt;
    use tower_batch::Batch;

    async fn sign_and_verify<V>(mut verifier: V, n: usize) -> Result<(), V::Error>
    where
        V: Service<Item, Response = ()>,
    {
        let rng = thread_rng();
        let mut results = FuturesUnordered::new();
        for i in 0..n {
            let span = tracing::trace_span!("sig", i);
            let msg = b"BatchVerifyTest";

            match i % 2 {
                0 => {
                    let sk = SigningKey::<SpendAuth>::new(rng);
                    let vk = VerificationKey::from(&sk);
                    let sig = sk.sign(rng, &msg[..]);
                    verifier.ready_and().await?;
                    results.push(span.in_scope(|| verifier.call((vk.into(), sig, msg).into())))
                }
                1 => {
                    let sk = SigningKey::<Binding>::new(rng);
                    let vk = VerificationKey::from(&sk);
                    let sig = sk.sign(rng, &msg[..]);
                    verifier.ready_and().await?;
                    results.push(span.in_scope(|| verifier.call((vk.into(), sig, msg).into())))
                }
                _ => panic!(),
            }
        }

        while let Some(result) = results.next().await {
            result?;
        }

        Ok(())
    }

    #[tokio::test]
    #[spandoc::spandoc]
    async fn batch_flushes_on_max_items() -> Result<()> {
        use tokio::time::timeout;

        // Use a very long max_latency and a short timeout to check that
        // flushing is happening based on hitting max_items.
        let verifier = Batch::new(Verifier::new(), 10, Duration::from_secs(1000));
        timeout(Duration::from_secs(5), sign_and_verify(verifier, 100)).await?
    }

    #[tokio::test]
    #[spandoc::spandoc]
    async fn batch_flushes_on_max_latency() -> Result<()> {
        use tokio::time::timeout;

        // Use a very high max_items and a short timeout to check that
        // flushing is happening based on hitting max_latency.
        let verifier = Batch::new(Verifier::new(), 100, Duration::from_millis(500));
        timeout(Duration::from_secs(5), sign_and_verify(verifier, 10)).await?
    }
}

@@ -0,0 +1,68 @@
//! Tests for redjubjub signature verification

use super::*;

use std::time::Duration;

use color_eyre::eyre::Result;
use futures::stream::{FuturesUnordered, StreamExt};
use tower::ServiceExt;
use tower_batch::Batch;

async fn sign_and_verify<V>(mut verifier: V, n: usize) -> Result<(), V::Error>
where
    V: Service<Item, Response = ()>,
{
    let rng = thread_rng();
    let mut results = FuturesUnordered::new();
    for i in 0..n {
        let span = tracing::trace_span!("sig", i);
        let msg = b"BatchVerifyTest";

        match i % 2 {
            0 => {
                let sk = SigningKey::<SpendAuth>::new(rng);
                let vk = VerificationKey::from(&sk);
                let sig = sk.sign(rng, &msg[..]);
                verifier.ready_and().await?;
                results.push(span.in_scope(|| verifier.call((vk.into(), sig, msg).into())))
            }
            1 => {
                let sk = SigningKey::<Binding>::new(rng);
                let vk = VerificationKey::from(&sk);
                let sig = sk.sign(rng, &msg[..]);
                verifier.ready_and().await?;
                results.push(span.in_scope(|| verifier.call((vk.into(), sig, msg).into())))
            }
            _ => panic!(),
        }
    }

    while let Some(result) = results.next().await {
        result?;
    }

    Ok(())
}

#[tokio::test]
#[spandoc::spandoc]
async fn batch_flushes_on_max_items() -> Result<()> {
    use tokio::time::timeout;

    // Use a very long max_latency and a short timeout to check that
    // flushing is happening based on hitting max_items.
    let verifier = Batch::new(Verifier::new(), 10, Duration::from_secs(1000));
    timeout(Duration::from_secs(5), sign_and_verify(verifier, 100)).await?
}

#[tokio::test]
#[spandoc::spandoc]
async fn batch_flushes_on_max_latency() -> Result<()> {
    use tokio::time::timeout;

    // Use a very high max_items and a short timeout to check that
    // flushing is happening based on hitting max_latency.
    let verifier = Batch::new(Verifier::new(), 100, Duration::from_millis(500));
    timeout(Duration::from_secs(5), sign_and_verify(verifier, 10)).await?
}
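
Both flush tests depend on the two triggers of `tower_batch::Batch::new(service, max_items, max_latency)`: a batch is verified when it fills up, or when its oldest request has waited long enough. As a rough sketch of that policy (illustrative only; the function, its parameters, and the tracking of the first item's age are assumptions, not `tower_batch`'s actual internals):

use std::time::{Duration, Instant};

/// Decide whether a pending batch should be flushed now.
fn should_flush(
    items_in_batch: usize,
    first_item_at: Option<Instant>,
    max_items: usize,
    max_latency: Duration,
) -> bool {
    // Flush when the batch is full...
    items_in_batch >= max_items
        // ...or when the oldest queued item has waited too long.
        || first_item_at
            .map(|t| t.elapsed() >= max_latency)
            .unwrap_or(false)
}

Under this reading, the first test saturates the `max_items` trigger with 100 requests against a 1000-second latency budget, and the second forces the `max_latency` trigger with only 10 requests against a 500ms budget.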
@ -1,11 +0,0 @@
|
|||
//! Block and transaction verification for Zebra.
|
||||
//!
|
||||
//! Verification is provided via `tower::Service`s, to support backpressure and batch
|
||||
//! verification.
|
||||
|
||||
mod block;
|
||||
pub mod redjubjub;
|
||||
mod script;
|
||||
mod transaction;
|
||||
|
||||
pub use block::init;
|
|
@ -1,618 +0,0 @@
|
|||
//! Block verification and chain state updates for Zebra.
|
||||
//!
|
||||
//! Verification occurs in multiple stages:
|
||||
//! - getting blocks (disk- or network-bound)
|
||||
//! - context-free verification of signatures, proofs, and scripts (CPU-bound)
|
||||
//! - context-dependent verification of the chain state (awaits a verified parent block)
|
||||
//!
|
||||
//! Verification is provided via a `tower::Service`, to support backpressure and batch
|
||||
//! verification.
|
||||
|
||||
use chrono::{DateTime, Duration, Utc};
|
||||
use futures_util::FutureExt;
|
||||
use std::{
|
||||
error,
|
||||
future::Future,
|
||||
pin::Pin,
|
||||
sync::Arc,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
use tower::{buffer::Buffer, Service, ServiceExt};
|
||||
|
||||
use zebra_chain::block::{Block, BlockHeaderHash};
|
||||
|
||||
/// Check if `block_header_time` is less than or equal to
|
||||
/// 2 hours in the future, according to the node's local clock (`now`).
|
||||
///
|
||||
/// This is a non-deterministic rule, as clocks vary over time, and
|
||||
/// between different nodes.
|
||||
///
|
||||
/// "In addition, a full validator MUST NOT accept blocks with nTime
|
||||
/// more than two hours in the future according to its clock. This
|
||||
/// is not strictly a consensus rule because it is nondeterministic,
|
||||
/// and clock time varies between nodes. Also note that a block that
|
||||
/// is rejected by this rule at a given point in time may later be
|
||||
/// accepted."[S 7.5][7.5]
|
||||
///
|
||||
/// [7.5]: https://zips.z.cash/protocol/protocol.pdf#blockheader
|
||||
pub(crate) fn node_time_check(
|
||||
block_header_time: DateTime<Utc>,
|
||||
now: DateTime<Utc>,
|
||||
) -> Result<(), Error> {
|
||||
let two_hours_in_the_future = now
|
||||
.checked_add_signed(Duration::hours(2))
|
||||
.ok_or("overflow when calculating 2 hours in the future")?;
|
||||
|
||||
if block_header_time <= two_hours_in_the_future {
|
||||
Ok(())
|
||||
} else {
|
||||
Err("block header time is more than 2 hours in the future".into())
|
||||
}
|
||||
}
|
||||
|
||||
/// [3.10]: https://zips.z.cash/protocol/protocol.pdf#coinbasetransactions
|
||||
pub(crate) fn coinbase_check(block: &Block) -> Result<(), Error> {
|
||||
if block.coinbase_height().is_some() {
|
||||
// No coinbase inputs in additional transactions allowed
|
||||
if block
|
||||
.transactions
|
||||
.iter()
|
||||
.skip(1)
|
||||
.any(|tx| tx.contains_coinbase_input())
|
||||
{
|
||||
Err("coinbase input found in additional transaction")?
|
||||
}
|
||||
Ok(())
|
||||
} else {
|
||||
Err("no coinbase transaction in block")?
|
||||
}
|
||||
}
|
||||
|
||||
struct BlockVerifier<S> {
|
||||
/// The underlying `ZebraState`, possibly wrapped in other services.
|
||||
state_service: S,
|
||||
}
|
||||
|
||||
/// The error type for the BlockVerifier Service.
|
||||
// TODO(jlusby): Error = Report ?
|
||||
type Error = Box<dyn error::Error + Send + Sync + 'static>;
|
||||
|
||||
/// The BlockVerifier service implementation.
|
||||
///
|
||||
/// After verification, blocks are added to the underlying state service.
|
||||
impl<S> Service<Arc<Block>> for BlockVerifier<S>
|
||||
where
|
||||
S: Service<zebra_state::Request, Response = zebra_state::Response, Error = Error>
|
||||
+ Send
|
||||
+ Clone
|
||||
+ 'static,
|
||||
S::Future: Send + 'static,
|
||||
{
|
||||
type Response = BlockHeaderHash;
|
||||
type Error = Error;
|
||||
type Future =
|
||||
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
||||
|
||||
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
// We don't expect the state to exert backpressure on verifier users,
|
||||
// so we don't need to call `state_service.poll_ready()` here.
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, block: Arc<Block>) -> Self::Future {
|
||||
// TODO(jlusby): Error = Report, handle errors from state_service.
|
||||
// TODO(teor):
|
||||
// - handle chain reorgs
|
||||
// - adjust state_service "unique block height" conditions
|
||||
let mut state_service = self.state_service.clone();
|
||||
|
||||
async move {
|
||||
// Since errors cause an early exit, try to do the
|
||||
// quick checks first.
|
||||
|
||||
let now = Utc::now();
|
||||
node_time_check(block.header.time, now)?;
|
||||
block.header.is_equihash_solution_valid()?;
|
||||
coinbase_check(block.as_ref())?;
|
||||
|
||||
// `Tower::Buffer` requires a 1:1 relationship between `poll()`s
|
||||
// and `call()`s, because it reserves a buffer slot in each
|
||||
// `call()`.
|
||||
let add_block = state_service
|
||||
.ready_and()
|
||||
.await?
|
||||
.call(zebra_state::Request::AddBlock { block });
|
||||
|
||||
match add_block.await? {
|
||||
zebra_state::Response::Added { hash } => Ok(hash),
|
||||
_ => Err("adding block to zebra-state failed".into()),
|
||||
}
|
||||
}
|
||||
.boxed()
|
||||
}
|
||||
}
|
||||
|
||||
/// Return a block verification service, using the provided state service.
|
||||
///
|
||||
/// The block verifier holds a state service of type `S`, used as context for
|
||||
/// block validation and to which newly verified blocks will be committed. This
|
||||
/// state is pluggable to allow for testing or instrumentation.
|
||||
///
|
||||
/// The returned type is opaque to allow instrumentation or other wrappers, but
|
||||
/// can be boxed for storage. It is also `Clone` to allow sharing of a
|
||||
/// verification service.
|
||||
///
|
||||
/// This function should be called only once for a particular state service (and
|
||||
/// the result be shared) rather than constructing multiple verification services
|
||||
/// backed by the same state layer.
|
||||
pub fn init<S>(
|
||||
state_service: S,
|
||||
) -> impl Service<
|
||||
Arc<Block>,
|
||||
Response = BlockHeaderHash,
|
||||
Error = Error,
|
||||
Future = impl Future<Output = Result<BlockHeaderHash, Error>>,
|
||||
> + Send
|
||||
+ Clone
|
||||
+ 'static
|
||||
where
|
||||
S: Service<zebra_state::Request, Response = zebra_state::Response, Error = Error>
|
||||
+ Send
|
||||
+ Clone
|
||||
+ 'static,
|
||||
S::Future: Send + 'static,
|
||||
{
|
||||
Buffer::new(BlockVerifier { state_service }, 1)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
use chrono::offset::{LocalResult, TimeZone};
|
||||
use chrono::{Duration, Utc};
|
||||
use color_eyre::eyre::Report;
|
||||
use color_eyre::eyre::{bail, eyre};
|
||||
use std::sync::Arc;
|
||||
use tower::{util::ServiceExt, Service};
|
||||
|
||||
use zebra_chain::block::Block;
|
||||
use zebra_chain::block::BlockHeader;
|
||||
use zebra_chain::serialization::ZcashDeserialize;
|
||||
use zebra_chain::transaction::Transaction;
|
||||
|
||||
#[test]
|
||||
fn time_check_past_block() {
|
||||
// This block is also verified as part of the BlockVerifier service
|
||||
// tests.
|
||||
let block =
|
||||
Arc::<Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_415000_BYTES[..])
|
||||
.expect("block should deserialize");
|
||||
let now = Utc::now();
|
||||
|
||||
// This check is non-deterministic, but BLOCK_MAINNET_415000 is
|
||||
// a long time in the past. So it's unlikely that the test machine
|
||||
// will have a clock that's far enough in the past for the test to
|
||||
// fail.
|
||||
node_time_check(block.header.time, now)
|
||||
.expect("the header time from a mainnet block should be valid");
|
||||
}

    #[test]
    fn time_check_now() {
        // These checks are deterministic, because all the times are offset
        // from the current time.
        let now = Utc::now();
        let three_hours_in_the_past = now - Duration::hours(3);
        let two_hours_in_the_future = now + Duration::hours(2);
        let two_hours_and_one_second_in_the_future =
            now + Duration::hours(2) + Duration::seconds(1);

        node_time_check(now, now).expect("the current time should be valid as a block header time");
        node_time_check(three_hours_in_the_past, now)
            .expect("a past time should be valid as a block header time");
        node_time_check(two_hours_in_the_future, now)
            .expect("2 hours in the future should be valid as a block header time");
        node_time_check(two_hours_and_one_second_in_the_future, now).expect_err(
            "2 hours and 1 second in the future should be invalid as a block header time",
        );

        // Now invert the tests
        // 3 hours in the future should fail
        node_time_check(now, three_hours_in_the_past)
            .expect_err("3 hours in the future should be invalid as a block header time");
        // The past should succeed
        node_time_check(now, two_hours_in_the_future)
            .expect("2 hours in the past should be valid as a block header time");
        node_time_check(now, two_hours_and_one_second_in_the_future)
            .expect("2 hours and 1 second in the past should be valid as a block header time");
    }

    /// Valid unix epoch timestamps for blocks, in seconds
    static BLOCK_HEADER_VALID_TIMESTAMPS: &[i64] = &[
        // These times are currently invalid DateTimes, but they could
        // become valid in future chrono versions
        i64::MIN,
        i64::MIN + 1,
        // These times are valid DateTimes
        (u32::MIN as i64) - 1,
        (u32::MIN as i64),
        (u32::MIN as i64) + 1,
        (i32::MIN as i64) - 1,
        (i32::MIN as i64),
        (i32::MIN as i64) + 1,
        -1,
        0,
        1,
        // maximum nExpiryHeight or lock_time, in blocks
        499_999_999,
        // minimum lock_time, in seconds
        500_000_000,
        500_000_001,
    ];

    /// Invalid unix epoch timestamps for blocks, in seconds
    static BLOCK_HEADER_INVALID_TIMESTAMPS: &[i64] = &[
        (i32::MAX as i64) - 1,
        (i32::MAX as i64),
        (i32::MAX as i64) + 1,
        (u32::MAX as i64) - 1,
        (u32::MAX as i64),
        (u32::MAX as i64) + 1,
        // These times are currently invalid DateTimes, but they could
        // become valid in future chrono versions
        i64::MAX - 1,
        i64::MAX,
    ];

    #[test]
    fn time_check_fixed() {
        // These checks are non-deterministic, but the times are all in the
        // distant past or far future. So it's unlikely that the test
        // machine will have a clock that makes these tests fail.
        let now = Utc::now();

        for valid_timestamp in BLOCK_HEADER_VALID_TIMESTAMPS {
            let block_header_time = match Utc.timestamp_opt(*valid_timestamp, 0) {
                LocalResult::Single(time) => time,
                LocalResult::None => {
                    // Skip the test if the timestamp is invalid
                    continue;
                }
                LocalResult::Ambiguous(_, _) => {
                    // Utc doesn't have ambiguous times
                    unreachable!();
                }
            };
            node_time_check(block_header_time, now)
                .expect("the time should be valid as a block header time");
            // Invert the check, leading to an invalid time
            node_time_check(now, block_header_time)
                .expect_err("the inverse comparison should be invalid");
        }

        for invalid_timestamp in BLOCK_HEADER_INVALID_TIMESTAMPS {
            let block_header_time = match Utc.timestamp_opt(*invalid_timestamp, 0) {
                LocalResult::Single(time) => time,
                LocalResult::None => {
                    // Skip the test if the timestamp is invalid
                    continue;
                }
                LocalResult::Ambiguous(_, _) => {
                    // Utc doesn't have ambiguous times
                    unreachable!();
                }
            };
            node_time_check(block_header_time, now)
                .expect_err("the time should be invalid as a block header time");
            // Invert the check, leading to a valid time
            node_time_check(now, block_header_time)
                .expect("the inverse comparison should be valid");
        }
    }

    #[tokio::test]
    async fn verify_test() -> Result<(), Report> {
        verify().await
    }

    #[spandoc::spandoc]
    async fn verify() -> Result<(), Report> {
        zebra_test::init();

        let block =
            Arc::<Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_415000_BYTES[..])?;
        let hash: BlockHeaderHash = block.as_ref().into();

        let state_service = Box::new(zebra_state::in_memory::init());
        let mut block_verifier = super::init(state_service);

        /// SPANDOC: Make sure the verifier service is ready
        let ready_verifier_service = block_verifier.ready_and().await.map_err(|e| eyre!(e))?;
        /// SPANDOC: Verify the block
        let verify_response = ready_verifier_service
            .call(block.clone())
            .await
            .map_err(|e| eyre!(e))?;

        assert_eq!(verify_response, hash);

        Ok(())
    }

    #[tokio::test]
    async fn round_trip_test() -> Result<(), Report> {
        round_trip().await
    }

    #[spandoc::spandoc]
    async fn round_trip() -> Result<(), Report> {
        zebra_test::init();

        let block =
            Arc::<Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_415000_BYTES[..])?;
        let hash: BlockHeaderHash = block.as_ref().into();

        let mut state_service = zebra_state::in_memory::init();
        let mut block_verifier = super::init(state_service.clone());

        /// SPANDOC: Make sure the verifier service is ready
        let ready_verifier_service = block_verifier.ready_and().await.map_err(|e| eyre!(e))?;
        /// SPANDOC: Verify the block
        let verify_response = ready_verifier_service
            .call(block.clone())
            .await
            .map_err(|e| eyre!(e))?;

        assert_eq!(verify_response, hash);

        /// SPANDOC: Make sure the state service is ready
        let ready_state_service = state_service.ready_and().await.map_err(|e| eyre!(e))?;
        /// SPANDOC: Make sure the block was added to the state
        let state_response = ready_state_service
            .call(zebra_state::Request::GetBlock { hash })
            .await
            .map_err(|e| eyre!(e))?;

        if let zebra_state::Response::Block {
            block: returned_block,
        } = state_response
        {
            assert_eq!(block, returned_block);
        } else {
            bail!("unexpected response kind: {:?}", state_response);
        }

        Ok(())
    }

    #[tokio::test]
    async fn verify_fail_add_block_test() -> Result<(), Report> {
        verify_fail_add_block().await
    }

    #[spandoc::spandoc]
    async fn verify_fail_add_block() -> Result<(), Report> {
        zebra_test::init();

        let block =
            Arc::<Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_415000_BYTES[..])?;
        let hash: BlockHeaderHash = block.as_ref().into();

        let mut state_service = zebra_state::in_memory::init();
        let mut block_verifier = super::init(state_service.clone());

        /// SPANDOC: Make sure the verifier service is ready (1/2)
        let ready_verifier_service = block_verifier.ready_and().await.map_err(|e| eyre!(e))?;
        /// SPANDOC: Verify the block for the first time
        let verify_response = ready_verifier_service
            .call(block.clone())
            .await
            .map_err(|e| eyre!(e))?;

        assert_eq!(verify_response, hash);

        /// SPANDOC: Make sure the state service is ready (1/2)
        let ready_state_service = state_service.ready_and().await.map_err(|e| eyre!(e))?;
        /// SPANDOC: Make sure the block was added to the state
        let state_response = ready_state_service
            .call(zebra_state::Request::GetBlock { hash })
            .await
            .map_err(|e| eyre!(e))?;

        if let zebra_state::Response::Block {
            block: returned_block,
        } = state_response
        {
            assert_eq!(block, returned_block);
        } else {
            bail!("unexpected response kind: {:?}", state_response);
        }

        /// SPANDOC: Make sure the verifier service is ready (2/2)
        let ready_verifier_service = block_verifier.ready_and().await.map_err(|e| eyre!(e))?;
        /// SPANDOC: Now try to add the block again, verify should fail
        // TODO(teor): ignore duplicate block verifies?
        // TODO(teor || jlusby): check error kind
        ready_verifier_service
            .call(block.clone())
            .await
            .unwrap_err();

        /// SPANDOC: Make sure the state service is ready (2/2)
        let ready_state_service = state_service.ready_and().await.map_err(|e| eyre!(e))?;
        /// SPANDOC: But the state should still return the original block we added
        let state_response = ready_state_service
            .call(zebra_state::Request::GetBlock { hash })
            .await
            .map_err(|e| eyre!(e))?;

        if let zebra_state::Response::Block {
            block: returned_block,
        } = state_response
        {
            assert_eq!(block, returned_block);
        } else {
            bail!("unexpected response kind: {:?}", state_response);
        }

        Ok(())
    }

    #[tokio::test]
    async fn verify_fail_future_time_test() -> Result<(), Report> {
        verify_fail_future_time().await
    }

    #[spandoc::spandoc]
    async fn verify_fail_future_time() -> Result<(), Report> {
        zebra_test::init();

        let mut block =
            <Block>::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_415000_BYTES[..])?;

        let mut state_service = zebra_state::in_memory::init();
        let mut block_verifier = super::init(state_service.clone());

        // Modify the block's time
        // Changing the block header also invalidates the header hashes, but
        // those checks should be performed later in validation, because they
        // are more expensive.
        let three_hours_in_the_future = Utc::now()
            .checked_add_signed(Duration::hours(3))
            .ok_or("overflow when calculating 3 hours in the future")
            .map_err(|e| eyre!(e))?;
        block.header.time = three_hours_in_the_future;

        let arc_block: Arc<Block> = block.into();

        /// SPANDOC: Make sure the verifier service is ready
        let ready_verifier_service = block_verifier.ready_and().await.map_err(|e| eyre!(e))?;
        /// SPANDOC: Try to add the block, and expect failure
        // TODO(teor || jlusby): check error kind
        ready_verifier_service
            .call(arc_block.clone())
            .await
            .unwrap_err();

        /// SPANDOC: Make sure the state service is ready
        let ready_state_service = state_service.ready_and().await.map_err(|e| eyre!(e))?;
        /// SPANDOC: Now make sure the block isn't in the state
        // TODO(teor || jlusby): check error kind
        ready_state_service
            .call(zebra_state::Request::GetBlock {
                hash: arc_block.as_ref().into(),
            })
            .await
            .unwrap_err();

        Ok(())
    }

    #[tokio::test]
    async fn header_solution_test() -> Result<(), Report> {
        header_solution().await
    }

    #[spandoc::spandoc]
    async fn header_solution() -> Result<(), Report> {
        zebra_test::init();

        // Service variables
        let state_service = Box::new(zebra_state::in_memory::init());
        let mut block_verifier = super::init(state_service.clone());

        let ready_verifier_service = block_verifier.ready_and().await.map_err(|e| eyre!(e))?;

        // Get a valid block
        let mut block =
            Block::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_415000_BYTES[..])
                .expect("block test vector should deserialize");

        // This should be ok
        ready_verifier_service
            .call(Arc::new(block.clone()))
            .await
            .map_err(|e| eyre!(e))?;

        // Change the nonce to something invalid
        block.header.nonce = [0; 32];

        let ready_verifier_service = block_verifier.ready_and().await.map_err(|e| eyre!(e))?;

        // Error: invalid equihash solution for BlockHeader
        ready_verifier_service
            .call(Arc::new(block.clone()))
            .await
            .expect_err("expected the equihash solution to be invalid");

        Ok(())
    }

    #[tokio::test]
    #[spandoc::spandoc]
    async fn coinbase() -> Result<(), Report> {
        zebra_test::init();

        // Service variables
        let state_service = Box::new(zebra_state::in_memory::init());
        let mut block_verifier = super::init(state_service.clone());

        // Get a block header
        let header =
            BlockHeader::zcash_deserialize(&zebra_test::vectors::DUMMY_HEADER[..]).unwrap();

        let ready_verifier_service = block_verifier.ready_and().await.map_err(|e| eyre!(e))?;

        // Test 1: Empty transaction list
        let block = Block {
            header,
            transactions: Vec::new(),
        };

        // Error: no coinbase transaction in block
        ready_verifier_service
            .call(Arc::new(block.clone()))
            .await
            .expect_err("fail with no coinbase transaction in block");

        let ready_verifier_service = block_verifier.ready_and().await.map_err(|e| eyre!(e))?;

        // Test 2: Transaction at first position is not coinbase
        let mut transactions = Vec::new();
        let tx = Transaction::zcash_deserialize(&zebra_test::vectors::DUMMY_TX1[..]).unwrap();
        transactions.push(Arc::new(tx));
        let block = Block {
            header,
            transactions,
        };

        // Error: no coinbase transaction in block
        ready_verifier_service
            .call(Arc::new(block))
            .await
            .expect_err("fail with no coinbase transaction in block");

        let ready_verifier_service = block_verifier.ready_and().await.map_err(|e| eyre!(e))?;

        // Test 3: Invalid coinbase position
        let mut block =
            Block::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_415000_BYTES[..])?;
        assert_eq!(block.transactions.len(), 1);

        // Extract the coinbase transaction from the block
        let coinbase_transaction = block.transactions.get(0).unwrap().clone();

        // Add another coinbase transaction to the block
        block.transactions.push(coinbase_transaction);
        assert_eq!(block.transactions.len(), 2);

        // Error: coinbase input found in additional transaction
        ready_verifier_service
            .call(Arc::new(block))
            .await
            .expect_err("fail with coinbase input found in additional transaction");

        Ok(())
    }
}
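
The `call` body in this file notes that `tower::Buffer` requires a 1:1 relationship between readiness checks and calls, because each `poll_ready` reserves a buffer slot that the next `call` consumes. A minimal sketch of the calling discipline that satisfies it, for any buffered service (the `call_once` name and generic signature are illustrative, not part of this commit):

use tower::{Service, ServiceExt};

// Reserve a slot, then immediately spend it with exactly one call.
// Dropping the service between `ready_and` and `call` would leak the
// reserved buffer slot.
async fn call_once<S, Request>(svc: &mut S, req: Request) -> Result<S::Response, S::Error>
where
    S: Service<Request>,
{
    svc.ready_and().await?.call(req).await
}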

@@ -56,7 +56,7 @@ impl StartCmd {
        let config = app_config().network.clone();
        let state = zebra_state::on_disk::init(zebra_state::Config::default());
        let (peer_set, _address_book) = zebra_network::init(config, node).await;
-       let verifier = zebra_consensus::verify::init(state.clone());
+       let verifier = zebra_consensus::block::init(state.clone());

        let mut syncer = sync::Syncer::new(peer_set, state, verifier);