rename(state): Rename state verifiers and related code (#6762)

* rename verifiers

* rename `PreparedBlock` to `SemanticallyVerifiedBlock`

* rename `CommitBlock` to `CommitSemanticallyVerifiedBlock`

* rename `FinalizedBlock` to `CheckpointVerifiedBlock`

* rename `CommitFinalizedBlock` to `CommitCheckpointVerifiedBlock`

* rename `FinalizedWithTrees` to `ContextuallyVerifiedBlockWithTrees`

* rename `ContextuallyValidBlock` to `ContextuallyVerifiedBlock`

* change some `finalized` variables or function arguments to `checkpoint_verified`

* fix docs

* document the difference between `CheckpointVerifiedBlock` and `ContextuallyVerifiedBlock`

* fix doc links

* apply suggestions to request

Co-authored-by: Marek <mail@marek.onl>

* apply suggestions to service

Co-authored-by: Marek <mail@marek.onl>

* apply suggestions to finalized_state.rs and write.rs

Co-authored-by: Marek <mail@marek.onl>

* fmt

* change some more variable names

* rename a few missed generics

* fix checkpoint log issue

* rename more `prepared` vars to `semantically_verified`

* fix test regex

* fix test regex 2

---------

Co-authored-by: Marek <mail@marek.onl>
Alfredo Garcia 2023-06-01 09:29:03 -03:00 committed by GitHub
parent 618d3fcca0
commit eb07bb31d6
GPG Key ID: 4AEE18F83AFDEB23
59 changed files with 552 additions and 508 deletions

View File

@ -74,15 +74,15 @@
<path fill="none" stroke="#000000" d="M261.2822,-293.6228C291.5824,-281.7662 337.5245,-263.7888 371.0388,-250.6745"/>
<polygon fill="#000000" stroke="#000000" points="372.5364,-253.847 380.5734,-246.9436 369.9855,-247.3283 372.5364,-253.847"/>
</g>
<!-- chain_verifier -->
<!-- router_verifier -->
<a id="node6" class="node" target="_blank" href="https://doc-internal.zebra.zfnd.org/zebra_consensus/chain/index.html">
<title>chain_verifier</title>
<title>router_verifier</title>
<ellipse fill="transparent" stroke="#000000" cx="244.6515" cy="-234" rx="65.3859" ry="18"/>
<text text-anchor="middle" x="244.6515" y="-229.8" font-family="'Opens sans', sans-serif" font-size="14.00" fill="#000000">chain_verifier</text>
<text text-anchor="middle" x="244.6515" y="-229.8" font-family="'Opens sans', sans-serif" font-size="14.00" fill="#000000">router_verifier</text>
</a>
<!-- inbound&#45;&gt;chain_verifier -->
<!-- inbound&#45;&gt;router_verifier -->
<g id="edge9" class="edge">
<title>inbound&#45;&gt;chain_verifier</title>
<title>inbound&#45;&gt;router_verifier</title>
<path fill="none" stroke="#000000" d="M233.4366,-287.8314C235.0409,-280.131 236.9485,-270.9743 238.7314,-262.4166"/>
<polygon fill="#000000" stroke="#000000" points="242.2022,-262.9169 240.8154,-252.4133 235.3494,-261.4892 242.2022,-262.9169"/>
</g>
@ -104,9 +104,9 @@
<path fill="none" stroke="#000000" d="M383.846,-360.9895C393.4567,-351.221 404.1854,-338.1106 409.6515,-324 417.2551,-304.3715 417.9695,-280.5065 416.9367,-262.2845"/>
<polygon fill="#000000" stroke="#000000" points="420.424,-261.9839 416.1656,-252.2825 413.4447,-262.522 420.424,-261.9839"/>
</g>
<!-- rpc_server&#45;&gt;chain_verifier -->
<!-- rpc_server&#45;&gt;router_verifier -->
<g id="edge11" class="edge">
<title>rpc_server&#45;&gt;chain_verifier</title>
<title>rpc_server&#45;&gt;router_verifier</title>
<path fill="none" stroke="#000000" stroke-dasharray="1,5" d="M350.1767,-360.6302C329.2082,-335.4681 290.2442,-288.7112 265.9807,-259.595"/>
<polygon fill="#000000" stroke="#000000" points="268.6447,-257.3247 259.5541,-251.8831 263.2672,-261.806 268.6447,-257.3247"/>
</g>
@ -116,9 +116,9 @@
<ellipse fill="transparent" stroke="#000000" cx="112.6515" cy="-90" rx="86.7972" ry="18"/>
<text text-anchor="middle" x="112.6515" y="-85.8" font-family="'Opens sans', sans-serif" font-size="14.00" fill="#000000">checkpoint_verifier</text>
</a>
<!-- chain_verifier&#45;&gt;checkpoint_verifier -->
<!-- router_verifier&#45;&gt;checkpoint_verifier -->
<g id="edge6" class="edge">
<title>chain_verifier&#45;&gt;checkpoint_verifier</title>
<title>router_verifier&#45;&gt;checkpoint_verifier</title>
<path fill="none" stroke="#000000" d="M216.638,-217.5178C201.6091,-207.8136 183.4054,-194.5969 169.6515,-180 151.8569,-161.1147 136.447,-135.8982 126.1523,-116.962"/>
<polygon fill="#000000" stroke="#000000" points="129.1594,-115.1615 121.3857,-107.9628 122.9735,-118.438 129.1594,-115.1615"/>
</g>
@ -128,9 +128,9 @@
<ellipse fill="transparent" stroke="#000000" cx="244.6515" cy="-162" rx="65.9697" ry="18"/>
<text text-anchor="middle" x="244.6515" y="-157.8" font-family="'Opens sans', sans-serif" font-size="14.00" fill="#000000">block_verifier</text>
</a>
<!-- chain_verifier&#45;&gt;block_verifier -->
<!-- router_verifier&#45;&gt;block_verifier -->
<g id="edge17" class="edge">
<title>chain_verifier&#45;&gt;block_verifier</title>
<title>router_verifier&#45;&gt;block_verifier</title>
<path fill="none" stroke="#000000" d="M244.6515,-215.8314C244.6515,-208.131 244.6515,-198.9743 244.6515,-190.4166"/>
<polygon fill="#000000" stroke="#000000" points="248.1516,-190.4132 244.6515,-180.4133 241.1516,-190.4133 248.1516,-190.4132"/>
</g>
@ -146,9 +146,9 @@
<ellipse fill="transparent" stroke="#000000" cx="364.6515" cy="-306" rx="36.4761" ry="18"/>
<text text-anchor="middle" x="364.6515" y="-301.8" font-family="'Opens sans', sans-serif" font-size="14.00" fill="#000000">syncer</text>
</a>
<!-- syncer&#45;&gt;chain_verifier -->
<!-- syncer&#45;&gt;router_verifier -->
<g id="edge10" class="edge">
<title>syncer&#45;&gt;chain_verifier</title>
<title>syncer&#45;&gt;router_verifier</title>
<path fill="none" stroke="#000000" d="M341.5143,-292.1177C324.2684,-281.7701 300.3887,-267.4423 280.6551,-255.6022"/>
<polygon fill="#000000" stroke="#000000" points="282.2946,-252.5042 271.9189,-250.3604 278.6931,-258.5067 282.2946,-252.5042"/>
</g>

Before: 11 KiB  |  After: 11 KiB

View File

@ -56,18 +56,18 @@ digraph services {
inbound -> state
rpc_server -> state
mempool -> transaction_verifier
chain_verifier -> checkpoint_verifier
router_verifier -> checkpoint_verifier
inbound -> mempool
rpc_server -> mempool
inbound -> chain_verifier
syncer -> chain_verifier
rpc_server -> chain_verifier [style=dotted]
inbound -> router_verifier
syncer -> router_verifier
rpc_server -> router_verifier [style=dotted]
syncer -> peer_set
mempool -> peer_set
block_verifier -> state
checkpoint_verifier -> state
block_verifier -> transaction_verifier
chain_verifier -> block_verifier
router_verifier -> block_verifier
rpc_server -> inbound [style=invis] // for layout of the diagram
}

View File

@ -35,9 +35,9 @@ pub use request::Request;
#[cfg(test)]
mod tests;
/// Asynchronous block verification.
/// Asynchronous semantic block verification.
#[derive(Debug)]
pub struct BlockVerifier<S, V> {
pub struct SemanticBlockVerifier<S, V> {
/// The network to be verified.
network: Network,
state_service: S,
@ -100,14 +100,14 @@ impl VerifyBlockError {
/// <https://github.com/zcash/zcash/blob/bad7f7eadbbb3466bebe3354266c7f69f607fcfd/src/consensus/consensus.h#L30>
pub const MAX_BLOCK_SIGOPS: u64 = 20_000;
impl<S, V> BlockVerifier<S, V>
impl<S, V> SemanticBlockVerifier<S, V>
where
S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
S::Future: Send + 'static,
V: Service<tx::Request, Response = tx::Response, Error = BoxError> + Send + Clone + 'static,
V::Future: Send + 'static,
{
/// Creates a new BlockVerifier
/// Creates a new SemanticBlockVerifier
pub fn new(network: Network, state_service: S, transaction_verifier: V) -> Self {
Self {
network,
@ -117,7 +117,7 @@ where
}
}
impl<S, V> Service<Request> for BlockVerifier<S, V>
impl<S, V> Service<Request> for SemanticBlockVerifier<S, V>
where
S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
S::Future: Send + 'static,
@ -283,7 +283,7 @@ where
let new_outputs = Arc::try_unwrap(known_utxos)
.expect("all verification tasks using known_utxos are complete");
let prepared_block = zs::PreparedBlock {
let prepared_block = zs::SemanticallyVerifiedBlock {
block,
hash,
height,
@ -311,7 +311,7 @@ where
.ready()
.await
.map_err(VerifyBlockError::Commit)?
.call(zs::Request::CommitBlock(prepared_block))
.call(zs::Request::CommitSemanticallyVerifiedBlock(prepared_block))
.await
.map_err(VerifyBlockError::Commit)?
{
@ -319,7 +319,7 @@ where
assert_eq!(committed_hash, hash, "state must commit correct hash");
Ok(hash)
}
_ => unreachable!("wrong response for CommitBlock"),
_ => unreachable!("wrong response for CommitSemanticallyVerifiedBlock"),
}
}
.instrument(span)
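A toy sketch of the commit round trip above, using stand-in types rather than the real zebra-state API: the verifier sends `CommitSemanticallyVerifiedBlock` and only accepts a `Committed` response that carries the same block hash.

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Hash([u8; 32]);

enum Request {
    CommitSemanticallyVerifiedBlock(Hash),
}

enum Response {
    Committed(Hash),
}

// Stand-in state service: echoes back the hash it was asked to commit.
fn call(request: Request) -> Response {
    match request {
        Request::CommitSemanticallyVerifiedBlock(hash) => Response::Committed(hash),
    }
}

fn main() {
    let hash = Hash([0; 32]);
    match call(Request::CommitSemanticallyVerifiedBlock(hash)) {
        Response::Committed(committed_hash) => {
            assert_eq!(committed_hash, hash, "state must commit correct hash")
        }
    }
}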

View File

@ -144,7 +144,7 @@ async fn check_transcripts() -> Result<(), Report> {
let transaction = transaction::Verifier::new(network, state_service.clone());
let transaction = Buffer::new(BoxService::new(transaction), 1);
let block_verifier = Buffer::new(
BlockVerifier::new(network, state_service.clone(), transaction),
SemanticBlockVerifier::new(network, state_service.clone(), transaction),
1,
);

View File

@ -32,7 +32,7 @@ use zebra_chain::{
parameters::{Network, GENESIS_PREVIOUS_BLOCK_HASH},
work::equihash,
};
use zebra_state::{self as zs, FinalizedBlock};
use zebra_state::{self as zs, CheckpointVerifiedBlock};
use crate::{
block::VerifyBlockError,
@ -59,7 +59,7 @@ pub use list::CheckpointList;
#[derive(Debug)]
struct QueuedBlock {
/// The block, with additional precalculated data.
block: FinalizedBlock,
block: CheckpointVerifiedBlock,
/// The transmitting end of the oneshot channel for this block's result.
tx: oneshot::Sender<Result<block::Hash, VerifyCheckpointError>>,
}
@ -68,7 +68,7 @@ struct QueuedBlock {
#[derive(Debug)]
struct RequestBlock {
/// The block, with additional precalculated data.
block: FinalizedBlock,
block: CheckpointVerifiedBlock,
/// The receiving end of the oneshot channel for this block's result.
rx: oneshot::Receiver<Result<block::Hash, VerifyCheckpointError>>,
}
@ -580,7 +580,7 @@ where
/// Check that the block height, proof of work, and Merkle root are valid.
///
/// Returns a [`FinalizedBlock`] with precalculated block data.
/// Returns a [`CheckpointVerifiedBlock`] with precalculated block data.
///
/// ## Security
///
@ -590,7 +590,10 @@ where
/// Checking the Merkle root ensures that the block hash binds the block
/// contents. To prevent malleability (CVE-2012-2459), we also need to check
/// whether the transaction hashes are unique.
fn check_block(&self, block: Arc<Block>) -> Result<FinalizedBlock, VerifyCheckpointError> {
fn check_block(
&self,
block: Arc<Block>,
) -> Result<CheckpointVerifiedBlock, VerifyCheckpointError> {
let hash = block.hash();
let height = block
.coinbase_height()
@ -601,7 +604,7 @@ where
crate::block::check::equihash_solution_is_valid(&block.header)?;
// don't do precalculation until the block passes basic difficulty checks
let block = FinalizedBlock::with_hash(block, hash);
let block = CheckpointVerifiedBlock::with_hash(block, hash);
crate::block::check::merkle_root_validity(
self.network,
@ -1092,7 +1095,7 @@ where
// We use a `ServiceExt::oneshot`, so that every state service
// `poll_ready` has a corresponding `call`. See #1593.
match state_service
.oneshot(zs::Request::CommitFinalizedBlock(req_block.block))
.oneshot(zs::Request::CommitCheckpointVerifiedBlock(req_block.block))
.map_err(VerifyCheckpointError::CommitFinalized)
.await?
{
@ -1100,7 +1103,7 @@ where
assert_eq!(committed_hash, hash, "state must commit correct hash");
Ok(hash)
}
_ => unreachable!("wrong response for CommitFinalizedBlock"),
_ => unreachable!("wrong response for CommitCheckpointVerifiedBlock"),
}
});
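A minimal sketch (stand-in types, not Zebra's) of the check ordering documented in `check_block` above: the cheap difficulty and Equihash checks run before any precalculation, and only a block that also passes the Merkle-root binding is wrapped as checkpoint-verified.

struct Block {
    difficulty_ok: bool,
    equihash_ok: bool,
    merkle_root_ok: bool,
}

struct CheckpointVerifiedBlock(Block);

fn check_block(block: Block) -> Result<CheckpointVerifiedBlock, &'static str> {
    // cheap header checks come first, before any precalculation
    if !block.difficulty_ok {
        return Err("difficulty is invalid");
    }
    if !block.equihash_ok {
        return Err("equihash solution is invalid");
    }
    // the Merkle root must bind the block contents (and, in the real code,
    // transaction hashes must also be unique to prevent CVE-2012-2459)
    if !block.merkle_root_ok {
        return Err("merkle root does not match the transactions");
    }
    Ok(CheckpointVerifiedBlock(block))
}

fn main() {
    let good = Block { difficulty_ok: true, equihash_ok: true, merkle_root_ok: true };
    let bad = Block { difficulty_ok: false, equihash_ok: true, merkle_root_ok: true };
    assert!(check_block(good).is_ok());
    assert!(check_block(bad).is_err());
}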

View File

@ -326,7 +326,7 @@ async fn continuous_blockchain(
// SPANDOC: Add block directly to the state {?height}
ready_state_service
.call(zebra_state::Request::CommitFinalizedBlock(
.call(zebra_state::Request::CommitCheckpointVerifiedBlock(
block.clone().into(),
))
.await

View File

@ -41,8 +41,8 @@ mod parameters;
mod primitives;
mod script;
pub mod chain;
pub mod error;
pub mod router;
pub mod transaction;
pub use block::{
@ -55,7 +55,6 @@ pub use block::{
},
Request, VerifyBlockError, MAX_BLOCK_SIGOPS,
};
pub use chain::VerifyChainError;
pub use checkpoint::{
CheckpointList, VerifyCheckpointError, MAX_CHECKPOINT_BYTE_COUNT, MAX_CHECKPOINT_HEIGHT_GAP,
};
@ -63,6 +62,7 @@ pub use config::Config;
pub use error::BlockError;
pub use parameters::FundingStreamReceiver;
pub use primitives::{ed25519, groth16, halo2, redjubjub, redpallas};
pub use router::RouterError;
/// A boxed [`std::error::Error`].
pub type BoxError = Box<dyn std::error::Error + Send + Sync + 'static>;

View File

@ -1,6 +1,6 @@
//! Top-level semantic block verification for Zebra.
//!
//! Verifies blocks using the [`CheckpointVerifier`] or full [`BlockVerifier`],
//! Verifies blocks using the [`CheckpointVerifier`] or full [`SemanticBlockVerifier`],
//! depending on the config and block height.
//!
//! # Correctness
@ -33,7 +33,7 @@ use zebra_chain::{
use zebra_state as zs;
use crate::{
block::{BlockVerifier, Request, VerifyBlockError},
block::{Request, SemanticBlockVerifier, VerifyBlockError},
checkpoint::{CheckpointList, CheckpointVerifier, VerifyCheckpointError},
error::TransactionError,
transaction, BoxError, Config,
@ -56,15 +56,15 @@ mod tests;
/// memory, but missing slots can significantly slow down Zebra.
const VERIFIER_BUFFER_BOUND: usize = 5;
/// The chain verifier routes requests to either the checkpoint verifier or the
/// block verifier, depending on the maximum checkpoint height.
/// The block verifier router routes requests to either the checkpoint verifier or the
/// semantic block verifier, depending on the maximum checkpoint height.
///
/// # Correctness
///
/// Block verification requests should be wrapped in a timeout, so that
/// out-of-order and invalid requests do not hang indefinitely. See the [`chain`](`crate::chain`)
/// out-of-order and invalid requests do not hang indefinitely. See the [`router`](`crate::router`)
/// module documentation for details.
struct ChainVerifier<S, V>
struct BlockVerifierRouter<S, V>
where
S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
S::Future: Send + 'static,
@ -84,8 +84,8 @@ where
/// This height must be in the `checkpoint` verifier's checkpoint list.
max_checkpoint_height: block::Height,
/// The full block verifier, used for blocks after `max_checkpoint_height`.
block: BlockVerifier<S, V>,
/// The full semantic block verifier, used for blocks after `max_checkpoint_height`.
block: SemanticBlockVerifier<S, V>,
}
/// An error while semantically verifying a block.
@ -93,41 +93,41 @@ where
// One or both of these error variants are at least 140 bytes
#[derive(Debug, Display, Error)]
#[allow(missing_docs)]
pub enum VerifyChainError {
pub enum RouterError {
/// Block could not be checkpointed
Checkpoint { source: Box<VerifyCheckpointError> },
/// Block could not be full-verified
Block { source: Box<VerifyBlockError> },
}
impl From<VerifyCheckpointError> for VerifyChainError {
impl From<VerifyCheckpointError> for RouterError {
fn from(err: VerifyCheckpointError) -> Self {
VerifyChainError::Checkpoint {
RouterError::Checkpoint {
source: Box::new(err),
}
}
}
impl From<VerifyBlockError> for VerifyChainError {
impl From<VerifyBlockError> for RouterError {
fn from(err: VerifyBlockError) -> Self {
VerifyChainError::Block {
RouterError::Block {
source: Box::new(err),
}
}
}
impl VerifyChainError {
impl RouterError {
/// Returns `true` if this is definitely a duplicate request.
/// Some duplicate requests might not be detected, and therefore return `false`.
pub fn is_duplicate_request(&self) -> bool {
match self {
VerifyChainError::Checkpoint { source, .. } => source.is_duplicate_request(),
VerifyChainError::Block { source, .. } => source.is_duplicate_request(),
RouterError::Checkpoint { source, .. } => source.is_duplicate_request(),
RouterError::Block { source, .. } => source.is_duplicate_request(),
}
}
}
impl<S, V> Service<Request> for ChainVerifier<S, V>
impl<S, V> Service<Request> for BlockVerifierRouter<S, V>
where
S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
S::Future: Send + 'static,
@ -138,7 +138,7 @@ where
V::Future: Send + 'static,
{
type Response = block::Hash;
type Error = VerifyChainError;
type Error = RouterError;
type Future =
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
@ -224,7 +224,7 @@ where
///
/// Block and transaction verification requests should be wrapped in a timeout,
/// so that out-of-order and invalid requests do not hang indefinitely.
/// See the [`chain`](`crate::chain`) module documentation for details.
/// See the [`router`](`crate::router`) module documentation for details.
#[instrument(skip(state_service))]
pub async fn init<S>(
config: Config,
@ -232,7 +232,7 @@ pub async fn init<S>(
mut state_service: S,
debug_skip_parameter_preload: bool,
) -> (
Buffer<BoxService<Request, block::Hash, VerifyChainError>, Request>,
Buffer<BoxService<Request, block::Hash, RouterError>, Request>,
Buffer<
BoxService<transaction::Request, transaction::Response, TransactionError>,
transaction::Request,
@ -364,24 +364,28 @@ where
zs::Response::Tip(tip) => tip,
_ => unreachable!("wrong response to Request::Tip"),
};
tracing::info!(?tip, ?max_checkpoint_height, "initializing chain verifier");
tracing::info!(
?tip,
?max_checkpoint_height,
"initializing block verifier router"
);
let block = BlockVerifier::new(network, state_service.clone(), transaction.clone());
let block = SemanticBlockVerifier::new(network, state_service.clone(), transaction.clone());
let checkpoint = CheckpointVerifier::from_checkpoint_list(list, network, tip, state_service);
let chain = ChainVerifier {
let router = BlockVerifierRouter {
checkpoint,
max_checkpoint_height,
block,
};
let chain = Buffer::new(BoxService::new(chain), VERIFIER_BUFFER_BOUND);
let router = Buffer::new(BoxService::new(router), VERIFIER_BUFFER_BOUND);
let task_handles = BackgroundTaskHandles {
groth16_download_handle,
state_checkpoint_verify_handle,
};
(chain, transaction, task_handles, max_checkpoint_height)
(router, transaction, task_handles, max_checkpoint_height)
}
/// Parses the checkpoint list for `network` and `config`.
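A rough sketch of the routing rule the module docs above describe, with stand-in types only; the inclusive boundary and the example heights are assumptions based on the `max_checkpoint_height` name, not the real router implementation.

#[derive(Clone, Copy, PartialEq, PartialOrd)]
struct Height(u32);

fn route(block_height: Height, max_checkpoint_height: Height) -> &'static str {
    // blocks at or below the highest checkpoint go to the checkpoint verifier,
    // later blocks go to the semantic block verifier
    if block_height <= max_checkpoint_height {
        "checkpoint_verifier"
    } else {
        "semantic_block_verifier"
    }
}

fn main() {
    let max_checkpoint_height = Height(2_000_000);
    assert_eq!(route(Height(419_200), max_checkpoint_height), "checkpoint_verifier");
    assert_eq!(route(Height(2_000_001), max_checkpoint_height), "semantic_block_verifier");
}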

View File

@ -66,14 +66,14 @@ async fn verifiers_from_network(
+ 'static,
) {
let state_service = zs::init_test(network);
let (chain_verifier, _transaction_verifier, _groth16_download_handle, _max_checkpoint_height) =
crate::chain::init(Config::default(), network, state_service.clone(), true).await;
let (router_verifier, _transaction_verifier, _groth16_download_handle, _max_checkpoint_height) =
crate::router::init(Config::default(), network, state_service.clone(), true).await;
// We can drop the download task handle here, because:
// - if the download task fails, the tests will panic, and
// - if the download task hangs, the tests will hang.
(chain_verifier, state_service)
(router_verifier, state_service)
}
static BLOCK_VERIFY_TRANSCRIPT_GENESIS: Lazy<
@ -165,15 +165,15 @@ async fn verify_checkpoint(config: Config) -> Result<(), Report> {
// init_from_verifiers.
//
// Download task panics and timeouts are propagated to the tests that use Groth16 verifiers.
let (chain_verifier, _transaction_verifier, _groth16_download_handle, _max_checkpoint_height) =
let (router_verifier, _transaction_verifier, _groth16_download_handle, _max_checkpoint_height) =
super::init(config.clone(), network, zs::init_test(network), true).await;
// Add a timeout layer
let chain_verifier =
TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(chain_verifier);
let router_verifier =
TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(router_verifier);
let transcript = Transcript::from(BLOCK_VERIFY_TRANSCRIPT_GENESIS.iter().cloned());
transcript.check(chain_verifier).await.unwrap();
transcript.check(router_verifier).await.unwrap();
Ok(())
}
@ -183,22 +183,22 @@ async fn verify_fail_no_coinbase_test() -> Result<(), Report> {
verify_fail_no_coinbase().await
}
/// Test that blocks with no coinbase height are rejected by the ChainVerifier
/// Test that blocks with no coinbase height are rejected by the BlockVerifierRouter
///
/// ChainVerifier uses the block height to decide between the CheckpointVerifier
/// and BlockVerifier. This is the error case, where there is no height.
/// BlockVerifierRouter uses the block height to decide between the CheckpointVerifier
/// and SemanticBlockVerifier. This is the error case, where there is no height.
#[spandoc::spandoc]
async fn verify_fail_no_coinbase() -> Result<(), Report> {
let _init_guard = zebra_test::init();
let (chain_verifier, state_service) = verifiers_from_network(Network::Mainnet).await;
let (router, state_service) = verifiers_from_network(Network::Mainnet).await;
// Add a timeout layer
let chain_verifier =
TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(chain_verifier);
let router_verifier =
TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(router);
let transcript = Transcript::from(NO_COINBASE_TRANSCRIPT.iter().cloned());
transcript.check(chain_verifier).await.unwrap();
transcript.check(router_verifier).await.unwrap();
let transcript = Transcript::from(NO_COINBASE_STATE_TRANSCRIPT.iter().cloned());
transcript.check(state_service).await.unwrap();
@ -216,14 +216,14 @@ async fn round_trip_checkpoint_test() -> Result<(), Report> {
async fn round_trip_checkpoint() -> Result<(), Report> {
let _init_guard = zebra_test::init();
let (chain_verifier, state_service) = verifiers_from_network(Network::Mainnet).await;
let (router_verifier, state_service) = verifiers_from_network(Network::Mainnet).await;
// Add a timeout layer
let chain_verifier =
TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(chain_verifier);
let router_verifier =
TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(router_verifier);
let transcript = Transcript::from(BLOCK_VERIFY_TRANSCRIPT_GENESIS.iter().cloned());
transcript.check(chain_verifier).await.unwrap();
transcript.check(router_verifier).await.unwrap();
let transcript = Transcript::from(STATE_VERIFY_TRANSCRIPT_GENESIS.iter().cloned());
transcript.check(state_service).await.unwrap();
@ -241,20 +241,20 @@ async fn verify_fail_add_block_checkpoint_test() -> Result<(), Report> {
async fn verify_fail_add_block_checkpoint() -> Result<(), Report> {
let _init_guard = zebra_test::init();
let (chain_verifier, state_service) = verifiers_from_network(Network::Mainnet).await;
let (router_verifier, state_service) = verifiers_from_network(Network::Mainnet).await;
// Add a timeout layer
let chain_verifier =
TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(chain_verifier);
let router_verifier =
TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(router_verifier);
let transcript = Transcript::from(BLOCK_VERIFY_TRANSCRIPT_GENESIS.iter().cloned());
transcript.check(chain_verifier.clone()).await.unwrap();
transcript.check(router_verifier.clone()).await.unwrap();
let transcript = Transcript::from(STATE_VERIFY_TRANSCRIPT_GENESIS.iter().cloned());
transcript.check(state_service.clone()).await.unwrap();
let transcript = Transcript::from(BLOCK_VERIFY_TRANSCRIPT_GENESIS_FAIL.iter().cloned());
transcript.check(chain_verifier.clone()).await.unwrap();
transcript.check(router_verifier.clone()).await.unwrap();
let transcript = Transcript::from(STATE_VERIFY_TRANSCRIPT_GENESIS.iter().cloned());
transcript.check(state_service.clone()).await.unwrap();

View File

@ -58,7 +58,7 @@ const UTXO_LOOKUP_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(
/// # Correctness
///
/// Transaction verification requests should be wrapped in a timeout, so that
/// out-of-order and invalid requests do not hang indefinitely. See the [`chain`](`crate::chain`)
/// out-of-order and invalid requests do not hang indefinitely. See the [`router`](`crate::router`)
/// module documentation for details.
#[derive(Debug, Clone)]
pub struct Verifier<ZS> {

View File

@ -24,7 +24,7 @@ use zebra_chain::{
};
use zebra_consensus::{
funding_stream_address, funding_stream_values, height_for_first_halving, miner_subsidy,
VerifyChainError,
RouterError,
};
use zebra_network::AddressBookPeers;
use zebra_node_services::mempool;
@ -217,8 +217,14 @@ pub trait GetBlockTemplateRpc {
}
/// RPC method implementations.
pub struct GetBlockTemplateRpcImpl<Mempool, State, Tip, ChainVerifier, SyncStatus, AddressBook>
where
pub struct GetBlockTemplateRpcImpl<
Mempool,
State,
Tip,
BlockVerifierRouter,
SyncStatus,
AddressBook,
> where
Mempool: Service<
mempool::Request,
Response = mempool::Response,
@ -229,7 +235,7 @@ where
Response = zebra_state::ReadResponse,
Error = zebra_state::BoxError,
>,
ChainVerifier: Service<zebra_consensus::Request, Response = block::Hash, Error = zebra_consensus::BoxError>
BlockVerifierRouter: Service<zebra_consensus::Request, Response = block::Hash, Error = zebra_consensus::BoxError>
+ Clone
+ Send
+ Sync
@ -267,7 +273,7 @@ where
latest_chain_tip: Tip,
/// The chain verifier, used for submitting blocks.
chain_verifier: ChainVerifier,
router_verifier: BlockVerifierRouter,
/// The chain sync status, used for checking if Zebra is likely close to the network chain tip.
sync_status: SyncStatus,
@ -276,8 +282,8 @@ where
address_book: AddressBook,
}
impl<Mempool, State, Tip, ChainVerifier, SyncStatus, AddressBook>
GetBlockTemplateRpcImpl<Mempool, State, Tip, ChainVerifier, SyncStatus, AddressBook>
impl<Mempool, State, Tip, BlockVerifierRouter, SyncStatus, AddressBook>
GetBlockTemplateRpcImpl<Mempool, State, Tip, BlockVerifierRouter, SyncStatus, AddressBook>
where
Mempool: Service<
mempool::Request,
@ -293,7 +299,7 @@ where
+ Sync
+ 'static,
Tip: ChainTip + Clone + Send + Sync + 'static,
ChainVerifier: Service<zebra_consensus::Request, Response = block::Hash, Error = zebra_consensus::BoxError>
BlockVerifierRouter: Service<zebra_consensus::Request, Response = block::Hash, Error = zebra_consensus::BoxError>
+ Clone
+ Send
+ Sync
@ -313,7 +319,7 @@ where
mempool: Buffer<Mempool, mempool::Request>,
state: State,
latest_chain_tip: Tip,
chain_verifier: ChainVerifier,
router_verifier: BlockVerifierRouter,
sync_status: SyncStatus,
address_book: AddressBook,
) -> Self {
@ -352,15 +358,15 @@ where
mempool,
state,
latest_chain_tip,
chain_verifier,
router_verifier,
sync_status,
address_book,
}
}
}
impl<Mempool, State, Tip, ChainVerifier, SyncStatus, AddressBook> GetBlockTemplateRpc
for GetBlockTemplateRpcImpl<Mempool, State, Tip, ChainVerifier, SyncStatus, AddressBook>
impl<Mempool, State, Tip, BlockVerifierRouter, SyncStatus, AddressBook> GetBlockTemplateRpc
for GetBlockTemplateRpcImpl<Mempool, State, Tip, BlockVerifierRouter, SyncStatus, AddressBook>
where
Mempool: Service<
mempool::Request,
@ -378,12 +384,12 @@ where
+ 'static,
<State as Service<zebra_state::ReadRequest>>::Future: Send,
Tip: ChainTip + Clone + Send + Sync + 'static,
ChainVerifier: Service<zebra_consensus::Request, Response = block::Hash, Error = zebra_consensus::BoxError>
BlockVerifierRouter: Service<zebra_consensus::Request, Response = block::Hash, Error = zebra_consensus::BoxError>
+ Clone
+ Send
+ Sync
+ 'static,
<ChainVerifier as Service<zebra_consensus::Request>>::Future: Send,
<BlockVerifierRouter as Service<zebra_consensus::Request>>::Future: Send,
SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static,
AddressBook: AddressBookPeers + Clone + Send + Sync + 'static,
{
@ -448,7 +454,7 @@ where
.and_then(get_block_template::JsonParameters::block_proposal_data)
{
return validate_block_proposal(
self.chain_verifier.clone(),
self.router_verifier.clone(),
block_proposal_bytes,
network,
latest_chain_tip,
@ -731,7 +737,7 @@ where
HexData(block_bytes): HexData,
_parameters: Option<submit_block::JsonParameters>,
) -> BoxFuture<Result<submit_block::Response>> {
let mut chain_verifier = self.chain_verifier.clone();
let mut router_verifier = self.router_verifier.clone();
async move {
let block: Block = match block_bytes.zcash_deserialize_into() {
@ -749,7 +755,7 @@ where
.unwrap_or_else(|| "invalid coinbase height".to_string());
let block_hash = block.hash();
let chain_verifier_response = chain_verifier
let router_verifier_response = router_verifier
.ready()
.await
.map_err(|error| Error {
@ -760,7 +766,7 @@ where
.call(zebra_consensus::Request::Commit(Arc::new(block)))
.await;
let chain_error = match chain_verifier_response {
let chain_error = match router_verifier_response {
// Currently, this match arm returns `null` (Accepted) for blocks committed
// to any chain, but Accepted is only for blocks in the best chain.
//
@ -776,7 +782,7 @@ where
// by downcasting from Any to VerifyChainError.
Err(box_error) => {
let error = box_error
.downcast::<VerifyChainError>()
.downcast::<RouterError>()
.map(|boxed_chain_error| *boxed_chain_error);
tracing::info!(?error, ?block_hash, ?block_height, "submit block failed verification");
@ -802,7 +808,7 @@ where
// and return a duplicate error for the newer request immediately.
// This improves the speed of the RPC response.
//
// Checking the download queues and ChainVerifier buffer for duplicates
// Checking the download queues and BlockVerifierRouter buffer for duplicates
// might require architectural changes to Zebra, so we should only do it
// if mining pools really need it.
Ok(_verify_chain_error) => submit_block::ErrorResponse::Rejected,

View File

@ -97,15 +97,15 @@ pub fn check_miner_address(
/// usual acceptance rules (except proof-of-work).
///
/// Returns a `getblocktemplate` [`Response`].
pub async fn validate_block_proposal<ChainVerifier, Tip, SyncStatus>(
mut chain_verifier: ChainVerifier,
pub async fn validate_block_proposal<BlockVerifierRouter, Tip, SyncStatus>(
mut router_verifier: BlockVerifierRouter,
block_proposal_bytes: Vec<u8>,
network: Network,
latest_chain_tip: Tip,
sync_status: SyncStatus,
) -> Result<Response>
where
ChainVerifier: Service<zebra_consensus::Request, Response = block::Hash, Error = zebra_consensus::BoxError>
BlockVerifierRouter: Service<zebra_consensus::Request, Response = block::Hash, Error = zebra_consensus::BoxError>
+ Clone
+ Send
+ Sync
@ -129,7 +129,7 @@ where
}
};
let chain_verifier_response = chain_verifier
let router_verifier_response = router_verifier
.ready()
.await
.map_err(|error| Error {
@ -140,12 +140,12 @@ where
.call(zebra_consensus::Request::CheckProposal(Arc::new(block)))
.await;
Ok(chain_verifier_response
Ok(router_verifier_response
.map(|_hash| ProposalResponse::Valid)
.unwrap_or_else(|verify_chain_error| {
tracing::info!(
?verify_chain_error,
"error response from chain_verifier in CheckProposal request"
"error response from router_verifier in CheckProposal request"
);
ProposalResponse::rejected("invalid proposal", verify_chain_error)

View File

@ -85,11 +85,11 @@ pub async fn test_responses<State, ReadState>(
<ReadState as Service<zebra_state::ReadRequest>>::Future: Send,
{
let (
chain_verifier,
router_verifier,
_transaction_verifier,
_parameter_download_task_handle,
_max_checkpoint_height,
) = zebra_consensus::chain::init(
) = zebra_consensus::router::init(
zebra_consensus::Config::default(),
network,
state.clone(),
@ -145,7 +145,7 @@ pub async fn test_responses<State, ReadState>(
Buffer::new(mempool.clone(), 1),
read_state,
mock_chain_tip.clone(),
chain_verifier.clone(),
router_verifier.clone(),
mock_sync_status.clone(),
mock_address_book,
);
@ -267,7 +267,7 @@ pub async fn test_responses<State, ReadState>(
Buffer::new(mempool.clone(), 1),
read_state.clone(),
mock_chain_tip.clone(),
chain_verifier,
router_verifier,
mock_sync_status.clone(),
MockAddressBookPeers::default(),
);
@ -365,16 +365,16 @@ pub async fn test_responses<State, ReadState>(
snapshot_rpc_getblocktemplate("invalid-proposal", get_block_template, None, &settings);
// the following snapshots use a mock read_state and chain_verifier
// the following snapshots use a mock read_state and router_verifier
let mut mock_chain_verifier = MockService::build().for_unit_tests();
let mut mock_router_verifier = MockService::build().for_unit_tests();
let get_block_template_rpc_mock_state_verifier = GetBlockTemplateRpcImpl::new(
network,
mining_config,
Buffer::new(mempool.clone(), 1),
read_state.clone(),
mock_chain_tip,
mock_chain_verifier.clone(),
mock_router_verifier.clone(),
mock_sync_status,
MockAddressBookPeers::default(),
);
@ -387,15 +387,15 @@ pub async fn test_responses<State, ReadState>(
}),
);
let mock_chain_verifier_request_handler = async move {
mock_chain_verifier
let mock_router_verifier_request_handler = async move {
mock_router_verifier
.expect_request_that(|req| matches!(req, zebra_consensus::Request::CheckProposal(_)))
.await
.respond(Hash::from([0; 32]));
};
let (get_block_template, ..) =
tokio::join!(get_block_template_fut, mock_chain_verifier_request_handler,);
tokio::join!(get_block_template_fut, mock_router_verifier_request_handler,);
let get_block_template =
get_block_template.expect("unexpected error in getblocktemplate RPC call");

View File

@ -830,11 +830,11 @@ async fn rpc_getblockcount() {
zebra_state::populated_state(blocks.clone(), Mainnet).await;
let (
chain_verifier,
router_verifier,
_transaction_verifier,
_parameter_download_task_handle,
_max_checkpoint_height,
) = zebra_consensus::chain::init(
) = zebra_consensus::router::init(
zebra_consensus::Config::default(),
Mainnet,
state.clone(),
@ -849,7 +849,7 @@ async fn rpc_getblockcount() {
Buffer::new(mempool.clone(), 1),
read_state,
latest_chain_tip.clone(),
chain_verifier,
router_verifier,
MockSyncStatus::default(),
MockAddressBookPeers::default(),
);
@ -880,11 +880,11 @@ async fn rpc_getblockcount_empty_state() {
zebra_state::init_test_services(Mainnet);
let (
chain_verifier,
router_verifier,
_transaction_verifier,
_parameter_download_task_handle,
_max_checkpoint_height,
) = zebra_consensus::chain::init(
) = zebra_consensus::router::init(
zebra_consensus::Config::default(),
Mainnet,
state.clone(),
@ -899,7 +899,7 @@ async fn rpc_getblockcount_empty_state() {
Buffer::new(mempool.clone(), 1),
read_state,
latest_chain_tip.clone(),
chain_verifier,
router_verifier,
MockSyncStatus::default(),
MockAddressBookPeers::default(),
);
@ -932,11 +932,11 @@ async fn rpc_getpeerinfo() {
zebra_state::init_test_services(Mainnet);
let (
chain_verifier,
router_verifier,
_transaction_verifier,
_parameter_download_task_handle,
_max_checkpoint_height,
) = zebra_consensus::chain::init(
) = zebra_consensus::router::init(
zebra_consensus::Config::default(),
network,
state.clone(),
@ -965,7 +965,7 @@ async fn rpc_getpeerinfo() {
Buffer::new(mempool.clone(), 1),
read_state,
latest_chain_tip.clone(),
chain_verifier,
router_verifier,
MockSyncStatus::default(),
mock_address_book,
);
@ -1007,11 +1007,11 @@ async fn rpc_getblockhash() {
zebra_state::populated_state(blocks.clone(), Mainnet).await;
let (
chain_verifier,
router_verifier,
_transaction_verifier,
_parameter_download_task_handle,
_max_checkpoint_height,
) = zebra_consensus::chain::init(
) = zebra_consensus::router::init(
zebra_consensus::Config::default(),
Mainnet,
state.clone(),
@ -1026,7 +1026,7 @@ async fn rpc_getblockhash() {
Buffer::new(mempool.clone(), 1),
read_state,
latest_chain_tip.clone(),
tower::ServiceBuilder::new().service(chain_verifier),
tower::ServiceBuilder::new().service(router_verifier),
MockSyncStatus::default(),
MockAddressBookPeers::default(),
);
@ -1195,7 +1195,7 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) {
let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests();
let read_state = MockService::build().for_unit_tests();
let chain_verifier = MockService::build().for_unit_tests();
let router_verifier = MockService::build().for_unit_tests();
let mut mock_sync_status = MockSyncStatus::default();
mock_sync_status.set_is_close_to_tip(true);
@ -1236,7 +1236,7 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) {
Buffer::new(mempool.clone(), 1),
read_state.clone(),
mock_chain_tip,
chain_verifier,
router_verifier,
mock_sync_status.clone(),
MockAddressBookPeers::default(),
);
@ -1481,11 +1481,11 @@ async fn rpc_submitblock_errors() {
// Init RPCs
let (
chain_verifier,
router_verifier,
_transaction_verifier,
_parameter_download_task_handle,
_max_checkpoint_height,
) = zebra_consensus::chain::init(
) = zebra_consensus::router::init(
zebra_consensus::Config::default(),
Mainnet,
state.clone(),
@ -1500,7 +1500,7 @@ async fn rpc_submitblock_errors() {
Buffer::new(mempool.clone(), 1),
read_state,
latest_chain_tip.clone(),
chain_verifier,
router_verifier,
MockSyncStatus::default(),
MockAddressBookPeers::default(),
);
@ -1648,7 +1648,7 @@ async fn rpc_getdifficulty() {
let mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests();
let read_state = MockService::build().for_unit_tests();
let chain_verifier = MockService::build().for_unit_tests();
let router_verifier = MockService::build().for_unit_tests();
let mut mock_sync_status = MockSyncStatus::default();
mock_sync_status.set_is_close_to_tip(true);
@ -1683,7 +1683,7 @@ async fn rpc_getdifficulty() {
Buffer::new(mempool.clone(), 1),
read_state.clone(),
mock_chain_tip,
chain_verifier,
router_verifier,
mock_sync_status.clone(),
MockAddressBookPeers::default(),
);

View File

@ -277,7 +277,7 @@ proptest! {
block.transactions.push(Arc::new(transaction.clone()));
// commit the created block
let request = zebra_state::Request::CommitFinalizedBlock(zebra_state::FinalizedBlock::from(Arc::new(block.clone())));
let request = zebra_state::Request::CommitCheckpointVerifiedBlock(zebra_state::CheckpointVerifiedBlock::from(Arc::new(block.clone())));
let send_task = tokio::spawn(write_state.clone().oneshot(request.clone()));
let response = zebra_state::Response::Committed(block.hash());

View File

@ -73,7 +73,7 @@ impl RpcServer {
//
// TODO: put some of the configs or services in their own struct?
#[allow(clippy::too_many_arguments)]
pub fn spawn<Version, Mempool, State, Tip, ChainVerifier, SyncStatus, AddressBook>(
pub fn spawn<Version, Mempool, State, Tip, BlockVerifierRouter, SyncStatus, AddressBook>(
config: Config,
#[cfg(feature = "getblocktemplate-rpcs")]
mining_config: get_block_template_rpcs::config::Config,
@ -84,7 +84,7 @@ impl RpcServer {
mempool: Buffer<Mempool, mempool::Request>,
state: State,
#[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))]
chain_verifier: ChainVerifier,
router_verifier: BlockVerifierRouter,
#[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))]
sync_status: SyncStatus,
#[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))]
@ -110,7 +110,7 @@ impl RpcServer {
+ 'static,
State::Future: Send,
Tip: ChainTip + Clone + Send + Sync + 'static,
ChainVerifier: Service<
BlockVerifierRouter: Service<
zebra_consensus::Request,
Response = block::Hash,
Error = zebra_consensus::BoxError,
@ -118,7 +118,7 @@ impl RpcServer {
+ Send
+ Sync
+ 'static,
<ChainVerifier as Service<zebra_consensus::Request>>::Future: Send,
<BlockVerifierRouter as Service<zebra_consensus::Request>>::Future: Send,
SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static,
AddressBook: AddressBookPeers + Clone + Send + Sync + 'static,
{
@ -149,7 +149,7 @@ impl RpcServer {
mempool.clone(),
state.clone(),
latest_chain_tip.clone(),
chain_verifier,
router_verifier,
sync_status,
address_book,
);

View File

@ -52,7 +52,7 @@ fn rpc_server_spawn(parallel_cpu_threads: bool) {
rt.block_on(async {
let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests();
let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests();
let mut chain_verifier: MockService<_, _, _, BoxError> =
let mut router_verifier: MockService<_, _, _, BoxError> =
MockService::build().for_unit_tests();
info!("spawning RPC server...");
@ -63,7 +63,7 @@ fn rpc_server_spawn(parallel_cpu_threads: bool) {
"RPC server test",
Buffer::new(mempool.clone(), 1),
Buffer::new(state.clone(), 1),
Buffer::new(chain_verifier.clone(), 1),
Buffer::new(router_verifier.clone(), 1),
MockSyncStatus::default(),
MockAddressBookPeers::default(),
NoChainTip,
@ -74,7 +74,7 @@ fn rpc_server_spawn(parallel_cpu_threads: bool) {
mempool.expect_no_requests().await;
state.expect_no_requests().await;
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
// The server and queue tasks should continue without errors or panics
let rpc_server_task_result = rpc_server_task_handle.now_or_never();
@ -138,7 +138,7 @@ fn rpc_server_spawn_unallocated_port(parallel_cpu_threads: bool, do_shutdown: bo
rt.block_on(async {
let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests();
let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests();
let mut chain_verifier: MockService<_, _, _, BoxError> =
let mut router_verifier: MockService<_, _, _, BoxError> =
MockService::build().for_unit_tests();
info!("spawning RPC server...");
@ -149,7 +149,7 @@ fn rpc_server_spawn_unallocated_port(parallel_cpu_threads: bool, do_shutdown: bo
"RPC server test",
Buffer::new(mempool.clone(), 1),
Buffer::new(state.clone(), 1),
Buffer::new(chain_verifier.clone(), 1),
Buffer::new(router_verifier.clone(), 1),
MockSyncStatus::default(),
MockAddressBookPeers::default(),
NoChainTip,
@ -160,7 +160,7 @@ fn rpc_server_spawn_unallocated_port(parallel_cpu_threads: bool, do_shutdown: bo
mempool.expect_no_requests().await;
state.expect_no_requests().await;
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
if do_shutdown {
rpc_server
@ -217,7 +217,7 @@ fn rpc_server_spawn_port_conflict() {
let test_task_handle = rt.spawn(async {
let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests();
let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests();
let mut chain_verifier: MockService<_, _, _, BoxError> =
let mut router_verifier: MockService<_, _, _, BoxError> =
MockService::build().for_unit_tests();
info!("spawning RPC server 1...");
@ -229,7 +229,7 @@ fn rpc_server_spawn_port_conflict() {
"RPC server 1 test",
Buffer::new(mempool.clone(), 1),
Buffer::new(state.clone(), 1),
Buffer::new(chain_verifier.clone(), 1),
Buffer::new(router_verifier.clone(), 1),
MockSyncStatus::default(),
MockAddressBookPeers::default(),
NoChainTip,
@ -246,7 +246,7 @@ fn rpc_server_spawn_port_conflict() {
"RPC server 2 conflict test",
Buffer::new(mempool.clone(), 1),
Buffer::new(state.clone(), 1),
Buffer::new(chain_verifier.clone(), 1),
Buffer::new(router_verifier.clone(), 1),
MockSyncStatus::default(),
MockAddressBookPeers::default(),
NoChainTip,
@ -257,7 +257,7 @@ fn rpc_server_spawn_port_conflict() {
mempool.expect_no_requests().await;
state.expect_no_requests().await;
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
// Because there is a panic inside a multi-threaded executor,
// we can't depend on the exact behaviour of the other tasks,
@ -325,7 +325,7 @@ fn rpc_server_spawn_port_conflict_parallel_auto() {
let test_task_handle = rt.spawn(async {
let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests();
let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests();
let mut chain_verifier: MockService<_, _, _, BoxError> =
let mut router_verifier: MockService<_, _, _, BoxError> =
MockService::build().for_unit_tests();
info!("spawning parallel RPC server 1...");
@ -337,7 +337,7 @@ fn rpc_server_spawn_port_conflict_parallel_auto() {
"RPC server 1 test",
Buffer::new(mempool.clone(), 1),
Buffer::new(state.clone(), 1),
Buffer::new(chain_verifier.clone(), 1),
Buffer::new(router_verifier.clone(), 1),
MockSyncStatus::default(),
MockAddressBookPeers::default(),
NoChainTip,
@ -354,7 +354,7 @@ fn rpc_server_spawn_port_conflict_parallel_auto() {
"RPC server 2 conflict test",
Buffer::new(mempool.clone(), 1),
Buffer::new(state.clone(), 1),
Buffer::new(chain_verifier.clone(), 1),
Buffer::new(router_verifier.clone(), 1),
MockSyncStatus::default(),
MockAddressBookPeers::default(),
NoChainTip,
@ -365,7 +365,7 @@ fn rpc_server_spawn_port_conflict_parallel_auto() {
mempool.expect_no_requests().await;
state.expect_no_requests().await;
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
// Because there might be a panic inside a multi-threaded executor,
// we can't depend on the exact behaviour of the other tasks,

View File

@ -11,19 +11,19 @@ use zebra_chain::{
};
use crate::{
request::ContextuallyValidBlock, service::chain_tip::ChainTipBlock, FinalizedBlock,
PreparedBlock,
request::ContextuallyVerifiedBlock, service::chain_tip::ChainTipBlock, CheckpointVerifiedBlock,
SemanticallyVerifiedBlock,
};
/// Mocks computation done during semantic validation
pub trait Prepare {
/// Runs block semantic validation computation, and returns the result.
/// Test-only method.
fn prepare(self) -> PreparedBlock;
fn prepare(self) -> SemanticallyVerifiedBlock;
}
impl Prepare for Arc<Block> {
fn prepare(self) -> PreparedBlock {
fn prepare(self) -> SemanticallyVerifiedBlock {
let block = self;
let hash = block.hash();
let height = block.coinbase_height().unwrap();
@ -31,7 +31,7 @@ impl Prepare for Arc<Block> {
let new_outputs =
transparent::new_ordered_outputs_with_height(&block, height, &transaction_hashes);
PreparedBlock {
SemanticallyVerifiedBlock {
block,
hash,
height,
@ -50,9 +50,9 @@ where
}
}
impl From<PreparedBlock> for ChainTipBlock {
fn from(prepared: PreparedBlock) -> Self {
let PreparedBlock {
impl From<SemanticallyVerifiedBlock> for ChainTipBlock {
fn from(prepared: SemanticallyVerifiedBlock) -> Self {
let SemanticallyVerifiedBlock {
block,
hash,
height,
@ -71,17 +71,17 @@ impl From<PreparedBlock> for ChainTipBlock {
}
}
impl PreparedBlock {
/// Returns a [`ContextuallyValidBlock`] created from this block,
impl SemanticallyVerifiedBlock {
/// Returns a [`ContextuallyVerifiedBlock`] created from this block,
/// with fake zero-valued spent UTXOs.
///
/// Only for use in tests.
#[cfg(test)]
pub fn test_with_zero_spent_utxos(&self) -> ContextuallyValidBlock {
ContextuallyValidBlock::test_with_zero_spent_utxos(self)
pub fn test_with_zero_spent_utxos(&self) -> ContextuallyVerifiedBlock {
ContextuallyVerifiedBlock::test_with_zero_spent_utxos(self)
}
/// Returns a [`ContextuallyValidBlock`] created from this block,
/// Returns a [`ContextuallyVerifiedBlock`] created from this block,
/// using a fake chain value pool change.
///
/// Only for use in tests.
@ -89,26 +89,26 @@ impl PreparedBlock {
pub fn test_with_chain_pool_change(
&self,
fake_chain_value_pool_change: ValueBalance<NegativeAllowed>,
) -> ContextuallyValidBlock {
ContextuallyValidBlock::test_with_chain_pool_change(self, fake_chain_value_pool_change)
) -> ContextuallyVerifiedBlock {
ContextuallyVerifiedBlock::test_with_chain_pool_change(self, fake_chain_value_pool_change)
}
/// Returns a [`ContextuallyValidBlock`] created from this block,
/// Returns a [`ContextuallyVerifiedBlock`] created from this block,
/// with no chain value pool change.
///
/// Only for use in tests.
#[cfg(test)]
pub fn test_with_zero_chain_pool_change(&self) -> ContextuallyValidBlock {
ContextuallyValidBlock::test_with_zero_chain_pool_change(self)
pub fn test_with_zero_chain_pool_change(&self) -> ContextuallyVerifiedBlock {
ContextuallyVerifiedBlock::test_with_zero_chain_pool_change(self)
}
}
impl ContextuallyValidBlock {
impl ContextuallyVerifiedBlock {
/// Create a block that's ready for non-finalized `Chain` contextual
/// validation, using a [`PreparedBlock`] and fake zero-valued spent UTXOs.
/// validation, using a [`SemanticallyVerifiedBlock`] and fake zero-valued spent UTXOs.
///
/// Only for use in tests.
pub fn test_with_zero_spent_utxos(block: impl Into<PreparedBlock>) -> Self {
pub fn test_with_zero_spent_utxos(block: impl Into<SemanticallyVerifiedBlock>) -> Self {
let block = block.into();
let zero_output = transparent::Output {
@ -128,19 +128,19 @@ impl ContextuallyValidBlock {
.map(|outpoint| (outpoint, zero_utxo.clone()))
.collect();
ContextuallyValidBlock::with_block_and_spent_utxos(block, zero_spent_utxos)
ContextuallyVerifiedBlock::with_block_and_spent_utxos(block, zero_spent_utxos)
.expect("all UTXOs are provided with zero values")
}
/// Create a [`ContextuallyValidBlock`] from a [`Block`] or [`PreparedBlock`],
/// Create a [`ContextuallyVerifiedBlock`] from a [`Block`] or [`SemanticallyVerifiedBlock`],
/// using a fake chain value pool change.
///
/// Only for use in tests.
pub fn test_with_chain_pool_change(
block: impl Into<PreparedBlock>,
block: impl Into<SemanticallyVerifiedBlock>,
fake_chain_value_pool_change: ValueBalance<NegativeAllowed>,
) -> Self {
let PreparedBlock {
let SemanticallyVerifiedBlock {
block,
hash,
height,
@ -162,20 +162,20 @@ impl ContextuallyValidBlock {
}
}
/// Create a [`ContextuallyValidBlock`] from a [`Block`] or [`PreparedBlock`],
/// Create a [`ContextuallyVerifiedBlock`] from a [`Block`] or [`SemanticallyVerifiedBlock`],
/// with no chain value pool change.
///
/// Only for use in tests.
pub fn test_with_zero_chain_pool_change(block: impl Into<PreparedBlock>) -> Self {
pub fn test_with_zero_chain_pool_change(block: impl Into<SemanticallyVerifiedBlock>) -> Self {
Self::test_with_chain_pool_change(block, ValueBalance::zero())
}
}
impl FinalizedBlock {
impl CheckpointVerifiedBlock {
/// Create a block that's ready to be committed to the finalized state,
/// using a precalculated [`block::Hash`] and [`block::Height`].
///
/// This is a test-only method, prefer [`FinalizedBlock::with_hash`].
/// This is a test-only method, prefer [`CheckpointVerifiedBlock::with_hash`].
#[cfg(any(test, feature = "proptest-impl"))]
pub fn with_hash_and_height(
block: Arc<Block>,

View File

@ -24,8 +24,8 @@ pub struct CloneError {
source: Arc<dyn std::error::Error + Send + Sync + 'static>,
}
impl From<CommitBlockError> for CloneError {
fn from(source: CommitBlockError) -> Self {
impl From<CommitSemanticallyVerifiedError> for CloneError {
fn from(source: CommitSemanticallyVerifiedError) -> Self {
let source = Arc::new(source);
Self { source }
}
@ -41,10 +41,10 @@ impl From<BoxError> for CloneError {
/// A boxed [`std::error::Error`].
pub type BoxError = Box<dyn std::error::Error + Send + Sync + 'static>;
/// An error describing the reason a block could not be committed to the state.
/// An error describing the reason a semantically verified block could not be committed to the state.
#[derive(Debug, Error, PartialEq, Eq)]
#[error("block is not contextually valid: {}", .0)]
pub struct CommitBlockError(#[from] ValidateContextError);
pub struct CommitSemanticallyVerifiedError(#[from] ValidateContextError);
/// An error describing why a block failed contextual validation.
#[derive(Debug, Error, Clone, PartialEq, Eq)]

View File

@ -32,9 +32,12 @@ mod tests;
pub use config::{check_and_delete_old_databases, Config};
pub use constants::MAX_BLOCK_REORG_HEIGHT;
pub use error::{
BoxError, CloneError, CommitBlockError, DuplicateNullifierError, ValidateContextError,
BoxError, CloneError, CommitSemanticallyVerifiedError, DuplicateNullifierError,
ValidateContextError,
};
pub use request::{
CheckpointVerifiedBlock, HashOrHeight, ReadRequest, Request, SemanticallyVerifiedBlock,
};
pub use request::{FinalizedBlock, HashOrHeight, PreparedBlock, ReadRequest, Request};
pub use response::{KnownBlock, MinedTx, ReadResponse, Response};
pub use service::{
chain_tip::{ChainTipChange, LatestChainTip, TipAction},
@ -54,4 +57,4 @@ pub use service::{
init_test, init_test_services, ReadStateService,
};
pub(crate) use request::ContextuallyValidBlock;
pub(crate) use request::ContextuallyVerifiedBlock;

View File

@ -137,7 +137,7 @@ impl std::str::FromStr for HashOrHeight {
/// the *service caller*'s task, not inside the service call itself. This allows
/// moving work out of the single-threaded state service.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct PreparedBlock {
pub struct SemanticallyVerifiedBlock {
/// The block to commit to the state.
pub block: Arc<Block>,
/// The hash of the block.
@ -165,12 +165,16 @@ pub struct PreparedBlock {
// Some fields are pub(crate), so we can add whatever db-format-dependent
// precomputation we want here without leaking internal details.
/// A contextually validated block, ready to be committed directly to the finalized state with
/// no checks, if it becomes the root of the best non-finalized chain.
/// A contextually verified block, ready to be committed directly to the finalized state with no
/// checks, if it becomes the root of the best non-finalized chain.
///
/// Used by the state service and non-finalized `Chain`.
///
/// Note: The difference between a `CheckpointVerifiedBlock` and a `ContextuallyVerifiedBlock` is
/// that the `CheckpointVerifier` doesn't bind the transaction authorizing data to the
/// `ChainHistoryBlockTxAuthCommitmentHash`, but the `NonFinalizedState` and `FinalizedState` do.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ContextuallyValidBlock {
pub struct ContextuallyVerifiedBlock {
/// The block to commit to the state.
pub(crate) block: Arc<Block>,
@ -207,12 +211,16 @@ pub struct ContextuallyValidBlock {
pub(crate) chain_value_pool_change: ValueBalance<NegativeAllowed>,
}
/// A finalized block, ready to be committed directly to the finalized state with
/// A block ready to be committed directly to the finalized state with
/// no checks.
///
/// This is exposed for use in checkpointing.
///
/// Note: The difference between a `CheckpointVerifiedBlock` and a `ContextuallyVerifiedBlock` is
/// that the `CheckpointVerifier` doesn't bind the transaction authorizing data to the
/// `ChainHistoryBlockTxAuthCommitmentHash`, but the `NonFinalizedState` and `FinalizedState` do.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct FinalizedBlock {
pub struct CheckpointVerifiedBlock {
/// The block to commit to the state.
pub block: Arc<Block>,
/// The hash of the block.
@ -266,42 +274,42 @@ impl Treestate {
/// when committing a block. The associated treestate is passed so that the
/// finalized state does not have to retrieve the previous treestate from the
/// database and recompute the new one.
pub struct FinalizedWithTrees {
pub struct ContextuallyVerifiedBlockWithTrees {
/// A block ready to be committed.
pub finalized: FinalizedBlock,
pub checkpoint_verified: CheckpointVerifiedBlock,
/// The treestate associated with the block.
pub treestate: Option<Treestate>,
}
impl FinalizedWithTrees {
pub fn new(block: ContextuallyValidBlock, treestate: Treestate) -> Self {
let finalized = FinalizedBlock::from(block);
impl ContextuallyVerifiedBlockWithTrees {
pub fn new(block: ContextuallyVerifiedBlock, treestate: Treestate) -> Self {
let checkpoint_verified = CheckpointVerifiedBlock::from(block);
Self {
finalized,
checkpoint_verified,
treestate: Some(treestate),
}
}
}
impl From<Arc<Block>> for FinalizedWithTrees {
impl From<Arc<Block>> for ContextuallyVerifiedBlockWithTrees {
fn from(block: Arc<Block>) -> Self {
Self::from(FinalizedBlock::from(block))
Self::from(CheckpointVerifiedBlock::from(block))
}
}
impl From<FinalizedBlock> for FinalizedWithTrees {
fn from(block: FinalizedBlock) -> Self {
impl From<CheckpointVerifiedBlock> for ContextuallyVerifiedBlockWithTrees {
fn from(block: CheckpointVerifiedBlock) -> Self {
Self {
finalized: block,
checkpoint_verified: block,
treestate: None,
}
}
}
impl From<&PreparedBlock> for PreparedBlock {
fn from(prepared: &PreparedBlock) -> Self {
prepared.clone()
impl From<&SemanticallyVerifiedBlock> for SemanticallyVerifiedBlock {
fn from(semantically_verified: &SemanticallyVerifiedBlock) -> Self {
semantically_verified.clone()
}
}
@ -309,27 +317,27 @@ impl From<&PreparedBlock> for PreparedBlock {
// the *service caller*'s task, not inside the service call itself.
// This allows moving work out of the single-threaded state service.
impl ContextuallyValidBlock {
impl ContextuallyVerifiedBlock {
/// Create a block that's ready for non-finalized `Chain` contextual validation,
/// using a [`PreparedBlock`] and the UTXOs it spends.
/// using a [`SemanticallyVerifiedBlock`] and the UTXOs it spends.
///
/// When combined, `prepared.new_outputs` and `spent_utxos` must contain
/// When combined, `semantically_verified.new_outputs` and `spent_utxos` must contain
/// the [`Utxo`](transparent::Utxo)s spent by every transparent input in this block,
/// including UTXOs created by earlier transactions in this block.
///
/// Note: a [`ContextuallyValidBlock`] isn't actually contextually valid until
/// Note: a [`ContextuallyVerifiedBlock`] isn't actually contextually valid until
/// [`Chain::push()`](crate::service::non_finalized_state::Chain::push) returns success.
pub fn with_block_and_spent_utxos(
prepared: PreparedBlock,
semantically_verified: SemanticallyVerifiedBlock,
mut spent_outputs: HashMap<transparent::OutPoint, transparent::OrderedUtxo>,
) -> Result<Self, ValueBalanceError> {
let PreparedBlock {
let SemanticallyVerifiedBlock {
block,
hash,
height,
new_outputs,
transaction_hashes,
} = prepared;
} = semantically_verified;
// This is redundant for the non-finalized state,
// but useful to make some tests pass more easily.
@ -350,12 +358,12 @@ impl ContextuallyValidBlock {
}
}
impl FinalizedBlock {
impl CheckpointVerifiedBlock {
/// Create a block that's ready to be committed to the finalized state,
/// using a precalculated [`block::Hash`].
///
/// Note: a [`FinalizedBlock`] isn't actually finalized
/// until [`Request::CommitFinalizedBlock`] returns success.
/// Note: a [`CheckpointVerifiedBlock`] isn't actually finalized
/// until [`Request::CommitCheckpointVerifiedBlock`] returns success.
pub fn with_hash(block: Arc<Block>, hash: block::Hash) -> Self {
let height = block
.coinbase_height()
@ -373,17 +381,17 @@ impl FinalizedBlock {
}
}
impl From<Arc<Block>> for FinalizedBlock {
impl From<Arc<Block>> for CheckpointVerifiedBlock {
fn from(block: Arc<Block>) -> Self {
let hash = block.hash();
FinalizedBlock::with_hash(block, hash)
CheckpointVerifiedBlock::with_hash(block, hash)
}
}
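As a quick illustration of how the renamed conversions above compose (a hedged sketch, not part of this diff; `block` is a hypothetical `Arc<Block>` that has already passed checkpoint verification):
// Wrap the checkpoint-verified block without a treestate; the finalized
// state will then recompute the note commitment and history trees itself.
let checkpoint_verified = CheckpointVerifiedBlock::from(block);
let with_trees = ContextuallyVerifiedBlockWithTrees::from(checkpoint_verified);
assert!(with_trees.treestate.is_none());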
impl From<ContextuallyValidBlock> for FinalizedBlock {
fn from(contextually_valid: ContextuallyValidBlock) -> Self {
let ContextuallyValidBlock {
impl From<ContextuallyVerifiedBlock> for CheckpointVerifiedBlock {
fn from(contextually_valid: ContextuallyVerifiedBlock) -> Self {
let ContextuallyVerifiedBlock {
block,
hash,
height,
@ -428,7 +436,7 @@ pub enum Request {
/// Block commit requests should be wrapped in a timeout, so that
/// out-of-order and invalid requests do not hang indefinitely. See the [`crate`]
/// documentation for details.
CommitBlock(PreparedBlock),
CommitSemanticallyVerifiedBlock(SemanticallyVerifiedBlock),
/// Commit a checkpointed block to the state, skipping most block validation.
///
@ -474,7 +482,7 @@ pub enum Request {
/// Block commit requests should be wrapped in a timeout, so that
/// out-of-order and invalid requests do not hang indefinitely. See the [`crate`]
/// documentation for details.
CommitFinalizedBlock(FinalizedBlock),
CommitCheckpointVerifiedBlock(CheckpointVerifiedBlock),
/// Computes the depth in the current best chain of the block identified by the given hash.
///
@ -619,14 +627,15 @@ pub enum Request {
///
/// Returns [`Response::ValidBlockProposal`] when successful.
/// See `[ReadRequest::CheckBlockProposalValidity]` for details.
CheckBlockProposalValidity(PreparedBlock),
CheckBlockProposalValidity(SemanticallyVerifiedBlock),
}
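For orientation, a hedged sketch of how a caller drives the renamed commit variants (assumes a `state` handle implementing `tower::Service<Request>` and an already checkpoint-verified `block: Arc<Block>`; everything except the request and response variants is hypothetical):
use tower::{Service, ServiceExt};

// Checkpoint-verified blocks skip most validation and are committed
// directly to the finalized state.
let response = state
    .ready()
    .await?
    .call(Request::CommitCheckpointVerifiedBlock(block.into()))
    .await?;
assert!(matches!(response, Response::Committed(_)));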
impl Request {
fn variant_name(&self) -> &'static str {
match self {
Request::CommitBlock(_) => "commit_block",
Request::CommitFinalizedBlock(_) => "commit_finalized_block",
Request::CommitSemanticallyVerifiedBlock(_) => "commit_semantically_verified_block",
Request::CommitCheckpointVerifiedBlock(_) => "commit_checkpoint_verified_block",
Request::AwaitUtxo(_) => "await_utxo",
Request::Depth(_) => "depth",
Request::Tip => "tip",
@ -870,7 +879,7 @@ pub enum ReadRequest {
///
/// Returns [`ReadResponse::ValidBlockProposal`] when successful, or an error if
/// the block fails contextual validation.
CheckBlockProposalValidity(PreparedBlock),
CheckBlockProposalValidity(SemanticallyVerifiedBlock),
}
impl ReadRequest {
@ -947,9 +956,8 @@ impl TryFrom<Request> for ReadRequest {
Ok(ReadRequest::CheckBestChainTipNullifiersAndAnchors(tx))
}
Request::CommitBlock(_) | Request::CommitFinalizedBlock(_) => {
Err("ReadService does not write blocks")
}
Request::CommitSemanticallyVerifiedBlock(_)
| Request::CommitCheckpointVerifiedBlock(_) => Err("ReadService does not write blocks"),
Request::AwaitUtxo(_) => Err("ReadService does not track pending UTXOs. \
Manually convert the request to ReadRequest::AnyChainUtxo, \
@ -958,9 +966,9 @@ impl TryFrom<Request> for ReadRequest {
Request::KnownBlock(_) => Err("ReadService does not track queued blocks"),
#[cfg(feature = "getblocktemplate-rpcs")]
Request::CheckBlockProposalValidity(prepared) => {
Ok(ReadRequest::CheckBlockProposalValidity(prepared))
}
Request::CheckBlockProposalValidity(semantically_verified) => Ok(
ReadRequest::CheckBlockProposalValidity(semantically_verified),
),
}
}
}
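A small sketch of the conversion behaviour above (assuming a `semantically_verified: SemanticallyVerifiedBlock` built elsewhere):
// Read-only requests convert cleanly into ReadRequests...
assert!(ReadRequest::try_from(Request::Tip).is_ok());
// ...but the renamed write variants are still rejected by the ReadService.
let write_request = Request::CommitSemanticallyVerifiedBlock(semantically_verified);
assert!(ReadRequest::try_from(write_request).is_err());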

View File

@ -24,7 +24,7 @@ use crate::{service::read::AddressUtxos, TransactionLocation};
#[derive(Clone, Debug, PartialEq, Eq)]
/// A response to a [`StateService`](crate::service::StateService) [`Request`].
pub enum Response {
/// Response to [`Request::CommitBlock`] indicating that a block was
/// Response to [`Request::CommitSemanticallyVerifiedBlock`] indicating that a block was
/// successfully committed to the state.
Committed(block::Hash),

View File

@ -61,8 +61,8 @@ use crate::{
queued_blocks::QueuedBlocks,
watch_receiver::WatchReceiver,
},
BoxError, CloneError, Config, FinalizedBlock, PreparedBlock, ReadRequest, ReadResponse,
Request, Response,
BoxError, CheckpointVerifiedBlock, CloneError, Config, ReadRequest, ReadResponse, Request,
Response, SemanticallyVerifiedBlock,
};
pub mod block_iter;
@ -345,7 +345,7 @@ impl StateService {
let initial_tip = finalized_state
.db
.tip_block()
.map(FinalizedBlock::from)
.map(CheckpointVerifiedBlock::from)
.map(ChainTipBlock::from);
timer.finish(module_path!(), line!(), "fetching database tip");
@ -459,25 +459,25 @@ impl StateService {
/// Returns a channel receiver that provides the result of the block commit.
fn queue_and_commit_finalized(
&mut self,
finalized: FinalizedBlock,
checkpoint_verified: CheckpointVerifiedBlock,
) -> oneshot::Receiver<Result<block::Hash, BoxError>> {
// # Correctness & Performance
//
// This method must not block, access the database, or perform CPU-intensive tasks,
// because it is called directly from the tokio executor's Future threads.
let queued_prev_hash = finalized.block.header.previous_block_hash;
let queued_height = finalized.height;
let queued_prev_hash = checkpoint_verified.block.header.previous_block_hash;
let queued_height = checkpoint_verified.height;
// If we're close to the final checkpoint, make the block's UTXOs available for
// full verification of non-finalized blocks, even when it is in the channel.
if self.is_close_to_final_checkpoint(queued_height) {
self.sent_non_finalized_block_hashes
.add_finalized(&finalized)
.add_finalized(&checkpoint_verified)
}
let (rsp_tx, rsp_rx) = oneshot::channel();
let queued = (finalized, rsp_tx);
let queued = (checkpoint_verified, rsp_tx);
if self.finalized_block_write_sender.is_some() {
// We're still committing finalized blocks
@ -636,17 +636,17 @@ impl StateService {
/// in RFC0005.
///
/// [1]: https://zebra.zfnd.org/dev/rfcs/0005-state-updates.html#committing-non-finalized-blocks
#[instrument(level = "debug", skip(self, prepared))]
#[instrument(level = "debug", skip(self, semantically_verified))]
fn queue_and_commit_non_finalized(
&mut self,
prepared: PreparedBlock,
semantically_verified: SemanticallyVerifiedBlock,
) -> oneshot::Receiver<Result<block::Hash, BoxError>> {
tracing::debug!(block = %prepared.block, "queueing block for contextual verification");
let parent_hash = prepared.block.header.previous_block_hash;
tracing::debug!(block = %semantically_verified.block, "queueing block for contextual verification");
let parent_hash = semantically_verified.block.header.previous_block_hash;
if self
.sent_non_finalized_block_hashes
.contains(&prepared.hash)
.contains(&semantically_verified.hash)
{
let (rsp_tx, rsp_rx) = oneshot::channel();
let _ = rsp_tx.send(Err(
@ -655,7 +655,11 @@ impl StateService {
return rsp_rx;
}
if self.read_service.db.contains_height(prepared.height) {
if self
.read_service
.db
.contains_height(semantically_verified.height)
{
let (rsp_tx, rsp_rx) = oneshot::channel();
let _ = rsp_tx.send(Err(
"block height is in the finalized state: block is already committed to the state"
@ -664,11 +668,12 @@ impl StateService {
return rsp_rx;
}
// Request::CommitBlock contract: a request to commit a block which has
// been queued but not yet committed to the state fails the older
// request and replaces it with the newer request.
let rsp_rx = if let Some((_, old_rsp_tx)) =
self.queued_non_finalized_blocks.get_mut(&prepared.hash)
// [`Request::CommitSemanticallyVerifiedBlock`] contract: a request to commit a block which
// has been queued but not yet committed to the state fails the older request and replaces
// it with the newer request.
let rsp_rx = if let Some((_, old_rsp_tx)) = self
.queued_non_finalized_blocks
.get_mut(&semantically_verified.hash)
{
tracing::debug!("replacing older queued request with new request");
let (mut rsp_tx, rsp_rx) = oneshot::channel();
@ -677,7 +682,8 @@ impl StateService {
rsp_rx
} else {
let (rsp_tx, rsp_rx) = oneshot::channel();
self.queued_non_finalized_blocks.queue((prepared, rsp_tx));
self.queued_non_finalized_blocks
.queue((semantically_verified, rsp_tx));
rsp_rx
};
@ -763,7 +769,7 @@ impl StateService {
.dequeue_children(parent_hash);
for queued_child in queued_children {
let (PreparedBlock { hash, .. }, _) = queued_child;
let (SemanticallyVerifiedBlock { hash, .. }, _) = queued_child;
self.sent_non_finalized_block_hashes.add(&queued_child.0);
let send_result = non_finalized_block_write_sender.send(queued_child);
@ -798,9 +804,9 @@ impl StateService {
)
}
/// Assert some assumptions about the prepared `block` before it is queued.
fn assert_block_can_be_validated(&self, block: &PreparedBlock) {
// required by CommitBlock call
/// Assert some assumptions about the semantically verified `block` before it is queued.
fn assert_block_can_be_validated(&self, block: &SemanticallyVerifiedBlock) {
// required by `Request::CommitSemanticallyVerifiedBlock` call
assert!(
block.height > self.network.mandatory_checkpoint_height(),
"invalid non-finalized block height: the canopy checkpoint is mandatory, pre-canopy \
@ -901,11 +907,11 @@ impl Service<Request> for StateService {
match req {
// Uses queued_non_finalized_blocks and pending_utxos in the StateService
// Accesses shared writeable state in the StateService, NonFinalizedState, and ZebraDb.
Request::CommitBlock(prepared) => {
self.assert_block_can_be_validated(&prepared);
Request::CommitSemanticallyVerifiedBlock(semantically_verified) => {
self.assert_block_can_be_validated(&semantically_verified);
self.pending_utxos
.check_against_ordered(&prepared.new_outputs);
.check_against_ordered(&semantically_verified.new_outputs);
// # Performance
//
@ -919,7 +925,7 @@ impl Service<Request> for StateService {
// https://docs.rs/tokio/latest/tokio/task/fn.block_in_place.html
let rsp_rx = tokio::task::block_in_place(move || {
span.in_scope(|| self.queue_and_commit_non_finalized(prepared))
span.in_scope(|| self.queue_and_commit_non_finalized(semantically_verified))
});
// TODO:
@ -927,14 +933,16 @@ impl Service<Request> for StateService {
// as well as in poll_ready()
// The work is all done, the future just waits on a channel for the result
timer.finish(module_path!(), line!(), "CommitBlock");
timer.finish(module_path!(), line!(), "CommitSemanticallyVerifiedBlock");
let span = Span::current();
async move {
rsp_rx
.await
.map_err(|_recv_error| {
BoxError::from("block was dropped from the state CommitBlock queue")
BoxError::from(
"block was dropped from the queue of non-finalized blocks",
)
})
// TODO: replace with Result::flatten once it stabilises
// https://github.com/rust-lang/rust/issues/70142
@ -948,7 +956,7 @@ impl Service<Request> for StateService {
// Uses queued_finalized_blocks and pending_utxos in the StateService.
// Accesses shared writeable state in the StateService.
Request::CommitFinalizedBlock(finalized) => {
Request::CommitCheckpointVerifiedBlock(finalized) => {
// # Consensus
//
// A non-finalized block verification could have called AwaitUtxo
@ -970,15 +978,13 @@ impl Service<Request> for StateService {
// as well as in poll_ready()
// The work is all done, the future just waits on a channel for the result
timer.finish(module_path!(), line!(), "CommitFinalizedBlock");
timer.finish(module_path!(), line!(), "CommitCheckpointVerifiedBlock");
async move {
rsp_rx
.await
.map_err(|_recv_error| {
BoxError::from(
"block was dropped from the state CommitFinalizedBlock queue",
)
BoxError::from("block was dropped from the queue of finalized blocks")
})
// TODO: replace with Result::flatten once it stabilises
// https://github.com/rust-lang/rust/issues/70142
@ -1753,7 +1759,7 @@ impl Service<ReadRequest> for ReadStateService {
}
#[cfg(feature = "getblocktemplate-rpcs")]
ReadRequest::CheckBlockProposalValidity(prepared) => {
ReadRequest::CheckBlockProposalValidity(semantically_verified) => {
let state = self.clone();
// # Performance
@ -1770,7 +1776,7 @@ impl Service<ReadRequest> for ReadStateService {
return Err("state is empty: wait for Zebra to sync before submitting a proposal".into());
};
if prepared.block.header.previous_block_hash != best_tip_hash {
if semantically_verified.block.header.previous_block_hash != best_tip_hash {
return Err("proposal is not based on the current best chain tip: previous block hash must be the best chain tip".into());
}
@ -1778,13 +1784,13 @@ impl Service<ReadRequest> for ReadStateService {
// The non-finalized state that's used in the rest of the state (including finalizing
// blocks into the db) is not mutated here.
//
// TODO: Convert `CommitBlockError` to a new `ValidateProposalError`?
// TODO: Convert `CommitSemanticallyVerifiedError` to a new `ValidateProposalError`?
latest_non_finalized_state.disable_metrics();
write::validate_and_commit_non_finalized(
&state.db,
&mut latest_non_finalized_state,
prepared,
semantically_verified,
)?;
// The work is done in the future.

View File

@ -23,7 +23,7 @@ use zebra_chain::{
use crate::{
arbitrary::Prepare,
service::{check, ReadStateService, StateService},
BoxError, ChainTipChange, Config, LatestChainTip, PreparedBlock, Request, Response,
BoxError, ChainTipChange, Config, LatestChainTip, Request, Response, SemanticallyVerifiedBlock,
};
pub use zebra_chain::block::arbitrary::MAX_PARTIAL_CHAIN_BLOCKS;
@ -33,7 +33,7 @@ pub const CHAIN_TIP_UPDATE_WAIT_LIMIT: Duration = Duration::from_secs(2);
#[derive(Debug)]
pub struct PreparedChainTree {
chain: Arc<SummaryDebug<Vec<PreparedBlock>>>,
chain: Arc<SummaryDebug<Vec<SemanticallyVerifiedBlock>>>,
count: BinarySearch,
network: Network,
history_tree: Arc<HistoryTree>,
@ -41,7 +41,7 @@ pub struct PreparedChainTree {
impl ValueTree for PreparedChainTree {
type Value = (
Arc<SummaryDebug<Vec<PreparedBlock>>>,
Arc<SummaryDebug<Vec<SemanticallyVerifiedBlock>>>,
<BinarySearch as ValueTree>::Value,
Network,
Arc<HistoryTree>,
@ -71,7 +71,7 @@ pub struct PreparedChain {
chain: std::sync::Mutex<
Option<(
Network,
Arc<SummaryDebug<Vec<PreparedBlock>>>,
Arc<SummaryDebug<Vec<SemanticallyVerifiedBlock>>>,
Arc<HistoryTree>,
)>,
>,
@ -199,7 +199,7 @@ pub async fn populated_state(
) {
let requests = blocks
.into_iter()
.map(|block| Request::CommitFinalizedBlock(block.into()));
.map(|block| Request::CommitCheckpointVerifiedBlock(block.into()));
// TODO: write a test that checks the finalized to non-finalized transition with UTXOs,
// and set max_checkpoint_height and checkpoint_verify_concurrency_limit correctly.

View File

@ -20,7 +20,8 @@ use zebra_chain::{
};
use crate::{
request::ContextuallyValidBlock, service::watch_receiver::WatchReceiver, FinalizedBlock,
request::ContextuallyVerifiedBlock, service::watch_receiver::WatchReceiver,
CheckpointVerifiedBlock,
};
use TipAction::*;
@ -85,9 +86,9 @@ impl fmt::Display for ChainTipBlock {
}
}
impl From<ContextuallyValidBlock> for ChainTipBlock {
fn from(contextually_valid: ContextuallyValidBlock) -> Self {
let ContextuallyValidBlock {
impl From<ContextuallyVerifiedBlock> for ChainTipBlock {
fn from(contextually_valid: ContextuallyVerifiedBlock) -> Self {
let ContextuallyVerifiedBlock {
block,
hash,
height,
@ -106,9 +107,9 @@ impl From<ContextuallyValidBlock> for ChainTipBlock {
}
}
impl From<FinalizedBlock> for ChainTipBlock {
fn from(finalized: FinalizedBlock) -> Self {
let FinalizedBlock {
impl From<CheckpointVerifiedBlock> for ChainTipBlock {
fn from(finalized: CheckpointVerifiedBlock) -> Self {
let CheckpointVerifiedBlock {
block,
hash,
height,

View File

@ -16,7 +16,7 @@ use crate::{
block_iter::any_ancestor_blocks, check::difficulty::POW_ADJUSTMENT_BLOCK_SPAN,
finalized_state::ZebraDb, non_finalized_state::NonFinalizedState,
},
BoxError, PreparedBlock, ValidateContextError,
BoxError, SemanticallyVerifiedBlock, ValidateContextError,
};
// use self as check
@ -52,7 +52,7 @@ pub(crate) use difficulty::AdjustedDifficulty;
/// If the state contains less than 28 ([`POW_ADJUSTMENT_BLOCK_SPAN`]) blocks.
#[tracing::instrument(skip(prepared, finalized_tip_height, relevant_chain))]
pub(crate) fn block_is_valid_for_recent_chain<C>(
prepared: &PreparedBlock,
prepared: &SemanticallyVerifiedBlock,
network: Network,
finalized_tip_height: Option<block::Height>,
relevant_chain: C,
@ -369,7 +369,7 @@ where
pub(crate) fn initial_contextual_validity(
finalized_state: &ZebraDb,
non_finalized_state: &NonFinalizedState,
prepared: &PreparedBlock,
prepared: &SemanticallyVerifiedBlock,
) -> Result<(), ValidateContextError> {
let relevant_chain = any_ancestor_blocks(
non_finalized_state,

View File

@ -13,7 +13,7 @@ use zebra_chain::{
use crate::{
service::{finalized_state::ZebraDb, non_finalized_state::Chain},
PreparedBlock, ValidateContextError,
SemanticallyVerifiedBlock, ValidateContextError,
};
/// Checks the final Sapling and Orchard anchors specified by `transaction`
@ -312,9 +312,9 @@ fn sprout_anchors_refer_to_treestates(
Ok(())
}
/// Accepts a [`ZebraDb`], [`Chain`], and [`PreparedBlock`].
/// Accepts a [`ZebraDb`], [`Chain`], and [`SemanticallyVerifiedBlock`].
///
/// Iterates over the transactions in the [`PreparedBlock`] checking the final Sapling and Orchard anchors.
/// Iterates over the transactions in the [`SemanticallyVerifiedBlock`] checking the final Sapling and Orchard anchors.
///
/// This method checks for anchors computed from the final treestate of each block in
/// the `parent_chain` or `finalized_state`.
@ -322,7 +322,7 @@ fn sprout_anchors_refer_to_treestates(
pub(crate) fn block_sapling_orchard_anchors_refer_to_final_treestates(
finalized_state: &ZebraDb,
parent_chain: &Arc<Chain>,
prepared: &PreparedBlock,
prepared: &SemanticallyVerifiedBlock,
) -> Result<(), ValidateContextError> {
prepared.block.transactions.iter().enumerate().try_for_each(
|(tx_index_in_block, transaction)| {
@ -338,9 +338,9 @@ pub(crate) fn block_sapling_orchard_anchors_refer_to_final_treestates(
)
}
/// Accepts a [`ZebraDb`], [`Arc<Chain>`](Chain), and [`PreparedBlock`].
/// Accepts a [`ZebraDb`], [`Arc<Chain>`](Chain), and [`SemanticallyVerifiedBlock`].
///
/// Iterates over the transactions in the [`PreparedBlock`], and fetches the Sprout final treestates
/// Iterates over the transactions in the [`SemanticallyVerifiedBlock`], and fetches the Sprout final treestates
/// from the state.
///
/// Returns a `HashMap` of the Sprout final treestates from the state for [`sprout_anchors_refer_to_treestates()`]
@ -353,7 +353,7 @@ pub(crate) fn block_sapling_orchard_anchors_refer_to_final_treestates(
pub(crate) fn block_fetch_sprout_final_treestates(
finalized_state: &ZebraDb,
parent_chain: &Arc<Chain>,
prepared: &PreparedBlock,
prepared: &SemanticallyVerifiedBlock,
) -> HashMap<sprout::tree::Root, Arc<sprout::tree::NoteCommitmentTree>> {
let mut sprout_final_treestates = HashMap::new();

View File

@ -8,7 +8,7 @@ use zebra_chain::transaction::Transaction;
use crate::{
error::DuplicateNullifierError,
service::{finalized_state::ZebraDb, non_finalized_state::Chain},
PreparedBlock, ValidateContextError,
SemanticallyVerifiedBlock, ValidateContextError,
};
// Tidy up some doc links
@ -16,7 +16,7 @@ use crate::{
use crate::service;
/// Reject double-spends of nullifiers:
/// - one from this [`PreparedBlock`], and the other already committed to the
/// - one from this [`SemanticallyVerifiedBlock`], and the other already committed to the
/// [`FinalizedState`](service::FinalizedState).
///
/// (Duplicate non-finalized nullifiers are rejected during the chain update,
@ -32,7 +32,7 @@ use crate::service;
/// <https://zips.z.cash/protocol/protocol.pdf#nullifierset>
#[tracing::instrument(skip(prepared, finalized_state))]
pub(crate) fn no_duplicates_in_finalized_chain(
prepared: &PreparedBlock,
prepared: &SemanticallyVerifiedBlock,
finalized_state: &ZebraDb,
) -> Result<(), ValidateContextError> {
for nullifier in prepared.block.sprout_nullifiers() {

View File

@ -18,7 +18,7 @@ use crate::{
write::validate_and_commit_non_finalized,
},
tests::setup::{new_state_with_mainnet_genesis, transaction_v4_from_coinbase},
PreparedBlock, ValidateContextError,
SemanticallyVerifiedBlock, ValidateContextError,
};
// Sprout
@ -105,7 +105,10 @@ fn check_sprout_anchors() {
);
}
fn prepare_sprout_block(mut block_to_prepare: Block, reference_block: Block) -> PreparedBlock {
fn prepare_sprout_block(
mut block_to_prepare: Block,
reference_block: Block,
) -> SemanticallyVerifiedBlock {
// Convert the coinbase transaction to a version that the non-finalized state will accept.
block_to_prepare.transactions[0] =
transaction_v4_from_coinbase(&block_to_prepare.transactions[0]).into();

View File

@ -23,7 +23,7 @@ use crate::{
check::nullifier::tx_no_duplicates_in_chain, read, write::validate_and_commit_non_finalized,
},
tests::setup::{new_state_with_mainnet_genesis, transaction_v4_from_coinbase},
FinalizedBlock,
CheckpointVerifiedBlock,
ValidateContextError::{
DuplicateOrchardNullifier, DuplicateSaplingNullifier, DuplicateSproutNullifier,
},
@ -84,7 +84,7 @@ proptest! {
// randomly choose to commit the block to the finalized or non-finalized state
if use_finalized_state {
let block1 = FinalizedBlock::from(Arc::new(block1));
let block1 = CheckpointVerifiedBlock::from(Arc::new(block1));
let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test");
// the block was committed
@ -351,7 +351,7 @@ proptest! {
let block1_hash;
// randomly choose to commit the next block to the finalized or non-finalized state
if duplicate_in_finalized_state {
let block1 = FinalizedBlock::from(Arc::new(block1));
let block1 = CheckpointVerifiedBlock::from(Arc::new(block1));
let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test");
prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db));
@ -451,7 +451,7 @@ proptest! {
// randomly choose to commit the block to the finalized or non-finalized state
if use_finalized_state {
let block1 = FinalizedBlock::from(Arc::new(block1));
let block1 = CheckpointVerifiedBlock::from(Arc::new(block1));
let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test");
prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db));
@ -633,7 +633,7 @@ proptest! {
let block1_hash;
// randomly choose to commit the next block to the finalized or non-finalized state
if duplicate_in_finalized_state {
let block1 = FinalizedBlock::from(Arc::new(block1));
let block1 = CheckpointVerifiedBlock::from(Arc::new(block1));
let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test");
prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db));
@ -731,7 +731,7 @@ proptest! {
// randomly choose to commit the block to the finalized or non-finalized state
if use_finalized_state {
let block1 = FinalizedBlock::from(Arc::new(block1));
let block1 = CheckpointVerifiedBlock::from(Arc::new(block1));
let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test");
prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db));
@ -922,7 +922,7 @@ proptest! {
let block1_hash;
// randomly choose to commit the next block to the finalized or non-finalized state
if duplicate_in_finalized_state {
let block1 = FinalizedBlock::from(Arc::new(block1));
let block1 = CheckpointVerifiedBlock::from(Arc::new(block1));
let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test");
prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db));

View File

@ -21,7 +21,7 @@ use crate::{
write::validate_and_commit_non_finalized,
},
tests::setup::{new_state_with_mainnet_genesis, transaction_v4_from_coinbase},
FinalizedBlock,
CheckpointVerifiedBlock,
ValidateContextError::{
DuplicateTransparentSpend, EarlyTransparentSpend, ImmatureTransparentCoinbaseSpend,
MissingTransparentOutput, UnshieldedTransparentCoinbaseSpend,
@ -184,7 +184,7 @@ proptest! {
// randomly choose to commit the block to the finalized or non-finalized state
if use_finalized_state {
let block1 = FinalizedBlock::from(Arc::new(block1));
let block1 = CheckpointVerifiedBlock::from(Arc::new(block1));
let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test");
// the block was committed
@ -272,7 +272,7 @@ proptest! {
block2.transactions.push(spend_transaction.into());
if use_finalized_state_spend {
let block2 = FinalizedBlock::from(Arc::new(block2));
let block2 = CheckpointVerifiedBlock::from(Arc::new(block2));
let commit_result = finalized_state.commit_finalized_direct(block2.clone().into(), "test");
// the block was committed
@ -611,7 +611,7 @@ proptest! {
let block2 = Arc::new(block2);
if use_finalized_state_spend {
let block2 = FinalizedBlock::from(block2.clone());
let block2 = CheckpointVerifiedBlock::from(block2.clone());
let commit_result = finalized_state.commit_finalized_direct(block2.clone().into(), "test");
// the block was committed
@ -842,7 +842,7 @@ struct TestState {
/// The genesis block that has already been committed to the `state` service's
/// finalized state.
#[allow(dead_code)]
genesis: FinalizedBlock,
genesis: CheckpointVerifiedBlock,
/// A block at height 1, that has already been committed to the `state` service.
block1: Arc<Block>,
@ -883,7 +883,7 @@ fn new_state_with_mainnet_transparent_data(
let block1 = Arc::new(block1);
if use_finalized_state {
let block1 = FinalizedBlock::from(block1.clone());
let block1 = CheckpointVerifiedBlock::from(block1.clone());
let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test");
// the block was committed

View File

@ -10,14 +10,14 @@ use zebra_chain::{
use crate::{
constants::MIN_TRANSPARENT_COINBASE_MATURITY,
service::finalized_state::ZebraDb,
PreparedBlock,
SemanticallyVerifiedBlock,
ValidateContextError::{
self, DuplicateTransparentSpend, EarlyTransparentSpend, ImmatureTransparentCoinbaseSpend,
MissingTransparentOutput, UnshieldedTransparentCoinbaseSpend,
},
};
/// Lookup all the [`transparent::Utxo`]s spent by a [`PreparedBlock`].
/// Lookup all the [`transparent::Utxo`]s spent by a [`SemanticallyVerifiedBlock`].
/// If any of the spends are invalid, return an error.
/// Otherwise, return the looked up UTXOs.
///
@ -36,7 +36,7 @@ use crate::{
/// - spends of an immature transparent coinbase output,
/// - unshielded spends of a transparent coinbase output.
pub fn transparent_spend(
prepared: &PreparedBlock,
prepared: &SemanticallyVerifiedBlock,
non_finalized_chain_unspent_utxos: &HashMap<transparent::OutPoint, transparent::OrderedUtxo>,
non_finalized_chain_spent_utxos: &HashSet<transparent::OutPoint>,
finalized_state: &ZebraDb,
@ -225,7 +225,7 @@ pub fn transparent_coinbase_spend(
///
/// <https://zips.z.cash/protocol/protocol.pdf#transactions>
pub fn remaining_transaction_value(
prepared: &PreparedBlock,
prepared: &SemanticallyVerifiedBlock,
utxos: &HashMap<transparent::OutPoint, transparent::OrderedUtxo>,
) -> Result<(), ValidateContextError> {
for (tx_index_in_block, transaction) in prepared.block.transactions.iter().enumerate() {

View File

@ -23,9 +23,9 @@ use std::{
use zebra_chain::{block, parameters::Network};
use crate::{
request::FinalizedWithTrees,
request::ContextuallyVerifiedBlockWithTrees,
service::{check, QueuedFinalized},
BoxError, CloneError, Config, FinalizedBlock,
BoxError, CheckpointVerifiedBlock, CloneError, Config,
};
mod disk_db;
@ -161,23 +161,25 @@ impl FinalizedState {
self.network
}
/// Commit a finalized block to the state.
/// Commit a checkpoint-verified block to the state.
///
/// It's the caller's responsibility to ensure that blocks are committed in
/// order.
pub fn commit_finalized(
&mut self,
ordered_block: QueuedFinalized,
) -> Result<FinalizedBlock, BoxError> {
let (finalized, rsp_tx) = ordered_block;
let result =
self.commit_finalized_direct(finalized.clone().into(), "CommitFinalized request");
) -> Result<CheckpointVerifiedBlock, BoxError> {
let (checkpoint_verified, rsp_tx) = ordered_block;
let result = self.commit_finalized_direct(
checkpoint_verified.clone().into(),
"commit checkpoint-verified request",
);
if result.is_ok() {
metrics::counter!("state.checkpoint.finalized.block.count", 1);
metrics::gauge!(
"state.checkpoint.finalized.block.height",
finalized.height.0 as f64,
checkpoint_verified.height.0 as f64,
);
// This height gauge is updated for both fully verified and checkpoint blocks.
@ -185,14 +187,14 @@ impl FinalizedState {
// are committed in order.
metrics::gauge!(
"zcash.chain.verified.block.height",
finalized.height.0 as f64,
checkpoint_verified.height.0 as f64,
);
metrics::counter!("zcash.chain.verified.block.total", 1);
} else {
metrics::counter!("state.checkpoint.error.block.count", 1);
metrics::gauge!(
"state.checkpoint.error.block.height",
finalized.height.0 as f64,
checkpoint_verified.height.0 as f64,
);
};
@ -202,7 +204,9 @@ impl FinalizedState {
let _ = rsp_tx.send(result.clone().map_err(BoxError::from));
result.map(|_hash| finalized).map_err(BoxError::from)
result
.map(|_hash| checkpoint_verified)
.map_err(BoxError::from)
}
/// Immediately commit a `finalized` block to the finalized state.
@ -221,10 +225,10 @@ impl FinalizedState {
#[allow(clippy::unwrap_in_result)]
pub fn commit_finalized_direct(
&mut self,
finalized_with_trees: FinalizedWithTrees,
contextually_verified_with_trees: ContextuallyVerifiedBlockWithTrees,
source: &str,
) -> Result<block::Hash, BoxError> {
let finalized = finalized_with_trees.finalized;
let finalized = contextually_verified_with_trees.checkpoint_verified;
let committed_tip_hash = self.db.finalized_tip_hash();
let committed_tip_height = self.db.finalized_tip_height();
@ -252,7 +256,8 @@ impl FinalizedState {
);
}
let (history_tree, note_commitment_trees) = match finalized_with_trees.treestate {
let (history_tree, note_commitment_trees) = match contextually_verified_with_trees.treestate
{
// If the treestate associated with the block was supplied, use it
// without recomputing it.
Some(ref treestate) => (

View File

@ -9,7 +9,7 @@ use crate::{
config::Config,
service::{
arbitrary::PreparedChain,
finalized_state::{FinalizedBlock, FinalizedState},
finalized_state::{CheckpointVerifiedBlock, FinalizedState},
},
tests::FakeChainHelper,
};
@ -28,9 +28,9 @@ fn blocks_with_v5_transactions() -> Result<()> {
let mut height = Height(0);
// use `count` to minimize test failures, so they are easier to diagnose
for block in chain.iter().take(count) {
let finalized = FinalizedBlock::from(block.block.clone());
let checkpoint_verified = CheckpointVerifiedBlock::from(block.block.clone());
let hash = state.commit_finalized_direct(
finalized.into(),
checkpoint_verified.into(),
"blocks_with_v5_transactions test"
);
prop_assert_eq!(Some(height), state.finalized_tip_height());
@ -83,18 +83,18 @@ fn all_upgrades_and_wrong_commitments_with_fake_activation_heights() -> Result<(
h == nu5_height ||
h == nu5_height_plus1 => {
let block = block.block.clone().set_block_commitment([0x42; 32]);
let finalized = FinalizedBlock::from(block);
let checkpoint_verified = CheckpointVerifiedBlock::from(block);
state.commit_finalized_direct(
finalized.into(),
checkpoint_verified.into(),
"all_upgrades test"
).expect_err("Must fail commitment check");
failure_count += 1;
},
_ => {},
}
let finalized = FinalizedBlock::from(block.block.clone());
let checkpoint_verified = CheckpointVerifiedBlock::from(block.block.clone());
let hash = state.commit_finalized_direct(
finalized.into(),
checkpoint_verified.into(),
"all_upgrades test"
).unwrap();
prop_assert_eq!(Some(height), state.finalized_tip_height());

View File

@ -38,7 +38,7 @@ use crate::{
transparent::{AddressBalanceLocation, OutputLocation},
},
zebra_db::{metrics::block_precommit_metrics, ZebraDb},
FinalizedBlock,
CheckpointVerifiedBlock,
},
BoxError, HashOrHeight,
};
@ -282,7 +282,7 @@ impl ZebraDb {
/// - Propagates any errors from updating history and note commitment trees
pub(in super::super) fn write_block(
&mut self,
finalized: FinalizedBlock,
finalized: CheckpointVerifiedBlock,
history_tree: Arc<HistoryTree>,
note_commitment_trees: NoteCommitmentTrees,
network: Network,
@ -420,7 +420,7 @@ impl DiskWriteBatch {
pub fn prepare_block_batch(
&mut self,
db: &DiskDb,
finalized: FinalizedBlock,
finalized: CheckpointVerifiedBlock,
new_outputs_by_out_loc: BTreeMap<OutputLocation, transparent::Utxo>,
spent_utxos_by_outpoint: HashMap<transparent::OutPoint, transparent::Utxo>,
spent_utxos_by_out_loc: BTreeMap<OutputLocation, transparent::Utxo>,
@ -429,7 +429,7 @@ impl DiskWriteBatch {
note_commitment_trees: NoteCommitmentTrees,
value_pool: ValueBalance<NonNegative>,
) -> Result<(), BoxError> {
let FinalizedBlock {
let CheckpointVerifiedBlock {
block,
hash,
height,
@ -485,7 +485,7 @@ impl DiskWriteBatch {
pub fn prepare_block_header_and_transaction_data_batch(
&mut self,
db: &DiskDb,
finalized: &FinalizedBlock,
finalized: &CheckpointVerifiedBlock,
) -> Result<(), BoxError> {
// Blocks
let block_header_by_height = db.cf_handle("block_header_by_height").unwrap();
@ -497,7 +497,7 @@ impl DiskWriteBatch {
let hash_by_tx_loc = db.cf_handle("hash_by_tx_loc").unwrap();
let tx_loc_by_hash = db.cf_handle("tx_loc_by_hash").unwrap();
let FinalizedBlock {
let CheckpointVerifiedBlock {
block,
hash,
height,
@ -541,8 +541,12 @@ impl DiskWriteBatch {
/// If `finalized.block` is not a genesis block, does nothing.
///
/// This method never returns an error.
pub fn prepare_genesis_batch(&mut self, db: &DiskDb, finalized: &FinalizedBlock) -> bool {
let FinalizedBlock { block, .. } = finalized;
pub fn prepare_genesis_batch(
&mut self,
db: &DiskDb,
finalized: &CheckpointVerifiedBlock,
) -> bool {
let CheckpointVerifiedBlock { block, .. } = finalized;
if block.header.previous_block_hash == GENESIS_PREVIOUS_BLOCK_HASH {
self.prepare_genesis_note_commitment_tree_batch(db, finalized);

View File

@ -27,7 +27,7 @@ use zebra_test::vectors::{MAINNET_BLOCKS, TESTNET_BLOCKS};
use crate::{
service::finalized_state::{disk_db::DiskWriteBatch, FinalizedState},
Config, FinalizedBlock,
CheckpointVerifiedBlock, Config,
};
/// Storage round-trip test for block and transaction data in the finalized state database.
@ -112,7 +112,7 @@ fn test_block_db_round_trip_with(
original_block.clone().into()
} else {
// Fake a zero height
FinalizedBlock::with_hash_and_height(
CheckpointVerifiedBlock::with_hash_and_height(
original_block.clone(),
original_block.hash(),
Height(0),

View File

@ -24,7 +24,7 @@ use crate::{
service::finalized_state::{
disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk},
zebra_db::ZebraDb,
FinalizedBlock,
CheckpointVerifiedBlock,
},
BoxError,
};
@ -70,12 +70,12 @@ impl DiskWriteBatch {
pub fn prepare_history_batch(
&mut self,
db: &DiskDb,
finalized: &FinalizedBlock,
finalized: &CheckpointVerifiedBlock,
history_tree: Arc<HistoryTree>,
) -> Result<(), BoxError> {
let history_tree_cf = db.cf_handle("history_tree").unwrap();
let FinalizedBlock { height, .. } = finalized;
let CheckpointVerifiedBlock { height, .. } = finalized;
// Update the tree in state
let current_tip_height = *height - 1;
@ -108,13 +108,13 @@ impl DiskWriteBatch {
pub fn prepare_chain_value_pools_batch(
&mut self,
db: &DiskDb,
finalized: &FinalizedBlock,
finalized: &CheckpointVerifiedBlock,
utxos_spent_by_block: HashMap<transparent::OutPoint, transparent::Utxo>,
value_pool: ValueBalance<NonNegative>,
) -> Result<(), BoxError> {
let tip_chain_value_pool = db.cf_handle("tip_chain_value_pool").unwrap();
let FinalizedBlock { block, .. } = finalized;
let CheckpointVerifiedBlock { block, .. } = finalized;
let new_pool = value_pool.add_block(block.borrow(), &utxos_spent_by_block)?;
self.zs_insert(&tip_chain_value_pool, (), new_pool);

View File

@ -23,7 +23,7 @@ use crate::{
service::finalized_state::{
disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk},
zebra_db::ZebraDb,
FinalizedBlock,
CheckpointVerifiedBlock,
},
BoxError,
};
@ -179,9 +179,9 @@ impl DiskWriteBatch {
pub fn prepare_shielded_transaction_batch(
&mut self,
db: &DiskDb,
finalized: &FinalizedBlock,
finalized: &CheckpointVerifiedBlock,
) -> Result<(), BoxError> {
let FinalizedBlock { block, .. } = finalized;
let CheckpointVerifiedBlock { block, .. } = finalized;
// Index each transaction's shielded data
for transaction in &block.transactions {
@ -234,7 +234,7 @@ impl DiskWriteBatch {
pub fn prepare_note_commitment_batch(
&mut self,
db: &DiskDb,
finalized: &FinalizedBlock,
finalized: &CheckpointVerifiedBlock,
note_commitment_trees: NoteCommitmentTrees,
history_tree: Arc<HistoryTree>,
) -> Result<(), BoxError> {
@ -246,7 +246,7 @@ impl DiskWriteBatch {
let sapling_note_commitment_tree_cf = db.cf_handle("sapling_note_commitment_tree").unwrap();
let orchard_note_commitment_tree_cf = db.cf_handle("orchard_note_commitment_tree").unwrap();
let FinalizedBlock { height, .. } = finalized;
let CheckpointVerifiedBlock { height, .. } = finalized;
// Use the cached values that were previously calculated in parallel.
let sprout_root = note_commitment_trees.sprout.root();
@ -297,13 +297,13 @@ impl DiskWriteBatch {
pub fn prepare_genesis_note_commitment_tree_batch(
&mut self,
db: &DiskDb,
finalized: &FinalizedBlock,
finalized: &CheckpointVerifiedBlock,
) {
let sprout_note_commitment_tree_cf = db.cf_handle("sprout_note_commitment_tree").unwrap();
let sapling_note_commitment_tree_cf = db.cf_handle("sapling_note_commitment_tree").unwrap();
let orchard_note_commitment_tree_cf = db.cf_handle("orchard_note_commitment_tree").unwrap();
let FinalizedBlock { height, .. } = finalized;
let CheckpointVerifiedBlock { height, .. } = finalized;
// Insert empty note commitment trees. Note that these can't be
// used too early (e.g. the Orchard tree before Nu5 activates)

View File

@ -35,7 +35,7 @@ use crate::{
},
zebra_db::ZebraDb,
},
BoxError, FinalizedBlock,
BoxError, CheckpointVerifiedBlock,
};
impl ZebraDb {
@ -369,13 +369,13 @@ impl DiskWriteBatch {
pub fn prepare_transparent_transaction_batch(
&mut self,
db: &DiskDb,
finalized: &FinalizedBlock,
finalized: &CheckpointVerifiedBlock,
new_outputs_by_out_loc: &BTreeMap<OutputLocation, transparent::Utxo>,
spent_utxos_by_outpoint: &HashMap<transparent::OutPoint, transparent::Utxo>,
spent_utxos_by_out_loc: &BTreeMap<OutputLocation, transparent::Utxo>,
mut address_balances: HashMap<transparent::Address, AddressBalanceLocation>,
) -> Result<(), BoxError> {
let FinalizedBlock { block, height, .. } = finalized;
let CheckpointVerifiedBlock { block, height, .. } = finalized;
// Update created and spent transparent outputs
self.prepare_new_transparent_outputs_batch(

View File

@ -16,9 +16,9 @@ use zebra_chain::{
use crate::{
constants::MAX_NON_FINALIZED_CHAIN_FORKS,
request::{ContextuallyValidBlock, FinalizedWithTrees},
request::{ContextuallyVerifiedBlock, ContextuallyVerifiedBlockWithTrees},
service::{check, finalized_state::ZebraDb},
PreparedBlock, ValidateContextError,
SemanticallyVerifiedBlock, ValidateContextError,
};
mod chain;
@ -174,7 +174,7 @@ impl NonFinalizedState {
/// Finalize the lowest height block in the non-finalized portion of the best
/// chain and update all side-chains to match.
pub fn finalize(&mut self) -> FinalizedWithTrees {
pub fn finalize(&mut self) -> ContextuallyVerifiedBlockWithTrees {
// Chain::cmp uses the partial cumulative work, and the hash of the tip block.
// Neither of these fields has interior mutability.
// (And when the tip block is dropped for a chain, the chain is also dropped.)
@ -226,7 +226,7 @@ impl NonFinalizedState {
self.update_metrics_for_chains();
// Add the treestate to the finalized block.
FinalizedWithTrees::new(best_chain_root, root_treestate)
ContextuallyVerifiedBlockWithTrees::new(best_chain_root, root_treestate)
}
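A hedged sketch of the flow this rename describes, mirroring the writer task in write.rs below (assumes `non_finalized_state: NonFinalizedState` and `finalized_state: FinalizedState` handles):
// Once the best chain grows past the reorg limit, its root block has
// already been contextually verified, so its cached treestate can be
// committed without recomputing the trees.
let contextually_verified_with_trees = non_finalized_state.finalize();
finalized_state
    .commit_finalized_direct(contextually_verified_with_trees, "best non-finalized chain root")
    .expect("trees were already checked by the non-finalized state");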
/// Commit block to the non-finalized state, on top of:
@ -235,7 +235,7 @@ impl NonFinalizedState {
#[tracing::instrument(level = "debug", skip(self, finalized_state, prepared))]
pub fn commit_block(
&mut self,
prepared: PreparedBlock,
prepared: SemanticallyVerifiedBlock,
finalized_state: &ZebraDb,
) -> Result<(), ValidateContextError> {
let parent_hash = prepared.block.header.previous_block_hash;
@ -266,7 +266,7 @@ impl NonFinalizedState {
#[allow(clippy::unwrap_in_result)]
pub fn commit_new_chain(
&mut self,
prepared: PreparedBlock,
prepared: SemanticallyVerifiedBlock,
finalized_state: &ZebraDb,
) -> Result<(), ValidateContextError> {
let finalized_tip_height = finalized_state.finalized_tip_height();
@ -308,7 +308,7 @@ impl NonFinalizedState {
fn validate_and_commit(
&self,
new_chain: Arc<Chain>,
prepared: PreparedBlock,
prepared: SemanticallyVerifiedBlock,
finalized_state: &ZebraDb,
) -> Result<Arc<Chain>, ValidateContextError> {
// Reads from disk
@ -336,7 +336,7 @@ impl NonFinalizedState {
);
// Quick check that doesn't read from disk
let contextual = ContextuallyValidBlock::with_block_and_spent_utxos(
let contextual = ContextuallyVerifiedBlock::with_block_and_spent_utxos(
prepared.clone(),
spent_utxos.clone(),
)
@ -358,7 +358,7 @@ impl NonFinalizedState {
#[tracing::instrument(skip(new_chain, sprout_final_treestates))]
fn validate_and_update_parallel(
new_chain: Arc<Chain>,
contextual: ContextuallyValidBlock,
contextual: ContextuallyVerifiedBlock,
sprout_final_treestates: HashMap<sprout::tree::Root, Arc<sprout::tree::NoteCommitmentTree>>,
) -> Result<Arc<Chain>, ValidateContextError> {
let mut block_commitment_result = None;
@ -489,7 +489,7 @@ impl NonFinalizedState {
/// Returns the block at the tip of the best chain.
#[allow(dead_code)]
pub fn best_tip_block(&self) -> Option<&ContextuallyValidBlock> {
pub fn best_tip_block(&self) -> Option<&ContextuallyVerifiedBlock> {
let best_chain = self.best_chain()?;
best_chain.tip_block()

View File

@ -28,7 +28,7 @@ use zebra_chain::{
};
use crate::{
request::Treestate, service::check, ContextuallyValidBlock, HashOrHeight, OutputLocation,
request::Treestate, service::check, ContextuallyVerifiedBlock, HashOrHeight, OutputLocation,
TransactionLocation, ValidateContextError,
};
@ -50,7 +50,7 @@ pub struct Chain {
// Blocks, heights, hashes, and transaction locations
//
/// The contextually valid blocks which form this non-finalized partial chain, in height order.
pub(crate) blocks: BTreeMap<block::Height, ContextuallyValidBlock>,
pub(crate) blocks: BTreeMap<block::Height, ContextuallyVerifiedBlock>,
/// An index of block heights for each block hash in `blocks`.
pub height_by_hash: HashMap<block::Hash, block::Height>,
@ -318,10 +318,10 @@ impl Chain {
///
/// If the block is invalid, drops this chain, and returns an error.
///
/// Note: a [`ContextuallyValidBlock`] isn't actually contextually valid until
/// Note: a [`ContextuallyVerifiedBlock`] isn't actually contextually valid until
/// [`Self::update_chain_tip_with`] returns success.
#[instrument(level = "debug", skip(self, block), fields(block = %block.block))]
pub fn push(mut self, block: ContextuallyValidBlock) -> Result<Chain, ValidateContextError> {
pub fn push(mut self, block: ContextuallyVerifiedBlock) -> Result<Chain, ValidateContextError> {
// update cumulative data members
self.update_chain_tip_with(&block)?;
@ -334,7 +334,7 @@ impl Chain {
/// Pops the lowest height block of the non-finalized portion of a chain,
/// and returns it with its associated treestate.
#[instrument(level = "debug", skip(self))]
pub(crate) fn pop_root(&mut self) -> (ContextuallyValidBlock, Treestate) {
pub(crate) fn pop_root(&mut self) -> (ContextuallyVerifiedBlock, Treestate) {
// Obtain the lowest height.
let block_height = self.non_finalized_root_height();
@ -388,9 +388,9 @@ impl Chain {
self.network
}
/// Returns the [`ContextuallyValidBlock`] with [`block::Hash`] or
/// Returns the [`ContextuallyVerifiedBlock`] with [`block::Hash`] or
/// [`Height`](zebra_chain::block::Height), if it exists in this chain.
pub fn block(&self, hash_or_height: HashOrHeight) -> Option<&ContextuallyValidBlock> {
pub fn block(&self, hash_or_height: HashOrHeight) -> Option<&ContextuallyVerifiedBlock> {
let height =
hash_or_height.height_or_else(|hash| self.height_by_hash.get(&hash).cloned())?;
@ -969,7 +969,7 @@ impl Chain {
/// Return the non-finalized tip block for this chain,
/// or `None` if `self.blocks` is empty.
pub fn tip_block(&self) -> Option<&ContextuallyValidBlock> {
pub fn tip_block(&self) -> Option<&ContextuallyVerifiedBlock> {
self.blocks.values().next_back()
}
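A tiny usage sketch (assuming a non-empty `chain: Chain`):
// The tip of a non-finalized chain is a ContextuallyVerifiedBlock,
// so its hash and height are available without re-deriving them.
if let Some(tip) = chain.tip_block() {
    tracing::debug!(height = ?tip.height, hash = ?tip.hash, "best non-finalized chain tip");
}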
@ -1123,12 +1123,12 @@ impl Chain {
/// Update the chain tip with the `contextually_valid` block,
/// running note commitment tree updates in parallel with other updates.
///
/// Used to implement `update_chain_tip_with::<ContextuallyValidBlock>`.
/// Used to implement `update_chain_tip_with::<ContextuallyVerifiedBlock>`.
#[instrument(skip(self, contextually_valid), fields(block = %contextually_valid.block))]
#[allow(clippy::unwrap_in_result)]
fn update_chain_tip_with_block_parallel(
&mut self,
contextually_valid: &ContextuallyValidBlock,
contextually_valid: &ContextuallyVerifiedBlock,
) -> Result<(), ValidateContextError> {
let height = contextually_valid.height;
@ -1186,12 +1186,12 @@ impl Chain {
/// Update the chain tip with the `contextually_valid` block,
/// except for the note commitment and history tree updates.
///
/// Used to implement `update_chain_tip_with::<ContextuallyValidBlock>`.
/// Used to implement `update_chain_tip_with::<ContextuallyVerifiedBlock>`.
#[instrument(skip(self, contextually_valid), fields(block = %contextually_valid.block))]
#[allow(clippy::unwrap_in_result)]
fn update_chain_tip_with_block_except_trees(
&mut self,
contextually_valid: &ContextuallyValidBlock,
contextually_valid: &ContextuallyVerifiedBlock,
) -> Result<(), ValidateContextError> {
let (
block,
@ -1327,12 +1327,12 @@ trait UpdateWith<T> {
fn revert_chain_with(&mut self, _: &T, position: RevertPosition);
}
impl UpdateWith<ContextuallyValidBlock> for Chain {
impl UpdateWith<ContextuallyVerifiedBlock> for Chain {
#[instrument(skip(self, contextually_valid), fields(block = %contextually_valid.block))]
#[allow(clippy::unwrap_in_result)]
fn update_chain_tip_with(
&mut self,
contextually_valid: &ContextuallyValidBlock,
contextually_valid: &ContextuallyVerifiedBlock,
) -> Result<(), ValidateContextError> {
self.update_chain_tip_with_block_parallel(contextually_valid)
}
@ -1340,7 +1340,7 @@ impl UpdateWith<ContextuallyValidBlock> for Chain {
#[instrument(skip(self, contextually_valid), fields(block = %contextually_valid.block))]
fn revert_chain_with(
&mut self,
contextually_valid: &ContextuallyValidBlock,
contextually_valid: &ContextuallyVerifiedBlock,
position: RevertPosition,
) {
let (

View File

@ -17,7 +17,7 @@ use zebra_chain::{
use crate::{
arbitrary::Prepare,
request::ContextuallyValidBlock,
request::ContextuallyVerifiedBlock,
service::{
arbitrary::PreparedChain,
finalized_state::FinalizedState,
@ -55,7 +55,7 @@ fn push_genesis_chain() -> Result<()> {
for block in chain.iter().take(count).cloned() {
let block =
ContextuallyValidBlock::with_block_and_spent_utxos(
ContextuallyVerifiedBlock::with_block_and_spent_utxos(
block,
only_chain.unspent_utxos(),
)
@ -104,7 +104,7 @@ fn push_history_tree_chain() -> Result<()> {
for block in chain
.iter()
.take(count)
.map(ContextuallyValidBlock::test_with_zero_chain_pool_change) {
.map(ContextuallyVerifiedBlock::test_with_zero_chain_pool_change) {
only_chain = only_chain.push(block)?;
}
@ -151,7 +151,7 @@ fn forked_equals_pushed_genesis() -> Result<()> {
ValueBalance::zero(),
);
for block in chain.iter().take(fork_at_count).cloned() {
let block = ContextuallyValidBlock::with_block_and_spent_utxos(
let block = ContextuallyVerifiedBlock::with_block_and_spent_utxos(
block,
partial_chain.unspent_utxos(),
)?;
@ -172,7 +172,7 @@ fn forked_equals_pushed_genesis() -> Result<()> {
);
for block in chain.iter().cloned() {
let block =
ContextuallyValidBlock::with_block_and_spent_utxos(block, full_chain.unspent_utxos())?;
ContextuallyVerifiedBlock::with_block_and_spent_utxos(block, full_chain.unspent_utxos())?;
full_chain = full_chain
.push(block.clone())
.expect("full chain push is valid");
@ -216,7 +216,7 @@ fn forked_equals_pushed_genesis() -> Result<()> {
// same original full chain.
for block in chain.iter().skip(fork_at_count).cloned() {
let block =
ContextuallyValidBlock::with_block_and_spent_utxos(block, forked.unspent_utxos())?;
ContextuallyVerifiedBlock::with_block_and_spent_utxos(block, forked.unspent_utxos())?;
forked = forked.push(block).expect("forked chain push is valid");
}
@ -256,13 +256,13 @@ fn forked_equals_pushed_history_tree() -> Result<()> {
for block in chain
.iter()
.take(fork_at_count)
.map(ContextuallyValidBlock::test_with_zero_chain_pool_change) {
.map(ContextuallyVerifiedBlock::test_with_zero_chain_pool_change) {
partial_chain = partial_chain.push(block)?;
}
for block in chain
.iter()
.map(ContextuallyValidBlock::test_with_zero_chain_pool_change) {
.map(ContextuallyVerifiedBlock::test_with_zero_chain_pool_change) {
full_chain = full_chain.push(block.clone())?;
}
@ -279,7 +279,7 @@ fn forked_equals_pushed_history_tree() -> Result<()> {
for block in chain
.iter()
.skip(fork_at_count)
.map(ContextuallyValidBlock::test_with_zero_chain_pool_change) {
.map(ContextuallyVerifiedBlock::test_with_zero_chain_pool_change) {
forked = forked.push(block)?;
}
@ -310,7 +310,7 @@ fn finalized_equals_pushed_genesis() -> Result<()> {
// TODO: fix this test or the code so the full_chain temporary trees aren't overwritten
let chain = chain.iter()
.filter(|block| block.height != Height(0))
.map(ContextuallyValidBlock::test_with_zero_spent_utxos);
.map(ContextuallyVerifiedBlock::test_with_zero_spent_utxos);
// use `end_count` as the number of non-finalized blocks at the end of the chain,
// make sure this test pushes at least 1 block in the partial chain.
@ -399,7 +399,7 @@ fn finalized_equals_pushed_history_tree() -> Result<()> {
for block in chain
.iter()
.take(finalized_count)
.map(ContextuallyValidBlock::test_with_zero_spent_utxos) {
.map(ContextuallyVerifiedBlock::test_with_zero_spent_utxos) {
full_chain = full_chain.push(block)?;
}
@ -416,14 +416,14 @@ fn finalized_equals_pushed_history_tree() -> Result<()> {
for block in chain
.iter()
.skip(finalized_count)
.map(ContextuallyValidBlock::test_with_zero_spent_utxos) {
.map(ContextuallyVerifiedBlock::test_with_zero_spent_utxos) {
partial_chain = partial_chain.push(block.clone())?;
}
for block in chain
.iter()
.skip(finalized_count)
.map(ContextuallyValidBlock::test_with_zero_spent_utxos) {
.map(ContextuallyVerifiedBlock::test_with_zero_spent_utxos) {
full_chain= full_chain.push(block.clone())?;
}

View File

@ -214,11 +214,11 @@ fn finalize_pops_from_best_chain_for_network(network: Network) -> Result<()> {
state.commit_block(child.prepare(), &finalized_state)?;
let finalized_with_trees = state.finalize();
let finalized = finalized_with_trees.finalized;
let finalized = finalized_with_trees.checkpoint_verified;
assert_eq!(block1, finalized.block);
let finalized_with_trees = state.finalize();
let finalized = finalized_with_trees.finalized;
let finalized = finalized_with_trees.checkpoint_verified;
assert_eq!(block2, finalized.block);
assert!(state.best_chain().is_none());

View File

@ -10,20 +10,20 @@ use tracing::instrument;
use zebra_chain::{block, transparent};
use crate::{BoxError, FinalizedBlock, PreparedBlock};
use crate::{BoxError, CheckpointVerifiedBlock, SemanticallyVerifiedBlock};
#[cfg(test)]
mod tests;
/// A queued finalized block, and its corresponding [`Result`] channel.
pub type QueuedFinalized = (
FinalizedBlock,
CheckpointVerifiedBlock,
oneshot::Sender<Result<block::Hash, BoxError>>,
);
/// A queued non-finalized block, and its corresponding [`Result`] channel.
pub type QueuedNonFinalized = (
PreparedBlock,
SemanticallyVerifiedBlock,
oneshot::Sender<Result<block::Hash, BoxError>>,
);
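A minimal sketch of how one of these queue entries is built (it mirrors the channel plumbing in `queue_and_commit_finalized` above; `checkpoint_verified` is assumed to exist):
use tokio::sync::oneshot;

// The caller keeps rsp_rx to learn whether the commit eventually succeeded;
// the writer task sends Ok(hash) or an error on rsp_tx.
let (rsp_tx, rsp_rx) = oneshot::channel();
let queued: QueuedFinalized = (checkpoint_verified, rsp_tx);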
@ -242,7 +242,7 @@ impl SentHashes {
///
/// Assumes that blocks are added in the order of their height between `finish_batch` calls
/// for efficient pruning.
pub fn add(&mut self, block: &PreparedBlock) {
pub fn add(&mut self, block: &SemanticallyVerifiedBlock) {
// Track known UTXOs in sent blocks.
let outpoints = block
.new_outputs
@ -271,7 +271,7 @@ impl SentHashes {
/// for efficient pruning.
///
/// For more details see `add()`.
pub fn add_finalized(&mut self, block: &FinalizedBlock) {
pub fn add_finalized(&mut self, block: &CheckpointVerifiedBlock) {
// Track known UTXOs in sent blocks.
let outpoints = block
.new_outputs

View File

@ -23,7 +23,7 @@ use crate::{
init_test,
service::{arbitrary::populated_state, chain_tip::TipAction, StateService},
tests::setup::{partial_nu5_chain_strategy, transaction_v4_from_coinbase},
BoxError, Config, FinalizedBlock, PreparedBlock, Request, Response,
BoxError, CheckpointVerifiedBlock, Config, Request, Response, SemanticallyVerifiedBlock,
};
const LAST_BLOCK_HEIGHT: u32 = 10;
@ -216,7 +216,7 @@ async fn empty_state_still_responds_to_requests() -> Result<()> {
zebra_test::vectors::BLOCK_MAINNET_419200_BYTES.zcash_deserialize_into::<Arc<Block>>()?;
let iter = vec![
// No checks for CommitBlock or CommitFinalizedBlock because empty state
// No checks for CommitSemanticallyVerifiedBlock or CommitCheckpointVerifiedBlock because empty state
// precondition doesn't matter to them
(Request::Depth(block.hash()), Ok(Response::Depth(None))),
(Request::Tip, Ok(Response::Tip(None))),
@ -555,8 +555,8 @@ proptest! {
fn continuous_empty_blocks_from_test_vectors() -> impl Strategy<
Value = (
Network,
SummaryDebug<Vec<FinalizedBlock>>,
SummaryDebug<Vec<PreparedBlock>>,
SummaryDebug<Vec<CheckpointVerifiedBlock>>,
SummaryDebug<Vec<SemanticallyVerifiedBlock>>,
),
> {
any::<Network>()
@ -567,7 +567,7 @@ fn continuous_empty_blocks_from_test_vectors() -> impl Strategy<
Network::Testnet => &*zebra_test::vectors::CONTINUOUS_TESTNET_BLOCKS,
};
// Transform the test vector's block bytes into a vector of `PreparedBlock`s.
// Transform the test vector's block bytes into a vector of `SemanticallyVerifiedBlock`s.
let blocks: Vec<_> = raw_blocks
.iter()
.map(|(_height, &block_bytes)| {
@ -591,7 +591,7 @@ fn continuous_empty_blocks_from_test_vectors() -> impl Strategy<
let non_finalized_blocks = blocks.split_off(finalized_blocks_count);
let finalized_blocks: Vec<_> = blocks
.into_iter()
.map(|prepared_block| FinalizedBlock::from(prepared_block.block))
.map(|prepared_block| CheckpointVerifiedBlock::from(prepared_block.block))
.collect();
(

View File

@ -20,7 +20,7 @@ use crate::{
queued_blocks::{QueuedFinalized, QueuedNonFinalized},
BoxError, ChainTipBlock, ChainTipSender, CloneError,
},
CommitBlockError, PreparedBlock,
CommitSemanticallyVerifiedError, SemanticallyVerifiedBlock,
};
// These types are used in doc links
@ -49,8 +49,8 @@ const PARENT_ERROR_MAP_LIMIT: usize = MAX_BLOCK_REORG_HEIGHT as usize * 2;
pub(crate) fn validate_and_commit_non_finalized(
finalized_state: &ZebraDb,
non_finalized_state: &mut NonFinalizedState,
prepared: PreparedBlock,
) -> Result<(), CommitBlockError> {
prepared: SemanticallyVerifiedBlock,
) -> Result<(), CommitSemanticallyVerifiedError> {
check::initial_contextual_validity(finalized_state, non_finalized_state, &prepared)?;
let parent_hash = prepared.block.header.previous_block_hash;
@ -288,9 +288,9 @@ pub fn write_blocks_from_channels(
while non_finalized_state.best_chain_len() > MAX_BLOCK_REORG_HEIGHT {
tracing::trace!("finalizing block past the reorg limit");
let finalized_with_trees = non_finalized_state.finalize();
let contextually_verified_with_trees = non_finalized_state.finalize();
finalized_state
.commit_finalized_direct(finalized_with_trees, "best non-finalized chain root")
.commit_finalized_direct(contextually_verified_with_trees, "commit contextually-verified request")
.expect(
"unexpected finalized block commit error: note commitment and history trees were already checked by the non-finalized state",
);

View File

@ -18,7 +18,7 @@ use crate::{
service::{
check, finalized_state::FinalizedState, non_finalized_state::NonFinalizedState, read,
},
Config, FinalizedBlock,
CheckpointVerifiedBlock, Config,
};
/// Generate a chain that allows us to make tests for the legacy chain rules.
@ -83,8 +83,8 @@ pub(crate) fn partial_nu5_chain_strategy(
/// Return a new `StateService` containing the mainnet genesis block.
/// Also returns the finalized genesis block itself.
pub(crate) fn new_state_with_mainnet_genesis() -> (FinalizedState, NonFinalizedState, FinalizedBlock)
{
pub(crate) fn new_state_with_mainnet_genesis(
) -> (FinalizedState, NonFinalizedState, CheckpointVerifiedBlock) {
let genesis = zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES
.zcash_deserialize_into::<Arc<Block>>()
.expect("block should deserialize");
@ -105,7 +105,7 @@ pub(crate) fn new_state_with_mainnet_genesis() -> (FinalizedState, NonFinalizedS
read::best_tip(&non_finalized_state, &finalized_state.db)
);
let genesis = FinalizedBlock::from(genesis);
let genesis = CheckpointVerifiedBlock::from(genesis);
finalized_state
.commit_finalized_direct(genesis.clone().into(), "test")
.expect("unexpected invalid genesis block test vector");


@ -25,7 +25,7 @@ static COMMIT_FINALIZED_BLOCK_MAINNET: Lazy<
let hash = block.hash();
vec![
(
Request::CommitFinalizedBlock(block.into()),
Request::CommitCheckpointVerifiedBlock(block.into()),
Ok(Response::Committed(hash)),
),
(
@ -46,7 +46,7 @@ static COMMIT_FINALIZED_BLOCK_TESTNET: Lazy<
let hash = block.hash();
vec![
(
Request::CommitFinalizedBlock(block.into()),
Request::CommitCheckpointVerifiedBlock(block.into()),
Ok(Response::Committed(hash)),
),
(


@ -230,7 +230,7 @@ impl CopyStateCmd {
let target_block_commit_hash = target_state
.ready()
.await?
.call(new_zs::Request::CommitFinalizedBlock(
.call(new_zs::Request::CommitCheckpointVerifiedBlock(
source_block.clone().into(),
))
.await?;
@ -240,7 +240,7 @@ impl CopyStateCmd {
target_block_commit_hash
}
response => Err(format!(
"unexpected response to CommitFinalizedBlock request, height: {height}\n \
"unexpected response to CommitCheckpointVerifiedBlock request, height: {height}\n \
response: {response:?}",
))?,
};


@ -75,7 +75,7 @@ use tokio::{pin, select, sync::oneshot};
use tower::{builder::ServiceBuilder, util::BoxService};
use tracing_futures::Instrument;
use zebra_consensus::chain::BackgroundTaskHandles;
use zebra_consensus::router::BackgroundTaskHandles;
use zebra_rpc::server::RpcServer;
use crate::{
@ -104,7 +104,7 @@ impl StartCmd {
let config = app_config().clone();
info!("initializing node state");
let (_, max_checkpoint_height) = zebra_consensus::chain::init_checkpoint_list(
let (_, max_checkpoint_height) = zebra_consensus::router::init_checkpoint_list(
config.consensus.clone(),
config.network.network,
);
@ -147,8 +147,8 @@ impl StartCmd {
.await;
info!("initializing verifiers");
let (chain_verifier, tx_verifier, consensus_task_handles, max_checkpoint_height) =
zebra_consensus::chain::init(
let (router_verifier, tx_verifier, consensus_task_handles, max_checkpoint_height) =
zebra_consensus::router::init(
config.consensus.clone(),
config.network.network,
state.clone(),
@ -161,7 +161,7 @@ impl StartCmd {
&config,
max_checkpoint_height,
peer_set.clone(),
chain_verifier.clone(),
router_verifier.clone(),
state.clone(),
latest_chain_tip.clone(),
);
@ -186,7 +186,7 @@ impl StartCmd {
let setup_data = InboundSetupData {
address_book: address_book.clone(),
block_download_peer_set: peer_set.clone(),
block_verifier: chain_verifier.clone(),
block_verifier: router_verifier.clone(),
mempool: mempool.clone(),
state,
latest_chain_tip: latest_chain_tip.clone(),
@ -207,7 +207,7 @@ impl StartCmd {
app_version(),
mempool.clone(),
read_only_state_service,
chain_verifier,
router_verifier,
sync_status.clone(),
address_book,
latest_chain_tip.clone(),


@ -29,7 +29,7 @@ use zebra_chain::{
serialization::ZcashSerialize,
transaction::UnminedTxId,
};
use zebra_consensus::chain::VerifyChainError;
use zebra_consensus::router::RouterError;
use zebra_network::{
constants::{ADDR_RESPONSE_LIMIT_DENOMINATOR, MAX_ADDRS_IN_MESSAGE},
AddressBook, InventoryResponse,
@ -73,12 +73,12 @@ type BlockDownloadPeerSet =
Buffer<BoxService<zn::Request, zn::Response, zn::BoxError>, zn::Request>;
type State = Buffer<BoxService<zs::Request, zs::Response, zs::BoxError>, zs::Request>;
type Mempool = Buffer<BoxService<mempool::Request, mempool::Response, BoxError>, mempool::Request>;
type BlockVerifier = Buffer<
BoxService<zebra_consensus::Request, block::Hash, VerifyChainError>,
type SemanticBlockVerifier = Buffer<
BoxService<zebra_consensus::Request, block::Hash, RouterError>,
zebra_consensus::Request,
>;
type GossipedBlockDownloads =
BlockDownloads<Timeout<BlockDownloadPeerSet>, Timeout<BlockVerifier>, State>;
BlockDownloads<Timeout<BlockDownloadPeerSet>, Timeout<SemanticBlockVerifier>, State>;
/// The services used by the [`Inbound`] service.
pub struct InboundSetupData {
@ -91,7 +91,7 @@ pub struct InboundSetupData {
/// A service that verifies downloaded blocks.
///
/// Given to `Inbound.block_downloads` after the required services are set up.
pub block_verifier: BlockVerifier,
pub block_verifier: SemanticBlockVerifier,
/// A service that manages transactions in the memory pool.
pub mempool: Mempool,


@ -410,7 +410,7 @@ async fn mempool_transaction_expiration() -> Result<(), crate::BoxError> {
.unwrap();
state_service
.clone()
.oneshot(zebra_state::Request::CommitFinalizedBlock(
.oneshot(zebra_state::Request::CommitCheckpointVerifiedBlock(
block_two.clone().into(),
))
.await
@ -483,7 +483,7 @@ async fn mempool_transaction_expiration() -> Result<(), crate::BoxError> {
.unwrap();
state_service
.clone()
.oneshot(zebra_state::Request::CommitFinalizedBlock(
.oneshot(zebra_state::Request::CommitCheckpointVerifiedBlock(
block_three.clone().into(),
))
.await
@ -591,7 +591,7 @@ async fn mempool_transaction_expiration() -> Result<(), crate::BoxError> {
for block in more_blocks {
state_service
.clone()
.oneshot(zebra_state::Request::CommitFinalizedBlock(
.oneshot(zebra_state::Request::CommitCheckpointVerifiedBlock(
block.clone().into(),
))
.await
@ -784,7 +784,7 @@ async fn setup(
// Download task panics and timeouts are propagated to the tests that use Groth16 verifiers.
let (block_verifier, _transaction_verifier, _groth16_download_handle, _max_checkpoint_height) =
zebra_consensus::chain::init(
zebra_consensus::router::init(
consensus_config.clone(),
network,
state_service.clone(),
@ -812,7 +812,7 @@ async fn setup(
.ready()
.await
.unwrap()
.call(zebra_state::Request::CommitFinalizedBlock(
.call(zebra_state::Request::CommitCheckpointVerifiedBlock(
genesis_block.clone().into(),
))
.await
@ -842,7 +842,7 @@ async fn setup(
.unwrap();
state_service
.clone()
.oneshot(zebra_state::Request::CommitFinalizedBlock(
.oneshot(zebra_state::Request::CommitCheckpointVerifiedBlock(
block_one.clone().into(),
))
.await


@ -18,7 +18,7 @@ use zebra_chain::{
serialization::ZcashDeserializeInto,
transaction::{AuthDigest, Hash as TxHash, Transaction, UnminedTx, UnminedTxId, WtxId},
};
use zebra_consensus::{chain::VerifyChainError, error::TransactionError, transaction};
use zebra_consensus::{error::TransactionError, router::RouterError, transaction};
use zebra_network::{
canonical_peer_addr, connect_isolated_tcp_direct_with_inbound, types::InventoryHash,
Config as NetworkConfig, InventoryResponse, PeerError, Request, Response, SharedPeerError,
@ -609,7 +609,7 @@ async fn setup(
Buffer<BoxService<mempool::Request, mempool::Response, BoxError>, mempool::Request>,
Buffer<BoxService<zebra_state::Request, zebra_state::Response, BoxError>, zebra_state::Request>,
// mocked services
MockService<zebra_consensus::Request, block::Hash, PanicAssertion, VerifyChainError>,
MockService<zebra_consensus::Request, block::Hash, PanicAssertion, RouterError>,
MockService<transaction::Request, transaction::Response, PanicAssertion, TransactionError>,
// real tasks
JoinHandle<Result<(), BlockGossipError>>,


@ -20,7 +20,7 @@ use zebra_consensus::{error::TransactionError, transaction as tx};
use zebra_network as zn;
use zebra_state::{self as zs, ChainTipBlock, ChainTipSender};
use zebra_test::mock_service::{MockService, PropTestAssertion};
use zs::FinalizedBlock;
use zs::CheckpointVerifiedBlock;
use crate::components::{
mempool::{config::Config, Mempool},
@ -239,7 +239,7 @@ proptest! {
fn genesis_chain_tip() -> Option<ChainTipBlock> {
zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES
.zcash_deserialize_into::<Arc<Block>>()
.map(FinalizedBlock::from)
.map(CheckpointVerifiedBlock::from)
.map(ChainTipBlock::from)
.ok()
}
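The chain-tip helpers above convert through the renamed wrapper types with `From` impls inside `Result::map`. A tiny, self-contained illustration of that conversion-chain pattern, using hypothetical stand-in types rather than the real zebra-state ones:

struct Block(u32);

struct CheckpointVerifiedBlock(Block);

struct ChainTipBlock(u32);

impl From<Block> for CheckpointVerifiedBlock {
    fn from(block: Block) -> Self {
        CheckpointVerifiedBlock(block)
    }
}

impl From<CheckpointVerifiedBlock> for ChainTipBlock {
    fn from(block: CheckpointVerifiedBlock) -> Self {
        ChainTipBlock((block.0).0)
    }
}

fn genesis_chain_tip() -> Option<ChainTipBlock> {
    // Stand-in for the fallible `zcash_deserialize_into` step.
    let parsed: Result<Block, ()> = Ok(Block(0));

    parsed
        .map(CheckpointVerifiedBlock::from)
        .map(ChainTipBlock::from)
        .ok()
}

fn main() {
    assert!(matches!(genesis_chain_tip(), Some(ChainTipBlock(0))));
}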
@ -247,7 +247,7 @@ fn genesis_chain_tip() -> Option<ChainTipBlock> {
fn block1_chain_tip() -> Option<ChainTipBlock> {
zebra_test::vectors::BLOCK_MAINNET_1_BYTES
.zcash_deserialize_into::<Arc<Block>>()
.map(FinalizedBlock::from)
.map(CheckpointVerifiedBlock::from)
.map(ChainTipBlock::from)
.ok()
}


@ -412,7 +412,7 @@ async fn mempool_cancel_mined() -> Result<(), Report> {
.ready()
.await
.unwrap()
.call(zebra_state::Request::CommitFinalizedBlock(
.call(zebra_state::Request::CommitCheckpointVerifiedBlock(
block1.clone().into(),
))
.await
@ -457,7 +457,7 @@ async fn mempool_cancel_mined() -> Result<(), Report> {
// Push block 2 to the state
state_service
.oneshot(zebra_state::Request::CommitFinalizedBlock(
.oneshot(zebra_state::Request::CommitCheckpointVerifiedBlock(
block2.clone().into(),
))
.await
@ -545,7 +545,7 @@ async fn mempool_cancel_downloads_after_network_upgrade() -> Result<(), Report>
.ready()
.await
.unwrap()
.call(zebra_state::Request::CommitFinalizedBlock(
.call(zebra_state::Request::CommitCheckpointVerifiedBlock(
block1.clone().into(),
))
.await
@ -822,7 +822,7 @@ async fn mempool_reverifies_after_tip_change() -> Result<(), Report> {
.ready()
.await
.unwrap()
.call(zebra_state::Request::CommitFinalizedBlock(
.call(zebra_state::Request::CommitCheckpointVerifiedBlock(
block1.clone().into(),
))
.await
@ -882,7 +882,7 @@ async fn mempool_reverifies_after_tip_change() -> Result<(), Report> {
.ready()
.await
.unwrap()
.call(zebra_state::Request::CommitFinalizedBlock(
.call(zebra_state::Request::CommitCheckpointVerifiedBlock(
block2.clone().into(),
))
.await
@ -955,7 +955,7 @@ async fn setup(
.ready()
.await
.unwrap()
.call(zebra_state::Request::CommitFinalizedBlock(
.call(zebra_state::Request::CommitCheckpointVerifiedBlock(
genesis_block.clone().into(),
))
.await


@ -122,7 +122,7 @@ pub enum BlockDownloadVerifyError {
#[error("block failed consensus validation: {error:?} {height:?} {hash:?}")]
Invalid {
#[source]
error: zebra_consensus::chain::VerifyChainError,
error: zebra_consensus::router::RouterError,
height: block::Height,
hash: block::Hash,
},
@ -543,7 +543,7 @@ where
verification
.map(|hash| (block_height, hash))
.map_err(|err| {
match err.downcast::<zebra_consensus::chain::VerifyChainError>() {
match err.downcast::<zebra_consensus::router::RouterError>() {
Ok(error) => BlockDownloadVerifyError::Invalid { error: *error, height: block_height, hash },
Err(error) => BlockDownloadVerifyError::ValidationRequestError { error, height: block_height, hash },
}
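The error handling above downcasts a boxed error back to the concrete verifier error. A standard-library-only sketch of that `downcast` pattern, where `VerifierError` is a stand-in for `RouterError`:

use std::{error::Error, fmt};

type BoxError = Box<dyn Error + Send + Sync + 'static>;

// Stand-in for a concrete verifier error such as `RouterError`.
#[derive(Debug)]
struct VerifierError(String);

impl fmt::Display for VerifierError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "verifier error: {}", self.0)
    }
}

impl Error for VerifierError {}

fn classify(err: BoxError) -> String {
    // `downcast` returns the boxed concrete error on success,
    // or gives the original boxed error back on failure.
    match err.downcast::<VerifierError>() {
        Ok(error) => format!("invalid block: {error}"),
        Err(error) => format!("request failed: {error}"),
    }
}

fn main() {
    let verifier_err: BoxError = Box::new(VerifierError("bad proof".into()));
    let other_err: BoxError = "connection reset".into();

    assert!(classify(verifier_err).starts_with("invalid block"));
    assert!(classify(other_err).starts_with("request failed"));
}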


@ -45,7 +45,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> {
let (
chain_sync_future,
_sync_status,
mut chain_verifier,
mut router_verifier,
mut peer_set,
mut state_service,
_mock_chain_tip_sender,
@ -88,7 +88,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> {
.await
.respond(zn::Response::Blocks(vec![Available(block0.clone())]));
chain_verifier
router_verifier
.expect_request(zebra_consensus::Request::Commit(block0))
.await
.respond(block0_hash);
@ -96,7 +96,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> {
// Check that nothing unexpected happened.
// We expect more requests to the state service, because the syncer keeps on running.
peer_set.expect_no_requests().await;
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
// State is checked for genesis again
state_service
@ -144,7 +144,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> {
// Check that nothing unexpected happened.
peer_set.expect_no_requests().await;
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
// State is checked for all non-tip blocks (blocks 1 & 2) in response order
state_service
@ -174,7 +174,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> {
.collect();
for _ in 1..=2 {
chain_verifier
router_verifier
.expect_request_that(|req| remaining_blocks.remove(&req.block().hash()).is_some())
.await
.respond_with(|req| req.block().hash());
@ -186,7 +186,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> {
);
// Check that nothing unexpected happened.
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
state_service.expect_no_requests().await;
// ChainSync::extend_tips
@ -217,7 +217,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> {
}
// Check that nothing unexpected happened.
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
state_service.expect_no_requests().await;
// Blocks 3 & 4 are fetched in order, then verified concurrently
@ -238,7 +238,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> {
.collect();
for _ in 3..=4 {
chain_verifier
router_verifier
.expect_request_that(|req| remaining_blocks.remove(&req.block().hash()).is_some())
.await
.respond_with(|req| req.block().hash());
@ -250,7 +250,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> {
);
// Check that nothing unexpected happened.
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
state_service.expect_no_requests().await;
let chain_sync_result = chain_sync_task_handle.now_or_never();
@ -272,7 +272,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> {
let (
chain_sync_future,
_sync_status,
mut chain_verifier,
mut router_verifier,
mut peer_set,
mut state_service,
_mock_chain_tip_sender,
@ -315,7 +315,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> {
.await
.respond(zn::Response::Blocks(vec![Available(block0.clone())]));
chain_verifier
router_verifier
.expect_request(zebra_consensus::Request::Commit(block0))
.await
.respond(block0_hash);
@ -323,7 +323,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> {
// Check that nothing unexpected happened.
// We expect more requests to the state service, because the syncer keeps on running.
peer_set.expect_no_requests().await;
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
// State is checked for genesis again
state_service
@ -373,7 +373,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> {
// Check that nothing unexpected happened.
peer_set.expect_no_requests().await;
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
// State is checked for all non-tip blocks (blocks 1 & 2) in response order
state_service
@ -403,7 +403,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> {
.collect();
for _ in 1..=2 {
chain_verifier
router_verifier
.expect_request_that(|req| remaining_blocks.remove(&req.block().hash()).is_some())
.await
.respond_with(|req| req.block().hash());
@ -415,7 +415,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> {
);
// Check that nothing unexpected happened.
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
state_service.expect_no_requests().await;
// ChainSync::extend_tips
@ -448,7 +448,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> {
}
// Check that nothing unexpected happened.
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
state_service.expect_no_requests().await;
// Blocks 3 & 4 are fetched in order, then verified concurrently
@ -469,7 +469,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> {
.collect();
for _ in 3..=4 {
chain_verifier
router_verifier
.expect_request_that(|req| remaining_blocks.remove(&req.block().hash()).is_some())
.await
.respond_with(|req| req.block().hash());
@ -481,7 +481,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> {
);
// Check that nothing unexpected happened.
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
state_service.expect_no_requests().await;
let chain_sync_result = chain_sync_task_handle.now_or_never();
@ -500,7 +500,7 @@ async fn sync_block_lookahead_drop() -> Result<(), crate::BoxError> {
let (
chain_sync_future,
_sync_status,
mut chain_verifier,
mut router_verifier,
mut peer_set,
mut state_service,
_mock_chain_tip_sender,
@ -535,7 +535,7 @@ async fn sync_block_lookahead_drop() -> Result<(), crate::BoxError> {
// Block is dropped because it is too far ahead of the tip.
// We expect more requests to the state service, because the syncer keeps on running.
peer_set.expect_no_requests().await;
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
let chain_sync_result = chain_sync_task_handle.now_or_never();
assert!(
@ -555,7 +555,7 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> {
let (
chain_sync_future,
_sync_status,
mut chain_verifier,
mut router_verifier,
mut peer_set,
mut state_service,
_mock_chain_tip_sender,
@ -597,7 +597,7 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> {
.await
.respond(zn::Response::Blocks(vec![Available(block0.clone())]));
chain_verifier
router_verifier
.expect_request(zebra_consensus::Request::Commit(block0))
.await
.respond(block0_hash);
@ -605,7 +605,7 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> {
// Check that nothing unexpected happened.
// We expect more requests to the state service, because the syncer keeps on running.
peer_set.expect_no_requests().await;
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
// State is checked for genesis again
state_service
@ -654,7 +654,7 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> {
// Check that nothing unexpected happened.
peer_set.expect_no_requests().await;
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
// State is checked for all non-tip blocks (blocks 982k, 1, 2) in response order
state_service
@ -710,7 +710,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> {
let (
chain_sync_future,
_sync_status,
mut chain_verifier,
mut router_verifier,
mut peer_set,
mut state_service,
_mock_chain_tip_sender,
@ -758,7 +758,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> {
.await
.respond(zn::Response::Blocks(vec![Available(block0.clone())]));
chain_verifier
router_verifier
.expect_request(zebra_consensus::Request::Commit(block0))
.await
.respond(block0_hash);
@ -766,7 +766,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> {
// Check that nothing unexpected happened.
// We expect more requests to the state service, because the syncer keeps on running.
peer_set.expect_no_requests().await;
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
// State is checked for genesis again
state_service
@ -814,7 +814,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> {
// Check that nothing unexpected happened.
peer_set.expect_no_requests().await;
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
// State is checked for all non-tip blocks (blocks 1 & 2) in response order
state_service
@ -844,7 +844,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> {
.collect();
for _ in 1..=2 {
chain_verifier
router_verifier
.expect_request_that(|req| remaining_blocks.remove(&req.block().hash()).is_some())
.await
.respond_with(|req| req.block().hash());
@ -856,7 +856,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> {
);
// Check that nothing unexpected happened.
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
state_service.expect_no_requests().await;
// ChainSync::extend_tips
@ -888,7 +888,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> {
}
// Check that nothing unexpected happened.
chain_verifier.expect_no_requests().await;
router_verifier.expect_no_requests().await;
state_service.expect_no_requests().await;
// Blocks 3, 4, 982k are fetched in order, then verified concurrently,
@ -926,7 +926,7 @@ fn setup() -> (
// ChainSync
impl Future<Output = Result<(), Report>> + Send,
SyncStatus,
// ChainVerifier
// BlockVerifierRouter
MockService<zebra_consensus::Request, block::Hash, PanicAssertion>,
// PeerSet
MockService<zebra_network::Request, zebra_network::Response, PanicAssertion>,
@ -951,7 +951,7 @@ fn setup() -> (
.with_max_request_delay(MAX_SERVICE_REQUEST_DELAY)
.for_unit_tests();
let chain_verifier = MockService::build()
let router_verifier = MockService::build()
.with_max_request_delay(MAX_SERVICE_REQUEST_DELAY)
.for_unit_tests();
@ -965,7 +965,7 @@ fn setup() -> (
&config,
Height(0),
peer_set.clone(),
chain_verifier.clone(),
router_verifier.clone(),
state_service.clone(),
mock_chain_tip,
);
@ -975,7 +975,7 @@ fn setup() -> (
(
chain_sync_future,
sync_status,
chain_verifier,
router_verifier,
peer_set,
state_service,
mock_chain_tip_sender,


@ -1038,7 +1038,8 @@ fn sync_large_checkpoints_mempool_mainnet() -> Result<()> {
#[tracing::instrument]
fn create_cached_database(network: Network) -> Result<()> {
let height = network.mandatory_checkpoint_height();
let checkpoint_stop_regex = format!("{STOP_AT_HEIGHT_REGEX}.*CommitFinalized request");
let checkpoint_stop_regex =
format!("{STOP_AT_HEIGHT_REGEX}.*commit checkpoint-verified request");
create_cached_database_height(
network,
@ -1056,7 +1057,7 @@ fn create_cached_database(network: Network) -> Result<()> {
fn sync_past_mandatory_checkpoint(network: Network) -> Result<()> {
let height = network.mandatory_checkpoint_height() + 1200;
let full_validation_stop_regex =
format!("{STOP_AT_HEIGHT_REGEX}.*best non-finalized chain root");
format!("{STOP_AT_HEIGHT_REGEX}.*commit contextually-verified request");
create_cached_database_height(
network,
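The stop regexes above are built by appending the new log fragments to `STOP_AT_HEIGHT_REGEX`. A small illustration of that matching, using the `regex` crate and made-up stand-ins for the constant and the log line:

use regex::Regex;

fn main() {
    // Hypothetical stand-in for STOP_AT_HEIGHT_REGEX.
    let stop_at_height_regex = "stopping at configured height";
    let checkpoint_stop_regex =
        format!("{stop_at_height_regex}.*commit checkpoint-verified request");

    let re = Regex::new(&checkpoint_stop_regex).expect("regex should compile");
    let log_line =
        "stopping at configured height, last block: commit checkpoint-verified request";

    assert!(re.is_match(log_line));
}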


@ -52,7 +52,7 @@ pub const SYNC_PROGRESS_REGEX: &str = r"sync_percent";
/// The text that should be logged when Zebra loads its compiled-in checkpoints.
#[cfg(feature = "zebra-checkpoints")]
pub const CHECKPOINT_VERIFIER_REGEX: &str =
r"initializing chain verifier.*max_checkpoint_height.*=.*Height";
r"initializing block verifier router.*max_checkpoint_height.*=.*Height";
/// The maximum amount of time Zebra should take to reload after shutting down.
///