2022-03-11 05:58:22 -08:00
|
|
|
//! [`tower::Service`]s for Zebra's cached chain state.
|
|
|
|
//!
|
|
|
|
//! Zebra provides cached state access via two main services:
|
|
|
|
//! - [`StateService`]: a read-write service that waits for queued blocks.
|
2022-06-13 18:22:16 -07:00
|
|
|
//! - [`ReadStateService`]: a read-only service that answers from the most
|
|
|
|
//! recent committed block.
|
2022-03-11 05:58:22 -08:00
|
|
|
//!
|
|
|
|
//! Most users should prefer [`ReadStateService`], unless they need to wait for
|
2022-06-13 18:22:16 -07:00
|
|
|
//! verified blocks to be committed. (For example, the syncer and mempool
|
|
|
|
//! tasks.)
|
2022-03-11 05:58:22 -08:00
|
|
|
//!
|
|
|
|
//! Zebra also provides access to the best chain tip via:
|
2022-06-13 18:22:16 -07:00
|
|
|
//! - [`LatestChainTip`]: a read-only channel that contains the latest committed
|
|
|
|
//! tip.
|
|
|
|
//! - [`ChainTipChange`]: a read-only channel that can asynchronously await
|
|
|
|
//! chain tip changes.
|
2022-03-11 05:58:22 -08:00
|
|
|
|
2020-09-09 17:13:58 -07:00
|
|
|
use std::{
|
2022-03-31 03:26:21 -07:00
|
|
|
convert,
|
2020-09-09 17:13:58 -07:00
|
|
|
future::Future,
|
|
|
|
pin::Pin,
|
2020-09-09 23:07:47 -07:00
|
|
|
sync::Arc,
|
2020-09-09 17:13:58 -07:00
|
|
|
task::{Context, Poll},
|
state: introduce PreparedBlock, FinalizedBlock
This change introduces two new types:
- `PreparedBlock`, representing a block which has undergone semantic
validation and has been prepared for contextual validation;
- `FinalizedBlock`, representing a block which is ready to be finalized
immediately;
and changes the `Request::CommitBlock`,`Request::CommitFinalizedBlock`
variants to use these types instead of their previous fields.
This change solves the problem of passing data between semantic
validation and contextual validation, and cleans up the state code by
allowing it to pass around a bundle of data. Previously, the state code
just passed around an `Arc<Block>`, which forced it to needlessly
recompute block hashes and other data, and was incompatible with the
already-known but not-yet-implemented data transfer requirements, namely
passing in the Sprout and Sapling anchors computed during contextual
validation.
This commit propagates the `PreparedBlock` and `FinalizedBlock` types
through the state code but only uses their data opportunistically, e.g.,
changing .hash() computations to use the precomputed hash. In the
future, these structures can be extended to pass data through the
verification pipeline for reuse as appropriate. For instance, these
changes allow the sprout and sapling anchors to be propagated through
the state.
2020-11-21 01:16:14 -08:00
|
|
|
time::{Duration, Instant},
|
2020-09-09 17:13:58 -07:00
|
|
|
};
|
2020-09-09 21:15:08 -07:00
|
|
|
|
2022-03-10 12:40:48 -08:00
|
|
|
use futures::future::FutureExt;
|
2022-03-11 05:58:22 -08:00
|
|
|
use tokio::sync::{oneshot, watch};
|
2022-03-10 12:40:48 -08:00
|
|
|
use tower::{util::BoxService, Service};
|
2022-07-25 15:33:00 -07:00
|
|
|
use tracing::{instrument, Instrument, Span};
|
2021-08-26 18:34:33 -07:00
|
|
|
|
|
|
|
#[cfg(any(test, feature = "proptest-impl"))]
|
|
|
|
use tower::buffer::Buffer;
|
|
|
|
|
2020-09-09 23:07:47 -07:00
|
|
|
use zebra_chain::{
|
2022-07-26 13:26:17 -07:00
|
|
|
block::{self, CountedHeader},
|
2022-07-25 15:33:00 -07:00
|
|
|
diagnostic::CodeTimer,
|
2021-06-28 22:03:51 -07:00
|
|
|
parameters::{Network, NetworkUpgrade},
|
2020-11-01 10:49:34 -08:00
|
|
|
transparent,
|
2020-09-09 23:07:47 -07:00
|
|
|
};
|
2020-09-09 17:13:58 -07:00
|
|
|
|
2020-10-09 01:37:24 -07:00
|
|
|
use crate::{
|
2022-03-11 05:58:22 -08:00
|
|
|
service::{
|
|
|
|
chain_tip::{ChainTipBlock, ChainTipChange, ChainTipSender, LatestChainTip},
|
2022-03-11 12:23:32 -08:00
|
|
|
finalized_state::{FinalizedState, ZebraDb},
|
2022-03-11 05:58:22 -08:00
|
|
|
non_finalized_state::{Chain, NonFinalizedState, QueuedBlocks},
|
|
|
|
pending_utxos::PendingUtxos,
|
2022-03-16 17:37:44 -07:00
|
|
|
watch_receiver::WatchReceiver,
|
2022-03-11 05:58:22 -08:00
|
|
|
},
|
2022-03-17 15:59:46 -07:00
|
|
|
BoxError, CloneError, CommitBlockError, Config, FinalizedBlock, PreparedBlock, ReadRequest,
|
|
|
|
ReadResponse, Request, Response, ValidateContextError,
|
2021-08-26 18:34:33 -07:00
|
|
|
};
|
|
|
|
|
2022-03-10 12:40:48 -08:00
|
|
|
// Public modules: iteration, tip tracking, and watch-channel helpers.
pub mod block_iter;
pub mod chain_tip;
pub mod watch_receiver;

// Contextual validation checks, shared across the state service.
pub(crate) mod check;

// Internal state storage: on-disk finalized state and in-memory forks.
mod finalized_state;
mod non_finalized_state;
mod pending_utxos;
pub(crate) mod read;

// Test-only helpers for generating arbitrary state data.
#[cfg(any(test, feature = "proptest-impl"))]
pub mod arbitrary;

#[cfg(test)]
mod tests;

pub use finalized_state::{OutputIndex, OutputLocation, TransactionLocation};
|
2022-04-12 10:21:46 -07:00
|
|
|
|
state: introduce PreparedBlock, FinalizedBlock
This change introduces two new types:
- `PreparedBlock`, representing a block which has undergone semantic
validation and has been prepared for contextual validation;
- `FinalizedBlock`, representing a block which is ready to be finalized
immediately;
and changes the `Request::CommitBlock`,`Request::CommitFinalizedBlock`
variants to use these types instead of their previous fields.
This change solves the problem of passing data between semantic
validation and contextual validation, and cleans up the state code by
allowing it to pass around a bundle of data. Previously, the state code
just passed around an `Arc<Block>`, which forced it to needlessly
recompute block hashes and other data, and was incompatible with the
already-known but not-yet-implemented data transfer requirements, namely
passing in the Sprout and Sapling anchors computed during contextual
validation.
This commit propagates the `PreparedBlock` and `FinalizedBlock` types
through the state code but only uses their data opportunistically, e.g.,
changing .hash() computations to use the precomputed hash. In the
future, these structures can be extended to pass data through the
verification pipeline for reuse as appropriate. For instance, these
changes allow the sprout and sapling anchors to be propagated through
the state.
2020-11-21 01:16:14 -08:00
|
|
|
/// A queued non-finalized block, paired with the channel used to report
/// its commit result (the block hash on success, or an error).
pub type QueuedBlock = (
    PreparedBlock,
    oneshot::Sender<Result<block::Hash, BoxError>>,
);
|
|
|
|
/// A queued finalized block, paired with the channel used to report
/// its commit result (the block hash on success, or an error).
pub type QueuedFinalized = (
    FinalizedBlock,
    oneshot::Sender<Result<block::Hash, BoxError>>,
);
|
2020-09-09 23:07:47 -07:00
|
|
|
|
2022-03-10 12:40:48 -08:00
|
|
|
/// A read-write service for Zebra's cached blockchain state.
///
/// This service modifies and provides access to:
/// - the non-finalized state: the ~100 most recent blocks.
///   Zebra allows chain forks in the non-finalized state,
///   stores it in memory, and re-downloads it when restarted.
/// - the finalized state: older blocks that have many confirmations.
///   Zebra stores the single best chain in the finalized state,
///   and re-loads it from disk when restarted.
///
/// Requests to this service are processed in series,
/// so read requests wait for all queued write requests to complete,
/// then return their answers.
///
/// This behaviour is implicitly used by Zebra's syncer,
/// to delay the next ObtainTips until all queued blocks have been committed.
///
/// But most state users can ignore any queued blocks, and get faster read responses
/// using the [`ReadStateService`].
#[derive(Debug)]
pub(crate) struct StateService {
    /// The finalized chain state, including its on-disk database.
    pub(crate) disk: FinalizedState,

    /// The non-finalized chain state, including its in-memory chain forks.
    mem: NonFinalizedState,

    /// The configured Zcash network.
    network: Network,

    /// Blocks awaiting their parent blocks for contextual verification.
    queued_blocks: QueuedBlocks,

    /// The set of outpoints with pending requests for their associated transparent::Output.
    pending_utxos: PendingUtxos,

    /// Instant tracking the last time `pending_utxos` was pruned.
    last_prune: Instant,

    /// A sender channel for the current best chain tip.
    chain_tip_sender: ChainTipSender,

    /// A sender channel for the current best non-finalized chain.
    best_chain_sender: watch::Sender<Option<Arc<Chain>>>,
}
|
|
|
|
|
|
|
|
/// A read-only service for accessing Zebra's cached blockchain state.
///
/// This service provides read-only access to:
/// - the non-finalized state: the ~100 most recent blocks.
/// - the finalized state: older blocks that have many confirmations.
///
/// Requests to this service are processed in parallel,
/// ignoring any blocks queued by the read-write [`StateService`].
///
/// This quick response behavior is better for most state users.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct ReadStateService {
    /// The shared inner on-disk database for the finalized state.
    ///
    /// RocksDB allows reads and writes via a shared reference,
    /// but [`ZebraDb`] doesn't expose any write methods or types.
    ///
    /// This database is updated concurrently with requests,
    /// so it might include some block data that is also in `best_chain_receiver`.
    db: ZebraDb,

    /// A watch channel for the current best in-memory chain.
    ///
    /// This chain is only updated between requests,
    /// so it might include some block data that is also in `db`.
    best_chain_receiver: WatchReceiver<Option<Arc<Chain>>>,

    /// The configured Zcash network.
    network: Network,
}
|
|
|
|
|
2020-09-09 17:13:58 -07:00
|
|
|
impl StateService {
|
2020-10-14 14:06:32 -07:00
|
|
|
    /// The interval between prunes of `pending_utxos`
    /// (compared against `last_prune` when servicing requests).
    const PRUNE_INTERVAL: Duration = Duration::from_secs(30);
|
|
|
|
|
2022-03-11 05:58:22 -08:00
|
|
|
    /// Create a new read-write state service.
    /// Returns the read-write and read-only state services,
    /// and read-only watch channels for its best chain tip.
    ///
    /// # Panics
    ///
    /// If the cached state on disk follows a pre-NU5 legacy chain,
    /// which means it must be deleted and re-synced.
    pub fn new(
        config: Config,
        network: Network,
    ) -> (Self, ReadStateService, LatestChainTip, ChainTipChange) {
        // Each startup phase is timed with a `CodeTimer`, so slow phases
        // show up in diagnostics.
        let timer = CodeTimer::start();
        let disk = FinalizedState::new(&config, network);
        timer.finish(module_path!(), line!(), "opening finalized state database");

        // Seed the chain tip channels with the tip loaded from the database,
        // if the database is not empty.
        let timer = CodeTimer::start();
        let initial_tip = disk
            .db()
            .tip_block()
            .map(FinalizedBlock::from)
            .map(ChainTipBlock::from);
        timer.finish(module_path!(), line!(), "fetching database tip");

        let timer = CodeTimer::start();
        let (chain_tip_sender, latest_chain_tip, chain_tip_change) =
            ChainTipSender::new(initial_tip, network);

        let mem = NonFinalizedState::new(network);

        // The read-only service shares `disk`'s database,
        // and watches the best chain via `best_chain_sender`.
        let (read_only_service, best_chain_sender) = ReadStateService::new(&disk);

        let queued_blocks = QueuedBlocks::default();
        let pending_utxos = PendingUtxos::default();

        let state = Self {
            disk,
            mem,
            queued_blocks,
            pending_utxos,
            network,
            last_prune: Instant::now(),
            chain_tip_sender,
            best_chain_sender,
        };
        timer.finish(module_path!(), line!(), "initializing state service");

        tracing::info!("starting legacy chain check");
        let timer = CodeTimer::start();

        // Check that the cached chain follows the NU5 consensus branch:
        // an outdated Zebra could have committed a chain using old branch rules.
        if let Some(tip) = state.best_tip() {
            let nu5_activation_height = NetworkUpgrade::Nu5
                .activation_height(network)
                .expect("NU5 activation height is set");

            if let Err(error) = check::legacy_chain(
                nu5_activation_height,
                // `tip.1` is presumably the tip hash — the ancestor iterator
                // walks back from it. TODO(review): confirm against `best_tip()`.
                state.any_ancestor_blocks(tip.1),
                state.network,
            ) {
                // A legacy chain is unrecoverable: tell the user how to fix it.
                let legacy_db_path = state.disk.path().to_path_buf();
                panic!(
                    "Cached state contains a legacy chain.\n\
                     An outdated Zebra version did not know about a recent network upgrade,\n\
                     so it followed a legacy chain using outdated consensus branch rules.\n\
                     Hint: Delete your database, and restart Zebra to do a full sync.\n\
                     Database path: {legacy_db_path:?}\n\
                     Error: {error:?}",
                );
            }
        }

        tracing::info!("cached state consensus branch is valid: no legacy chain found");
        timer.finish(module_path!(), line!(), "legacy chain check");

        (state, read_only_service, latest_chain_tip, chain_tip_change)
    }
|
|
|
|
|
|
|
|
/// Queue a finalized block for verification and storage in the finalized state.
|
|
|
|
fn queue_and_commit_finalized(
|
|
|
|
&mut self,
|
|
|
|
finalized: FinalizedBlock,
|
|
|
|
) -> oneshot::Receiver<Result<block::Hash, BoxError>> {
|
|
|
|
let (rsp_tx, rsp_rx) = oneshot::channel();
|
|
|
|
|
2021-08-29 19:38:41 -07:00
|
|
|
let tip_block = self
|
|
|
|
.disk
|
|
|
|
.queue_and_commit_finalized((finalized, rsp_tx))
|
|
|
|
.map(ChainTipBlock::from);
|
|
|
|
self.chain_tip_sender.set_finalized_tip(tip_block);
|
Reject connections from outdated peers (#2519)
* Simplify state service initialization in test
Use the test helper function to remove redundant code.
* Create `BestTipHeight` helper type
This type abstracts away the calculation of the best tip height based on
the finalized block height and the best non-finalized chain's tip.
* Add `best_tip_height` field to `StateService`
The receiver endpoint is currently ignored.
* Return receiver endpoint from service constructor
Make it available so that the best tip height can be watched.
* Update finalized height after finalizing blocks
After blocks from the queue are finalized and committed to disk, update
the finalized block height.
* Update best non-finalized height after validation
Update the value of the best non-finalized chain tip block height after
a new block is committed to the non-finalized state.
* Update finalized height after loading from disk
When `FinalizedState` is first created, it loads the state from
persistent storage, and the finalized tip height is updated. Therefore,
the `best_tip_height` must be notified of the initial value.
* Update the finalized height on checkpoint commit
When a checkpointed block is commited, it bypasses the non-finalized
state, so there's an extra place where the finalized height has to be
updated.
* Add `best_tip_height` to `Handshake` service
It can be configured using the `Builder::with_best_tip_height`. It's
currently not used, but it will be used to determine if a connection to
a remote peer should be rejected or not based on that peer's protocol
version.
* Require best tip height to init. `zebra_network`
Without it the handshake service can't properly enforce the minimum
network protocol version from peers. Zebrad obtains the best tip height
endpoint from `zebra_state`, and the test vectors simply use a dummy
endpoint that's fixed at the genesis height.
* Pass `best_tip_height` to proto. ver. negotiation
The protocol version negotiation code will reject connections to peers
if they are using an old protocol version. An old version is determined
based on the current known best chain tip height.
* Handle an optional height in `Version`
Fallback to the genesis height in `None` is specified.
* Reject connections to peers on old proto. versions
Avoid connecting to peers that are on protocol versions that don't
recognize a network update.
* Document why peers on old versions are rejected
Describe why it's a security issue above the check.
* Test if `BestTipHeight` starts with `None`
Check if initially there is no best tip height.
* Test if best tip height is max. of latest values
After applying a list of random updates where each one either sets the
finalized height or the non-finalized height, check that the best tip
height is the maximum of the most recently set finalized height and the
most recently set non-finalized height.
* Add `queue_and_commit_finalized` method
A small refactor to make testing easier. The handling of requests for
committing non-finalized and finalized blocks is now more consistent.
* Add `assert_block_can_be_validated` helper
Refactor to move into a separate method some assertions that are done
before a block is validated. This is to allow moving these assertions
more easily to simplify testing.
* Remove redundant PoW block assertion
It's also checked in
`zebra_state::service::check::block_is_contextually_valid`, and it was
getting in the way of tests that received a gossiped block before
finalizing enough blocks.
* Create a test strategy for test vector chain
Splits a chain loaded from the test vectors in two parts, containing the
blocks to finalize and the blocks to keep in the non-finalized state.
* Test committing blocks update best tip height
Create a mock blockchain state, with a chain of finalized blocks and a
chain of non-finalized blocks. Commit all the blocks appropriately, and
verify that the best tip height is updated.
Co-authored-by: teor <teor@riseup.net>
2021-08-08 16:52:52 -07:00
|
|
|
|
|
|
|
rsp_rx
|
2020-10-07 20:07:32 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Queue a non finalized block for verification and check if any queued
|
|
|
|
/// blocks are ready to be verified and committed to the state.
|
|
|
|
///
|
|
|
|
/// This function encodes the logic for [committing non-finalized blocks][1]
|
|
|
|
/// in RFC0005.
|
|
|
|
///
|
|
|
|
/// [1]: https://zebra.zfnd.org/dev/rfcs/0005-state-updates.html#committing-non-finalized-blocks
|
2020-11-21 10:41:53 -08:00
|
|
|
#[instrument(level = "debug", skip(self, prepared))]
|
2020-11-20 19:52:44 -08:00
|
|
|
fn queue_and_commit_non_finalized(
|
2020-10-26 13:54:19 -07:00
|
|
|
&mut self,
|
state: introduce PreparedBlock, FinalizedBlock
This change introduces two new types:
- `PreparedBlock`, representing a block which has undergone semantic
validation and has been prepared for contextual validation;
- `FinalizedBlock`, representing a block which is ready to be finalized
immediately;
and changes the `Request::CommitBlock`,`Request::CommitFinalizedBlock`
variants to use these types instead of their previous fields.
This change solves the problem of passing data between semantic
validation and contextual validation, and cleans up the state code by
allowing it to pass around a bundle of data. Previously, the state code
just passed around an `Arc<Block>`, which forced it to needlessly
recompute block hashes and other data, and was incompatible with the
already-known but not-yet-implemented data transfer requirements, namely
passing in the Sprout and Sapling anchors computed during contextual
validation.
This commit propagates the `PreparedBlock` and `FinalizedBlock` types
through the state code but only uses their data opportunistically, e.g.,
changing .hash() computations to use the precomputed hash. In the
future, these structures can be extended to pass data through the
verification pipeline for reuse as appropriate. For instance, these
changes allow the sprout and sapling anchors to be propagated through
the state.
2020-11-21 01:16:14 -08:00
|
|
|
prepared: PreparedBlock,
|
2020-10-26 13:54:19 -07:00
|
|
|
) -> oneshot::Receiver<Result<block::Hash, BoxError>> {
|
2020-11-21 10:41:53 -08:00
|
|
|
tracing::debug!(block = %prepared.block, "queueing block for contextual verification");
|
state: introduce PreparedBlock, FinalizedBlock
This change introduces two new types:
- `PreparedBlock`, representing a block which has undergone semantic
validation and has been prepared for contextual validation;
- `FinalizedBlock`, representing a block which is ready to be finalized
immediately;
and changes the `Request::CommitBlock`,`Request::CommitFinalizedBlock`
variants to use these types instead of their previous fields.
This change solves the problem of passing data between semantic
validation and contextual validation, and cleans up the state code by
allowing it to pass around a bundle of data. Previously, the state code
just passed around an `Arc<Block>`, which forced it to needlessly
recompute block hashes and other data, and was incompatible with the
already-known but not-yet-implemented data transfer requirements, namely
passing in the Sprout and Sapling anchors computed during contextual
validation.
This commit propagates the `PreparedBlock` and `FinalizedBlock` types
through the state code but only uses their data opportunistically, e.g.,
changing .hash() computations to use the precomputed hash. In the
future, these structures can be extended to pass data through the
verification pipeline for reuse as appropriate. For instance, these
changes allow the sprout and sapling anchors to be propagated through
the state.
2020-11-21 01:16:14 -08:00
|
|
|
let parent_hash = prepared.block.header.previous_block_hash;
|
2020-10-07 20:07:32 -07:00
|
|
|
|
2022-03-11 12:23:32 -08:00
|
|
|
if self.mem.any_chain_contains(&prepared.hash)
|
|
|
|
|| self.disk.db().hash(prepared.height).is_some()
|
state: introduce PreparedBlock, FinalizedBlock
This change introduces two new types:
- `PreparedBlock`, representing a block which has undergone semantic
validation and has been prepared for contextual validation;
- `FinalizedBlock`, representing a block which is ready to be finalized
immediately;
and changes the `Request::CommitBlock`,`Request::CommitFinalizedBlock`
variants to use these types instead of their previous fields.
This change solves the problem of passing data between semantic
validation and contextual validation, and cleans up the state code by
allowing it to pass around a bundle of data. Previously, the state code
just passed around an `Arc<Block>`, which forced it to needlessly
recompute block hashes and other data, and was incompatible with the
already-known but not-yet-implemented data transfer requirements, namely
passing in the Sprout and Sapling anchors computed during contextual
validation.
This commit propagates the `PreparedBlock` and `FinalizedBlock` types
through the state code but only uses their data opportunistically, e.g.,
changing .hash() computations to use the precomputed hash. In the
future, these structures can be extended to pass data through the
verification pipeline for reuse as appropriate. For instance, these
changes allow the sprout and sapling anchors to be propagated through
the state.
2020-11-21 01:16:14 -08:00
|
|
|
{
|
2020-10-26 13:54:19 -07:00
|
|
|
let (rsp_tx, rsp_rx) = oneshot::channel();
|
2020-11-12 11:43:17 -08:00
|
|
|
let _ = rsp_tx.send(Err("block is already committed to the state".into()));
|
2020-10-26 13:54:19 -07:00
|
|
|
return rsp_rx;
|
|
|
|
}
|
|
|
|
|
2020-11-12 11:43:17 -08:00
|
|
|
// Request::CommitBlock contract: a request to commit a block which has
|
|
|
|
// been queued but not yet committed to the state fails the older
|
|
|
|
// request and replaces it with the newer request.
|
state: introduce PreparedBlock, FinalizedBlock
This change introduces two new types:
- `PreparedBlock`, representing a block which has undergone semantic
validation and has been prepared for contextual validation;
- `FinalizedBlock`, representing a block which is ready to be finalized
immediately;
and changes the `Request::CommitBlock`,`Request::CommitFinalizedBlock`
variants to use these types instead of their previous fields.
This change solves the problem of passing data between semantic
validation and contextual validation, and cleans up the state code by
allowing it to pass around a bundle of data. Previously, the state code
just passed around an `Arc<Block>`, which forced it to needlessly
recompute block hashes and other data, and was incompatible with the
already-known but not-yet-implemented data transfer requirements, namely
passing in the Sprout and Sapling anchors computed during contextual
validation.
This commit propagates the `PreparedBlock` and `FinalizedBlock` types
through the state code but only uses their data opportunistically, e.g.,
changing .hash() computations to use the precomputed hash. In the
future, these structures can be extended to pass data through the
verification pipeline for reuse as appropriate. For instance, these
changes allow the sprout and sapling anchors to be propagated through
the state.
2020-11-21 01:16:14 -08:00
|
|
|
let rsp_rx = if let Some((_, old_rsp_tx)) = self.queued_blocks.get_mut(&prepared.hash) {
|
2020-11-20 19:58:07 -08:00
|
|
|
tracing::debug!("replacing older queued request with new request");
|
2020-10-26 13:54:19 -07:00
|
|
|
let (mut rsp_tx, rsp_rx) = oneshot::channel();
|
state: introduce PreparedBlock, FinalizedBlock
This change introduces two new types:
- `PreparedBlock`, representing a block which has undergone semantic
validation and has been prepared for contextual validation;
- `FinalizedBlock`, representing a block which is ready to be finalized
immediately;
and changes the `Request::CommitBlock`,`Request::CommitFinalizedBlock`
variants to use these types instead of their previous fields.
This change solves the problem of passing data between semantic
validation and contextual validation, and cleans up the state code by
allowing it to pass around a bundle of data. Previously, the state code
just passed around an `Arc<Block>`, which forced it to needlessly
recompute block hashes and other data, and was incompatible with the
already-known but not-yet-implemented data transfer requirements, namely
passing in the Sprout and Sapling anchors computed during contextual
validation.
This commit propagates the `PreparedBlock` and `FinalizedBlock` types
through the state code but only uses their data opportunistically, e.g.,
changing .hash() computations to use the precomputed hash. In the
future, these structures can be extended to pass data through the
verification pipeline for reuse as appropriate. For instance, these
changes allow the sprout and sapling anchors to be propagated through
the state.
2020-11-21 01:16:14 -08:00
|
|
|
std::mem::swap(old_rsp_tx, &mut rsp_tx);
|
2020-11-12 11:43:17 -08:00
|
|
|
let _ = rsp_tx.send(Err("replaced by newer request".into()));
|
2020-10-26 13:54:19 -07:00
|
|
|
rsp_rx
|
|
|
|
} else {
|
|
|
|
let (rsp_tx, rsp_rx) = oneshot::channel();
|
state: introduce PreparedBlock, FinalizedBlock
This change introduces two new types:
- `PreparedBlock`, representing a block which has undergone semantic
validation and has been prepared for contextual validation;
- `FinalizedBlock`, representing a block which is ready to be finalized
immediately;
and changes the `Request::CommitBlock`,`Request::CommitFinalizedBlock`
variants to use these types instead of their previous fields.
This change solves the problem of passing data between semantic
validation and contextual validation, and cleans up the state code by
allowing it to pass around a bundle of data. Previously, the state code
just passed around an `Arc<Block>`, which forced it to needlessly
recompute block hashes and other data, and was incompatible with the
already-known but not-yet-implemented data transfer requirements, namely
passing in the Sprout and Sapling anchors computed during contextual
validation.
This commit propagates the `PreparedBlock` and `FinalizedBlock` types
through the state code but only uses their data opportunistically, e.g.,
changing .hash() computations to use the precomputed hash. In the
future, these structures can be extended to pass data through the
verification pipeline for reuse as appropriate. For instance, these
changes allow the sprout and sapling anchors to be propagated through
the state.
2020-11-21 01:16:14 -08:00
|
|
|
self.queued_blocks.queue((prepared, rsp_tx));
|
2020-10-26 13:54:19 -07:00
|
|
|
rsp_rx
|
|
|
|
};
|
2020-10-07 20:07:32 -07:00
|
|
|
|
|
|
|
if !self.can_fork_chain_at(&parent_hash) {
|
2020-11-20 19:58:07 -08:00
|
|
|
tracing::trace!("unready to verify, returning early");
|
2020-10-26 13:54:19 -07:00
|
|
|
return rsp_rx;
|
2020-10-07 20:07:32 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
self.process_queued(parent_hash);
|
|
|
|
|
|
|
|
while self.mem.best_chain_len() > crate::constants::MAX_BLOCK_REORG_HEIGHT {
|
2020-11-20 19:58:07 -08:00
|
|
|
tracing::trace!("finalizing block past the reorg limit");
|
2022-09-06 02:32:54 -07:00
|
|
|
let finalized_with_trees = self.mem.finalize();
|
2020-11-17 15:26:21 -08:00
|
|
|
self.disk
|
2022-09-06 02:32:54 -07:00
|
|
|
.commit_finalized_direct(finalized_with_trees, "best non-finalized chain root")
|
2021-08-05 06:02:37 -07:00
|
|
|
.expect(
|
|
|
|
"expected that errors would not occur when writing to disk or updating note commitment and history trees",
|
|
|
|
);
|
2020-10-07 20:07:32 -07:00
|
|
|
}
|
|
|
|
|
2022-03-11 12:23:32 -08:00
|
|
|
let finalized_tip_height = self.disk.db().finalized_tip_height().expect(
|
2020-10-07 20:07:32 -07:00
|
|
|
"Finalized state must have at least one block before committing non-finalized state",
|
Reject connections from outdated peers (#2519)
* Simplify state service initialization in test
Use the test helper function to remove redundant code.
* Create `BestTipHeight` helper type
This type abstracts away the calculation of the best tip height based on
the finalized block height and the best non-finalized chain's tip.
* Add `best_tip_height` field to `StateService`
The receiver endpoint is currently ignored.
* Return receiver endpoint from service constructor
Make it available so that the best tip height can be watched.
* Update finalized height after finalizing blocks
After blocks from the queue are finalized and committed to disk, update
the finalized block height.
* Update best non-finalized height after validation
Update the value of the best non-finalized chain tip block height after
a new block is committed to the non-finalized state.
* Update finalized height after loading from disk
When `FinalizedState` is first created, it loads the state from
persistent storage, and the finalized tip height is updated. Therefore,
the `best_tip_height` must be notified of the initial value.
* Update the finalized height on checkpoint commit
When a checkpointed block is commited, it bypasses the non-finalized
state, so there's an extra place where the finalized height has to be
updated.
* Add `best_tip_height` to `Handshake` service
It can be configured using the `Builder::with_best_tip_height`. It's
currently not used, but it will be used to determine if a connection to
a remote peer should be rejected or not based on that peer's protocol
version.
* Require best tip height to init. `zebra_network`
Without it the handshake service can't properly enforce the minimum
network protocol version from peers. Zebrad obtains the best tip height
endpoint from `zebra_state`, and the test vectors simply use a dummy
endpoint that's fixed at the genesis height.
* Pass `best_tip_height` to proto. ver. negotiation
The protocol version negotiation code will reject connections to peers
if they are using an old protocol version. An old version is determined
based on the current known best chain tip height.
* Handle an optional height in `Version`
Fallback to the genesis height in `None` is specified.
* Reject connections to peers on old proto. versions
Avoid connecting to peers that are on protocol versions that don't
recognize a network update.
* Document why peers on old versions are rejected
Describe why it's a security issue above the check.
* Test if `BestTipHeight` starts with `None`
Check if initially there is no best tip height.
* Test if best tip height is max. of latest values
After applying a list of random updates where each one either sets the
finalized height or the non-finalized height, check that the best tip
height is the maximum of the most recently set finalized height and the
most recently set non-finalized height.
* Add `queue_and_commit_finalized` method
A small refactor to make testing easier. The handling of requests for
committing non-finalized and finalized blocks is now more consistent.
* Add `assert_block_can_be_validated` helper
Refactor to move into a separate method some assertions that are done
before a block is validated. This is to allow moving these assertions
more easily to simplify testing.
* Remove redundant PoW block assertion
It's also checked in
`zebra_state::service::check::block_is_contextually_valid`, and it was
getting in the way of tests that received a gossiped block before
finalizing enough blocks.
* Create a test strategy for test vector chain
Splits a chain loaded from the test vectors in two parts, containing the
blocks to finalize and the blocks to keep in the non-finalized state.
* Test committing blocks update best tip height
Create a mock blockchain state, with a chain of finalized blocks and a
chain of non-finalized blocks. Commit all the blocks appropriately, and
verify that the best tip height is updated.
Co-authored-by: teor <teor@riseup.net>
2021-08-08 16:52:52 -07:00
|
|
|
);
|
|
|
|
self.queued_blocks.prune_by_height(finalized_tip_height);
|
|
|
|
|
2022-03-16 17:37:44 -07:00
|
|
|
let tip_block_height = self.update_latest_chain_channels();
|
2021-10-07 06:42:38 -07:00
|
|
|
|
|
|
|
// update metrics using the best non-finalized tip
|
2022-03-16 17:37:44 -07:00
|
|
|
if let Some(tip_block_height) = tip_block_height {
|
2021-10-07 06:42:38 -07:00
|
|
|
metrics::gauge!(
|
|
|
|
"state.full_verifier.committed.block.height",
|
2022-05-31 20:53:51 -07:00
|
|
|
tip_block_height.0 as f64,
|
2021-10-07 06:42:38 -07:00
|
|
|
);
|
|
|
|
|
|
|
|
// This height gauge is updated for both fully verified and checkpoint blocks.
|
|
|
|
// These updates can't conflict, because the state makes sure that blocks
|
|
|
|
// are committed in order.
|
2022-05-31 20:53:51 -07:00
|
|
|
metrics::gauge!(
|
|
|
|
"zcash.chain.verified.block.height",
|
|
|
|
tip_block_height.0 as f64,
|
|
|
|
);
|
2021-10-07 06:42:38 -07:00
|
|
|
}
|
|
|
|
|
2022-03-16 17:37:44 -07:00
|
|
|
tracing::trace!("finished processing queued block");
|
|
|
|
rsp_rx
|
|
|
|
}
|
|
|
|
|
2022-06-13 18:22:16 -07:00
|
|
|
/// Update the [`LatestChainTip`], [`ChainTipChange`], and `best_chain_sender`
|
|
|
|
/// channels with the latest non-finalized [`ChainTipBlock`] and
|
|
|
|
/// [`Chain`][1].
|
2022-03-16 17:37:44 -07:00
|
|
|
///
|
2022-06-13 18:22:16 -07:00
|
|
|
/// Returns the latest non-finalized chain tip height, or `None` if the
|
|
|
|
/// non-finalized state is empty.
|
|
|
|
///
|
|
|
|
/// [1]: non_finalized_state::Chain
|
2022-03-16 17:37:44 -07:00
|
|
|
#[instrument(level = "debug", skip(self))]
|
|
|
|
fn update_latest_chain_channels(&mut self) -> Option<block::Height> {
|
|
|
|
let best_chain = self.mem.best_chain();
|
|
|
|
let tip_block = best_chain
|
|
|
|
.and_then(|chain| chain.tip_block())
|
|
|
|
.cloned()
|
|
|
|
.map(ChainTipBlock::from);
|
|
|
|
let tip_block_height = tip_block.as_ref().map(|block| block.height);
|
2022-03-11 05:58:22 -08:00
|
|
|
|
2022-03-16 17:37:44 -07:00
|
|
|
// The RPC service uses the ReadStateService, but it is not turned on by default.
|
2022-03-11 05:58:22 -08:00
|
|
|
if self.best_chain_sender.receiver_count() > 0 {
|
|
|
|
// If the final receiver was just dropped, ignore the error.
|
|
|
|
let _ = self.best_chain_sender.send(best_chain.cloned());
|
|
|
|
}
|
|
|
|
|
2021-08-29 19:38:41 -07:00
|
|
|
self.chain_tip_sender.set_best_non_finalized_tip(tip_block);
|
2020-10-26 13:54:19 -07:00
|
|
|
|
2022-03-16 17:37:44 -07:00
|
|
|
tip_block_height
|
2020-10-07 20:07:32 -07:00
|
|
|
}
|
|
|
|
|
state: introduce PreparedBlock, FinalizedBlock
This change introduces two new types:
- `PreparedBlock`, representing a block which has undergone semantic
validation and has been prepared for contextual validation;
- `FinalizedBlock`, representing a block which is ready to be finalized
immediately;
and changes the `Request::CommitBlock`,`Request::CommitFinalizedBlock`
variants to use these types instead of their previous fields.
This change solves the problem of passing data between semantic
validation and contextual validation, and cleans up the state code by
allowing it to pass around a bundle of data. Previously, the state code
just passed around an `Arc<Block>`, which forced it to needlessly
recompute block hashes and other data, and was incompatible with the
already-known but not-yet-implemented data transfer requirements, namely
passing in the Sprout and Sapling anchors computed during contextual
validation.
This commit propagates the `PreparedBlock` and `FinalizedBlock` types
through the state code but only uses their data opportunistically, e.g.,
changing .hash() computations to use the precomputed hash. In the
future, these structures can be extended to pass data through the
verification pipeline for reuse as appropriate. For instance, these
changes allow the sprout and sapling anchors to be propagated through
the state.
2020-11-21 01:16:14 -08:00
|
|
|
/// Run contextual validation on the prepared block and add it to the
|
|
|
|
/// non-finalized state if it is contextually valid.
|
2021-11-30 08:05:35 -08:00
|
|
|
#[tracing::instrument(level = "debug", skip(self, prepared))]
|
state: introduce PreparedBlock, FinalizedBlock
This change introduces two new types:
- `PreparedBlock`, representing a block which has undergone semantic
validation and has been prepared for contextual validation;
- `FinalizedBlock`, representing a block which is ready to be finalized
immediately;
and changes the `Request::CommitBlock`,`Request::CommitFinalizedBlock`
variants to use these types instead of their previous fields.
This change solves the problem of passing data between semantic
validation and contextual validation, and cleans up the state code by
allowing it to pass around a bundle of data. Previously, the state code
just passed around an `Arc<Block>`, which forced it to needlessly
recompute block hashes and other data, and was incompatible with the
already-known but not-yet-implemented data transfer requirements, namely
passing in the Sprout and Sapling anchors computed during contextual
validation.
This commit propagates the `PreparedBlock` and `FinalizedBlock` types
through the state code but only uses their data opportunistically, e.g.,
changing .hash() computations to use the precomputed hash. In the
future, these structures can be extended to pass data through the
verification pipeline for reuse as appropriate. For instance, these
changes allow the sprout and sapling anchors to be propagated through
the state.
2020-11-21 01:16:14 -08:00
|
|
|
fn validate_and_commit(&mut self, prepared: PreparedBlock) -> Result<(), CommitBlockError> {
|
|
|
|
self.check_contextual_validity(&prepared)?;
|
|
|
|
let parent_hash = prepared.block.header.previous_block_hash;
|
2020-10-07 20:07:32 -07:00
|
|
|
|
2022-03-11 12:23:32 -08:00
|
|
|
if self.disk.db().finalized_tip_hash() == parent_hash {
|
|
|
|
self.mem.commit_new_chain(prepared, self.disk.db())?;
|
2020-10-07 20:07:32 -07:00
|
|
|
} else {
|
2022-03-11 12:23:32 -08:00
|
|
|
self.mem.commit_block(prepared, self.disk.db())?;
|
2020-10-07 20:07:32 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns `true` if `hash` is a valid previous block hash for new non-finalized blocks.
|
|
|
|
fn can_fork_chain_at(&self, hash: &block::Hash) -> bool {
|
2022-03-11 12:23:32 -08:00
|
|
|
self.mem.any_chain_contains(hash) || &self.disk.db().finalized_tip_hash() == hash
|
2020-10-07 20:07:32 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Attempt to validate and commit all queued blocks whose parents have
|
|
|
|
/// recently arrived starting from `new_parent`, in breadth-first ordering.
|
2021-11-30 08:05:35 -08:00
|
|
|
#[tracing::instrument(level = "debug", skip(self, new_parent))]
|
2020-10-07 20:07:32 -07:00
|
|
|
fn process_queued(&mut self, new_parent: block::Hash) {
|
2021-07-13 16:12:46 -07:00
|
|
|
let mut new_parents: Vec<(block::Hash, Result<(), CloneError>)> =
|
|
|
|
vec![(new_parent, Ok(()))];
|
2020-10-07 20:07:32 -07:00
|
|
|
|
2021-07-13 16:12:46 -07:00
|
|
|
while let Some((parent_hash, parent_result)) = new_parents.pop() {
|
2020-11-20 19:54:57 -08:00
|
|
|
let queued_children = self.queued_blocks.dequeue_children(parent_hash);
|
2020-10-07 20:07:32 -07:00
|
|
|
|
state: introduce PreparedBlock, FinalizedBlock
This change introduces two new types:
- `PreparedBlock`, representing a block which has undergone semantic
validation and has been prepared for contextual validation;
- `FinalizedBlock`, representing a block which is ready to be finalized
immediately;
and changes the `Request::CommitBlock`,`Request::CommitFinalizedBlock`
variants to use these types instead of their previous fields.
This change solves the problem of passing data between semantic
validation and contextual validation, and cleans up the state code by
allowing it to pass around a bundle of data. Previously, the state code
just passed around an `Arc<Block>`, which forced it to needlessly
recompute block hashes and other data, and was incompatible with the
already-known but not-yet-implemented data transfer requirements, namely
passing in the Sprout and Sapling anchors computed during contextual
validation.
This commit propagates the `PreparedBlock` and `FinalizedBlock` types
through the state code but only uses their data opportunistically, e.g.,
changing .hash() computations to use the precomputed hash. In the
future, these structures can be extended to pass data through the
verification pipeline for reuse as appropriate. For instance, these
changes allow the sprout and sapling anchors to be propagated through
the state.
2020-11-21 01:16:14 -08:00
|
|
|
for (child, rsp_tx) in queued_children {
|
2020-11-22 19:38:25 -08:00
|
|
|
let child_hash = child.hash;
|
2021-07-13 16:12:46 -07:00
|
|
|
let result;
|
|
|
|
|
|
|
|
// If the block is invalid, reject any descendant blocks.
|
|
|
|
//
|
|
|
|
// At this point, we know that the block and all its descendants
|
|
|
|
// are invalid, because we checked all the consensus rules before
|
|
|
|
// committing the block to the non-finalized state.
|
|
|
|
// (These checks also bind the transaction data to the block
|
|
|
|
// header, using the transaction merkle tree and authorizing data
|
|
|
|
// commitment.)
|
|
|
|
if let Err(ref parent_error) = parent_result {
|
|
|
|
tracing::trace!(
|
|
|
|
?child_hash,
|
|
|
|
?parent_error,
|
|
|
|
"rejecting queued child due to parent error"
|
|
|
|
);
|
|
|
|
result = Err(parent_error.clone());
|
|
|
|
} else {
|
|
|
|
tracing::trace!(?child_hash, "validating queued child");
|
|
|
|
result = self.validate_and_commit(child).map_err(CloneError::from);
|
2021-10-07 06:42:38 -07:00
|
|
|
if result.is_ok() {
|
|
|
|
// Update the metrics if semantic and contextual validation passes
|
|
|
|
metrics::counter!("state.full_verifier.committed.block.count", 1);
|
|
|
|
metrics::counter!("zcash.chain.verified.block.total", 1);
|
|
|
|
}
|
2021-07-13 16:12:46 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
let _ = rsp_tx.send(result.clone().map(|()| child_hash).map_err(BoxError::from));
|
|
|
|
new_parents.push((child_hash, result));
|
2020-10-07 20:07:32 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
state: introduce PreparedBlock, FinalizedBlock
This change introduces two new types:
- `PreparedBlock`, representing a block which has undergone semantic
validation and has been prepared for contextual validation;
- `FinalizedBlock`, representing a block which is ready to be finalized
immediately;
and changes the `Request::CommitBlock`,`Request::CommitFinalizedBlock`
variants to use these types instead of their previous fields.
This change solves the problem of passing data between semantic
validation and contextual validation, and cleans up the state code by
allowing it to pass around a bundle of data. Previously, the state code
just passed around an `Arc<Block>`, which forced it to needlessly
recompute block hashes and other data, and was incompatible with the
already-known but not-yet-implemented data transfer requirements, namely
passing in the Sprout and Sapling anchors computed during contextual
validation.
This commit propagates the `PreparedBlock` and `FinalizedBlock` types
through the state code but only uses their data opportunistically, e.g.,
changing .hash() computations to use the precomputed hash. In the
future, these structures can be extended to pass data through the
verification pipeline for reuse as appropriate. For instance, these
changes allow the sprout and sapling anchors to be propagated through
the state.
2020-11-21 01:16:14 -08:00
|
|
|
/// Check that the prepared block is contextually valid for the configured
|
|
|
|
/// network, based on the committed finalized and non-finalized state.
|
2021-07-19 06:52:32 -07:00
|
|
|
///
|
|
|
|
/// Note: some additional contextual validity checks are performed by the
|
|
|
|
/// non-finalized [`Chain`].
|
state: introduce PreparedBlock, FinalizedBlock
This change introduces two new types:
- `PreparedBlock`, representing a block which has undergone semantic
validation and has been prepared for contextual validation;
- `FinalizedBlock`, representing a block which is ready to be finalized
immediately;
and changes the `Request::CommitBlock`,`Request::CommitFinalizedBlock`
variants to use these types instead of their previous fields.
This change solves the problem of passing data between semantic
validation and contextual validation, and cleans up the state code by
allowing it to pass around a bundle of data. Previously, the state code
just passed around an `Arc<Block>`, which forced it to needlessly
recompute block hashes and other data, and was incompatible with the
already-known but not-yet-implemented data transfer requirements, namely
passing in the Sprout and Sapling anchors computed during contextual
validation.
This commit propagates the `PreparedBlock` and `FinalizedBlock` types
through the state code but only uses their data opportunistically, e.g.,
changing .hash() computations to use the precomputed hash. In the
future, these structures can be extended to pass data through the
verification pipeline for reuse as appropriate. For instance, these
changes allow the sprout and sapling anchors to be propagated through
the state.
2020-11-21 01:16:14 -08:00
|
|
|
fn check_contextual_validity(
|
|
|
|
&mut self,
|
|
|
|
prepared: &PreparedBlock,
|
|
|
|
) -> Result<(), ValidateContextError> {
|
2020-12-10 16:23:26 -08:00
|
|
|
let relevant_chain = self.any_ancestor_blocks(prepared.block.header.previous_block_hash);
|
2020-11-22 17:56:38 -08:00
|
|
|
|
2021-07-14 05:06:43 -07:00
|
|
|
// Security: check proof of work before any other checks
|
2021-08-17 07:49:27 -07:00
|
|
|
check::block_is_valid_for_recent_chain(
|
state: introduce PreparedBlock, FinalizedBlock
This change introduces two new types:
- `PreparedBlock`, representing a block which has undergone semantic
validation and has been prepared for contextual validation;
- `FinalizedBlock`, representing a block which is ready to be finalized
immediately;
and changes the `Request::CommitBlock`,`Request::CommitFinalizedBlock`
variants to use these types instead of their previous fields.
This change solves the problem of passing data between semantic
validation and contextual validation, and cleans up the state code by
allowing it to pass around a bundle of data. Previously, the state code
just passed around an `Arc<Block>`, which forced it to needlessly
recompute block hashes and other data, and was incompatible with the
already-known but not-yet-implemented data transfer requirements, namely
passing in the Sprout and Sapling anchors computed during contextual
validation.
This commit propagates the `PreparedBlock` and `FinalizedBlock` types
through the state code but only uses their data opportunistically, e.g.,
changing .hash() computations to use the precomputed hash. In the
future, these structures can be extended to pass data through the
verification pipeline for reuse as appropriate. For instance, these
changes allow the sprout and sapling anchors to be propagated through
the state.
2020-11-21 01:16:14 -08:00
|
|
|
prepared,
|
2020-11-16 16:55:24 -08:00
|
|
|
self.network,
|
2022-03-11 12:23:32 -08:00
|
|
|
self.disk.db().finalized_tip_height(),
|
2020-11-22 17:56:38 -08:00
|
|
|
relevant_chain,
|
2020-11-16 16:55:24 -08:00
|
|
|
)?;
|
2020-11-15 21:46:16 -08:00
|
|
|
|
2022-03-11 12:23:32 -08:00
|
|
|
check::nullifier::no_duplicates_in_finalized_chain(prepared, self.disk.db())?;
|
2021-07-14 05:06:43 -07:00
|
|
|
|
2020-10-07 20:07:32 -07:00
|
|
|
Ok(())
|
2020-09-09 17:13:58 -07:00
|
|
|
}
|
2020-11-01 10:49:34 -08:00
|
|
|
|
|
|
|
/// Create a block locator for the current best chain.
|
|
|
|
fn block_locator(&self) -> Option<Vec<block::Hash>> {
|
2020-12-10 16:23:26 -08:00
|
|
|
let tip_height = self.best_tip()?.0;
|
2020-11-01 10:49:34 -08:00
|
|
|
|
|
|
|
let heights = crate::util::block_locator_heights(tip_height);
|
|
|
|
let mut hashes = Vec::with_capacity(heights.len());
|
|
|
|
|
|
|
|
for height in heights {
|
2020-11-30 13:30:37 -08:00
|
|
|
if let Some(hash) = self.best_hash(height) {
|
2020-11-01 10:49:34 -08:00
|
|
|
hashes.push(hash);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
Some(hashes)
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Return the tip of the current best chain.
|
2020-12-10 16:23:26 -08:00
|
|
|
pub fn best_tip(&self) -> Option<(block::Height, block::Hash)> {
|
2022-03-11 12:23:32 -08:00
|
|
|
self.mem.best_tip().or_else(|| self.disk.db().tip())
|
2020-11-01 10:49:34 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Return the depth of block `hash` in the current best chain.
|
2020-12-10 16:23:26 -08:00
|
|
|
pub fn best_depth(&self, hash: block::Hash) -> Option<u32> {
|
|
|
|
let tip = self.best_tip()?.0;
|
2020-11-30 13:30:37 -08:00
|
|
|
let height = self
|
|
|
|
.mem
|
|
|
|
.best_height_by_hash(hash)
|
2022-03-11 12:23:32 -08:00
|
|
|
.or_else(|| self.disk.db().height(hash))?;
|
2020-11-01 10:49:34 -08:00
|
|
|
|
|
|
|
Some(tip.0 - height.0)
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Return the hash for the block at `height` in the current best chain.
|
2020-11-30 13:30:37 -08:00
|
|
|
pub fn best_hash(&self, height: block::Height) -> Option<block::Hash> {
|
|
|
|
self.mem
|
|
|
|
.best_hash(height)
|
2022-03-11 12:23:32 -08:00
|
|
|
.or_else(|| self.disk.db().hash(height))
|
2020-11-30 13:30:37 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Return true if `hash` is in the current best chain.
|
2022-07-26 13:26:17 -07:00
|
|
|
#[allow(dead_code)]
|
2020-11-30 13:30:37 -08:00
|
|
|
pub fn best_chain_contains(&self, hash: block::Hash) -> bool {
|
2022-07-26 13:26:17 -07:00
|
|
|
read::chain_contains_hash(self.mem.best_chain(), self.disk.db(), hash)
|
2020-11-30 13:30:37 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Return the height for the block at `hash`, if `hash` is in the best chain.
|
2022-07-26 13:26:17 -07:00
|
|
|
#[allow(dead_code)]
|
2020-11-30 13:30:37 -08:00
|
|
|
pub fn best_height_by_hash(&self, hash: block::Hash) -> Option<block::Height> {
|
2022-07-26 13:26:17 -07:00
|
|
|
read::height_by_hash(self.mem.best_chain(), self.disk.db(), hash)
|
2020-11-01 10:49:34 -08:00
|
|
|
}
|
|
|
|
|
2020-11-15 18:22:53 -08:00
|
|
|
/// Return the height for the block at `hash` in any chain.
|
2020-11-30 13:30:37 -08:00
|
|
|
pub fn any_height_by_hash(&self, hash: block::Hash) -> Option<block::Height> {
|
2020-11-15 18:22:53 -08:00
|
|
|
self.mem
|
2020-11-30 13:30:37 -08:00
|
|
|
.any_height_by_hash(hash)
|
2022-03-11 12:23:32 -08:00
|
|
|
.or_else(|| self.disk.db().height(hash))
|
2020-11-15 18:22:53 -08:00
|
|
|
}
|
|
|
|
|
2022-06-13 18:22:16 -07:00
|
|
|
/// Return the [`transparent::Utxo`] pointed to by `outpoint`, if it exists
|
|
|
|
/// in any chain, or in any pending block.
|
2022-04-08 15:42:05 -07:00
|
|
|
///
|
|
|
|
/// Some of the returned UTXOs may be invalid, because:
|
|
|
|
/// - they are not in the best chain, or
|
|
|
|
/// - their block fails contextual validation.
|
2021-07-11 19:49:33 -07:00
|
|
|
pub fn any_utxo(&self, outpoint: &transparent::OutPoint) -> Option<transparent::Utxo> {
|
2022-07-21 16:15:22 -07:00
|
|
|
// We ignore any UTXOs in FinalizedState.queued_by_prev_hash,
|
|
|
|
// because it is only used during checkpoint verification.
|
2020-11-20 22:50:09 -08:00
|
|
|
self.mem
|
2020-12-10 16:23:26 -08:00
|
|
|
.any_utxo(outpoint)
|
2020-11-20 22:50:09 -08:00
|
|
|
.or_else(|| self.queued_blocks.utxo(outpoint))
|
2022-04-12 10:21:46 -07:00
|
|
|
.or_else(|| {
|
|
|
|
self.disk
|
|
|
|
.db()
|
|
|
|
.utxo(outpoint)
|
|
|
|
.map(|ordered_utxo| ordered_utxo.utxo)
|
|
|
|
})
|
2020-11-01 10:49:34 -08:00
|
|
|
}
|
2020-11-15 18:22:53 -08:00
|
|
|
|
|
|
|
/// Return an iterator over the relevant chain of the block identified by
|
2021-07-01 16:21:22 -07:00
|
|
|
/// `hash`, in order from the largest height to the genesis block.
|
2020-11-15 18:22:53 -08:00
|
|
|
///
|
|
|
|
/// The block identified by `hash` is included in the chain of blocks yielded
|
2020-12-10 16:23:26 -08:00
|
|
|
/// by the iterator. `hash` can come from any chain.
|
2022-03-10 12:40:48 -08:00
|
|
|
pub fn any_ancestor_blocks(&self, hash: block::Hash) -> block_iter::Iter<'_> {
|
|
|
|
block_iter::Iter {
|
2020-11-15 18:22:53 -08:00
|
|
|
service: self,
|
2022-03-10 12:40:48 -08:00
|
|
|
state: block_iter::IterState::NonFinalized(hash),
|
2020-11-15 18:22:53 -08:00
|
|
|
}
|
|
|
|
}
|
2020-11-30 13:30:37 -08:00
|
|
|
|
Reject connections from outdated peers (#2519)
* Simplify state service initialization in test
Use the test helper function to remove redundant code.
* Create `BestTipHeight` helper type
This type abstracts away the calculation of the best tip height based on
the finalized block height and the best non-finalized chain's tip.
* Add `best_tip_height` field to `StateService`
The receiver endpoint is currently ignored.
* Return receiver endpoint from service constructor
Make it available so that the best tip height can be watched.
* Update finalized height after finalizing blocks
After blocks from the queue are finalized and committed to disk, update
the finalized block height.
* Update best non-finalized height after validation
Update the value of the best non-finalized chain tip block height after
a new block is committed to the non-finalized state.
* Update finalized height after loading from disk
When `FinalizedState` is first created, it loads the state from
persistent storage, and the finalized tip height is updated. Therefore,
the `best_tip_height` must be notified of the initial value.
* Update the finalized height on checkpoint commit
When a checkpointed block is commited, it bypasses the non-finalized
state, so there's an extra place where the finalized height has to be
updated.
* Add `best_tip_height` to `Handshake` service
It can be configured using the `Builder::with_best_tip_height`. It's
currently not used, but it will be used to determine if a connection to
a remote peer should be rejected or not based on that peer's protocol
version.
* Require best tip height to init. `zebra_network`
Without it the handshake service can't properly enforce the minimum
network protocol version from peers. Zebrad obtains the best tip height
endpoint from `zebra_state`, and the test vectors simply use a dummy
endpoint that's fixed at the genesis height.
* Pass `best_tip_height` to proto. ver. negotiation
The protocol version negotiation code will reject connections to peers
if they are using an old protocol version. An old version is determined
based on the current known best chain tip height.
* Handle an optional height in `Version`
Fallback to the genesis height in `None` is specified.
* Reject connections to peers on old proto. versions
Avoid connecting to peers that are on protocol versions that don't
recognize a network update.
* Document why peers on old versions are rejected
Describe why it's a security issue above the check.
* Test if `BestTipHeight` starts with `None`
Check if initially there is no best tip height.
* Test if best tip height is max. of latest values
After applying a list of random updates where each one either sets the
finalized height or the non-finalized height, check that the best tip
height is the maximum of the most recently set finalized height and the
most recently set non-finalized height.
* Add `queue_and_commit_finalized` method
A small refactor to make testing easier. The handling of requests for
committing non-finalized and finalized blocks is now more consistent.
* Add `assert_block_can_be_validated` helper
Refactor to move into a separate method some assertions that are done
before a block is validated. This is to allow moving these assertions
more easily to simplify testing.
* Remove redundant PoW block assertion
It's also checked in
`zebra_state::service::check::block_is_contextually_valid`, and it was
getting in the way of tests that received a gossiped block before
finalizing enough blocks.
* Create a test strategy for test vector chain
Splits a chain loaded from the test vectors in two parts, containing the
blocks to finalize and the blocks to keep in the non-finalized state.
* Test committing blocks update best tip height
Create a mock blockchain state, with a chain of finalized blocks and a
chain of non-finalized blocks. Commit all the blocks appropriately, and
verify that the best tip height is updated.
Co-authored-by: teor <teor@riseup.net>
2021-08-08 16:52:52 -07:00
|
|
|
/// Assert some assumptions about the prepared `block` before it is validated.
|
|
|
|
fn assert_block_can_be_validated(&self, block: &PreparedBlock) {
|
|
|
|
// required by validate_and_commit, moved here to make testing easier
|
|
|
|
assert!(
|
|
|
|
block.height > self.network.mandatory_checkpoint_height(),
|
|
|
|
"invalid non-finalized block height: the canopy checkpoint is mandatory, pre-canopy \
|
|
|
|
blocks, and the canopy activation block, must be committed to the state as finalized \
|
|
|
|
blocks"
|
|
|
|
);
|
|
|
|
}
|
2020-11-15 18:22:53 -08:00
|
|
|
}
|
|
|
|
|
2022-03-11 05:58:22 -08:00
|
|
|
impl ReadStateService {
|
|
|
|
/// Creates a new read-only state service, using the provided finalized state.
|
|
|
|
///
|
|
|
|
/// Returns the newly created service,
|
|
|
|
/// and a watch channel for updating its best non-finalized chain.
|
|
|
|
pub(crate) fn new(disk: &FinalizedState) -> (Self, watch::Sender<Option<Arc<Chain>>>) {
|
|
|
|
let (best_chain_sender, best_chain_receiver) = watch::channel(None);
|
|
|
|
|
|
|
|
let read_only_service = Self {
|
2022-03-11 12:23:32 -08:00
|
|
|
db: disk.db().clone(),
|
2022-03-16 17:37:44 -07:00
|
|
|
best_chain_receiver: WatchReceiver::new(best_chain_receiver),
|
2022-03-11 05:58:22 -08:00
|
|
|
network: disk.network(),
|
|
|
|
};
|
|
|
|
|
|
|
|
tracing::info!("created new read-only state service");
|
|
|
|
|
|
|
|
(read_only_service, best_chain_sender)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-09 17:13:58 -07:00
|
|
|
impl Service<Request> for StateService {
|
|
|
|
type Response = Response;
|
|
|
|
type Error = BoxError;
|
|
|
|
type Future =
|
|
|
|
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
|
|
|
|
|
|
|
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
2020-10-14 14:06:32 -07:00
|
|
|
let now = Instant::now();
|
|
|
|
|
|
|
|
if self.last_prune + Self::PRUNE_INTERVAL < now {
|
2020-12-10 16:23:26 -08:00
|
|
|
let tip = self.best_tip();
|
2020-11-25 23:26:10 -08:00
|
|
|
let old_len = self.pending_utxos.len();
|
|
|
|
|
2020-10-14 14:06:32 -07:00
|
|
|
self.pending_utxos.prune();
|
|
|
|
self.last_prune = now;
|
2020-11-25 23:26:10 -08:00
|
|
|
|
|
|
|
let new_len = self.pending_utxos.len();
|
|
|
|
let prune_count = old_len
|
|
|
|
.checked_sub(new_len)
|
|
|
|
.expect("prune does not add any utxo requests");
|
|
|
|
if prune_count > 0 {
|
2022-01-28 14:12:19 -08:00
|
|
|
tracing::debug!(
|
2020-11-25 23:26:10 -08:00
|
|
|
?old_len,
|
|
|
|
?new_len,
|
|
|
|
?prune_count,
|
|
|
|
?tip,
|
|
|
|
"pruned utxo requests"
|
|
|
|
);
|
|
|
|
} else {
|
|
|
|
tracing::debug!(len = ?old_len, ?tip, "no utxo requests needed pruning");
|
|
|
|
}
|
2020-10-14 14:06:32 -07:00
|
|
|
}
|
|
|
|
|
2020-09-09 17:13:58 -07:00
|
|
|
Poll::Ready(Ok(()))
|
|
|
|
}
|
|
|
|
|
2020-11-20 15:12:30 -08:00
|
|
|
#[instrument(name = "state", skip(self, req))]
|
2020-09-09 17:13:58 -07:00
|
|
|
fn call(&mut self, req: Request) -> Self::Future {
|
|
|
|
match req {
|
state: introduce PreparedBlock, FinalizedBlock
This change introduces two new types:
- `PreparedBlock`, representing a block which has undergone semantic
validation and has been prepared for contextual validation;
- `FinalizedBlock`, representing a block which is ready to be finalized
immediately;
and changes the `Request::CommitBlock`,`Request::CommitFinalizedBlock`
variants to use these types instead of their previous fields.
This change solves the problem of passing data between semantic
validation and contextual validation, and cleans up the state code by
allowing it to pass around a bundle of data. Previously, the state code
just passed around an `Arc<Block>`, which forced it to needlessly
recompute block hashes and other data, and was incompatible with the
already-known but not-yet-implemented data transfer requirements, namely
passing in the Sprout and Sapling anchors computed during contextual
validation.
This commit propagates the `PreparedBlock` and `FinalizedBlock` types
through the state code but only uses their data opportunistically, e.g.,
changing .hash() computations to use the precomputed hash. In the
future, these structures can be extended to pass data through the
verification pipeline for reuse as appropriate. For instance, these
changes allow the sprout and sapling anchors to be propagated through
the state.
2020-11-21 01:16:14 -08:00
|
|
|
Request::CommitBlock(prepared) => {
|
2022-03-17 11:48:13 -07:00
|
|
|
metrics::counter!(
|
|
|
|
"state.requests",
|
|
|
|
1,
|
|
|
|
"service" => "state",
|
|
|
|
"type" => "commit_block",
|
|
|
|
);
|
2020-11-20 13:27:57 -08:00
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
let timer = CodeTimer::start();
|
|
|
|
|
Reject connections from outdated peers (#2519)
* Simplify state service initialization in test
Use the test helper function to remove redundant code.
* Create `BestTipHeight` helper type
This type abstracts away the calculation of the best tip height based on
the finalized block height and the best non-finalized chain's tip.
* Add `best_tip_height` field to `StateService`
The receiver endpoint is currently ignored.
* Return receiver endpoint from service constructor
Make it available so that the best tip height can be watched.
* Update finalized height after finalizing blocks
After blocks from the queue are finalized and committed to disk, update
the finalized block height.
* Update best non-finalized height after validation
Update the value of the best non-finalized chain tip block height after
a new block is committed to the non-finalized state.
* Update finalized height after loading from disk
When `FinalizedState` is first created, it loads the state from
persistent storage, and the finalized tip height is updated. Therefore,
the `best_tip_height` must be notified of the initial value.
* Update the finalized height on checkpoint commit
When a checkpointed block is commited, it bypasses the non-finalized
state, so there's an extra place where the finalized height has to be
updated.
* Add `best_tip_height` to `Handshake` service
It can be configured using the `Builder::with_best_tip_height`. It's
currently not used, but it will be used to determine if a connection to
a remote peer should be rejected or not based on that peer's protocol
version.
* Require best tip height to init. `zebra_network`
Without it the handshake service can't properly enforce the minimum
network protocol version from peers. Zebrad obtains the best tip height
endpoint from `zebra_state`, and the test vectors simply use a dummy
endpoint that's fixed at the genesis height.
* Pass `best_tip_height` to proto. ver. negotiation
The protocol version negotiation code will reject connections to peers
if they are using an old protocol version. An old version is determined
based on the current known best chain tip height.
* Handle an optional height in `Version`
Fallback to the genesis height in `None` is specified.
* Reject connections to peers on old proto. versions
Avoid connecting to peers that are on protocol versions that don't
recognize a network update.
* Document why peers on old versions are rejected
Describe why it's a security issue above the check.
* Test if `BestTipHeight` starts with `None`
Check if initially there is no best tip height.
* Test if best tip height is max. of latest values
After applying a list of random updates where each one either sets the
finalized height or the non-finalized height, check that the best tip
height is the maximum of the most recently set finalized height and the
most recently set non-finalized height.
* Add `queue_and_commit_finalized` method
A small refactor to make testing easier. The handling of requests for
committing non-finalized and finalized blocks is now more consistent.
* Add `assert_block_can_be_validated` helper
Refactor to move into a separate method some assertions that are done
before a block is validated. This is to allow moving these assertions
more easily to simplify testing.
* Remove redundant PoW block assertion
It's also checked in
`zebra_state::service::check::block_is_contextually_valid`, and it was
getting in the way of tests that received a gossiped block before
finalizing enough blocks.
* Create a test strategy for test vector chain
Splits a chain loaded from the test vectors in two parts, containing the
blocks to finalize and the blocks to keep in the non-finalized state.
* Test committing blocks update best tip height
Create a mock blockchain state, with a chain of finalized blocks and a
chain of non-finalized blocks. Commit all the blocks appropriately, and
verify that the best tip height is updated.
Co-authored-by: teor <teor@riseup.net>
2021-08-08 16:52:52 -07:00
|
|
|
self.assert_block_can_be_validated(&prepared);
|
|
|
|
|
2021-07-19 06:52:32 -07:00
|
|
|
self.pending_utxos
|
|
|
|
.check_against_ordered(&prepared.new_outputs);
|
2022-07-21 16:16:41 -07:00
|
|
|
|
|
|
|
// # Performance
|
|
|
|
//
|
|
|
|
// Allow other async tasks to make progress while blocks are being verified
|
|
|
|
// and written to disk. But wait for the blocks to finish committing,
|
|
|
|
// so that `StateService` multi-block queries always observe a consistent state.
|
|
|
|
//
|
|
|
|
// Since each block is spawned into its own task,
|
|
|
|
// there shouldn't be any other code running in the same task,
|
|
|
|
// so we don't need to worry about blocking it:
|
2022-07-25 15:33:00 -07:00
|
|
|
// https://docs.rs/tokio/latest/tokio/task/fn.block_in_place.html
|
|
|
|
let span = Span::current();
|
|
|
|
let rsp_rx = tokio::task::block_in_place(move || {
|
|
|
|
span.in_scope(|| self.queue_and_commit_non_finalized(prepared))
|
|
|
|
});
|
2020-10-07 20:07:32 -07:00
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
// The work is all done, the future just waits on a channel for the result
|
|
|
|
timer.finish(module_path!(), line!(), "CommitBlock");
|
|
|
|
|
|
|
|
let span = Span::current();
|
2020-10-07 20:07:32 -07:00
|
|
|
async move {
|
|
|
|
rsp_rx
|
|
|
|
.await
|
2022-03-31 03:26:21 -07:00
|
|
|
.map_err(|_recv_error| {
|
|
|
|
BoxError::from("block was dropped from the state CommitBlock queue")
|
|
|
|
})
|
|
|
|
// TODO: replace with Result::flatten once it stabilises
|
|
|
|
// https://github.com/rust-lang/rust/issues/70142
|
|
|
|
.and_then(convert::identity)
|
2020-10-07 20:07:32 -07:00
|
|
|
.map(Response::Committed)
|
2020-10-09 01:37:24 -07:00
|
|
|
.map_err(Into::into)
|
2020-10-07 20:07:32 -07:00
|
|
|
}
|
2022-07-25 15:33:00 -07:00
|
|
|
.instrument(span)
|
2020-10-07 20:07:32 -07:00
|
|
|
.boxed()
|
|
|
|
}
|
state: introduce PreparedBlock, FinalizedBlock
This change introduces two new types:
- `PreparedBlock`, representing a block which has undergone semantic
validation and has been prepared for contextual validation;
- `FinalizedBlock`, representing a block which is ready to be finalized
immediately;
and changes the `Request::CommitBlock`,`Request::CommitFinalizedBlock`
variants to use these types instead of their previous fields.
This change solves the problem of passing data between semantic
validation and contextual validation, and cleans up the state code by
allowing it to pass around a bundle of data. Previously, the state code
just passed around an `Arc<Block>`, which forced it to needlessly
recompute block hashes and other data, and was incompatible with the
already-known but not-yet-implemented data transfer requirements, namely
passing in the Sprout and Sapling anchors computed during contextual
validation.
This commit propagates the `PreparedBlock` and `FinalizedBlock` types
through the state code but only uses their data opportunistically, e.g.,
changing .hash() computations to use the precomputed hash. In the
future, these structures can be extended to pass data through the
verification pipeline for reuse as appropriate. For instance, these
changes allow the sprout and sapling anchors to be propagated through
the state.
2020-11-21 01:16:14 -08:00
|
|
|
Request::CommitFinalizedBlock(finalized) => {
|
2022-03-17 11:48:13 -07:00
|
|
|
metrics::counter!(
|
|
|
|
"state.requests",
|
|
|
|
1,
|
|
|
|
"service" => "state",
|
|
|
|
"type" => "commit_finalized_block",
|
|
|
|
);
|
2020-11-20 13:27:57 -08:00
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
let timer = CodeTimer::start();
|
|
|
|
|
2022-09-07 16:42:52 -07:00
|
|
|
// # Consensus
|
|
|
|
//
|
|
|
|
// A non-finalized block verification could have called AwaitUtxo
|
|
|
|
// before this finalized block arrived in the state.
|
|
|
|
// So we need to check for pending UTXOs here for non-finalized blocks,
|
|
|
|
// even though it is redundant for most finalized blocks.
|
|
|
|
// (Finalized blocks are verified using block hash checkpoints
|
|
|
|
// and transaction merkle tree block header commitments.)
|
2020-11-23 12:02:57 -08:00
|
|
|
self.pending_utxos.check_against(&finalized.new_outputs);
|
2022-07-21 16:16:41 -07:00
|
|
|
|
|
|
|
// # Performance
|
|
|
|
//
|
|
|
|
// Allow other async tasks to make progress while blocks are being verified
|
2022-07-22 09:25:32 -07:00
|
|
|
// and written to disk.
|
2022-07-21 16:16:41 -07:00
|
|
|
//
|
|
|
|
// See the note in `CommitBlock` for more details.
|
2022-07-25 15:33:00 -07:00
|
|
|
let span = Span::current();
|
|
|
|
let rsp_rx = tokio::task::block_in_place(move || {
|
|
|
|
span.in_scope(|| self.queue_and_commit_finalized(finalized))
|
|
|
|
});
|
|
|
|
|
|
|
|
// The work is all done, the future just waits on a channel for the result
|
|
|
|
timer.finish(module_path!(), line!(), "CommitFinalizedBlock");
|
2020-09-09 21:15:08 -07:00
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
let span = Span::current();
|
2020-09-09 23:07:47 -07:00
|
|
|
async move {
|
|
|
|
rsp_rx
|
|
|
|
.await
|
2022-03-31 03:26:21 -07:00
|
|
|
.map_err(|_recv_error| {
|
|
|
|
BoxError::from(
|
|
|
|
"block was dropped from the state CommitFinalizedBlock queue",
|
|
|
|
)
|
|
|
|
})
|
|
|
|
// TODO: replace with Result::flatten once it stabilises
|
|
|
|
// https://github.com/rust-lang/rust/issues/70142
|
|
|
|
.and_then(convert::identity)
|
2020-09-10 10:52:51 -07:00
|
|
|
.map(Response::Committed)
|
2020-10-09 01:37:24 -07:00
|
|
|
.map_err(Into::into)
|
2020-09-09 23:07:47 -07:00
|
|
|
}
|
2022-07-25 15:33:00 -07:00
|
|
|
.instrument(span)
|
2020-09-09 23:07:47 -07:00
|
|
|
.boxed()
|
2020-09-09 21:15:08 -07:00
|
|
|
}
|
|
|
|
Request::Depth(hash) => {
|
2022-03-17 11:48:13 -07:00
|
|
|
metrics::counter!(
|
|
|
|
"state.requests",
|
|
|
|
1,
|
|
|
|
"service" => "state",
|
|
|
|
"type" => "depth",
|
|
|
|
);
|
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
let timer = CodeTimer::start();
|
|
|
|
|
|
|
|
// TODO: move this work into the future, like Block and Transaction?
|
2022-09-05 11:58:45 -07:00
|
|
|
// move disk reads to a blocking thread (#2188)
|
2022-07-22 09:25:32 -07:00
|
|
|
let rsp = Ok(Response::Depth(self.best_depth(hash)));
|
2022-07-25 15:33:00 -07:00
|
|
|
|
|
|
|
// The work is all done, the future just returns the result.
|
|
|
|
timer.finish(module_path!(), line!(), "Depth");
|
|
|
|
|
2020-10-24 17:09:50 -07:00
|
|
|
async move { rsp }.boxed()
|
2020-09-09 21:15:08 -07:00
|
|
|
}
|
2022-07-22 09:25:32 -07:00
|
|
|
// TODO: consider spawning small reads into blocking tasks,
|
|
|
|
// because the database can do large cleanups during small reads.
|
2020-09-09 21:15:08 -07:00
|
|
|
Request::Tip => {
|
2022-03-17 11:48:13 -07:00
|
|
|
metrics::counter!(
|
|
|
|
"state.requests",
|
|
|
|
1,
|
|
|
|
"service" => "state",
|
|
|
|
"type" => "tip",
|
|
|
|
);
|
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
let timer = CodeTimer::start();
|
|
|
|
|
|
|
|
// TODO: move this work into the future, like Block and Transaction?
|
2022-09-05 11:58:45 -07:00
|
|
|
// move disk reads to a blocking thread (#2188)
|
2022-07-22 09:25:32 -07:00
|
|
|
let rsp = Ok(Response::Tip(self.best_tip()));
|
2022-07-25 15:33:00 -07:00
|
|
|
|
|
|
|
// The work is all done, the future just returns the result.
|
|
|
|
timer.finish(module_path!(), line!(), "Tip");
|
|
|
|
|
2020-10-24 17:09:50 -07:00
|
|
|
async move { rsp }.boxed()
|
2020-09-09 21:15:08 -07:00
|
|
|
}
|
|
|
|
Request::BlockLocator => {
|
2022-03-17 11:48:13 -07:00
|
|
|
metrics::counter!(
|
|
|
|
"state.requests",
|
|
|
|
1,
|
|
|
|
"service" => "state",
|
|
|
|
"type" => "block_locator",
|
|
|
|
);
|
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
let timer = CodeTimer::start();
|
|
|
|
|
|
|
|
// TODO: move this work into the future, like Block and Transaction?
|
2022-09-05 11:58:45 -07:00
|
|
|
// move disk reads to a blocking thread (#2188)
|
2022-07-22 09:25:32 -07:00
|
|
|
let rsp = Ok(Response::BlockLocator(
|
|
|
|
self.block_locator().unwrap_or_default(),
|
|
|
|
));
|
2022-07-25 15:33:00 -07:00
|
|
|
|
|
|
|
// The work is all done, the future just returns the result.
|
|
|
|
timer.finish(module_path!(), line!(), "BlockLocator");
|
|
|
|
|
2020-11-01 10:49:34 -08:00
|
|
|
async move { rsp }.boxed()
|
|
|
|
}
|
|
|
|
Request::Transaction(hash) => {
|
2022-03-17 11:48:13 -07:00
|
|
|
metrics::counter!(
|
|
|
|
"state.requests",
|
|
|
|
1,
|
|
|
|
"service" => "state",
|
|
|
|
"type" => "transaction",
|
|
|
|
);
|
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
let timer = CodeTimer::start();
|
|
|
|
|
2022-07-22 09:25:32 -07:00
|
|
|
// Prepare data for concurrent execution
|
|
|
|
let best_chain = self.mem.best_chain().cloned();
|
|
|
|
let db = self.disk.db().clone();
|
|
|
|
|
|
|
|
// # Performance
|
|
|
|
//
|
|
|
|
// Allow other async tasks to make progress while the transaction is being read from disk.
|
2022-07-25 15:33:00 -07:00
|
|
|
let span = Span::current();
|
2022-07-22 09:25:32 -07:00
|
|
|
tokio::task::spawn_blocking(move || {
|
2022-07-25 15:33:00 -07:00
|
|
|
span.in_scope(|| {
|
|
|
|
let rsp = read::transaction(best_chain, &db, hash);
|
|
|
|
|
|
|
|
// The work is done in the future.
|
|
|
|
timer.finish(module_path!(), line!(), "Transaction");
|
2022-07-22 09:25:32 -07:00
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
Ok(Response::Transaction(rsp.map(|(tx, _height)| tx)))
|
|
|
|
})
|
2022-07-22 09:25:32 -07:00
|
|
|
})
|
|
|
|
.map(|join_result| join_result.expect("panic in Request::Transaction"))
|
|
|
|
.boxed()
|
2020-09-09 21:15:08 -07:00
|
|
|
}
|
2020-09-10 10:19:45 -07:00
|
|
|
Request::Block(hash_or_height) => {
|
2022-03-17 11:48:13 -07:00
|
|
|
metrics::counter!(
|
|
|
|
"state.requests",
|
|
|
|
1,
|
|
|
|
"service" => "state",
|
|
|
|
"type" => "block",
|
|
|
|
);
|
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
let timer = CodeTimer::start();
|
|
|
|
|
2022-07-22 09:25:32 -07:00
|
|
|
// Prepare data for concurrent execution
|
|
|
|
let best_chain = self.mem.best_chain().cloned();
|
|
|
|
let db = self.disk.db().clone();
|
|
|
|
|
|
|
|
// # Performance
|
|
|
|
//
|
|
|
|
// Allow other async tasks to make progress while the block is being read from disk.
|
2022-07-25 15:33:00 -07:00
|
|
|
let span = Span::current();
|
2022-07-22 09:25:32 -07:00
|
|
|
tokio::task::spawn_blocking(move || {
|
2022-07-25 15:33:00 -07:00
|
|
|
span.in_scope(move || {
|
|
|
|
let rsp = read::block(best_chain, &db, hash_or_height);
|
2022-07-22 09:25:32 -07:00
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
// The work is done in the future.
|
|
|
|
timer.finish(module_path!(), line!(), "Block");
|
|
|
|
|
|
|
|
Ok(Response::Block(rsp))
|
|
|
|
})
|
2022-07-22 09:25:32 -07:00
|
|
|
})
|
|
|
|
.map(|join_result| join_result.expect("panic in Request::Block"))
|
|
|
|
.boxed()
|
2020-09-10 10:19:45 -07:00
|
|
|
}
|
2020-10-14 14:06:32 -07:00
|
|
|
Request::AwaitUtxo(outpoint) => {
|
2022-03-17 11:48:13 -07:00
|
|
|
metrics::counter!(
|
|
|
|
"state.requests",
|
|
|
|
1,
|
|
|
|
"service" => "state",
|
|
|
|
"type" => "await_utxo",
|
|
|
|
);
|
2020-11-20 13:27:57 -08:00
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
let timer = CodeTimer::start();
|
|
|
|
let span = Span::current();
|
|
|
|
|
2020-10-14 14:06:32 -07:00
|
|
|
let fut = self.pending_utxos.queue(outpoint);
|
|
|
|
|
2022-09-05 11:58:45 -07:00
|
|
|
// TODO: move disk reads (in `any_utxo()`) to a blocking thread (#2188)
|
2020-12-10 16:23:26 -08:00
|
|
|
if let Some(utxo) = self.any_utxo(&outpoint) {
|
state: introduce PreparedBlock, FinalizedBlock
This change introduces two new types:
- `PreparedBlock`, representing a block which has undergone semantic
validation and has been prepared for contextual validation;
- `FinalizedBlock`, representing a block which is ready to be finalized
immediately;
and changes the `Request::CommitBlock`,`Request::CommitFinalizedBlock`
variants to use these types instead of their previous fields.
This change solves the problem of passing data between semantic
validation and contextual validation, and cleans up the state code by
allowing it to pass around a bundle of data. Previously, the state code
just passed around an `Arc<Block>`, which forced it to needlessly
recompute block hashes and other data, and was incompatible with the
already-known but not-yet-implemented data transfer requirements, namely
passing in the Sprout and Sapling anchors computed during contextual
validation.
This commit propagates the `PreparedBlock` and `FinalizedBlock` types
through the state code but only uses their data opportunistically, e.g.,
changing .hash() computations to use the precomputed hash. In the
future, these structures can be extended to pass data through the
verification pipeline for reuse as appropriate. For instance, these
changes allow the sprout and sapling anchors to be propagated through
the state.
2020-11-21 01:16:14 -08:00
|
|
|
self.pending_utxos.respond(&outpoint, utxo);
|
2020-10-14 14:06:32 -07:00
|
|
|
}
|
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
// The future waits on a channel for a response.
|
|
|
|
timer.finish(module_path!(), line!(), "AwaitUtxo");
|
|
|
|
|
|
|
|
fut.instrument(span).boxed()
|
2020-10-14 14:06:32 -07:00
|
|
|
}
|
2020-11-30 13:30:37 -08:00
|
|
|
Request::FindBlockHashes { known_blocks, stop } => {
|
2022-03-17 11:48:13 -07:00
|
|
|
metrics::counter!(
|
|
|
|
"state.requests",
|
|
|
|
1,
|
|
|
|
"service" => "state",
|
|
|
|
"type" => "find_block_hashes",
|
|
|
|
);
|
|
|
|
|
2022-07-26 13:21:15 -07:00
|
|
|
const MAX_FIND_BLOCK_HASHES_RESULTS: u32 = 500;
|
2022-07-25 15:33:00 -07:00
|
|
|
|
|
|
|
let timer = CodeTimer::start();
|
|
|
|
|
2022-07-26 13:26:17 -07:00
|
|
|
// Prepare data for concurrent execution
|
|
|
|
let best_chain = self.mem.best_chain().cloned();
|
|
|
|
let db = self.disk.db().clone();
|
2022-07-25 15:33:00 -07:00
|
|
|
|
2022-07-26 13:26:17 -07:00
|
|
|
// # Performance
|
|
|
|
//
|
|
|
|
// Allow other async tasks to make progress while the block is being read from disk.
|
|
|
|
let span = Span::current();
|
|
|
|
tokio::task::spawn_blocking(move || {
|
|
|
|
span.in_scope(move || {
|
|
|
|
let res = read::find_chain_hashes(
|
|
|
|
best_chain,
|
|
|
|
&db,
|
|
|
|
known_blocks,
|
|
|
|
stop,
|
|
|
|
MAX_FIND_BLOCK_HASHES_RESULTS,
|
|
|
|
);
|
|
|
|
|
|
|
|
// The work is done in the future.
|
|
|
|
timer.finish(module_path!(), line!(), "FindBlockHashes");
|
2022-07-25 15:33:00 -07:00
|
|
|
|
2022-07-26 13:26:17 -07:00
|
|
|
Ok(Response::BlockHashes(res))
|
|
|
|
})
|
|
|
|
})
|
|
|
|
.map(|join_result| join_result.expect("panic in Request::Block"))
|
|
|
|
.boxed()
|
2020-11-30 13:30:37 -08:00
|
|
|
}
|
|
|
|
Request::FindBlockHeaders { known_blocks, stop } => {
|
2022-03-17 11:48:13 -07:00
|
|
|
metrics::counter!(
|
|
|
|
"state.requests",
|
|
|
|
1,
|
|
|
|
"service" => "state",
|
|
|
|
"type" => "find_block_headers",
|
|
|
|
);
|
|
|
|
|
2022-07-22 09:25:32 -07:00
|
|
|
// Before we spawn the future, get a consistent set of chain hashes from the state.
|
|
|
|
|
2022-07-26 13:21:15 -07:00
|
|
|
const MAX_FIND_BLOCK_HEADERS_RESULTS: u32 = 160;
|
state: dodge a bug in zcashd
Zcashd will blindly request more block headers as long as it got 160
block headers in response to a previous query, EVEN IF THOSE HEADERS ARE
ALREADY KNOWN. To dodge this behavior, return slightly fewer than the
maximum, to get it to go away.
https://github.com/zcash/zcash/blob/0ccc885371e01d844ebeced7babe45826623d9c2/src/main.cpp#L6274-L6280
Without this change, communication between a partially-synced `zebrad`
and fully-synced `zcashd` looked like this:
1. `zebrad` connects to `zcashd`, which sends an initial `getheaders`
request;
2. `zebrad` correctly computes the intersection of the provided block
locator with the node's current chain and returns 160 following
headers;
3. `zcashd` does not check whether it already has those headers and
assumes that any provided headers are new and re-validates them;
4. `zcashd` assumes that because `zebrad` responded with 160 headers,
the `zebrad` node is ahead of it, and requests the next 160 headers.
5. Because block locators are sparse, the intersection between the
`zcashd` and `zebrad` chains is likely well behind the `zebrad` tip,
so this process continues for thousands of blocks.
To avoid this problem, we return slightly fewer than the protocol
maximum (158 rather than 160, to guard against off-by-one errors in
zcashd). This does not interfere with use of the returned headers by
peers that check the headers, but does prevent `zcashd` from trying to
download thousands of block headers it already has.
This problem does not occur in the `zcashd<->zcashd` case only because
`zcashd` does not respond to `getheaders` messages while it is syncing.
However, implementing this behavior in Zebra would be more complicated,
because we don't have a distinct "initial block sync" state (we do
poll-based syncing continuously) and we don't have shared global
variables to modify to set that state.
Relevant links (thanks @str4d):
- The PR that introduced this behavior: https://github.com/bitcoin/bitcoin/pull/4468/files#r17026905
- https://github.com/bitcoin/bitcoin/issues/6861
- https://github.com/bitcoin/bitcoin/issues/6755
- https://github.com/bitcoin/bitcoin/pull/8306#issuecomment-614916454
2020-12-02 12:08:47 -08:00
|
|
|
// Zcashd will blindly request more block headers as long as it
|
|
|
|
// got 160 block headers in response to a previous query, EVEN
|
|
|
|
// IF THOSE HEADERS ARE ALREADY KNOWN. To dodge this behavior,
|
|
|
|
// return slightly fewer than the maximum, to get it to go away.
|
|
|
|
//
|
|
|
|
// https://github.com/bitcoin/bitcoin/pull/4468/files#r17026905
|
2022-07-26 13:21:15 -07:00
|
|
|
let max_len = MAX_FIND_BLOCK_HEADERS_RESULTS - 2;
|
2022-07-25 15:33:00 -07:00
|
|
|
|
|
|
|
let timer = CodeTimer::start();
|
|
|
|
|
2022-07-26 13:26:17 -07:00
|
|
|
// Prepare data for concurrent execution
|
|
|
|
let best_chain = self.mem.best_chain().cloned();
|
|
|
|
let db = self.disk.db().clone();
|
2022-07-25 15:33:00 -07:00
|
|
|
|
2022-07-26 13:26:17 -07:00
|
|
|
// # Performance
|
|
|
|
//
|
|
|
|
// Allow other async tasks to make progress while the block is being read from disk.
|
|
|
|
let span = Span::current();
|
|
|
|
tokio::task::spawn_blocking(move || {
|
|
|
|
span.in_scope(move || {
|
|
|
|
let res =
|
|
|
|
read::find_chain_headers(best_chain, &db, known_blocks, stop, max_len);
|
|
|
|
let res = res
|
|
|
|
.into_iter()
|
2022-07-26 13:21:15 -07:00
|
|
|
.map(|header| CountedHeader { header })
|
2022-07-26 13:26:17 -07:00
|
|
|
.collect();
|
|
|
|
|
|
|
|
// The work is done in the future.
|
|
|
|
timer.finish(module_path!(), line!(), "FindBlockHeaders");
|
|
|
|
|
|
|
|
Ok(Response::BlockHeaders(res))
|
|
|
|
})
|
|
|
|
})
|
|
|
|
.map(|join_result| join_result.expect("panic in Request::Block"))
|
2022-07-22 09:25:32 -07:00
|
|
|
.boxed()
|
2020-11-30 13:30:37 -08:00
|
|
|
}
|
2020-09-09 17:13:58 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-03-17 15:59:46 -07:00
|
|
|
impl Service<ReadRequest> for ReadStateService {
|
|
|
|
type Response = ReadResponse;
|
2022-03-11 05:58:22 -08:00
|
|
|
type Error = BoxError;
|
|
|
|
type Future =
|
|
|
|
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
|
|
|
|
|
|
|
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
|
|
|
Poll::Ready(Ok(()))
|
|
|
|
}
|
|
|
|
|
2022-03-15 12:50:28 -07:00
|
|
|
#[instrument(name = "read_state", skip(self))]
|
2022-03-17 15:59:46 -07:00
|
|
|
fn call(&mut self, req: ReadRequest) -> Self::Future {
|
2022-03-11 05:58:22 -08:00
|
|
|
match req {
|
|
|
|
// Used by get_block RPC.
|
2022-03-17 15:59:46 -07:00
|
|
|
ReadRequest::Block(hash_or_height) => {
|
2022-03-17 11:48:13 -07:00
|
|
|
metrics::counter!(
|
|
|
|
"state.requests",
|
|
|
|
1,
|
|
|
|
"service" => "read_state",
|
|
|
|
"type" => "block",
|
|
|
|
);
|
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
let timer = CodeTimer::start();
|
|
|
|
|
2022-03-15 12:50:28 -07:00
|
|
|
let state = self.clone();
|
|
|
|
|
2022-07-22 09:25:32 -07:00
|
|
|
// # Performance
|
|
|
|
//
|
|
|
|
// Allow other async tasks to make progress while concurrently reading blocks from disk.
|
2022-07-25 15:33:00 -07:00
|
|
|
let span = Span::current();
|
2022-07-22 09:25:32 -07:00
|
|
|
tokio::task::spawn_blocking(move || {
|
2022-07-25 15:33:00 -07:00
|
|
|
span.in_scope(move || {
|
|
|
|
let block = state.best_chain_receiver.with_watch_data(|best_chain| {
|
|
|
|
read::block(best_chain, &state.db, hash_or_height)
|
|
|
|
});
|
|
|
|
|
|
|
|
// The work is done in the future.
|
|
|
|
timer.finish(module_path!(), line!(), "ReadRequest::Block");
|
2022-03-16 17:37:44 -07:00
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
Ok(ReadResponse::Block(block))
|
|
|
|
})
|
2022-07-22 09:25:32 -07:00
|
|
|
})
|
|
|
|
.map(|join_result| join_result.expect("panic in ReadRequest::Block"))
|
2022-03-15 12:50:28 -07:00
|
|
|
.boxed()
|
|
|
|
}
|
2022-03-11 05:58:22 -08:00
|
|
|
|
2022-04-13 01:48:13 -07:00
|
|
|
// For the get_raw_transaction RPC.
|
2022-03-17 15:59:46 -07:00
|
|
|
ReadRequest::Transaction(hash) => {
|
2022-03-17 11:48:13 -07:00
|
|
|
metrics::counter!(
|
|
|
|
"state.requests",
|
|
|
|
1,
|
|
|
|
"service" => "read_state",
|
|
|
|
"type" => "transaction",
|
|
|
|
);
|
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
let timer = CodeTimer::start();
|
|
|
|
|
2022-03-17 11:48:13 -07:00
|
|
|
let state = self.clone();
|
2022-03-11 05:58:22 -08:00
|
|
|
|
2022-07-22 09:25:32 -07:00
|
|
|
// # Performance
|
|
|
|
//
|
|
|
|
// Allow other async tasks to make progress while concurrently reading transactions from disk.
|
2022-07-25 15:33:00 -07:00
|
|
|
let span = Span::current();
|
2022-07-22 09:25:32 -07:00
|
|
|
tokio::task::spawn_blocking(move || {
|
2022-07-25 15:33:00 -07:00
|
|
|
span.in_scope(move || {
|
|
|
|
let transaction_and_height =
|
|
|
|
state.best_chain_receiver.with_watch_data(|best_chain| {
|
|
|
|
read::transaction(best_chain, &state.db, hash)
|
|
|
|
});
|
|
|
|
|
|
|
|
// The work is done in the future.
|
|
|
|
timer.finish(module_path!(), line!(), "ReadRequest::Transaction");
|
2022-03-17 11:48:13 -07:00
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
Ok(ReadResponse::Transaction(transaction_and_height))
|
|
|
|
})
|
2022-07-22 09:25:32 -07:00
|
|
|
})
|
|
|
|
.map(|join_result| join_result.expect("panic in ReadRequest::Transaction"))
|
2022-03-17 11:48:13 -07:00
|
|
|
.boxed()
|
2022-03-11 05:58:22 -08:00
|
|
|
}
|
2022-04-13 01:48:13 -07:00
|
|
|
|
2022-05-12 00:00:12 -07:00
|
|
|
ReadRequest::SaplingTree(hash_or_height) => {
|
|
|
|
metrics::counter!(
|
|
|
|
"state.requests",
|
|
|
|
1,
|
|
|
|
"service" => "read_state",
|
|
|
|
"type" => "sapling_tree",
|
|
|
|
);
|
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
let timer = CodeTimer::start();
|
|
|
|
|
2022-05-12 00:00:12 -07:00
|
|
|
let state = self.clone();
|
|
|
|
|
2022-07-22 09:25:32 -07:00
|
|
|
// # Performance
|
|
|
|
//
|
|
|
|
// Allow other async tasks to make progress while concurrently reading trees from disk.
|
2022-07-25 15:33:00 -07:00
|
|
|
let span = Span::current();
|
2022-07-22 09:25:32 -07:00
|
|
|
tokio::task::spawn_blocking(move || {
|
2022-07-25 15:33:00 -07:00
|
|
|
span.in_scope(move || {
|
|
|
|
let sapling_tree =
|
|
|
|
state.best_chain_receiver.with_watch_data(|best_chain| {
|
|
|
|
read::sapling_tree(best_chain, &state.db, hash_or_height)
|
|
|
|
});
|
|
|
|
|
|
|
|
// The work is done in the future.
|
|
|
|
timer.finish(module_path!(), line!(), "ReadRequest::SaplingTree");
|
2022-05-12 00:00:12 -07:00
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
Ok(ReadResponse::SaplingTree(sapling_tree))
|
|
|
|
})
|
2022-07-22 09:25:32 -07:00
|
|
|
})
|
|
|
|
.map(|join_result| join_result.expect("panic in ReadRequest::SaplingTree"))
|
2022-05-12 00:00:12 -07:00
|
|
|
.boxed()
|
|
|
|
}
|
|
|
|
|
|
|
|
ReadRequest::OrchardTree(hash_or_height) => {
|
|
|
|
metrics::counter!(
|
|
|
|
"state.requests",
|
|
|
|
1,
|
|
|
|
"service" => "read_state",
|
|
|
|
"type" => "orchard_tree",
|
|
|
|
);
|
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
let timer = CodeTimer::start();
|
|
|
|
|
2022-05-12 00:00:12 -07:00
|
|
|
let state = self.clone();
|
|
|
|
|
2022-07-22 09:25:32 -07:00
|
|
|
// # Performance
|
|
|
|
//
|
|
|
|
// Allow other async tasks to make progress while concurrently reading trees from disk.
|
2022-07-25 15:33:00 -07:00
|
|
|
let span = Span::current();
|
2022-07-22 09:25:32 -07:00
|
|
|
tokio::task::spawn_blocking(move || {
|
2022-07-25 15:33:00 -07:00
|
|
|
span.in_scope(move || {
|
|
|
|
let orchard_tree =
|
|
|
|
state.best_chain_receiver.with_watch_data(|best_chain| {
|
|
|
|
read::orchard_tree(best_chain, &state.db, hash_or_height)
|
|
|
|
});
|
2022-05-12 00:00:12 -07:00
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
// The work is done in the future.
|
|
|
|
timer.finish(module_path!(), line!(), "ReadRequest::OrchardTree");
|
|
|
|
|
|
|
|
Ok(ReadResponse::OrchardTree(orchard_tree))
|
|
|
|
})
|
2022-07-22 09:25:32 -07:00
|
|
|
})
|
|
|
|
.map(|join_result| join_result.expect("panic in ReadRequest::OrchardTree"))
|
2022-05-12 00:00:12 -07:00
|
|
|
.boxed()
|
|
|
|
}
|
|
|
|
|
2022-04-13 01:48:13 -07:00
|
|
|
// For the get_address_tx_ids RPC.
|
2022-04-21 13:19:26 -07:00
|
|
|
ReadRequest::TransactionIdsByAddresses {
|
|
|
|
addresses,
|
|
|
|
height_range,
|
|
|
|
} => {
|
2022-04-13 01:48:13 -07:00
|
|
|
metrics::counter!(
|
|
|
|
"state.requests",
|
|
|
|
1,
|
|
|
|
"service" => "read_state",
|
2022-04-21 13:19:26 -07:00
|
|
|
"type" => "transaction_ids_by_addresses",
|
2022-04-13 01:48:13 -07:00
|
|
|
);
|
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
let timer = CodeTimer::start();
|
|
|
|
|
2022-04-21 13:19:26 -07:00
|
|
|
let state = self.clone();
|
2022-04-13 01:48:13 -07:00
|
|
|
|
2022-07-22 09:25:32 -07:00
|
|
|
// # Performance
|
|
|
|
//
|
|
|
|
// Allow other async tasks to make progress while concurrently reading transaction IDs from disk.
|
2022-07-25 15:33:00 -07:00
|
|
|
let span = Span::current();
|
2022-07-22 09:25:32 -07:00
|
|
|
tokio::task::spawn_blocking(move || {
|
2022-07-25 15:33:00 -07:00
|
|
|
span.in_scope(move || {
|
|
|
|
let tx_ids = state.best_chain_receiver.with_watch_data(|best_chain| {
|
|
|
|
read::transparent_tx_ids(best_chain, &state.db, addresses, height_range)
|
|
|
|
});
|
2022-04-21 13:19:26 -07:00
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
// The work is done in the future.
|
|
|
|
timer.finish(
|
|
|
|
module_path!(),
|
|
|
|
line!(),
|
|
|
|
"ReadRequest::TransactionIdsByAddresses",
|
|
|
|
);
|
|
|
|
|
|
|
|
tx_ids.map(ReadResponse::AddressesTransactionIds)
|
|
|
|
})
|
2022-07-22 09:25:32 -07:00
|
|
|
})
|
|
|
|
.map(|join_result| {
|
|
|
|
join_result.expect("panic in ReadRequest::TransactionIdsByAddresses")
|
|
|
|
})
|
2022-04-13 01:48:13 -07:00
|
|
|
.boxed()
|
|
|
|
}
|
2022-04-20 11:27:00 -07:00
|
|
|
|
|
|
|
// For the get_address_balance RPC.
|
|
|
|
ReadRequest::AddressBalance(addresses) => {
|
|
|
|
metrics::counter!(
|
|
|
|
"state.requests",
|
|
|
|
1,
|
|
|
|
"service" => "read_state",
|
|
|
|
"type" => "address_balance",
|
|
|
|
);
|
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
let timer = CodeTimer::start();
|
|
|
|
|
2022-04-20 11:27:00 -07:00
|
|
|
let state = self.clone();
|
|
|
|
|
2022-07-22 09:25:32 -07:00
|
|
|
// # Performance
|
|
|
|
//
|
|
|
|
// Allow other async tasks to make progress while concurrently reading balances from disk.
|
2022-07-25 15:33:00 -07:00
|
|
|
let span = Span::current();
|
2022-07-22 09:25:32 -07:00
|
|
|
tokio::task::spawn_blocking(move || {
|
2022-07-25 15:33:00 -07:00
|
|
|
span.in_scope(move || {
|
|
|
|
let balance = state.best_chain_receiver.with_watch_data(|best_chain| {
|
|
|
|
read::transparent_balance(best_chain, &state.db, addresses)
|
|
|
|
})?;
|
|
|
|
|
|
|
|
// The work is done in the future.
|
|
|
|
timer.finish(module_path!(), line!(), "ReadRequest::AddressBalance");
|
2022-04-20 11:27:00 -07:00
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
Ok(ReadResponse::AddressBalance(balance))
|
|
|
|
})
|
2022-07-22 09:25:32 -07:00
|
|
|
})
|
|
|
|
.map(|join_result| join_result.expect("panic in ReadRequest::AddressBalance"))
|
2022-04-20 11:27:00 -07:00
|
|
|
.boxed()
|
|
|
|
}
|
2022-04-24 20:00:52 -07:00
|
|
|
|
|
|
|
// For the get_address_utxos RPC.
|
|
|
|
ReadRequest::UtxosByAddresses(addresses) => {
|
|
|
|
metrics::counter!(
|
|
|
|
"state.requests",
|
|
|
|
1,
|
|
|
|
"service" => "read_state",
|
|
|
|
"type" => "utxos_by_addresses",
|
|
|
|
);
|
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
let timer = CodeTimer::start();
|
|
|
|
|
2022-04-24 20:00:52 -07:00
|
|
|
let state = self.clone();
|
|
|
|
|
2022-07-22 09:25:32 -07:00
|
|
|
// # Performance
|
|
|
|
//
|
|
|
|
// Allow other async tasks to make progress while concurrently reading UTXOs from disk.
|
2022-07-25 15:33:00 -07:00
|
|
|
let span = Span::current();
|
2022-07-22 09:25:32 -07:00
|
|
|
tokio::task::spawn_blocking(move || {
|
2022-07-25 15:33:00 -07:00
|
|
|
span.in_scope(move || {
|
|
|
|
let utxos = state.best_chain_receiver.with_watch_data(|best_chain| {
|
|
|
|
read::transparent_utxos(state.network, best_chain, &state.db, addresses)
|
|
|
|
});
|
|
|
|
|
|
|
|
// The work is done in the future.
|
|
|
|
timer.finish(module_path!(), line!(), "ReadRequest::UtxosByAddresses");
|
2022-04-24 20:00:52 -07:00
|
|
|
|
2022-07-25 15:33:00 -07:00
|
|
|
utxos.map(ReadResponse::Utxos)
|
|
|
|
})
|
2022-07-22 09:25:32 -07:00
|
|
|
})
|
|
|
|
.map(|join_result| join_result.expect("panic in ReadRequest::UtxosByAddresses"))
|
2022-04-24 20:00:52 -07:00
|
|
|
.boxed()
|
|
|
|
}
|
2022-03-11 05:58:22 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-09 17:51:08 -07:00
|
|
|
/// Initialize a state service from the provided [`Config`].
|
2022-03-11 05:58:22 -08:00
|
|
|
/// Returns a boxed state service, a read-only state service,
|
|
|
|
/// and receivers for state chain tip updates.
|
2020-09-09 17:13:58 -07:00
|
|
|
///
|
2020-11-17 15:26:21 -08:00
|
|
|
/// Each `network` has its own separate on-disk database.
|
2020-09-09 17:13:58 -07:00
|
|
|
///
|
2022-03-11 05:58:22 -08:00
|
|
|
/// To share access to the state, wrap the returned service in a `Buffer`,
|
|
|
|
/// or clone the returned [`ReadStateService`].
|
|
|
|
///
|
|
|
|
/// It's possible to construct multiple state services in the same application (as
|
2020-10-21 21:56:18 -07:00
|
|
|
/// long as they, e.g., use different storage locations), but doing so is
|
|
|
|
/// probably not what you want.
|
Reject connections from outdated peers (#2519)
* Simplify state service initialization in test
Use the test helper function to remove redundant code.
* Create `BestTipHeight` helper type
This type abstracts away the calculation of the best tip height based on
the finalized block height and the best non-finalized chain's tip.
* Add `best_tip_height` field to `StateService`
The receiver endpoint is currently ignored.
* Return receiver endpoint from service constructor
Make it available so that the best tip height can be watched.
* Update finalized height after finalizing blocks
After blocks from the queue are finalized and committed to disk, update
the finalized block height.
* Update best non-finalized height after validation
Update the value of the best non-finalized chain tip block height after
a new block is committed to the non-finalized state.
* Update finalized height after loading from disk
When `FinalizedState` is first created, it loads the state from
persistent storage, and the finalized tip height is updated. Therefore,
the `best_tip_height` must be notified of the initial value.
* Update the finalized height on checkpoint commit
When a checkpointed block is committed, it bypasses the non-finalized
state, so there's an extra place where the finalized height has to be
updated.
* Add `best_tip_height` to `Handshake` service
It can be configured using the `Builder::with_best_tip_height`. It's
currently not used, but it will be used to determine if a connection to
a remote peer should be rejected or not based on that peer's protocol
version.
* Require best tip height to init. `zebra_network`
Without it the handshake service can't properly enforce the minimum
network protocol version from peers. Zebrad obtains the best tip height
endpoint from `zebra_state`, and the test vectors simply use a dummy
endpoint that's fixed at the genesis height.
* Pass `best_tip_height` to proto. ver. negotiation
The protocol version negotiation code will reject connections to peers
if they are using an old protocol version. An old version is determined
based on the current known best chain tip height.
* Handle an optional height in `Version`
Fall back to the genesis height if `None` is specified.
* Reject connections to peers on old proto. versions
Avoid connecting to peers that are on protocol versions that don't
recognize a network update.
* Document why peers on old versions are rejected
Describe why it's a security issue above the check.
* Test if `BestTipHeight` starts with `None`
Check if initially there is no best tip height.
* Test if best tip height is max. of latest values
After applying a list of random updates where each one either sets the
finalized height or the non-finalized height, check that the best tip
height is the maximum of the most recently set finalized height and the
most recently set non-finalized height.
* Add `queue_and_commit_finalized` method
A small refactor to make testing easier. The handling of requests for
committing non-finalized and finalized blocks is now more consistent.
* Add `assert_block_can_be_validated` helper
Refactor to move into a separate method some assertions that are done
before a block is validated. This is to allow moving these assertions
more easily to simplify testing.
* Remove redundant PoW block assertion
It's also checked in
`zebra_state::service::check::block_is_contextually_valid`, and it was
getting in the way of tests that received a gossiped block before
finalizing enough blocks.
* Create a test strategy for test vector chain
Splits a chain loaded from the test vectors in two parts, containing the
blocks to finalize and the blocks to keep in the non-finalized state.
* Test committing blocks update best tip height
Create a mock blockchain state, with a chain of finalized blocks and a
chain of non-finalized blocks. Commit all the blocks appropriately, and
verify that the best tip height is updated.
Co-authored-by: teor <teor@riseup.net>
2021-08-08 16:52:52 -07:00
|
|
|
pub fn init(
|
|
|
|
config: Config,
|
|
|
|
network: Network,
|
2021-09-01 15:31:16 -07:00
|
|
|
) -> (
|
|
|
|
BoxService<Request, Response, BoxError>,
|
2022-03-11 05:58:22 -08:00
|
|
|
ReadStateService,
|
2021-09-01 15:31:16 -07:00
|
|
|
LatestChainTip,
|
|
|
|
ChainTipChange,
|
|
|
|
) {
|
2022-03-11 05:58:22 -08:00
|
|
|
let (state_service, read_only_state_service, latest_chain_tip, chain_tip_change) =
|
|
|
|
StateService::new(config, network);
|
2021-09-01 15:31:16 -07:00
|
|
|
|
|
|
|
(
|
|
|
|
BoxService::new(state_service),
|
2022-03-11 05:58:22 -08:00
|
|
|
read_only_state_service,
|
2021-09-01 15:31:16 -07:00
|
|
|
latest_chain_tip,
|
|
|
|
chain_tip_change,
|
|
|
|
)
|
2020-09-09 17:13:58 -07:00
|
|
|
}
|
2021-06-28 22:03:51 -07:00
|
|
|
|
2022-03-17 15:59:46 -07:00
|
|
|
/// Returns a [`StateService`] with an ephemeral [`Config`] and a buffer with a single slot.
///
/// This can be used to create a state service for testing.
///
/// See also [`init`].
#[cfg(any(test, feature = "proptest-impl"))]
pub fn init_test(network: Network) -> Buffer<BoxService<Request, Response, BoxError>, Request> {
    // Tests only need the read-write service: discard the read-only service
    // and the tip channels.
    let (state_service, _read_state_service, _latest_chain_tip, _chain_tip_change) =
        StateService::new(Config::ephemeral(), network);

    Buffer::new(BoxService::new(state_service), 1)
}
|
2022-03-17 15:59:46 -07:00
|
|
|
|
|
|
|
/// Initializes a state service with an ephemeral [`Config`] and a buffer with a single slot,
/// then returns the read-write service, read-only service, and tip watch channels.
///
/// This can be used to create a state service for testing. See also [`init`].
#[cfg(any(test, feature = "proptest-impl"))]
pub fn init_test_services(
    network: Network,
) -> (
    Buffer<BoxService<Request, Response, BoxError>, Request>,
    ReadStateService,
    LatestChainTip,
    ChainTipChange,
) {
    let (read_write_service, read_state_service, latest_chain_tip, chain_tip_change) =
        StateService::new(Config::ephemeral(), network);

    // A single-slot buffer is enough for tests, and lets callers clone the service.
    let buffered_service = Buffer::new(BoxService::new(read_write_service), 1);

    (
        buffered_service,
        read_state_service,
        latest_chain_tip,
        chain_tip_change,
    )
}
|