2020-06-15 15:41:10 -07:00
|
|
|
//! `start` subcommand - entry point for starting a zebra node
//!
//! ## Application Structure
//!
//! A zebra node consists of the following services and tasks:
//!
//! * Network Service
//!   * primary interface to the node
//!   * handles all external network requests for the Zcash protocol
//!     * via zebra_network::Message and zebra_network::Response
//!   * provides an interface to the rest of the network for other services and
//!     tasks running within this node
//!     * via zebra_network::Request
//! * Consensus Service
//!   * handles all validation logic for the node
//!   * verifies blocks using zebra-chain and zebra-script, then stores verified
//!     blocks in zebra-state
//! * Sync Task
//!   * runs in the background and continuously queries the network for
//!     new blocks to be verified and added to the local state
//! * Inbound Service
//!   * handles requests from peers for network data and chain data
//!   * performs transaction and block diffusion
//!   * downloads and verifies gossiped blocks and transactions
|
2020-07-20 21:00:22 -07:00
|
|
|
|
2020-09-18 12:18:22 -07:00
|
|
|
use abscissa_core::{config, Command, FrameworkError, Options, Runnable};
|
|
|
|
use color_eyre::eyre::{eyre, Report};
|
2021-08-24 07:23:53 -07:00
|
|
|
use futures::{select, FutureExt};
|
2020-09-18 12:18:22 -07:00
|
|
|
use tokio::sync::oneshot;
|
2021-10-12 10:31:54 -07:00
|
|
|
use tower::{builder::ServiceBuilder, util::BoxService};
|
2020-09-18 12:18:22 -07:00
|
|
|
|
2020-09-09 12:03:09 -07:00
|
|
|
use crate::{
|
2021-09-08 11:51:17 -07:00
|
|
|
components::{
|
|
|
|
mempool::{self, Mempool},
|
2021-10-07 03:46:37 -07:00
|
|
|
sync,
|
2021-09-08 11:51:17 -07:00
|
|
|
tokio::{RuntimeRun, TokioComponent},
|
|
|
|
ChainSync, Inbound,
|
|
|
|
},
|
|
|
|
config::ZebradConfig,
|
2020-09-09 12:03:09 -07:00
|
|
|
prelude::*,
|
|
|
|
};
|
2020-07-20 21:00:22 -07:00
|
|
|
|
2019-08-29 14:46:54 -07:00
|
|
|
/// `start` subcommand
#[derive(Command, Debug, Options)]
pub struct StartCmd {
    /// Filter strings
    ///
    /// Free command-line arguments; when non-empty, they override the
    /// tracing filter from the config file (see `override_config`).
    #[options(free)]
    filters: Vec<String>,
}
|
|
|
|
|
2020-06-16 11:02:01 -07:00
|
|
|
impl StartCmd {
    /// Start the zebra node: initialize the state, verifiers, network,
    /// syncer, and mempool services, spawn the background gossip and
    /// crawler tasks, then wait until the first of them finishes or fails.
    ///
    /// Returns the error (or `Ok`) of whichever future completes first.
    async fn start(&self) -> Result<(), Report> {
        let config = app_config().clone();
        info!(?config);

        info!("initializing node state");
        // TODO: use ChainTipChange to get tip changes (#2374, #2710, #2711, #2712, #2713, #2714)
        let (state_service, latest_chain_tip, chain_tip_change) =
            zebra_state::init(config.state.clone(), config.network.network);
        // Buffer the state service so multiple callers can share it via clone().
        let state = ServiceBuilder::new().buffer(20).service(state_service);

        info!("initializing verifiers");
        // TODO: use the transaction verifier to verify mempool transactions (#2637, #2606)
        let (chain_verifier, tx_verifier) = zebra_consensus::chain::init(
            config.consensus.clone(),
            config.network.network,
            state.clone(),
        )
        .await;

        info!("initializing network");
        // The service that our node uses to respond to requests by peers. The
        // load_shed middleware ensures that we reduce the size of the peer set
        // in response to excess load.
        //
        // The oneshot channel delivers the remaining setup data (peer set,
        // address book, mempool) to the inbound service after the network is up.
        let (setup_tx, setup_rx) = oneshot::channel();
        let inbound = ServiceBuilder::new()
            .load_shed()
            .buffer(20)
            .service(Inbound::new(
                setup_rx,
                state.clone(),
                chain_verifier.clone(),
            ));

        let (peer_set, address_book) =
            zebra_network::init(config.network.clone(), inbound, latest_chain_tip.clone()).await;

        info!("initializing syncer");
        // `chain_verifier` is moved here; the inbound service holds its own clone.
        let (syncer, sync_status) =
            ChainSync::new(&config, peer_set.clone(), state.clone(), chain_verifier);

        info!("initializing mempool");
        // `state`, `tx_verifier`, and `latest_chain_tip` are moved into the mempool.
        let (mempool, mempool_transaction_receiver) = Mempool::new(
            &config.mempool,
            peer_set.clone(),
            state,
            tx_verifier,
            sync_status.clone(),
            latest_chain_tip,
            chain_tip_change.clone(),
        );
        let mempool = BoxService::new(mempool);
        let mempool = ServiceBuilder::new().buffer(20).service(mempool);

        // Complete the inbound service setup started above; failure here means
        // the inbound service dropped its receiver, so the node can't run.
        setup_tx
            .send((peer_set.clone(), address_book, mempool.clone()))
            .map_err(|_| eyre!("could not send setup data to inbound service"))?;

        // The syncer runs as a future (not a spawned task), polled in select! below.
        let syncer_error_future = syncer.sync();

        // Background task: gossip newly-verified best-tip block hashes to peers.
        let sync_gossip_task_handle = tokio::spawn(sync::gossip_best_tip_block_hashes(
            sync_status.clone(),
            chain_tip_change.clone(),
            peer_set.clone(),
        ));

        // Background task: periodically crawl peers for mempool transactions.
        // `mempool`, `sync_status`, and `chain_tip_change` are moved here.
        let mempool_crawler_task_handle = mempool::Crawler::spawn(
            &config.mempool,
            peer_set.clone(),
            mempool,
            sync_status,
            chain_tip_change,
        );

        // Background task: gossip newly-verified mempool transaction IDs to peers.
        let tx_gossip_task_handle = tokio::spawn(mempool::gossip_mempool_transaction_id(
            mempool_transaction_receiver,
            peer_set,
        ));

        // Wait for the first future/task to finish. Task handles yield a
        // JoinResult: a panic inside a task is re-raised via expect(), while a
        // task's own error is converted into a Report.
        select! {
            sync_result = syncer_error_future.fuse() => sync_result,

            sync_gossip_result = sync_gossip_task_handle.fuse() => sync_gossip_result
                .expect("unexpected panic in the chain tip block gossip task")
                .map_err(|e| eyre!(e)),

            mempool_crawl_result = mempool_crawler_task_handle.fuse() => mempool_crawl_result
                .expect("unexpected panic in the mempool crawler")
                .map_err(|e| eyre!(e)),

            tx_gossip_result = tx_gossip_task_handle.fuse() => tx_gossip_result
                .expect("unexpected panic in the transaction gossip task")
                .map_err(|e| eyre!(e)),
        }
    }
}
|
|
|
|
|
2020-06-16 11:02:01 -07:00
|
|
|
impl Runnable for StartCmd {
|
2019-08-29 14:46:54 -07:00
|
|
|
/// Start the application.
|
|
|
|
fn run(&self) {
|
2020-07-31 23:15:26 -07:00
|
|
|
info!("Starting zebrad");
|
2020-01-13 09:54:27 -08:00
|
|
|
let rt = app_writer()
|
2019-12-13 14:25:14 -08:00
|
|
|
.state_mut()
|
2019-09-09 13:05:42 -07:00
|
|
|
.components
|
2019-12-13 14:25:14 -08:00
|
|
|
.get_downcast_mut::<TokioComponent>()
|
2019-09-09 13:05:42 -07:00
|
|
|
.expect("TokioComponent should be available")
|
|
|
|
.rt
|
2020-01-13 09:54:27 -08:00
|
|
|
.take();
|
|
|
|
|
2020-08-05 16:35:56 -07:00
|
|
|
rt.expect("runtime should not already be taken")
|
|
|
|
.run(self.start());
|
2019-08-29 14:46:54 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-06-16 11:02:01 -07:00
|
|
|
impl config::Override<ZebradConfig> for StartCmd {
|
2019-08-29 14:46:54 -07:00
|
|
|
// Process the given command line options, overriding settings from
|
|
|
|
// a configuration file using explicit flags taken from command-line
|
|
|
|
// arguments.
|
2019-09-09 13:05:42 -07:00
|
|
|
fn override_config(&self, mut config: ZebradConfig) -> Result<ZebradConfig, FrameworkError> {
|
|
|
|
if !self.filters.is_empty() {
|
2020-06-04 19:34:06 -07:00
|
|
|
config.tracing.filter = Some(self.filters.join(","));
|
2019-08-29 14:46:54 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
Ok(config)
|
|
|
|
}
|
|
|
|
}
|