Update to Tokio 1.13.0 (#2994)

* Update `tower` to version `0.4.9`

Update to the latest version to add support for Tokio version 1.

* Replace usage of `ServiceExt::ready_and`

It was deprecated in favor of `ServiceExt::ready`.
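
A minimal sketch of the rename, using hypothetical service and request
types (`ready` has the same semantics as the deprecated `ready_and`):

    use tower::{Service, ServiceExt};

    // Before: `svc.ready_and().await?` (deprecated in tower 0.4.6).
    // After:  `svc.ready().await?` — same behavior, new name.
    async fn call_when_ready<S, R>(svc: &mut S, req: R) -> Result<S::Response, S::Error>
    where
        S: Service<R>,
    {
        let ready_svc = svc.ready().await?; // wait until the service can accept a request
        ready_svc.call(req).await
    }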

* Update Tokio dependency to version `1.13.0`

This will break the build because the code isn't ready for the update,
but future commits will fix the issues.

* Replace import of `tokio::stream::StreamExt`

Use `futures::stream::StreamExt` instead, because newer versions of
Tokio don't have the `stream` feature.
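
At most call sites the only change needed is the import; a rough sketch
(the helper function is illustrative):

    // Tokio 1.x removed the `stream` feature, so the `StreamExt`
    // combinators now come from `futures` (or `tokio-stream`) instead.
    use futures::stream::{Stream, StreamExt};

    async fn first_item<S: Stream + Unpin>(mut stream: S) -> Option<S::Item> {
        stream.next().await
    }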

* Use `IntervalStream` in `zebra-network`

In newer versions of Tokio `Interval` doesn't implement `Stream`, so the
wrapper types from `tokio-stream` have to be used instead.

* Use `IntervalStream` in `inventory_registry`

In newer versions of Tokio the `Interval` type doesn't implement
`Stream`, so `tokio_stream::wrappers::IntervalStream` has to be used
instead.
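
Both call sites follow the same pattern; a minimal sketch (the
75-second interval mirrors the inventory registry's rotation timer):

    use std::time::Duration;

    use futures::StreamExt;
    use tokio_stream::wrappers::IntervalStream;

    async fn rotate_on_interval() {
        // `tokio::time::Interval` no longer implements `Stream`, so wrap it:
        let mut ticks = IntervalStream::new(tokio::time::interval(Duration::from_secs(75)));
        while let Some(_instant) = ticks.next().await {
            // rotate maps, send heartbeats, etc.
        }
    }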

* Use `BroadcastStream` in `inventory_registry`

In newer versions of Tokio `broadcast::Receiver` doesn't implement
`Stream`, so `tokio_stream::wrappers::BroadcastStream` has to be used
instead. This also requires changing the error type that is used.
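
A minimal sketch of the wrapper and the new error type (the element
type is illustrative):

    use futures::StreamExt;
    use tokio::sync::broadcast;
    use tokio_stream::wrappers::{errors::BroadcastStreamRecvError, BroadcastStream};

    async fn drain(rx: broadcast::Receiver<u32>) {
        // `broadcast::Receiver` no longer implements `Stream`, so wrap it.
        // The wrapper yields `Result<T, BroadcastStreamRecvError>` rather
        // than the old `broadcast::error::RecvError`.
        let mut stream = BroadcastStream::new(rx);
        while let Some(item) = stream.next().await {
            match item {
                Ok(_value) => { /* register the item */ }
                Err(BroadcastStreamRecvError::Lagged(count)) => {
                    tracing::debug!(count, "dropped lagged items");
                }
            }
        }
    }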

* Handle `Semaphore::acquire` error in `tower-batch`

In newer versions of Tokio, `Semaphore::acquire` can return an error if
the semaphore is closed. This shouldn't happen in `tower-batch` because
the semaphore is never closed.
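
A minimal sketch of how the batch semaphore handles this (mirroring the
`acquire_owned` call in the diff below):

    use std::sync::Arc;

    use tokio::sync::{OwnedSemaphorePermit, Semaphore};

    async fn take_permit(semaphore: Arc<Semaphore>) -> OwnedSemaphorePermit {
        // `acquire_owned` now returns `Result<_, AcquireError>`; the error
        // only occurs once the semaphore is closed, which never happens here.
        semaphore
            .acquire_owned()
            .await
            .expect("internal semaphore is never closed")
    }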

* Handle `Semaphore::acquire` error in `zebrad` test

In newer versions of Tokio, `Semaphore::acquire` can return an error if
the semaphore is closed. This shouldn't happen in the test because the
semaphore is never closed.
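
In the test the borrowed `acquire` variant gets the same treatment; a
sketch:

    use tokio::sync::Semaphore;

    async fn wait_for_event(events: &Semaphore) {
        // `acquire` now returns `Result<SemaphorePermit, AcquireError>`,
        // so the permit has to be unwrapped before it can be forgotten.
        events
            .acquire()
            .await
            .expect("semaphore is never closed")
            .forget();
    }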

* Update some `zebra-network` dependencies

Use versions compatible with Tokio version 1.

* Upgrade Hyper to version 0.14

Use a version that supports Tokio version 1.

* Update `metrics` dependency to version 0.17

Also update `metrics-exporter-prometheus` to version 0.6.1. Both
updates are needed to make sure Tokio 1 is supported.

* Use `f64` as the histogram data type

`u64` isn't supported as the histogram data type in newer versions of
`metrics`.
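
A sketch of the resulting cast, using a histogram name from the sync
diff below:

    fn record_new_hashes(new_hashes: usize) {
        // metrics 0.17 records histogram values as `f64`, so the old
        // `as u64` casts become `as f64`:
        metrics::histogram!("sync.obtain.response.hash.count", new_hashes as f64);
    }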

* Update the initialization of the metrics component

Make it compatible with the new version of `metrics`.

* Simplify build version counter

Remove all constants and use the new `metrics::increment_counter!` macro.
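
The macro call that replaces the hand-expanded static key (taken from
the metrics endpoint diff below):

    fn expose_build_info() {
        // Newer `metrics` accepts owned `String` names, so the static
        // key/label machinery collapses into a single macro call:
        metrics::increment_counter!(
            format!("{}.build.info", env!("CARGO_PKG_NAME")),
            "version" => env!("CARGO_PKG_VERSION")
        );
    }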

* Change metrics output line to match on

The snapshot string isn't included in the newer version of
`metrics-exporter-prometheus`.

* Update `sentry` to version 0.23.0

Use a version compatible with Tokio version 1.

* Remove usage of `TracingIntegration`

This no longer seems to be available from `sentry-tracing`, so it
needs to be replaced.

* Add sentry layer to tracing initialization

This seems like the replacement for `TracingIntegration`.
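
A minimal sketch of wiring the layer into a `tracing-subscriber` stack
(the plain `fmt` builder stands in for Zebra's real tracing builder):

    use tracing_subscriber::layer::SubscriberExt;

    fn init_tracing() {
        // `sentry_tracing::layer()` forwards tracing events to Sentry,
        // replacing the old `TracingIntegration` client option.
        let subscriber = tracing_subscriber::fmt()
            .finish()
            .with(sentry_tracing::layer());

        tracing::subscriber::set_global_default(subscriber)
            .expect("failed to set global tracing subscriber");
    }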

* Remove unnecessary conversion

Suggested by a Clippy lint.

* Update Cargo lock file

Apply all of the updates to dependencies.

* Ban duplicate tokio dependencies

Also ban git sources for tokio dependencies.

* Stop allowing sentry-tracing git repository in `deny.toml`

* Allow remaining duplicates after the tokio upgrade

* Use C: drive for CI build output on Windows

GitHub Actions uses a Windows image with two disk drives, and the
default D: drive is smaller than the C: drive. Zebra currently uses a
lot of space to build, so it has to use the C: drive to avoid CI build
failures because of insufficient space.

Co-authored-by: teor <teor@riseup.net>
Author: Janito Vaqueiro Ferreira Filho, 2021-11-02 15:46:57 -03:00 (committed by GitHub)
parent 989ba51cb9
commit 0960e4fb0b
47 changed files with 457 additions and 797 deletions


@ -64,6 +64,15 @@ jobs:
echo "CARGO_INCREMENTAL=${{ env.CARGO_INCREMENTAL }}"
echo "RUST_BACKTRACE=${{ env.RUST_BACKTRACE }}"
- name: Change target output directory on Windows
# Windows doesn't have enough space on the D: drive, so we redirect the build output to the
# larger C: drive.
# TODO: Remove this workaround once the build is more efficient (#3005).
if: matrix.os == 'windows-latest'
run: |
mkdir C:\zebra-target
echo "CARGO_TARGET_DIR=C:\zebra-target" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
- name: Run tests
uses: actions-rs/cargo@v1.0.3
with:

Cargo.lock (generated): 837 lines changed; diff suppressed because it is too large.


@ -22,12 +22,6 @@ panic = "abort"
[patch.crates-io]
# awaiting stable versions which all depend on tokio 1.0, see #1086 for details
hyper = { git = "https://github.com/hyperium/hyper", rev = "ed2b22a7f66899d338691552fbcb6c0f2f4e06b9" }
metrics = { git = "https://github.com/ZcashFoundation/metrics", rev = "971133128e5aebe3ad177acffc6154449736cfa2" }
metrics-exporter-prometheus = { git = "https://github.com/ZcashFoundation/metrics", rev = "971133128e5aebe3ad177acffc6154449736cfa2" }
tower = { git = "https://github.com/tower-rs/tower", rev = "d4d1c67c6a0e4213a52abcc2b9df6cc58276ee39" }
# TODO: remove these after a new librustzcash release.
# These are librustzcash requirements specified in its workspace Cargo.toml that we must replicate here
incrementalmerkletree = { git = "https://github.com/zcash/incrementalmerkletree", rev = "b7bd6246122a6e9ace8edb51553fbf5228906cbb" }


@ -31,12 +31,6 @@ skip = [
# dependencies starting at the specified crate, up to a certain depth, which is
# by default infinite
skip-tree = [
# ticket #2200: tokio dependencies
{ name = "metrics-exporter-prometheus", version = "=0.1.0-alpha.7" },
{ name = "tower", version = "=0.4.0" },
{ name = "tokio", version = "=0.2.23" },
{ name = "tokio-util", version = "=0.3.1" },
# ticket #2953: tracing dependencies
{ name = "tracing-subscriber", version = "=0.1.6" },
@ -52,6 +46,9 @@ skip-tree = [
# ticket #2983: criterion dependencies
{ name = "criterion", version = "=0.3.4" },
# ticket #3000: tower-fallback dependencies
{ name = "pin-project", version = "=0.4.28" },
# ticket #2981: bindgen dependencies
{ name = "rocksdb", version = "=0.16.0" },
@ -64,9 +61,22 @@ skip-tree = [
# ticket #2980: inferno dependencies
{ name = "inferno", version = "=0.10.7" },
# ticket #2998: base64 dependencies
{ name = "base64", version = "=0.10.1" },
# ticket #2999: http dependencies
{ name = "bytes", version = "=0.5.6" },
# upgrade orchard from deprecated `bigint` to `uint`: https://github.com/zcash/orchard/issues/219
# alternative: downgrade Zebra to `bigint`
{ name = "bigint", version = "=4.4.3" },
# recent major version bumps
# wait for lots of crates in the tokio ecosystem to upgrade
# we should re-check these dependencies in February 2022
{ name = "redox_syscall", version = "=0.1.57" },
{ name = "socket2", version = "=0.3.16" },
]
# This section is considered when running `cargo deny check sources`.
@ -84,9 +94,6 @@ unknown-git = "deny"
allow-registry = ["https://github.com/rust-lang/crates.io-index"]
# List of URLs for allowed Git repositories
allow-git = [
# ticket #2200: tokio dependencies
"https://github.com/kellpossible/sentry-tracing",
# ticket #2982: librustzcash and orchard git versions
"https://github.com/str4d/redjubjub",
]
@ -95,8 +102,4 @@ allow-git = [
github = [
"ZcashFoundation",
"zcash",
# ticket #2200: tokio dependencies
"hyperium",
"tower-rs",
]


@ -9,8 +9,8 @@ edition = "2018"
futures = "0.3.17"
futures-core = "0.3.13"
pin-project = "1.0.7"
tokio = { version = "0.3.6", features = ["time", "sync", "stream", "tracing", "macros"] }
tower = { version = "0.4", features = ["util", "buffer"] }
tokio = { version = "1.13.0", features = ["time", "sync", "tracing", "macros"] }
tower = { version = "0.4.9", features = ["util", "buffer"] }
tracing = "0.1.29"
tracing-futures = "0.2.5"
@ -18,7 +18,7 @@ tracing-futures = "0.2.5"
color-eyre = "0.5.11"
ed25519-zebra = "3.0.0"
rand = "0.8"
tokio = { version = "0.3.6", features = ["full"]}
tokio = { version = "1.13.0", features = ["full"]}
tokio-test = "0.4.2"
tower-fallback = { path = "../tower-fallback/" }
tower-test = "0.4.0"


@ -8,6 +8,7 @@
#![allow(dead_code)]
pub(crate) use self::sync::OwnedSemaphorePermit as Permit;
use futures::FutureExt;
use futures_core::ready;
use std::{
fmt,
@ -66,7 +67,12 @@ impl Semaphore {
let permit = ready!(Pin::new(fut).poll(cx));
State::Ready(permit)
}
State::Empty => State::Waiting(Box::pin(self.semaphore.clone().acquire_owned())),
State::Empty => State::Waiting(Box::pin(
self.semaphore
.clone()
.acquire_owned()
.map(|result| result.expect("internal semaphore is never closed")),
)),
};
}
}


@ -83,7 +83,7 @@ where
tracing::trace!("notifying caller about worker failure");
let _ = tx.send(Err(failed.clone()));
} else {
match self.service.ready_and().await {
match self.service.ready().await {
Ok(svc) => {
let rsp = svc.call(req.into());
let _ = tx.send(Ok(rsp));
@ -109,7 +109,7 @@ where
async fn flush_service(&mut self) {
if let Err(e) = self
.service
.ready_and()
.ready()
.and_then(|svc| svc.call(BatchControl::Flush))
.await
{


@ -112,7 +112,7 @@ where
sk.sign(&msg[..])
};
verifier.ready_and().await?;
verifier.ready().await?;
results.push(span.in_scope(|| verifier.call((vk_bytes, sig, msg).into())))
}


@ -21,18 +21,18 @@ async fn wakes_pending_waiters_on_close() {
// // keep the request in the worker
handle.allow(0);
let service1 = service.ready_and().await.unwrap();
let service1 = service.ready().await.unwrap();
let poll = worker.poll();
assert_pending!(poll);
let mut response = task::spawn(service1.call(()));
let mut service1 = service.clone();
let mut ready1 = task::spawn(service1.ready_and());
let mut ready1 = task::spawn(service1.ready());
assert_pending!(worker.poll());
assert_pending!(ready1.poll(), "no capacity");
let mut service1 = service.clone();
let mut ready2 = task::spawn(service1.ready_and());
let mut ready2 = task::spawn(service1.ready());
assert_pending!(worker.poll());
assert_pending!(ready2.poll(), "no capacity");
@ -80,17 +80,17 @@ async fn wakes_pending_waiters_on_failure() {
// keep the request in the worker
handle.allow(0);
let service1 = service.ready_and().await.unwrap();
let service1 = service.ready().await.unwrap();
assert_pending!(worker.poll());
let mut response = task::spawn(service1.call("hello"));
let mut service1 = service.clone();
let mut ready1 = task::spawn(service1.ready_and());
let mut ready1 = task::spawn(service1.ready());
assert_pending!(worker.poll());
assert_pending!(ready1.poll(), "no capacity");
let mut service1 = service.clone();
let mut ready2 = task::spawn(service1.ready_and());
let mut ready2 = task::spawn(service1.ready());
assert_pending!(worker.poll());
assert_pending!(ready2.poll(), "no capacity");


@ -13,4 +13,4 @@ tracing = "0.1"
[dev-dependencies]
zebra-test = { path = "../zebra-test/" }
tokio = { version = "0.3.6", features = ["full"]}
tokio = { version = "1.13.0", features = ["full"]}


@ -30,7 +30,7 @@ async fn fallback() {
let mut svc = Fallback::new(svc1, svc2);
assert_eq!(svc.ready_and().await.unwrap().call(1).await.unwrap(), 1);
assert_eq!(svc.ready_and().await.unwrap().call(11).await.unwrap(), 111);
assert!(svc.ready_and().await.unwrap().call(21).await.is_err());
assert_eq!(svc.ready().await.unwrap().call(1).await.unwrap(), 1);
assert_eq!(svc.ready().await.unwrap().call(11).await.unwrap(), 111);
assert!(svc.ready().await.unwrap().call(21).await.is_err());
}


@ -23,10 +23,10 @@ serde = { version = "1", features = ["serde_derive"] }
futures = "0.3.17"
futures-util = "0.3.17"
metrics = "0.13.0-alpha.8"
metrics = "0.17.0"
thiserror = "1.0.30"
tokio = { version = "0.3.6", features = ["time", "sync", "stream", "tracing"] }
tower = { version = "0.4", features = ["timeout", "util", "buffer"] }
tokio = { version = "1.13.0", features = ["time", "sync", "tracing"] }
tower = { version = "0.4.9", features = ["timeout", "util", "buffer"] }
tracing = "0.1.29"
tracing-futures = "0.2.5"
@ -46,7 +46,7 @@ proptest = "0.10"
proptest-derive = "0.3.0"
rand07 = { package = "rand", version = "0.7" }
spandoc = "0.2"
tokio = { version = "0.3.6", features = ["full"] }
tokio = { version = "1.13.0", features = ["full"] }
tracing-error = "0.1.2"
tracing-subscriber = "0.2.25"


@ -125,7 +125,7 @@ where
// Check that this block is actually a new block.
tracing::trace!("checking that block is not already in state");
match state_service
.ready_and()
.ready()
.await
.map_err(|source| VerifyBlockError::Depth { source, hash })?
.call(zs::Request::Depth(hash))
@ -179,7 +179,7 @@ where
));
for transaction in &block.transactions {
let rsp = transaction_verifier
.ready_and()
.ready()
.await
.expect("transaction verifier is always ready")
.call(tx::Request::Block {
@ -211,7 +211,7 @@ where
transaction_hashes,
};
match state_service
.ready_and()
.ready()
.await
.map_err(VerifyBlockError::Commit)?
.call(zs::Request::CommitBlock(prepared_block))


@ -203,7 +203,7 @@ where
};
let tip = match state_service
.ready_and()
.ready()
.await
.unwrap()
.call(zs::Request::Tip)


@ -6,9 +6,12 @@ use super::types::Progress::*;
use super::types::TargetHeight::*;
use color_eyre::eyre::{eyre, Report};
use futures::{future::TryFutureExt, stream::FuturesUnordered};
use futures::{
future::TryFutureExt,
stream::{FuturesUnordered, StreamExt},
};
use std::{cmp::min, convert::TryInto, mem::drop, time::Duration};
use tokio::{stream::StreamExt, time::timeout};
use tokio::time::timeout;
use tower::{Service, ServiceExt};
use tracing_futures::Instrument;
@ -64,10 +67,7 @@ async fn single_item_checkpoint_list() -> Result<(), Report> {
);
/// SPANDOC: Make sure the verifier service is ready
let ready_verifier_service = checkpoint_verifier
.ready_and()
.map_err(|e| eyre!(e))
.await?;
let ready_verifier_service = checkpoint_verifier.ready().map_err(|e| eyre!(e)).await?;
/// SPANDOC: Set up the future for block 0
let verify_future = timeout(
Duration::from_secs(VERIFY_TIMEOUT_SECONDS),
@ -148,10 +148,7 @@ async fn multi_item_checkpoint_list() -> Result<(), Report> {
// Now verify each block
for (block, height, hash) in checkpoint_data {
/// SPANDOC: Make sure the verifier service is ready
let ready_verifier_service = checkpoint_verifier
.ready_and()
.map_err(|e| eyre!(e))
.await?;
let ready_verifier_service = checkpoint_verifier.ready().map_err(|e| eyre!(e)).await?;
/// SPANDOC: Set up the future for block {?height}
let verify_future = timeout(
@ -325,8 +322,7 @@ async fn continuous_blockchain(
if height <= restart_height {
let mut state_service = state_service.clone();
/// SPANDOC: Make sure the state service is ready for block {?height}
let ready_state_service =
state_service.ready_and().map_err(|e| eyre!(e)).await?;
let ready_state_service = state_service.ready().map_err(|e| eyre!(e)).await?;
/// SPANDOC: Add block directly to the state {?height}
ready_state_service
@ -342,10 +338,7 @@ async fn continuous_blockchain(
}
/// SPANDOC: Make sure the verifier service is ready for block {?height}
let ready_verifier_service = checkpoint_verifier
.ready_and()
.map_err(|e| eyre!(e))
.await?;
let ready_verifier_service = checkpoint_verifier.ready().map_err(|e| eyre!(e)).await?;
/// SPANDOC: Set up the future for block {?height}
let verify_future = timeout(
@ -470,10 +463,7 @@ async fn block_higher_than_max_checkpoint_fail() -> Result<(), Report> {
);
/// SPANDOC: Make sure the verifier service is ready
let ready_verifier_service = checkpoint_verifier
.ready_and()
.map_err(|e| eyre!(e))
.await?;
let ready_verifier_service = checkpoint_verifier.ready().map_err(|e| eyre!(e)).await?;
/// SPANDOC: Set up the future for block 415000
let verify_future = timeout(
Duration::from_secs(VERIFY_TIMEOUT_SECONDS),
@ -547,10 +537,7 @@ async fn wrong_checkpoint_hash_fail() -> Result<(), Report> {
);
/// SPANDOC: Make sure the verifier service is ready (1/3)
let ready_verifier_service = checkpoint_verifier
.ready_and()
.map_err(|e| eyre!(e))
.await?;
let ready_verifier_service = checkpoint_verifier.ready().map_err(|e| eyre!(e)).await?;
/// SPANDOC: Set up the future for bad block 0 (1/3)
// TODO(teor || jlusby): check error kind
let bad_verify_future_1 = timeout(
@ -574,10 +561,7 @@ async fn wrong_checkpoint_hash_fail() -> Result<(), Report> {
);
/// SPANDOC: Make sure the verifier service is ready (2/3)
let ready_verifier_service = checkpoint_verifier
.ready_and()
.map_err(|e| eyre!(e))
.await?;
let ready_verifier_service = checkpoint_verifier.ready().map_err(|e| eyre!(e)).await?;
/// SPANDOC: Set up the future for bad block 0 again (2/3)
// TODO(teor || jlusby): check error kind
let bad_verify_future_2 = timeout(
@ -601,10 +585,7 @@ async fn wrong_checkpoint_hash_fail() -> Result<(), Report> {
);
/// SPANDOC: Make sure the verifier service is ready (3/3)
let ready_verifier_service = checkpoint_verifier
.ready_and()
.map_err(|e| eyre!(e))
.await?;
let ready_verifier_service = checkpoint_verifier.ready().map_err(|e| eyre!(e)).await?;
/// SPANDOC: Set up the future for good block 0 (3/3)
let good_verify_future = timeout(
Duration::from_secs(VERIFY_TIMEOUT_SECONDS),
@ -732,10 +713,7 @@ async fn checkpoint_drop_cancel() -> Result<(), Report> {
// Now collect verify futures for each block
for (block, height, hash) in checkpoint_data {
/// SPANDOC: Make sure the verifier service is ready
let ready_verifier_service = checkpoint_verifier
.ready_and()
.map_err(|e| eyre!(e))
.await?;
let ready_verifier_service = checkpoint_verifier.ready().map_err(|e| eyre!(e)).await?;
/// SPANDOC: Set up the future for block {?height}
let verify_future = timeout(
@ -811,10 +789,7 @@ async fn hard_coded_mainnet() -> Result<(), Report> {
assert!(checkpoint_verifier.checkpoint_list.max_height() > block::Height(0));
/// SPANDOC: Make sure the verifier service is ready
let ready_verifier_service = checkpoint_verifier
.ready_and()
.map_err(|e| eyre!(e))
.await?;
let ready_verifier_service = checkpoint_verifier.ready().map_err(|e| eyre!(e)).await?;
/// SPANDOC: Set up the future for block 0
let verify_future = timeout(
Duration::from_secs(VERIFY_TIMEOUT_SECONDS),


@ -22,7 +22,7 @@ where
let sk = SigningKey::new(&mut rng);
let vk = VerificationKey::from(&sk);
let sig = sk.sign(&msg[..]);
verifier.ready_and().await?;
verifier.ready().await?;
results.push(span.in_scope(|| verifier.call((vk.into(), sig, msg).into())))
}


@ -32,7 +32,7 @@ where
tracing::trace!(?spend);
let spend_rsp = spend_verifier
.ready_and()
.ready()
.await?
.call(groth16::ItemWrapper::from(&spend).into());
@ -43,7 +43,7 @@ where
tracing::trace!(?output);
let output_rsp = output_verifier
.ready_and()
.ready()
.await?
.call(groth16::ItemWrapper::from(output).into());
@ -131,7 +131,7 @@ where
tracing::trace!(?modified_output);
let output_rsp = output_verifier
.ready_and()
.ready()
.await?
.call(groth16::ItemWrapper::from(&modified_output).into());


@ -24,14 +24,14 @@ where
let sk = SigningKey::<SpendAuth>::new(&mut rng);
let vk = VerificationKey::from(&sk);
let sig = sk.sign(&mut rng, &msg[..]);
verifier.ready_and().await?;
verifier.ready().await?;
results.push(span.in_scope(|| verifier.call((vk.into(), sig, msg).into())))
}
1 => {
let sk = SigningKey::<Binding>::new(&mut rng);
let vk = VerificationKey::from(&sk);
let sig = sk.sign(&mut rng, &msg[..]);
verifier.ready_and().await?;
verifier.ready().await?;
results.push(span.in_scope(|| verifier.call((vk.into(), sig, msg).into())))
}
_ => panic!(),


@ -24,14 +24,14 @@ where
let sk = SigningKey::<SpendAuth>::new(&mut rng);
let vk = VerificationKey::from(&sk);
let sig = sk.sign(&mut rng, &msg[..]);
verifier.ready_and().await?;
verifier.ready().await?;
results.push(span.in_scope(|| verifier.call((vk.into(), sig, msg).into())))
}
1 => {
let sk = SigningKey::<Binding>::new(&mut rng);
let vk = VerificationKey::from(&sk);
let sig = sk.sign(&mut rng, &msg[..]);
verifier.ready_and().await?;
verifier.ready().await?;
results.push(span.in_scope(|| verifier.call((vk.into(), sig, msg).into())))
}
_ => panic!(),


@ -10,7 +10,7 @@ edition = "2018"
[dependencies]
bitflags = "1.2"
byteorder = "1.4"
bytes = "0.6"
bytes = "1.1.0"
chrono = "0.4"
hex = "0.4"
# indexmap has rayon support for parallel iteration,
@ -24,11 +24,12 @@ serde = { version = "1", features = ["serde_derive"] }
thiserror = "1"
futures = "0.3"
tokio = { version = "0.3.6", features = ["net", "time", "stream", "tracing", "macros", "rt-multi-thread"] }
tokio-util = { version = "0.5", features = ["codec"] }
tower = { version = "0.4", features = ["retry", "discover", "load", "load-shed", "timeout", "util", "buffer"] }
tokio = { version = "1.13.0", features = ["net", "time", "tracing", "macros", "rt-multi-thread"] }
tokio-stream = { version = "0.1.7", features = ["sync", "time"] }
tokio-util = { version = "0.6.9", features = ["codec"] }
tower = { version = "0.4.9", features = ["retry", "discover", "load", "load-shed", "timeout", "util", "buffer"] }
metrics = "0.13.0-alpha.8"
metrics = "0.17.0"
tracing = "0.1"
tracing-futures = "0.2"
tracing-error = { version = "0.1.2", features = ["traced-error"] }
@ -38,7 +39,7 @@ zebra-chain = { path = "../zebra-chain" }
[dev-dependencies]
proptest = "0.10"
proptest-derive = "0.3"
tokio = { version = "0.3.6", features = ["test-util"] }
tokio = { version = "1.13.0", features = ["test-util"] }
toml = "0.5"
zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] }


@ -924,7 +924,7 @@ where
trace!(?req);
use tower::{load_shed::error::Overloaded, ServiceExt};
if self.svc.ready_and().await.is_err() {
if self.svc.ready().await.is_err() {
// Treat all service readiness errors as Overloaded
// TODO: treat `TryRecvError::Closed` in `Inbound::poll_ready` as a fatal error (#1655)
self.fail_with(PeerError::Overloaded);


@ -78,7 +78,7 @@ where
async move {
let stream = TcpStream::connect(addr).await?;
hs.ready_and().await?;
hs.ready().await?;
let client = hs
.call(HandshakeRequest {
tcp_stream: stream,


@ -15,6 +15,7 @@ use futures::{
future, FutureExt, SinkExt, StreamExt,
};
use tokio::{net::TcpStream, sync::broadcast, task::JoinError, time::timeout};
use tokio_stream::wrappers::IntervalStream;
use tokio_util::codec::Framed;
use tower::Service;
use tracing::{span, Level, Span};
@ -946,7 +947,8 @@ where
let mut shutdown_rx = shutdown_rx;
let mut server_tx = server_tx;
let mut timestamp_collector = heartbeat_ts_collector.clone();
let mut interval_stream = tokio::time::interval(constants::HEARTBEAT_INTERVAL);
let mut interval_stream =
IntervalStream::new(tokio::time::interval(constants::HEARTBEAT_INTERVAL));
loop {
let shutdown_rx_ref = Pin::new(&mut shutdown_rx);


@ -239,7 +239,7 @@ where
debug!(?fanout_limit, "sending GetPeers requests");
// TODO: launch each fanout in its own task (might require tokio 1.6)
for _ in 0..fanout_limit {
let peer_service = self.peer_service.ready_and().await?;
let peer_service = self.peer_service.ready().await?;
responses.push(peer_service.call(Request::Peers));
}
while let Some(rsp) = responses.next().await {


@ -18,6 +18,7 @@ use tokio::{
sync::broadcast,
time::{sleep, Instant},
};
use tokio_stream::wrappers::IntervalStream;
use tower::{
buffer::Buffer, discover::Change, layer::Layer, load::peak_ewma::PeakEwmaDiscover,
util::BoxService, Service, ServiceExt,
@ -477,7 +478,7 @@ where
let _guard = accept_span.enter();
debug!("got incoming connection");
handshaker.ready_and().await?;
handshaker.ready().await?;
// TODO: distinguish between proxied listeners and direct listeners
let handshaker_span = info_span!("listen_handshaker", peer = ?connected_addr);
@ -606,7 +607,8 @@ where
handshakes.push(future::pending().boxed());
let mut crawl_timer =
tokio::time::interval(config.crawl_new_peer_interval).map(|tick| TimerCrawl { tick });
IntervalStream::new(tokio::time::interval(config.crawl_new_peer_interval))
.map(|tick| TimerCrawl { tick });
loop {
metrics::gauge!(
@ -750,7 +752,7 @@ where
// the connector is always ready, so this can't hang
let outbound_connector = outbound_connector
.ready_and()
.ready()
.await
.expect("outbound connector never errors");


@ -10,11 +10,9 @@ use std::{
time::Duration,
};
use futures::{Stream, StreamExt};
use tokio::{
sync::broadcast,
time::{self, Interval},
};
use futures::{FutureExt, Stream, StreamExt};
use tokio::{sync::broadcast, time};
use tokio_stream::wrappers::{errors::BroadcastStreamRecvError, BroadcastStream, IntervalStream};
use crate::{protocol::external::InventoryHash, BoxError};
@ -32,13 +30,13 @@ pub struct InventoryRegistry {
/// Stream of incoming inventory hashes to register
inv_stream: Pin<
Box<
dyn Stream<Item = Result<(InventoryHash, SocketAddr), broadcast::error::RecvError>>
dyn Stream<Item = Result<(InventoryHash, SocketAddr), BroadcastStreamRecvError>>
+ Send
+ 'static,
>,
>,
/// Interval tracking how frequently we should rotate our maps
interval: Interval,
interval: IntervalStream,
}
impl std::fmt::Debug for InventoryRegistry {
@ -56,8 +54,8 @@ impl InventoryRegistry {
Self {
current: Default::default(),
prev: Default::default(),
inv_stream: inv_stream.into_stream().boxed(),
interval: time::interval(Duration::from_secs(75)),
inv_stream: BroadcastStream::new(inv_stream).boxed(),
interval: IntervalStream::new(time::interval(Duration::from_secs(75))),
}
}
@ -97,17 +95,16 @@ impl InventoryRegistry {
// rather than propagating it through the peer set's Service::poll_ready
// implementation, where reporting a failure means reporting a permanent
// failure of the peer set.
use broadcast::error::RecvError;
while let Poll::Ready(Some(channel_result)) = Pin::new(&mut self.inv_stream).poll_next(cx) {
while let Poll::Ready(channel_result) = self.inv_stream.next().poll_unpin(cx) {
match channel_result {
Ok((hash, addr)) => self.register(hash, addr),
Err(RecvError::Lagged(count)) => {
Some(Ok((hash, addr))) => self.register(hash, addr),
Some(Err(BroadcastStreamRecvError::Lagged(count))) => {
metrics::counter!("pool.inventory.dropped", 1);
tracing::debug!(count, "dropped lagged inventory advertisements");
}
// This indicates all senders, including the one in the handshaker,
// have been dropped, which really is a permanent failure.
Err(RecvError::Closed) => return Err(RecvError::Closed.into()),
None => return Err(broadcast::error::RecvError::Closed.into()),
}
}


@ -19,11 +19,11 @@ serde = { version = "1", features = ["serde_derive"] }
bincode = "1"
futures = "0.3.17"
metrics = "0.13.0-alpha.8"
tower = { version = "0.4", features = ["buffer", "util"] }
metrics = "0.17.0"
tower = { version = "0.4.9", features = ["buffer", "util"] }
tracing = "0.1"
thiserror = "1.0.30"
tokio = { version = "0.3.6", features = ["sync"] }
tokio = { version = "1.13.0", features = ["sync"] }
displaydoc = "0.2.2"
rocksdb = "0.16.0"
tempdir = "0.3.7"
@ -46,7 +46,7 @@ once_cell = "1.8"
itertools = "0.10.1"
spandoc = "0.2"
tempdir = "0.3.7"
tokio = { version = "0.3.6", features = ["full"] }
tokio = { version = "1.13.0", features = ["full"] }
# TODO: replace w/ crate version when released: https://github.com/ZcashFoundation/zebra/issues/2083
# Note: if updating this, also update the workspace Cargo.toml to match.
halo2 = "=0.1.0-beta.1"


@ -37,7 +37,7 @@ async fn populated_state(
let mut responses = FuturesUnordered::new();
for request in requests {
let rsp = state.ready_and().await.unwrap().call(request);
let rsp = state.ready().await.unwrap().call(request);
responses.push(rsp);
}


@ -15,8 +15,8 @@ proptest = "0.10.1"
rand = "0.8"
regex = "1.4.6"
tower = { version = "0.4", features = ["util"] }
tokio = { version = "0.3", features = ["full"] }
tokio = { version = "1.13.0", features = ["full"] }
tower = { version = "0.4.9", features = ["util"] }
futures = "0.3.17"
color-eyre = "0.5.11"


@ -98,7 +98,7 @@ where
// These unwraps could propagate errors with the correct
// bound on C::Error
let fut = to_check
.ready_and()
.ready()
.await
.map_err(Into::into)
.map_err(|e| eyre!(e))


@ -23,11 +23,11 @@ async fn transcript_returns_responses_and_ends() {
for (req, rsp) in TRANSCRIPT_DATA.iter() {
assert_eq!(
svc.ready_and().await.unwrap().call(req).await.unwrap(),
svc.ready().await.unwrap().call(req).await.unwrap(),
*rsp.as_ref().unwrap()
);
}
assert!(svc.ready_and().await.unwrap().call("end").await.is_err());
assert!(svc.ready().await.unwrap().call("end").await.is_err());
}
#[tokio::test]
@ -37,10 +37,10 @@ async fn transcript_errors_wrong_request() {
let mut svc = Transcript::from(TRANSCRIPT_DATA.iter().cloned());
assert_eq!(
svc.ready_and().await.unwrap().call("req1").await.unwrap(),
svc.ready().await.unwrap().call("req1").await.unwrap(),
"rsp1",
);
assert!(svc.ready_and().await.unwrap().call("bad").await.is_err());
assert!(svc.ready().await.unwrap().call("bad").await.is_err());
}
#[tokio::test]


@ -21,10 +21,10 @@ serde = { version = "1", features = ["serde_derive"] }
toml = "0.5"
chrono = "0.4"
hyper = { version = "0.14.0-dev", features = ["full"] }
hyper = { version = "0.14.13", features = ["full"] }
futures = "0.3"
tokio = { version = "0.3.6", features = ["time", "rt-multi-thread", "stream", "macros", "tracing", "signal"] }
tower = { version = "0.4", features = ["hedge", "limit"] }
tokio = { version = "1.13.0", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] }
tower = { version = "0.4.9", features = ["hedge", "limit"] }
pin-project = "1.0.7"
color-eyre = { version = "0.5.11", features = ["issue-url"] }
@ -36,15 +36,15 @@ tracing-flame = "0.1.0"
tracing-journald = "0.1.0"
tracing-subscriber = { version = "0.2.25", features = ["tracing-log"] }
tracing-error = "0.1.2"
metrics = "0.13.0-alpha.8"
metrics-exporter-prometheus = "0.1.0-alpha.7"
metrics = "0.17.0"
metrics-exporter-prometheus = "0.6.1"
dirs = "4.0.0"
inferno = { version = "0.10.7", default-features = false }
atty = "0.2.14"
sentry = { version = "0.21.0", default-features = false, features = ["backtrace", "contexts", "reqwest", "rustls"] }
sentry-tracing = { git = "https://github.com/kellpossible/sentry-tracing.git", rev = "f1a4a4a16b5ff1022ae60be779eb3fb928ce9b0f" }
sentry = { version = "0.23.0", default-features = false, features = ["backtrace", "contexts", "reqwest", "rustls"] }
sentry-tracing = "0.23.0"
rand = "0.8.4"
@ -57,7 +57,7 @@ once_cell = "1.8"
regex = "1.4.6"
semver = "1.0.3"
tempdir = "0.3.7"
tokio = { version = "0.3.6", features = ["full", "test-util"] }
tokio = { version = "1.13.0", features = ["full", "test-util"] }
proptest = "0.10"
proptest-derive = "0.3"


@ -306,14 +306,11 @@ impl Application for ZebradApp {
// The Sentry default config pulls in the DSN from the `SENTRY_DSN`
// environment variable.
#[cfg(feature = "enable-sentry")]
let guard = sentry::init(
sentry::ClientOptions {
debug: true,
release: Some(app_version().to_string().into()),
..Default::default()
}
.add_integration(sentry_tracing::TracingIntegration::default()),
);
let guard = sentry::init(sentry::ClientOptions {
debug: true,
release: Some(app_version().to_string().into()),
..Default::default()
});
std::panic::set_hook(Box::new(move |panic_info| {
let panic_report = panic_hook.panic_report(panic_info);


@ -609,7 +609,7 @@ async fn setup(
.zcash_deserialize_into()
.unwrap();
state_service
.ready_and()
.ready()
.await
.unwrap()
.call(zebra_state::Request::CommitFinalizedBlock(


@ -197,7 +197,7 @@ where
for _ in 0..FANOUT {
let mut peer_set = peer_set.clone();
// end the task on permanent peer set errors
let peer_set = peer_set.ready_and().await?;
let peer_set = peer_set.ready().await?;
requests.push(peer_set.call(zn::Request::MempoolTransactionIds));
}
@ -242,7 +242,7 @@ where
let call_result = self
.mempool
.ready_and()
.ready()
.await?
.call(mempool::Request::Queue(transaction_ids))
.await;


@ -443,7 +443,7 @@ where
) -> Result<(), TransactionDownloadVerifyError> {
// Check if the transaction is already in the state.
match state
.ready_and()
.ready()
.await
.map_err(|e| TransactionDownloadVerifyError::StateError(e))?
.call(zs::Request::Transaction(txid.mined_id()))


@ -44,7 +44,7 @@ where
info!(?request, "sending mempool transaction broadcast");
// broadcast requests don't return errors, and we'd just want to ignore them anyway
let _ = broadcast_network.ready_and().await?.call(request).await;
let _ = broadcast_network.ready().await?.call(request).await;
metrics::counter!("mempool.gossiped.transactions.total", txs_len as _);
}


@ -69,7 +69,7 @@ where
// So we propagate any unexpected errors to the task that spawned us.
let response = self
.mempool
.ready_and()
.ready()
.await?
.call(mempool::Request::CheckForVerifiedTransactions)
.await?;


@ -267,6 +267,6 @@ impl VerifiedSet {
"zcash.mempool.size.bytes",
self.transactions_serialized_size as _
);
metrics::gauge!("zcash.mempool.cost.bytes", u64::from(self.total_cost) as _);
metrics::gauge!("zcash.mempool.cost.bytes", self.total_cost as _);
}
}


@ -63,7 +63,7 @@ async fn mempool_service_basic_single() -> Result<(), Report> {
// Test `Request::TransactionIds`
let response = service
.ready_and()
.ready()
.await
.unwrap()
.call(Request::TransactionIds)
@ -80,7 +80,7 @@ async fn mempool_service_basic_single() -> Result<(), Report> {
.copied()
.collect::<HashSet<_>>();
let response = service
.ready_and()
.ready()
.await
.unwrap()
.call(Request::TransactionsById(
@ -109,7 +109,7 @@ async fn mempool_service_basic_single() -> Result<(), Report> {
// Test `Request::RejectedTransactionIds`
let response = service
.ready_and()
.ready()
.await
.unwrap()
.call(Request::RejectedTransactionIds(
@ -127,7 +127,7 @@ async fn mempool_service_basic_single() -> Result<(), Report> {
// Test `Request::Queue`
// Use the ID of the last transaction in the list
let response = service
.ready_and()
.ready()
.await
.unwrap()
.call(Request::Queue(vec![last_transaction.transaction.id.into()]))
@ -190,7 +190,7 @@ async fn mempool_queue_single() -> Result<(), Report> {
// Test `Request::Queue` for a new transaction
let response = service
.ready_and()
.ready()
.await
.unwrap()
.call(Request::Queue(vec![new_tx.transaction.id.into()]))
@ -207,7 +207,7 @@ async fn mempool_queue_single() -> Result<(), Report> {
// They should all be rejected; either because they are already in the mempool,
// or because they are in the recently evicted list.
let response = service
.ready_and()
.ready()
.await
.unwrap()
.call(Request::Queue(
@ -273,7 +273,7 @@ async fn mempool_service_disabled() -> Result<(), Report> {
// Test if the mempool answers correctly (i.e. is enabled)
let response = service
.ready_and()
.ready()
.await
.unwrap()
.call(Request::TransactionIds)
@ -288,7 +288,7 @@ async fn mempool_service_disabled() -> Result<(), Report> {
// Use the ID of the last transaction in the list
let txid = more_transactions.last().unwrap().transaction.id;
let response = service
.ready_and()
.ready()
.await
.unwrap()
.call(Request::Queue(vec![txid.into()]))
@ -310,7 +310,7 @@ async fn mempool_service_disabled() -> Result<(), Report> {
// Test if the mempool returns no transactions when disabled
let response = service
.ready_and()
.ready()
.await
.unwrap()
.call(Request::TransactionIds)
@ -329,7 +329,7 @@ async fn mempool_service_disabled() -> Result<(), Report> {
// Test if the mempool returns to Queue requests correctly when disabled
let response = service
.ready_and()
.ready()
.await
.unwrap()
.call(Request::Queue(vec![txid.into()]))
@ -371,7 +371,7 @@ async fn mempool_cancel_mined() -> Result<(), Report> {
.zcash_deserialize_into()
.unwrap();
state_service
.ready_and()
.ready()
.await
.unwrap()
.call(zebra_state::Request::CommitFinalizedBlock(
@ -385,7 +385,7 @@ async fn mempool_cancel_mined() -> Result<(), Report> {
// Push block 1 to the state
state_service
.ready_and()
.ready()
.await
.unwrap()
.call(zebra_state::Request::CommitFinalizedBlock(
@ -402,7 +402,7 @@ async fn mempool_cancel_mined() -> Result<(), Report> {
// which cancels all downloads.
let txid = block2.transactions[0].unmined_id();
let response = mempool
.ready_and()
.ready()
.await
.unwrap()
.call(Request::Queue(vec![txid.into()]))
@ -464,7 +464,7 @@ async fn mempool_cancel_downloads_after_network_upgrade() -> Result<(), Report>
.zcash_deserialize_into()
.unwrap();
state_service
.ready_and()
.ready()
.await
.unwrap()
.call(zebra_state::Request::CommitFinalizedBlock(
@ -476,7 +476,7 @@ async fn mempool_cancel_downloads_after_network_upgrade() -> Result<(), Report>
// Queue transaction from block 2 for download
let txid = block2.transactions[0].unmined_id();
let response = mempool
.ready_and()
.ready()
.await
.unwrap()
.call(Request::Queue(vec![txid.into()]))
@ -496,7 +496,7 @@ async fn mempool_cancel_downloads_after_network_upgrade() -> Result<(), Report>
// Push block 1 to the state. This is considered a network upgrade,
// and thus must cancel all pending transaction downloads.
state_service
.ready_and()
.ready()
.await
.unwrap()
.call(zebra_state::Request::CommitFinalizedBlock(
@ -537,7 +537,7 @@ async fn mempool_failed_verification_is_rejected() -> Result<(), Report> {
.zcash_deserialize_into()
.unwrap();
state_service
.ready_and()
.ready()
.await
.unwrap()
.call(zebra_state::Request::CommitFinalizedBlock(
@ -549,7 +549,7 @@ async fn mempool_failed_verification_is_rejected() -> Result<(), Report> {
// Queue first transaction for verification
// (queue the transaction itself to avoid a download).
let request = mempool
.ready_and()
.ready()
.await
.unwrap()
.call(Request::Queue(vec![rejected_tx.transaction.clone().into()]));
@ -576,7 +576,7 @@ async fn mempool_failed_verification_is_rejected() -> Result<(), Report> {
// Try to queue the same transaction by its ID and check if it's correctly
// rejected.
let response = mempool
.ready_and()
.ready()
.await
.unwrap()
.call(Request::Queue(vec![rejected_tx.transaction.id.into()]))
@ -620,7 +620,7 @@ async fn mempool_failed_download_is_not_rejected() -> Result<(), Report> {
.zcash_deserialize_into()
.unwrap();
state_service
.ready_and()
.ready()
.await
.unwrap()
.call(zebra_state::Request::CommitFinalizedBlock(
@ -631,7 +631,7 @@ async fn mempool_failed_download_is_not_rejected() -> Result<(), Report> {
// Queue second transaction for download and verification.
let request = mempool
.ready_and()
.ready()
.await
.unwrap()
.call(Request::Queue(vec![rejected_valid_tx
@ -663,7 +663,7 @@ async fn mempool_failed_download_is_not_rejected() -> Result<(), Report> {
// Try to queue the same transaction by its ID and check if it's not being
// rejected.
let response = mempool
.ready_and()
.ready()
.await
.unwrap()
.call(Request::Queue(vec![rejected_valid_tx


@ -23,24 +23,10 @@ impl MetricsEndpoint {
// Expose binary metadata to metrics, using a single time series with
// value 1:
// https://www.robustperception.io/exposing-the-software-version-to-prometheus
//
// We manually expand the metrics::increment!() macro because it only
// supports string literals for metrics names, preventing us from
// using concat!() to build the name.
static METRIC_NAME: [metrics::SharedString; 2] = [
metrics::SharedString::const_str(env!("CARGO_PKG_NAME")),
metrics::SharedString::const_str("build.info"),
];
static METRIC_LABELS: [metrics::Label; 1] =
[metrics::Label::from_static_parts(
"version",
env!("CARGO_PKG_VERSION"),
)];
static METRIC_KEY: metrics::KeyData =
metrics::KeyData::from_static_parts(&METRIC_NAME, &METRIC_LABELS);
if let Some(recorder) = metrics::try_recorder() {
recorder.increment_counter(metrics::Key::Borrowed(&METRIC_KEY), 1);
}
metrics::increment_counter!(
format!("{}.build.info", env!("CARGO_PKG_NAME")),
"version" => env!("CARGO_PKG_VERSION")
);
}
Err(e) => panic!(
"Opening metrics endpoint listener {:?} failed: {:?}. \


@ -385,7 +385,7 @@ where
async fn obtain_tips(&mut self) -> Result<(), Report> {
let block_locator = self
.state
.ready_and()
.ready()
.await
.map_err(|e| eyre!(e))?
.call(zebra_state::Request::BlockLocator)
@ -403,16 +403,12 @@ where
let mut requests = FuturesUnordered::new();
for _ in 0..FANOUT {
requests.push(
self.tip_network
.ready_and()
.await
.map_err(|e| eyre!(e))?
.call(zn::Request::FindBlocks {
known_blocks: block_locator.clone(),
stop: None,
}),
);
requests.push(self.tip_network.ready().await.map_err(|e| eyre!(e))?.call(
zn::Request::FindBlocks {
known_blocks: block_locator.clone(),
stop: None,
},
));
}
let mut download_set = HashSet::new();
@ -485,7 +481,7 @@ where
let new_download_len = download_set.len();
let new_hashes = new_download_len - prev_download_len;
tracing::debug!(new_hashes, "added hashes to download set");
metrics::histogram!("sync.obtain.response.hash.count", new_hashes as u64);
metrics::histogram!("sync.obtain.response.hash.count", new_hashes as f64);
}
Ok(_) => unreachable!("network returned wrong response"),
// We ignore this error because we made multiple fanout requests.
@ -526,16 +522,12 @@ where
tracing::debug!(?tip, "asking peers to extend chain tip");
let mut responses = FuturesUnordered::new();
for _ in 0..FANOUT {
responses.push(
self.tip_network
.ready_and()
.await
.map_err(|e| eyre!(e))?
.call(zn::Request::FindBlocks {
known_blocks: vec![tip.tip],
stop: None,
}),
);
responses.push(self.tip_network.ready().await.map_err(|e| eyre!(e))?.call(
zn::Request::FindBlocks {
known_blocks: vec![tip.tip],
stop: None,
},
));
}
while let Some(res) = responses.next().await {
match res.map_err::<Report, _>(|e| eyre!(e)) {
@ -621,7 +613,7 @@ where
let new_download_len = download_set.len();
let new_hashes = new_download_len - prev_download_len;
tracing::debug!(new_hashes, "added hashes to download set");
metrics::histogram!("sync.extend.response.hash.count", new_hashes as u64);
metrics::histogram!("sync.extend.response.hash.count", new_hashes as f64);
}
Ok(_) => unreachable!("network returned wrong response"),
// We ignore this error because we made multiple fanout requests.
@ -689,7 +681,7 @@ where
async fn state_contains(&mut self, hash: block::Hash) -> Result<bool, Report> {
match self
.state
.ready_and()
.ready()
.await
.map_err(|e| eyre!(e))?
.call(zebra_state::Request::Depth(hash))


@ -167,7 +167,7 @@ where
tracing::debug!("waiting to request block");
let block_req = self
.network
.ready_and()
.ready()
.await
.map_err(|e| eyre!(e))?
.call(zn::Request::BlocksByHash(std::iter::once(hash).collect()));
@ -201,7 +201,7 @@ where
metrics::counter!("sync.downloaded.block.count", 1);
let rsp = verifier
.ready_and()
.ready()
.await
.map_err(BlockDownloadVerifyError::VerifierError)?
.call(block);


@ -76,7 +76,7 @@ where
// broadcast requests don't return errors, and we'd just want to ignore them anyway
let _ = broadcast_network
.ready_and()
.ready()
.await
.map_err(PeerSetReadiness)?
.call(request)


@ -100,7 +100,7 @@ proptest! {
let awoke = match timeout(EVENT_TIMEOUT, wake_events.acquire()).await {
Ok(permit) => {
permit.forget();
permit.expect("Sempahore closed prematurely").forget();
true
}
Err(_) => false,
@ -127,7 +127,7 @@ proptest! {
wake_events: Arc<Semaphore>,
) -> Result<(), TestCaseError> {
loop {
update_events.acquire().await.forget();
update_events.acquire().await.expect("Semaphore closed prematurely").forget();
// The refactor suggested by clippy is harder to read and understand.
#[allow(clippy::question_mark)]


@ -46,7 +46,10 @@ impl Tracing {
None
};
let subscriber = builder.finish().with(ErrorLayer::default());
let subscriber = builder
.finish()
.with(ErrorLayer::default())
.with(sentry_tracing::layer());
match (flamelayer, journaldlayer) {
(None, None) => subscriber.init(),
(Some(layer1), None) => subscriber.with(layer1).init(),


@ -1127,7 +1127,7 @@ async fn metrics_endpoint() -> Result<()> {
let output = output.assert_failure()?;
output.any_output_line_contains(
"metrics snapshot",
"# TYPE zebrad_build_info counter",
&body,
"metrics exporter response",
"the metrics response header",