fix(test): Reduce verbose test logs (#5825)

* Remove verbose continuous_blockchain test logs

* Downgrade verbose zebra-network logs to debug

* Downgrade some state logs to debug during tests

* Mark where we would add always-on log filters, if we needed to

* Reduce the number of mempool property tests, to reduce logging
teor 2022-12-09 09:56:01 +10:00 committed by GitHub
parent 77b85cf767
commit 2041fda7bb
12 changed files with 113 additions and 34 deletions
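
The recurring technique in this commit is compile-time log-level selection: the same event stays at `info!` in normal builds but is emitted at `debug!` when compiled for tests, chosen with `cfg` attributes. A minimal, self-contained sketch of the pattern (the function and field names are illustrative, not taken from the diff):

use tracing::{debug, info};

fn report_startup(task_name: &str) {
    // Production builds log at `info`, which is visible by default.
    #[cfg(not(test))]
    info!(?task_name, "initializing task");

    // Test builds compile only the `debug` event, which the default
    // `warn`-level test filter hides.
    #[cfg(test)]
    debug!(?task_name, "initializing task");
}

Because the selection happens at compile time, test binaries never contain the `info`-level event at all, so no runtime filtering is needed.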

View File

@@ -1,3 +1,5 @@
//! Randomised property tests for Zcash blocks.
use std::{env, io::ErrorKind};
use proptest::{arbitrary::any, prelude::*, test_runner::Config};

View File

@@ -1,11 +1,13 @@
//! Randomised property tests for Proof of Work.
use proptest::{arbitrary::any, prelude::*, test_runner::Config};
use std::{env, sync::Arc};
use crate::block::{self, Block};
use crate::serialization::{ZcashDeserialize, ZcashDeserializeInto, ZcashSerialize};
use proptest::{arbitrary::any, prelude::*, test_runner::Config};
use crate::{
block::{self, Block},
serialization::{ZcashDeserialize, ZcashDeserializeInto, ZcashSerialize},
};
use super::super::*;

View File

@@ -226,7 +226,10 @@ async fn continuous_blockchain_restart() -> Result<(), Report> {
}
/// Test a continuous blockchain on `network`, restarting verification at `restart_height`.
#[spandoc::spandoc]
//
// This span is far too verbose for use during normal testing.
// Turn the SPANDOC: comments into doc comments to re-enable.
//#[spandoc::spandoc]
async fn continuous_blockchain(
restart_height: Option<block::Height>,
network: Network,
@@ -266,7 +269,7 @@
.map(|(_block, height, hash)| (*height, *hash))
.collect();
/// SPANDOC: Verify blocks, restarting at {?restart_height} {?network}
// SPANDOC: Verify blocks, restarting at {?restart_height} {?network}
{
let initial_tip = restart_height.map(|block::Height(height)| {
(blockchain[height as usize].1, blockchain[height as usize].2)
@@ -322,10 +325,10 @@
if let Some(restart_height) = restart_height {
if height <= restart_height {
let mut state_service = state_service.clone();
/// SPANDOC: Make sure the state service is ready for block {?height}
// SPANDOC: Make sure the state service is ready for block {?height}
let ready_state_service = state_service.ready().map_err(|e| eyre!(e)).await?;
/// SPANDOC: Add block directly to the state {?height}
// SPANDOC: Add block directly to the state {?height}
ready_state_service
.call(zebra_state::Request::CommitFinalizedBlock(
block.clone().into(),
@@ -338,16 +341,16 @@
}
}
/// SPANDOC: Make sure the verifier service is ready for block {?height}
// SPANDOC: Make sure the verifier service is ready for block {?height}
let ready_verifier_service = checkpoint_verifier.ready().map_err(|e| eyre!(e)).await?;
/// SPANDOC: Set up the future for block {?height}
// SPANDOC: Set up the future for block {?height}
let verify_future = timeout(
Duration::from_secs(VERIFY_TIMEOUT_SECONDS),
ready_verifier_service.call(block.clone()),
);
/// SPANDOC: spawn verification future in the background for block {?height}
// SPANDOC: spawn verification future in the background for block {?height}
let handle = tokio::spawn(verify_future.in_current_span());
handles.push(handle);
@@ -393,7 +396,7 @@
);
}
/// SPANDOC: wait on spawned verification tasks for restart height {?restart_height} {?network}
// SPANDOC: wait on spawned verification tasks for restart height {?restart_height} {?network}
while let Some(result) = handles.next().await {
result??.map_err(|e| eyre!(e))?;
}
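
For context, the spans disabled above come from the `spandoc` attribute macro: while a function carries `#[spandoc::spandoc]`, each `/// SPANDOC:` doc comment wraps the statement below it (including its `.await` points) in a tracing span. A minimal sketch of the re-enabled form, with a stand-in async body rather than the test's real verification call:

use color_eyre::eyre::Report;

#[spandoc::spandoc]
async fn verify_one(height: u32) -> Result<u32, Report> {
    /// SPANDOC: Verify the block at {?height}
    let verified = async move { height }.await;
    Ok(verified)
}

This hunk does the reverse: the attribute is commented out and the `/// SPANDOC:` lines become plain `//` comments, so no spans are generated until someone re-enables them.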

View File

@@ -215,7 +215,12 @@ impl Config {
// the full list of IP addresses will be shown in the log message
let debug_span = debug_span!("", remote_ip_addrs = ?ip_addrs);
let _span_guard = debug_span.enter();
// This log is needed for user debugging, but it's annoying during tests.
#[cfg(not(test))]
info!(seed = ?host, remote_ip_count = ?ip_addrs.len(), "resolved seed peer IP addresses");
#[cfg(test)]
debug!(seed = ?host, remote_ip_count = ?ip_addrs.len(), "resolved seed peer IP addresses");
for ip in &ip_addrs {
// Count each initial peer, recording the seed config and resolved IP address.

View File

@@ -1377,7 +1377,7 @@ where
// Wait until the listener closes the connection.
// The handshaker is fake, so it never sends any data.
let readable_result = outbound_stream.readable().await;
info!(
debug!(
?readable_result,
"outbound connection became readable or errored: \
closing connection to test inbound listener"

View File

@@ -278,7 +278,7 @@ impl RpcServer {
span.in_scope(|| {
info!("Stopping RPC server");
close_handle.clone().close();
info!("Stopped RPC server");
debug!("Stopped RPC server");
})
};
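
Note the asymmetry kept here: "Stopping RPC server" stays at `info!`, so the start of an RPC shutdown still appears in normal logs, and only the completion message drops to `debug!`.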

View File

@@ -268,11 +268,17 @@ impl Drop for ReadStateService {
// We are the last state with a reference to this thread, so we can
// wait until the block write task finishes, then check for panics (blocking).
// (We'd also like to abort the thread, but std::thread::JoinHandle can't do that.)
// This log is verbose during tests.
#[cfg(not(test))]
info!("waiting for the block write task to finish");
#[cfg(test)]
debug!("waiting for the block write task to finish");
if let Err(thread_panic) = block_write_task_handle.join() {
std::panic::resume_unwind(thread_panic);
} else {
info!("shutting down the state without waiting for the block write task");
debug!("shutting down the state because the block write task has finished");
}
}
} else {
@@ -788,7 +794,7 @@ impl ReadStateService {
block_write_task: Some(block_write_task),
};
tracing::info!("created new read-only state service");
tracing::debug!("created new read-only state service");
read_service
}

View File

@@ -227,7 +227,7 @@ pub async fn populated_state(
.await
.map(|change_result| change_result.expect("unexpected chain tip update failure"))
{
info!(
debug!(
timeout = ?humantime_seconds(CHAIN_TIP_UPDATE_WAIT_LIMIT),
?timeout_error,
"timeout waiting for chain tip change after committing block"

View File

@@ -616,18 +616,34 @@ impl DiskDb {
DiskDb::IDEAL_OPEN_FILE_LIMIT
);
} else if cfg!(windows) {
// This log is verbose during tests.
#[cfg(not(test))]
info!(
min_limit = ?DiskDb::MIN_OPEN_FILE_LIMIT,
ideal_limit = ?DiskDb::IDEAL_OPEN_FILE_LIMIT,
"assuming the open file limit is high enough for Zebra",
);
#[cfg(test)]
debug!(
min_limit = ?DiskDb::MIN_OPEN_FILE_LIMIT,
ideal_limit = ?DiskDb::IDEAL_OPEN_FILE_LIMIT,
"assuming the open file limit is high enough for Zebra",
);
} else {
#[cfg(not(test))]
info!(
?current_limit,
min_limit = ?DiskDb::MIN_OPEN_FILE_LIMIT,
ideal_limit = ?DiskDb::IDEAL_OPEN_FILE_LIMIT,
"the open file limit is high enough for Zebra",
);
#[cfg(test)]
debug!(
?current_limit,
min_limit = ?DiskDb::MIN_OPEN_FILE_LIMIT,
ideal_limit = ?DiskDb::IDEAL_OPEN_FILE_LIMIT,
"the open file limit is high enough for Zebra",
);
}
current_limit
@@ -667,11 +683,19 @@ impl DiskDb {
ephemeral_note = " and removing ephemeral files";
}
// This log is verbose during tests.
#[cfg(not(test))]
info!(
?path,
"forcing shutdown{} of a state database with multiple active instances",
ephemeral_note,
);
#[cfg(test)]
debug!(
?path,
"forcing shutdown{} of a state database with multiple active instances",
ephemeral_note,
);
} else {
if self.ephemeral {
ephemeral_note = " and files";
@@ -696,7 +720,8 @@
// - Zebra commits each block in a database transaction, any incomplete blocks get rolled back
// - ephemeral files are placed in the os temp dir and should be cleaned up automatically eventually
let path = self.path();
info!(?path, "flushing database to disk");
debug!(?path, "flushing database to disk");
self.db
.flush()
.expect("unexpected failure flushing SST data to disk");
@@ -759,7 +784,12 @@ impl DiskDb {
}
let path = self.path();
// This log is verbose during tests.
#[cfg(not(test))]
info!(?path, "removing temporary database files");
#[cfg(test)]
debug!(?path, "removing temporary database files");
// We'd like to use `rocksdb::Env::mem_env` for ephemeral databases,
// but the Zcash blockchain might not fit in memory. So we just
@@ -776,11 +806,19 @@
let result = std::fs::remove_dir_all(path);
if result.is_err() {
// This log is verbose during tests.
#[cfg(not(test))]
info!(
?result,
?path,
"removing temporary database files caused an error",
);
#[cfg(test)]
debug!(
?result,
?path,
"removing temporary database files caused an error",
);
} else {
debug!(
?result,

View File

@@ -4,16 +4,18 @@
#![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_test")]
// Each lazy_static variable uses additional recursion
#![recursion_limit = "512"]
use std::sync::Once;
use color_eyre::section::PanicMessage;
use once_cell::sync::Lazy;
use owo_colors::OwoColorize;
use tracing_error::ErrorLayer;
use tracing_subscriber::{fmt, prelude::*, EnvFilter};
use std::sync::Once;
#[allow(missing_docs)]
pub mod command;
pub mod mock_service;
pub mod net;
pub mod network_addr;
@@ -85,15 +87,22 @@ pub fn init() -> impl Drop {
// Use the RUST_LOG env var, or by default:
// - warn for most tests, and
// - for some modules, hide expected warn logs
let filter_layer = EnvFilter::try_from_default_env().unwrap_or_else(|_| {
EnvFilter::try_new("warn")
.unwrap()
.add_directive("zebra_consensus=error".parse().unwrap())
.add_directive("zebra_network=error".parse().unwrap())
.add_directive("zebra_state=error".parse().unwrap())
.add_directive("zebrad=error".parse().unwrap())
.add_directive("tor_circmgr=error".parse().unwrap())
});
let filter_layer = EnvFilter::try_from_default_env()
.unwrap_or_else(|_| {
// These filters apply when RUST_LOG isn't set
EnvFilter::try_new("warn")
.unwrap()
.add_directive("zebra_consensus=error".parse().unwrap())
.add_directive("zebra_network=error".parse().unwrap())
.add_directive("zebra_state=error".parse().unwrap())
.add_directive("zebrad=error".parse().unwrap())
.add_directive("tor_circmgr=error".parse().unwrap())
})
// These filters apply on top of RUST_LOG.
// Avoid adding filters to this list, because users can't override them.
//
// (There are currently no always-on directives.)
;
tracing_subscriber::registry()
.with(filter_layer)
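
If an always-on directive were ever added at the marked spot, it would chain after the `unwrap_or_else` fallback, so it would apply even when `RUST_LOG` is set. A hypothetical sketch (the `noisy_dep` module name is a placeholder, not a real Zebra dependency):

use tracing_subscriber::EnvFilter;

fn test_filter() -> EnvFilter {
    EnvFilter::try_from_default_env()
        .unwrap_or_else(|_| EnvFilter::try_new("warn").unwrap())
        // A directive chained here applies on top of RUST_LOG, which is
        // why the comment above warns against adding one.
        .add_directive("noisy_dep=off".parse().unwrap())
}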

View File

@@ -183,7 +183,11 @@
/// Runs until the [`SyncStatus`] loses its connection to the chain syncer, which happens when
/// Zebra is shutting down.
pub async fn run(mut self) -> Result<(), BoxError> {
// This log is verbose during tests.
#[cfg(not(test))]
info!("initializing mempool crawler task");
#[cfg(test)]
debug!("initializing mempool crawler task");
loop {
self.wait_until_enabled().await?;

View File

@@ -1,7 +1,8 @@
//! Randomised property tests for the mempool.
use proptest::collection::vec;
use proptest::prelude::*;
use std::env;
use proptest::{collection::vec, prelude::*};
use proptest_derive::Arbitrary;
use chrono::Duration;
@@ -32,12 +33,21 @@ type MockState = MockService<zs::Request, zs::Response, PropTestAssertion>;
/// A [`MockService`] representing the Zebra transaction verifier service.
type MockTxVerifier = MockService<tx::Request, tx::Response, PropTestAssertion, TransactionError>;
const CHAIN_LENGTH: usize = 10;
const CHAIN_LENGTH: usize = 5;
const DEFAULT_MEMPOOL_PROPTEST_CASES: u32 = 8;
proptest! {
// The mempool tests can generate very verbose logs, so we use fewer cases by
// default. Set the PROPTEST_CASES env var to override this default.
#![proptest_config(proptest::test_runner::Config::with_cases(env::var("PROPTEST_CASES")
.ok()
.and_then(|v| v.parse().ok())
.unwrap_or(DEFAULT_MEMPOOL_PROPTEST_CASES)))]
/// Test if the mempool storage is cleared on a chain reset.
#[test]
fn storage_is_cleared_on_chain_reset(
fn storage_is_cleared_on_single_chain_reset(
network in any::<Network>(),
transaction in any::<VerifiedUnminedTx>(),
chain_tip in any::<ChainTipBlock>(),
@@ -87,7 +97,7 @@
/// Test if the mempool storage is cleared on multiple chain resets.
#[test]
fn storage_is_cleared_on_chain_resets(
fn storage_is_cleared_on_multiple_chain_resets(
network in any::<Network>(),
mut previous_chain_tip in any::<ChainTipBlock>(),
mut transactions in vec(any::<VerifiedUnminedTx>(), 0..CHAIN_LENGTH),
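
With this configuration, the mempool property tests run 8 cases by default instead of proptest's usual 256. Setting the `PROPTEST_CASES` environment variable before `cargo test` restores heavier coverage, at the cost of more log output.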