fix(clippy): Put Rust format variables inline (#5783)

* cargo clippy --fix --all-features --all-targets

With rustc 1.67.0-nightly (234151769 2022-12-03)

* cargo fmt --all
teor 2022-12-08 11:05:57 +10:00 committed by GitHub
parent 678c519032
commit 09836d2800
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
35 changed files with 196 additions and 427 deletions
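
All of the edits below follow one mechanical pattern: clippy's `uninlined_format_args` lint moves positional format arguments into inline captures, which Rust has supported for plain identifiers since 1.58. A minimal standalone sketch of the rewrite (using `println!` here rather than the assertion and panic call sites in the diff):

    fn main() {
        let err = std::io::Error::new(std::io::ErrorKind::Other, "closed");

        // Before: the value is passed as a separate positional argument.
        println!("response should fail with a Closed, got: {:?}", err);

        // After `cargo clippy --fix`: the identifier is captured inline in the
        // format string, so the trailing argument is removed.
        println!("response should fail with a Closed, got: {err:?}");
    }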

View File

@ -39,8 +39,7 @@ async fn wakes_pending_waiters_on_close() {
let err = assert_ready_err!(response.poll());
assert!(
err.is::<error::Closed>(),
"response should fail with a Closed, got: {:?}",
err,
"response should fail with a Closed, got: {err:?}",
);
assert!(
@ -50,8 +49,7 @@ async fn wakes_pending_waiters_on_close() {
let err = assert_ready_err!(ready1.poll());
assert!(
err.is::<error::ServiceError>(),
"ready 1 should fail with a ServiceError {{ Closed }}, got: {:?}",
err,
"ready 1 should fail with a ServiceError {{ Closed }}, got: {err:?}",
);
assert!(
@ -61,8 +59,7 @@ async fn wakes_pending_waiters_on_close() {
let err = assert_ready_err!(ready1.poll());
assert!(
err.is::<error::ServiceError>(),
"ready 2 should fail with a ServiceError {{ Closed }}, got: {:?}",
err,
"ready 2 should fail with a ServiceError {{ Closed }}, got: {err:?}",
);
}
@ -99,8 +96,7 @@ async fn wakes_pending_waiters_on_failure() {
let err = assert_ready_err!(response.poll());
assert!(
err.is::<error::ServiceError>(),
"response should fail with a ServiceError, got: {:?}",
err
"response should fail with a ServiceError, got: {err:?}"
);
assert!(
@ -110,8 +106,7 @@ async fn wakes_pending_waiters_on_failure() {
let err = assert_ready_err!(ready1.poll());
assert!(
err.is::<error::ServiceError>(),
"ready 1 should fail with a ServiceError, got: {:?}",
err
"ready 1 should fail with a ServiceError, got: {err:?}"
);
assert!(
@ -121,7 +116,6 @@ async fn wakes_pending_waiters_on_failure() {
let err = assert_ready_err!(ready1.poll());
assert!(
err.is::<error::ServiceError>(),
"ready 2 should fail with a ServiceError, got: {:?}",
err
"ready 2 should fail with a ServiceError, got: {err:?}"
);
}

View File

@ -158,22 +158,16 @@ fn multi_transaction_block(oversized: bool) -> Block {
oversized,
serialized_len > MAX_BLOCK_BYTES.try_into().unwrap(),
"block is over-sized if requested:\n\
oversized: {},\n\
serialized_len: {},\n\
MAX_BLOCK_BYTES: {},",
oversized,
serialized_len,
MAX_BLOCK_BYTES,
oversized: {oversized},\n\
serialized_len: {serialized_len},\n\
MAX_BLOCK_BYTES: {MAX_BLOCK_BYTES},",
);
assert!(
serialized_len > MIN_LARGE_BLOCK_BYTES.try_into().unwrap(),
"block is large\n\
oversized: {},\n\
serialized_len: {},\n\
MIN_LARGE_BLOCK_BYTES: {},",
oversized,
serialized_len,
MIN_LARGE_BLOCK_BYTES,
oversized: {oversized},\n\
serialized_len: {serialized_len},\n\
MIN_LARGE_BLOCK_BYTES: {MIN_LARGE_BLOCK_BYTES},",
);
block
@ -238,22 +232,16 @@ fn single_transaction_block_many_inputs(oversized: bool) -> Block {
oversized,
serialized_len > MAX_BLOCK_BYTES.try_into().unwrap(),
"block is over-sized if requested:\n\
oversized: {},\n\
serialized_len: {},\n\
MAX_BLOCK_BYTES: {},",
oversized,
serialized_len,
MAX_BLOCK_BYTES,
oversized: {oversized},\n\
serialized_len: {serialized_len},\n\
MAX_BLOCK_BYTES: {MAX_BLOCK_BYTES},",
);
assert!(
serialized_len > MIN_LARGE_BLOCK_BYTES.try_into().unwrap(),
"block is large\n\
oversized: {},\n\
serialized_len: {},\n\
MIN_LARGE_BLOCK_BYTES: {},",
oversized,
serialized_len,
MIN_LARGE_BLOCK_BYTES,
oversized: {oversized},\n\
serialized_len: {serialized_len},\n\
MIN_LARGE_BLOCK_BYTES: {MIN_LARGE_BLOCK_BYTES},",
);
block
@ -316,22 +304,16 @@ fn single_transaction_block_many_outputs(oversized: bool) -> Block {
oversized,
serialized_len > MAX_BLOCK_BYTES.try_into().unwrap(),
"block is over-sized if requested:\n\
oversized: {},\n\
serialized_len: {},\n\
MAX_BLOCK_BYTES: {},",
oversized,
serialized_len,
MAX_BLOCK_BYTES,
oversized: {oversized},\n\
serialized_len: {serialized_len},\n\
MAX_BLOCK_BYTES: {MAX_BLOCK_BYTES},",
);
assert!(
serialized_len > MIN_LARGE_BLOCK_BYTES.try_into().unwrap(),
"block is large\n\
oversized: {},\n\
serialized_len: {},\n\
MIN_LARGE_BLOCK_BYTES: {},",
oversized,
serialized_len,
MIN_LARGE_BLOCK_BYTES,
oversized: {oversized},\n\
serialized_len: {serialized_len},\n\
MIN_LARGE_BLOCK_BYTES: {MIN_LARGE_BLOCK_BYTES},",
);
block

View File

@ -181,9 +181,7 @@ fn block_test_vectors_height(network: Network) {
{
assert!(
sapling_roots.contains_key(&height),
"post-sapling block test vectors must have matching sapling root test vectors: missing {} {}",
network,
height
"post-sapling block test vectors must have matching sapling root test vectors: missing {network} {height}"
);
}
}
@ -225,10 +223,7 @@ fn block_commitment(network: Network) {
.expect("block is structurally valid");
let commitment = block.commitment(network).unwrap_or_else(|_| {
panic!(
"unexpected structurally invalid block commitment at {} {}",
network, height
)
panic!("unexpected structurally invalid block commitment at {network} {height}")
});
if let FinalSaplingRoot(final_sapling_root) = commitment {
@ -238,9 +233,7 @@ fn block_commitment(network: Network) {
assert_eq!(
final_sapling_root,
crate::sapling::tree::Root::try_from(*expected_final_sapling_root).unwrap(),
"unexpected invalid final sapling root commitment at {} {}",
network,
height
"unexpected invalid final sapling root commitment at {network} {height}"
);
}
}

View File

@ -346,16 +346,11 @@ impl Transaction {
.add_transaction(self, outputs)
.unwrap_or_else(|err| {
panic!(
"unexpected chain value pool error: {:?}, \n\
original chain value pools: {:?}, \n\
transaction chain value change: {:?}, \n\
input-only transaction chain value pools: {:?}, \n\
calculated remaining transaction value: {:?}",
err,
chain_value_pools, // old value
transaction_chain_value_pool_change,
input_chain_value_pools,
remaining_transaction_value,
"unexpected chain value pool error: {err:?}, \n\
original chain value pools: {chain_value_pools:?}, \n\
transaction chain value change: {transaction_chain_value_pool_change:?}, \n\
input-only transaction chain value pools: {input_chain_value_pools:?}, \n\
calculated remaining transaction value: {remaining_transaction_value:?}",
)
});
@ -495,9 +490,8 @@ impl Transaction {
.remaining_transaction_value()
.unwrap_or_else(|err| {
panic!(
"unexpected remaining transaction value: {:?}, \
calculated remaining input value: {:?}",
err, remaining_input_value
"unexpected remaining transaction value: {err:?}, \
calculated remaining input value: {remaining_input_value:?}"
)
});
assert_eq!(

View File

@ -114,10 +114,7 @@ fn get_transparent_output_address_with_blocks_for_network(network: Network) {
}
assert!(
addr.is_some(),
"address of {:?}; block #{}; tx #{}; must not be None",
output,
height,
idx,
"address of {output:?}; block #{height}; tx #{idx}; must not be None",
);
valid_addresses += 1;
}

View File

@ -383,15 +383,13 @@ impl ExpandedDifficulty {
// This assertion also makes sure that size fits in its 8 bit compact field
assert!(
size < (31 + OFFSET) as _,
"256^size (256^{}) must fit in a u256, after the sign bit adjustment and offset",
size
"256^size (256^{size}) must fit in a u256, after the sign bit adjustment and offset"
);
let size = u32::try_from(size).expect("a 0-6 bit value fits in a u32");
assert!(
mantissa <= UNSIGNED_MANTISSA_MASK.into(),
"mantissa {:x?} must fit in its compact field",
mantissa
"mantissa {mantissa:x?} must fit in its compact field"
);
let mantissa = u32::try_from(mantissa).expect("a 0-23 bit value fits in a u32");

View File

@ -226,8 +226,7 @@ where
assert!(
matches!(response, tx::Response::Block { .. }),
"unexpected response from transaction verifier: {:?}",
response
"unexpected response from transaction verifier: {response:?}"
);
legacy_sigop_count += response.legacy_sigop_count();

View File

@ -376,15 +376,13 @@ async fn continuous_blockchain(
assert_eq!(
handles.len(),
0,
"unexpected number of verify tasks for restart height: {:?}",
restart_height,
"unexpected number of verify tasks for restart height: {restart_height:?}",
);
} else {
assert_eq!(
handles.len(),
blockchain_len - restart_height - 1,
"unexpected number of verify tasks for restart height: {:?}",
restart_height,
"unexpected number of verify tasks for restart height: {restart_height:?}",
);
}
} else {
@ -404,20 +402,17 @@ async fn continuous_blockchain(
assert_eq!(
checkpoint_verifier.previous_checkpoint_height(),
FinalCheckpoint,
"unexpected previous checkpoint for restart height: {:?}",
restart_height,
"unexpected previous checkpoint for restart height: {restart_height:?}",
);
assert_eq!(
checkpoint_verifier.target_checkpoint_height(),
FinishedVerifying,
"unexpected target checkpoint for restart height: {:?}",
restart_height,
"unexpected target checkpoint for restart height: {restart_height:?}",
);
assert_eq!(
checkpoint_verifier.checkpoint_list.max_height(),
expected_max_height,
"unexpected max checkpoint height for restart height: {:?}",
restart_height,
"unexpected max checkpoint height for restart height: {restart_height:?}",
);
}

View File

@ -207,8 +207,7 @@ impl From<BoxError> for TransactionError {
}
TransactionError::InternalDowncastError(format!(
"downcast to known transaction error type failed, original error: {:?}",
err,
"downcast to known transaction error type failed, original error: {err:?}",
))
}
}

View File

@ -326,8 +326,7 @@ impl<'de> Deserialize<'de> for Config {
Err(_) => match config.listen_addr.parse::<IpAddr>() {
Ok(ip) => Ok(SocketAddr::new(ip, config.network.default_port())),
Err(err) => Err(de::Error::custom(format!(
"{}; Hint: addresses can be a IPv4, IPv6 (with brackets), or a DNS name, the port is optional",
err
"{err}; Hint: addresses can be a IPv4, IPv6 (with brackets), or a DNS name, the port is optional"
))),
},
}?;

View File

@ -289,8 +289,7 @@ impl MustUseClientResponseSender {
.take()
.unwrap_or_else(|| {
panic!(
"multiple uses of response sender: response must be sent exactly once: {:?}",
self
"multiple uses of response sender: response must be sent exactly once: {self:?}"
)
})
.send(response)
@ -328,8 +327,7 @@ impl Drop for MustUseClientResponseSender {
// is_canceled() will not panic, because we check is_none() first
assert!(
self.tx.is_none() || self.is_canceled(),
"unused client response sender: oneshot must be used or canceled: {:?}",
self
"unused client response sender: oneshot must be used or canceled: {self:?}"
);
}
}

View File

@ -95,8 +95,7 @@ async fn connection_run_loop_spawn_ok() {
let connection_result = futures::poll!(&mut connection_join_handle);
assert!(
matches!(connection_result, Poll::Pending),
"unexpected run loop termination: {:?}",
connection_result,
"unexpected run loop termination: {connection_result:?}",
);
// We need to abort the connection, because it holds a lock on the outbound channel.
@ -175,8 +174,7 @@ async fn connection_run_loop_message_ok() {
let connection_result = futures::poll!(&mut connection_join_handle);
assert!(
matches!(connection_result, Poll::Pending),
"unexpected run loop termination: {:?}",
connection_result,
"unexpected run loop termination: {connection_result:?}",
);
// We need to abort the connection, because it holds a lock on the outbound channel.
@ -500,8 +498,7 @@ async fn connection_run_loop_send_timeout_nil_response() {
let connection_result = futures::poll!(&mut connection_join_handle);
assert!(
matches!(connection_result, Poll::Ready(Ok(()))),
"expected run loop termination, but run loop continued: {:?}",
connection_result,
"expected run loop termination, but run loop continued: {connection_result:?}",
);
let outbound_message = peer_outbound_messages.next().await;
@ -575,8 +572,7 @@ async fn connection_run_loop_send_timeout_expect_response() {
let connection_result = futures::poll!(&mut connection_join_handle);
assert!(
matches!(connection_result, Poll::Ready(Ok(()))),
"expected run loop termination, but run loop continued: {:?}",
connection_result,
"expected run loop termination, but run loop continued: {connection_result:?}",
);
let outbound_message = peer_outbound_messages.next().await;
@ -651,8 +647,7 @@ async fn connection_run_loop_receive_timeout() {
let connection_result = futures::poll!(&mut connection_join_handle);
assert!(
matches!(connection_result, Poll::Pending),
"unexpected run loop termination: {:?}",
connection_result,
"unexpected run loop termination: {connection_result:?}",
);
// We need to abort the connection, because it holds a lock on the outbound channel.

View File

@ -154,15 +154,11 @@ where
now = Instant::now();
assert!(
now >= minimum_reconnect_instant,
"all candidates should obey the minimum rate-limit: now: {:?} min: {:?}",
now,
minimum_reconnect_instant,
"all candidates should obey the minimum rate-limit: now: {now:?} min: {minimum_reconnect_instant:?}",
);
assert!(
now <= maximum_reconnect_instant,
"rate-limited candidates should not be delayed too long: now: {:?} max: {:?}. Hint: is the test machine overloaded?",
now,
maximum_reconnect_instant,
"rate-limited candidates should not be delayed too long: now: {now:?} max: {maximum_reconnect_instant:?}. Hint: is the test machine overloaded?",
);
minimum_reconnect_instant = now + MIN_PEER_CONNECTION_INTERVAL;

View File

@ -303,8 +303,7 @@ async fn crawler_peer_limit_zero_connect_panic() {
// `Err(_)` means that no peers are available, and the sender has not been dropped.
// `Ok(None)` means that no peers are available, and the sender has been dropped.
matches!(peer_result, Err(_) | Ok(None)),
"unexpected peer when outbound limit is zero: {:?}",
peer_result,
"unexpected peer when outbound limit is zero: {peer_result:?}",
);
}
@ -327,8 +326,7 @@ async fn crawler_peer_limit_one_connect_error() {
// `Err(_)` means that no peers are available, and the sender has not been dropped.
// `Ok(None)` means that no peers are available, and the sender has been dropped.
matches!(peer_result, Err(_) | Ok(None)),
"unexpected peer when all connections error: {:?}",
peer_result,
"unexpected peer when all connections error: {peer_result:?}",
);
}
@ -370,10 +368,8 @@ async fn crawler_peer_limit_one_connect_ok_then_drop() {
Ok(Some(peer_result)) => {
assert!(
matches!(peer_result, Ok((_, _))),
"unexpected connection error: {:?}\n\
{} previous peers succeeded",
peer_result,
peer_count,
"unexpected connection error: {peer_result:?}\n\
{peer_count} previous peers succeeded",
);
peer_count += 1;
}
@ -434,10 +430,8 @@ async fn crawler_peer_limit_one_connect_ok_stay_open() {
Ok(Some(peer_change_result)) => {
assert!(
matches!(peer_change_result, Ok((_, _))),
"unexpected connection error: {:?}\n\
{} previous peers succeeded",
peer_change_result,
peer_change_count,
"unexpected connection error: {peer_change_result:?}\n\
{peer_change_count} previous peers succeeded",
);
peer_change_count += 1;
}
@ -502,8 +496,7 @@ async fn crawler_peer_limit_default_connect_error() {
// `Err(_)` means that no peers are available, and the sender has not been dropped.
// `Ok(None)` means that no peers are available, and the sender has been dropped.
matches!(peer_result, Err(_) | Ok(None)),
"unexpected peer when all connections error: {:?}",
peer_result,
"unexpected peer when all connections error: {peer_result:?}",
);
}
@ -547,10 +540,8 @@ async fn crawler_peer_limit_default_connect_ok_then_drop() {
Ok(Some(peer_result)) => {
assert!(
matches!(peer_result, Ok((_, _))),
"unexpected connection error: {:?}\n\
{} previous peers succeeded",
peer_result,
peer_count,
"unexpected connection error: {peer_result:?}\n\
{peer_count} previous peers succeeded",
);
peer_count += 1;
}
@ -612,10 +603,8 @@ async fn crawler_peer_limit_default_connect_ok_stay_open() {
Ok(Some(peer_change_result)) => {
assert!(
matches!(peer_change_result, Ok((_, _))),
"unexpected connection error: {:?}\n\
{} previous peers succeeded",
peer_change_result,
peer_change_count,
"unexpected connection error: {peer_change_result:?}\n\
{peer_change_count} previous peers succeeded",
);
peer_change_count += 1;
}
@ -683,8 +672,7 @@ async fn listener_peer_limit_zero_handshake_panic() {
// `Err(_)` means that no peers are available, and the sender has not been dropped.
// `Ok(None)` means that no peers are available, and the sender has been dropped.
matches!(peer_result, Err(_) | Ok(None)),
"unexpected peer when inbound limit is zero: {:?}",
peer_result,
"unexpected peer when inbound limit is zero: {peer_result:?}",
);
}
@ -709,8 +697,7 @@ async fn listener_peer_limit_one_handshake_error() {
// `Err(_)` means that no peers are available, and the sender has not been dropped.
// `Ok(None)` means that no peers are available, and the sender has been dropped.
matches!(peer_result, Err(_) | Ok(None)),
"unexpected peer when all handshakes error: {:?}",
peer_result,
"unexpected peer when all handshakes error: {peer_result:?}",
);
}
@ -756,10 +743,8 @@ async fn listener_peer_limit_one_handshake_ok_then_drop() {
Ok(Some(peer_result)) => {
assert!(
matches!(peer_result, Ok((_, _))),
"unexpected connection error: {:?}\n\
{} previous peers succeeded",
peer_result,
peer_count,
"unexpected connection error: {peer_result:?}\n\
{peer_count} previous peers succeeded",
);
peer_count += 1;
}
@ -824,10 +809,8 @@ async fn listener_peer_limit_one_handshake_ok_stay_open() {
Ok(Some(peer_change_result)) => {
assert!(
matches!(peer_change_result, Ok((_, _))),
"unexpected connection error: {:?}\n\
{} previous peers succeeded",
peer_change_result,
peer_change_count,
"unexpected connection error: {peer_change_result:?}\n\
{peer_change_count} previous peers succeeded",
);
peer_change_count += 1;
}
@ -895,8 +878,7 @@ async fn listener_peer_limit_default_handshake_error() {
// `Err(_)` means that no peers are available, and the sender has not been dropped.
// `Ok(None)` means that no peers are available, and the sender has been dropped.
matches!(peer_result, Err(_) | Ok(None)),
"unexpected peer when all handshakes error: {:?}",
peer_result,
"unexpected peer when all handshakes error: {peer_result:?}",
);
}
@ -946,10 +928,8 @@ async fn listener_peer_limit_default_handshake_ok_then_drop() {
Ok(Some(peer_result)) => {
assert!(
matches!(peer_result, Ok((_, _))),
"unexpected connection error: {:?}\n\
{} previous peers succeeded",
peer_result,
peer_count,
"unexpected connection error: {peer_result:?}\n\
{peer_count} previous peers succeeded",
);
peer_count += 1;
}
@ -1014,10 +994,8 @@ async fn listener_peer_limit_default_handshake_ok_stay_open() {
Ok(Some(peer_change_result)) => {
assert!(
matches!(peer_change_result, Ok((_, _))),
"unexpected connection error: {:?}\n\
{} previous peers succeeded",
peer_change_result,
peer_change_count,
"unexpected connection error: {peer_change_result:?}\n\
{peer_change_count} previous peers succeeded",
);
peer_change_count += 1;
}
@ -1090,15 +1068,13 @@ async fn add_initial_peers_is_rate_limited() {
// Make sure the rate limiting worked by checking if it took long enough
assert!(
elapsed > constants::MIN_PEER_CONNECTION_INTERVAL.saturating_mul((PEER_COUNT - 1) as u32),
"elapsed only {:?}",
elapsed
"elapsed only {elapsed:?}"
);
let initial_peers_result = initial_peers_task_handle.await;
assert!(
matches!(initial_peers_result, Ok(Ok(_))),
"unexpected error or panic in add_initial_peers task: {:?}",
initial_peers_result,
"unexpected error or panic in add_initial_peers task: {initial_peers_result:?}",
);
// Check for panics or errors in the address book updater task.
@ -1110,8 +1086,7 @@ async fn add_initial_peers_is_rate_limited() {
// We can't check for error equality due to type erasure,
// and we can't downcast due to ownership.
|| matches!(updater_result, Some(Ok(Err(ref _all_senders_closed)))),
"unexpected error or panic in address book updater task: {:?}",
updater_result,
"unexpected error or panic in address book updater task: {updater_result:?}",
);
}
@ -1330,8 +1305,7 @@ where
assert!(
matches!(crawl_result, None)
|| matches!(crawl_result, Some(Err(ref e)) if e.is_cancelled()),
"unexpected error or panic in peer crawler task: {:?}",
crawl_result,
"unexpected error or panic in peer crawler task: {crawl_result:?}",
);
// Check the final address book contents.
@ -1436,8 +1410,7 @@ where
assert!(
matches!(listen_result, None)
|| matches!(listen_result, Some(Err(ref e)) if e.is_cancelled()),
"unexpected error or panic in inbound peer listener task: {:?}",
listen_result,
"unexpected error or panic in inbound peer listener task: {listen_result:?}",
);
(config, peerset_rx)

View File

@ -271,9 +271,7 @@ impl InventoryRegistry {
use InventoryHash::*;
assert!(
matches!(inv, Block(_) | Tx(_) | Wtx(_)),
"unexpected inventory type: {:?} from peer: {:?}",
inv,
addr,
"unexpected inventory type: {inv:?} from peer: {addr:?}",
);
let current = self.current.entry(inv).or_default();

View File

@ -874,9 +874,8 @@ where
let address_metrics = *self.address_metrics.borrow();
panic!(
"unexpectedly exceeded configured peer set connection limit: \n\
peers: {:?}, ready: {:?}, unready: {:?}, \n\
address_metrics: {:?}",
num_peers, num_ready, num_unready, address_metrics,
peers: {num_peers:?}, ready: {num_ready:?}, unready: {num_unready:?}, \n\
address_metrics: {address_metrics:?}",
);
}
}

View File

@ -54,15 +54,11 @@ fn parses_msg_addr_v1_ip() {
if let Message::Addr(addrs) = deserialized {
assert!(
!addrs.is_empty(),
"expected some AddrV1s in case {}: {:?}",
case_idx,
addrs
"expected some AddrV1s in case {case_idx}: {addrs:?}"
);
assert!(
addrs.len() <= 2,
"too many AddrV1s in case {}: {:?}",
case_idx,
addrs
"too many AddrV1s in case {case_idx}: {addrs:?}"
);
// Check all the fields in the first test case
@ -92,10 +88,7 @@ fn parses_msg_addr_v1_ip() {
);
}
} else {
panic!(
"unexpected message variant in case {}: {:?}",
case_idx, deserialized
);
panic!("unexpected message variant in case {case_idx}: {deserialized:?}");
}
}
}
@ -119,15 +112,10 @@ fn parses_msg_addr_v1_empty() {
if let Message::Addr(addrs) = deserialized {
assert!(
addrs.is_empty(),
"expected empty AddrV1 list for case {}: {:?}",
case_idx,
addrs,
"expected empty AddrV1 list for case {case_idx}: {addrs:?}",
);
} else {
panic!(
"unexpected message variant in case {}: {:?}",
case_idx, deserialized
);
panic!("unexpected message variant in case {case_idx}: {deserialized:?}");
}
}
}
@ -153,15 +141,11 @@ fn parses_msg_addr_v2_ip() {
if let Message::Addr(addrs) = deserialized {
assert!(
!addrs.is_empty(),
"expected some AddrV2s in case {}: {:?}",
case_idx,
addrs
"expected some AddrV2s in case {case_idx}: {addrs:?}"
);
assert!(
addrs.len() <= 2,
"too many AddrV2s in case {}: {:?}",
case_idx,
addrs
"too many AddrV2s in case {case_idx}: {addrs:?}"
);
// Check all the fields in the IPv4 and IPv6 test cases
@ -216,10 +200,7 @@ fn parses_msg_addr_v2_ip() {
);
}
} else {
panic!(
"unexpected message variant in case {}: {:?}",
case_idx, deserialized
);
panic!("unexpected message variant in case {case_idx}: {deserialized:?}");
}
}
}
@ -243,15 +224,10 @@ fn parses_msg_addr_v2_empty() {
if let Message::Addr(addrs) = deserialized {
assert!(
addrs.is_empty(),
"expected empty AddrV2 list for case {}: {:?}",
case_idx,
addrs,
"expected empty AddrV2 list for case {case_idx}: {addrs:?}",
);
} else {
panic!(
"unexpected message variant in case {}: {:?}",
case_idx, deserialized
);
panic!("unexpected message variant in case {case_idx}: {deserialized:?}");
}
}
}
@ -270,8 +246,7 @@ fn parses_msg_addr_v2_invalid() {
codec
.read_addrv2(&mut addr_v2_bytes.as_slice())
.expect_err(&format!(
"unexpected success: deserializing invalid AddrV2 case {} should have failed",
case_idx
));
"unexpected success: deserializing invalid AddrV2 case {case_idx} should have failed"
));
}
}

View File

@ -564,10 +564,7 @@ async fn rpc_getaddressutxos_invalid_arguments() {
.unwrap_err();
assert_eq!(
error.message,
format!(
"invalid address \"{}\": parse error: t-addr decoding error",
address
)
format!("invalid address \"{address}\": parse error: t-addr decoding error")
);
mempool.expect_no_requests().await;

View File

@ -207,27 +207,23 @@ impl FinalizedState {
if self.db.is_empty() {
assert_eq!(
committed_tip_hash, finalized.block.header.previous_block_hash,
"the first block added to an empty state must be a genesis block, source: {}",
source,
"the first block added to an empty state must be a genesis block, source: {source}",
);
assert_eq!(
block::Height(0),
finalized.height,
"cannot commit genesis: invalid height, source: {}",
source,
"cannot commit genesis: invalid height, source: {source}",
);
} else {
assert_eq!(
committed_tip_height.expect("state must have a genesis block committed") + 1,
Some(finalized.height),
"committed block height must be 1 more than the finalized tip height, source: {}",
source,
"committed block height must be 1 more than the finalized tip height, source: {source}",
);
assert_eq!(
committed_tip_hash, finalized.block.header.previous_block_hash,
"committed block must be a child of the finalized tip, source: {}",
source,
"committed block must be a child of the finalized tip, source: {source}",
);
}

View File

@ -458,10 +458,9 @@ impl DiskDb {
// TODO: provide a different hint if the disk is full, see #1623
Err(e) => panic!(
"Opening database {:?} failed: {:?}. \
"Opening database {path:?} failed: {e:?}. \
Hint: Check if another zebrad process is running. \
Try changing the state cache_dir in the Zebra config.",
path, e,
),
}
}

View File

@ -132,10 +132,8 @@ pub fn truncate_zero_be_bytes(mem_bytes: &[u8], disk_len: usize) -> &[u8] {
assert!(
discarded.iter().all(|&byte| byte == 0),
"unexpected `mem_bytes` content: non-zero discarded bytes: {:?}\n\
truncated: {:?}",
discarded,
truncated,
"unexpected `mem_bytes` content: non-zero discarded bytes: {discarded:?}\n\
truncated: {truncated:?}",
);
assert_eq!(truncated.len(), disk_len);

View File

@ -402,13 +402,11 @@ fn snapshot_block_and_transaction_data(state: &FinalizedState) {
// By definition, all of these lists should be in chain order.
assert!(
is_sorted(&stored_block_hashes),
"unsorted: {:?}",
stored_block_hashes
"unsorted: {stored_block_hashes:?}"
);
assert!(
is_sorted(&stored_transactions),
"unsorted: {:?}",
stored_transactions
"unsorted: {stored_transactions:?}"
);
// The blocks, trees, transactions, and their hashes are in height/index order,
@ -513,17 +511,13 @@ fn snapshot_transparent_address_data(state: &FinalizedState, height: u32) {
// Check that the lists are in chain order
assert!(
is_sorted(&stored_utxo_locations),
"unsorted: {:?}\n\
for address: {:?}",
stored_utxo_locations,
address,
"unsorted: {stored_utxo_locations:?}\n\
for address: {address:?}",
);
assert!(
is_sorted(&stored_transaction_locations),
"unsorted: {:?}\n\
for address: {:?}",
stored_transaction_locations,
address,
"unsorted: {stored_transaction_locations:?}\n\
for address: {address:?}",
);
// The default raw data serialization is very verbose, so we hex-encode the bytes.

View File

@ -556,8 +556,7 @@ fn commitment_is_validated_for_network_upgrade(network: Network, network_upgrade
zebra_chain::block::CommitmentError::InvalidChainHistoryRoot { .. },
) => {}
_ => panic!(
"Error must be InvalidBlockCommitment::InvalidChainHistoryRoot instead of {:?}",
err
"Error must be InvalidBlockCommitment::InvalidChainHistoryRoot instead of {err:?}"
),
};

View File

@ -263,15 +263,11 @@ pub fn check_failure_regexes(
let ignore_matches = ignore_matches.join(",");
let ignore_msg = if failure_matches.is_empty() {
format!(
"Log matched ignore regexes: {:?}, but no failure regexes",
ignore_matches,
)
format!("Log matched ignore regexes: {ignore_matches:?}, but no failure regexes",)
} else {
let failure_matches = failure_matches.join(",");
format!(
"Ignoring failure regexes: {:?}, because log matched ignore regexes: {:?}",
failure_matches, ignore_matches,
"Ignoring failure regexes: {failure_matches:?}, because log matched ignore regexes: {ignore_matches:?}",
)
};

View File

@ -26,17 +26,11 @@ fn is_command_available(cmd: &str, args: &[&str]) -> bool {
match status {
Err(e) => {
eprintln!(
"Skipping test because '{} {:?}' returned error {:?}",
cmd, args, e
);
eprintln!("Skipping test because '{cmd} {args:?}' returned error {e:?}");
false
}
Ok(status) if !status.success() => {
eprintln!(
"Skipping test because '{} {:?}' returned status {:?}",
cmd, args, status
);
eprintln!("Skipping test because '{cmd} {args:?}' returned status {status:?}");
false
}
_ => true,
@ -212,8 +206,7 @@ fn failure_regex_matches_stdout_failure_message() {
let expected_error = format!("{expected_error:?}");
assert!(
expected_error.contains("Logged a failure message"),
"error did not contain expected failure message: {}",
expected_error,
"error did not contain expected failure message: {expected_error}",
);
}
@ -251,8 +244,7 @@ fn failure_regex_matches_stderr_failure_message() {
let expected_error = format!("{expected_error:?}");
assert!(
expected_error.contains("Logged a failure message"),
"error did not contain expected failure message: {}",
expected_error,
"error did not contain expected failure message: {expected_error}",
);
}
@ -322,8 +314,7 @@ Unread Stdout:
multi-line failure message\
"
),
"error did not contain expected failure message: {}",
expected_error,
"error did not contain expected failure message: {expected_error}",
);
}
@ -481,8 +472,7 @@ fn failure_regex_timeout_continuous_output() {
let expected_error = format!("{expected_error:?}");
assert!(
expected_error.contains("Logged a failure message"),
"error did not contain expected failure message: {}",
expected_error,
"error did not contain expected failure message: {expected_error}",
);
}
@ -547,8 +537,7 @@ fn failure_regex_iter_matches_stdout_failure_message() {
let expected_error = format!("{expected_error:?}");
assert!(
expected_error.contains("Logged a failure message"),
"error did not contain expected failure message: {}",
expected_error,
"error did not contain expected failure message: {expected_error}",
);
}

View File

@ -47,10 +47,7 @@ fn main() {
match vergen(config.clone()) {
Ok(_) => {}
Err(e) => {
eprintln!(
"git error in vergen build script: skipping git env vars: {:?}",
e,
);
eprintln!("git error in vergen build script: skipping git env vars: {e:?}",);
*config.git_mut().enabled_mut() = false;
vergen(config).expect("non-git vergen should succeed");
}

View File

@ -72,8 +72,7 @@ pub fn app_version() -> Version {
// assume it's a cargo package version or a git tag with no hash
[_] | [_, _] => vergen_git_semver.parse().unwrap_or_else(|_| {
panic!(
"VERGEN_GIT_SEMVER without a hash {:?} must be valid semver 2.0",
vergen_git_semver
"VERGEN_GIT_SEMVER without a hash {vergen_git_semver:?} must be valid semver 2.0"
)
}),
@ -81,21 +80,14 @@ pub fn app_version() -> Version {
[hash, commit_count, tag] => {
let semver_fix = format!("{tag}+{commit_count}.{hash}");
semver_fix.parse().unwrap_or_else(|_|
panic!("Modified VERGEN_GIT_SEMVER {:?} -> {:?} -> {:?} must be valid. Note: CARGO_PKG_VERSION was {:?}.",
vergen_git_semver,
rparts,
semver_fix,
CARGO_PKG_VERSION))
panic!("Modified VERGEN_GIT_SEMVER {vergen_git_semver:?} -> {rparts:?} -> {semver_fix:?} must be valid. Note: CARGO_PKG_VERSION was {CARGO_PKG_VERSION:?}."))
}
_ => unreachable!("split is limited to 3 parts"),
}
}
_ => CARGO_PKG_VERSION.parse().unwrap_or_else(|_| {
panic!(
"CARGO_PKG_VERSION {:?} must be valid semver 2.0",
CARGO_PKG_VERSION
)
panic!("CARGO_PKG_VERSION {CARGO_PKG_VERSION:?} must be valid semver 2.0")
}),
}
}

View File

@ -157,10 +157,7 @@ impl CopyStateCmd {
old_zs::Response::Tip(Some(source_tip)) => source_tip,
old_zs::Response::Tip(None) => Err("empty source state: no blocks to copy")?,
response => Err(format!(
"unexpected response to Tip request: {:?}",
response,
))?,
response => Err(format!("unexpected response to Tip request: {response:?}",))?,
};
let source_tip_height = source_tip.0 .0;
@ -172,10 +169,7 @@ impl CopyStateCmd {
let initial_target_tip = match initial_target_tip {
new_zs::Response::Tip(target_tip) => target_tip,
response => Err(format!(
"unexpected response to Tip request: {:?}",
response,
))?,
response => Err(format!("unexpected response to Tip request: {response:?}",))?,
};
let min_target_height = initial_target_tip
.map(|target_tip| target_tip.0 .0 + 1)
@ -221,15 +215,13 @@ impl CopyStateCmd {
trace!(?height, %source_block, "read source block");
source_block
}
old_zs::Response::Block(None) => Err(format!(
"unexpected missing source block, height: {}",
height,
))?,
old_zs::Response::Block(None) => {
Err(format!("unexpected missing source block, height: {height}",))?
}
response => Err(format!(
"unexpected response to Block request, height: {}, \n \
response: {:?}",
height, response,
"unexpected response to Block request, height: {height}, \n \
response: {response:?}",
))?,
};
let source_block_hash = source_block.hash();
@ -248,9 +240,8 @@ impl CopyStateCmd {
target_block_commit_hash
}
response => Err(format!(
"unexpected response to CommitFinalizedBlock request, height: {}\n \
response: {:?}",
height, response,
"unexpected response to CommitFinalizedBlock request, height: {height}\n \
response: {response:?}",
))?,
};
@ -265,15 +256,13 @@ impl CopyStateCmd {
trace!(?height, %target_block, "read target block");
target_block
}
new_zs::Response::Block(None) => Err(format!(
"unexpected missing target block, height: {}",
height,
))?,
new_zs::Response::Block(None) => {
Err(format!("unexpected missing target block, height: {height}",))?
}
response => Err(format!(
"unexpected response to Block request, height: {},\n \
response: {:?}",
height, response,
"unexpected response to Block request, height: {height},\n \
response: {response:?}",
))?,
};
let target_block_data_hash = target_block.hash();
@ -294,18 +283,12 @@ impl CopyStateCmd {
{
Err(format!(
"unexpected mismatch between source and target blocks,\n \
max copy height: {:?},\n \
source hash: {:?},\n \
target commit hash: {:?},\n \
target data hash: {:?},\n \
source block: {:?},\n \
target block: {:?}",
max_copy_height,
source_block_hash,
target_block_commit_hash,
target_block_data_hash,
source_block,
target_block,
max copy height: {max_copy_height:?},\n \
source hash: {source_block_hash:?},\n \
target commit hash: {target_block_commit_hash:?},\n \
target data hash: {target_block_data_hash:?},\n \
source block: {source_block:?},\n \
target block: {target_block:?}",
))?;
}
@ -335,10 +318,7 @@ impl CopyStateCmd {
new_zs::Response::Tip(Some(target_tip)) => target_tip,
new_zs::Response::Tip(None) => Err("empty target state: expected written blocks")?,
response => Err(format!(
"unexpected response to Tip request: {:?}",
response,
))?,
response => Err(format!("unexpected response to Tip request: {response:?}",))?,
};
let final_target_tip_height = final_target_tip.0 .0;
let final_target_tip_hash = final_target_tip.1;
@ -352,8 +332,7 @@ impl CopyStateCmd {
old_zs::Response::Depth(source_depth) => source_depth,
response => Err(format!(
"unexpected response to Depth request: {:?}",
response,
"unexpected response to Depth request: {response:?}",
))?,
};
@ -366,16 +345,11 @@ impl CopyStateCmd {
if source_tip != final_target_tip || target_tip_source_depth != expected_target_depth {
Err(format!(
"unexpected mismatch between source and target tips,\n \
max copy height: {:?},\n \
source tip: {:?},\n \
target tip: {:?},\n \
actual target tip depth in source: {:?},\n \
expect target tip depth in source: {:?}",
max_copy_height,
source_tip,
final_target_tip,
target_tip_source_depth,
expected_target_depth,
max copy height: {max_copy_height:?},\n \
source tip: {source_tip:?},\n \
target tip: {final_target_tip:?},\n \
actual target tip depth in source: {target_tip_source_depth:?},\n \
expect target tip depth in source: {expected_target_depth:?}",
))?;
} else {
info!(
@ -391,16 +365,11 @@ impl CopyStateCmd {
if target_tip_source_depth != expected_target_depth {
Err(format!(
"unexpected mismatch between source and target tips,\n \
max copy height: {:?},\n \
source tip: {:?},\n \
target tip: {:?},\n \
actual target tip depth in source: {:?},\n \
expect target tip depth in source: {:?}",
max_copy_height,
source_tip,
final_target_tip,
target_tip_source_depth,
expected_target_depth,
max copy height: {max_copy_height:?},\n \
source tip: {source_tip:?},\n \
target tip: {final_target_tip:?},\n \
actual target tip depth in source: {target_tip_source_depth:?},\n \
expect target tip depth in source: {expected_target_depth:?}",
))?;
} else {
info!(

View File

@ -80,8 +80,7 @@ async fn mempool_requests_for_transactions() {
Ok(Response::TransactionIds(response)) => assert_eq!(response, added_transaction_ids),
Ok(Response::Nil) => assert!(
added_transaction_ids.is_empty(),
"`MempoolTransactionIds` request should match added_transaction_ids {:?}, got Ok(Nil)",
added_transaction_ids
"`MempoolTransactionIds` request should match added_transaction_ids {added_transaction_ids:?}, got Ok(Nil)"
),
_ => unreachable!(
"`MempoolTransactionIds` requests should always respond `Ok(Vec<UnminedTxId> | Nil)`, got {:?}",
@ -119,15 +118,13 @@ async fn mempool_requests_for_transactions() {
let sync_gossip_result = sync_gossip_task_handle.now_or_never();
assert!(
matches!(sync_gossip_result, None),
"unexpected error or panic in sync gossip task: {:?}",
sync_gossip_result,
"unexpected error or panic in sync gossip task: {sync_gossip_result:?}",
);
let tx_gossip_result = tx_gossip_task_handle.now_or_never();
assert!(
matches!(tx_gossip_result, None),
"unexpected error or panic in transaction gossip task: {:?}",
tx_gossip_result,
"unexpected error or panic in transaction gossip task: {tx_gossip_result:?}",
);
}
@ -209,15 +206,13 @@ async fn mempool_push_transaction() -> Result<(), crate::BoxError> {
let sync_gossip_result = sync_gossip_task_handle.now_or_never();
assert!(
matches!(sync_gossip_result, None),
"unexpected error or panic in sync gossip task: {:?}",
sync_gossip_result,
"unexpected error or panic in sync gossip task: {sync_gossip_result:?}",
);
let tx_gossip_result = tx_gossip_task_handle.now_or_never();
assert!(
matches!(tx_gossip_result, None),
"unexpected error or panic in transaction gossip task: {:?}",
tx_gossip_result,
"unexpected error or panic in transaction gossip task: {tx_gossip_result:?}",
);
Ok(())
@ -313,15 +308,13 @@ async fn mempool_advertise_transaction_ids() -> Result<(), crate::BoxError> {
let sync_gossip_result = sync_gossip_task_handle.now_or_never();
assert!(
matches!(sync_gossip_result, None),
"unexpected error or panic in sync gossip task: {:?}",
sync_gossip_result,
"unexpected error or panic in sync gossip task: {sync_gossip_result:?}",
);
let tx_gossip_result = tx_gossip_task_handle.now_or_never();
assert!(
matches!(tx_gossip_result, None),
"unexpected error or panic in transaction gossip task: {:?}",
tx_gossip_result,
"unexpected error or panic in transaction gossip task: {tx_gossip_result:?}",
);
Ok(())
@ -632,15 +625,13 @@ async fn mempool_transaction_expiration() -> Result<(), crate::BoxError> {
let sync_gossip_result = sync_gossip_task_handle.now_or_never();
assert!(
matches!(sync_gossip_result, None),
"unexpected error or panic in sync gossip task: {:?}",
sync_gossip_result,
"unexpected error or panic in sync gossip task: {sync_gossip_result:?}",
);
let tx_gossip_result = tx_gossip_task_handle.now_or_never();
assert!(
matches!(tx_gossip_result, None),
"unexpected error or panic in transaction gossip task: {:?}",
tx_gossip_result,
"unexpected error or panic in transaction gossip task: {tx_gossip_result:?}",
);
Ok(())
@ -732,15 +723,13 @@ async fn inbound_block_height_lookahead_limit() -> Result<(), crate::BoxError> {
let sync_gossip_result = sync_gossip_task_handle.now_or_never();
assert!(
matches!(sync_gossip_result, None),
"unexpected error or panic in sync gossip task: {:?}",
sync_gossip_result,
"unexpected error or panic in sync gossip task: {sync_gossip_result:?}",
);
let tx_gossip_result = tx_gossip_task_handle.now_or_never();
assert!(
matches!(tx_gossip_result, None),
"unexpected error or panic in transaction gossip task: {:?}",
tx_gossip_result,
"unexpected error or panic in transaction gossip task: {tx_gossip_result:?}",
);
Ok(())

View File

@ -103,15 +103,13 @@ async fn inbound_peers_empty_address_book() -> Result<(), crate::BoxError> {
let block_gossip_result = block_gossip_task_handle.now_or_never();
assert!(
matches!(block_gossip_result, None),
"unexpected error or panic in block gossip task: {:?}",
block_gossip_result,
"unexpected error or panic in block gossip task: {block_gossip_result:?}",
);
let tx_gossip_result = tx_gossip_task_handle.now_or_never();
assert!(
matches!(tx_gossip_result, None),
"unexpected error or panic in transaction gossip task: {:?}",
tx_gossip_result,
"unexpected error or panic in transaction gossip task: {tx_gossip_result:?}",
);
Ok(())
@ -188,15 +186,13 @@ async fn inbound_block_empty_state_notfound() -> Result<(), crate::BoxError> {
let block_gossip_result = block_gossip_task_handle.now_or_never();
assert!(
matches!(block_gossip_result, None),
"unexpected error or panic in block gossip task: {:?}",
block_gossip_result,
"unexpected error or panic in block gossip task: {block_gossip_result:?}",
);
let tx_gossip_result = tx_gossip_task_handle.now_or_never();
assert!(
matches!(tx_gossip_result, None),
"unexpected error or panic in transaction gossip task: {:?}",
tx_gossip_result,
"unexpected error or panic in transaction gossip task: {tx_gossip_result:?}",
);
Ok(())
@ -245,8 +241,7 @@ async fn inbound_tx_empty_state_notfound() -> Result<(), crate::BoxError> {
for tx in &txs {
assert!(
response_txs.contains(&Missing(*tx)),
"expected {:?}, but it was not in the response",
tx
"expected {tx:?}, but it was not in the response"
);
}
assert_eq!(response_txs.len(), txs.len());
@ -295,10 +290,8 @@ async fn inbound_tx_empty_state_notfound() -> Result<(), crate::BoxError> {
assert!(
expected.iter().any(|expected| expected == &actual),
"unexpected response: {:?} \
expected one of: {:?}",
actual,
expected,
"unexpected response: {actual:?} \
expected one of: {expected:?}",
);
}
} else {
@ -313,15 +306,13 @@ async fn inbound_tx_empty_state_notfound() -> Result<(), crate::BoxError> {
let block_gossip_result = block_gossip_task_handle.now_or_never();
assert!(
matches!(block_gossip_result, None),
"unexpected error or panic in block gossip task: {:?}",
block_gossip_result,
"unexpected error or panic in block gossip task: {block_gossip_result:?}",
);
let tx_gossip_result = tx_gossip_task_handle.now_or_never();
assert!(
matches!(tx_gossip_result, None),
"unexpected error or panic in transaction gossip task: {:?}",
tx_gossip_result,
"unexpected error or panic in transaction gossip task: {tx_gossip_result:?}",
);
Ok(())
@ -415,10 +406,8 @@ async fn outbound_tx_unrelated_response_notfound() -> Result<(), crate::BoxError
assert!(
expected.iter().any(|expected| expected == &actual),
"unexpected response: {:?} \
expected one of: {:?}",
actual,
expected,
"unexpected response: {actual:?} \
expected one of: {expected:?}",
);
}
} else {
@ -467,15 +456,13 @@ async fn outbound_tx_unrelated_response_notfound() -> Result<(), crate::BoxError
let block_gossip_result = block_gossip_task_handle.now_or_never();
assert!(
matches!(block_gossip_result, None),
"unexpected error or panic in block gossip task: {:?}",
block_gossip_result,
"unexpected error or panic in block gossip task: {block_gossip_result:?}",
);
let tx_gossip_result = tx_gossip_task_handle.now_or_never();
assert!(
matches!(tx_gossip_result, None),
"unexpected error or panic in transaction gossip task: {:?}",
tx_gossip_result,
"unexpected error or panic in transaction gossip task: {tx_gossip_result:?}",
);
Ok(())
@ -582,15 +569,13 @@ async fn outbound_tx_partial_response_notfound() -> Result<(), crate::BoxError>
let block_gossip_result = block_gossip_task_handle.now_or_never();
assert!(
matches!(block_gossip_result, None),
"unexpected error or panic in block gossip task: {:?}",
block_gossip_result,
"unexpected error or panic in block gossip task: {block_gossip_result:?}",
);
let tx_gossip_result = tx_gossip_task_handle.now_or_never();
assert!(
matches!(tx_gossip_result, None),
"unexpected error or panic in transaction gossip task: {:?}",
tx_gossip_result,
"unexpected error or panic in transaction gossip task: {tx_gossip_result:?}",
);
Ok(())

View File

@ -128,8 +128,7 @@ impl EvictionList {
let removed = self.unique_entries.remove(&key);
assert!(
removed.is_some(),
"all entries should exist in both ordered_entries and unique_entries, missing {:?} in unique_entries",
key
"all entries should exist in both ordered_entries and unique_entries, missing {key:?} in unique_entries"
);
Some(key)
} else {

View File

@ -33,10 +33,9 @@ impl MetricsEndpoint {
);
}
Err(e) => panic!(
"Opening metrics endpoint listener {:?} failed: {:?}. \
"Opening metrics endpoint listener {addr:?} failed: {e:?}. \
Hint: Check if another zebrad or zcashd process is running. \
Try changing the metrics endpoint_addr in the Zebra config.",
addr, e,
),
}
}

View File

@ -256,8 +256,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> {
let chain_sync_result = chain_sync_task_handle.now_or_never();
assert!(
matches!(chain_sync_result, None),
"unexpected error or panic in chain sync task: {:?}",
chain_sync_result,
"unexpected error or panic in chain sync task: {chain_sync_result:?}",
);
Ok(())
@ -488,8 +487,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> {
let chain_sync_result = chain_sync_task_handle.now_or_never();
assert!(
matches!(chain_sync_result, None),
"unexpected error or panic in chain sync task: {:?}",
chain_sync_result,
"unexpected error or panic in chain sync task: {chain_sync_result:?}",
);
Ok(())
@ -542,8 +540,7 @@ async fn sync_block_lookahead_drop() -> Result<(), crate::BoxError> {
let chain_sync_result = chain_sync_task_handle.now_or_never();
assert!(
matches!(chain_sync_result, None),
"unexpected error or panic in chain sync task: {:?}",
chain_sync_result,
"unexpected error or panic in chain sync task: {chain_sync_result:?}",
);
Ok(())
@ -698,8 +695,7 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> {
let chain_sync_result = chain_sync_task_handle.now_or_never();
assert!(
matches!(chain_sync_result, None),
"unexpected error or panic in chain sync task: {:?}",
chain_sync_result,
"unexpected error or panic in chain sync task: {chain_sync_result:?}",
);
Ok(())
@ -920,8 +916,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> {
let chain_sync_result = chain_sync_task_handle.now_or_never();
assert!(
matches!(chain_sync_result, None),
"unexpected error or panic in chain sync task: {:?}",
chain_sync_result,
"unexpected error or panic in chain sync task: {chain_sync_result:?}",
);
Ok(())

View File

@ -77,10 +77,9 @@ impl TracingEndpoint {
let server = match Server::try_bind(&addr) {
Ok(s) => s,
Err(e) => panic!(
"Opening tracing endpoint listener {:?} failed: {:?}. \
"Opening tracing endpoint listener {addr:?} failed: {e:?}. \
Hint: Check if another zebrad or zcashd process is running. \
Try changing the tracing endpoint_addr in the Zebra config.",
addr, e,
),
}
.serve(service);

View File

@ -356,8 +356,7 @@ async fn db_init_outside_future_executor() -> Result<()> {
let block_duration = start.elapsed();
assert!(
block_duration <= MAX_ASYNC_BLOCKING_TIME,
"futures executor was blocked longer than expected ({:?})",
block_duration,
"futures executor was blocked longer than expected ({block_duration:?})",
);
db_init_handle.await?;
@ -1074,9 +1073,8 @@ fn full_sync_test(network: Network, timeout_argument_name: &str) -> Result<()> {
)
} else {
eprintln!(
"Skipped full sync test for {}, \
set the {:?} environmental variable to run the test",
network, timeout_argument_name,
"Skipped full sync test for {network}, \
set the {timeout_argument_name:?} environmental variable to run the test",
);
Ok(())
@ -1791,10 +1789,7 @@ fn zebra_zcash_listener_conflict() -> Result<()> {
let mut config = default_test_config()?;
config.network.listen_addr = listen_addr.parse().unwrap();
let dir1 = testdir()?.with_config(&mut config)?;
let regex1 = regex::escape(&format!(
"Opened Zcash protocol endpoint at {}",
listen_addr
));
let regex1 = regex::escape(&format!("Opened Zcash protocol endpoint at {listen_addr}"));
// From another folder create a configuration with the same listener.
// `network.listen_addr` will be the same in the 2 nodes.
@ -2054,8 +2049,7 @@ async fn fully_synced_rpc_test() -> Result<()> {
let expected_hex = hex::encode(expected_bytes);
assert!(
res.contains(&expected_hex),
"response did not contain the desired block: {}",
res
"response did not contain the desired block: {res}"
);
Ok(())
@ -2109,8 +2103,7 @@ async fn delete_old_databases() -> Result<()> {
// inside dir was deleted
child.expect_stdout_line_matches(format!(
"deleted outdated state directory deleted_state={:?}",
canonicalized_inside_dir
"deleted outdated state directory deleted_state={canonicalized_inside_dir:?}"
))?;
assert!(!inside_dir.as_path().exists());