chore: Fix clippy lints (#9062)
* Allows non-local impls for abscissa derive macros and fixes other clippy lints.
* Fixes formatting.
This commit is contained in:
parent
8cfb61f52c
commit
e15184d39b
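Note: the `non_local_definitions` lint fires when an `impl` block ends up nested inside another item, which is how some derive macros (including abscissa's) expand. Since the macro output cannot be changed here, the affected zebrad modules allow the lint instead, as the hunks below show. A minimal hand-written sketch of the pattern the lint flags (illustrative names, not from the Zebra codebase):

trait Describe {
    fn describe(&self) -> String;
}

struct Endpoint;

fn register() {
    // Non-local: this impl lives inside `register`'s body but takes effect
    // crate-wide, so rustc warns unless `non_local_definitions` is allowed.
    impl Describe for Endpoint {
        fn describe(&self) -> String {
            "component endpoint".to_string()
        }
    }
}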
@@ -1,3 +1,5 @@
+//! Tests for tower-fallback
+
 use tower::{service_fn, Service, ServiceExt};
 use tower_fallback::Fallback;
 
@@ -139,7 +139,7 @@ impl<'f> BestTipChanged<'f> {
     }
 }
 
-impl<'f> Future for BestTipChanged<'f> {
+impl Future for BestTipChanged<'_> {
     type Output = Result<(), BoxError>;
 
     fn poll(
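The impl rewrite above is clippy's `needless_lifetimes` fix: a lifetime parameter that appears only in the implemented type can be replaced by the anonymous lifetime `'_`. A small sketch with an illustrative type:

struct Watcher<'a> {
    name: &'a str,
}

// Before: `impl<'a> std::fmt::Display for Watcher<'a>` declares a named
// lifetime that adds no information. After: `'_` elides it.
impl std::fmt::Display for Watcher<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "watching {}", self.name)
    }
}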
@@ -270,6 +270,6 @@ impl ZcashDeserialize for Flags {
         // the reserved bits 2..7 of the flagsOrchard field MUST be zero."
         // https://zips.z.cash/protocol/protocol.pdf#txnencodingandconsensus
         Flags::from_bits(reader.read_u8()?)
-            .ok_or_else(|| SerializationError::Parse("invalid reserved orchard flags"))
+            .ok_or(SerializationError::Parse("invalid reserved orchard flags"))
     }
 }
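This is clippy's `unnecessary_lazy_evaluations` fix: `ok_or_else` exists to defer building expensive errors, so when the error is a cheap constant, plain `ok_or` is clearer. A sketch of when each form fits (hypothetical helpers, not Zebra code):

fn check_flags(bits: u8) -> Result<u8, &'static str> {
    // Constant error value: pass it directly with `ok_or`.
    (bits < 0x04).then_some(bits).ok_or("invalid reserved flags")
}

fn check_flags_verbose(bits: u8) -> Result<u8, String> {
    // This error allocates, so `ok_or_else` keeps the allocation off the
    // success path; the closure only runs on failure.
    (bits < 0x04)
        .then_some(bits)
        .ok_or_else(|| format!("invalid reserved flags: {bits:#04x}"))
}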
@@ -1,2 +1,4 @@
+//! Serialization tests.
+
 mod preallocate;
 mod prop;
@@ -231,7 +231,7 @@ impl Transaction {
         &'a self,
         branch_id: ConsensusBranchId,
         all_previous_outputs: &'a [transparent::Output],
-    ) -> sighash::SigHasher {
+    ) -> sighash::SigHasher<'a> {
         sighash::SigHasher::new(self, branch_id, all_previous_outputs)
     }
 
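Spelling out `SigHasher<'a>` makes the borrow visible in the signature: the returned hasher keeps references into `self` and `all_previous_outputs`. A reduced sketch of the same idea (illustrative types):

struct Hasher<'a> {
    data: &'a [u8],
}

// `-> Hasher<'a>` documents that the result borrows `bytes`; the elided
// `-> Hasher` form hides that borrow from readers of the signature.
fn hasher_for<'a>(bytes: &'a [u8]) -> Hasher<'a> {
    Hasher { data: bytes }
}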
@@ -889,7 +889,7 @@ impl ZcashDeserialize for Transaction {
                 // Convert it to a NetworkUpgrade
                 let network_upgrade =
                     NetworkUpgrade::from_branch_id(limited_reader.read_u32::<LittleEndian>()?)
-                        .ok_or_else(|| {
+                        .ok_or({
                             SerializationError::Parse(
                                 "expected a valid network upgrade from the consensus branch id",
                             )
@@ -508,7 +508,9 @@ impl Codec {
             timestamp: Utc
                 .timestamp_opt(reader.read_i64::<LittleEndian>()?, 0)
                 .single()
-                .ok_or_else(|| Error::Parse("version timestamp is out of range for DateTime"))?,
+                .ok_or(Error::Parse(
+                    "version timestamp is out of range for DateTime",
+                ))?,
             address_recv: AddrInVersion::zcash_deserialize(&mut reader)?,
             address_from: AddrInVersion::zcash_deserialize(&mut reader)?,
             nonce: Nonce(reader.read_u64::<LittleEndian>()?),
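For context, `Utc.timestamp_opt` returns a chrono `LocalResult`, `.single()` flattens it to an `Option`, and `.ok_or(...)` converts an out-of-range timestamp into a parse error. A hedged sketch of that chain with a simplified error type:

use chrono::{DateTime, TimeZone, Utc};

fn parse_timestamp(secs: i64) -> Result<DateTime<Utc>, &'static str> {
    // For Utc, a representable seconds value is always LocalResult::Single,
    // so `.single()` yields Some; out-of-range values yield None.
    Utc.timestamp_opt(secs, 0)
        .single()
        .ok_or("version timestamp is out of range for DateTime")
}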
@@ -93,18 +93,18 @@ impl Service<Request> for ScanService {
             Request::Info => {
                 let db = self.db.clone();
 
-                return async move {
+                async move {
                     Ok(Response::Info {
                         min_sapling_birthday_height: db.network().sapling_activation_height(),
                     })
                 }
-                .boxed();
+                .boxed()
             }
 
             Request::RegisterKeys(keys) => {
                 let mut scan_task = self.scan_task.clone();
 
-                return async move {
+                async move {
                     let newly_registered_keys = scan_task.register_keys(keys)?.await?;
                     if !newly_registered_keys.is_empty() {
                         Ok(Response::RegisteredKeys(newly_registered_keys))
@@ -113,14 +113,14 @@ impl Service<Request> for ScanService {
                         are valid Sapling extended full viewing keys".into())
                     }
                 }
-                .boxed();
+                .boxed()
             }
 
             Request::DeleteKeys(keys) => {
                 let mut db = self.db.clone();
                 let mut scan_task = self.scan_task.clone();
 
-                return async move {
+                async move {
                     // Wait for a message to confirm that the scan task has removed the key up to `DELETE_KEY_TIMEOUT`
                     let remove_keys_result = tokio::time::timeout(
                         DELETE_KEY_TIMEOUT,
@@ -141,13 +141,13 @@ impl Service<Request> for ScanService {
 
                     Ok(Response::DeletedKeys)
                 }
-                .boxed();
+                .boxed()
             }
 
             Request::Results(keys) => {
                 let db = self.db.clone();
 
-                return async move {
+                async move {
                     let mut final_result = BTreeMap::new();
                     for key in keys {
                         let db = db.clone();
@@ -168,26 +168,26 @@ impl Service<Request> for ScanService {
 
                     Ok(Response::Results(final_result))
                 }
-                .boxed();
+                .boxed()
             }
 
             Request::SubscribeResults(keys) => {
                 let mut scan_task = self.scan_task.clone();
 
-                return async move {
+                async move {
                     let results_receiver = scan_task.subscribe(keys)?.await.map_err(|_| {
                         "scan task dropped responder, check that keys are registered"
                     })?;
 
                     Ok(Response::SubscribeResults(results_receiver))
                 }
-                .boxed();
+                .boxed()
             }
 
             Request::ClearResults(keys) => {
                 let mut db = self.db.clone();
 
-                return async move {
+                async move {
                     // Clear results from db for the provided `keys`
                     tokio::task::spawn_blocking(move || {
                         db.delete_sapling_results(keys);
@@ -196,7 +196,7 @@ impl Service<Request> for ScanService {
 
                     Ok(Response::ClearedResults)
                 }
-                .boxed();
+                .boxed()
             }
         }
     }
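All of the `Request` arms above drop a `return ...;` in favor of a tail expression: since `async move { ... }.boxed()` is the last expression in each match arm, it is already the arm's value (clippy's `needless_return`). A simplified sketch of the shape, assuming the `futures` crate:

use futures::future::{BoxFuture, FutureExt};

enum Request {
    Ping,
    Echo(String),
}

fn call(req: Request) -> BoxFuture<'static, String> {
    match req {
        // Each arm evaluates to the boxed future; no `return` or trailing
        // semicolon is needed.
        Request::Ping => async move { "pong".to_string() }.boxed(),
        Request::Echo(msg) => async move { msg }.boxed(),
    }
}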
@ -265,7 +265,7 @@ trait InsertSaplingHeight {
|
||||||
fn insert_sapling_height(self, sapling_key: &SaplingScanningKey, height: Height) -> Self;
|
fn insert_sapling_height(self, sapling_key: &SaplingScanningKey, height: Height) -> Self;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'cf> InsertSaplingHeight for WriteSaplingTxIdsBatch<'cf> {
|
impl InsertSaplingHeight for WriteSaplingTxIdsBatch<'_> {
|
||||||
/// Insert sapling height with no results.
|
/// Insert sapling height with no results.
|
||||||
///
|
///
|
||||||
/// If a result already exists for the coinbase transaction at that height,
|
/// If a result already exists for the coinbase transaction at that height,
|
||||||
|
@@ -283,7 +283,7 @@ trait DeleteSaplingKeys {
     fn delete_sapling_keys(self, sapling_key: Vec<SaplingScanningKey>) -> Self;
 }
 
-impl<'cf> DeleteSaplingKeys for WriteSaplingTxIdsBatch<'cf> {
+impl DeleteSaplingKeys for WriteSaplingTxIdsBatch<'_> {
     /// Delete sapling keys and their results.
     fn delete_sapling_keys(mut self, sapling_keys: Vec<SaplingScanningKey>) -> Self {
         for key in &sapling_keys {
@@ -5,7 +5,7 @@
 //!
 //! export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state"
 //! cargo test scan_task_commands --features="shielded-scan" -- --ignored --nocapture
-#![allow(dead_code)]
+#![allow(dead_code, non_local_definitions)]
 
 use std::{fs, time::Duration};
 
@@ -26,6 +26,7 @@ use zebra_scan::{
 
 use zebra_state::{ChainTipChange, LatestChainTip};
 
+/// Boxed state service.
 pub type BoxStateService =
     BoxService<zebra_state::Request, zebra_state::Response, zebra_state::BoxError>;
 
@@ -162,6 +163,7 @@ pub(crate) async fn run() -> Result<()> {
     Ok(())
 }
 
+/// Starts the state service with the provided cache directory.
 pub async fn start_state_service_with_cache_dir(
     network: &Network,
     cache_dir: impl Into<std::path::PathBuf>,
@@ -63,7 +63,7 @@ where
     batch: Batch,
 }
 
-impl<'cf, Key, Value> Debug for TypedColumnFamily<'cf, Key, Value>
+impl<Key, Value> Debug for TypedColumnFamily<'_, Key, Value>
 where
     Key: IntoDisk + FromDisk + Debug,
     Value: IntoDisk + FromDisk,
@@ -80,7 +80,7 @@ where
     }
 }
 
-impl<'cf, Key, Value> PartialEq for TypedColumnFamily<'cf, Key, Value>
+impl<Key, Value> PartialEq for TypedColumnFamily<'_, Key, Value>
 where
     Key: IntoDisk + FromDisk + Debug,
     Value: IntoDisk + FromDisk,
@@ -90,7 +90,7 @@ where
     }
 }
 
-impl<'cf, Key, Value> Eq for TypedColumnFamily<'cf, Key, Value>
+impl<Key, Value> Eq for TypedColumnFamily<'_, Key, Value>
 where
     Key: IntoDisk + FromDisk + Debug,
     Value: IntoDisk + FromDisk,
@@ -243,7 +243,7 @@ where
     }
 }
 
-impl<'cf, Key, Value> TypedColumnFamily<'cf, Key, Value>
+impl<Key, Value> TypedColumnFamily<'_, Key, Value>
 where
     Key: IntoDisk + FromDisk + Debug + Ord,
     Value: IntoDisk + FromDisk,
@@ -259,7 +259,7 @@ where
     }
 }
 
-impl<'cf, Key, Value> TypedColumnFamily<'cf, Key, Value>
+impl<Key, Value> TypedColumnFamily<'_, Key, Value>
 where
     Key: IntoDisk + FromDisk + Debug + Hash + Eq,
     Value: IntoDisk + FromDisk,
@@ -275,7 +275,7 @@ where
     }
 }
 
-impl<'cf, Key, Value, Batch> WriteTypedBatch<'cf, Key, Value, Batch>
+impl<Key, Value, Batch> WriteTypedBatch<'_, Key, Value, Batch>
 where
     Key: IntoDisk + FromDisk + Debug,
     Value: IntoDisk + FromDisk,
@@ -312,7 +312,7 @@ where
 }
 
 // Writing a batch to the database requires an owned batch.
-impl<'cf, Key, Value> WriteTypedBatch<'cf, Key, Value, DiskWriteBatch>
+impl<Key, Value> WriteTypedBatch<'_, Key, Value, DiskWriteBatch>
 where
     Key: IntoDisk + FromDisk + Debug,
     Value: IntoDisk + FromDisk,
@@ -68,7 +68,7 @@ pub trait FromDisk: Sized {
 
 // Generic serialization impls
 
-impl<'a, T> IntoDisk for &'a T
+impl<T> IntoDisk for &T
 where
     T: IntoDisk,
 {
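Dropping the named lifetime from this blanket impl is the same elision cleanup: `&T` already means "a reference with some lifetime", so `impl<T> IntoDisk for &T` says everything `impl<'a, T> IntoDisk for &'a T` did. A sketch of the delegating pattern with an illustrative trait:

trait ToBytes {
    fn to_bytes(&self) -> Vec<u8>;
}

// Blanket impl: a reference to any ToBytes value is itself ToBytes,
// delegating to the value it points at.
impl<T> ToBytes for &T
where
    T: ToBytes,
{
    fn to_bytes(&self) -> Vec<u8> {
        (**self).to_bytes()
    }
}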
@ -1272,7 +1272,7 @@ impl Chain {
|
||||||
pub fn partial_transparent_indexes<'a>(
|
pub fn partial_transparent_indexes<'a>(
|
||||||
&'a self,
|
&'a self,
|
||||||
addresses: &'a HashSet<transparent::Address>,
|
addresses: &'a HashSet<transparent::Address>,
|
||||||
) -> impl Iterator<Item = &TransparentTransfers> {
|
) -> impl Iterator<Item = &'a TransparentTransfers> {
|
||||||
addresses
|
addresses
|
||||||
.iter()
|
.iter()
|
||||||
.flat_map(|address| self.partial_transparent_transfers.get(address))
|
.flat_map(|address| self.partial_transparent_transfers.get(address))
|
||||||
|
|
|
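Here the elided `&TransparentTransfers` becomes `&'a TransparentTransfers`, so the `impl Iterator` return type states which borrow its items carry. A reduced sketch (illustrative types, not Zebra's):

use std::collections::{HashMap, HashSet};

struct Index {
    transfers: HashMap<String, Vec<u64>>,
}

impl Index {
    // The explicit `'a` ties each yielded reference to the borrows of
    // `self` and `addresses` instead of leaving the lifetime implicit.
    fn transfers_for<'a>(
        &'a self,
        addresses: &'a HashSet<String>,
    ) -> impl Iterator<Item = &'a Vec<u64>> {
        addresses
            .iter()
            .flat_map(|addr| self.transfers.get(addr))
    }
}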
@@ -1,5 +1,7 @@
 //! Zebrad Subcommands
 
+#![allow(non_local_definitions)]
+
 use std::path::PathBuf;
 
 use abscissa_core::{config::Override, Command, Configurable, FrameworkError, Runnable};
@@ -1,5 +1,7 @@
 //! An HTTP endpoint for metrics collection.
 
+#![allow(non_local_definitions)]
+
 use std::net::SocketAddr;
 
 use abscissa_core::{Component, FrameworkError};
@@ -7,6 +7,8 @@
 //! The rayon thread pool is used for:
 //! - long-running CPU-bound tasks like cryptography, via [`rayon::spawn_fifo`].
 
+#![allow(non_local_definitions)]
+
 use std::{future::Future, time::Duration};
 
 use abscissa_core::{Component, FrameworkError, Shutdown};
@@ -1,5 +1,7 @@
 //! An HTTP endpoint for dynamically setting tracing filters.
 
+#![allow(non_local_definitions)]
+
 use std::net::SocketAddr;
 
 use abscissa_core::{Component, FrameworkError};