lint(clippy): add `unwrap_in_result` lint (#4667)

* `unwrap_in_result` in zebra-chain crate

* `unwrap_in_result` in zebra-script crate

* `unwrap_in_result` in zebra-state crate

* `unwrap_in_result` in zebra-consensus crate

* `unwrap_in_result` in zebra-test crate

* `unwrap_in_result` in zebra-network crate

* `unwrap_in_result` in zebra-rpc crate

* `unwrap_in_result` in zebrad crate

* rustfmt

* revert `?` and add exceptions

* explain some panics better

* move some lint positions

* replace a panic with error

* Fix rustfmt?

Co-authored-by: teor <teor@riseup.net>
Alfredo Garcia authored on 2022-06-28 03:22:07 -03:00; committed by GitHub
parent 54efbe9d2d
commit 97fb85dca9
56 changed files with 173 additions and 22 deletions

View File

@ -54,6 +54,7 @@ rustflags = [
# Panics
"-Wclippy::fallible_impl_from",
"-Wclippy::unwrap_in_result",
# TODOs:
# `cargo fix` might help do these fixes,
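
For context: `clippy::unwrap_in_result` warns whenever a function that returns `Result` or `Option` calls `unwrap()` or `expect()` internally. The rest of this diff applies the two remediations used throughout Zebra: either keep the panic, document the invariant in an `expect()` message, and silence the lint with `#[allow]`, or propagate the error instead of panicking. A minimal sketch of both (hypothetical function names, not Zebra code):

```rust
use std::num::ParseIntError;

// The lint fires here: a fallible function panics internally
// instead of propagating the parse error.
fn parse_port(s: &str) -> Result<u16, ParseIntError> {
    let port: u16 = s.parse().unwrap(); // clippy::unwrap_in_result
    Ok(port)
}

// Remediation 1: keep the panic, document why it cannot happen,
// and allow the lint explicitly (`expect()` still triggers it).
#[allow(clippy::unwrap_in_result)]
fn parse_checked_port(s: &str) -> Result<u16, ParseIntError> {
    let port: u16 = s.parse().expect("caller only passes numeric strings");
    Ok(port)
}

// Remediation 2: propagate the error with `?` instead of panicking.
fn parse_any_port(s: &str) -> Result<u16, ParseIntError> {
    Ok(s.parse()?)
}
```

Because the flag is added to `rustflags` in `.cargo/config.toml`, a plain `cargo clippy` run picks it up across the whole workspace.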

View File

@ -536,6 +536,7 @@ impl ZcashDeserialize for Amount<NegativeAllowed> {
}
impl ZcashSerialize for Amount<NonNegative> {
#[allow(clippy::unwrap_in_result)]
fn zcash_serialize<W: std::io::Write>(&self, mut writer: W) -> Result<(), std::io::Error> {
let amount = self
.0

View File

@ -113,6 +113,7 @@ impl Block {
/// <https://zips.z.cash/protocol/protocol.pdf#txnconsensus>
///
/// [ZIP-244]: https://zips.z.cash/zip-0244
#[allow(clippy::unwrap_in_result)]
pub fn check_transaction_network_upgrade_consistency(
&self,
network: Network,

View File

@ -101,6 +101,7 @@ pub enum BlockTimeError {
impl Header {
/// TODO: Inline this function into zebra_consensus::block::check::time_is_valid_at.
/// See <https://github.com/ZcashFoundation/zebra/issues/1021> for more details.
#[allow(clippy::unwrap_in_result)]
pub fn time_is_valid_at(
&self,
now: DateTime<Utc>,

View File

@ -21,6 +21,7 @@ use super::{merkle, Block, CountedHeader, Hash, Header};
pub const MAX_BLOCK_BYTES: u64 = 2_000_000;
impl ZcashSerialize for Header {
#[allow(clippy::unwrap_in_result)]
fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> {
writer.write_u32::<LittleEndian>(self.version)?;
self.previous_block_hash.zcash_serialize(&mut writer)?;

View File

@ -128,6 +128,7 @@ impl NonEmptyHistoryTree {
/// `sapling_root` is the root of the Sapling note commitment tree of the block.
/// `orchard_root` is the root of the Orchard note commitment tree of the block;
/// (ignored for pre-Orchard blocks).
#[allow(clippy::unwrap_in_result)]
pub fn from_block(
network: Network,
block: Arc<Block>,
@ -186,6 +187,7 @@ impl NonEmptyHistoryTree {
/// # Panics
///
/// If the block height is not one more than the previously pushed block.
#[allow(clippy::unwrap_in_result)]
pub fn push(
&mut self,
block: Arc<Block>,
@ -419,6 +421,7 @@ impl HistoryTree {
/// Create a HistoryTree from a block.
///
/// If the block is pre-Heartwood, it returns an empty history tree.
#[allow(clippy::unwrap_in_result)]
pub fn from_block(
network: Network,
block: Arc<Block>,
@ -444,6 +447,7 @@ impl HistoryTree {
///
/// The tree is updated in-place. It is created when pushing the Heartwood
/// activation block.
#[allow(clippy::unwrap_in_result)]
pub fn push(
&mut self,
network: Network,

View File

@ -113,6 +113,7 @@ impl NoteCommitment {
///
/// <https://zips.z.cash/protocol/nu5.pdf#concretewindowedcommit>
#[allow(non_snake_case)]
#[allow(clippy::unwrap_in_result)]
pub fn new(note: Note) -> Option<Self> {
// s as in the argument name for WindowedPedersenCommit_r(s)
let mut s: BitVec<u8, Lsb0> = BitVec::new();

View File

@ -161,13 +161,15 @@ impl ConstantTimeEq for SpendingKey {
}
impl fmt::Display for SpendingKey {
#[allow(clippy::unwrap_in_result)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let hrp = match self.network {
Network::Mainnet => sk_hrp::MAINNET,
Network::Testnet => sk_hrp::TESTNET,
};
bech32::encode_to_fmt(f, hrp, &self.bytes.to_base32(), Variant::Bech32).unwrap()
bech32::encode_to_fmt(f, hrp, &self.bytes.to_base32(), Variant::Bech32)
.expect("hrp is valid")
}
}
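
The `expect("hrp is valid")` calls are sound because `bech32::encode_to_fmt` only returns an error for an invalid human-readable part, and these HRPs are hard-coded lowercase constants. A standalone sketch of the same pattern, assuming the bech32 0.9 API and using a made-up key type and HRP:

```rust
use bech32::{ToBase32, Variant};
use std::fmt;

struct ExampleKey([u8; 32]);

impl fmt::Display for ExampleKey {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // encode_to_fmt returns Result<fmt::Result, bech32::Error>; the outer
        // error can only occur for an invalid HRP, which a hard-coded
        // lowercase constant rules out, so the expect documents an invariant.
        bech32::encode_to_fmt(f, "examplehrp", &self.0.to_base32(), Variant::Bech32)
            .expect("hrp is valid")
    }
}

fn main() {
    println!("{}", ExampleKey([0u8; 32]));
}
```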

View File

@ -301,6 +301,7 @@ impl NoteCommitmentTree {
/// chain and input into the proof.
///
/// Returns an error if the tree is full.
#[allow(clippy::unwrap_in_result)]
pub fn append(&mut self, cm_x: pallas::Base) -> Result<(), NoteCommitmentTreeError> {
if self.inner.append(&cm_x.into()) {
// Invalidate cached root

View File

@ -105,6 +105,7 @@ impl<V: Version> Tree<V> {
/// # Panics
///
/// Will panic if `peaks` is empty.
#[allow(clippy::unwrap_in_result)]
pub fn new_from_cache(
network: Network,
network_upgrade: NetworkUpgrade,
@ -138,6 +139,7 @@ impl<V: Version> Tree<V> {
/// `sapling_root` is the root of the Sapling note commitment tree of the block.
/// `orchard_root` is the root of the Orchard note commitment tree of the block;
/// (ignored for V1 trees).
#[allow(clippy::unwrap_in_result)]
pub fn new_from_block(
network: Network,
block: Arc<Block>,
@ -171,6 +173,7 @@ impl<V: Version> Tree<V> {
///
/// Panics if the network upgrade of the given block is different from
/// the network upgrade of the other blocks in the tree.
#[allow(clippy::unwrap_in_result)]
pub fn append_leaf(
&mut self,
block: Arc<Block>,

View File

@ -48,6 +48,7 @@ impl fmt::Debug for Address {
}
impl fmt::Display for Address {
#[allow(clippy::unwrap_in_result)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut bytes = io::Cursor::new(Vec::new());
@ -59,7 +60,8 @@ impl fmt::Display for Address {
_ => human_readable_parts::TESTNET,
};
bech32::encode_to_fmt(f, hrp, bytes.get_ref().to_base32(), Variant::Bech32).unwrap()
bech32::encode_to_fmt(f, hrp, bytes.get_ref().to_base32(), Variant::Bech32)
.expect("hrp is valid")
}
}
@ -69,7 +71,10 @@ impl std::str::FromStr for Address {
fn from_str(s: &str) -> Result<Self, Self::Err> {
match bech32::decode(s) {
Ok((hrp, bytes, Variant::Bech32)) => {
let mut decoded_bytes = io::Cursor::new(Vec::<u8>::from_base32(&bytes).unwrap());
let mut decoded_bytes =
io::Cursor::new(Vec::<u8>::from_base32(&bytes).map_err(|_| {
SerializationError::Parse("bech32::decode guarantees valid base32")
})?);
let mut diversifier_bytes = [0; 11];
decoded_bytes.read_exact(&mut diversifier_bytes)?;
@ -83,7 +88,7 @@ impl std::str::FromStr for Address {
},
diversifier: keys::Diversifier::from(diversifier_bytes),
transmission_key: keys::TransmissionKey::try_from(transmission_key_bytes)
.unwrap(),
.map_err(|_| SerializationError::Parse("invalid transmission key bytes"))?,
})
}
_ => Err(SerializationError::Parse("bech32 decoding error")),
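
In the deserialization path above, the `.unwrap()` calls become `SerializationError`s via `map_err`, so malformed input is reported to the caller instead of panicking. The same idiom in isolation, with a hypothetical error type standing in for Zebra's:

```rust
#[derive(Debug)]
struct ParseError(&'static str);

// Map the library error into the caller's error type and propagate it
// with `?`; bad input now yields Err(..) rather than a panic.
fn parse_hex_byte(s: &str) -> Result<u8, ParseError> {
    let byte = u8::from_str_radix(s, 16).map_err(|_| ParseError("invalid hex byte"))?;
    Ok(byte)
}

fn main() {
    assert!(parse_hex_byte("ff").is_ok());
    assert!(parse_hex_byte("zz").is_err()); // error, not a panic
}
```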

View File

@ -254,23 +254,27 @@ impl From<[u8; 32]> for SpendingKey {
}
impl fmt::Display for SpendingKey {
#[allow(clippy::unwrap_in_result)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let hrp = match self.network {
Network::Mainnet => sk_hrp::MAINNET,
_ => sk_hrp::TESTNET,
};
bech32::encode_to_fmt(f, hrp, &self.bytes.to_base32(), Variant::Bech32).unwrap()
bech32::encode_to_fmt(f, hrp, &self.bytes.to_base32(), Variant::Bech32)
.expect("hrp is valid")
}
}
impl FromStr for SpendingKey {
type Err = SerializationError;
#[allow(clippy::unwrap_in_result)]
fn from_str(s: &str) -> Result<Self, Self::Err> {
match bech32::decode(s) {
Ok((hrp, bytes, Variant::Bech32)) => {
let decoded = Vec::<u8>::from_base32(&bytes).unwrap();
let decoded =
Vec::<u8>::from_base32(&bytes).expect("bech32::decode guarantees valid base32");
let mut decoded_bytes = [0u8; 32];
decoded_bytes[..].copy_from_slice(&decoded[0..32]);
@ -637,13 +641,15 @@ impl fmt::Debug for IncomingViewingKey {
}
impl fmt::Display for IncomingViewingKey {
#[allow(clippy::unwrap_in_result)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let hrp = match self.network {
Network::Mainnet => ivk_hrp::MAINNET,
_ => ivk_hrp::TESTNET,
};
bech32::encode_to_fmt(f, hrp, &self.scalar.to_bytes().to_base32(), Variant::Bech32).unwrap()
bech32::encode_to_fmt(f, hrp, &self.scalar.to_bytes().to_base32(), Variant::Bech32)
.expect("hrp is valid")
}
}
@ -690,10 +696,12 @@ impl From<(AuthorizingKey, NullifierDerivingKey)> for IncomingViewingKey {
impl FromStr for IncomingViewingKey {
type Err = SerializationError;
#[allow(clippy::unwrap_in_result)]
fn from_str(s: &str) -> Result<Self, Self::Err> {
match bech32::decode(s) {
Ok((hrp, bytes, Variant::Bech32)) => {
let decoded = Vec::<u8>::from_base32(&bytes).unwrap();
let decoded =
Vec::<u8>::from_base32(&bytes).expect("bech32::decode guarantees valid base32");
let mut scalar_bytes = [0u8; 32];
scalar_bytes[..].copy_from_slice(&decoded[0..32]);
@ -958,6 +966,7 @@ impl fmt::Debug for FullViewingKey {
}
impl fmt::Display for FullViewingKey {
#[allow(clippy::unwrap_in_result)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut bytes = io::Cursor::new(Vec::new());
@ -970,17 +979,21 @@ impl fmt::Display for FullViewingKey {
_ => fvk_hrp::TESTNET,
};
bech32::encode_to_fmt(f, hrp, bytes.get_ref().to_base32(), Variant::Bech32).unwrap()
bech32::encode_to_fmt(f, hrp, bytes.get_ref().to_base32(), Variant::Bech32)
.expect("hrp is valid")
}
}
impl FromStr for FullViewingKey {
type Err = SerializationError;
#[allow(clippy::unwrap_in_result)]
fn from_str(s: &str) -> Result<Self, Self::Err> {
match bech32::decode(s) {
Ok((hrp, bytes, Variant::Bech32)) => {
let mut decoded_bytes = io::Cursor::new(Vec::<u8>::from_base32(&bytes).unwrap());
let mut decoded_bytes = io::Cursor::new(
Vec::<u8>::from_base32(&bytes).expect("bech32::decode guarantees valid base32"),
);
let authorizing_key_bytes = decoded_bytes.read_32_bytes()?;
let nullifier_deriving_key_bytes = decoded_bytes.read_32_bytes()?;

View File

@ -306,6 +306,7 @@ impl NoteCommitmentTree {
/// chain and input into the proof.
///
/// Returns an error if the tree is full.
#[allow(clippy::unwrap_in_result)]
pub fn append(&mut self, cm_u: jubjub::Fq) -> Result<(), NoteCommitmentTreeError> {
if self.inner.append(&cm_u.into()) {
// Invalidate cached root

View File

@ -236,6 +236,7 @@ impl TryFrom<usize> for CompactSizeMessage {
type Error = SerializationError;
#[inline]
#[allow(clippy::unwrap_in_result)]
fn try_from(size: usize) -> Result<Self, Self::Error> {
use SerializationError::Parse;
@ -284,6 +285,7 @@ impl ZcashSerialize for CompactSizeMessage {
///
/// If the value exceeds `MAX_PROTOCOL_MESSAGE_LEN`.
#[inline]
#[allow(clippy::unwrap_in_result)]
fn zcash_serialize<W: std::io::Write>(&self, writer: W) -> Result<(), std::io::Error> {
// # Security
// Defence-in-depth for memory DoS via preallocation.

View File

@ -71,6 +71,7 @@ impl DateTime32 {
/// Returns the duration elapsed since this time,
/// or if this time is in the future, returns `None`.
#[allow(clippy::unwrap_in_result)]
pub fn checked_elapsed(&self, now: chrono::DateTime<Utc>) -> Option<Duration32> {
DateTime32::try_from(now)
.expect("unexpected out of range chrono::DateTime")

View File

@ -65,6 +65,7 @@ impl std::io::Write for FakeWriter {
///
/// See `zcash_serialize_external_count` for more details, and usage information.
impl<T: ZcashSerialize> ZcashSerialize for Vec<T> {
#[allow(clippy::unwrap_in_result)]
fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> {
let len: CompactSizeMessage = self
.len()

View File

@ -240,6 +240,7 @@ impl NoteCommitmentTree {
/// Appends a note commitment to the leafmost layer of the tree.
///
/// Returns an error if the tree is full.
#[allow(clippy::unwrap_in_result)]
pub fn append(&mut self, cm: NoteCommitment) -> Result<(), NoteCommitmentTreeError> {
if self.inner.append(&cm.into()) {
// Invalidate cached root

View File

@ -15,6 +15,7 @@ mod txid;
mod unmined;
#[cfg(any(test, feature = "proptest-impl"))]
#[allow(clippy::unwrap_in_result)]
pub mod arbitrary;
#[cfg(test)]
mod tests;
@ -946,6 +947,7 @@ impl Transaction {
/// using the outputs spent by this transaction.
///
/// See `transparent_value_balance` for details.
#[allow(clippy::unwrap_in_result)]
fn transparent_value_balance_from_outputs(
&self,
outputs: &HashMap<transparent::OutPoint, transparent::Output>,

View File

@ -86,6 +86,7 @@ impl LockTime {
}
impl ZcashSerialize for LockTime {
#[allow(clippy::unwrap_in_result)]
fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> {
// This implementation does not check the invariants on `LockTime` so that the
// serialization is fallible only if the underlying writer is. This ensures that
@ -100,6 +101,7 @@ impl ZcashSerialize for LockTime {
}
impl ZcashDeserialize for LockTime {
#[allow(clippy::unwrap_in_result)]
fn zcash_deserialize<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> {
let n = reader.read_u32::<LittleEndian>()?;
if n < Self::MIN_TIMESTAMP.try_into().expect("fits in u32") {

View File

@ -178,6 +178,7 @@ impl ZcashSerialize for sapling::ShieldedData<SharedAnchor> {
// we can't split ShieldedData out of Option<ShieldedData> deserialization,
// because the counts are read along with the arrays.
impl ZcashDeserialize for Option<sapling::ShieldedData<SharedAnchor>> {
#[allow(clippy::unwrap_in_result)]
fn zcash_deserialize<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> {
// Denoted as `nSpendsSapling` and `vSpendsSapling` in the spec.
let spend_prefixes: Vec<_> = (&mut reader).zcash_deserialize_into()?;
@ -447,6 +448,7 @@ impl ZcashDeserialize for Option<orchard::ShieldedData> {
}
impl ZcashSerialize for Transaction {
#[allow(clippy::unwrap_in_result)]
fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> {
// Post-Sapling, transaction size is limited to MAX_BLOCK_BYTES.
// (Strictly, the maximum transaction size is about 1.5 kB less,
@ -661,6 +663,7 @@ impl ZcashSerialize for Transaction {
}
impl ZcashDeserialize for Transaction {
#[allow(clippy::unwrap_in_result)]
fn zcash_deserialize<R: io::Read>(reader: R) -> Result<Self, SerializationError> {
// # Consensus
//

View File

@ -34,6 +34,7 @@ impl<'a> TxIdBuilder<'a> {
/// Compute the Transaction ID for transactions V1 to V4.
/// In these cases it's simply the hash of the serialized transaction.
#[allow(clippy::unwrap_in_result)]
fn txid_v1_to_v4(self) -> Result<Hash, io::Error> {
let mut hash_writer = sha256d::Writer::default();
self.trans

View File

@ -77,6 +77,7 @@ impl AsRef<[u8]> for CoinbaseData {
}
impl std::fmt::Debug for CoinbaseData {
#[allow(clippy::unwrap_in_result)]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let escaped = String::from_utf8(
self.0

View File

@ -294,6 +294,7 @@ impl ValueBalance<NonNegative> {
/// value pool.
///
/// See `add_block` for details.
#[allow(clippy::unwrap_in_result)]
pub fn add_chain_value_pool_change(
self,
chain_value_pool_change: ValueBalance<NegativeAllowed>,
@ -345,6 +346,7 @@ impl ValueBalance<NonNegative> {
}
/// From byte array
#[allow(clippy::unwrap_in_result)]
pub fn from_bytes(bytes: [u8; 32]) -> Result<ValueBalance<NonNegative>, ValueBalanceError> {
let transparent = Amount::from_bytes(
bytes[0..8]

View File

@ -178,6 +178,7 @@ impl CompactDifficulty {
///
/// Returns None for negative, zero, and overflow values. (zcashd rejects
/// these values, before comparing the hash.)
#[allow(clippy::unwrap_in_result)]
pub fn to_expanded(self) -> Option<ExpandedDifficulty> {
// The constants for this floating-point representation.
// Alias the struct constants here, so the code is easier to read.
@ -464,6 +465,7 @@ impl PartialOrd<ExpandedDifficulty> for block::Hash {
///
/// See `<ExpandedDifficulty as PartialOrd<block::Hash>::partial_cmp`
/// for details.
#[allow(clippy::unwrap_in_result)]
fn partial_cmp(&self, other: &ExpandedDifficulty) -> Option<Ordering> {
Some(
// Use the canonical implementation, but reverse the order

View File

@ -41,6 +41,7 @@ impl Solution {
pub const INPUT_LENGTH: usize = 4 + 32 * 3 + 4 * 2;
/// Returns `Ok(())` if `EquihashSolution` is valid for `header`
#[allow(clippy::unwrap_in_result)]
pub fn check(&self, header: &Header) -> Result<(), Error> {
let n = 200;
let k = 9;

View File

@ -521,6 +521,7 @@ where
///
/// If the block does not pass basic validity checks,
/// returns an error immediately.
#[allow(clippy::unwrap_in_result)]
fn queue_block(&mut self, block: Arc<Block>) -> Result<RequestBlock, VerifyCheckpointError> {
// Set up a oneshot channel to send results
let (tx, rx) = oneshot::channel();
@ -595,6 +596,7 @@ where
/// During checkpoint range processing, process all the blocks at `height`.
///
/// Returns the first valid block. If there is no valid block, returns None.
#[allow(clippy::unwrap_in_result)]
fn process_height(
&mut self,
height: block::Height,

View File

@ -272,6 +272,7 @@ impl AddressBook {
/// As an exception, this function can ignore all changes for specific
/// [`SocketAddr`]s. Ignored addresses will never be used to connect to
/// peers.
#[allow(clippy::unwrap_in_result)]
pub fn update(&mut self, change: MetaAddrChange) -> Option<MetaAddr> {
let previous = self.get(&change.addr());

View File

@ -294,6 +294,7 @@ impl MetaAddr {
/// [`MetaAddr`].
///
/// Returns [`None`] if the gossiped peer is missing the untrusted services field.
#[allow(clippy::unwrap_in_result)]
pub fn new_gossiped_change(self) -> Option<MetaAddrChange> {
let untrusted_services = self.services?;
@ -593,6 +594,7 @@ impl MetaAddr {
/// Return a sanitized version of this `MetaAddr`, for sending to a remote peer.
///
/// Returns `None` if this `MetaAddr` should not be sent to remote peers.
#[allow(clippy::unwrap_in_result)]
pub fn sanitize(&self, network: Network) -> Option<MetaAddr> {
if !self.last_known_info_is_valid_for_outbound(network) {
return None;

View File

@ -203,6 +203,7 @@ impl ClientRequestReceiver {
/// Closing the channel ensures that:
/// - the request stream terminates, and
/// - task notifications are not required.
#[allow(clippy::unwrap_in_result)]
pub fn close_and_flush_next(&mut self) -> Option<InProgressClientRequest> {
self.inner.close();
@ -419,6 +420,7 @@ impl MissingInventoryCollector {
impl Client {
/// Check if this connection's heartbeat task has exited.
#[allow(clippy::unwrap_in_result)]
fn check_heartbeat(&mut self, cx: &mut Context<'_>) -> Result<(), SharedPeerError> {
let is_canceled = self
.shutdown_tx

View File

@ -136,6 +136,7 @@ impl ClientTestHarness {
///
/// TODO: make ReceiveRequestAttempt generic, and use it here.
#[allow(dead_code)]
#[allow(clippy::unwrap_in_result)]
pub(crate) fn try_to_receive_inventory_change(&mut self) -> Option<InventoryChange> {
let receive_result = self
.inv_receiver

View File

@ -181,6 +181,7 @@ impl ErrorSlot {
///
/// Briefly locks the error slot's threaded `std::sync::Mutex`, to get a
/// reference to the error in the slot.
#[allow(clippy::unwrap_in_result)]
pub fn try_get_error(&self) -> Option<SharedPeerError> {
self.0
.lock()
@ -197,6 +198,7 @@ impl ErrorSlot {
///
/// Briefly locks the error slot's threaded `std::sync::Mutex`, to check for
/// a previous error, then update the error in the slot.
#[allow(clippy::unwrap_in_result)]
pub fn try_update_error(&self, e: SharedPeerError) -> Result<(), AlreadyErrored> {
let mut guard = self.0.lock().expect("error mutex should be unpoisoned");

View File

@ -587,6 +587,7 @@ where
}
/// Performs P2C on `ready_service_list` to randomly select a less-loaded ready service.
#[allow(clippy::unwrap_in_result)]
fn select_p2c_peer_from_list(&self, ready_service_list: &HashSet<D::Key>) -> Option<D::Key> {
match ready_service_list.len() {
0 => None,

View File

@ -181,6 +181,7 @@ impl AddrV2 {
///
/// The returned IP version is chosen based on `IP_ADDR_SIZE`,
/// which should be [`ADDR_V2_IPV4_ADDR_SIZE`] or [`ADDR_V2_IPV6_ADDR_SIZE`].
#[allow(clippy::unwrap_in_result)]
fn ip_addr_from_bytes<const IP_ADDR_SIZE: usize>(
addr_bytes: Vec<u8>,
) -> Result<IpAddr, SerializationError>

View File

@ -326,6 +326,7 @@ impl Decoder for Codec {
type Item = Message;
type Error = Error;
#[allow(clippy::unwrap_in_result)]
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
use Error::Parse;
match self.state {

View File

@ -78,6 +78,7 @@ pub enum Response {
}
impl fmt::Display for Response {
#[allow(clippy::unwrap_in_result)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&match self {
Response::Nil => "Nil".to_string(),

View File

@ -343,6 +343,7 @@ where
Ok(response)
}
#[allow(clippy::unwrap_in_result)]
fn get_blockchain_info(&self) -> Result<GetBlockChainInfo> {
let network = self.network;
@ -1227,6 +1228,7 @@ pub struct GetAddressTxIdsRequest {
impl GetRawTransaction {
/// Converts `tx` and `height` into a new `GetRawTransaction` in the `verbose` format.
#[allow(clippy::unwrap_in_result)]
fn from_transaction(
tx: Arc<Transaction>,
height: Option<block::Height>,

View File

@ -152,6 +152,7 @@ impl CachedFfiTransaction {
/// Verify if the script in the input at `input_index` of a transaction correctly
/// spends the matching [`transparent::Output`] it refers to, with the [`ConsensusBranchId`]
/// of the block containing the transaction.
#[allow(clippy::unwrap_in_result)]
pub fn is_valid(&self, branch_id: ConsensusBranchId, input_index: usize) -> Result<(), Error> {
let previous_output = self
.all_previous_outputs
@ -210,6 +211,7 @@ impl CachedFfiTransaction {
/// Returns the number of transparent signature operations in the
/// transparent inputs and outputs of this transaction.
#[allow(clippy::unwrap_in_result)]
pub fn legacy_sigop_count(&self) -> Result<u64, Error> {
let mut err = 0;

View File

@ -112,6 +112,7 @@ impl Strategy for PreparedChain {
type Tree = PreparedChainTree;
type Value = <PreparedChainTree as ValueTree>::Value;
#[allow(clippy::unwrap_in_result)]
fn new_tree(&self, runner: &mut TestRunner) -> NewTree<Self> {
let mut chain = self.chain.lock().unwrap();
if chain.is_none() {

View File

@ -39,6 +39,7 @@ impl Iter<'_> {
}
}
#[allow(clippy::unwrap_in_result)]
fn next_finalized_block(&mut self) -> Option<Arc<Block>> {
let Iter { service, state } = self;

View File

@ -235,6 +235,7 @@ impl FinalizedState {
/// - Propagates any errors from updating history and note commitment trees
/// - If `hashFinalSaplingRoot` / `hashLightClientRoot` / `hashBlockCommitments`
/// does not match the expected value
#[allow(clippy::unwrap_in_result)]
pub fn commit_finalized_direct(
&mut self,
finalized: FinalizedBlock,

View File

@ -216,6 +216,7 @@ impl ReadDisk for DiskDb {
.valid()
}
#[allow(clippy::unwrap_in_result)]
fn zs_get<C, K, V>(&self, cf: &C, key: &K) -> Option<V>
where
C: rocksdb::AsColumnFamilyRef,

View File

@ -59,18 +59,21 @@ impl ZebraDb {
/// Returns the tip height and hash, if there is one.
//
// TODO: move this method to the tip section
#[allow(clippy::unwrap_in_result)]
pub fn tip(&self) -> Option<(block::Height, block::Hash)> {
let hash_by_height = self.db.cf_handle("hash_by_height").unwrap();
self.db.zs_last_key_value(&hash_by_height)
}
/// Returns the finalized hash for a given `block::Height` if it is present.
#[allow(clippy::unwrap_in_result)]
pub fn hash(&self, height: block::Height) -> Option<block::Hash> {
let hash_by_height = self.db.cf_handle("hash_by_height").unwrap();
self.db.zs_get(&hash_by_height, &height)
}
/// Returns the height of the given block if it exists.
#[allow(clippy::unwrap_in_result)]
pub fn height(&self, hash: block::Hash) -> Option<block::Height> {
let height_by_hash = self.db.cf_handle("height_by_hash").unwrap();
self.db.zs_get(&height_by_hash, &hash)
@ -80,6 +83,7 @@ impl ZebraDb {
/// [`Height`](zebra_chain::block::Height), if it exists in the finalized chain.
//
// TODO: move this method to the start of the section
#[allow(clippy::unwrap_in_result)]
pub fn block(&self, hash_or_height: HashOrHeight) -> Option<Arc<Block>> {
// Blocks
let block_header_by_height = self.db.cf_handle("block_by_height").unwrap();
@ -116,6 +120,7 @@ impl ZebraDb {
/// Returns the Sapling
/// [`NoteCommitmentTree`](sapling::tree::NoteCommitmentTree) specified by a
/// hash or height, if it exists in the finalized `db`.
#[allow(clippy::unwrap_in_result)]
pub fn sapling_tree(
&self,
hash_or_height: HashOrHeight,
@ -130,6 +135,7 @@ impl ZebraDb {
/// Returns the Orchard
/// [`NoteCommitmentTree`](orchard::tree::NoteCommitmentTree) specified by a
/// hash or height, if it exists in the finalized `db`.
#[allow(clippy::unwrap_in_result)]
pub fn orchard_tree(
&self,
hash_or_height: HashOrHeight,
@ -166,6 +172,7 @@ impl ZebraDb {
/// Returns the [`TransactionLocation`] for [`transaction::Hash`],
/// if it exists in the finalized chain.
#[allow(clippy::unwrap_in_result)]
pub fn transaction_location(&self, hash: transaction::Hash) -> Option<TransactionLocation> {
let tx_loc_by_hash = self.db.cf_handle("tx_by_hash").unwrap();
self.db.zs_get(&tx_loc_by_hash, &hash)
@ -173,6 +180,7 @@ impl ZebraDb {
/// Returns the [`transaction::Hash`] for [`TransactionLocation`],
/// if it exists in the finalized chain.
#[allow(clippy::unwrap_in_result)]
#[allow(dead_code)]
pub fn transaction_hash(&self, location: TransactionLocation) -> Option<transaction::Hash> {
let hash_by_tx_loc = self.db.cf_handle("hash_by_tx_loc").unwrap();
@ -183,6 +191,7 @@ impl ZebraDb {
/// if a transaction with that hash exists in the finalized chain.
//
// TODO: move this method to the start of the section
#[allow(clippy::unwrap_in_result)]
pub fn transaction(&self, hash: transaction::Hash) -> Option<(Arc<Transaction>, Height)> {
let tx_by_loc = self.db.cf_handle("tx_by_loc").unwrap();
@ -406,6 +415,7 @@ impl DiskWriteBatch {
/// # Errors
///
/// - This method does not currently return any errors.
#[allow(clippy::unwrap_in_result)]
pub fn prepare_block_header_transactions_batch(
&mut self,
db: &DiskDb,

View File

@ -67,6 +67,7 @@ impl DiskWriteBatch {
/// # Errors
///
/// - Returns any errors from updating the history tree
#[allow(clippy::unwrap_in_result)]
pub fn prepare_history_batch(
&mut self,
db: &DiskDb,
@ -108,6 +109,7 @@ impl DiskWriteBatch {
/// # Errors
///
/// - Propagates any errors from updating value pools
#[allow(clippy::unwrap_in_result)]
pub fn prepare_chain_value_pools_batch(
&mut self,
db: &DiskDb,

View File

@ -91,6 +91,7 @@ impl ZebraDb {
/// Returns the Sprout note commitment tree matching the given anchor.
///
/// This is used for interstitial tree building, which is unique to Sprout.
#[allow(clippy::unwrap_in_result)]
pub fn sprout_note_commitment_tree_by_anchor(
&self,
sprout_anchor: &sprout::tree::Root,
@ -118,6 +119,7 @@ impl ZebraDb {
/// Returns the Sapling note commitment tree matching the given block height.
#[allow(dead_code)]
#[allow(clippy::unwrap_in_result)]
pub fn sapling_note_commitment_tree_by_height(
&self,
height: &Height,
@ -145,6 +147,7 @@ impl ZebraDb {
/// Returns the Orchard note commitment tree matching the given block height.
#[allow(dead_code)]
#[allow(clippy::unwrap_in_result)]
pub fn orchard_note_commitment_tree_by_height(
&self,
height: &Height,
@ -199,6 +202,7 @@ impl DiskWriteBatch {
/// # Errors
///
/// - This method doesn't currently return any errors, but it might in future
#[allow(clippy::unwrap_in_result)]
pub fn prepare_nullifier_batch(
&mut self,
db: &DiskDb,
@ -263,6 +267,7 @@ impl DiskWriteBatch {
/// # Errors
///
/// - Propagates any errors from updating the history tree
#[allow(clippy::unwrap_in_result)]
pub fn prepare_note_commitment_batch(
&mut self,
db: &DiskDb,

View File

@ -43,6 +43,7 @@ impl ZebraDb {
/// Returns the [`AddressBalanceLocation`] for a [`transparent::Address`],
/// if it is in the finalized state.
#[allow(clippy::unwrap_in_result)]
pub fn address_balance_location(
&self,
address: &transparent::Address,
@ -89,6 +90,7 @@ impl ZebraDb {
/// Returns the transparent output for an [`OutputLocation`],
/// if it is unspent in the finalized state.
#[allow(clippy::unwrap_in_result)]
pub fn utxo_by_location(
&self,
output_location: OutputLocation,
@ -176,6 +178,7 @@ impl ZebraDb {
}
/// Returns the transaction hash for an [`TransactionLocation`].
#[allow(clippy::unwrap_in_result)]
pub fn tx_id_by_location(&self, tx_location: TransactionLocation) -> Option<transaction::Hash> {
let hash_by_tx_loc = self.db.cf_handle("hash_by_tx_loc").unwrap();
@ -415,6 +418,7 @@ impl DiskWriteBatch {
/// # Errors
///
/// - This method doesn't currently return any errors, but it might in future
#[allow(clippy::unwrap_in_result)]
pub fn prepare_new_transparent_outputs_batch(
&mut self,
db: &DiskDb,
@ -490,6 +494,7 @@ impl DiskWriteBatch {
/// # Errors
///
/// - This method doesn't currently return any errors, but it might in future
#[allow(clippy::unwrap_in_result)]
pub fn prepare_spent_transparent_outputs_batch(
&mut self,
db: &DiskDb,
@ -543,6 +548,7 @@ impl DiskWriteBatch {
/// # Errors
///
/// - This method doesn't currently return any errors, but it might in future
#[allow(clippy::unwrap_in_result)]
pub fn prepare_spending_transparent_tx_ids_batch(
&mut self,
db: &DiskDb,
@ -591,6 +597,7 @@ impl DiskWriteBatch {
/// # Errors
///
/// - This method doesn't currently return any errors, but it might in future
#[allow(clippy::unwrap_in_result)]
pub fn prepare_transparent_balances_batch(
&mut self,
db: &DiskDb,

View File

@ -381,6 +381,7 @@ impl NonFinalizedState {
///
/// The trees must be the trees of the finalized tip.
/// They are used to recreate the trees if a fork is needed.
#[allow(clippy::unwrap_in_result)]
fn parent_chain(
&mut self,
parent_hash: block::Hash,

View File

@ -266,6 +266,7 @@ impl Chain {
///
/// The trees must match the trees of the finalized tip and are used
/// to rebuild them after the fork.
#[allow(clippy::unwrap_in_result)]
pub fn fork(
&self,
fork_tip: block::Hash,
@ -704,6 +705,7 @@ trait UpdateWith<T> {
impl UpdateWith<ContextuallyValidBlock> for Chain {
#[instrument(skip(self, contextually_valid), fields(block = %contextually_valid.block))]
#[allow(clippy::unwrap_in_result)]
fn update_chain_tip_with(
&mut self,
contextually_valid: &ContextuallyValidBlock,
@ -992,6 +994,7 @@ impl
&HashMap<transparent::OutPoint, transparent::OrderedUtxo>,
)> for Chain
{
#[allow(clippy::unwrap_in_result)]
fn update_chain_tip_with(
&mut self,
&(created_outputs, creating_tx_hash, block_created_outputs): &(

View File

@ -66,12 +66,19 @@ impl
&transparent::OrderedUtxo,
)> for TransparentTransfers
{
#[allow(clippy::unwrap_in_result)]
fn update_chain_tip_with(
&mut self,
&(outpoint, created_utxo): &(&transparent::OutPoint, &transparent::OrderedUtxo),
) -> Result<(), ValidateContextError> {
self.balance =
(self.balance + created_utxo.utxo.output.value().constrain().unwrap()).unwrap();
self.balance = (self.balance
+ created_utxo
.utxo
.output
.value()
.constrain()
.expect("NonNegative values are always valid NegativeAllowed values"))
.expect("total UTXO value has already been checked");
let transaction_location = transaction_location(created_utxo);
let output_location = OutputLocation::from_outpoint(transaction_location, outpoint);
@ -94,8 +101,14 @@ impl
&(outpoint, created_utxo): &(&transparent::OutPoint, &transparent::OrderedUtxo),
_position: RevertPosition,
) {
self.balance =
(self.balance - created_utxo.utxo.output.value().constrain().unwrap()).unwrap();
self.balance = (self.balance
- created_utxo
.utxo
.output
.value()
.constrain()
.expect("NonNegative values are always valid NegativeAllowed values"))
.expect("reversing previous balance changes is always valid");
let transaction_location = transaction_location(created_utxo);
let output_location = OutputLocation::from_outpoint(transaction_location, outpoint);
@ -130,6 +143,7 @@ impl
&transparent::OrderedUtxo,
)> for TransparentTransfers
{
#[allow(clippy::unwrap_in_result)]
fn update_chain_tip_with(
&mut self,
&(spending_input, spending_tx_hash, spent_output): &(
@ -139,8 +153,14 @@ impl
),
) -> Result<(), ValidateContextError> {
// Spending a UTXO subtracts value from the balance
self.balance =
(self.balance - spent_output.utxo.output.value().constrain().unwrap()).unwrap();
self.balance = (self.balance
- spent_output
.utxo
.output
.value()
.constrain()
.expect("NonNegative values are always valid NegativeAllowed values"))
.expect("total UTXO value has already been checked");
let spent_outpoint = spending_input.outpoint().expect("checked by caller");
@ -166,8 +186,14 @@ impl
),
_position: RevertPosition,
) {
self.balance =
(self.balance + spent_output.utxo.output.value().constrain().unwrap()).unwrap();
self.balance = (self.balance
+ spent_output
.utxo
.output
.value()
.constrain()
.expect("NonNegative values are always valid NegativeAllowed values"))
.expect("reversing previous balance changes is always valid");
let spent_outpoint = spending_input.outpoint().expect("checked by caller");
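
The balance updates above keep their panics but spell out why they are unreachable: the UTXO values have already been range-checked by consensus rules, and reverting a previously applied change cannot fail. The same idea in miniature, with plain checked integer arithmetic standing in for Zebra's `Amount` type (hypothetical function, not Zebra code):

```rust
// A panic here would mean the earlier consensus checks were broken,
// which is a bug worth crashing on, not an input error.
fn add_to_balance(balance: i64, utxo_value: i64) -> i64 {
    balance
        .checked_add(utxo_value)
        .expect("total UTXO value has already been checked")
}

fn main() {
    assert_eq!(add_to_balance(100, 25), 125);
}
```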

View File

@ -131,6 +131,7 @@ impl<T> TestDirExt for T
where
Self: AsRef<Path> + Sized,
{
#[allow(clippy::unwrap_in_result)]
fn spawn_child_with_command(self, cmd: &str, args: Arguments) -> Result<TestChild<Self>> {
let mut cmd = test_cmd(cmd, self.as_ref())?;
@ -639,6 +640,7 @@ impl<T> TestChild<T> {
/// Kills the child on error, or after the configured timeout has elapsed.
/// See [`Self::expect_line_matching_regex_set`] for details.
#[instrument(skip(self))]
#[allow(clippy::unwrap_in_result)]
pub fn expect_stdout_line_matches<R>(&mut self, success_regex: R) -> Result<&mut Self>
where
R: ToRegex + Debug,
@ -665,6 +667,7 @@ impl<T> TestChild<T> {
/// Kills the child on error, or after the configured timeout has elapsed.
/// See [`Self::expect_line_matching_regex_set`] for details.
#[instrument(skip(self))]
#[allow(clippy::unwrap_in_result)]
pub fn expect_stderr_line_matches<R>(&mut self, success_regex: R) -> Result<&mut Self>
where
R: ToRegex + Debug,
@ -689,6 +692,7 @@ impl<T> TestChild<T> {
///
/// [`Self::expect_line_matching_regexes`] wrapper for strings,
/// [`Regex`](regex::Regex)es, and [`RegexSet`]s.
#[allow(clippy::unwrap_in_result)]
pub fn expect_line_matching_regex_set<L, R>(
&mut self,
lines: &mut L,
@ -709,6 +713,7 @@ impl<T> TestChild<T> {
/// Checks each line in `lines` against a regex set, and returns Ok if a line matches.
///
/// [`Self::expect_line_matching_regexes`] wrapper for regular expression iterators.
#[allow(clippy::unwrap_in_result)]
pub fn expect_line_matching_regex_iter<L, I>(
&mut self,
lines: &mut L,
@ -1007,6 +1012,7 @@ impl<T> TestOutput<T> {
/// Tests if standard output matches `regex`.
#[instrument(skip(self))]
#[allow(clippy::unwrap_in_result)]
pub fn stdout_matches<R>(&self, regex: R) -> Result<&Self>
where
R: ToRegex + Debug,
@ -1030,6 +1036,7 @@ impl<T> TestOutput<T> {
/// Tests if any lines in standard output match `regex`.
#[instrument(skip(self))]
#[allow(clippy::unwrap_in_result)]
pub fn stdout_line_matches<R>(&self, regex: R) -> Result<&Self>
where
R: ToRegex + Debug,
@ -1059,6 +1066,7 @@ impl<T> TestOutput<T> {
/// Tests if standard error matches `regex`.
#[instrument(skip(self))]
#[allow(clippy::unwrap_in_result)]
pub fn stderr_matches<R>(&self, regex: R) -> Result<&Self>
where
R: ToRegex + Debug,
@ -1082,6 +1090,7 @@ impl<T> TestOutput<T> {
/// Tests if any lines in standard error match `regex`.
#[instrument(skip(self))]
#[allow(clippy::unwrap_in_result)]
pub fn stderr_line_matches<R>(&self, regex: R) -> Result<&Self>
where
R: ToRegex + Debug,

View File

@ -194,6 +194,7 @@ impl Application for ZebradApp {
/// beyond the default ones provided by the framework, this is the place
/// to do so.
#[allow(clippy::print_stderr)]
#[allow(clippy::unwrap_in_result)]
fn register_components(&mut self, command: &Self::Cmd) -> Result<(), FrameworkError> {
use crate::components::{
metrics::MetricsEndpoint, tokio::TokioComponent, tracing::TracingEndpoint,
@ -316,7 +317,7 @@ impl Application for ZebradApp {
// This MUST happen after `Terminal::new` to ensure our preferred panic
// handler is the last one installed
let (panic_hook, eyre_hook) = builder.into_hooks();
eyre_hook.install().unwrap();
eyre_hook.install().expect("eyre_hook.install() error");
// The Sentry default config pulls in the DSN from the `SENTRY_DSN`
// environment variable.
@ -399,6 +400,7 @@ impl Application for ZebradApp {
}
/// Load this application's configuration and initialize its components.
#[allow(clippy::unwrap_in_result)]
fn init(&mut self, command: &Self::Cmd) -> Result<(), FrameworkError> {
// Create and register components with the application.
// We do this first to calculate a proper dependency ordering before
@ -406,7 +408,10 @@ impl Application for ZebradApp {
self.register_components(command)?;
// Fire callback to signal state in the application lifecycle
let config = self.config.take().unwrap();
let config = self
.config
.take()
.expect("register_components always populates the config");
self.after_config(config)?;
Ok(())

View File

@ -170,6 +170,7 @@ impl Storage {
///
/// If inserting this transaction evicts other transactions, they will be tracked
/// as [`SameEffectsChainRejectionError::RandomlyEvicted`].
#[allow(clippy::unwrap_in_result)]
pub fn insert(&mut self, tx: VerifiedUnminedTx) -> Result<UnminedTxId, MempoolError> {
// # Security
//

View File

@ -135,6 +135,7 @@ impl VerifiedSet {
/// be too bad.
///
/// [ZIP-401]: https://zips.z.cash/zip-0401
#[allow(clippy::unwrap_in_result)]
pub fn evict_one(&mut self) -> Option<VerifiedUnminedTx> {
if self.transactions.is_empty() {
None
@ -148,7 +149,8 @@ impl VerifiedSet {
.map(|tx| tx.clone().eviction_weight())
.collect();
let dist = WeightedIndex::new(weights).unwrap();
let dist = WeightedIndex::new(weights)
.expect("there is at least one weight and all weights are valid");
Some(self.remove(dist.sample(&mut thread_rng())))
}
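
The eviction change documents why `WeightedIndex::new` cannot fail there: assuming the rand 0.8 API, it only errors on an empty weight list, a negative weight, or a zero or overflowing total, and the non-empty check plus positive eviction weights rule those out. A standalone illustration:

```rust
use rand::distributions::WeightedIndex;
use rand::prelude::*;

fn main() {
    // Non-empty, strictly positive weights: construction cannot fail.
    let weights = [3u32, 1, 4];
    let dist = WeightedIndex::new(weights)
        .expect("there is at least one weight and all weights are valid");
    let picked = dist.sample(&mut thread_rng()); // index into `weights`
    assert!(picked < weights.len());

    // The cases the expect message rules out do return errors:
    assert!(WeightedIndex::new(Vec::<u32>::new()).is_err()); // no weights
    assert!(WeightedIndex::new([0u32, 0]).is_err()); // zero total weight
}
```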

View File

@ -22,13 +22,14 @@ pub struct TokioComponent {
}
impl TokioComponent {
#[allow(clippy::unwrap_in_result)]
pub fn new() -> Result<Self, FrameworkError> {
Ok(Self {
rt: Some(
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap(),
.expect("runtime building should not fail"),
),
})
}
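
`tokio::runtime::Builder::build` returns `io::Result<Runtime>` and only fails when the OS denies the resources the runtime needs (for example, it cannot spawn worker threads or set up the I/O driver), so the expect above treats failure as unrecoverable. A minimal sketch, assuming tokio 1.x with the `rt-multi-thread` feature:

```rust
use tokio::runtime::Builder;

fn main() {
    // Fallible in principle, but a failure here means the process could
    // not do useful work anyway, so panicking with a clear message is fine.
    let rt = Builder::new_multi_thread()
        .enable_all()
        .build()
        .expect("runtime building should not fail");

    rt.block_on(async {
        println!("runtime is up");
    });
}
```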

View File

@ -50,6 +50,7 @@ impl TracingEndpoint {
}
#[cfg(feature = "filter-reload")]
#[allow(clippy::unwrap_in_result)]
pub fn init_tokio(&mut self, tokio_component: &TokioComponent) -> Result<(), FrameworkError> {
use hyper::{
service::{make_service_fn, service_fn},

View File

@ -111,6 +111,7 @@ impl<T> ZebradTestDirExt for T
where
Self: TestDirExt + AsRef<Path> + Sized,
{
#[allow(clippy::unwrap_in_result)]
fn spawn_child(self, extra_args: Arguments) -> Result<TestChild<Self>> {
let dir = self.as_ref();
let default_config_path = dir.join("zebrad.toml");

View File

@ -130,6 +130,7 @@ impl<T> LightWalletdTestDirExt for T
where
Self: TestDirExt + AsRef<Path> + Sized,
{
#[allow(clippy::unwrap_in_result)]
fn spawn_lightwalletd_child(
self,
lightwalletd_state_path: impl Into<Option<PathBuf>>,