Move history tree and value balance to typed column families (#8115)

* impl TryFrom<zcash_primitives::BlockHeight> for Height

* Add type-safe read and write database methods

* Only allow typed access to the scanner DB

* Update docs

* Implement a common method as a trait

* Fix imports

* Tidy state imports

* Activate tracing logging macros in the whole scanner crate

* Fix dead code warnings

* Use a more sensible export order

* Remove a 1.72 lint exception now that 1.74 is stable

* Switch history trees over to TypedColumnFamily, and remove redundant code

* Add typed batch creation methods, and switch history trees to them

* Convert ValueBalance to typed column families

* Make the APIs compatible after a merge

* Use `ZebraDb` instead of `DiskDb` where needed

---------

Co-authored-by: Marek <mail@marek.onl>
teor 2023-12-21 10:20:57 +11:00 committed by GitHub
parent 3c8b93d986
commit ad015e04d9
12 changed files with 220 additions and 125 deletions
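For context on the pattern this commit applies: a typed column family carries its key and value types in the type system, so a mistyped read or write becomes a compile error instead of a runtime deserialization failure. The sketch below is a minimal illustration with the same shape as the diff, not Zebra's actual implementation; the serialization traits are simplified stand-ins for Zebra's `IntoDisk`/`FromDisk`.

use std::marker::PhantomData;

// Simplified stand-ins for Zebra's serialization traits (assumed
// signatures, for illustration only).
pub trait IntoDisk {
    fn as_bytes(&self) -> Vec<u8>;
}
pub trait FromDisk: Sized {
    fn from_bytes(bytes: &[u8]) -> Self;
}

/// A column family handle whose key and value types are fixed at compile time.
pub struct TypedColumnFamily<'cf, Key, Value>
where
    Key: IntoDisk + FromDisk,
    Value: IntoDisk + FromDisk,
{
    name: &'cf str,
    _types: PhantomData<(Key, Value)>,
}

// Each column family then pins its types exactly once, as the diff does:
// pub type HistoryTreeCf<'cf> = TypedColumnFamily<'cf, (), NonEmptyHistoryTree>;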


@@ -501,6 +501,12 @@ impl From<NonEmptyHistoryTree> for HistoryTree {
}
}
impl From<Option<NonEmptyHistoryTree>> for HistoryTree {
fn from(tree: Option<NonEmptyHistoryTree>) -> Self {
HistoryTree(tree)
}
}
impl Deref for HistoryTree {
type Target = Option<NonEmptyHistoryTree>;
fn deref(&self) -> &Self::Target {
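The added `From<Option<NonEmptyHistoryTree>>` impl is what lets the state read path build a `HistoryTree` directly from an optional stored tree (see `Arc::new(HistoryTree::from(history_tree))` in the `zebra-state` changes below). A hedged usage sketch; the helper function is hypothetical:

use zebra_chain::history_tree::{HistoryTree, NonEmptyHistoryTree};

// An absent stored tree converts to the empty HistoryTree, with no
// explicit unwrap_or_default() at the call site.
fn tip_tree(stored: Option<NonEmptyHistoryTree>) -> HistoryTree {
    HistoryTree::from(stored)
}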


@@ -8,16 +8,6 @@
#![doc(html_root_url = "https://docs.rs/zebra_chain")]
// Required by bitvec! macro
#![recursion_limit = "256"]
//
// Rust 1.72 has a false positive when nested generics are used inside Arc.
// This makes the `arc_with_non_send_sync` lint trigger on a lot of proptest code.
//
// TODO: remove this allow when Rust 1.73 is stable, because this lint bug is fixed in that release:
// <https://github.com/rust-lang/rust-clippy/issues/11076>
#![cfg_attr(
any(test, feature = "proptest-impl"),
allow(clippy::arc_with_non_send_sync)
)]
#[macro_use]
extern crate bitflags;


@@ -33,16 +33,6 @@
#![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")]
#![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")]
#![doc(html_root_url = "https://docs.rs/zebra_consensus")]
//
// Rust 1.72 has a false positive when nested generics are used inside Arc.
// This makes the `arc_with_non_send_sync` lint trigger on a lot of proptest code.
//
// TODO: remove this allow when Rust 1.73 is stable, because this lint bug is fixed in that release:
// <https://github.com/rust-lang/rust-clippy/issues/11076>
#![cfg_attr(
any(test, feature = "proptest-impl"),
allow(clippy::arc_with_non_send_sync)
)]
mod block;
mod checkpoint;


@@ -132,16 +132,6 @@
#![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")]
#![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")]
#![doc(html_root_url = "https://docs.rs/zebra_network")]
//
// Rust 1.72 has a false positive when nested generics are used inside Arc.
// This makes the `arc_with_non_send_sync` lint trigger on a lot of proptest code.
//
// TODO: remove this allow when Rust 1.73 is stable, because this lint bug is fixed in that release:
// <https://github.com/rust-lang/rust-clippy/issues/11076>
#![cfg_attr(
any(test, feature = "proptest-impl"),
allow(clippy::arc_with_non_send_sync)
)]
#[macro_use]
extern crate pin_project;


@@ -30,7 +30,7 @@ use itertools::Itertools;
use zebra_chain::block::Height;
use zebra_state::{
SaplingScannedDatabaseEntry, SaplingScannedDatabaseIndex, SaplingScannedResult,
DiskWriteBatch, SaplingScannedDatabaseEntry, SaplingScannedDatabaseIndex, SaplingScannedResult,
SaplingScanningKey, TransactionIndex, TransactionLocation, TypedColumnFamily, WriteTypedBatch,
};
@@ -53,7 +53,7 @@ pub type SaplingTxIdsCf<'cf> =
/// This type should be used so the compiler can detect incorrectly typed accesses to the
/// column family.
pub type WriteSaplingTxIdsBatch<'cf> =
WriteTypedBatch<'cf, SaplingScannedDatabaseIndex, Option<SaplingScannedResult>>;
WriteTypedBatch<'cf, SaplingScannedDatabaseIndex, Option<SaplingScannedResult>, DiskWriteBatch>;
impl Storage {
// Reading Sapling database entries
@@ -167,7 +167,7 @@ impl Storage {
) {
// We skip key heights that have one or more results, so the results for each key height
// must be in a single batch.
let mut batch = self.sapling_tx_ids_cf().for_writing();
let mut batch = self.sapling_tx_ids_cf().new_batch_for_writing();
// Every `INSERT_CONTROL_INTERVAL` we add a new entry to the scanner database for each key
// so we can track progress made in the last interval even if no transaction was yet found.
@@ -192,7 +192,7 @@
value: Some(sapling_result),
};
batch = batch.zs_insert(entry.index, entry.value);
batch = batch.zs_insert(&entry.index, &entry.value);
}
batch
@@ -228,7 +228,7 @@
// TODO: ignore incorrect changes to birthday heights,
// and redundant birthday heights
self.sapling_tx_ids_cf()
.for_writing()
.new_batch_for_writing()
.insert_sapling_height(sapling_key, skip_up_to_height)
.write_batch()
.expect("unexpected database write failure");
@@ -249,6 +249,6 @@ impl<'cf> InsertSaplingHeight for WriteSaplingTxIdsBatch<'cf> {
let index = SaplingScannedDatabaseIndex::min_for_key_and_height(sapling_key, height);
// TODO: assert that we don't overwrite any entries here.
self.zs_insert(index, None)
self.zs_insert(&index, &None)
}
}
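Taken together, the scanner's write path is now a builder chain: open a typed batch, stage typed inserts, and write once. A sketch of the calling shape, assuming hypothetical `storage`, `index`, and `result` values of the types used above:

// Hypothetical caller code using the methods from this diff.
let batch = storage
    .sapling_tx_ids_cf()
    .new_batch_for_writing()
    .zs_insert(&index, &Some(result));

batch
    .write_batch()
    .expect("unexpected database write failure");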


@@ -11,23 +11,16 @@
#![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")]
#![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")]
#![doc(html_root_url = "https://docs.rs/zebra_state")]
//
// Rust 1.72 has a false positive when nested generics are used inside Arc.
// This makes the `arc_with_non_send_sync` lint trigger on a lot of proptest code.
//
// TODO: remove this allow when Rust 1.73 is stable, because this lint bug is fixed in that release:
// <https://github.com/rust-lang/rust-clippy/issues/11076>
#![cfg_attr(
any(test, feature = "proptest-impl"),
allow(clippy::arc_with_non_send_sync)
)]
#[macro_use]
extern crate tracing;
// TODO: only export the Config struct and a few other important methods
pub mod config;
// Most constants are exported by default
pub mod constants;
// Allow use in external tests
#[cfg(any(test, feature = "proptest-impl"))]
pub mod arbitrary;
@@ -59,12 +52,14 @@ pub use service::{
OutputIndex, OutputLocation, TransactionIndex, TransactionLocation,
};
// Allow use in the scanner
#[cfg(feature = "shielded-scan")]
pub use service::finalized_state::{
SaplingScannedDatabaseEntry, SaplingScannedDatabaseIndex, SaplingScannedResult,
SaplingScanningKey,
};
// Allow use in the scanner and external tests
#[cfg(any(test, feature = "proptest-impl", feature = "shielded-scan"))]
pub use service::{
finalized_state::{
@@ -77,6 +72,7 @@ pub use service::{
#[cfg(feature = "getblocktemplate-rpcs")]
pub use response::GetBlockTemplateChainInfo;
// Allow use in external tests
#[cfg(any(test, feature = "proptest-impl"))]
pub use service::{
arbitrary::{populated_state, CHAIN_TIP_UPDATE_WAIT_LIMIT},
@@ -88,15 +84,16 @@ pub use service::{
#[cfg(any(test, feature = "proptest-impl"))]
pub use constants::latest_version_for_adding_subtrees;
#[cfg(any(test, feature = "proptest-impl"))]
pub use config::hidden::{
write_database_format_version_to_disk, write_state_database_format_version_to_disk,
};
// Allow use only inside the crate in production
#[cfg(not(any(test, feature = "proptest-impl")))]
#[allow(unused_imports)]
pub(crate) use config::hidden::{
write_database_format_version_to_disk, write_state_database_format_version_to_disk,
};
#[cfg(any(test, feature = "proptest-impl"))]
pub use config::hidden::{
write_database_format_version_to_disk, write_state_database_format_version_to_disk,
};
pub(crate) use request::ContextuallyVerifiedBlock;


@@ -52,14 +52,15 @@ where
/// This type is also drop-safe: unwritten batches have to be specifically ignored.
#[must_use = "batches must be written to the database"]
#[derive(Debug, Eq, PartialEq)]
pub struct WriteTypedBatch<'cf, Key, Value>
pub struct WriteTypedBatch<'cf, Key, Value, Batch>
where
Key: IntoDisk + FromDisk + Debug,
Value: IntoDisk + FromDisk,
Batch: WriteDisk,
{
inner: TypedColumnFamily<'cf, Key, Value>,
batch: DiskWriteBatch,
batch: Batch,
}
impl<'cf, Key, Value> Debug for TypedColumnFamily<'cf, Key, Value>
@@ -115,17 +116,41 @@ where
})
}
/// Returns a new writeable typed column family for this column family.
// Writing
/// Returns a typed writer for this column family for a new batch.
///
/// This is the only way to get a writeable column family, which ensures
/// These methods are the only way to get a `WriteTypedBatch`, which ensures
/// that the read and write types are consistent.
pub fn for_writing(self) -> WriteTypedBatch<'cf, Key, Value> {
pub fn new_batch_for_writing(self) -> WriteTypedBatch<'cf, Key, Value, DiskWriteBatch> {
WriteTypedBatch {
inner: self,
batch: DiskWriteBatch::new(),
}
}
/// Wraps an existing write batch, and returns a typed writer for this column family.
///
/// These methods are the only way to get a `WriteTypedBatch`, which ensures
/// that the read and write types are consistent.
pub fn take_batch_for_writing(
self,
batch: DiskWriteBatch,
) -> WriteTypedBatch<'cf, Key, Value, DiskWriteBatch> {
WriteTypedBatch { inner: self, batch }
}
/// Wraps an existing write batch reference, and returns a typed writer for this column family.
///
/// These methods are the only way to get a `WriteTypedBatch`, which ensures
/// that the read and write types are consistent.
pub fn with_batch_for_writing(
self,
batch: &mut DiskWriteBatch,
) -> WriteTypedBatch<'cf, Key, Value, &mut DiskWriteBatch> {
WriteTypedBatch { inner: self, batch }
}
// Reading
/// Returns true if this rocksdb column family does not contain any entries.
@@ -250,30 +275,24 @@ where
}
}
impl<'cf, Key, Value> WriteTypedBatch<'cf, Key, Value>
impl<'cf, Key, Value, Batch> WriteTypedBatch<'cf, Key, Value, Batch>
where
Key: IntoDisk + FromDisk + Debug,
Value: IntoDisk + FromDisk,
Batch: WriteDisk,
{
// Writing batches
/// Writes this batch to this column family in the database.
pub fn write_batch(self) -> Result<(), rocksdb::Error> {
self.inner.db.write(self.batch)
}
// Batching before writing
/// Serialize and insert the given key and value into this column family,
/// overwriting any existing `value` for `key`.
pub fn zs_insert(mut self, key: Key, value: Value) -> Self {
pub fn zs_insert(mut self, key: &Key, value: &Value) -> Self {
self.batch.zs_insert(&self.inner.cf, key, value);
self
}
/// Remove the given key from this column family, if it exists.
pub fn zs_delete(mut self, key: Key) -> Self {
pub fn zs_delete(mut self, key: &Key) -> Self {
self.batch.zs_delete(&self.inner.cf, key);
self
@@ -284,10 +303,25 @@ where
//.
// TODO: convert zs_delete_range() to take std::ops::RangeBounds
// see zs_range_iter() for an example of the edge cases
pub fn zs_delete_range(mut self, from: Key, until_strictly_before: Key) -> Self {
pub fn zs_delete_range(mut self, from: &Key, until_strictly_before: &Key) -> Self {
self.batch
.zs_delete_range(&self.inner.cf, from, until_strictly_before);
self
}
}
// Writing a batch to the database requires an owned batch.
impl<'cf, Key, Value> WriteTypedBatch<'cf, Key, Value, DiskWriteBatch>
where
Key: IntoDisk + FromDisk + Debug,
Value: IntoDisk + FromDisk,
{
// Writing batches
/// Writes this batch to this column family in the database,
/// taking ownership and consuming it.
pub fn write_batch(self) -> Result<(), rocksdb::Error> {
self.inner.db.write(self.batch)
}
}
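The new `Batch` type parameter is what makes both write shapes below type-check: staging into a fresh owned batch that is written immediately, or staging into a caller-owned batch that is written later. A hedged sketch; `db` and `new_pool` are hypothetical values of the types used in this diff:

// Owned batch: create, stage typed writes, and write in one chain.
db.chain_value_pools_cf()
    .new_batch_for_writing()
    .zs_insert(&(), &new_pool)
    .write_batch()
    .expect("unexpected database write failure");

// Borrowed batch: stage typed writes into an existing DiskWriteBatch.
// write_batch() is deliberately unavailable here, because it is only
// implemented for the owned DiskWriteBatch form; the caller writes later.
let mut batch = DiskWriteBatch::new();
let _ = db
    .chain_value_pools_cf()
    .with_batch_for_writing(&mut batch)
    .zs_insert(&(), &new_pool);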


@@ -202,6 +202,37 @@ impl WriteDisk for DiskWriteBatch {
}
}
// Allow &mut DiskWriteBatch as well as owned DiskWriteBatch
impl<T> WriteDisk for &mut T
where
T: WriteDisk,
{
fn zs_insert<C, K, V>(&mut self, cf: &C, key: K, value: V)
where
C: rocksdb::AsColumnFamilyRef,
K: IntoDisk + Debug,
V: IntoDisk,
{
(*self).zs_insert(cf, key, value)
}
fn zs_delete<C, K>(&mut self, cf: &C, key: K)
where
C: rocksdb::AsColumnFamilyRef,
K: IntoDisk + Debug,
{
(*self).zs_delete(cf, key)
}
fn zs_delete_range<C, K>(&mut self, cf: &C, from: K, until_strictly_before: K)
where
C: rocksdb::AsColumnFamilyRef,
K: IntoDisk + Debug,
{
(*self).zs_delete_range(cf, from, until_strictly_before)
}
}
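This forwarding impl is the standard blanket-impl trick that lets the same generic code accept either an owned writer or a mutable borrow of one. A minimal self-contained sketch of the pattern, using a simplified trait rather than Zebra's `WriteDisk`:

// Simplified stand-in trait, for illustration only.
trait Stage {
    fn put(&mut self, key: &str);
}

struct Batch(Vec<String>);

impl Stage for Batch {
    fn put(&mut self, key: &str) {
        self.0.push(key.to_string());
    }
}

// The same forwarding shape as the diff: any &mut T stages like T itself.
impl<T: Stage> Stage for &mut T {
    fn put(&mut self, key: &str) {
        (*self).put(key)
    }
}

// Generic code can now be instantiated with Batch or &mut Batch alike.
fn stage_one<W: Stage>(mut w: W) {
    w.put("key");
}

fn main() {
    let mut owned = Batch(Vec::new());
    stage_one(&mut owned); // W = &mut Batch: the caller keeps the batch
    stage_one(owned); // W = Batch: the batch is consumed
}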
/// Helper trait for retrieving and deserializing values from rocksdb column families.
///
/// # Deprecation


@@ -10,12 +10,8 @@ use std::collections::BTreeMap;
use bincode::Options;
use zebra_chain::{
amount::NonNegative,
block::Height,
history_tree::{HistoryTree, NonEmptyHistoryTree},
parameters::Network,
primitives::zcash_history,
value_balance::ValueBalance,
amount::NonNegative, block::Height, history_tree::NonEmptyHistoryTree, parameters::Network,
primitives::zcash_history, value_balance::ValueBalance,
};
use crate::service::finalized_state::disk_format::{FromDisk, IntoDisk};
@@ -82,10 +78,3 @@ impl FromDisk for NonEmptyHistoryTree {
.expect("deserialization format should match the serialization format used by IntoDisk")
}
}
// We don't write empty history trees to disk, so we know this one is non-empty.
impl FromDisk for HistoryTree {
fn from_bytes(bytes: impl AsRef<[u8]>) -> Self {
NonEmptyHistoryTree::from_bytes(bytes).into()
}
}


@@ -450,6 +450,7 @@ impl DiskWriteBatch {
prev_note_commitment_trees: Option<NoteCommitmentTrees>,
) -> Result<(), BoxError> {
let db = &zebra_db.db;
// Commit block, transaction, and note commitment tree data.
self.prepare_block_header_and_transaction_data_batch(db, finalized)?;
@@ -486,7 +487,7 @@
// Commit UTXOs and value pools
self.prepare_chain_value_pools_batch(
db,
zebra_db,
finalized,
spent_utxos_by_outpoint,
value_pool,


@@ -11,24 +11,87 @@
//! [`crate::constants::state_database_format_version_in_code()`] must be incremented
//! each time the database format (column, serialization, etc) changes.
use std::{borrow::Borrow, collections::HashMap, sync::Arc};
use std::{
borrow::Borrow,
collections::{BTreeMap, HashMap},
sync::Arc,
};
use zebra_chain::{
amount::NonNegative, block::Height, history_tree::HistoryTree, transparent,
amount::NonNegative,
block::Height,
history_tree::{HistoryTree, NonEmptyHistoryTree},
transparent,
value_balance::ValueBalance,
};
use crate::{
request::FinalizedBlock,
service::finalized_state::{
disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk},
disk_format::RawBytes,
zebra_db::ZebraDb,
disk_db::DiskWriteBatch, disk_format::RawBytes, zebra_db::ZebraDb, TypedColumnFamily,
},
BoxError,
};
/// The name of the History Tree column family.
///
/// This constant should be used so the compiler can detect typos.
pub const HISTORY_TREE: &str = "history_tree";
/// The type for reading history trees from the database.
///
/// This type should be used so the compiler can detect incorrectly typed accesses to the
/// column family.
pub type HistoryTreeCf<'cf> = TypedColumnFamily<'cf, (), NonEmptyHistoryTree>;
/// The legacy (1.3.0 and earlier) type for reading history trees from the database.
/// This type should not be used in new code.
pub type LegacyHistoryTreeCf<'cf> = TypedColumnFamily<'cf, Height, NonEmptyHistoryTree>;
/// A generic raw key type for reading history trees from the database, regardless of the database version.
/// This type should not be used in new code.
pub type RawHistoryTreeCf<'cf> = TypedColumnFamily<'cf, RawBytes, NonEmptyHistoryTree>;
/// The name of the chain value pools column family.
///
/// This constant should be used so the compiler can detect typos.
pub const CHAIN_VALUE_POOLS: &str = "tip_chain_value_pool";
/// The type for reading value pools from the database.
///
/// This type should be used so the compiler can detect incorrectly typed accesses to the
/// column family.
pub type ChainValuePoolsCf<'cf> = TypedColumnFamily<'cf, (), ValueBalance<NonNegative>>;
impl ZebraDb {
// Column family convenience methods
/// Returns a typed handle to the `history_tree` column family.
pub(crate) fn history_tree_cf(&self) -> HistoryTreeCf {
HistoryTreeCf::new(&self.db, HISTORY_TREE)
.expect("column family was created when database was created")
}
/// Returns a legacy typed handle to the `history_tree` column family.
/// This should not be used in new code.
pub(crate) fn legacy_history_tree_cf(&self) -> LegacyHistoryTreeCf {
LegacyHistoryTreeCf::new(&self.db, HISTORY_TREE)
.expect("column family was created when database was created")
}
/// Returns a generic raw key typed handle to the `history_tree` column family.
/// This should not be used in new code.
pub(crate) fn raw_history_tree_cf(&self) -> RawHistoryTreeCf {
RawHistoryTreeCf::new(&self.db, HISTORY_TREE)
.expect("column family was created when database was created")
}
/// Returns a typed handle to the chain value pools column family.
pub(crate) fn chain_value_pools_cf(&self) -> ChainValuePoolsCf {
ChainValuePoolsCf::new(&self.db, CHAIN_VALUE_POOLS)
.expect("column family was created when database was created")
}
// History tree methods
/// Returns the ZIP-221 history tree of the finalized tip.
@@ -36,11 +99,7 @@ impl ZebraDb {
/// If history trees have not been activated yet (pre-Heartwood), or the state is empty,
/// returns an empty history tree.
pub fn history_tree(&self) -> Arc<HistoryTree> {
if self.is_empty() {
return Arc::<HistoryTree>::default();
}
let history_tree_cf = self.db.cf_handle("history_tree").unwrap();
let history_tree_cf = self.history_tree_cf();
// # Backwards Compatibility
//
@@ -56,38 +115,42 @@
//
// So we use the empty key `()`. Since the key has a constant value, we will always read
// the latest tree.
let mut history_tree: Option<Arc<HistoryTree>> = self.db.zs_get(&history_tree_cf, &());
let mut history_tree = history_tree_cf.zs_get(&());
if history_tree.is_none() {
let legacy_history_tree_cf = self.legacy_history_tree_cf();
// In Zebra 1.4.0 and later, we only update the history tip tree when it has changed (for every block after Heartwood).
// But we write with a `()` key, not a height key.
// So we need to look for the most recent update height if the `()` key has never been written.
history_tree = self
.db
.zs_last_key_value(&history_tree_cf)
.map(|(_key, tree_value): (Height, _)| tree_value);
history_tree = legacy_history_tree_cf
.zs_last_key_value()
.map(|(_height_key, tree_value)| tree_value);
}
history_tree.unwrap_or_default()
Arc::new(HistoryTree::from(history_tree))
}
/// Returns all the history tip trees.
/// We only store the history tree for the tip, so this method is mainly used in tests.
pub fn history_trees_full_tip(
&self,
) -> impl Iterator<Item = (RawBytes, Arc<HistoryTree>)> + '_ {
let history_tree_cf = self.db.cf_handle("history_tree").unwrap();
/// We only store the history tree for the tip, so this method is only used in tests and
/// upgrades.
pub(crate) fn history_trees_full_tip(&self) -> BTreeMap<RawBytes, Arc<HistoryTree>> {
let raw_history_tree_cf = self.raw_history_tree_cf();
self.db.zs_forward_range_iter(&history_tree_cf, ..)
raw_history_tree_cf
.zs_forward_range_iter(..)
.map(|(raw_key, history_tree)| (raw_key, Arc::new(HistoryTree::from(history_tree))))
.collect()
}
// Value pool methods
/// Returns the stored `ValueBalance` for the best chain at the finalized tip height.
pub fn finalized_value_pool(&self) -> ValueBalance<NonNegative> {
let value_pool_cf = self.db.cf_handle("tip_chain_value_pool").unwrap();
self.db
.zs_get(&value_pool_cf, &())
let chain_value_pools_cf = self.chain_value_pools_cf();
chain_value_pools_cf
.zs_get(&())
.unwrap_or_else(ValueBalance::zero)
}
}
@@ -96,11 +159,14 @@ impl DiskWriteBatch {
// History tree methods
/// Updates the history tree for the tip, if it is not empty.
pub fn update_history_tree(&mut self, zebra_db: &ZebraDb, tree: &HistoryTree) {
let history_tree_cf = zebra_db.db.cf_handle("history_tree").unwrap();
///
/// The batch must be written to the database by the caller.
pub fn update_history_tree(&mut self, db: &ZebraDb, tree: &HistoryTree) {
let history_tree_cf = db.history_tree_cf().with_batch_for_writing(self);
if let Some(tree) = tree.as_ref().as_ref() {
self.zs_insert(&history_tree_cf, (), tree);
// The batch is modified by this method and written by the caller.
let _ = history_tree_cf.zs_insert(&(), tree);
}
}
@@ -109,11 +175,20 @@
///
/// From state format 25.3.0 onwards, the history trees are indexed by an empty key,
/// so this method does nothing.
pub fn delete_range_history_tree(&mut self, zebra_db: &ZebraDb, from: &Height, to: &Height) {
let history_tree_cf = zebra_db.db.cf_handle("history_tree").unwrap();
///
/// The batch must be written to the database by the caller.
pub fn delete_range_history_tree(
&mut self,
db: &ZebraDb,
from: &Height,
until_strictly_before: &Height,
) {
let history_tree_cf = db.legacy_history_tree_cf().with_batch_for_writing(self);
// The batch is modified by this method and written by the caller.
//
// TODO: convert zs_delete_range() to take std::ops::RangeBounds
self.zs_delete_range(&history_tree_cf, from, to);
let _ = history_tree_cf.zs_delete_range(from, until_strictly_before);
}
// Value pool methods
@@ -130,17 +205,19 @@
#[allow(clippy::unwrap_in_result)]
pub fn prepare_chain_value_pools_batch(
&mut self,
db: &DiskDb,
db: &ZebraDb,
finalized: &FinalizedBlock,
utxos_spent_by_block: HashMap<transparent::OutPoint, transparent::Utxo>,
value_pool: ValueBalance<NonNegative>,
) -> Result<(), BoxError> {
let tip_chain_value_pool = db.cf_handle("tip_chain_value_pool").unwrap();
let chain_value_pools_cf = db.chain_value_pools_cf().with_batch_for_writing(self);
let FinalizedBlock { block, .. } = finalized;
let new_pool = value_pool.add_block(block.borrow(), &utxos_spent_by_block)?;
self.zs_insert(&tip_chain_value_pool, (), new_pool);
// The batch is modified by this method and written by the caller.
let _ = chain_value_pools_cf.zs_insert(&(), &new_pool);
Ok(())
}
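End to end, these helpers all stage into a single `DiskWriteBatch` that the caller writes once, which keeps each block's database changes atomic. A hedged sketch of the calling shape, with hypothetical local values and simplified error handling; it assumes crate-internal visibility of `zebra_db.db`:

// Hypothetical caller inside the state crate.
let mut batch = DiskWriteBatch::new();

// Each helper borrows the batch via with_batch_for_writing() and stages
// its typed writes; nothing touches the database yet.
batch.update_history_tree(&zebra_db, &tree);
batch.prepare_chain_value_pools_batch(&zebra_db, &finalized, utxos_spent_by_block, value_pool)?;

// One write commits everything staged above.
zebra_db.db.write(batch)?;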


@@ -109,16 +109,6 @@
// Tracing causes false positives on this lint:
// https://github.com/tokio-rs/tracing/issues/553
#![allow(clippy::cognitive_complexity)]
//
// Rust 1.72 has a false positive when nested generics are used inside Arc.
// This makes the `arc_with_non_send_sync` lint trigger on a lot of proptest code.
//
// TODO: remove this allow when Rust 1.73 is stable, because this lint bug is fixed in that release:
// <https://github.com/rust-lang/rust-clippy/issues/11076>
#![cfg_attr(
any(test, feature = "proptest-impl"),
allow(clippy::arc_with_non_send_sync)
)]
#[macro_use]
extern crate tracing;