Merge pull request #652 from zcash/scanner-memuse

`zcash_client_backend`: Refactor tracking of `BatchRunner` heap memory usage
str4d 2022-09-26 17:32:49 +01:00 committed by GitHub
commit 2381a515dd
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
14 changed files with 269 additions and 78 deletions
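
The refactor replaces the `Arc<AtomicUsize>` that each `Batch` used to carry with a `Tasks`/`Task` trait pair, so a `BatchRunner` opts into heap-usage tracking through its new fourth type parameter: `()` tracks nothing, while `WithUsage` keeps a running byte count for in-flight batches. Below is a minimal, self-contained sketch of that pattern, not the crate's actual code: the hypothetical `HeapSize` trait stands in for `memuse::DynamicUsage`, `std::thread::spawn` stands in for `rayon::spawn_fifo`, and `ToyBatch` is a made-up task.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

/// Stand-in for `memuse::DynamicUsage`: heap bytes currently held by a value.
trait HeapSize {
    fn heap_size(&self) -> usize;
}

/// A unit of batch work that can run on a background thread.
trait Task: Send + 'static {
    fn run(self);
}

/// A pluggable tracker for in-flight tasks.
trait Tasks<Item: Task> {
    type Task: Task;
    fn new() -> Self;
    fn add_task(&self, item: Item) -> Self::Task;
    fn run_task(&self, item: Item) {
        let task = self.add_task(item);
        thread::spawn(move || task.run());
    }
}

/// No tracking: the item is its own task.
impl<Item: Task> Tasks<Item> for () {
    type Task = Item;
    fn new() -> Self {}
    fn add_task(&self, item: Item) -> Self::Task {
        item
    }
}

/// Usage tracking: a task adds its size when spawned and subtracts it when it finishes.
struct WithUsage {
    running_usage: Arc<AtomicUsize>,
}

struct WithUsageTask<Item> {
    item: Item,
    own_usage: usize,
    running_usage: Arc<AtomicUsize>,
}

impl<Item: Task + HeapSize> Tasks<Item> for WithUsage {
    type Task = WithUsageTask<Item>;
    fn new() -> Self {
        Self { running_usage: Arc::new(AtomicUsize::new(0)) }
    }
    fn add_task(&self, item: Item) -> Self::Task {
        // Count the item's stack size plus whatever it holds on the heap.
        let own_usage = std::mem::size_of::<Item>() + item.heap_size();
        self.running_usage.fetch_add(own_usage, Ordering::SeqCst);
        WithUsageTask { item, own_usage, running_usage: self.running_usage.clone() }
    }
}

impl<Item: Task> Task for WithUsageTask<Item> {
    fn run(self) {
        self.item.run();
        // The item is dropped here, so its cost leaves the running total.
        self.running_usage.fetch_sub(self.own_usage, Ordering::SeqCst);
    }
}

/// A made-up batch that pretends to trial-decrypt for a while.
struct ToyBatch(Vec<u8>);
impl Task for ToyBatch {
    fn run(self) {
        thread::sleep(Duration::from_millis(50));
    }
}
impl HeapSize for ToyBatch {
    fn heap_size(&self) -> usize {
        self.0.capacity()
    }
}

fn main() {
    let tracker = <WithUsage as Tasks<ToyBatch>>::new();
    tracker.run_task(ToyBatch(vec![0; 4096]));
    println!("in flight: {} bytes", tracker.running_usage.load(Ordering::Relaxed));
    thread::sleep(Duration::from_millis(100));
    println!("after completion: {} bytes", tracker.running_usage.load(Ordering::Relaxed));
}
```

Running the sketch prints a non-zero in-flight figure while the task sleeps and zero once it completes, which is the same signal the real `BatchRunner::dynamic_usage` folds in via `running_tasks.dynamic_usage()` in the diff below.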

@@ -22,4 +22,4 @@ codegen-units = 1
 zcash_encoding = { path = "components/zcash_encoding" }
 zcash_note_encryption = { path = "components/zcash_note_encryption" }
 orchard = { git = "https://github.com/zcash/orchard.git", rev = "33f1c1141e50adb68715f3359bd75378b4756cca" }
-group = { git = "https://github.com/zkcrypto/group.git", rev = "a7f3ceb2373e9fe536996f7b4d55c797f3e667f0" }
+group = { git = "https://github.com/zkcrypto/group.git", rev = "f61e3e420ed1220c8f1f80988f8c6c5e202d8715" }

@@ -232,7 +232,7 @@ where
     // Get the nullifiers for the notes we are tracking
     let mut nullifiers = data.get_nullifiers()?;
 
-    let mut batch_runner = BatchRunner::new(
+    let mut batch_runner = BatchRunner::<_, _, _, ()>::new(
         100,
         dfvks
             .iter()

@@ -97,8 +97,118 @@ impl<A, D: Domain> DynamicUsage for BatchReceiver<A, D> {
     }
 }
 
+/// A tracker for the batch scanning tasks that are currently running.
+///
+/// This enables a [`BatchRunner`] to be optionally configured to track heap memory usage.
+pub(crate) trait Tasks<Item> {
+    type Task: Task;
+    fn new() -> Self;
+    fn add_task(&self, item: Item) -> Self::Task;
+    fn run_task(&self, item: Item) {
+        let task = self.add_task(item);
+        rayon::spawn_fifo(|| task.run());
+    }
+}
+
+/// A batch scanning task.
+pub(crate) trait Task: Send + 'static {
+    fn run(self);
+}
+
+impl<Item: Task> Tasks<Item> for () {
+    type Task = Item;
+
+    fn new() -> Self {}
+
+    fn add_task(&self, item: Item) -> Self::Task {
+        // Return the item itself as the task; we aren't tracking anything about it, so
+        // there is no need to wrap it in a newtype.
+        item
+    }
+}
+
+/// A task tracker that measures heap usage.
+///
+/// This struct implements `DynamicUsage` without any item bounds, but that works because
+/// it only implements `Tasks` for items that implement `DynamicUsage`.
+pub(crate) struct WithUsage {
+    // The current heap usage for all running tasks.
+    running_usage: Arc<AtomicUsize>,
+}
+
+impl DynamicUsage for WithUsage {
+    fn dynamic_usage(&self) -> usize {
+        self.running_usage.load(Ordering::Relaxed)
+    }
+
+    fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) {
+        // Tasks are relatively short-lived, so we accept the inaccuracy of treating the
+        // tasks's approximate usage as its bounds.
+        let usage = self.dynamic_usage();
+        (usage, Some(usage))
+    }
+}
+
+impl<Item: Task + DynamicUsage> Tasks<Item> for WithUsage {
+    type Task = WithUsageTask<Item>;
+
+    fn new() -> Self {
+        Self {
+            running_usage: Arc::new(AtomicUsize::new(0)),
+        }
+    }
+
+    fn add_task(&self, item: Item) -> Self::Task {
+        // Create the task that will move onto the heap with the batch item.
+        let mut task = WithUsageTask {
+            item,
+            own_usage: 0,
+            running_usage: self.running_usage.clone(),
+        };
+
+        // `rayon::spawn_fifo` creates a `HeapJob` holding a closure. The size of a
+        // closure is (to good approximation) the size of the captured environment, which
+        // in this case is two moved variables:
+        // - An `Arc<Registry>`, which is a pointer to data that is amortized over the
+        //   entire `rayon` thread pool, so we only count the pointer size here.
+        // - The spawned closure, which in our case moves `task` into it.
+        task.own_usage =
+            mem::size_of::<Arc<()>>() + mem::size_of_val(&task) + task.item.dynamic_usage();
+
+        // Approximate now as when the heap cost of this running batch begins. In practice
+        // this is fine, because `Self::add_task` is called from `Self::run_task` which
+        // immediately moves the task to the heap.
+        self.running_usage
+            .fetch_add(task.own_usage, Ordering::SeqCst);
+
+        task
+    }
+}
+
+/// A task that will clean up its own heap usage from the overall running usage once it is
+/// complete.
+pub(crate) struct WithUsageTask<Item> {
+    /// The item being run.
+    item: Item,
+    /// Size of this task on the heap. We assume that the size of the task does not change
+    /// once it has been created, to avoid needing to maintain bidirectional channels
+    /// between [`WithUsage`] and its tasks.
+    own_usage: usize,
+    /// Pointer to the parent [`WithUsage`]'s heap usage tracker for running tasks.
+    running_usage: Arc<AtomicUsize>,
+}
+
+impl<Item: Task> Task for WithUsageTask<Item> {
+    fn run(self) {
+        // Run the item.
+        self.item.run();
+
+        // Signal that the heap memory for this task has been freed.
+        self.running_usage
+            .fetch_sub(self.own_usage, Ordering::SeqCst);
+    }
+}
+
 /// A batch of outputs to trial decrypt.
-struct Batch<A, D: BatchDomain, Output: ShieldedOutput<D, COMPACT_NOTE_SIZE>> {
+pub(crate) struct Batch<A, D: BatchDomain, Output: ShieldedOutput<D, COMPACT_NOTE_SIZE>> {
     tags: Vec<A>,
     ivks: Vec<D::IncomingViewingKey>,
     /// We currently store outputs and repliers as parallel vectors, because
@@ -110,33 +220,35 @@ struct Batch<A, D: BatchDomain, Output: ShieldedOutput<D, COMPACT_NOTE_SIZE>> {
     /// (that is captured in the outer `OutputIndex` of each `OutputReplier`).
     outputs: Vec<(D, Output)>,
     repliers: Vec<OutputReplier<A, D>>,
-    // Pointer to the parent `BatchRunner`'s heap usage tracker for running batches.
-    running_usage: Arc<AtomicUsize>,
-}
-
-fn base_vec_usage<T>(c: &Vec<T>) -> usize {
-    c.capacity() * mem::size_of::<T>()
 }
 
 impl<A, D, Output> DynamicUsage for Batch<A, D, Output>
 where
-    D: BatchDomain,
-    Output: ShieldedOutput<D, COMPACT_NOTE_SIZE>,
+    A: DynamicUsage,
+    D: BatchDomain + DynamicUsage,
+    D::IncomingViewingKey: DynamicUsage,
+    Output: ShieldedOutput<D, COMPACT_NOTE_SIZE> + DynamicUsage,
 {
     fn dynamic_usage(&self) -> usize {
-        // We don't have a `DynamicUsage` bound on `D::IncomingViewingKey`, `D`, or
-        // `Output`, and we can't use newtypes because the batch decryption API takes
-        // slices. But we know that we don't allocate memory inside either of these, so we
-        // just compute the size directly.
-        base_vec_usage(&self.ivks) + base_vec_usage(&self.outputs) + self.repliers.dynamic_usage()
+        self.tags.dynamic_usage()
+            + self.ivks.dynamic_usage()
+            + self.outputs.dynamic_usage()
+            + self.repliers.dynamic_usage()
     }
 
     fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) {
-        let base_usage = base_vec_usage(&self.ivks) + base_vec_usage(&self.outputs);
-        let bounds = self.repliers.dynamic_usage_bounds();
+        let (tags_lower, tags_upper) = self.tags.dynamic_usage_bounds();
+        let (ivks_lower, ivks_upper) = self.ivks.dynamic_usage_bounds();
+        let (outputs_lower, outputs_upper) = self.outputs.dynamic_usage_bounds();
+        let (repliers_lower, repliers_upper) = self.repliers.dynamic_usage_bounds();
         (
-            base_usage + bounds.0,
-            bounds.1.map(|upper| base_usage + upper),
+            tags_lower + ivks_lower + outputs_lower + repliers_lower,
+            tags_upper
+                .zip(ivks_upper)
+                .zip(outputs_upper)
+                .zip(repliers_upper)
+                .map(|(((a, b), c), d)| a + b + c + d),
         )
     }
 }
@@ -148,18 +260,13 @@ where
     Output: ShieldedOutput<D, COMPACT_NOTE_SIZE>,
 {
     /// Constructs a new batch.
-    fn new(
-        tags: Vec<A>,
-        ivks: Vec<D::IncomingViewingKey>,
-        running_usage: Arc<AtomicUsize>,
-    ) -> Self {
+    fn new(tags: Vec<A>, ivks: Vec<D::IncomingViewingKey>) -> Self {
         assert_eq!(tags.len(), ivks.len());
         Self {
             tags,
             ivks,
             outputs: vec![],
             repliers: vec![],
-            running_usage,
         }
     }
@@ -167,22 +274,26 @@ where
     fn is_empty(&self) -> bool {
         self.outputs.is_empty()
     }
+}
 
+impl<A, D, Output> Task for Batch<A, D, Output>
+where
+    A: Clone + Send + 'static,
+    D: BatchDomain + Send + 'static,
+    D::IncomingViewingKey: Send,
+    D::Memo: Send,
+    D::Note: Send,
+    D::Recipient: Send,
+    Output: ShieldedOutput<D, COMPACT_NOTE_SIZE> + Send + 'static,
+{
     /// Runs the batch of trial decryptions, and reports the results.
     fn run(self) {
-        // Approximate now as when the heap cost of this running batch begins. We use the
-        // size of `self` as a lower bound on the actual heap memory allocated by the
-        // rayon threadpool to store this `Batch`.
-        let own_usage = std::mem::size_of_val(&self) + self.dynamic_usage();
-        self.running_usage.fetch_add(own_usage, Ordering::SeqCst);
-
         // Deconstruct self so we can consume the pieces individually.
         let Self {
             tags,
             ivks,
             outputs,
             repliers,
-            running_usage,
         } = self;
 
         assert_eq!(outputs.len(), repliers.len());
@@ -209,9 +320,6 @@ where
                 }
             }
         }
-
-        // Signal that the heap memory for this batch is about to be freed.
-        running_usage.fetch_sub(own_usage, Ordering::SeqCst);
     }
 }
@@ -253,29 +361,37 @@ impl DynamicUsage for ResultKey {
 }
 
 /// Logic to run batches of trial decryptions on the global threadpool.
-pub(crate) struct BatchRunner<A, D: BatchDomain, Output: ShieldedOutput<D, COMPACT_NOTE_SIZE>> {
+pub(crate) struct BatchRunner<A, D, Output, T>
+where
+    D: BatchDomain,
+    Output: ShieldedOutput<D, COMPACT_NOTE_SIZE>,
+    T: Tasks<Batch<A, D, Output>>,
+{
     batch_size_threshold: usize,
     // The batch currently being accumulated.
     acc: Batch<A, D, Output>,
-    // The dynamic memory usage of the running batches.
-    running_usage: Arc<AtomicUsize>,
+    // The running batches.
+    running_tasks: T,
     // Receivers for the results of the running batches.
    pending_results: HashMap<ResultKey, BatchReceiver<A, D>>,
 }
 
-impl<A, D, Output> DynamicUsage for BatchRunner<A, D, Output>
+impl<A, D, Output, T> DynamicUsage for BatchRunner<A, D, Output, T>
 where
-    D: BatchDomain,
-    Output: ShieldedOutput<D, COMPACT_NOTE_SIZE>,
+    A: DynamicUsage,
+    D: BatchDomain + DynamicUsage,
+    D::IncomingViewingKey: DynamicUsage,
+    Output: ShieldedOutput<D, COMPACT_NOTE_SIZE> + DynamicUsage,
+    T: Tasks<Batch<A, D, Output>> + DynamicUsage,
 {
     fn dynamic_usage(&self) -> usize {
         self.acc.dynamic_usage()
-            + self.running_usage.load(Ordering::Relaxed)
+            + self.running_tasks.dynamic_usage()
            + self.pending_results.dynamic_usage()
     }
 
     fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) {
-        let running_usage = self.running_usage.load(Ordering::Relaxed);
+        let running_usage = self.running_tasks.dynamic_usage();
 
         let bounds = (
             self.acc.dynamic_usage_bounds(),
@@ -292,11 +408,12 @@ where
     }
 }
 
-impl<A, D, Output> BatchRunner<A, D, Output>
+impl<A, D, Output, T> BatchRunner<A, D, Output, T>
 where
     A: Clone,
     D: BatchDomain,
     Output: ShieldedOutput<D, COMPACT_NOTE_SIZE>,
+    T: Tasks<Batch<A, D, Output>>,
 {
     /// Constructs a new batch runner for the given incoming viewing keys.
     pub(crate) fn new(
@@ -304,17 +421,16 @@ where
         ivks: impl Iterator<Item = (A, D::IncomingViewingKey)>,
     ) -> Self {
         let (tags, ivks) = ivks.unzip();
-        let running_usage = Arc::new(AtomicUsize::new(0));
         Self {
             batch_size_threshold,
-            acc: Batch::new(tags, ivks, running_usage.clone()),
-            running_usage,
+            acc: Batch::new(tags, ivks),
+            running_tasks: T::new(),
             pending_results: HashMap::default(),
         }
     }
 }
 
-impl<A, D, Output> BatchRunner<A, D, Output>
+impl<A, D, Output, T> BatchRunner<A, D, Output, T>
 where
     A: Clone + Send + 'static,
     D: BatchDomain + Send + 'static,
@@ -323,6 +439,7 @@ where
     D::Note: Send,
     D::Recipient: Send,
     Output: ShieldedOutput<D, COMPACT_NOTE_SIZE> + Clone + Send + 'static,
+    T: Tasks<Batch<A, D, Output>>,
 {
     /// Batches the given outputs for trial decryption.
     ///
@@ -355,13 +472,9 @@ where
     /// Subsequent calls to `Self::add_outputs` will be accumulated into a new batch.
     pub(crate) fn flush(&mut self) {
         if !self.acc.is_empty() {
-            let mut batch = Batch::new(
-                self.acc.tags.clone(),
-                self.acc.ivks.clone(),
-                self.running_usage.clone(),
-            );
+            let mut batch = Batch::new(self.acc.tags.clone(), self.acc.ivks.clone());
             mem::swap(&mut batch, &mut self.acc);
-            rayon::spawn_fifo(|| batch.run());
+            self.running_tasks.run_task(batch);
         }
     }
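
The `flush` change above keeps the existing accumulate-then-swap idiom; only the hand-off at the end changes, from calling `rayon::spawn_fifo` directly to going through `Tasks::run_task`. A tiny sketch of the idiom in isolation (hypothetical `Batch`/`Runner` types, not the crate's):

```rust
use std::mem;

struct Batch {
    outputs: Vec<u32>,
}

impl Batch {
    fn new() -> Self {
        Batch { outputs: vec![] }
    }
    fn is_empty(&self) -> bool {
        self.outputs.is_empty()
    }
}

struct Runner {
    acc: Batch,
    spawned: Vec<Batch>, // stand-in for `running_tasks`
}

impl Runner {
    fn flush(&mut self) {
        if !self.acc.is_empty() {
            // Swap a fresh accumulator in; the full batch moves out to be run.
            let mut batch = Batch::new();
            mem::swap(&mut batch, &mut self.acc);
            self.spawned.push(batch); // stand-in for `self.running_tasks.run_task(batch)`
        }
    }
}

fn main() {
    let mut r = Runner { acc: Batch { outputs: vec![1, 2, 3] }, spawned: vec![] };
    r.flush();
    assert!(r.acc.is_empty());
    println!("flushed {} batch(es)", r.spawned.len());
}
```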

@@ -20,7 +20,7 @@ use zcash_primitives::{
 
 use crate::{
     proto::compact_formats::CompactBlock,
-    scan::BatchRunner,
+    scan::{Batch, BatchRunner, Tasks},
     wallet::{WalletShieldedOutput, WalletShieldedSpend, WalletTx},
 };
@@ -166,7 +166,7 @@ pub fn scan_block<P: consensus::Parameters + Send + 'static, K: ScanningKey>(
     tree: &mut CommitmentTree<Node>,
     existing_witnesses: &mut [&mut IncrementalWitness<Node>],
 ) -> Vec<WalletTx<K::Nf>> {
-    scan_block_with_runner(
+    scan_block_with_runner::<_, _, ()>(
         params,
         block,
         vks,
@@ -177,16 +177,18 @@ pub fn scan_block<P: consensus::Parameters + Send + 'static, K: ScanningKey>(
     )
 }
 
-type TaggedBatchRunner<P, S> =
-    BatchRunner<(AccountId, S), SaplingDomain<P>, CompactOutputDescription>;
+type TaggedBatch<P, S> = Batch<(AccountId, S), SaplingDomain<P>, CompactOutputDescription>;
+type TaggedBatchRunner<P, S, T> =
+    BatchRunner<(AccountId, S), SaplingDomain<P>, CompactOutputDescription, T>;
 
-pub(crate) fn add_block_to_runner<P, S>(
+pub(crate) fn add_block_to_runner<P, S, T>(
     params: &P,
     block: CompactBlock,
-    batch_runner: &mut TaggedBatchRunner<P, S>,
+    batch_runner: &mut TaggedBatchRunner<P, S, T>,
 ) where
     P: consensus::Parameters + Send + 'static,
     S: Clone + Send + 'static,
+    T: Tasks<TaggedBatch<P, S>>,
 {
     let block_hash = block.hash();
     let block_height = block.height();
@@ -211,14 +213,18 @@ pub(crate) fn add_block_to_runner<P, S>(
     }
 }
 
-pub(crate) fn scan_block_with_runner<P: consensus::Parameters + Send + 'static, K: ScanningKey>(
+pub(crate) fn scan_block_with_runner<
+    P: consensus::Parameters + Send + 'static,
+    K: ScanningKey,
+    T: Tasks<TaggedBatch<P, K::Scope>> + Sync,
+>(
     params: &P,
     block: CompactBlock,
     vks: &[(&AccountId, &K)],
     nullifiers: &[(AccountId, Nullifier)],
     tree: &mut CommitmentTree<Node>,
     existing_witnesses: &mut [&mut IncrementalWitness<Node>],
-    mut batch_runner: Option<&mut TaggedBatchRunner<P, K::Scope>>,
+    mut batch_runner: Option<&mut TaggedBatchRunner<P, K::Scope, T>>,
 ) -> Vec<WalletTx<K::Nf>> {
     let mut wtxs: Vec<WalletTx<K::Nf>> = vec![];
     let block_height = block.height();
@@ -554,7 +560,7 @@ mod tests {
         let mut tree = CommitmentTree::empty();
 
         let mut batch_runner = if scan_multithreaded {
-            let mut runner = BatchRunner::new(
+            let mut runner = BatchRunner::<_, _, _, ()>::new(
                 10,
                 extfvk
                     .to_sapling_keys()
@@ -618,7 +624,7 @@ mod tests {
         let mut tree = CommitmentTree::empty();
 
         let mut batch_runner = if scan_multithreaded {
-            let mut runner = BatchRunner::new(
+            let mut runner = BatchRunner::<_, _, _, ()>::new(
                 10,
                 extfvk
                     .to_sapling_keys()

@@ -36,6 +36,20 @@ and this library adheres to Rust's notion of
   - `JSDescription::net_value`
 - Added in `zcash_primitives::transaction::components::transparent`
   - `Bundle::value_balance`
+- Implementations of `memuse::DynamicUsage` for the following types:
+  - `zcash_primitives::block::BlockHash`
+  - `zcash_primitives::consensus`:
+    - `BlockHeight`
+    - `MainNetwork`, `TestNetwork`, `Network`
+    - `NetworkUpgrade`, `BranchId`
+  - `zcash_primitives::sapling`:
+    - `keys::Scope`
+    - `note_encryption::SaplingDomain`
+  - `zcash_primitives::transaction`:
+    - `TxId`
+    - `components::sapling::CompactOutputDescription`
+    - `components::sapling::{OutputDescription, OutputDescriptionV5}`
+  - `zcash_primitives::zip32::AccountId`
 
 ### Changed
 - Migrated to `group 0.13`.
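
Most of the types in the list above are fixed-size, so their new impls come from `memuse::impl_no_dynamic_usage!` and report zero heap usage (the `Amount` change further down shows exactly what that macro replaces). A small sketch of what the impls give a downstream consumer, assuming a `zcash_primitives` build that already includes them:

```rust
// Hypothetical consumer code; the listed impls are still in the unreleased section above.
use memuse::DynamicUsage;
use zcash_primitives::{consensus::BlockHeight, transaction::TxId};

fn main() {
    // Fixed-size types report zero heap usage...
    assert_eq!(BlockHeight::from(1_000_000u32).dynamic_usage(), 0);
    assert_eq!(TxId::from_bytes([0u8; 32]).dynamic_usage(), 0);

    // ...so containers of them are cheap to measure: memuse's `Vec<T>` impl
    // only needs to account for the vector's own allocation.
    let txids = vec![TxId::from_bytes([1u8; 32]); 100];
    println!("heap bytes held by `txids`: {}", txids.dynamic_usage());
}
```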

@@ -30,13 +30,13 @@ chacha20poly1305 = "0.10"
 equihash = { version = "0.2", path = "../components/equihash" }
 ff = "0.12"
 fpe = "0.5"
-group = "0.12"
+group = { version = "0.12", features = ["wnaf-memuse"] }
 hdwallet = { version = "0.3.1", optional = true }
 hex = "0.4"
 incrementalmerkletree = "0.3"
 jubjub = "0.9"
 lazy_static = "1"
-memuse = "0.2"
+memuse = "0.2.1"
 nonempty = "0.7"
 orchard = "0.2"
 proptest = { version = "1.0.0", optional = true }

@@ -1,6 +1,7 @@
 //! Structs and methods for handling Zcash block headers.
 
 use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
+use memuse::DynamicUsage;
 use sha2::{Digest, Sha256};
 use std::fmt;
 use std::io::{self, Read, Write};
@@ -12,6 +13,8 @@ pub use equihash;
 #[derive(Clone, Copy, PartialEq, Eq, Hash)]
 pub struct BlockHash(pub [u8; 32]);
 
+memuse::impl_no_dynamic_usage!(BlockHash);
+
 impl fmt::Debug for BlockHash {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         // The (byte-flipped) hex string is more useful than the raw bytes, because we can

@@ -1,5 +1,6 @@
 //! Consensus logic and parameters.
 
+use memuse::DynamicUsage;
 use std::cmp::{Ord, Ordering};
 use std::convert::TryFrom;
 use std::fmt;
@@ -14,6 +15,8 @@ use crate::constants;
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
 pub struct BlockHeight(u32);
 
+memuse::impl_no_dynamic_usage!(BlockHeight);
+
 pub const H0: BlockHeight = BlockHeight(0);
 
 impl BlockHeight {
@@ -190,6 +193,8 @@ pub trait Parameters: Clone {
 #[derive(PartialEq, Eq, Copy, Clone, Debug)]
 pub struct MainNetwork;
 
+memuse::impl_no_dynamic_usage!(MainNetwork);
+
 pub const MAIN_NETWORK: MainNetwork = MainNetwork;
 
 impl Parameters for MainNetwork {
@@ -239,6 +244,8 @@ impl Parameters for MainNetwork {
 #[derive(PartialEq, Eq, Copy, Clone, Debug)]
 pub struct TestNetwork;
 
+memuse::impl_no_dynamic_usage!(TestNetwork);
+
 pub const TEST_NETWORK: TestNetwork = TestNetwork;
 
 impl Parameters for TestNetwork {
@@ -290,6 +297,8 @@ pub enum Network {
     TestNetwork,
 }
 
+memuse::impl_no_dynamic_usage!(Network);
+
 impl Parameters for Network {
     fn activation_height(&self, nu: NetworkUpgrade) -> Option<BlockHeight> {
         match self {
@@ -387,6 +396,8 @@ pub enum NetworkUpgrade {
     ZFuture,
 }
 
+memuse::impl_no_dynamic_usage!(NetworkUpgrade);
+
 impl fmt::Display for NetworkUpgrade {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
@@ -467,6 +478,8 @@ pub enum BranchId {
     ZFuture,
 }
 
+memuse::impl_no_dynamic_usage!(BranchId);
+
 impl TryFrom<u32> for BranchId {
     type Error = &'static str;

@@ -13,6 +13,7 @@ use crate::{
 };
 use ff::PrimeField;
 use group::{Group, GroupEncoding};
+use memuse::DynamicUsage;
 use subtle::CtOption;
 
 use super::{NullifierDerivingKey, PaymentAddress, ProofGenerationKey, SaplingIvk, ViewingKey};
@@ -201,6 +202,8 @@ pub enum Scope {
     Internal,
 }
 
+memuse::impl_no_dynamic_usage!(Scope);
+
 /// A Sapling key that provides the capability to view incoming and outgoing transactions.
 ///
 /// This key is useful anywhere you need to maintain accurate balance, but do not want the

@@ -7,6 +7,7 @@ use byteorder::{LittleEndian, WriteBytesExt};
 use ff::PrimeField;
 use group::{cofactor::CofactorGroup, GroupEncoding, WnafBase, WnafScalar};
 use jubjub::{AffinePoint, ExtendedPoint};
+use memuse::DynamicUsage;
 use rand_core::RngCore;
 
 use zcash_note_encryption::{
@@ -39,6 +40,16 @@ type PreparedScalar = WnafScalar<jubjub::Scalar, PREPARED_WINDOW_SIZE>;
 #[derive(Clone, Debug)]
 pub struct PreparedIncomingViewingKey(PreparedScalar);
 
+impl DynamicUsage for PreparedIncomingViewingKey {
+    fn dynamic_usage(&self) -> usize {
+        self.0.dynamic_usage()
+    }
+
+    fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) {
+        self.0.dynamic_usage_bounds()
+    }
+}
+
 impl PreparedIncomingViewingKey {
     /// Performs the necessary precomputations to use a `SaplingIvk` for note decryption.
     pub fn new(ivk: &SaplingIvk) -> Self {
@@ -146,6 +157,21 @@ pub struct SaplingDomain<P: consensus::Parameters> {
     height: BlockHeight,
 }
 
+impl<P: consensus::Parameters + DynamicUsage> DynamicUsage for SaplingDomain<P> {
+    fn dynamic_usage(&self) -> usize {
+        self.params.dynamic_usage() + self.height.dynamic_usage()
+    }
+
+    fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) {
+        let (params_lower, params_upper) = self.params.dynamic_usage_bounds();
+        let (height_lower, height_upper) = self.height.dynamic_usage_bounds();
+        (
+            params_lower + height_lower,
+            params_upper.zip(height_upper).map(|(a, b)| a + b),
+        )
+    }
+}
+
 impl<P: consensus::Parameters> SaplingDomain<P> {
     pub fn for_height(params: P, height: BlockHeight) -> Self {
         Self { params, height }
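
The bounds composition used here (and in the `Batch` impl earlier in this commit) follows a general idiom: lower bounds always add, while an upper bound exists only if every component reports one. A minimal sketch with a hypothetical two-field type:

```rust
use memuse::DynamicUsage;

// Hypothetical type, used only to illustrate the idiom.
struct Pair<A, B> {
    a: A,
    b: B,
}

impl<A: DynamicUsage, B: DynamicUsage> DynamicUsage for Pair<A, B> {
    fn dynamic_usage(&self) -> usize {
        self.a.dynamic_usage() + self.b.dynamic_usage()
    }

    fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) {
        let (a_lower, a_upper) = self.a.dynamic_usage_bounds();
        let (b_lower, b_upper) = self.b.dynamic_usage_bounds();
        (
            // Lower bounds always add; the upper bound becomes `None` as soon
            // as either component's upper bound is unknown.
            a_lower + b_lower,
            a_upper.zip(b_upper).map(|(a, b)| a + b),
        )
    }
}

fn main() {
    let p = Pair { a: vec![0u8; 32], b: vec![0u64; 4] };
    println!("usage = {}, bounds = {:?}", p.dynamic_usage(), p.dynamic_usage_bounds());
}
```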

@@ -2,6 +2,7 @@ use std::convert::TryFrom;
 use std::iter::Sum;
 use std::ops::{Add, AddAssign, Neg, Sub, SubAssign};
 
+use memuse::DynamicUsage;
 use orchard::value as orchard;
 
 pub const COIN: i64 = 1_0000_0000;
@@ -23,17 +24,7 @@ pub const DEFAULT_FEE: Amount = Amount(1000);
 #[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Eq, Ord)]
 pub struct Amount(i64);
 
-impl memuse::DynamicUsage for Amount {
-    #[inline(always)]
-    fn dynamic_usage(&self) -> usize {
-        0
-    }
-
-    #[inline(always)]
-    fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) {
-        (0, Some(0))
-    }
-}
+memuse::impl_no_dynamic_usage!(Amount);
 
 impl Amount {
     /// Returns a zero-valued Amount.

@@ -2,6 +2,8 @@ use core::fmt::Debug;
 use ff::PrimeField;
 use group::GroupEncoding;
+use memuse::DynamicUsage;
+
 use std::io::{self, Read, Write};
 
 use zcash_note_encryption::{
@@ -261,6 +263,16 @@ pub struct OutputDescription<Proof> {
     pub zkproof: Proof,
 }
 
+impl<Proof: DynamicUsage> DynamicUsage for OutputDescription<Proof> {
+    fn dynamic_usage(&self) -> usize {
+        self.zkproof.dynamic_usage()
+    }
+
+    fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) {
+        self.zkproof.dynamic_usage_bounds()
+    }
+}
+
 impl<P: consensus::Parameters, A> ShieldedOutput<SaplingDomain<P>, ENC_CIPHERTEXT_SIZE>
     for OutputDescription<A>
 {
@@ -348,6 +360,8 @@ pub struct OutputDescriptionV5 {
     pub out_ciphertext: [u8; 80],
 }
 
+memuse::impl_no_dynamic_usage!(OutputDescriptionV5);
+
 impl OutputDescriptionV5 {
     pub fn read<R: Read>(mut reader: &mut R) -> io::Result<Self> {
         let cv = read_point(&mut reader, "cv")?;
@@ -395,6 +409,8 @@ pub struct CompactOutputDescription {
     pub enc_ciphertext: [u8; COMPACT_NOTE_SIZE],
 }
 
+memuse::impl_no_dynamic_usage!(CompactOutputDescription);
+
 impl<A> From<OutputDescription<A>> for CompactOutputDescription {
     fn from(out: OutputDescription<A>) -> CompactOutputDescription {
         CompactOutputDescription {

@@ -13,6 +13,7 @@ mod tests;
 use blake2b_simd::Hash as Blake2bHash;
 use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
 use ff::PrimeField;
+use memuse::DynamicUsage;
 use std::convert::TryFrom;
 use std::fmt;
 use std::fmt::Debug;
@@ -65,6 +66,8 @@ const ZFUTURE_TX_VERSION: u32 = 0x0000FFFF;
 #[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)]
 pub struct TxId([u8; 32]);
 
+memuse::impl_no_dynamic_usage!(TxId);
+
 impl fmt::Debug for TxId {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         // The (byte-flipped) hex string is more useful than the raw bytes, because we can

@@ -6,6 +6,7 @@ use aes::Aes256;
 use blake2b_simd::Params as Blake2bParams;
 use byteorder::{ByteOrder, LittleEndian, ReadBytesExt, WriteBytesExt};
 use fpe::ff1::{BinaryNumeralString, FF1};
+use memuse::DynamicUsage;
 use std::ops::AddAssign;
 use subtle::{Choice, ConditionallySelectable};
 
@@ -31,6 +32,8 @@ pub const ZIP32_SAPLING_INT_PERSONALIZATION: &[u8; 16] = b"Zcash_SaplingInt";
 #[derive(Debug, Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub struct AccountId(u32);
 
+memuse::impl_no_dynamic_usage!(AccountId);
+
 impl From<u32> for AccountId {
     fn from(id: u32) -> Self {
         Self(id)