fix(shielded): use RwLock for note commitment tree root caches (#3809)

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
This commit is contained in:
teor 2022-03-10 09:26:49 +10:00 committed by GitHub
parent 03d123b428
commit 3291db35c0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 139 additions and 46 deletions

View File

@ -10,16 +10,14 @@
//!
//! A root of a note commitment tree is associated with each treestate.
#![allow(clippy::unit_arg)]
#![allow(clippy::derive_hash_xor_eq)]
#![allow(dead_code)]
use std::{
cell::Cell,
convert::TryFrom,
fmt,
hash::{Hash, Hasher},
io,
ops::Deref,
};
use bitvec::prelude::*;
@ -213,7 +211,7 @@ pub enum NoteCommitmentTreeError {
}
/// Orchard Incremental Note Commitment Tree
#[derive(Clone, Debug, Serialize, Deserialize)]
#[derive(Debug, Serialize, Deserialize)]
pub struct NoteCommitmentTree {
/// The tree represented as a Frontier.
///
@ -244,12 +242,9 @@ pub struct NoteCommitmentTree {
/// serialized with the tree). This is particularly important since we decided
/// to instantiate the trees from the genesis block, for simplicity.
///
/// [`Cell`] offers interior mutability (it works even with a non-mutable
/// reference to the tree) but it prevents the tree (and anything that uses it)
/// from being shared between threads. If this ever becomes an issue we can
/// leave caching to the callers (which requires much more code), or replace
/// `Cell` with `Arc<Mutex<_>>` (and be careful of deadlocks and async code.)
cached_root: Cell<Option<Root>>,
/// We use a [`RwLock`] for this cache, because it is only written once per tree update.
/// Each tree has its own cached root; a new lock is created for each clone.
cached_root: std::sync::RwLock<Option<Root>>,
}
impl NoteCommitmentTree {
@ -263,7 +258,13 @@ impl NoteCommitmentTree {
pub fn append(&mut self, cm_x: pallas::Base) -> Result<(), NoteCommitmentTreeError> {
if self.inner.append(&cm_x.into()) {
// Invalidate cached root
self.cached_root.replace(None);
let cached_root = self
.cached_root
.get_mut()
.expect("a thread that previously held exclusive lock access panicked");
*cached_root = None;
Ok(())
} else {
Err(NoteCommitmentTreeError::FullTree)
@ -273,13 +274,28 @@ impl NoteCommitmentTree {
/// Returns the current root of the tree, used as an anchor in Orchard
/// shielded transactions.
pub fn root(&self) -> Root {
match self.cached_root.get() {
// Return cached root
Some(root) => root,
if let Some(root) = self
.cached_root
.read()
.expect("a thread that previously held exclusive lock access panicked")
.deref()
{
// Return cached root.
return *root;
}
// Get exclusive access, compute the root, and cache it.
let mut write_root = self
.cached_root
.write()
.expect("a thread that previously held exclusive lock access panicked");
match write_root.deref() {
// Another thread got write access first, return cached root.
Some(root) => *root,
None => {
// Compute root and cache it
// Compute root and cache it.
let root = Root(self.inner.root().0);
self.cached_root.replace(Some(root));
*write_root = Some(root);
root
}
}
@ -308,6 +324,21 @@ impl NoteCommitmentTree {
}
}
impl Clone for NoteCommitmentTree {
/// Clones the inner tree, and creates a new `RwLock` with the cloned root data.
fn clone(&self) -> Self {
let cached_root = *self
.cached_root
.read()
.expect("a thread that previously held exclusive lock access panicked");
Self {
inner: self.inner.clone(),
cached_root: std::sync::RwLock::new(cached_root),
}
}
}
impl Default for NoteCommitmentTree {
fn default() -> Self {
Self {

View File

@ -10,14 +10,13 @@
//!
//! A root of a note commitment tree is associated with each treestate.
#![allow(clippy::unit_arg)]
#![allow(dead_code)]
use std::{
cell::Cell,
fmt,
hash::{Hash, Hasher},
io,
ops::Deref,
};
use bitvec::prelude::*;
@ -216,7 +215,7 @@ pub enum NoteCommitmentTreeError {
}
/// Sapling Incremental Note Commitment Tree.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[derive(Debug, Serialize, Deserialize)]
pub struct NoteCommitmentTree {
/// The tree represented as a Frontier.
///
@ -247,12 +246,9 @@ pub struct NoteCommitmentTree {
/// serialized with the tree). This is particularly important since we decided
/// to instantiate the trees from the genesis block, for simplicity.
///
/// [`Cell`] offers interior mutability (it works even with a non-mutable
/// reference to the tree) but it prevents the tree (and anything that uses it)
/// from being shared between threads. If this ever becomes an issue we can
/// leave caching to the callers (which requires much more code), or replace
/// `Cell` with `Arc<Mutex<_>>` (and be careful of deadlocks and async code.)
cached_root: Cell<Option<Root>>,
/// We use a [`RwLock`] for this cache, because it is only written once per tree update.
/// Each tree has its own cached root; a new lock is created for each clone.
cached_root: std::sync::RwLock<Option<Root>>,
}
impl NoteCommitmentTree {
@ -266,7 +262,13 @@ impl NoteCommitmentTree {
pub fn append(&mut self, cm_u: jubjub::Fq) -> Result<(), NoteCommitmentTreeError> {
if self.inner.append(&cm_u.into()) {
// Invalidate cached root
self.cached_root.replace(None);
let cached_root = self
.cached_root
.get_mut()
.expect("a thread that previously held exclusive lock access panicked");
*cached_root = None;
Ok(())
} else {
Err(NoteCommitmentTreeError::FullTree)
@ -276,13 +278,28 @@ impl NoteCommitmentTree {
/// Returns the current root of the tree, used as an anchor in Sapling
/// shielded transactions.
pub fn root(&self) -> Root {
match self.cached_root.get() {
// Return cached root
Some(root) => root,
if let Some(root) = self
.cached_root
.read()
.expect("a thread that previously held exclusive lock access panicked")
.deref()
{
// Return cached root.
return *root;
}
// Get exclusive access, compute the root, and cache it.
let mut write_root = self
.cached_root
.write()
.expect("a thread that previously held exclusive lock access panicked");
match write_root.deref() {
// Another thread got write access first, return cached root.
Some(root) => *root,
None => {
// Compute root and cache it
// Compute root and cache it.
let root = Root::try_from(self.inner.root().0).unwrap();
self.cached_root.replace(Some(root));
*write_root = Some(root);
root
}
}
@ -311,6 +328,21 @@ impl NoteCommitmentTree {
}
}
impl Clone for NoteCommitmentTree {
/// Clones the inner tree, and creates a new `RwLock` with the cloned root data.
fn clone(&self) -> Self {
let cached_root = *self
.cached_root
.read()
.expect("a thread that previously held exclusive lock access panicked");
Self {
inner: self.inner.clone(),
cached_root: std::sync::RwLock::new(cached_root),
}
}
}
impl Default for NoteCommitmentTree {
fn default() -> Self {
Self {

View File

@ -10,9 +10,7 @@
//!
//! A root of a note commitment tree is associated with each treestate.
#![allow(clippy::unit_arg)]
use std::{cell::Cell, fmt};
use std::{fmt, ops::Deref};
use byteorder::{BigEndian, ByteOrder};
use incrementalmerkletree::{bridgetree, Frontier};
@ -201,7 +199,7 @@ pub enum NoteCommitmentTreeError {
///
/// [Sprout Note Commitment Tree]: https://zips.z.cash/protocol/protocol.pdf#merkletree
/// [nullifier set]: https://zips.z.cash/protocol/protocol.pdf#nullifierset
#[derive(Clone, Debug, Serialize, Deserialize)]
#[derive(Debug, Serialize, Deserialize)]
pub struct NoteCommitmentTree {
/// The tree represented as a [`incrementalmerkletree::bridgetree::Frontier`].
///
@ -232,13 +230,9 @@ pub struct NoteCommitmentTree {
/// the tree). This is particularly important since we decided to
/// instantiate the trees from the genesis block, for simplicity.
///
/// [`Cell`] offers interior mutability (it works even with a non-mutable
/// reference to the tree) but it prevents the tree (and anything that uses
/// it) from being shared between threads. If this ever becomes an issue we
/// can leave caching to the callers (which requires much more code), or
/// replace `Cell` with `Arc<Mutex<_>>` (and be careful of deadlocks and
/// async code.)
cached_root: Cell<Option<Root>>,
/// We use a [`RwLock`] for this cache, because it is only written once per tree update.
/// Each tree has its own cached root; a new lock is created for each clone.
cached_root: std::sync::RwLock<Option<Root>>,
}
impl NoteCommitmentTree {
@ -248,7 +242,13 @@ impl NoteCommitmentTree {
pub fn append(&mut self, cm: NoteCommitment) -> Result<(), NoteCommitmentTreeError> {
if self.inner.append(&cm.into()) {
// Invalidate cached root
self.cached_root.replace(None);
let cached_root = self
.cached_root
.get_mut()
.expect("a thread that previously held exclusive lock access panicked");
*cached_root = None;
Ok(())
} else {
Err(NoteCommitmentTreeError::FullTree)
@ -258,13 +258,28 @@ impl NoteCommitmentTree {
/// Returns the current root of the tree; used as an anchor in Sprout
/// shielded transactions.
pub fn root(&self) -> Root {
match self.cached_root.get() {
if let Some(root) = self
.cached_root
.read()
.expect("a thread that previously held exclusive lock access panicked")
.deref()
{
// Return cached root.
Some(root) => root,
return *root;
}
// Get exclusive access, compute the root, and cache it.
let mut write_root = self
.cached_root
.write()
.expect("a thread that previously held exclusive lock access panicked");
match write_root.deref() {
// Another thread got write access first, return cached root.
Some(root) => *root,
None => {
// Compute root and cache it.
let root = Root(self.inner.root().0);
self.cached_root.replace(Some(root));
*write_root = Some(root);
root
}
}
@ -294,6 +309,21 @@ impl NoteCommitmentTree {
}
}
impl Clone for NoteCommitmentTree {
/// Clones the inner tree, and creates a new `RwLock` with the cloned root data.
fn clone(&self) -> Self {
let cached_root = *self
.cached_root
.read()
.expect("a thread that previously held exclusive lock access panicked");
Self {
inner: self.inner.clone(),
cached_root: std::sync::RwLock::new(cached_root),
}
}
}
impl Default for NoteCommitmentTree {
fn default() -> Self {
Self {