cleanup(state): Refactor subtree read functions to accept ranges and check memory first (#7739)

* refactors sapling_subtrees to use ranges

* refactor orchard_subtrees() to use ranges

* Restores check for inconsistent subtree roots

* updates vectors tests

* Applies some suggestions from code review and adds `read::tree::subtrees` function

* updates correctness comment & db read method names

* adds test_subtrees

* tests in-memory reads

* test that subtree read fns work right with excluded start bound

* applies suggestions from code review

* Applies suggestions from code review.

* Updates docs applying suggestion from code review

* adds new arg to zs_range_iter calls
Arya 2023-10-20 01:06:05 -04:00 committed by GitHub
parent 9c993f82e2
commit 06a29831e2
5 changed files with 262 additions and 302 deletions
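
The key API change: callers now convert a `start_index` plus an optional `limit` into a range before calling the subtree read functions. A minimal standalone sketch of that conversion (not part of this commit), using a hypothetical `NoteCommitmentSubtreeIndex` stand-in and a hypothetical `count_in_range` helper in place of the real read functions:

use std::ops::RangeBounds;

// Hypothetical stand-in for `zebra_chain::subtree::NoteCommitmentSubtreeIndex` (a `u16` newtype).
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct NoteCommitmentSubtreeIndex(u16);

// Hypothetical helper standing in for the range-based read functions in this commit.
fn count_in_range(range: impl RangeBounds<NoteCommitmentSubtreeIndex>) -> usize {
    (0..=u16::MAX)
        .map(NoteCommitmentSubtreeIndex)
        .filter(|index| range.contains(index))
        .count()
}

fn main() {
    let start_index = NoteCommitmentSubtreeIndex(100);
    let limit = Some(NoteCommitmentSubtreeIndex(5));

    // The exclusive end bound is `start + limit`; it is `None` when there is no limit
    // or the addition overflows (matching `zcashd`, which never computes an end bound).
    let end_index = limit
        .and_then(|limit| start_index.0.checked_add(limit.0))
        .map(NoteCommitmentSubtreeIndex);

    // `start..end` and `start..` are different range types, so callers branch and pass
    // whichever applies, as the call sites in the first file below do.
    let selected = if let Some(end_index) = end_index {
        count_in_range(start_index..end_index)
    } else {
        count_in_range(start_index..)
    };

    assert_eq!(selected, 5);
}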

@@ -45,6 +45,7 @@ use zebra_chain::{
block::{self, CountedHeader, HeightDiff},
diagnostic::{task::WaitForPanics, CodeTimer},
parameters::{Network, NetworkUpgrade},
subtree::NoteCommitmentSubtreeIndex,
};
use crate::{
@@ -1507,14 +1508,29 @@ impl Service<ReadRequest> for ReadStateService {
tokio::task::spawn_blocking(move || {
span.in_scope(move || {
let end_index = limit
.and_then(|limit| start_index.0.checked_add(limit.0))
.map(NoteCommitmentSubtreeIndex);
let sapling_subtrees = state.non_finalized_state_receiver.with_watch_data(
|non_finalized_state| {
read::sapling_subtrees(
non_finalized_state.best_chain(),
&state.db,
start_index,
limit,
)
if let Some(end_index) = end_index {
read::sapling_subtrees(
non_finalized_state.best_chain(),
&state.db,
start_index..end_index,
)
} else {
// If there is no end bound, just return all the trees.
// If the end bound would overflow, just returns all the trees, because that's what
// `zcashd` does. (It never calculates an end bound, so it just keeps iterating until
// the trees run out.)
read::sapling_subtrees(
non_finalized_state.best_chain(),
&state.db,
start_index..,
)
}
},
);
@@ -1532,14 +1548,29 @@ impl Service<ReadRequest> for ReadStateService {
tokio::task::spawn_blocking(move || {
span.in_scope(move || {
let end_index = limit
.and_then(|limit| start_index.0.checked_add(limit.0))
.map(NoteCommitmentSubtreeIndex);
let orchard_subtrees = state.non_finalized_state_receiver.with_watch_data(
|non_finalized_state| {
read::orchard_subtrees(
non_finalized_state.best_chain(),
&state.db,
start_index,
limit,
)
if let Some(end_index) = end_index {
read::orchard_subtrees(
non_finalized_state.best_chain(),
&state.db,
start_index..end_index,
)
} else {
// If there is no end bound, just return all the trees.
// If the end bound would overflow, just returns all the trees, because that's what
// `zcashd` does. (It never calculates an end bound, so it just keeps iterating until
// the trees run out.)
read::orchard_subtrees(
non_finalized_state.best_chain(),
&state.db,
start_index..,
)
}
},
);

@@ -245,64 +245,20 @@ impl ZebraDb {
Some(subtree_data.with_index(index))
}
/// Returns a list of Sapling [`NoteCommitmentSubtree`]s starting at `start_index`.
/// If `limit` is provided, the list is limited to `limit` entries.
///
/// If there is no subtree at `start_index`, the returned list is empty.
/// Otherwise, subtrees are continuous up to the finalized tip.
///
/// # Correctness
///
/// This method is specifically designed for the `z_getsubtreesbyindex` state request.
/// It might not work for other RPCs or state checks.
pub fn sapling_subtree_list_by_index_for_rpc(
/// Returns a list of Sapling [`NoteCommitmentSubtree`]s in the provided range.
#[allow(clippy::unwrap_in_result)]
pub fn sapling_subtree_list_by_index_range(
&self,
start_index: NoteCommitmentSubtreeIndex,
limit: Option<NoteCommitmentSubtreeIndex>,
range: impl std::ops::RangeBounds<NoteCommitmentSubtreeIndex>,
) -> BTreeMap<NoteCommitmentSubtreeIndex, NoteCommitmentSubtreeData<sapling::tree::Node>> {
let sapling_subtrees = self
.db
.cf_handle("sapling_note_commitment_subtree")
.unwrap();
// Calculate the end bound, checking for overflow.
let exclusive_end_bound: Option<NoteCommitmentSubtreeIndex> = limit
.and_then(|limit| start_index.0.checked_add(limit.0))
.map(NoteCommitmentSubtreeIndex);
let list: BTreeMap<
NoteCommitmentSubtreeIndex,
NoteCommitmentSubtreeData<sapling::tree::Node>,
>;
if let Some(exclusive_end_bound) = exclusive_end_bound {
list = self
.db
.zs_range_iter(&sapling_subtrees, start_index..exclusive_end_bound, false)
.collect();
} else {
// If there is no end bound, just return all the trees.
// If the end bound would overflow, just returns all the trees, because that's what
// `zcashd` does. (It never calculates an end bound, so it just keeps iterating until
// the trees run out.)
list = self
.db
.zs_range_iter(&sapling_subtrees, start_index.., false)
.collect();
}
// Make sure the amount of retrieved subtrees does not exceed the given limit.
#[cfg(debug_assertions)]
if let Some(limit) = limit {
assert!(list.len() <= limit.0.into());
}
// Check that we got the start subtree.
if list.get(&start_index).is_some() {
list
} else {
BTreeMap::new()
}
self.db
.zs_range_iter(&sapling_subtrees, range, false)
.collect()
}
/// Get the sapling note commitment subtress for the finalized tip.
@@ -415,64 +371,20 @@ impl ZebraDb {
Some(subtree_data.with_index(index))
}
/// Returns a list of Orchard [`NoteCommitmentSubtree`]s starting at `start_index`.
/// If `limit` is provided, the list is limited to `limit` entries.
///
/// If there is no subtree at `start_index`, the returned list is empty.
/// Otherwise, subtrees are continuous up to the finalized tip.
///
/// # Correctness
///
/// This method is specifically designed for the `z_getsubtreesbyindex` state request.
/// It might not work for other RPCs or state checks.
pub fn orchard_subtree_list_by_index_for_rpc(
/// Returns a list of Orchard [`NoteCommitmentSubtree`]s in the provided range.
#[allow(clippy::unwrap_in_result)]
pub fn orchard_subtree_list_by_index_range(
&self,
start_index: NoteCommitmentSubtreeIndex,
limit: Option<NoteCommitmentSubtreeIndex>,
range: impl std::ops::RangeBounds<NoteCommitmentSubtreeIndex>,
) -> BTreeMap<NoteCommitmentSubtreeIndex, NoteCommitmentSubtreeData<orchard::tree::Node>> {
let orchard_subtrees = self
.db
.cf_handle("orchard_note_commitment_subtree")
.unwrap();
// Calculate the end bound, checking for overflow.
let exclusive_end_bound: Option<NoteCommitmentSubtreeIndex> = limit
.and_then(|limit| start_index.0.checked_add(limit.0))
.map(NoteCommitmentSubtreeIndex);
let list: BTreeMap<
NoteCommitmentSubtreeIndex,
NoteCommitmentSubtreeData<orchard::tree::Node>,
>;
if let Some(exclusive_end_bound) = exclusive_end_bound {
list = self
.db
.zs_range_iter(&orchard_subtrees, start_index..exclusive_end_bound, false)
.collect();
} else {
// If there is no end bound, just return all the trees.
// If the end bound would overflow, just returns all the trees, because that's what
// `zcashd` does. (It never calculates an end bound, so it just keeps iterating until
// the trees run out.)
list = self
.db
.zs_range_iter(&orchard_subtrees, start_index.., false)
.collect();
}
// Make sure the amount of retrieved subtrees does not exceed the given limit.
#[cfg(debug_assertions)]
if let Some(limit) = limit {
assert!(list.len() <= limit.0.into());
}
// Check that we got the start subtree.
if list.get(&start_index).is_some() {
list
} else {
BTreeMap::new()
}
self.db
.zs_range_iter(&orchard_subtrees, range, false)
.collect()
}
/// Get the orchard note commitment subtress for the finalized tip.
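
Both replacement methods take `impl std::ops::RangeBounds<NoteCommitmentSubtreeIndex>`, so a single signature covers bounded, half-open, and excluded-start requests. A minimal standalone sketch of those bound kinds (not part of this commit), with plain `u16` keys and a hypothetical `list` helper standing in for the database read:

use std::ops::{Bound::{Excluded, Unbounded}, RangeBounds};

// Hypothetical helper with the same kind of range bound as the new read methods.
fn list(range: impl RangeBounds<u16>) -> Vec<u16> {
    (0u16..=20).filter(|index| range.contains(index)).collect()
}

fn main() {
    assert_eq!(list(3..6), vec![3, 4, 5]);                     // half-open, like `start..end`
    assert_eq!(list(18..), vec![18, 19, 20]);                  // unbounded end, like `start..`
    assert_eq!(list((Excluded(18), Unbounded)), vec![19, 20]); // excluded start bound
}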

@@ -698,8 +698,7 @@ impl Chain {
.map(|(index, subtree)| subtree.with_index(*index))
}
/// Returns a list of Sapling [`NoteCommitmentSubtree`]s at or after `start_index`.
/// If `limit` is provided, the list is limited to `limit` entries.
/// Returns a list of Sapling [`NoteCommitmentSubtree`]s in the provided range.
///
/// Unlike the finalized state and `ReadRequest::SaplingSubtrees`, the returned subtrees
/// can start after `start_index`. These subtrees are continuous up to the tip.
@@ -709,17 +708,10 @@ impl Chain {
/// finalized updates.
pub fn sapling_subtrees_in_range(
&self,
start_index: NoteCommitmentSubtreeIndex,
limit: Option<NoteCommitmentSubtreeIndex>,
range: impl std::ops::RangeBounds<NoteCommitmentSubtreeIndex>,
) -> BTreeMap<NoteCommitmentSubtreeIndex, NoteCommitmentSubtreeData<sapling::tree::Node>> {
let limit = limit
.map(|limit| usize::from(limit.0))
.unwrap_or(usize::MAX);
// Since we're working in memory, it's ok to iterate through the whole range here.
self.sapling_subtrees
.range(start_index..)
.take(limit)
.range(range)
.map(|(index, subtree)| (*index, *subtree))
.collect()
}
@@ -910,8 +902,7 @@ impl Chain {
.map(|(index, subtree)| subtree.with_index(*index))
}
/// Returns a list of Orchard [`NoteCommitmentSubtree`]s at or after `start_index`.
/// If `limit` is provided, the list is limited to `limit` entries.
/// Returns a list of Orchard [`NoteCommitmentSubtree`]s in the provided range.
///
/// Unlike the finalized state and `ReadRequest::OrchardSubtrees`, the returned subtrees
/// can start after `start_index`. These subtrees are continuous up to the tip.
@@ -921,17 +912,10 @@ impl Chain {
/// finalized updates.
pub fn orchard_subtrees_in_range(
&self,
start_index: NoteCommitmentSubtreeIndex,
limit: Option<NoteCommitmentSubtreeIndex>,
range: impl std::ops::RangeBounds<NoteCommitmentSubtreeIndex>,
) -> BTreeMap<NoteCommitmentSubtreeIndex, NoteCommitmentSubtreeData<orchard::tree::Node>> {
let limit = limit
.map(|limit| usize::from(limit.0))
.unwrap_or(usize::MAX);
// Since we're working in memory, it's ok to iterate through the whole range here.
self.orchard_subtrees
.range(start_index..)
.take(limit)
.range(range)
.map(|(index, subtree)| (*index, *subtree))
.collect()
}
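
In memory, the bounds handling now falls entirely to `BTreeMap::range`, which is why the explicit `limit`/`take(limit)` logic disappears. A minimal standalone sketch of that behaviour (not part of this commit), with plain `u16` keys standing in for `NoteCommitmentSubtreeIndex`:

use std::collections::BTreeMap;

fn main() {
    // Stand-in for `self.sapling_subtrees` / `self.orchard_subtrees`, keyed by subtree index.
    let subtrees: BTreeMap<u16, &str> = [(0, "root0"), (1, "root1"), (2, "root2")]
        .into_iter()
        .collect();

    // `BTreeMap::range` accepts the same `RangeBounds` values the refactored
    // functions take, so no separate `limit` handling is needed.
    let selected: Vec<u16> = subtrees.range(1u16..).map(|(index, _)| *index).collect();
    assert_eq!(selected, vec![1, 2]);
}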

@@ -100,6 +100,101 @@ async fn populated_read_state_responds_correctly() -> Result<()> {
Ok(())
}
/// Tests if Zebra combines the note commitment subtrees from the finalized and
/// non-finalized states correctly.
#[tokio::test]
async fn test_read_subtrees() -> Result<()> {
use std::ops::Bound::*;
let dummy_subtree = |(index, height)| {
NoteCommitmentSubtree::new(
u16::try_from(index).expect("should fit in u16"),
Height(height),
sapling::tree::Node::default(),
)
};
let num_db_subtrees = 10;
let num_chain_subtrees = 2;
let index_offset = usize::try_from(num_db_subtrees).expect("constant should fit in usize");
let db_height_range = 0..num_db_subtrees;
let chain_height_range = num_db_subtrees..(num_db_subtrees + num_chain_subtrees);
// Prepare the finalized state.
let db = {
let db = ZebraDb::new(&Config::ephemeral(), Mainnet, true);
let db_subtrees = db_height_range.enumerate().map(dummy_subtree);
for db_subtree in db_subtrees {
let mut db_batch = DiskWriteBatch::new();
db_batch.insert_sapling_subtree(&db, &db_subtree);
db.write(db_batch)
.expect("Writing a batch with a Sapling subtree should succeed.");
}
db
};
// Prepare the non-finalized state.
let chain = {
let mut chain = Chain::default();
let chain_subtrees = chain_height_range
.enumerate()
.map(|(index, height)| dummy_subtree((index_offset + index, height)));
for chain_subtree in chain_subtrees {
chain.insert_sapling_subtree(chain_subtree);
}
Arc::new(chain)
};
let modify_chain = |chain: &Arc<Chain>, index: usize, height| {
let mut chain = chain.as_ref().clone();
chain.insert_sapling_subtree(dummy_subtree((index, height)));
Some(Arc::new(chain))
};
// There should be 10 entries in db and 2 in chain with no overlap
// Unbounded range should start at 0
let all_subtrees = sapling_subtrees(Some(chain.clone()), &db, ..);
assert_eq!(all_subtrees.len(), 12, "should have 12 subtrees in state");
// Add a subtree to `chain` that overlaps and is not consistent with the db subtrees
let first_chain_index = index_offset - 1;
let end_height = Height(400_000);
let modified_chain = modify_chain(&chain, first_chain_index, end_height.0);
// The inconsistent entry and any later entries should be omitted
let all_subtrees = sapling_subtrees(modified_chain.clone(), &db, ..);
assert_eq!(all_subtrees.len(), 10, "should have 10 subtrees in state");
let first_chain_index =
NoteCommitmentSubtreeIndex(u16::try_from(first_chain_index).expect("should fit in u16"));
// Entries should be returned without reading from disk if the chain contains the first subtree index in the range
let mut chain_subtrees = sapling_subtrees(modified_chain, &db, first_chain_index..);
assert_eq!(chain_subtrees.len(), 3, "should have 3 subtrees in chain");
let (index, subtree) = chain_subtrees
.pop_first()
.expect("chain_subtrees should not be empty");
assert_eq!(first_chain_index, index, "subtree indexes should match");
assert_eq!(end_height, subtree.end, "subtree end heights should match");
// Check that Zebra retrieves subtrees correctly when using a range with an Excluded start bound
let start = 0.into();
let range = (Excluded(start), Unbounded);
let subtrees = sapling_subtrees(Some(chain), &db, range);
assert_eq!(subtrees.len(), 11);
assert!(
!subtrees.contains_key(&start),
"should not contain excluded start bound"
);
Ok(())
}
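
The `Excluded` start bound case exercised above is handled by the generic `subtrees` helper later in this diff, which normalizes the start bound before checking for the first subtree. A minimal standalone sketch of that normalization (not part of this commit), using plain `u16` indexes:

use std::ops::Bound::{self, Excluded, Included, Unbounded};

// Mirrors the start-index calculation in the generic `subtrees` helper:
// the first index a range can select, given its start bound.
fn first_included_index(start: Bound<u16>) -> u16 {
    match start {
        Included(start) => start,
        Excluded(start) => start + 1,
        Unbounded => 0,
    }
}

fn main() {
    assert_eq!(first_included_index(Unbounded), 0);
    assert_eq!(first_included_index(Included(5)), 5);
    // An excluded start bound of 0 selects indexes from 1 onwards, which is why
    // the test above expects 11 of the 12 subtrees and no entry at index 0.
    assert_eq!(first_included_index(Excluded(0)), 1);
}
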
/// Tests if Zebra combines the Sapling note commitment subtrees from the finalized and
/// non-finalized states correctly.
#[tokio::test]
@@ -114,7 +209,7 @@ async fn test_sapling_subtrees() -> Result<()> {
db.write(db_batch)
.expect("Writing a batch with a Sapling subtree should succeed.");
// Prepare the non-fianlized state.
// Prepare the non-finalized state.
let chain_subtree = NoteCommitmentSubtree::new(1, Height(3), dummy_subtree_root);
let mut chain = Chain::default();
chain.insert_sapling_subtree(chain_subtree);
@@ -124,40 +219,40 @@ async fn test_sapling_subtrees() -> Result<()> {
// the non-finalized state.
// Retrieve only the first subtree and check its properties.
let subtrees = sapling_subtrees(chain.clone(), &db, 0.into(), Some(1.into()));
let subtrees = sapling_subtrees(chain.clone(), &db, NoteCommitmentSubtreeIndex(0)..1.into());
let mut subtrees = subtrees.iter();
assert_eq!(subtrees.len(), 1);
assert!(subtrees_eq(subtrees.next().unwrap(), &db_subtree));
// Retrieve both subtrees using a limit and check their properties.
let subtrees = sapling_subtrees(chain.clone(), &db, 0.into(), Some(2.into()));
let subtrees = sapling_subtrees(chain.clone(), &db, NoteCommitmentSubtreeIndex(0)..2.into());
let mut subtrees = subtrees.iter();
assert_eq!(subtrees.len(), 2);
assert!(subtrees_eq(subtrees.next().unwrap(), &db_subtree));
assert!(subtrees_eq(subtrees.next().unwrap(), &chain_subtree));
// Retrieve both subtrees without using a limit and check their properties.
let subtrees = sapling_subtrees(chain.clone(), &db, 0.into(), None);
let subtrees = sapling_subtrees(chain.clone(), &db, NoteCommitmentSubtreeIndex(0)..);
let mut subtrees = subtrees.iter();
assert_eq!(subtrees.len(), 2);
assert!(subtrees_eq(subtrees.next().unwrap(), &db_subtree));
assert!(subtrees_eq(subtrees.next().unwrap(), &chain_subtree));
// Retrieve only the second subtree and check its properties.
let subtrees = sapling_subtrees(chain.clone(), &db, 1.into(), Some(1.into()));
let subtrees = sapling_subtrees(chain.clone(), &db, NoteCommitmentSubtreeIndex(1)..2.into());
let mut subtrees = subtrees.iter();
assert_eq!(subtrees.len(), 1);
assert!(subtrees_eq(subtrees.next().unwrap(), &chain_subtree));
// Retrieve only the second subtree, using a limit that would allow for more trees if they were
// present, and check its properties.
let subtrees = sapling_subtrees(chain.clone(), &db, 1.into(), Some(2.into()));
let subtrees = sapling_subtrees(chain.clone(), &db, NoteCommitmentSubtreeIndex(1)..3.into());
let mut subtrees = subtrees.iter();
assert_eq!(subtrees.len(), 1);
assert!(subtrees_eq(subtrees.next().unwrap(), &chain_subtree));
// Retrieve only the second subtree, without using any limit, and check its properties.
let subtrees = sapling_subtrees(chain, &db, 1.into(), None);
let subtrees = sapling_subtrees(chain, &db, NoteCommitmentSubtreeIndex(1)..);
let mut subtrees = subtrees.iter();
assert_eq!(subtrees.len(), 1);
assert!(subtrees_eq(subtrees.next().unwrap(), &chain_subtree));
@@ -179,7 +274,7 @@ async fn test_orchard_subtrees() -> Result<()> {
db.write(db_batch)
.expect("Writing a batch with an Orchard subtree should succeed.");
// Prepare the non-fianlized state.
// Prepare the non-finalized state.
let chain_subtree = NoteCommitmentSubtree::new(1, Height(3), dummy_subtree_root);
let mut chain = Chain::default();
chain.insert_orchard_subtree(chain_subtree);
@@ -189,40 +284,40 @@ async fn test_orchard_subtrees() -> Result<()> {
// the non-finalized state.
// Retrieve only the first subtree and check its properties.
let subtrees = orchard_subtrees(chain.clone(), &db, 0.into(), Some(1.into()));
let subtrees = orchard_subtrees(chain.clone(), &db, NoteCommitmentSubtreeIndex(0)..1.into());
let mut subtrees = subtrees.iter();
assert_eq!(subtrees.len(), 1);
assert!(subtrees_eq(subtrees.next().unwrap(), &db_subtree));
// Retrieve both subtrees using a limit and check their properties.
let subtrees = orchard_subtrees(chain.clone(), &db, 0.into(), Some(2.into()));
let subtrees = orchard_subtrees(chain.clone(), &db, NoteCommitmentSubtreeIndex(0)..2.into());
let mut subtrees = subtrees.iter();
assert_eq!(subtrees.len(), 2);
assert!(subtrees_eq(subtrees.next().unwrap(), &db_subtree));
assert!(subtrees_eq(subtrees.next().unwrap(), &chain_subtree));
// Retrieve both subtrees without using a limit and check their properties.
let subtrees = orchard_subtrees(chain.clone(), &db, 0.into(), None);
let subtrees = orchard_subtrees(chain.clone(), &db, NoteCommitmentSubtreeIndex(0)..);
let mut subtrees = subtrees.iter();
assert_eq!(subtrees.len(), 2);
assert!(subtrees_eq(subtrees.next().unwrap(), &db_subtree));
assert!(subtrees_eq(subtrees.next().unwrap(), &chain_subtree));
// Retrieve only the second subtree and check its properties.
let subtrees = orchard_subtrees(chain.clone(), &db, 1.into(), Some(1.into()));
let subtrees = orchard_subtrees(chain.clone(), &db, NoteCommitmentSubtreeIndex(1)..2.into());
let mut subtrees = subtrees.iter();
assert_eq!(subtrees.len(), 1);
assert!(subtrees_eq(subtrees.next().unwrap(), &chain_subtree));
// Retrieve only the second subtree, using a limit that would allow for more trees if they were
// present, and check its properties.
let subtrees = orchard_subtrees(chain.clone(), &db, 1.into(), Some(2.into()));
let subtrees = orchard_subtrees(chain.clone(), &db, NoteCommitmentSubtreeIndex(1)..3.into());
let mut subtrees = subtrees.iter();
assert_eq!(subtrees.len(), 1);
assert!(subtrees_eq(subtrees.next().unwrap(), &chain_subtree));
// Retrieve only the second subtree, without using any limit, and check its properties.
let subtrees = orchard_subtrees(chain, &db, 1.into(), None);
let subtrees = orchard_subtrees(chain, &db, NoteCommitmentSubtreeIndex(1)..);
let mut subtrees = subtrees.iter();
assert_eq!(subtrees.len(), 1);
assert!(subtrees_eq(subtrees.next().unwrap(), &chain_subtree));

@@ -48,99 +48,26 @@ where
.or_else(|| db.sapling_tree_by_hash_or_height(hash_or_height))
}
/// Returns a list of Sapling [`NoteCommitmentSubtree`]s starting at `start_index`.
/// Returns a list of Sapling [`NoteCommitmentSubtree`]s with indexes in the provided range.
///
/// If `limit` is provided, the list is limited to `limit` entries. If there is no subtree at
/// `start_index` in the non-finalized `chain` or finalized `db`, the returned list is empty.
/// If there is no subtree at the first index in the range, the returned list is empty.
/// Otherwise, subtrees are continuous up to the finalized tip.
///
/// # Correctness
///
/// 1. After `chain` was cloned, the StateService can commit additional blocks to the finalized
/// state `db`. Usually, the subtrees of these blocks are consistent. But if the `chain` is a
/// different fork to `db`, then the trees can be inconsistent. In that case, we ignore all the
/// trees in `chain` after the first inconsistent tree, because we know they will be inconsistent as
/// well. (It is cryptographically impossible for tree roots to be equal once the leaves have
/// diverged.)
/// 2. APIs that return single subtrees can't be used here, because they can create
/// an inconsistent list of subtrees after concurrent non-finalized and finalized updates.
/// See [`subtrees`] for more details.
pub fn sapling_subtrees<C>(
chain: Option<C>,
db: &ZebraDb,
start_index: NoteCommitmentSubtreeIndex,
limit: Option<NoteCommitmentSubtreeIndex>,
range: impl std::ops::RangeBounds<NoteCommitmentSubtreeIndex> + Clone,
) -> BTreeMap<NoteCommitmentSubtreeIndex, NoteCommitmentSubtreeData<sapling::tree::Node>>
where
C: AsRef<Chain>,
{
let mut db_list = db.sapling_subtree_list_by_index_for_rpc(start_index, limit);
if let Some(limit) = limit {
let subtrees_num = u16::try_from(db_list.len())
.expect("There can't be more than `u16::MAX` Sapling subtrees.");
// Return the subtrees if the amount of them reached the given limit.
if subtrees_num == limit.0 {
return db_list;
}
// If not, make sure the amount is below the limit.
debug_assert!(subtrees_num < limit.0);
}
// If there's no chain, then we have the complete list.
let Some(chain) = chain else {
return db_list;
};
// Unlike the other methods, this returns any trees in the range,
// even if there is no tree for start_index.
let fork_list = chain.as_ref().sapling_subtrees_in_range(start_index, limit);
// If there's no subtrees in chain, then we have the complete list.
if fork_list.is_empty() {
return db_list;
};
// Check for inconsistent trees in the fork.
for (fork_index, fork_subtree) in fork_list {
// If there's no matching index, just update the list of trees.
let Some(db_subtree) = db_list.get(&fork_index) else {
db_list.insert(fork_index, fork_subtree);
// Stop adding new subtrees once their amount reaches the given limit.
if let Some(limit) = limit {
let subtrees_num = u16::try_from(db_list.len())
.expect("There can't be more than `u16::MAX` Sapling subtrees.");
if subtrees_num == limit.0 {
break;
}
}
continue;
};
// We have an outdated chain fork, so skip this subtree and all remaining subtrees.
if &fork_subtree != db_subtree {
break;
}
// Otherwise, the subtree is already in the list, so we don't need to add it.
}
// Make sure the amount of retrieved subtrees does not exceed the given limit.
#[cfg(debug_assertions)]
if let Some(limit) = limit {
assert!(db_list.len() <= limit.0.into());
}
// Check that we got the start subtree from the non-finalized or finalized state.
// (The non-finalized state doesn't do this check.)
if db_list.get(&start_index).is_some() {
db_list
} else {
BTreeMap::new()
}
subtrees(
chain,
range,
|chain, range| chain.sapling_subtrees_in_range(range),
|range| db.sapling_subtree_list_by_index_range(range),
)
}
/// Returns the Orchard
@@ -164,96 +91,107 @@ where
.or_else(|| db.orchard_tree_by_hash_or_height(hash_or_height))
}
/// Returns a list of Orchard [`NoteCommitmentSubtree`]s starting at `start_index`.
/// Returns a list of Orchard [`NoteCommitmentSubtree`]s with indexes in the provided range.
///
/// If `limit` is provided, the list is limited to `limit` entries. If there is no subtree at
/// `start_index` in the non-finalized `chain` or finalized `db`, the returned list is empty.
/// If there is no subtree at the first index in the range, the returned list is empty.
/// Otherwise, subtrees are continuous up to the finalized tip.
///
/// # Correctness
///
/// 1. After `chain` was cloned, the StateService can commit additional blocks to the finalized
/// state `db`. Usually, the subtrees of these blocks are consistent. But if the `chain` is a
/// different fork to `db`, then the trees can be inconsistent. In that case, we ignore all the
/// trees in `chain` after the first inconsistent tree, because we know they will be inconsistent as
/// well. (It is cryptographically impossible for tree roots to be equal once the leaves have
/// diverged.)
/// 2. APIs that return single subtrees can't be used here, because they can create
/// an inconsistent list of subtrees after concurrent non-finalized and finalized updates.
/// See [`subtrees`] for more details.
pub fn orchard_subtrees<C>(
chain: Option<C>,
db: &ZebraDb,
start_index: NoteCommitmentSubtreeIndex,
limit: Option<NoteCommitmentSubtreeIndex>,
range: impl std::ops::RangeBounds<NoteCommitmentSubtreeIndex> + Clone,
) -> BTreeMap<NoteCommitmentSubtreeIndex, NoteCommitmentSubtreeData<orchard::tree::Node>>
where
C: AsRef<Chain>,
{
let mut db_list = db.orchard_subtree_list_by_index_for_rpc(start_index, limit);
subtrees(
chain,
range,
|chain, range| chain.orchard_subtrees_in_range(range),
|range| db.orchard_subtree_list_by_index_range(range),
)
}
if let Some(limit) = limit {
let subtrees_num = u16::try_from(db_list.len())
.expect("There can't be more than `u16::MAX` Orchard subtrees.");
/// Returns a list of [`NoteCommitmentSubtree`]s in the provided range.
///
/// If there is no subtree at the first index in the range, the returned list is empty.
/// Otherwise, subtrees are continuous up to the finalized tip.
///
/// Accepts a `chain` from the non-finalized state, a `range` of subtree indexes to retrieve,
/// a `read_chain` function for retrieving the `range` of subtrees from `chain`, and
/// a `read_disk` function for retrieving the `range` from [`ZebraDb`].
///
/// Returns a consistent set of subtrees for the supplied chain fork and database.
/// Avoids reading the database if the subtrees are present in memory.
///
/// # Correctness
///
/// APIs that return single subtrees can't be used for `read_chain` and `read_disk`, because they
/// can create an inconsistent list of subtrees after concurrent non-finalized and finalized updates.
fn subtrees<C, Range, Node, ChainSubtreeFn, DbSubtreeFn>(
chain: Option<C>,
range: Range,
read_chain: ChainSubtreeFn,
read_disk: DbSubtreeFn,
) -> BTreeMap<NoteCommitmentSubtreeIndex, NoteCommitmentSubtreeData<Node>>
where
C: AsRef<Chain>,
Node: PartialEq,
Range: std::ops::RangeBounds<NoteCommitmentSubtreeIndex> + Clone,
ChainSubtreeFn: FnOnce(
&Chain,
Range,
)
-> BTreeMap<NoteCommitmentSubtreeIndex, NoteCommitmentSubtreeData<Node>>,
DbSubtreeFn:
FnOnce(Range) -> BTreeMap<NoteCommitmentSubtreeIndex, NoteCommitmentSubtreeData<Node>>,
{
use std::ops::Bound::*;
// Return the subtrees if the amount of them reached the given limit.
if subtrees_num == limit.0 {
return db_list;
}
// If not, make sure the amount is below the limit.
debug_assert!(subtrees_num < limit.0);
}
// If there's no chain, then we have the complete list.
let Some(chain) = chain else {
return db_list;
let start_index = match range.start_bound().cloned() {
Included(start_index) => start_index,
Excluded(start_index) => (start_index.0 + 1).into(),
Unbounded => 0.into(),
};
// Unlike the other methods, this returns any trees in the range,
// even if there is no tree for start_index.
let fork_list = chain.as_ref().orchard_subtrees_in_range(start_index, limit);
// # Correctness
//
// After `chain` was cloned, the StateService can commit additional blocks to the finalized state `db`.
// Usually, the subtrees of these blocks are consistent. But if the `chain` is a different fork to `db`,
// then the trees can be inconsistent. In that case, if `chain` does not contain a subtree at the first
// index in the provided range, we ignore all the trees in `chain` after the first inconsistent tree,
// because we know they will be inconsistent as well. (It is cryptographically impossible for tree roots
// to be equal once the leaves have diverged.)
// If there's no subtrees in chain, then we have the complete list.
if fork_list.is_empty() {
return db_list;
};
let results = match chain.map(|chain| read_chain(chain.as_ref(), range.clone())) {
Some(chain_results) if chain_results.contains_key(&start_index) => return chain_results,
Some(chain_results) => {
let mut db_results = read_disk(range);
// Check for inconsistent trees in the fork.
for (fork_index, fork_subtree) in fork_list {
// If there's no matching index, just update the list of trees.
let Some(db_subtree) = db_list.get(&fork_index) else {
db_list.insert(fork_index, fork_subtree);
// Check for inconsistent trees in the fork.
for (chain_index, chain_subtree) in chain_results {
// If there's no matching index, just update the list of trees.
let Some(db_subtree) = db_results.get(&chain_index) else {
db_results.insert(chain_index, chain_subtree);
continue;
};
// Stop adding new subtrees once their amount reaches the given limit.
if let Some(limit) = limit {
let subtrees_num = u16::try_from(db_list.len())
.expect("There can't be more than `u16::MAX` Orchard subtrees.");
if subtrees_num == limit.0 {
// We have an outdated chain fork, so skip this subtree and all remaining subtrees.
if &chain_subtree != db_subtree {
break;
}
// Otherwise, the subtree is already in the list, so we don't need to add it.
}
continue;
};
// We have an outdated chain fork, so skip this subtree and all remaining subtrees.
if &fork_subtree != db_subtree {
break;
db_results
}
None => read_disk(range),
};
// Otherwise, the subtree is already in the list, so we don't need to add it.
}
// Make sure the amount of retrieved subtrees does not exceed the given limit.
#[cfg(debug_assertions)]
if let Some(limit) = limit {
assert!(db_list.len() <= limit.0.into());
}
// Check that we got the start subtree from the non-finalized or finalized state.
// (The non-finalized state doesn't do this check.)
if db_list.get(&start_index).is_some() {
db_list
// Check that we got the start subtree
if results.contains_key(&start_index) {
results
} else {
BTreeMap::new()
}
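
When the chain does not contain the first subtree in the range, the helper merges chain entries into the database results until the first disagreement. A minimal standalone sketch of that merge rule (not part of this commit), using `u16` indexes and `&str` roots as stand-ins:

use std::collections::BTreeMap;

fn main() {
    // Finalized (db) results and a chain fork that disagrees at index 2.
    let mut db_results: BTreeMap<u16, &str> =
        [(0, "root0"), (1, "root1"), (2, "root2")].into_iter().collect();
    let chain_results: BTreeMap<u16, &str> = [(2, "other"), (3, "root3")].into_iter().collect();

    for (chain_index, chain_root) in chain_results {
        // If there's no db entry at this index, extend the results with the chain entry.
        let Some(db_root) = db_results.get(&chain_index) else {
            db_results.insert(chain_index, chain_root);
            continue;
        };

        // An outdated chain fork: skip this entry and all remaining chain entries.
        if *db_root != chain_root {
            break;
        }
        // Otherwise the entry is already present, so there is nothing to add.
    }

    // Index 2 disagrees, so index 3 is never added and the db value at 2 is kept.
    assert_eq!(db_results.keys().copied().collect::<Vec<_>>(), vec![0, 1, 2]);
    assert_eq!(db_results[&2], "root2");
}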