stops tracking new indexes in the finalized state when the `indexer` feature is not enabled

Arya 2024-10-24 19:27:41 -04:00
parent 39ede86120
commit 2585fd97bc
15 changed files with 101 additions and 38 deletions
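The pattern repeated across the files below is the same: every new spending-transaction-ID index is compiled only when the `indexer` Cargo feature of `zebra-state` is enabled (zebra-rpc's `indexer-rpcs` feature and zebrad's `indexer` feature both forward to `zebra-state/indexer`), so builds without the feature neither define the `Spend` type nor write the extra column-family entries. A minimal, self-contained sketch of that gating pattern; the type, field, and feature names here are illustrative, not Zebra's exact definitions:

```rust
// Sketch only: assumes a crate whose Cargo.toml declares `indexer = []`
// under [features]. Compiles with or without `--features indexer`.

/// Identifies a spend; only compiled when the `indexer` feature is enabled.
#[cfg(feature = "indexer")]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Spend {
    /// A spent transparent output, identified here by a placeholder 32-byte id.
    OutPoint([u8; 32]),
}

#[cfg(feature = "indexer")]
impl From<[u8; 32]> for Spend {
    fn from(outpoint: [u8; 32]) -> Self {
        Self::OutPoint(outpoint)
    }
}

fn main() {
    // Every use of the gated type must be gated the same way, otherwise a
    // build without the feature fails to compile.
    #[cfg(feature = "indexer")]
    {
        let spend: Spend = [0u8; 32].into();
        println!("{spend:?}");
    }
}
```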

View File

@@ -27,6 +27,7 @@ indexer-rpcs = [
"tonic-reflection",
"prost",
"tokio-stream",
"zebra-state/indexer"
]
# Production features that activate extra dependencies, or extra features in dependencies

View File

@@ -42,8 +42,12 @@ pub use error::{
ValidateContextError,
};
pub use request::{
CheckpointVerifiedBlock, HashOrHeight, ReadRequest, Request, SemanticallyVerifiedBlock, Spend,
CheckpointVerifiedBlock, HashOrHeight, ReadRequest, Request, SemanticallyVerifiedBlock,
};
#[cfg(feature = "indexer")]
pub use request::Spend;
pub use response::{KnownBlock, MinedTx, ReadResponse, Response};
pub use service::{
chain_tip::{ChainTipBlock, ChainTipChange, ChainTipSender, LatestChainTip, TipAction},

View File

@@ -34,6 +34,7 @@ use crate::{
/// This enum implements `From` for [`transparent::OutPoint`], [`sprout::Nullifier`],
/// [`sapling::Nullifier`], and [`orchard::Nullifier`].
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[cfg(feature = "indexer")]
pub enum Spend {
/// A spend identified by a [`transparent::OutPoint`].
OutPoint(transparent::OutPoint),
@@ -45,24 +46,28 @@ pub enum Spend {
Orchard(orchard::Nullifier),
}
#[cfg(feature = "indexer")]
impl From<transparent::OutPoint> for Spend {
fn from(outpoint: transparent::OutPoint) -> Self {
Self::OutPoint(outpoint)
}
}
#[cfg(feature = "indexer")]
impl From<sprout::Nullifier> for Spend {
fn from(sprout_nullifier: sprout::Nullifier) -> Self {
Self::Sprout(sprout_nullifier)
}
}
#[cfg(feature = "indexer")]
impl From<sapling::Nullifier> for Spend {
fn from(sapling_nullifier: sapling::Nullifier) -> Self {
Self::Sapling(sapling_nullifier)
}
}
#[cfg(feature = "indexer")]
impl From<orchard::Nullifier> for Spend {
fn from(orchard_nullifier: orchard::Nullifier) -> Self {
Self::Orchard(orchard_nullifier)
@@ -1064,6 +1069,7 @@ pub enum ReadRequest {
///
/// Returns [`ReadResponse::TransactionId`] with the hash of the transaction
/// that spent the output at the provided [`transparent::OutPoint`].
#[cfg(feature = "indexer")]
SpendingTransactionId(Spend),
/// Looks up utxos for the provided addresses.
@@ -1146,13 +1152,14 @@ impl ReadRequest {
ReadRequest::OrchardSubtrees { .. } => "orchard_subtrees",
ReadRequest::AddressBalance { .. } => "address_balance",
ReadRequest::TransactionIdsByAddresses { .. } => "transaction_ids_by_addesses",
ReadRequest::SpendingTransactionId(_) => "spending_transaction_id",
ReadRequest::UtxosByAddresses(_) => "utxos_by_addesses",
ReadRequest::CheckBestChainTipNullifiersAndAnchors(_) => {
"best_chain_tip_nullifiers_anchors"
}
ReadRequest::BestChainNextMedianTimePast => "best_chain_next_median_time_past",
ReadRequest::BestChainBlockHash(_) => "best_chain_block_hash",
#[cfg(feature = "indexer")]
ReadRequest::SpendingTransactionId(_) => "spending_transaction_id",
#[cfg(feature = "getblocktemplate-rpcs")]
ReadRequest::ChainInfo => "chain_info",
#[cfg(feature = "getblocktemplate-rpcs")]

View File

@@ -178,6 +178,7 @@ pub enum ReadResponse {
/// Response to [`ReadRequest::SpendingTransactionId`],
/// with the hash of the transaction that spent the output or revealed the
/// nullifier, or `None` if it is not spent or revealed.
#[cfg(feature = "indexer")]
TransactionId(Option<transaction::Hash>),
/// Response to [`ReadRequest::BlockLocator`] with a block locator object.
@@ -344,11 +345,13 @@ impl TryFrom<ReadResponse> for Response {
| ReadResponse::OrchardSubtrees(_)
| ReadResponse::AddressBalance(_)
| ReadResponse::AddressesTransactionIds(_)
| ReadResponse::TransactionId(_)
| ReadResponse::AddressUtxos(_) => {
Err("there is no corresponding Response for this ReadResponse")
}
#[cfg(feature = "indexer")]
ReadResponse::TransactionId(_) => Err("there is no corresponding Response for this ReadResponse"),
#[cfg(feature = "getblocktemplate-rpcs")]
ReadResponse::ValidBlockProposal => Ok(Response::ValidBlockProposal),

View File

@@ -1383,6 +1383,7 @@ impl Service<ReadRequest> for ReadStateService {
.wait_for_panics()
}
#[cfg(feature = "indexer")]
ReadRequest::SpendingTransactionId(spend) => {
let state = self.clone();

View File

@@ -28,6 +28,8 @@ use crate::{
pub(crate) mod add_subtrees;
pub(crate) mod cache_genesis_roots;
pub(crate) mod fix_tree_key_type;
#[cfg(feature = "indexer")]
pub(crate) mod track_tx_locs_by_spends;
/// The kind of database format change or validity check we're performing.
@@ -348,9 +350,9 @@ impl DbFormatChange {
// Indexing transaction locations by their spent outpoints and revealed nullifiers.
let timer = CodeTimer::start();
warn!("started checking indexes for spending tx ids");
info!("started checking/adding indexes for spending tx ids");
track_tx_locs_by_spends::run(initial_tip_height, db, cancel_receiver)?;
warn!("finished checking indexes for spending tx ids");
info!("finished checking/adding indexes for spending tx ids");
timer.finish(module_path!(), line!(), "indexing spending transaction ids");
};

View File

@@ -31,7 +31,7 @@ use zebra_chain::{
};
use crate::{
request::{FinalizedBlock, Spend},
request::FinalizedBlock,
service::finalized_state::{
disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk},
disk_format::{
@@ -43,6 +43,9 @@ use crate::{
BoxError, HashOrHeight,
};
#[cfg(feature = "indexer")]
use crate::request::Spend;
#[cfg(test)]
mod tests;
@@ -263,6 +266,7 @@ impl ZebraDb {
/// Returns the [`transaction::Hash`] of the transaction that spent or revealed the given
/// [`transparent::OutPoint`] or nullifier, if it is spent or revealed in the finalized state.
#[cfg(feature = "indexer")]
pub fn spending_transaction_hash(&self, spend: &Spend) -> Option<transaction::Hash> {
let tx_loc = match spend {
Spend::OutPoint(outpoint) => self.spending_tx_loc(outpoint)?,
@@ -385,6 +389,9 @@ impl ZebraDb {
.iter()
.map(|(outpoint, _output_loc, utxo)| (*outpoint, utxo.clone()))
.collect();
// TODO: Add `OutputLocation`s to the values in `spent_utxos_by_outpoint` to avoid creating a second hashmap with the same keys
#[cfg(feature = "indexer")]
let out_loc_by_outpoint: HashMap<transparent::OutPoint, OutputLocation> = spent_utxos
.iter()
.map(|(outpoint, out_loc, _utxo)| (*outpoint, *out_loc))
@@ -426,6 +433,7 @@ impl ZebraDb {
new_outputs_by_out_loc,
spent_utxos_by_outpoint,
spent_utxos_by_out_loc,
#[cfg(feature = "indexer")]
out_loc_by_outpoint,
address_balances,
self.finalized_value_pool(),
@@ -483,7 +491,10 @@ impl DiskWriteBatch {
new_outputs_by_out_loc: BTreeMap<OutputLocation, transparent::Utxo>,
spent_utxos_by_outpoint: HashMap<transparent::OutPoint, transparent::Utxo>,
spent_utxos_by_out_loc: BTreeMap<OutputLocation, transparent::Utxo>,
out_loc_by_outpoint: HashMap<transparent::OutPoint, OutputLocation>,
#[cfg(feature = "indexer")] out_loc_by_outpoint: HashMap<
transparent::OutPoint,
OutputLocation,
>,
address_balances: HashMap<transparent::Address, AddressBalanceLocation>,
value_pool: ValueBalance<NonNegative>,
prev_note_commitment_trees: Option<NoteCommitmentTrees>,
@@ -521,6 +532,7 @@ impl DiskWriteBatch {
&new_outputs_by_out_loc,
&spent_utxos_by_outpoint,
&spent_utxos_by_out_loc,
#[cfg(feature = "indexer")]
&out_loc_by_outpoint,
address_balances,
)?;
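Note the shape of the plumbing above: the extra `out_loc_by_outpoint` map only exists as a parameter when the `indexer` feature is compiled in, and the call site gates the matching argument with the same attribute. A self-contained sketch of that cfg'd-parameter pattern, with illustrative names and types:

```rust
// Sketch only: `cargo run` and `cargo run --features indexer` both compile,
// with the second parameter and argument present only in the latter.
use std::collections::HashMap;

fn prepare_batch(
    spent_utxos: &HashMap<u32, String>,
    #[cfg(feature = "indexer")] out_locations: &HashMap<u32, u64>,
) {
    println!("{} spent utxos", spent_utxos.len());
    #[cfg(feature = "indexer")]
    println!("{} output locations", out_locations.len());
}

fn main() {
    let spent_utxos = HashMap::from([(0, "utxo".to_string())]);
    #[cfg(feature = "indexer")]
    let out_locations = HashMap::from([(0, 7u64)]);

    prepare_batch(
        &spent_utxos,
        // The argument list changes shape with the feature, matching the signature.
        #[cfg(feature = "indexer")]
        &out_locations,
    );
}
```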

View File

@@ -479,11 +479,17 @@ impl DiskWriteBatch {
let FinalizedBlock { block, height, .. } = finalized;
// Index each transaction's shielded data
#[cfg(feature = "indexer")]
for (tx_index, transaction) in block.transactions.iter().enumerate() {
let tx_loc = TransactionLocation::from_usize(*height, tx_index);
self.prepare_nullifier_batch(zebra_db, transaction, tx_loc)?;
}
#[cfg(not(feature = "indexer"))]
for transaction in &block.transactions {
self.prepare_nullifier_batch(zebra_db, transaction)?;
}
Ok(())
}
@@ -498,22 +504,27 @@
&mut self,
zebra_db: &ZebraDb,
transaction: &Transaction,
transaction_location: TransactionLocation,
#[cfg(feature = "indexer")] transaction_location: TransactionLocation,
) -> Result<(), BoxError> {
let db = &zebra_db.db;
let sprout_nullifiers = db.cf_handle("sprout_nullifiers").unwrap();
let sapling_nullifiers = db.cf_handle("sapling_nullifiers").unwrap();
let orchard_nullifiers = db.cf_handle("orchard_nullifiers").unwrap();
#[cfg(feature = "indexer")]
let insert_value = transaction_location;
#[cfg(not(feature = "indexer"))]
let insert_value = ();
// Mark sprout, sapling and orchard nullifiers as spent
for sprout_nullifier in transaction.sprout_nullifiers() {
self.zs_insert(&sprout_nullifiers, sprout_nullifier, transaction_location);
self.zs_insert(&sprout_nullifiers, sprout_nullifier, insert_value);
}
for sapling_nullifier in transaction.sapling_nullifiers() {
self.zs_insert(&sapling_nullifiers, sapling_nullifier, transaction_location);
self.zs_insert(&sapling_nullifiers, sapling_nullifier, insert_value);
}
for orchard_nullifier in transaction.orchard_nullifiers() {
self.zs_insert(&orchard_nullifiers, orchard_nullifier, transaction_location);
self.zs_insert(&orchard_nullifiers, orchard_nullifier, insert_value);
}
Ok(())
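The `insert_value` binding above is the other half of the pattern: the same generic insert call writes a `TransactionLocation` when the index is enabled and a zero-sized `()` (key only, no payload) when it is not. A tiny illustrative sketch of that conditional-value idea, with a stand-in for the database write:

```rust
// Sketch only: `store` stands in for a generic key/value write such as
// `zs_insert`; the u64 stands in for a transaction location.
use std::fmt::Debug;

fn store<V: Debug>(key: &str, value: V) {
    // A real implementation might serialize `value` into a column family;
    // `()` would serialize to an empty payload, so only the key carries meaning.
    println!("{key} => {value:?}");
}

fn main() {
    #[cfg(feature = "indexer")]
    let insert_value = 42u64; // e.g. the spending transaction's location
    #[cfg(not(feature = "indexer"))]
    let insert_value = (); // key-only entry: just marks the nullifier as seen

    store("nullifier-1", insert_value);
}
```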

View File

@@ -390,7 +390,10 @@ impl DiskWriteBatch {
new_outputs_by_out_loc: &BTreeMap<OutputLocation, transparent::Utxo>,
spent_utxos_by_outpoint: &HashMap<transparent::OutPoint, transparent::Utxo>,
spent_utxos_by_out_loc: &BTreeMap<OutputLocation, transparent::Utxo>,
out_loc_by_outpoint: &HashMap<transparent::OutPoint, OutputLocation>,
#[cfg(feature = "indexer")] out_loc_by_outpoint: &HashMap<
transparent::OutPoint,
OutputLocation,
>,
mut address_balances: HashMap<transparent::Address, AddressBalanceLocation>,
) -> Result<(), BoxError> {
let db = &zebra_db.db;
@@ -420,6 +423,7 @@
spending_tx_location,
transaction,
spent_utxos_by_outpoint,
#[cfg(feature = "indexer")]
out_loc_by_outpoint,
&address_balances,
)?;
@@ -584,7 +588,10 @@
spending_tx_location: TransactionLocation,
transaction: &Transaction,
spent_utxos_by_outpoint: &HashMap<transparent::OutPoint, transparent::Utxo>,
out_loc_by_outpoint: &HashMap<transparent::OutPoint, OutputLocation>,
#[cfg(feature = "indexer")] out_loc_by_outpoint: &HashMap<
transparent::OutPoint,
OutputLocation,
>,
address_balances: &HashMap<transparent::Address, AddressBalanceLocation>,
) -> Result<(), BoxError> {
let db = &zebra_db.db;
@@ -617,14 +624,17 @@
self.zs_insert(&tx_loc_by_transparent_addr_loc, address_transaction, ());
}
let spent_output_location = out_loc_by_outpoint
.get(&spent_outpoint)
.expect("spent outpoints must already have output locations");
#[cfg(feature = "indexer")]
{
let spent_output_location = out_loc_by_outpoint
.get(&spent_outpoint)
.expect("spent outpoints must already have output locations");
let _ = zebra_db
.tx_loc_by_spent_output_loc_cf()
.with_batch_for_writing(self)
.zs_insert(spent_output_location, &spending_tx_location);
let _ = zebra_db
.tx_loc_by_spent_output_loc_cf()
.with_batch_for_writing(self)
.zs_insert(spent_output_location, &spending_tx_location);
}
}
Ok(())

View File

@@ -29,12 +29,13 @@ use zebra_chain::{
};
use crate::{
request::{Spend, Treestate},
service::check,
ContextuallyVerifiedBlock, HashOrHeight, OutputLocation, TransactionLocation,
ValidateContextError,
request::Treestate, service::check, ContextuallyVerifiedBlock, HashOrHeight, OutputLocation,
TransactionLocation, ValidateContextError,
};
#[cfg(feature = "indexer")]
use crate::request::Spend;
use self::index::TransparentTransfers;
pub mod index;
@@ -1260,6 +1261,7 @@ impl Chain {
/// Returns the [`Hash`](transaction::Hash) of the transaction that spent an output at
/// the provided [`transparent::OutPoint`] or revealed the provided nullifier, if it exists
/// and is spent or revealed by this chain.
#[cfg(feature = "indexer")]
pub fn spending_transaction_hash(&self, spend: &Spend) -> Option<transaction::Hash> {
match spend {
Spend::OutPoint(outpoint) => self.spent_utxos.get(outpoint),

View File

@@ -31,9 +31,12 @@ pub use address::{
utxo::{address_utxos, AddressUtxos},
};
pub use block::{
any_utxo, block, block_header, mined_transaction, spending_transaction_hash,
transaction_hashes_for_block, unspent_utxo,
any_utxo, block, block_header, mined_transaction, transaction_hashes_for_block, unspent_utxo,
};
#[cfg(feature = "indexer")]
pub use block::spending_transaction_hash;
pub use find::{
best_tip, block_locator, depth, finalized_state_contains_block_hash, find_chain_hashes,
find_chain_headers, hash_by_height, height_by_hash, next_median_time_past,

View File

@@ -21,7 +21,6 @@ use zebra_chain::{
};
use crate::{
request::Spend,
response::MinedTx,
service::{
finalized_state::ZebraDb,
@@ -31,6 +30,9 @@ use crate::{
HashOrHeight,
};
#[cfg(feature = "indexer")]
use crate::request::Spend;
/// Returns the [`Block`] with [`block::Hash`] or
/// [`Height`], if it exists in the non-finalized `chain` or finalized `db`.
pub fn block<C>(chain: Option<C>, db: &ZebraDb, hash_or_height: HashOrHeight) -> Option<Arc<Block>>
@@ -186,6 +188,7 @@ where
/// the provided [`transparent::OutPoint`] or revealed the provided nullifier, if it exists
/// and is spent or revealed in the non-finalized `chain` or finalized `db` and its
/// spending transaction hash has been indexed.
#[cfg(feature = "indexer")]
pub fn spending_transaction_hash<C>(
chain: Option<C>,
db: &ZebraDb,

View File

@@ -48,6 +48,8 @@ features = [
"journald",
"prometheus",
"sentry",
"indexer",
"getblocktemplate-rpcs"
]
[features]
@@ -59,8 +61,8 @@ default-release-binaries = ["default", "sentry"]
# Production features that activate extra dependencies, or extra features in dependencies
# Indexer RPC support
indexer-rpcs = ["zebra-rpc/indexer-rpcs"]
# Indexer support
indexer = ["zebra-rpc/indexer-rpcs", "zebra-state/indexer"]
# Mining RPC support
getblocktemplate-rpcs = [

View File

@@ -260,7 +260,7 @@ impl StartCmd {
// TODO: Add a shutdown signal and start the server with `serve_with_incoming_shutdown()` if
// any related unit tests sometimes crash with memory errors
#[cfg(feature = "indexer-rpcs")]
#[cfg(feature = "indexer")]
let indexer_rpc_task_handle =
if let Some(indexer_listen_addr) = config.rpc.indexer_listen_addr {
info!("spawning indexer RPC server");
@@ -278,7 +278,7 @@
tokio::spawn(std::future::pending().in_current_span())
};
#[cfg(not(feature = "indexer-rpcs"))]
#[cfg(not(feature = "indexer"))]
// Spawn a dummy indexer rpc task which doesn't do anything and never finishes.
let indexer_rpc_task_handle: tokio::task::JoinHandle<Result<(), tower::BoxError>> =
tokio::spawn(std::future::pending().in_current_span());
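This `StartCmd` change only renames the gate from `indexer-rpcs` to `indexer`; the shape stays the same: when the feature is off, a placeholder task that never completes is spawned so the later task-joining logic does not need to special-case the missing server. A generic sketch of that fallback-task pattern, simplified (the real code selects the branch with `#[cfg]` rather than `cfg!`):

```rust
// Sketch only: requires the `tokio` (with "macros"/"rt") and `tower` crates.
#[tokio::main]
async fn main() {
    let indexer_rpc_task_handle: tokio::task::JoinHandle<Result<(), tower::BoxError>> =
        if cfg!(feature = "indexer") {
            // Stand-in for spawning the real indexer RPC server task.
            tokio::spawn(async { Ok(()) })
        } else {
            // A future that never resolves, so this handle never reports completion
            // and can be polled alongside the other service handles indefinitely.
            tokio::spawn(std::future::pending())
        };

    // In the real startup code this handle is joined/selected with the other tasks.
    let _ = indexer_rpc_task_handle;
}
```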

View File

@@ -125,7 +125,7 @@
//! Example of how to run the has_spending_transaction_ids test:
//!
//! ```console
//! RUST_LOG=info ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state cargo test has_spending_transaction_ids --release -- --ignored --nocapture
//! RUST_LOG=info ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state cargo test has_spending_transaction_ids --features "indexer" --release -- --ignored --nocapture
//! ```
//!
//! Please refer to the documentation of each test for more information.
@@ -153,7 +153,6 @@ use std::{
collections::HashSet,
env, fs, panic,
path::PathBuf,
sync::Arc,
time::{Duration, Instant},
};
@@ -172,9 +171,7 @@ use zebra_chain::{
use zebra_consensus::ParameterCheckpoint;
use zebra_node_services::rpc_client::RpcRequestClient;
use zebra_rpc::server::OPENED_RPC_ENDPOINT_MSG;
use zebra_state::{
constants::LOCK_FILE_ERROR, state_database_format_version_in_code, SemanticallyVerifiedBlock,
};
use zebra_state::{constants::LOCK_FILE_ERROR, state_database_format_version_in_code};
#[cfg(not(target_os = "windows"))]
use zebra_network::constants::PORT_IN_USE_ERROR;
@@ -191,7 +188,6 @@ use zebra_test::net::random_known_port;
mod common;
use common::{
cached_state::future_blocks,
check::{is_zebrad_version, EphemeralCheck, EphemeralConfig},
config::{
config_file_full_path, configs_dir, default_test_config, external_address_test_config,
@@ -3553,10 +3549,16 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> {
/// spent outpoint and revealed nullifier in the last 100 blocks of a cached state.
#[tokio::test]
#[ignore]
#[cfg(feature = "indexer")]
async fn has_spending_transaction_ids() -> Result<()> {
use std::sync::Arc;
use tower::Service;
use zebra_chain::{chain_tip::ChainTip, transparent::Input};
use zebra_state::{ReadRequest, ReadResponse, Request, Response, Spend};
use zebra_state::{
ReadRequest, ReadResponse, Request, Response, SemanticallyVerifiedBlock, Spend,
};
use common::cached_state::future_blocks;
let _init_guard = zebra_test::init();
let test_type = UpdateZebraCachedStateWithRpc;