Merge branch 'feature/log-database-size' of https://github.com/elijahhampton/zebra into feature/log-database-size

Elijah Hampton 2024-03-19 23:48:50 -04:00
commit c438db59e5
53 changed files with 2391 additions and 391 deletions

View File

@ -1,36 +0,0 @@
# This workflow manages the automatic addition of new issues to specific GitHub projects.
#
# 1. Newly opened issues are added to the "Zebra Backlog" Github project.
# 2. They are also added to the "ZF Engineering Backlog" Github project.
#
# The action makes use of the `add-to-project` action and requires a Github token
# (currently sourced from secrets) to authenticate and perform the addition.
name: Add new issues to GitHub projects
on:
issues:
types:
- opened
jobs:
# Automatically add issues and PRs to the "Zebra Backlog" Github project.
add-issue-to-zebra-backlog-project:
name: Adds all new issues to the "Zebra Backlog" Github project
runs-on: ubuntu-latest
steps:
- uses: actions/add-to-project@v0.6.0
with:
project-url: https://github.com/orgs/ZcashFoundation/projects/9
# TODO: use a PAT from a `bot` account we create for the organization
github-token: ${{ secrets.ACTIONS_PM_GITHUB_PROJECTS }}
# Automatically add issues and PRs to the "Engineering Backlog" Github project.
add-issue-to-zf-backlog-project:
name: Adds all new issues to the "ZF Engineering Backlog" Github project
runs-on: ubuntu-latest
steps:
- uses: actions/add-to-project@v0.6.0
with:
project-url: https://github.com/orgs/ZcashFoundation/projects/13
# TODO: use a PAT from a `bot` account we create for the organization
github-token: ${{ secrets.ACTIONS_PM_GITHUB_PROJECTS }}

View File

@ -5,6 +5,20 @@ All notable changes to Zebra are documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org).
## Unreleased
### Added
- `zebra_chain::Network` methods:
  - `b58_pubkey_address_prefix`, `b58_script_address_prefix`, `num_funding_streams`
### Changed
- Functions that take a `zebra_chain::Network` as an argument have been moved to be methods of `Network` (see the call-site sketch after this list), including:
  - `zebra_chain::parameters`:
    - `genesis::genesis_hash`, `NetworkUpgrade::activation_list`, `NetworkUpgrade::is_max_block_time_enforced`
  - `zebra_chain::work::difficulty::ExpandedDifficulty::target_difficulty_limit`
  - `zebra_consensus::height_for_first_halving`
  - `zebra_consensus::checkpoint::CheckpointList::new` (now `Network::checkpoint_list`)
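A minimal, illustrative sketch of the resulting call-site changes, based on the diffs in this commit (trait-based methods such as `target_difficulty_limit` and `checkpoint_list` additionally need the new `ParameterDifficulty` and `ParameterCheckpoint` traits in scope; sketches for those appear next to their diffs below):

```rust
use zebra_chain::{block::Height, parameters::Network};

fn example(network: Network) {
    // Before: NetworkUpgrade::activation_list(network)
    let upgrades = network.activation_list();

    // Before: NetworkUpgrade::is_max_block_time_enforced(network, height)
    let enforced = network.is_max_block_time_enforced(Height(2_000_000));

    println!("{} upgrades, max block time enforced: {enforced}", upgrades.len());
}
```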
## [Zebra 1.6.0](https://github.com/ZcashFoundation/zebra/releases/tag/v1.6.0) - 2024-02-23
This release exposes the shielded scanning functionality through an initial

View File

@ -239,7 +239,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -250,7 +250,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -409,7 +409,7 @@ dependencies = [
"regex",
"rustc-hash",
"shlex",
"syn 2.0.50",
"syn 2.0.52",
"which",
]
@ -805,7 +805,7 @@ dependencies = [
"heck 0.4.1",
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -1066,7 +1066,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -1090,7 +1090,7 @@ dependencies = [
"codespan-reporting",
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -1107,7 +1107,7 @@ checksum = "2fa16a70dd58129e4dfffdff535fb1bce66673f7bbeec4a5a1765a504e1ccd84"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -1155,7 +1155,7 @@ dependencies = [
"proc-macro2",
"quote",
"strsim 0.10.0",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -1177,7 +1177,7 @@ checksum = "c5a91391accf613803c2a9bf9abccdbaa07c54b4244a5b64883f9c3c137c86be"
dependencies = [
"darling_core 0.20.6",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -1250,7 +1250,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -1546,7 +1546,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -2500,7 +2500,7 @@ checksum = "38b4faf00617defe497754acde3024865bc143d44a86799b24e191ecff91354f"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -2541,9 +2541,9 @@ dependencies = [
[[package]]
name = "mio"
version = "0.8.10"
version = "0.8.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09"
checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c"
dependencies = [
"libc",
"wasi 0.11.0+wasi-snapshot-preview1",
@ -2935,7 +2935,7 @@ dependencies = [
"pest_meta",
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -2976,7 +2976,7 @@ checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -3077,7 +3077,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5"
dependencies = [
"proc-macro2",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -3200,7 +3200,7 @@ dependencies = [
"prost",
"prost-types",
"regex",
"syn 2.0.50",
"syn 2.0.52",
"tempfile",
"which",
]
@ -3215,7 +3215,7 @@ dependencies = [
"itertools 0.11.0",
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -3923,7 +3923,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -4008,7 +4008,20 @@ dependencies = [
"darling 0.20.6",
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
name = "serde_yaml"
version = "0.9.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fd075d994154d4a774f95b51fb96bdc2832b0ea48425c92546073816cda1f2f"
dependencies = [
"indexmap 2.2.3",
"itoa",
"ryu",
"serde",
"unsafe-libyaml",
]
[[package]]
@ -4224,9 +4237,9 @@ dependencies = [
[[package]]
name = "syn"
version = "2.0.50"
version = "2.0.52"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74f1bdc9872430ce9b75da68329d1c1746faf50ffac5f19e02b71e37ff881ffb"
checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07"
dependencies = [
"proc-macro2",
"quote",
@ -4325,7 +4338,7 @@ checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -4448,7 +4461,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -4632,7 +4645,7 @@ dependencies = [
"proc-macro2",
"prost-build",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -4645,7 +4658,7 @@ dependencies = [
"proc-macro2",
"prost-build",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -4775,7 +4788,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -4996,6 +5009,12 @@ dependencies = [
"subtle",
]
[[package]]
name = "unsafe-libyaml"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b"
[[package]]
name = "untrusted"
version = "0.7.1"
@ -5207,7 +5226,7 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
"wasm-bindgen-shared",
]
@ -5241,7 +5260,7 @@ checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@ -6029,10 +6048,14 @@ dependencies = [
"hex",
"itertools 0.12.1",
"jsonrpc",
"quote",
"regex",
"reqwest",
"serde",
"serde_json",
"serde_yaml",
"structopt",
"syn 2.0.52",
"thiserror",
"tinyvec",
"tokio",
@ -6133,7 +6156,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]
[[package]]
@ -6153,5 +6176,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.50",
"syn 2.0.52",
]

View File

@ -20,6 +20,7 @@
- [Shielded Scanning gRPC Server](user/shielded-scan-grpc-server.md)
- [Kibana blockchain explorer](user/elasticsearch.md)
- [Forking the Zcash Testnet with Zebra](user/fork-zebra-testnet.md)
- [OpenAPI specification](user/openapi.md)
- [Troubleshooting](user/troubleshooting.md)
- [Developer Documentation](dev.md)
- [Contribution Guide](CONTRIBUTING.md)

book/src/user/openapi.md: new file, 47 lines
View File

@ -0,0 +1,47 @@
# Zebra OpenAPI specification
The Zebra RPC methods are a collection of endpoints for interacting with the Zcash blockchain. They are used by wallets, block explorers, and web and mobile applications to retrieve information from, and send information to, the blockchain.
While the Zebra source code and RPC methods are well-documented, accessing this information typically involves searching for each function within the [Zebra crate documentation](https://docs.rs/zebrad/latest/zebrad/#zebra-crates), which may be inconvenient for users who are not familiar with Rust development.
To address this issue, the Zebra team has created an [OpenAPI](https://www.openapis.org/) specification in the [YAML](https://en.wikipedia.org/wiki/YAML) format.
The Zebra OpenAPI specification is stored in a file named `openapi.yaml` at the root of the project. The latest version of the specification is always available [here](https://github.com/ZcashFoundation/zebra/blob/main/openapi.yaml).
## Usage
There are several ways to use the specification. If you are unfamiliar with OpenAPI and Swagger, the simplest is to open the [Swagger Editor](https://editor.swagger.io/) and paste the specification there.
![image info](openapi1.png)
To send data to and receive data from the blockchain within the Swagger web app, you'll need a Zebra node with the RPC endpoint enabled.
To enable it, start `zebrad` with a custom configuration. Generate a default configuration by running the following command:
```console
mkdir -p ~/.config
zebrad generate -o ~/.config/zebrad.toml
```
Then, add the IP address and port to the `rpc` section of the configuration:
```toml
[rpc]
listen_addr = "127.0.0.1:8232"
```
If you modify the address or port in the Zebra configuration, be sure to update them in the `openapi.yaml` specification as well.
Start Zebra with the following command:
```console
zebrad
```
You should now be able to send requests and receive responses within Swagger.
![image info](openapi2.png)
![image info](openapi3.png)
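If you prefer to query the endpoint outside Swagger, here is a minimal, hypothetical sketch (not part of Zebra) that sends the same kind of JSON-RPC request from Rust. It assumes the node is listening on the `127.0.0.1:8232` address configured above, and that the `tokio`, `reqwest` (with its `json` feature), and `serde_json` crates are available:

```rust
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // JSON-RPC request for `getinfo`, sent to the address from the
    // `[rpc]` section of `zebrad.toml`.
    let response = reqwest::Client::new()
        .post("http://127.0.0.1:8232")
        .json(&json!({
            "jsonrpc": "2.0",
            "id": "openapi-example",
            "method": "getinfo",
            "params": []
        }))
        .send()
        .await?
        .text()
        .await?;

    println!("{response}");
    Ok(())
}
```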

book/src/user/openapi1.png: new binary file, 190 KiB (not shown)
book/src/user/openapi2.png: new binary file, 178 KiB (not shown)
book/src/user/openapi3.png: new binary file, 174 KiB (not shown)

openapi.yaml: new file, 1007 lines (diff suppressed because it is too large)

View File

@ -31,7 +31,6 @@ async-error = [
# Mining RPC support
getblocktemplate-rpcs = [
"zcash_address",
]
# Experimental shielded scanning support
@ -132,8 +131,7 @@ serde_json = { version = "1.0.113", optional = true }
# Production feature async-error and testing feature proptest-impl
tokio = { version = "1.36.0", optional = true }
# Production feature getblocktemplate-rpcs
zcash_address = { version = "0.3.1", optional = true }
zcash_address = { version = "0.3.1" }
# Experimental feature shielded-scan
zcash_client_backend = { version = "0.10.0-rc.1", optional = true }

View File

@ -11,6 +11,7 @@
//! A root of a note commitment tree is associated with each treestate.
use std::{
default::Default,
fmt,
hash::{Hash, Hasher},
io,
@ -710,7 +711,7 @@ impl From<Vec<pallas::Base>> for NoteCommitmentTree {
/// It is likely that the dense format will be used in future RPCs, in which
/// case the current implementation will have to change and use the format
/// compatible with [`Frontier`](bridgetree::Frontier) instead.
#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)]
#[derive(Clone, Debug, Default, Eq, PartialEq, serde::Serialize)]
pub struct SerializedTree(Vec<u8>);
impl From<&NoteCommitmentTree> for SerializedTree {

View File

@ -1,21 +1,7 @@
//! Genesis consensus parameters for each Zcash network.
use crate::{block, parameters::Network};
/// The previous block hash for the genesis block.
///
/// All known networks use the Bitcoin `null` value for the parent of the
/// genesis block. (In Bitcoin, `null` is `[0; 32]`.)
pub const GENESIS_PREVIOUS_BLOCK_HASH: block::Hash = block::Hash([0; 32]);
/// Returns the hash for the genesis block in `network`.
pub fn genesis_hash(network: Network) -> block::Hash {
match network {
// zcash-cli getblockhash 0
Network::Mainnet => "00040fe8ec8471911baa1db1266ea15dd06b4a8a5c453883c000b031973dce08",
// zcash-cli -testnet getblockhash 0
Network::Testnet => "05a60a92d99d85997cce3b87616c089f6124d7342af37106edc76126334a2c38",
}
.parse()
.expect("hard-coded hash parses")
}
pub const GENESIS_PREVIOUS_BLOCK_HASH: crate::block::Hash = crate::block::Hash([0; 32]);

View File

@ -5,7 +5,7 @@ use std::{fmt, str::FromStr};
use thiserror::Error;
use crate::{
block::{Height, HeightDiff},
block::{self, Height, HeightDiff},
parameters::NetworkUpgrade::Canopy,
};
@ -63,6 +63,35 @@ pub enum Network {
Testnet,
}
use zcash_primitives::consensus::{Network as ZcashPrimitivesNetwork, Parameters as _};
impl Network {
/// Returns the human-readable prefix for Base58Check-encoded transparent
/// pay-to-public-key-hash payment addresses for the network.
pub fn b58_pubkey_address_prefix(&self) -> [u8; 2] {
<ZcashPrimitivesNetwork>::from(*self).b58_pubkey_address_prefix()
}
/// Returns the human-readable prefix for Base58Check-encoded transparent pay-to-script-hash
/// payment addresses for the network.
pub fn b58_script_address_prefix(&self) -> [u8; 2] {
<ZcashPrimitivesNetwork>::from(*self).b58_script_address_prefix()
}
/// Returns true if the maximum block time rule is active for `network` and `height`.
///
/// Always returns true if `network` is the Mainnet.
/// If `network` is the Testnet, the `height` should be at least
/// TESTNET_MAX_TIME_START_HEIGHT to return true.
/// Returns false otherwise.
///
/// Part of the consensus rules at <https://zips.z.cash/protocol/protocol.pdf#blockheader>
pub fn is_max_block_time_enforced(self, height: block::Height) -> bool {
match self {
Network::Mainnet => true,
Network::Testnet => height >= super::TESTNET_MAX_TIME_START_HEIGHT,
}
}
}
impl From<Network> for &'static str {
fn from(network: Network) -> &'static str {
match network {

View File

@ -1,7 +1,10 @@
use proptest::prelude::*;
use super::super::{Network, ZIP_212_GRACE_PERIOD_DURATION};
use crate::parameters::NetworkUpgrade;
use crate::{
block::Height,
parameters::{NetworkUpgrade, TESTNET_MAX_TIME_START_HEIGHT},
};
proptest! {
/// Check that the mandatory checkpoint is after the ZIP-212 grace period.
@ -23,4 +26,14 @@ proptest! {
assert!(network.mandatory_checkpoint_height() >= grace_period_end_height);
}
#[test]
/// Asserts that the maximum block time rule is always enforced on Mainnet, and
/// is enforced on Testnet only from `TESTNET_MAX_TIME_START_HEIGHT` onwards.
fn max_block_times_correct_enforcement(height in any::<Height>()) {
let _init_guard = zebra_test::init();
assert!(Network::Mainnet.is_max_block_time_enforced(height));
assert_eq!(Network::Testnet.is_max_block_time_enforced(height), TESTNET_MAX_TIME_START_HEIGHT <= height);
}
}

View File

@ -128,7 +128,7 @@ const FAKE_TESTNET_ACTIVATION_HEIGHTS: &[(block::Height, NetworkUpgrade)] = &[
/// The Consensus Branch Id, used to bind transactions and blocks to a
/// particular network upgrade.
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
#[derive(Copy, Clone, Debug, Default, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct ConsensusBranchId(u32);
impl ConsensusBranchId {
@ -229,7 +229,7 @@ const TESTNET_MINIMUM_DIFFICULTY_START_HEIGHT: block::Height = block::Height(299
/// <https://zips.z.cash/protocol/protocol.pdf#blockheader>
pub const TESTNET_MAX_TIME_START_HEIGHT: block::Height = block::Height(653_606);
impl NetworkUpgrade {
impl Network {
/// Returns a map between activation heights and network upgrades for `network`,
/// in ascending height order.
///
@ -241,7 +241,7 @@ impl NetworkUpgrade {
/// When the environment variable TEST_FAKE_ACTIVATION_HEIGHTS is set
/// and it's a test build, this returns a list of fake activation heights
/// used by some tests.
pub fn activation_list(network: Network) -> BTreeMap<block::Height, NetworkUpgrade> {
pub fn activation_list(&self) -> BTreeMap<block::Height, NetworkUpgrade> {
let (mainnet_heights, testnet_heights) = {
#[cfg(not(feature = "zebra-test"))]
{
@ -269,7 +269,7 @@ impl NetworkUpgrade {
(MAINNET_ACTIVATION_HEIGHTS, TESTNET_ACTIVATION_HEIGHTS)
}
};
match network {
match self {
Mainnet => mainnet_heights,
Testnet => testnet_heights,
}
@ -277,10 +277,12 @@ impl NetworkUpgrade {
.cloned()
.collect()
}
}
impl NetworkUpgrade {
/// Returns the current network upgrade for `network` and `height`.
pub fn current(network: Network, height: block::Height) -> NetworkUpgrade {
NetworkUpgrade::activation_list(network)
network
.activation_list()
.range(..=height)
.map(|(_, nu)| *nu)
.next_back()
@ -292,7 +294,8 @@ impl NetworkUpgrade {
/// Returns None if the next upgrade has not been implemented in Zebra
/// yet.
pub fn next(network: Network, height: block::Height) -> Option<NetworkUpgrade> {
NetworkUpgrade::activation_list(network)
network
.activation_list()
.range((Excluded(height), Unbounded))
.map(|(_, nu)| *nu)
.next()
@ -303,7 +306,8 @@ impl NetworkUpgrade {
/// Returns None if this network upgrade is a future upgrade, and its
/// activation height has not been set yet.
pub fn activation_height(&self, network: Network) -> Option<block::Height> {
NetworkUpgrade::activation_list(network)
network
.activation_list()
.iter()
.filter(|(_, nu)| nu == &self)
.map(|(height, _)| *height)
@ -316,7 +320,7 @@ impl NetworkUpgrade {
/// Use [`NetworkUpgrade::activation_height`] to get the specific network
/// upgrade.
pub fn is_activation_height(network: Network, height: block::Height) -> bool {
NetworkUpgrade::activation_list(network).contains_key(&height)
network.activation_list().contains_key(&height)
}
/// Returns an unordered mapping between NetworkUpgrades and their ConsensusBranchIds.
@ -445,20 +449,6 @@ impl NetworkUpgrade {
NetworkUpgrade::current(network, height).averaging_window_timespan()
}
/// Returns true if the maximum block time rule is active for `network` and `height`.
///
/// Always returns true if `network` is the Mainnet.
/// If `network` is the Testnet, the `height` should be at least
/// TESTNET_MAX_TIME_START_HEIGHT to return true.
/// Returns false otherwise.
///
/// Part of the consensus rules at <https://zips.z.cash/protocol/protocol.pdf#blockheader>
pub fn is_max_block_time_enforced(network: Network, height: block::Height) -> bool {
match network {
Network::Mainnet => true,
Network::Testnet => height >= TESTNET_MAX_TIME_START_HEIGHT,
}
}
/// Returns the NetworkUpgrade given an u32 as ConsensusBranchId
pub fn from_branch_id(branch_id: u32) -> Option<NetworkUpgrade> {
CONSENSUS_BRANCH_IDS

View File

@ -14,14 +14,14 @@ use NetworkUpgrade::*;
fn activation_bijective() {
let _init_guard = zebra_test::init();
let mainnet_activations = NetworkUpgrade::activation_list(Mainnet);
let mainnet_activations = Mainnet.activation_list();
let mainnet_heights: HashSet<&block::Height> = mainnet_activations.keys().collect();
assert_eq!(MAINNET_ACTIVATION_HEIGHTS.len(), mainnet_heights.len());
let mainnet_nus: HashSet<&NetworkUpgrade> = mainnet_activations.values().collect();
assert_eq!(MAINNET_ACTIVATION_HEIGHTS.len(), mainnet_nus.len());
let testnet_activations = NetworkUpgrade::activation_list(Testnet);
let testnet_activations = Testnet.activation_list();
let testnet_heights: HashSet<&block::Height> = testnet_activations.keys().collect();
assert_eq!(TESTNET_ACTIVATION_HEIGHTS.len(), testnet_heights.len());
@ -46,7 +46,7 @@ fn activation_extremes_testnet() {
fn activation_extremes(network: Network) {
// The first three upgrades are Genesis, BeforeOverwinter, and Overwinter
assert_eq!(
NetworkUpgrade::activation_list(network).get(&block::Height(0)),
network.activation_list().get(&block::Height(0)),
Some(&Genesis)
);
assert_eq!(Genesis.activation_height(network), Some(block::Height(0)));
@ -62,7 +62,7 @@ fn activation_extremes(network: Network) {
);
assert_eq!(
NetworkUpgrade::activation_list(network).get(&block::Height(1)),
network.activation_list().get(&block::Height(1)),
Some(&BeforeOverwinter)
);
assert_eq!(
@ -91,7 +91,7 @@ fn activation_extremes(network: Network) {
// We assume that the last upgrade we know about continues forever
// (even if we suspect that won't be true)
assert_ne!(
NetworkUpgrade::activation_list(network).get(&block::Height::MAX),
network.activation_list().get(&block::Height::MAX),
Some(&Genesis)
);
assert!(!NetworkUpgrade::is_activation_height(
@ -121,7 +121,7 @@ fn activation_consistent_testnet() {
/// Check that the `activation_height`, `is_activation_height`,
/// `current`, and `next` functions are consistent for `network`.
fn activation_consistent(network: Network) {
let activation_list = NetworkUpgrade::activation_list(network);
let activation_list = network.activation_list();
let network_upgrades: HashSet<&NetworkUpgrade> = activation_list.values().collect();
for &network_upgrade in network_upgrades {

View File

@ -25,24 +25,12 @@ pub fn decrypts_successfully(transaction: &Transaction, network: Network, height
if let Some(bundle) = alt_tx.sapling_bundle() {
for output in bundle.shielded_outputs().iter() {
let recovery = match network {
Network::Mainnet => {
zcash_primitives::sapling::note_encryption::try_sapling_output_recovery(
&zcash_primitives::consensus::MAIN_NETWORK,
alt_height,
&null_sapling_ovk,
output,
)
}
Network::Testnet => {
zcash_primitives::sapling::note_encryption::try_sapling_output_recovery(
&zcash_primitives::consensus::TEST_NETWORK,
alt_height,
&null_sapling_ovk,
output,
)
}
};
let recovery = zcash_primitives::sapling::note_encryption::try_sapling_output_recovery(
&<zcash_primitives::consensus::Network>::from(network),
alt_height,
&null_sapling_ovk,
output,
);
if recovery.is_none() {
return false;
}

View File

@ -11,6 +11,7 @@
//! A root of a note commitment tree is associated with each treestate.
use std::{
default::Default,
fmt,
hash::{Hash, Hasher},
io,
@ -692,7 +693,7 @@ impl From<Vec<jubjub::Fq>> for NoteCommitmentTree {
/// It is likely that the dense format will be used in future RPCs, in which
/// case the current implementation will have to change and use the format
/// compatible with [`Frontier`] instead.
#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)]
#[derive(Clone, Debug, Default, Eq, PartialEq, serde::Serialize)]
pub struct SerializedTree(Vec<u8>);
impl From<&NoteCommitmentTree> for SerializedTree {

View File

@ -15,20 +15,6 @@ use crate::{
#[cfg(test)]
use proptest::prelude::*;
/// Magic numbers used to identify what networks Transparent Addresses
/// are associated with.
mod magics {
pub mod p2sh {
pub const MAINNET: [u8; 2] = [0x1C, 0xBD];
pub const TESTNET: [u8; 2] = [0x1C, 0xBA];
}
pub mod p2pkh {
pub const MAINNET: [u8; 2] = [0x1C, 0xB8];
pub const TESTNET: [u8; 2] = [0x1D, 0x25];
}
}
/// Transparent Zcash Addresses
///
/// In Bitcoin a single byte is used for the version field identifying
@ -118,24 +104,14 @@ impl ZcashSerialize for Address {
network,
script_hash,
} => {
// Dev network doesn't have a recommendation so we
// default to testnet bytes if it's not mainnet.
match *network {
Network::Mainnet => writer.write_all(&magics::p2sh::MAINNET[..])?,
_ => writer.write_all(&magics::p2sh::TESTNET[..])?,
}
writer.write_all(&network.b58_script_address_prefix())?;
writer.write_all(script_hash)?
}
Address::PayToPublicKeyHash {
network,
pub_key_hash,
} => {
// Dev network doesn't have a recommendation so we
// default to testnet bytes if it's not mainnet.
match *network {
Network::Mainnet => writer.write_all(&magics::p2pkh::MAINNET[..])?,
_ => writer.write_all(&magics::p2pkh::TESTNET[..])?,
}
writer.write_all(&network.b58_pubkey_address_prefix())?;
writer.write_all(pub_key_hash)?
}
}
@ -153,22 +129,30 @@ impl ZcashDeserialize for Address {
reader.read_exact(&mut hash_bytes)?;
match version_bytes {
magics::p2sh::MAINNET => Ok(Address::PayToScriptHash {
network: Network::Mainnet,
script_hash: hash_bytes,
}),
magics::p2sh::TESTNET => Ok(Address::PayToScriptHash {
network: Network::Testnet,
script_hash: hash_bytes,
}),
magics::p2pkh::MAINNET => Ok(Address::PayToPublicKeyHash {
network: Network::Mainnet,
pub_key_hash: hash_bytes,
}),
magics::p2pkh::TESTNET => Ok(Address::PayToPublicKeyHash {
network: Network::Testnet,
pub_key_hash: hash_bytes,
}),
zcash_primitives::constants::mainnet::B58_SCRIPT_ADDRESS_PREFIX => {
Ok(Address::PayToScriptHash {
network: Network::Mainnet,
script_hash: hash_bytes,
})
}
zcash_primitives::constants::testnet::B58_SCRIPT_ADDRESS_PREFIX => {
Ok(Address::PayToScriptHash {
network: Network::Testnet,
script_hash: hash_bytes,
})
}
zcash_primitives::constants::mainnet::B58_PUBKEY_ADDRESS_PREFIX => {
Ok(Address::PayToPublicKeyHash {
network: Network::Mainnet,
pub_key_hash: hash_bytes,
})
}
zcash_primitives::constants::testnet::B58_PUBKEY_ADDRESS_PREFIX => {
Ok(Address::PayToPublicKeyHash {
network: Network::Testnet,
pub_key_hash: hash_bytes,
})
}
_ => Err(SerializationError::Parse("bad t-addr version/type")),
}
}

View File

@ -384,32 +384,6 @@ impl ExpandedDifficulty {
U256::from_little_endian(&hash.0).into()
}
/// Returns the easiest target difficulty allowed on `network`.
///
/// # Consensus
///
/// See `PoWLimit` in the Zcash specification:
/// <https://zips.z.cash/protocol/protocol.pdf#constants>
pub fn target_difficulty_limit(network: Network) -> ExpandedDifficulty {
let limit: U256 = match network {
/* 2^243 - 1 */
Network::Mainnet => (U256::one() << 243) - 1,
/* 2^251 - 1 */
Network::Testnet => (U256::one() << 251) - 1,
};
// `zcashd` converts the PoWLimit into a compact representation before
// using it to perform difficulty filter checks.
//
// The Zcash specification converts to compact for the default difficulty
// filter, but not for testnet minimum difficulty blocks. (ZIP 205 and
// ZIP 208 don't specify this conversion either.) See #1277 for details.
ExpandedDifficulty(limit)
.to_compact()
.to_expanded()
.expect("difficulty limits are valid expanded values")
}
/// Calculate the CompactDifficulty for an expanded difficulty.
///
/// # Consensus
@ -681,7 +655,8 @@ impl PartialCumulativeWork {
pub fn difficulty_multiplier_for_display(&self, network: Network) -> f64 {
// This calculation is similar to the `getdifficulty` RPC, see that code for details.
let pow_limit = ExpandedDifficulty::target_difficulty_limit(network)
let pow_limit = network
.target_difficulty_limit()
.to_compact()
.to_work()
.expect("target difficult limit is valid work");
@ -705,6 +680,41 @@ impl PartialCumulativeWork {
}
}
/// Network methods related to Difficulty
pub trait ParameterDifficulty {
/// Returns the easiest target difficulty allowed on `network`.
///
/// # Consensus
///
/// See `PoWLimit` in the Zcash specification:
/// <https://zips.z.cash/protocol/protocol.pdf#constants>
fn target_difficulty_limit(&self) -> ExpandedDifficulty;
}
impl ParameterDifficulty for Network {
/// Returns the easiest target difficulty allowed on `network`.
/// See [`ParameterDifficulty::target_difficulty_limit`]
fn target_difficulty_limit(&self) -> ExpandedDifficulty {
let limit: U256 = match self {
/* 2^243 - 1 */
Network::Mainnet => (U256::one() << 243) - 1,
/* 2^251 - 1 */
Network::Testnet => (U256::one() << 251) - 1,
};
// `zcashd` converts the PoWLimit into a compact representation before
// using it to perform difficulty filter checks.
//
// The Zcash specification converts to compact for the default difficulty
// filter, but not for testnet minimum difficulty blocks. (ZIP 205 and
// ZIP 208 don't specify this conversion either.) See #1277 for details.
ExpandedDifficulty(limit)
.to_compact()
.to_expanded()
.expect("difficulty limits are valid expanded values")
}
}
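A hypothetical usage sketch for the new trait (the names come from the diff above; the ordering assertion follows from the `2^243 - 1` and `2^251 - 1` limits):

```rust
use zebra_chain::{parameters::Network, work::difficulty::ParameterDifficulty as _};

fn main() {
    // The PoWLimit now comes from the network itself instead of a free function.
    let mainnet_limit = Network::Mainnet.target_difficulty_limit();
    let testnet_limit = Network::Testnet.target_difficulty_limit();

    // Testnet's PoWLimit (2^251 - 1) is numerically larger, i.e. easier,
    // than Mainnet's (2^243 - 1).
    assert!(mainnet_limit < testnet_limit);
}
```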
impl From<Work> for PartialCumulativeWork {
fn from(work: Work) -> Self {
PartialCumulativeWork(work.0)

View File

@ -309,9 +309,9 @@ fn block_difficulty_for_network(network: Network) -> Result<(), Report> {
/// SPANDOC: Check the PoWLimit for block {?height, ?network, ?threshold, ?hash}
{
// the consensus rule
assert!(threshold <= ExpandedDifficulty::target_difficulty_limit(network));
assert!(threshold <= network.target_difficulty_limit());
// check that ordering is transitive, we checked `hash <= threshold` above
assert!(hash <= ExpandedDifficulty::target_difficulty_limit(network));
assert!(hash <= network.target_difficulty_limit());
}
/// SPANDOC: Check compact round-trip for block {?height, ?network}
@ -376,7 +376,7 @@ fn genesis_block_difficulty_for_network(network: Network) -> Result<(), Report>
{
assert_eq!(
threshold,
ExpandedDifficulty::target_difficulty_limit(network),
network.target_difficulty_limit(),
"genesis block difficulty thresholds must be equal to the PoWLimit"
);
}
@ -477,12 +477,12 @@ fn check_testnet_minimum_difficulty_block(height: block::Height) -> Result<(), R
/// SPANDOC: Check that the testnet minimum difficulty is the PoWLimit {?height, ?threshold, ?hash}
{
assert_eq!(threshold, ExpandedDifficulty::target_difficulty_limit(Network::Testnet),
assert_eq!(threshold, Network::Testnet.target_difficulty_limit(),
"testnet minimum difficulty thresholds should be equal to the PoWLimit. Hint: Blocks with large gaps are allowed to have the minimum difficulty, but it's not required.");
// all blocks pass the minimum difficulty threshold, even if they aren't minimum
// difficulty blocks, because it's the lowest permitted difficulty
assert!(
hash <= ExpandedDifficulty::target_difficulty_limit(Network::Testnet),
hash <= Network::Testnet.target_difficulty_limit(),
"testnet minimum difficulty hashes must be less than the PoWLimit"
);
}

View File

@ -9,7 +9,10 @@ use zebra_chain::{
block::{Block, Hash, Header, Height},
parameters::{Network, NetworkUpgrade},
transaction,
work::{difficulty::ExpandedDifficulty, equihash},
work::{
difficulty::{ExpandedDifficulty, ParameterDifficulty as _},
equihash,
},
};
use crate::{error::*, parameters::SLOW_START_INTERVAL};
@ -78,13 +81,13 @@ pub fn difficulty_threshold_is_valid(
// The PowLimit check is part of `Threshold()` in the spec, but it doesn't
// actually depend on any previous blocks.
if difficulty_threshold > ExpandedDifficulty::target_difficulty_limit(network) {
if difficulty_threshold > network.target_difficulty_limit() {
Err(BlockError::TargetDifficultyLimit(
*height,
*hash,
difficulty_threshold,
network,
ExpandedDifficulty::target_difficulty_limit(network),
network.target_difficulty_limit(),
))?;
}

View File

@ -48,22 +48,6 @@ pub fn funding_stream_values(
Ok(results)
}
/// Returns the minimum height after the first halving
/// as described in [protocol specification §7.10][7.10]
///
/// [7.10]: https://zips.z.cash/protocol/protocol.pdf#fundingstreams
pub fn height_for_first_halving(network: Network) -> Height {
// First halving on Mainnet is at Canopy
// while in Testnet is at block constant height of `1_116_000`
// https://zips.z.cash/protocol/protocol.pdf#zip214fundingstreams
match network {
Network::Mainnet => Canopy
.activation_height(network)
.expect("canopy activation height should be available"),
Network::Testnet => FIRST_HALVING_TESTNET,
}
}
/// Returns the address change period
/// as described in [protocol specification §7.10][7.10]
///
@ -78,7 +62,7 @@ fn funding_stream_address_period(height: Height, network: Network) -> u32 {
// <https://doc.rust-lang.org/stable/reference/expressions/operator-expr.html#arithmetic-and-logical-binary-operators>
// This is the same as `floor()`, because these numbers are all positive.
let height_after_first_halving = height - height_for_first_halving(network);
let height_after_first_halving = height - network.height_for_first_halving();
let address_period = (height_after_first_halving + POST_BLOSSOM_HALVING_INTERVAL)
/ FUNDING_STREAM_ADDRESS_CHANGE_INTERVAL;
@ -93,10 +77,7 @@ fn funding_stream_address_period(height: Height, network: Network) -> u32 {
///
/// [7.10]: https://zips.z.cash/protocol/protocol.pdf#fundingstreams
fn funding_stream_address_index(height: Height, network: Network) -> usize {
let num_addresses = match network {
Network::Mainnet => FUNDING_STREAMS_NUM_ADDRESSES_MAINNET,
Network::Testnet => FUNDING_STREAMS_NUM_ADDRESSES_TESTNET,
};
let num_addresses = network.num_funding_streams();
let index = 1u32
.checked_add(funding_stream_address_period(height, network))

View File

@ -131,11 +131,7 @@ mod test {
fn halving_for_network(network: Network) -> Result<(), Report> {
let blossom_height = Blossom.activation_height(network).unwrap();
let first_halving_height = match network {
Network::Mainnet => Canopy.activation_height(network).unwrap(),
// Based on "7.8 Calculation of Block Subsidy and Founders' Reward"
Network::Testnet => Height(1_116_000),
};
let first_halving_height = network.height_for_first_halving();
assert_eq!(
1,
@ -261,11 +257,7 @@ mod test {
fn block_subsidy_for_network(network: Network) -> Result<(), Report> {
let blossom_height = Blossom.activation_height(network).unwrap();
let first_halving_height = match network {
Network::Mainnet => Canopy.activation_height(network).unwrap(),
// Based on "7.8 Calculation of Block Subsidy and Founders' Reward"
Network::Testnet => Height(1_116_000),
};
let first_halving_height = network.height_for_first_halving();
// After slow-start mining and before Blossom the block subsidy is 12.5 ZEC
// https://z.cash/support/faq/#what-is-slow-start-mining

View File

@ -19,7 +19,7 @@ use zebra_chain::{
parameters::{Network, NetworkUpgrade},
serialization::{ZcashDeserialize, ZcashDeserializeInto},
transaction::{arbitrary::transaction_to_fake_v5, LockTime, Transaction},
work::difficulty::{ExpandedDifficulty, INVALID_COMPACT_DIFFICULTY},
work::difficulty::{ParameterDifficulty as _, INVALID_COMPACT_DIFFICULTY},
};
use zebra_script::CachedFfiTransaction;
use zebra_test::transcript::{ExpectedTranscriptError, Transcript};
@ -241,7 +241,7 @@ fn difficulty_validation_failure() -> Result<(), Report> {
hash,
difficulty_threshold,
Network::Mainnet,
ExpandedDifficulty::target_difficulty_limit(Network::Mainnet),
Network::Mainnet.target_difficulty_limit(),
);
assert_eq!(expected, result);

View File

@ -42,7 +42,7 @@ use crate::{
TargetHeight::{self, *},
},
error::BlockError,
BoxError,
BoxError, ParameterCheckpoint as _,
};
pub(crate) mod list;
@ -207,7 +207,7 @@ where
initial_tip: Option<(block::Height, block::Hash)>,
state_service: S,
) -> Self {
let checkpoint_list = CheckpointList::new(network);
let checkpoint_list = network.checkpoint_list();
let max_height = checkpoint_list.max_height();
tracing::info!(
?max_height,

View File

@ -17,7 +17,7 @@ use std::{
};
use zebra_chain::block;
use zebra_chain::parameters::{genesis_hash, Network};
use zebra_chain::parameters::Network;
/// The hard-coded checkpoints for mainnet, generated using the
/// `zebra-checkpoints` tool.
@ -43,6 +43,47 @@ const MAINNET_CHECKPOINTS: &str = include_str!("main-checkpoints.txt");
/// information.
const TESTNET_CHECKPOINTS: &str = include_str!("test-checkpoints.txt");
/// Network methods related to checkpoints
pub trait ParameterCheckpoint {
/// Returns the hash for the genesis block in `network`.
fn genesis_hash(&self) -> zebra_chain::block::Hash;
/// Returns the hard-coded checkpoint list for `network`.
fn checkpoint_list(&self) -> CheckpointList;
}
impl ParameterCheckpoint for Network {
fn genesis_hash(&self) -> zebra_chain::block::Hash {
match self {
// zcash-cli getblockhash 0
Network::Mainnet => "00040fe8ec8471911baa1db1266ea15dd06b4a8a5c453883c000b031973dce08",
// zcash-cli -testnet getblockhash 0
Network::Testnet => "05a60a92d99d85997cce3b87616c089f6124d7342af37106edc76126334a2c38",
}
.parse()
.expect("hard-coded hash parses")
}
fn checkpoint_list(&self) -> CheckpointList {
// parse calls CheckpointList::from_list
let checkpoint_list: CheckpointList = match self {
Network::Mainnet => MAINNET_CHECKPOINTS
.parse()
.expect("Hard-coded Mainnet checkpoint list parses and validates"),
Network::Testnet => TESTNET_CHECKPOINTS
.parse()
.expect("Hard-coded Testnet checkpoint list parses and validates"),
};
match checkpoint_list.hash(block::Height(0)) {
Some(hash) if hash == self.genesis_hash() => checkpoint_list,
Some(_) => {
panic!("The hard-coded genesis checkpoint does not match the network genesis hash")
}
None => unreachable!("Parser should have checked for a missing genesis checkpoint"),
}
}
}
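A hypothetical call-site sketch for this trait, which is re-exported from `zebra_consensus` later in this commit (it assumes `CheckpointList::hash` stays public, as its use above suggests):

```rust
use zebra_chain::{block::Height, parameters::Network};
use zebra_consensus::ParameterCheckpoint as _;

fn main() {
    // Replaces the free function `genesis_hash(network)` and `CheckpointList::new(network)`.
    let genesis = Network::Mainnet.genesis_hash();
    let checkpoints = Network::Mainnet.checkpoint_list();

    // The hard-coded list is checked against the genesis hash, so the first
    // checkpoint must be the genesis block.
    assert_eq!(checkpoints.hash(Height(0)), Some(genesis));
}
```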
/// A list of block height and hash checkpoints.
///
/// Checkpoints should be chosen to avoid forks or chain reorganizations,
@ -81,27 +122,6 @@ impl FromStr for CheckpointList {
}
impl CheckpointList {
/// Returns the hard-coded checkpoint list for `network`.
pub fn new(network: Network) -> Self {
// parse calls CheckpointList::from_list
let checkpoint_list: CheckpointList = match network {
Network::Mainnet => MAINNET_CHECKPOINTS
.parse()
.expect("Hard-coded Mainnet checkpoint list parses and validates"),
Network::Testnet => TESTNET_CHECKPOINTS
.parse()
.expect("Hard-coded Testnet checkpoint list parses and validates"),
};
match checkpoint_list.hash(block::Height(0)) {
Some(hash) if hash == genesis_hash(network) => checkpoint_list,
Some(_) => {
panic!("The hard-coded genesis checkpoint does not match the network genesis hash")
}
None => unreachable!("Parser should have checked for a missing genesis checkpoint"),
}
}
/// Create a new checkpoint list for `network` from `checkpoint_list`.
///
/// Assumes that the provided genesis checkpoint is correct.
@ -123,8 +143,8 @@ impl CheckpointList {
// Check that the list starts with the correct genesis block
match checkpoints.iter().next() {
Some((block::Height(0), hash))
if (hash == &genesis_hash(Network::Mainnet)
|| hash == &genesis_hash(Network::Testnet)) => {}
if (hash == &Network::Mainnet.genesis_hash()
|| hash == &Network::Testnet.genesis_hash()) => {}
Some((block::Height(0), _)) => {
Err("the genesis checkpoint does not match the Mainnet or Testnet genesis hash")?
}

View File

@ -235,8 +235,8 @@ fn checkpoint_list_load_hard_coded() -> Result<(), BoxError> {
.parse()
.expect("hard-coded Testnet checkpoint list should parse");
let _ = CheckpointList::new(Mainnet);
let _ = CheckpointList::new(Testnet);
let _ = Mainnet.checkpoint_list();
let _ = Testnet.checkpoint_list();
Ok(())
}
@ -257,7 +257,7 @@ fn checkpoint_list_hard_coded_mandatory(network: Network) -> Result<(), BoxError
let mandatory_checkpoint = network.mandatory_checkpoint_height();
let list = CheckpointList::new(network);
let list = network.checkpoint_list();
assert!(
list.max_height() >= mandatory_checkpoint,
@ -292,7 +292,7 @@ fn checkpoint_list_hard_coded_max_gap(network: Network) -> Result<(), BoxError>
HeightDiff::try_from(div_ceil(MAX_CHECKPOINT_BYTE_COUNT, MAX_BLOCK_BYTES))
.expect("constant fits in HeightDiff");
let list = CheckpointList::new(network);
let list = network.checkpoint_list();
let mut heights = list.0.keys();
// Check that we start at the genesis height

View File

@ -49,18 +49,19 @@ pub use block::{
subsidy::{
funding_streams::{
funding_stream_address, funding_stream_recipient_info, funding_stream_values,
height_for_first_halving, new_coinbase_script,
new_coinbase_script,
},
general::miner_subsidy,
},
Request, VerifyBlockError, MAX_BLOCK_SIGOPS,
};
pub use checkpoint::{
CheckpointList, VerifyCheckpointError, MAX_CHECKPOINT_BYTE_COUNT, MAX_CHECKPOINT_HEIGHT_GAP,
list::ParameterCheckpoint, CheckpointList, VerifyCheckpointError, MAX_CHECKPOINT_BYTE_COUNT,
MAX_CHECKPOINT_HEIGHT_GAP,
};
pub use config::Config;
pub use error::BlockError;
pub use parameters::FundingStreamReceiver;
pub use parameters::{FundingStreamReceiver, ParameterSubsidy};
pub use primitives::{ed25519, groth16, halo2, redjubjub, redpallas};
pub use router::RouterError;

View File

@ -7,7 +7,7 @@ use lazy_static::lazy_static;
use zebra_chain::{
amount::COIN,
block::{Height, HeightDiff},
parameters::Network,
parameters::{Network, NetworkUpgrade},
};
/// An initial period from Genesis to this Height where the block subsidy is gradually incremented. [What is slow-start mining][slow-mining]
@ -198,6 +198,38 @@ pub const FUNDING_STREAM_ECC_ADDRESSES_MAINNET: [&str; FUNDING_STREAMS_NUM_ADDRE
"t3XHAGxRP2FNfhAjxGjxbrQPYtQQjc3RCQD",
];
/// Functionality specific to block subsidy-related consensus rules
pub trait ParameterSubsidy {
/// Number of addresses for each funding stream in the Network.
/// [7.10]: <https://zips.z.cash/protocol/protocol.pdf#fundingstreams>
fn num_funding_streams(&self) -> usize;
/// Returns the minimum height after the first halving
/// as described in [protocol specification §7.10][7.10]
///
/// [7.10]: <https://zips.z.cash/protocol/protocol.pdf#fundingstreams>
fn height_for_first_halving(&self) -> Height;
}
/// Network methods related to Block Subsidy and Funding Streams
impl ParameterSubsidy for Network {
fn num_funding_streams(&self) -> usize {
match self {
Network::Mainnet => FUNDING_STREAMS_NUM_ADDRESSES_MAINNET,
Network::Testnet => FUNDING_STREAMS_NUM_ADDRESSES_TESTNET,
}
}
fn height_for_first_halving(&self) -> Height {
// First halving on Mainnet is at Canopy
// while in Testnet is at block constant height of `1_116_000`
// <https://zips.z.cash/protocol/protocol.pdf#zip214fundingstreams>
match self {
Network::Mainnet => NetworkUpgrade::Canopy
.activation_height(*self)
.expect("canopy activation height should be available"),
Network::Testnet => FIRST_HALVING_TESTNET,
}
}
}
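A hypothetical call-site sketch for `ParameterSubsidy`, which is re-exported from the `zebra_consensus` crate root in this commit:

```rust
use zebra_chain::parameters::Network;
use zebra_consensus::ParameterSubsidy as _;

fn main() {
    // Replaces the old free function `height_for_first_halving(network)` and the
    // per-network FUNDING_STREAMS_NUM_ADDRESSES_* match.
    let first_halving = Network::Mainnet.height_for_first_halving();
    let num_addresses = Network::Mainnet.num_funding_streams();

    println!("first halving at {first_halving:?}, {num_addresses} funding stream addresses");
}
```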
/// List of addresses for the Zcash Foundation funding stream in the Mainnet.
pub const FUNDING_STREAM_ZF_ADDRESSES_MAINNET: [&str; FUNDING_STREAMS_NUM_ADDRESSES_MAINNET] =
["t3dvVE3SQEi7kqNzwrfNePxZ1d4hUyztBA1"; FUNDING_STREAMS_NUM_ADDRESSES_MAINNET];

View File

@ -36,7 +36,7 @@ use crate::{
block::{Request, SemanticBlockVerifier, VerifyBlockError},
checkpoint::{CheckpointList, CheckpointVerifier, VerifyCheckpointError},
error::TransactionError,
transaction, BoxError, Config,
transaction, BoxError, Config, ParameterCheckpoint as _,
};
#[cfg(test)]
@ -263,7 +263,7 @@ where
// > activation block hashes given in § 3.12 Mainnet and Testnet on p. 20.
//
// <https://zips.z.cash/protocol/protocol.pdf#blockchain>
let full_checkpoints = CheckpointList::new(network);
let full_checkpoints = network.checkpoint_list();
let mut already_warned = false;
for (height, checkpoint_hash) in full_checkpoints.iter() {
@ -363,7 +363,7 @@ where
pub fn init_checkpoint_list(config: Config, network: Network) -> (CheckpointList, Height) {
// TODO: Zebra parses the checkpoint list three times at startup.
// Instead, cache the checkpoint list for each `network`.
let list = CheckpointList::new(network);
let list = network.checkpoint_list();
let max_checkpoint_height = if config.checkpoint_sync {
list.max_height()

View File

@ -5,7 +5,7 @@ use std::{collections::BTreeMap, net::SocketAddr, pin::Pin};
use futures_util::future::TryFutureExt;
use tokio_stream::{wrappers::ReceiverStream, Stream};
use tonic::{transport::Server, Request, Response, Status};
use tower::ServiceExt;
use tower::{timeout::error::Elapsed, ServiceExt};
use zebra_chain::{block::Height, transaction};
use zebra_node_services::scan_service::{
@ -70,34 +70,57 @@ where
.into_iter()
.map(|KeyWithHeight { key, height }| (key, height))
.collect();
let register_keys_response_fut = self
.scan_service
.clone()
.oneshot(ScanServiceRequest::RegisterKeys(keys.clone()));
.ready()
.await
.map_err(|_| Status::unknown("service poll_ready() method returned an error"))?
.call(ScanServiceRequest::RegisterKeys(keys.clone()));
let keys: Vec<_> = keys.into_iter().map(|(key, _start_at)| key).collect();
let subscribe_results_response_fut =
self.scan_service
.clone()
.oneshot(ScanServiceRequest::SubscribeResults(
keys.iter().cloned().collect(),
));
let subscribe_results_response_fut = self
.scan_service
.clone()
.ready()
.await
.map_err(|_| Status::unknown("service poll_ready() method returned an error"))?
.call(ScanServiceRequest::SubscribeResults(
keys.iter().cloned().collect(),
));
let (register_keys_response, subscribe_results_response) =
tokio::join!(register_keys_response_fut, subscribe_results_response_fut);
let ScanServiceResponse::RegisteredKeys(_) = register_keys_response
.map_err(|err| Status::unknown(format!("scan service returned error: {err}")))?
else {
return Err(Status::unknown(
"scan service returned an unexpected response",
));
// Ignores errors from the register keys request, we expect there to be a timeout if the keys
// are already registered, or an empty response if no new keys could be parsed as Sapling efvks.
//
// This method will still return an error if every key in the `scan` request is invalid, since
// the SubscribeResults request will return an error once the `rsp_tx` is dropped in `ScanTask::process_messages`
// when it finds that none of the keys in the request are registered.
let register_keys_err = match register_keys_response {
Ok(ScanServiceResponse::RegisteredKeys(_)) => None,
Ok(response) => {
return Err(Status::internal(format!(
"unexpected response from scan service: {response:?}"
)))
}
Err(err) if err.downcast_ref::<Elapsed>().is_some() => {
return Err(Status::deadline_exceeded(
"scan service requests timed out, is Zebra synced past Sapling activation height?")
)
}
Err(err) => Some(err),
};
let ScanServiceResponse::SubscribeResults(mut results_receiver) =
subscribe_results_response
.map_err(|err| Status::unknown(format!("scan service returned error: {err}")))?
subscribe_results_response.map_err(|err| {
register_keys_err
.map(|err| Status::invalid_argument(err.to_string()))
.unwrap_or(Status::internal(err.to_string()))
})?
else {
return Err(Status::unknown(
"scan service returned an unexpected response",
@ -179,7 +202,7 @@ where
.ready()
.and_then(|service| service.call(ScanServiceRequest::Info))
.await
.map_err(|_| Status::unknown("scan service was unavailable"))?
.map_err(|err| Status::unknown(format!("scan service returned error: {err}")))?
else {
return Err(Status::unknown(
"scan service returned an unexpected response",
@ -217,20 +240,30 @@ where
return Err(Status::invalid_argument(msg));
}
let ScanServiceResponse::RegisteredKeys(keys) = self
match self
.scan_service
.clone()
.ready()
.and_then(|service| service.call(ScanServiceRequest::RegisterKeys(keys)))
.await
.map_err(|_| Status::unknown("scan service was unavailable"))?
else {
return Err(Status::unknown(
"scan service returned an unexpected response",
));
};
{
Ok(ScanServiceResponse::RegisteredKeys(keys)) => {
Ok(Response::new(RegisterKeysResponse { keys }))
}
Ok(Response::new(RegisterKeysResponse { keys }))
Ok(response) => {
return Err(Status::internal(format!(
"unexpected response from scan service: {response:?}"
)))
}
Err(err) if err.downcast_ref::<Elapsed>().is_some() => Err(Status::deadline_exceeded(
"RegisterKeys scan service request timed out, \
is Zebra synced past Sapling activation height?",
)),
Err(err) => Err(Status::unknown(err.to_string())),
}
}
async fn clear_results(

View File

@ -163,7 +163,7 @@ impl Encoder<Message> for Codec {
let start_len = dst.len();
{
let dst = &mut dst.writer();
dst.write_all(&Magic::from(self.builder.network).0[..])?;
dst.write_all(&self.builder.network.magic_value().0[..])?;
dst.write_all(command)?;
dst.write_u32::<LittleEndian>(body_length as u32)?;
@ -389,7 +389,7 @@ impl Decoder for Codec {
"read header from src buffer"
);
if magic != Magic::from(self.builder.network) {
if magic != self.builder.network.magic_value() {
return Err(Parse("supplied magic did not meet expectations"));
}
if body_len > self.builder.max_len {

View File

@ -23,11 +23,13 @@ impl fmt::Debug for Magic {
f.debug_tuple("Magic").field(&hex::encode(self.0)).finish()
}
}
impl From<Network> for Magic {
pub(crate) trait ParameterMagic {
fn magic_value(&self) -> Magic;
}
impl ParameterMagic for Network {
/// Get the magic value associated to this `Network`.
fn from(network: Network) -> Self {
match network {
fn magic_value(&self) -> Magic {
match self {
Network::Mainnet => magics::MAINNET,
Network::Testnet => magics::TESTNET,
}

View File

@ -13,9 +13,6 @@ pub enum Request {
/// Requests general info about the scanner
Info,
/// TODO: Accept `KeyHash`es and return key hashes that are registered
CheckKeyHashes(Vec<()>),
/// Submits viewing keys with their optional birth-heights for scanning.
RegisterKeys(Vec<(String, Option<u32>)>),

View File

@ -6,7 +6,7 @@
//! Some parts of the `zcashd` RPC documentation are outdated.
//! So this implementation follows the `zcashd` server and `lightwalletd` client implementations.
use std::{collections::HashSet, fmt::Debug, sync::Arc};
use std::{collections::HashSet, default::Default, fmt::Debug, sync::Arc};
use chrono::Utc;
use futures::{FutureExt, TryFutureExt};
@ -53,9 +53,12 @@ mod tests;
#[rpc(server)]
/// RPC method signatures.
pub trait Rpc {
#[rpc(name = "getinfo")]
/// Returns software information from the RPC server, as a [`GetInfo`] JSON struct.
///
/// zcashd reference: [`getinfo`](https://zcash.github.io/rpc/getinfo.html)
/// method: post
/// tags: control
///
/// # Notes
///
@ -65,12 +68,13 @@ pub trait Rpc {
///
/// Some fields from the zcashd reference are missing from Zebra's [`GetInfo`]. It only contains the fields
/// [required for lightwalletd support.](https://github.com/zcash/lightwalletd/blob/v0.4.9/common/common.go#L91-L95)
#[rpc(name = "getinfo")]
fn get_info(&self) -> Result<GetInfo>;
/// Returns blockchain state information, as a [`GetBlockChainInfo`] JSON struct.
///
/// zcashd reference: [`getblockchaininfo`](https://zcash.github.io/rpc/getblockchaininfo.html)
/// method: post
/// tags: blockchain
///
/// # Notes
///
@ -82,11 +86,13 @@ pub trait Rpc {
/// Returns the total balance of a provided `addresses` in an [`AddressBalance`] instance.
///
/// zcashd reference: [`getaddressbalance`](https://zcash.github.io/rpc/getaddressbalance.html)
/// method: post
/// tags: address
///
/// # Parameters
///
/// - `address_strings`: (map) A JSON map with a single entry
/// - `addresses`: (array of strings) A list of base-58 encoded addresses.
/// - `address_strings`: (object, example={"addresses": ["tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ"]}) A JSON map with a single entry
/// - `addresses`: (array of strings) A list of base-58 encoded addresses.
///
/// # Notes
///
@ -109,10 +115,12 @@ pub trait Rpc {
/// Returns the [`SentTransactionHash`] for the transaction, as a JSON string.
///
/// zcashd reference: [`sendrawtransaction`](https://zcash.github.io/rpc/sendrawtransaction.html)
/// method: post
/// tags: transaction
///
/// # Parameters
///
/// - `raw_transaction_hex`: (string, required) The hex-encoded raw transaction bytes.
/// - `raw_transaction_hex`: (string, required, example="signedhex") The hex-encoded raw transaction bytes.
///
/// # Notes
///
@ -129,12 +137,13 @@ pub trait Rpc {
/// [error code `-8`.](https://github.com/zcash/zcash/issues/5758)
///
/// zcashd reference: [`getblock`](https://zcash.github.io/rpc/getblock.html)
/// method: post
/// tags: blockchain
///
/// # Parameters
///
/// - `hash | height`: (string, required) The hash or height for the block to be returned.
/// - `verbosity`: (numeric, optional, default=1) 0 for hex encoded data, 1 for a json object,
/// and 2 for json object with transaction data.
/// - `hash_or_height`: (string, required, example="1") The hash or height for the block to be returned.
/// - `verbosity`: (number, optional, default=1, example=1) 0 for hex encoded data, 1 for a json object, and 2 for json object with transaction data.
///
/// # Notes
///
@ -154,22 +163,28 @@ pub trait Rpc {
/// Returns the hash of the current best blockchain tip block, as a [`GetBlockHash`] JSON string.
///
/// zcashd reference: [`getbestblockhash`](https://zcash.github.io/rpc/getbestblockhash.html)
/// method: post
/// tags: blockchain
#[rpc(name = "getbestblockhash")]
fn get_best_block_hash(&self) -> Result<GetBlockHash>;
/// Returns all transaction ids in the memory pool, as a JSON array.
///
/// zcashd reference: [`getrawmempool`](https://zcash.github.io/rpc/getrawmempool.html)
/// method: post
/// tags: blockchain
#[rpc(name = "getrawmempool")]
fn get_raw_mempool(&self) -> BoxFuture<Result<Vec<String>>>;
/// Returns information about the given block's Sapling & Orchard tree state.
///
/// zcashd reference: [`z_gettreestate`](https://zcash.github.io/rpc/z_gettreestate.html)
/// method: post
/// tags: blockchain
///
/// # Parameters
///
/// - `hash | height`: (string, required) The block hash or height.
/// - `hash | height`: (string, required, example="00000000febc373a1da2bd9f887b105ad79ddc26ac26c2b28652d64e5207c5b5") The block hash or height.
///
/// # Notes
///
@ -182,14 +197,15 @@ pub trait Rpc {
/// Returns information about a range of Sapling or Orchard subtrees.
///
/// zcashd reference: [`z_getsubtreesbyindex`](https://zcash.github.io/rpc/z_getsubtreesbyindex.html)
/// zcashd reference: [`z_getsubtreesbyindex`](https://zcash.github.io/rpc/z_getsubtreesbyindex.html) - TODO: fix link
/// method: post
/// tags: blockchain
///
/// # Parameters
///
/// - `pool`: (string, required) The pool from which subtrees should be returned.
/// Either "sapling" or "orchard".
/// - `start_index`: (numeric, required) The index of the first 2^16-leaf subtree to return.
/// - `limit`: (numeric, optional) The maximum number of subtree values to return.
/// - `pool`: (string, required) The pool from which subtrees should be returned. Either "sapling" or "orchard".
/// - `start_index`: (number, required) The index of the first 2^16-leaf subtree to return.
/// - `limit`: (number, optional) The maximum number of subtree values to return.
///
/// # Notes
///
@ -208,11 +224,13 @@ pub trait Rpc {
/// Returns the raw transaction data, as a [`GetRawTransaction`] JSON string or structure.
///
/// zcashd reference: [`getrawtransaction`](https://zcash.github.io/rpc/getrawtransaction.html)
/// method: post
/// tags: transaction
///
/// # Parameters
///
/// - `txid`: (string, required) The transaction ID of the transaction to be returned.
/// - `verbose`: (numeric, optional, default=0) If 0, return a string of hex-encoded data, otherwise return a JSON object.
/// - `txid`: (string, required, example="mytxid") The transaction ID of the transaction to be returned.
/// - `verbose`: (number, optional, default=0, example=1) If 0, return a string of hex-encoded data, otherwise return a JSON object.
///
/// # Notes
///
@ -232,13 +250,15 @@ pub trait Rpc {
/// Returns the transaction ids made by the provided transparent addresses.
///
/// zcashd reference: [`getaddresstxids`](https://zcash.github.io/rpc/getaddresstxids.html)
/// method: post
/// tags: address
///
/// # Parameters
///
/// A [`GetAddressTxIdsRequest`] struct with the following named fields:
/// - `addresses`: (json array of string, required) The addresses to get transactions from.
/// - `start`: (numeric, required) The lower height to start looking for transactions (inclusive).
/// - `end`: (numeric, required) The top height to stop looking for transactions (inclusive).
/// - `request`: (object, required, example={\"addresses\": [\"tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ\"], \"start\": 1000, \"end\": 2000}) A struct with the following named fields:
/// - `addresses`: (json array of string, required) The addresses to get transactions from.
/// - `start`: (numeric, required) The lower height to start looking for transactions (inclusive).
/// - `end`: (numeric, required) The top height to stop looking for transactions (inclusive).
///
/// # Notes
///
@ -251,10 +271,12 @@ pub trait Rpc {
/// Returns all unspent outputs for a list of addresses.
///
/// zcashd reference: [`getaddressutxos`](https://zcash.github.io/rpc/getaddressutxos.html)
/// method: post
/// tags: address
///
/// # Parameters
///
/// - `addresses`: (json array of string, required) The addresses to get outputs from.
/// - `addresses`: (array, required, example={\"addresses\": [\"tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ\"]}) The addresses to get outputs from.
///
/// # Notes
///
@ -519,7 +541,7 @@ where
//
// Get the network upgrades in height order, like `zcashd`.
let mut upgrades = IndexMap::new();
for (activation_height, network_upgrade) in NetworkUpgrade::activation_list(network) {
for (activation_height, network_upgrade) in network.activation_list() {
// Zebra defines network upgrades based on incompatible consensus rule changes,
// but zcashd defines them based on ZIPs.
//
@ -1428,6 +1450,15 @@ pub struct GetInfo {
subversion: String,
}
impl Default for GetInfo {
fn default() -> Self {
GetInfo {
build: "some build version".to_string(),
subversion: "some subversion".to_string(),
}
}
}
/// Response to a `getblockchaininfo` RPC request.
///
/// See the notes for the [`Rpc::get_blockchain_info` method].
@ -1456,6 +1487,22 @@ pub struct GetBlockChainInfo {
consensus: TipConsensusBranch,
}
impl Default for GetBlockChainInfo {
fn default() -> Self {
GetBlockChainInfo {
chain: "main".to_string(),
blocks: Height(1),
best_block_hash: block::Hash([0; 32]),
estimated_height: Height(1),
upgrades: IndexMap::new(),
consensus: TipConsensusBranch {
chain_tip: ConsensusBranchIdHex(ConsensusBranchId::default()),
next_block: ConsensusBranchIdHex(ConsensusBranchId::default()),
},
}
}
}
/// A wrapper type with a list of transparent address strings.
///
/// This is used for the input parameter of [`Rpc::get_address_balance`],
@ -1590,6 +1637,18 @@ pub enum GetBlock {
},
}
impl Default for GetBlock {
fn default() -> Self {
GetBlock::Object {
hash: GetBlockHash::default(),
confirmations: 0,
height: None,
tx: Vec::new(),
trees: GetBlockTrees::default(),
}
}
}
/// Response to a `getbestblockhash` and `getblockhash` RPC request.
///
/// Contains the hex-encoded hash of the requested block.
@ -1599,6 +1658,12 @@ pub enum GetBlock {
#[serde(transparent)]
pub struct GetBlockHash(#[serde(with = "hex")] pub block::Hash);
impl Default for GetBlockHash {
fn default() -> Self {
GetBlockHash(block::Hash([0; 32]))
}
}
/// Response to a `z_gettreestate` RPC request.
///
/// Contains the hex-encoded Sapling & Orchard note commitment trees, and their
@ -1627,6 +1692,26 @@ pub struct GetTreestate {
orchard: Treestate<orchard::tree::SerializedTree>,
}
impl Default for GetTreestate {
fn default() -> Self {
GetTreestate {
hash: block::Hash([0; 32]),
height: Height(0),
time: 0,
sapling: Treestate {
commitments: Commitments {
final_state: sapling::tree::SerializedTree::default(),
},
},
orchard: Treestate {
commitments: Commitments {
final_state: orchard::tree::SerializedTree::default(),
},
},
}
}
}
/// A treestate that is included in the [`z_gettreestate`][1] RPC response.
///
/// [1]: https://zcash.github.io/rpc/z_gettreestate.html
@ -1758,6 +1843,15 @@ pub struct GetBlockTrees {
orchard: OrchardTrees,
}
impl Default for GetBlockTrees {
fn default() -> Self {
GetBlockTrees {
sapling: SaplingTrees { size: 0 },
orchard: OrchardTrees { size: 0 },
}
}
}
/// Sapling note commitment tree information.
#[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)]
pub struct SaplingTrees {

View File

@ -20,10 +20,10 @@ use zebra_chain::{
transparent::{
self, EXTRA_ZEBRA_COINBASE_DATA, MAX_COINBASE_DATA_LEN, MAX_COINBASE_HEIGHT_DATA_LEN,
},
work::difficulty::{ExpandedDifficulty, U256},
work::difficulty::{ParameterDifficulty as _, U256},
};
use zebra_consensus::{
funding_stream_address, funding_stream_values, height_for_first_halving, miner_subsidy,
funding_stream_address, funding_stream_values, miner_subsidy, ParameterSubsidy as _,
RouterError,
};
use zebra_network::AddressBookPeers;
@ -69,6 +69,8 @@ pub trait GetBlockTemplateRpc {
/// the number of blocks in this chain excluding the genesis block).
///
/// zcashd reference: [`getblockcount`](https://zcash.github.io/rpc/getblockcount.html)
/// method: post
/// tags: blockchain
///
/// # Notes
///
@ -80,10 +82,12 @@ pub trait GetBlockTemplateRpc {
/// to a block in the best chain.
///
/// zcashd reference: [`getblockhash`](https://zcash-rpc.github.io/getblockhash.html)
/// method: post
/// tags: blockchain
///
/// # Parameters
///
/// - `index`: (numeric, required) The block index.
/// - `index`: (numeric, required, example=1) The block index.
///
/// # Notes
///
@ -100,6 +104,8 @@ pub trait GetBlockTemplateRpc {
/// - `jsonrequestobject`: (string, optional) A JSON object containing arguments.
///
/// zcashd reference: [`getblocktemplate`](https://zcash-rpc.github.io/getblocktemplate.html)
/// method: post
/// tags: mining
///
/// # Notes
///
@ -124,11 +130,17 @@ pub trait GetBlockTemplateRpc {
/// Returns the [`submit_block::Response`] for the operation, as a JSON string.
///
/// zcashd reference: [`submitblock`](https://zcash.github.io/rpc/submitblock.html)
/// method: post
/// tags: mining
///
/// # Parameters
/// - `hexdata` (string, required)
/// - `jsonparametersobject` (string, optional) - currently ignored
/// - holds a single field, workid, that must be included in submissions if provided by the server.
///
/// - `hexdata`: (string, required)
/// - `jsonparametersobject`: (string, optional) - currently ignored
///
/// # Notes
///
/// - `jsonparametersobject` holds a single field, workid, that must be included in submissions if provided by the server.
#[rpc(name = "submitblock")]
fn submit_block(
&self,
@ -139,6 +151,8 @@ pub trait GetBlockTemplateRpc {
/// Returns mining-related information.
///
/// zcashd reference: [`getmininginfo`](https://zcash.github.io/rpc/getmininginfo.html)
/// method: post
/// tags: mining
#[rpc(name = "getmininginfo")]
fn get_mining_info(&self) -> BoxFuture<Result<get_mining_info::Response>>;
@ -150,6 +164,8 @@ pub trait GetBlockTemplateRpc {
/// If `height` is not supplied or is -1, uses the tip height.
///
/// zcashd reference: [`getnetworksolps`](https://zcash.github.io/rpc/getnetworksolps.html)
/// method: post
/// tags: mining
#[rpc(name = "getnetworksolps")]
fn get_network_sol_ps(
&self,
@ -164,6 +180,8 @@ pub trait GetBlockTemplateRpc {
/// See that method for details.
///
/// zcashd reference: [`getnetworkhashps`](https://zcash.github.io/rpc/getnetworkhashps.html)
/// method: post
/// tags: mining
#[rpc(name = "getnetworkhashps")]
fn get_network_hash_ps(
&self,
@ -176,6 +194,8 @@ pub trait GetBlockTemplateRpc {
/// Returns data about each connected network node.
///
/// zcashd reference: [`getpeerinfo`](https://zcash.github.io/rpc/getpeerinfo.html)
/// method: post
/// tags: network
#[rpc(name = "getpeerinfo")]
fn get_peer_info(&self) -> BoxFuture<Result<Vec<PeerInfo>>>;
@ -183,6 +203,16 @@ pub trait GetBlockTemplateRpc {
/// Returns information about the given address if valid.
///
/// zcashd reference: [`validateaddress`](https://zcash.github.io/rpc/validateaddress.html)
/// method: post
/// tags: util
///
/// # Parameters
///
/// - `address`: (string, required) The zcash address to validate.
///
/// # Notes
///
/// - No notes
#[rpc(name = "validateaddress")]
fn validate_address(&self, address: String) -> BoxFuture<Result<validate_address::Response>>;
@ -190,6 +220,16 @@ pub trait GetBlockTemplateRpc {
/// Returns information about the given address if valid.
///
/// zcashd reference: [`z_validateaddress`](https://zcash.github.io/rpc/z_validateaddress.html)
/// method: post
/// tags: util
///
/// # Parameters
///
/// - `address`: (string, required) The zcash address to validate.
///
/// # Notes
///
/// - No notes
#[rpc(name = "z_validateaddress")]
fn z_validate_address(
&self,
@ -199,22 +239,41 @@ pub trait GetBlockTemplateRpc {
/// Returns the block subsidy reward of the block at `height`, taking into account the mining slow start.
/// Returns an error if `height` is less than the height of the first halving for the current network.
///
/// `height` can be any valid current or future height.
/// If `height` is not supplied, uses the tip height.
///
/// zcashd reference: [`getblocksubsidy`](https://zcash.github.io/rpc/getblocksubsidy.html)
/// method: post
/// tags: mining
///
/// # Parameters
///
/// - `height`: (numeric, optional, example=1) Can be any valid current or future height.
///
/// # Notes
///
/// If `height` is not supplied, uses the tip height.
#[rpc(name = "getblocksubsidy")]
fn get_block_subsidy(&self, height: Option<u32>) -> BoxFuture<Result<BlockSubsidy>>;
/// Returns the proof-of-work difficulty as a multiple of the minimum difficulty.
///
/// zcashd reference: [`getdifficulty`](https://zcash.github.io/rpc/getdifficulty.html)
/// method: post
/// tags: blockchain
#[rpc(name = "getdifficulty")]
fn get_difficulty(&self) -> BoxFuture<Result<f64>>;
/// Returns the list of individual payment addresses given a unified address.
///
/// zcashd reference: [`z_listunifiedreceivers`](https://zcash.github.io/rpc/z_listunifiedreceivers.html)
/// method: post
/// tags: wallet
///
/// # Parameters
///
/// - `address`: (string, required) The zcash unified address to get the list from.
///
/// # Notes
///
/// - No notes
#[rpc(name = "z_listunifiedreceivers")]
fn z_list_unified_receivers(
&self,
@ -1098,7 +1157,7 @@ where
best_chain_tip_height(&latest_chain_tip)?
};
if height < height_for_first_halving(network) {
if height < network.height_for_first_halving() {
return Err(Error {
code: ErrorCode::ServerError(0),
message: "Zebra does not support founders' reward subsidies, \
@ -1197,7 +1256,7 @@ where
// using this calculation.)
// Get expanded difficulties (256 bits), these are the inverse of the work
let pow_limit: U256 = ExpandedDifficulty::target_difficulty_limit(network).into();
let pow_limit: U256 = network.target_difficulty_limit().into();
let difficulty: U256 = chain_info
.expected_difficulty
.to_expanded()

View File

@ -23,7 +23,7 @@ use zebra_chain::{
serialization::{DateTime32, ZcashDeserializeInto},
transaction::Transaction,
transparent,
work::difficulty::{CompactDifficulty, ExpandedDifficulty},
work::difficulty::{CompactDifficulty, ParameterDifficulty as _},
};
use zebra_network::{address_book_peers::MockAddressBookPeers, types::MetaAddr};
use zebra_node_services::mempool;
@ -115,7 +115,7 @@ pub async fn test_responses<State, ReadState>(
let fake_max_time = DateTime32::from(1654008728);
// Use a valid fractional difficulty for snapshots
let pow_limit = ExpandedDifficulty::target_difficulty_limit(network);
let pow_limit = network.target_difficulty_limit();
let fake_difficulty = pow_limit * 2 / 3;
let fake_difficulty = CompactDifficulty::from(fake_difficulty);

View File

@ -1658,7 +1658,7 @@ async fn rpc_getdifficulty() {
chain_sync_status::MockSyncStatus,
chain_tip::mock::MockChainTip,
serialization::DateTime32,
work::difficulty::{CompactDifficulty, ExpandedDifficulty, U256},
work::difficulty::{CompactDifficulty, ExpandedDifficulty, ParameterDifficulty as _, U256},
};
use zebra_network::address_book_peers::MockAddressBookPeers;
@ -1742,7 +1742,7 @@ async fn rpc_getdifficulty() {
assert_eq!(format!("{:.9}", get_difficulty.unwrap()), "0.000122072");
// Fake the ChainInfo response: difficulty limit - smallest valid difficulty
let pow_limit = ExpandedDifficulty::target_difficulty_limit(Mainnet);
let pow_limit = Mainnet.target_difficulty_limit();
let fake_difficulty = pow_limit.into();
let mut read_state2 = read_state.clone();
let mock_read_state_request_handler = async move {

View File

@ -1,6 +1,6 @@
//! Initializing the scanner and gRPC server.
use std::net::SocketAddr;
use std::{net::SocketAddr, time::Duration};
use color_eyre::Report;
use tokio::task::JoinHandle;
@ -12,6 +12,9 @@ use zebra_state::ChainTipChange;
use crate::{scan, service::ScanService, storage::Storage, Config};
/// The timeout applied to scan service calls.
pub const SCAN_SERVICE_TIMEOUT: Duration = Duration::from_secs(30);
/// Initialize [`ScanService`] based on its config.
///
/// TODO: add a test for this function.
@ -25,6 +28,7 @@ pub async fn init_with_server(
info!(?config, "starting scan service");
let scan_service = ServiceBuilder::new()
.buffer(10)
.timeout(SCAN_SERVICE_TIMEOUT)
.service(ScanService::new(&config, network, state, chain_tip_change).await);
// TODO: move this to zebra-grpc init() function and include addr

View File

@ -3,7 +3,7 @@
use std::{collections::BTreeMap, future::Future, pin::Pin, task::Poll, time::Duration};
use futures::future::FutureExt;
use tower::Service;
use tower::{BoxError, Service};
use zebra_chain::{diagnostic::task::WaitForPanics, parameters::Network, transaction::Hash};
@ -32,6 +32,9 @@ pub struct ScanService {
}
/// A timeout applied to `DeleteKeys` requests.
///
/// This should be shorter than [`SCAN_SERVICE_TIMEOUT`](crate::init::SCAN_SERVICE_TIMEOUT) so that,
/// even if waiting for the scan task times out, the request still has time to delete entries from
/// storage before the outer service timeout drops the future.
const DELETE_KEY_TIMEOUT: Duration = Duration::from_secs(15);
impl ScanService {
@ -64,7 +67,7 @@ impl ScanService {
impl Service<Request> for ScanService {
type Response = Response;
type Error = Box<dyn std::error::Error + Send + Sync + 'static>;
type Error = BoxError;
type Future =
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
@ -97,17 +100,17 @@ impl Service<Request> for ScanService {
.boxed();
}
Request::CheckKeyHashes(_key_hashes) => {
// TODO: check that these entries exist in db
}
Request::RegisterKeys(keys) => {
let mut scan_task = self.scan_task.clone();
return async move {
Ok(Response::RegisteredKeys(
scan_task.register_keys(keys)?.await?,
))
let newly_registered_keys = scan_task.register_keys(keys)?.await?;
if !newly_registered_keys.is_empty() {
Ok(Response::RegisteredKeys(newly_registered_keys))
} else {
Err("no keys were registered, check that keys are not already registered and \
are valid Sapling extended full viewing keys".into())
}
}
.boxed();
}
@ -123,7 +126,7 @@ impl Service<Request> for ScanService {
scan_task.remove_keys(keys.clone())?,
)
.await
.map_err(|_| "timeout waiting for delete keys done notification");
.map_err(|_| "request timed out removing keys from scan task".to_string());
// Delete the key from the database after either confirmation that it's been removed from the scan task, or
// waiting `DELETE_KEY_TIMEOUT`.
@ -171,7 +174,9 @@ impl Service<Request> for ScanService {
let mut scan_task = self.scan_task.clone();
return async move {
let results_receiver = scan_task.subscribe(keys).await?;
let results_receiver = scan_task.subscribe(keys)?.await.map_err(|_| {
"scan task dropped responder, check that keys are registered"
})?;
Ok(Response::SubscribeResults(results_receiver))
}
@ -193,7 +198,5 @@ impl Service<Request> for ScanService {
.boxed();
}
}
async move { Ok(Response::Results(BTreeMap::new())) }.boxed()
}
}

View File

@ -8,7 +8,6 @@ use tokio::sync::{
oneshot,
};
use tower::BoxError;
use zcash_primitives::{sapling::SaplingIvk, zip32::DiversifiableFullViewingKey};
use zebra_chain::{block::Height, parameters::Network};
use zebra_node_services::scan_service::response::ScanResult;
@ -207,14 +206,14 @@ impl ScanTask {
/// Sends a message to the scan task to start sending the results for the provided viewing keys to a channel.
///
/// Returns the channel receiver.
pub async fn subscribe(
pub fn subscribe(
&mut self,
keys: HashSet<SaplingScanningKey>,
) -> Result<Receiver<ScanResult>, BoxError> {
) -> Result<oneshot::Receiver<Receiver<ScanResult>>, TrySendError<ScanTaskCommand>> {
let (rsp_tx, rsp_rx) = oneshot::channel();
self.send(ScanTaskCommand::SubscribeResults { keys, rsp_tx })?;
Ok(rsp_rx.await?)
Ok(rsp_rx)
}
}

View File

@ -147,7 +147,12 @@ async fn scan_task_processes_messages_correctly() -> Result<(), Report> {
let subscribe_keys: HashSet<String> = sapling_keys[..5].iter().cloned().collect();
let result_receiver_fut = {
let mut mock_scan_task = mock_scan_task.clone();
tokio::spawn(async move { mock_scan_task.subscribe(subscribe_keys.clone()).await })
tokio::spawn(async move {
mock_scan_task
.subscribe(subscribe_keys.clone())
.expect("should send subscribe msg successfully")
.await
})
};
// Wait for spawned task to send subscribe message

View File

@ -1,7 +1,10 @@
//! Tests for ScanService.
use std::time::Duration;
use futures::{stream::FuturesOrdered, StreamExt};
use tokio::sync::mpsc::error::TryRecvError;
use tower::{Service, ServiceBuilder, ServiceExt};
use tower::{timeout::error::Elapsed, Service, ServiceBuilder, ServiceExt};
use color_eyre::{eyre::eyre, Result};
@ -10,6 +13,7 @@ use zebra_node_services::scan_service::{request::Request, response::Response};
use zebra_state::TransactionIndex;
use crate::{
init::SCAN_SERVICE_TIMEOUT,
service::{scan_task::ScanTaskCommand, ScanService},
storage::db::tests::{fake_sapling_results, new_test_storage},
tests::{mock_sapling_scanning_keys, ZECPAGES_SAPLING_VIEWING_KEY},
@ -329,5 +333,82 @@ async fn scan_service_registers_keys_correctly_for(network: Network) -> Result<(
_ => panic!("scan service should have responded with the `RegisteredKeys` response"),
}
// Try registering invalid keys.
let register_keys_error_message = scan_service
.ready()
.await
.map_err(|err| eyre!(err))?
.call(Request::RegisterKeys(vec![(
"invalid key".to_string(),
None,
)]))
.await
.expect_err("response should be an error when there are no valid keys to be added")
.to_string();
assert!(
register_keys_error_message.starts_with("no keys were registered"),
"error message should say that no keys were registered"
);
Ok(())
}
/// Test that the scan service with a timeout layer returns timeout errors after the expected timeout.
#[tokio::test]
async fn scan_service_timeout() -> Result<()> {
let db = new_test_storage(Network::Mainnet);
let (scan_service, _cmd_receiver) = ScanService::new_with_mock_scanner(db);
let mut scan_service = ServiceBuilder::new()
.buffer(10)
.timeout(SCAN_SERVICE_TIMEOUT)
.service(scan_service);
let keys = vec![String::from("fake key")];
let mut response_futs = FuturesOrdered::new();
for request in [
Request::RegisterKeys(keys.iter().cloned().map(|key| (key, None)).collect()),
Request::SubscribeResults(keys.iter().cloned().collect()),
Request::DeleteKeys(keys),
] {
let response_fut = scan_service
.ready()
.await
.expect("service should be ready")
.call(request);
response_futs.push_back(tokio::time::timeout(
SCAN_SERVICE_TIMEOUT
.checked_add(Duration::from_secs(1))
.expect("should not overflow"),
response_fut,
));
}
let expect_timeout_err = |response: Option<Result<Result<_, _>, _>>| {
response
.expect("response_futs should not be empty")
.expect("service should respond with timeout error before outer timeout")
.expect_err("service response should be a timeout error")
};
// RegisterKeys and SubscribeResults should return `Elapsed` errors from `Timeout` layer
for _ in 0..2 {
let response = response_futs.next().await;
expect_timeout_err(response)
.downcast::<Elapsed>()
.expect("service should return Elapsed error from Timeout layer");
}
let response = response_futs.next().await;
let response_error_msg = expect_timeout_err(response).to_string();
assert!(
response_error_msg.starts_with("request timed out"),
"error message should say the request timed out"
);
Ok(())
}

View File

@ -277,9 +277,7 @@ fn difficulty_threshold_and_time_are_valid(
// of that block plus 90*60 seconds.
//
// https://zips.z.cash/protocol/protocol.pdf#blockheader
if NetworkUpgrade::is_max_block_time_enforced(network, candidate_height)
&& candidate_time > block_time_max
{
if network.is_max_block_time_enforced(candidate_height) && candidate_time > block_time_max {
Err(ValidateContextError::TimeTooLate {
candidate_time,
block_time_max,

View File

@ -15,8 +15,8 @@ use zebra_chain::{
parameters::Network,
parameters::NetworkUpgrade,
parameters::POW_AVERAGING_WINDOW,
work::difficulty::ExpandedDifficulty,
work::difficulty::{CompactDifficulty, U256},
work::difficulty::{ExpandedDifficulty, ParameterDifficulty as _},
};
/// The median block span for time median calculations.
@ -188,7 +188,7 @@ impl AdjustedDifficulty {
Network::Testnet,
"invalid network: the minimum difficulty rule only applies on testnet"
);
ExpandedDifficulty::target_difficulty_limit(self.network).to_compact()
self.network.target_difficulty_limit().to_compact()
} else {
self.threshold_bits()
}
@ -210,10 +210,7 @@ impl AdjustedDifficulty {
let threshold = (self.mean_target_difficulty() / averaging_window_timespan.num_seconds())
* self.median_timespan_bounded().num_seconds();
let threshold = min(
ExpandedDifficulty::target_difficulty_limit(self.network),
threshold,
);
let threshold = min(self.network.target_difficulty_limit(), threshold);
threshold.to_compact()
}

View File

@ -41,6 +41,11 @@ name = "scanning-results-reader"
path = "src/bin/scanning-results-reader/main.rs"
required-features = ["shielded-scan"]
[[bin]]
name = "openapi-generator"
path = "src/bin/openapi-generator/main.rs"
required-features = ["openapi-generator"]
[features]
default = []
@ -74,6 +79,14 @@ shielded-scan = [
"zebra-scan"
]
openapi-generator = [
"zebra-rpc",
"syn",
"quote",
"serde_yaml",
"serde"
]
[dependencies]
color-eyre = "0.6.2"
# This is a transitive dependency via color-eyre.
@ -109,3 +122,9 @@ jsonrpc = { version = "0.17.0", optional = true }
zcash_primitives = { version = "0.13.0-rc.1", optional = true }
zcash_client_backend = {version = "0.10.0-rc.1", optional = true}
# For the openapi generator
syn = { version = "2.0.52", features = ["full"], optional = true }
quote = { version = "1.0.35", optional = true }
serde_yaml = { version = "0.9.32", optional = true }
serde = { version = "1.0.196", features = ["serde_derive"], optional = true }

View File

@ -7,6 +7,7 @@ Tools for maintaining and testing Zebra:
- [zebrad-log-filter](#zebrad-log-filter)
- [zcash-rpc-diff](#zcash-rpc-diff)
- [scanning-results-reader](#scanning-results-reader)
- [openapi-generator](#openapi-generator)
Binaries are easier to use if they are located in your system execution path.
@ -232,3 +233,80 @@ A utility for displaying Zebra's scanning results.
``` bash
cargo run --release --features shielded-scan --bin scanning-results-reader
```
## OpenAPI generator
This utility generates an `openapi.yaml` specification by extracting information from RPC method documentation in the `zebra-rpc` crate code.
### Usage
To use the generator tool, build and run it with the following command:
```console
cargo run --bin openapi-generator --features="openapi-generator"
```
This command will create or update an `openapi.yaml` file at the root of the Zebra project repository.
The latest specification generated using this utility can be found [here](https://github.com/ZcashFoundation/zebra/blob/main/openapi.yaml).
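
For illustration, the entry that the generator emits for a parameterless method such as `getinfo` looks roughly like the sketch below. This is only an approximation based on the request/response structs and `Default` impls in the generator and RPC code; the exact wording, ordering, and default values in the committed `openapi.yaml` may differ:

```yaml
paths:
  /getinfo:
    post:
      tags:
      - control
      description: Returns software information from the RPC server, as a [`GetInfo`] JSON struct.
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              properties:
                method:
                  type: string
                  default: getinfo
                id:
                  type: number
                  default: '123'
                params:
                  type: array
                  items: {}
                  default: '[]'
      responses:
        '200':
          description: OK
          content:
            application/json:
              schema:
                type: object
                properties:
                  result:
                    type: object
                    default: '{"build":"some build version","subversion":"some subversion"}'
```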
### Documentation standard
For the generator to work, each RPC method's documentation needs to follow a specific, well-defined format. For example, here is the in-code documentation for the `getblock` method, which takes arguments:
```rust
/// Returns the requested block by hash or height, as a [`GetBlock`] JSON string.
/// If the block is not in Zebra's state, returns
/// [error code `-8`.](https://github.com/zcash/zcash/issues/5758)
///
/// zcashd reference: [`getblock`](https://zcash.github.io/rpc/getblock.html)
/// method: post
/// tags: blockchain
///
/// # Parameters
///
/// - `hash_or_height`: (string, required, example="1") The hash or height for the block to be returned.
/// - `verbosity`: (number, optional, default=1, example=1) 0 for hex encoded data, 1 for a json object, and 2 for json object with transaction data.
///
/// # Notes
///
/// With verbosity=1, [`lightwalletd` only reads the `tx` field of the
/// result](https://github.com/zcash/lightwalletd/blob/dfac02093d85fb31fb9a8475b884dd6abca966c7/common/common.go#L152),
/// and other clients only read the `hash` and `confirmations` fields,
/// so we only return a few fields for now.
///
/// `lightwalletd` and mining clients also do not use verbosity=2, so we don't support it.
#[rpc(name = "getblock")]
fn get_block(
&self,
hash_or_height: String,
verbosity: Option<u8>,
) -> BoxFuture<Result<GetBlock>>;
```
An example of a method with no arguments is the `getinfo` call:
```rust
#[rpc(name = "getinfo")]
/// Returns software information from the RPC server, as a [`GetInfo`] JSON struct.
///
/// zcashd reference: [`getinfo`](https://zcash.github.io/rpc/getinfo.html)
/// method: post
/// tags: control
///
/// # Notes
///
/// [The zcashd reference](https://zcash.github.io/rpc/getinfo.html) might not show some fields
/// in Zebra's [`GetInfo`]. Zebra uses the field names and formats from the
/// [zcashd code](https://github.com/zcash/zcash/blob/v4.6.0-1/src/rpc/misc.cpp#L86-L87).
///
/// Some fields from the zcashd reference are missing from Zebra's [`GetInfo`]. It only contains the fields
/// [required for lightwalletd support.](https://github.com/zcash/lightwalletd/blob/v0.4.9/common/common.go#L91-L95)
fn get_info(&self) -> Result<GetInfo>;
```
More examples can be found in `zebra-rpc/src/methods.rs` and `zebra-rpc/src/methods/get_block_template_rpcs.rs`.
The generator detects newly added methods as long as they are members of the `Rpc` trait in `zebra-rpc/src/methods.rs` or of the `GetBlockTemplateRpc` trait in `zebra-rpc/src/methods/get_block_template_rpcs.rs`.

View File

@ -0,0 +1,544 @@
//! Generate an openapi.yaml file from the Zebra RPC methods
use std::{collections::HashMap, error::Error, fs::File, io::Write};
use quote::ToTokens;
use serde::Serialize;
use syn::LitStr;
use zebra_rpc::methods::*;
// The API server
const SERVER: &str = "http://localhost:8232";
// The API methods
#[derive(Serialize, Debug)]
struct Methods {
paths: HashMap<String, HashMap<String, MethodConfig>>,
}
// The configuration for each method
#[derive(Serialize, Clone, Debug)]
struct MethodConfig {
tags: Vec<String>,
description: String,
#[serde(rename = "requestBody")]
request_body: RequestBody,
responses: HashMap<String, Response>,
}
// The request body
#[derive(Serialize, Clone, Debug)]
struct RequestBody {
required: bool,
content: Content,
}
// The content of the request body
#[derive(Serialize, Clone, Debug)]
struct Content {
#[serde(rename = "application/json")]
application_json: Application,
}
// The application of the request body
#[derive(Serialize, Clone, Debug)]
struct Application {
schema: Schema,
}
// The schema of the request body
#[derive(Serialize, Clone, Debug)]
struct Schema {
#[serde(rename = "type")]
type_: String,
properties: HashMap<String, Property>,
}
// The properties of the request body
#[derive(Serialize, Clone, Debug)]
struct Property {
#[serde(rename = "type")]
type_: String,
#[serde(skip_serializing_if = "Option::is_none")]
items: Option<ArrayItems>,
default: String,
}
// The response
#[derive(Serialize, Clone, Debug)]
struct Response {
description: String,
content: Content,
}
// The array items
#[derive(Serialize, Clone, Debug)]
struct ArrayItems {}
fn main() -> Result<(), Box<dyn Error>> {
let current_path = env!("CARGO_MANIFEST_DIR");
// Define the paths to the Zebra RPC methods
let paths = vec![
(
format!("{}/../zebra-rpc/src/methods.rs", current_path),
"Rpc",
),
(
format!(
"{}/../zebra-rpc/src/methods/get_block_template_rpcs.rs",
current_path
),
"GetBlockTemplateRpc",
),
];
// Create a hashmap to store the method names and configuration
let mut methods = HashMap::new();
for zebra_rpc_methods_path in paths {
// Read the source code from the file
let source_code = std::fs::read_to_string(zebra_rpc_methods_path.0)?;
// Parse the source code into a syn AST
let syn_file = syn::parse_file(&source_code)?;
// Create a hashmap to store the methods configuration
let mut methods_config = HashMap::new();
// Iterate over items in the file looking for traits
for item in &syn_file.items {
if let syn::Item::Trait(trait_item) = item {
// Check if this trait is the one we're interested in
if trait_item.ident == zebra_rpc_methods_path.1 {
// Iterate over the trait items looking for methods
for trait_item in &trait_item.items {
// Extract method name
let method_name = method_name(trait_item)?;
// Extract method documentation and description
let (method_doc, mut description) = method_doc(trait_item)?;
// Request type. TODO: All methods are POST so we just hardcode it
let request_type = "post".to_string();
// Tags. TODO: We are assuming 1 tag per call for now
let tags = tags(&method_doc)?;
// Parameters
let mut parameters_example = "[]".to_string();
if let Ok((params_description, params_example)) = get_params(&method_doc) {
// Add parameters to method description:
description =
add_params_to_description(&description, &params_description);
// The Zebra API uses a `params` array to pass arguments to the RPC methods,
// so we need to add this to the OpenAPI spec instead of `parameters`
parameters_example = params_example;
}
// Create the request body
let request_body = create_request_body(&method_name, &parameters_example);
// Check if we have parameters
let have_parameters = parameters_example != "[]";
// Create the responses
let responses = create_responses(&method_name, have_parameters)?;
// Add the method configuration to the hashmap
methods_config.insert(
request_type,
MethodConfig {
tags,
description,
request_body,
responses,
},
);
// Add the method name and configuration to the hashmap
methods.insert(format!("/{}", method_name), methods_config.clone());
}
}
}
}
}
// Create a struct to hold all the methods
let all_methods = Methods { paths: methods };
// Add openapi header and write to file
let yaml_string = serde_yaml::to_string(&all_methods)?;
let mut w = File::create("openapi.yaml")?;
w.write_all(format!("{}{}", create_yaml(), yaml_string).as_bytes())?;
Ok(())
}
// Create the openapi.yaml header
fn create_yaml() -> String {
format!("openapi: 3.0.3
info:
title: Swagger Zebra API - OpenAPI 3.0
version: 0.0.1
description: |-
This is the Zebra API. It is a JSON-RPC 2.0 API that allows you to interact with the Zebra node.
Useful links:
- [The Zebra repository](https://github.com/ZcashFoundation/zebra)
- [The latest API spec](https://github.com/ZcashFoundation/zebra/blob/main/openapi.yaml)
servers:
- url: {}
", SERVER)
}
// Extract the method name from the trait item
fn method_name(trait_item: &syn::TraitItem) -> Result<String, Box<dyn Error>> {
let mut method_name = "".to_string();
if let syn::TraitItem::Fn(method) = trait_item {
method_name = method.sig.ident.to_string();
// Refine name if needed
method.attrs.iter().for_each(|attr| {
if attr.path().is_ident("rpc") {
let _ = attr.parse_nested_meta(|meta| {
method_name = meta.value()?.parse::<LitStr>()?.value();
Ok(())
});
}
});
}
Ok(method_name)
}
// Return the method docs array and the description of the method
fn method_doc(method: &syn::TraitItem) -> Result<(Vec<String>, String), Box<dyn Error>> {
let mut method_doc = vec![];
if let syn::TraitItem::Fn(method) = method {
// Filter only doc attributes
let doc_attrs: Vec<_> = method
.attrs
.iter()
.filter(|attr| attr.path().is_ident("doc"))
.collect();
// If no doc attributes found, return an error
if doc_attrs.is_empty() {
return Err("No documentation attribute found for the method".into());
}
method.attrs.iter().for_each(|attr| {
if attr.path().is_ident("doc") {
method_doc.push(attr.to_token_stream().to_string());
}
});
}
// Extract the description from the first line of documentation
let description = match method_doc[0].split_once('"') {
Some((_, desc)) => desc.trim().to_string().replace('\'', "''"),
None => return Err("Description not found in method documentation".into()),
};
Ok((method_doc, description))
}
// Extract the tags from the method documentation. TODO: Assuming 1 tag per method for now
fn tags(method_doc: &[String]) -> Result<Vec<String>, Box<dyn Error>> {
// Find the line containing tags information
let tags_line = method_doc
.iter()
.find(|line| line.contains("tags:"))
.ok_or("Tags not found in method documentation")?;
// Extract tags from the tags line
let mut tags = Vec::new();
let tags_str = tags_line
.split(':')
.nth(1)
.ok_or("Invalid tags line")?
.trim();
// Split the tags string into individual tags
for tag in tags_str.split(',') {
let trimmed_tag = tag.trim_matches(|c: char| !c.is_alphanumeric());
if !trimmed_tag.is_empty() {
tags.push(trimmed_tag.to_string());
}
}
Ok(tags)
}
// Extract the parameters from the method documentation
fn get_params(method_doc: &[String]) -> Result<(String, String), Box<dyn Error>> {
// Find the start and end index of the parameters
let params_start_index = method_doc
.iter()
.enumerate()
.find(|(_, line)| line.contains("# Parameters"));
let notes_start_index = method_doc
.iter()
.enumerate()
.find(|(_, line)| line.contains("# Notes"));
// If start and end indices of parameters are found, extract them
if let (Some((params_index, _)), Some((notes_index, _))) =
(params_start_index, notes_start_index)
{
let params = &method_doc[params_index + 2..notes_index - 1];
// Initialize variables to store parameter descriptions and examples
let mut param_descriptions = Vec::new();
let mut param_examples = Vec::new();
// Iterate over the parameters and extract information
for param_line in params {
// Check if the line starts with the expected format
if param_line.trim().starts_with("# [doc = \" -") {
// Extract parameter name and description
if let Some((name, description)) = extract_param_info(param_line) {
param_descriptions.push(format!("- `{}` - {}", name, description));
// Extract parameter example if available
if let Some(example) = extract_param_example(param_line) {
param_examples.push(example);
}
}
}
}
// Format parameters and examples
let params_formatted = format!("[{}]", param_examples.join(", "));
let params_description = param_descriptions.join("\n");
return Ok((params_description, params_formatted));
}
Err("No parameters found".into())
}
// Extract parameter name and description
fn extract_param_info(param_line: &str) -> Option<(String, String)> {
let start_idx = param_line.find('`')?;
let end_idx = param_line.rfind('`')?;
let name = param_line[start_idx + 1..end_idx].trim().to_string();
let description_starts = param_line.find(") ")?;
let description_ends = param_line.rfind("\"]")?;
let description = param_line[description_starts + 2..description_ends]
.trim()
.to_string();
Some((name, description))
}
// Extract parameter example if available
fn extract_param_example(param_line: &str) -> Option<String> {
if let Some(example_start) = param_line.find("example=") {
let example_ends = param_line.rfind(')')?;
let example = param_line[example_start + 8..example_ends].trim();
Some(example.to_string())
} else {
None
}
}
// Create the request body
fn create_request_body(method_name: &str, parameters_example: &str) -> RequestBody {
// Add the method name to the request body
let method_name_prop = Property {
type_: "string".to_string(),
items: None,
default: method_name.to_string(),
};
// Add a hardcoded request_id to the request body
let request_id_prop = Property {
type_: "number".to_string(),
items: None,
default: "123".to_string(),
};
// Create the schema and add the first 2 properties
let mut schema = HashMap::new();
schema.insert("method".to_string(), method_name_prop);
schema.insert("id".to_string(), request_id_prop);
// Add the parameters with the extracted examples
let default = parameters_example.replace('\\', "");
schema.insert(
"params".to_string(),
Property {
type_: "array".to_string(),
items: Some(ArrayItems {}),
default,
},
);
// Create the request body
let content = Content {
application_json: Application {
schema: Schema {
type_: "object".to_string(),
properties: schema,
},
},
};
RequestBody {
required: true,
content,
}
}
// Create the responses
fn create_responses(
method_name: &str,
have_parameters: bool,
) -> Result<HashMap<String, Response>, Box<dyn Error>> {
let mut responses = HashMap::new();
let properties = get_default_properties(method_name)?;
let res_ok = Response {
description: "OK".to_string(),
content: Content {
application_json: Application {
schema: Schema {
type_: "object".to_string(),
properties,
},
},
},
};
responses.insert("200".to_string(), res_ok);
let mut properties = HashMap::new();
if have_parameters {
properties.insert(
"error".to_string(),
Property {
type_: "string".to_string(),
items: None,
default: "Invalid parameters".to_string(),
},
);
let res_bad_request = Response {
description: "Bad request".to_string(),
content: Content {
application_json: Application {
schema: Schema {
type_: "object".to_string(),
properties,
},
},
},
};
responses.insert("400".to_string(), res_bad_request);
}
Ok(responses)
}
// Add the parameters to the method description
fn add_params_to_description(description: &str, params_description: &str) -> String {
let mut new_description = description.to_string();
new_description.push_str("\n\n**Request body `params` arguments:**\n\n");
new_description.push_str(params_description);
new_description
}
// Get requests examples by using defaults from the Zebra RPC methods
fn get_default_properties(method_name: &str) -> Result<HashMap<String, Property>, Box<dyn Error>> {
// TODO: Complete the list of methods
let type_ = "object".to_string();
let items = None;
let mut props = HashMap::new();
let properties = match method_name {
"getinfo" => {
props.insert(
"result".to_string(),
Property {
type_,
items,
default: serde_json::to_string(&GetInfo::default())?,
},
);
props
}
"getbestblockhash" => {
props.insert(
"result".to_string(),
Property {
type_,
items,
default: serde_json::to_string(&GetBlockHash::default())?,
},
);
props
}
"getblockchaininfo" => {
props.insert(
"result".to_string(),
Property {
type_,
items,
default: serde_json::to_string(&GetBlockChainInfo::default())?,
},
);
props
}
"getblock" => {
props.insert(
"result".to_string(),
Property {
type_,
items,
default: serde_json::to_string(&GetBlock::default())?,
},
);
props
}
"getblockhash" => {
props.insert(
"result".to_string(),
Property {
type_,
items,
default: serde_json::to_string(&GetBlockHash::default())?,
},
);
props
}
"z_gettreestate" => {
props.insert(
"result".to_string(),
Property {
type_,
items,
default: serde_json::to_string(&GetTreestate::default())?,
},
);
props
}
_ => {
props.insert(
"result".to_string(),
Property {
type_,
items: None,
default: "{}".to_string(),
},
);
props
}
};
Ok(properties)
}

View File

@ -21,8 +21,8 @@ use tower::{
use zebra_chain::{
block::{self, Height, HeightDiff},
chain_tip::ChainTip,
parameters::genesis_hash,
};
use zebra_consensus::ParameterCheckpoint as _;
use zebra_network as zn;
use zebra_state as zs;
@ -500,7 +500,7 @@ where
));
let new_syncer = Self {
genesis_hash: genesis_hash(config.network.network),
genesis_hash: config.network.network.genesis_hash(),
max_checkpoint_height,
checkpoint_verify_concurrency_limit,
full_verify_concurrency_limit,

View File

@ -16,7 +16,7 @@ use zebra_chain::{
fmt::humantime_seconds,
parameters::{Network, NetworkUpgrade, POST_BLOSSOM_POW_TARGET_SPACING},
};
use zebra_consensus::CheckpointList;
use zebra_consensus::ParameterCheckpoint as _;
use zebra_state::MAX_BLOCK_REORG_HEIGHT;
use crate::components::sync::SyncStatus;
@ -82,7 +82,8 @@ pub async fn show_block_chain_progress(
// The minimum height of the valid best chain, based on:
// - the hard-coded checkpoint height,
// - the minimum number of blocks after the highest checkpoint.
let after_checkpoint_height = CheckpointList::new(network)
let after_checkpoint_height = network
.checkpoint_list()
.max_height()
.add(min_after_checkpoint_blocks)
.expect("hard-coded checkpoint height is far below Height::MAX");

View File

@ -120,8 +120,9 @@ pub(crate) async fn run() -> Result<()> {
let mut result_receiver = scan_task
.subscribe(keys.iter().cloned().collect())
.expect("should send subscribe message successfully")
.await
.expect("should send and receive message successfully");
.expect("should receive response successfully");
// Wait for the scanner to send a result in the channel
let result = tokio::time::timeout(WAIT_FOR_RESULTS_DURATION, result_receiver.recv()).await?;

View File

@ -5,7 +5,7 @@ use std::time::Duration;
use color_eyre::eyre::Result;
use zebra_chain::{block::Height, chain_tip::mock::MockChainTip, parameters::Network};
use zebra_consensus::CheckpointList;
use zebra_consensus::ParameterCheckpoint as _;
use zebrad::components::sync::end_of_support::{self, EOS_PANIC_AFTER, ESTIMATED_RELEASE_HEIGHT};
// Estimated blocks per day with the current 75-second block spacing.
@ -54,7 +54,7 @@ fn end_of_support_function() {
#[tracing_test::traced_test]
fn end_of_support_date() {
// Get the list of checkpoints.
let list = CheckpointList::new(Network::Mainnet);
let list = Network::Mainnet.checkpoint_list();
// Get the last one we have and use it as tip.
let higher_checkpoint = list.max_height();