Merge branch 'switch-to-zsa-crates-nu6' into switch-to-zsa-crates-nu6-txv6

commit 654b8d4b92
@@ -44,7 +44,7 @@ jobs:
       - name: Rust files
         id: changed-files-rust
-        uses: tj-actions/changed-files@v45.0.0
+        uses: tj-actions/changed-files@v45.0.2
         with:
           files: |
             **/*.rs
@@ -56,7 +56,7 @@ jobs:
       - name: Workflow files
         id: changed-files-workflows
-        uses: tj-actions/changed-files@v45.0.0
+        uses: tj-actions/changed-files@v45.0.2
        with:
          files: |
            .github/workflows/*.yml
Cargo.lock

@@ -695,6 +695,12 @@ version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"

+[[package]]
+name = "cfg_aliases"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
+
 [[package]]
 name = "chacha20"
 version = "0.9.1"

@@ -2499,6 +2505,16 @@ dependencies = [
  "lz4-sys",
 ]

+[[package]]
+name = "libyml"
+version = "0.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3302702afa434ffa30847a83305f0a69d6abd74293b6554c18ec85c7ef30c980"
+dependencies = [
+ "anyhow",
+ "version_check",
+]
+
 [[package]]
 name = "libz-sys"
 version = "1.1.18"

@@ -2705,6 +2721,18 @@ dependencies = [
  "winapi",
 ]

+[[package]]
+name = "nix"
+version = "0.29.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46"
+dependencies = [
+ "bitflags 2.6.0",
+ "cfg-if 1.0.0",
+ "cfg_aliases",
+ "libc",
+]
+
 [[package]]
 name = "nom"
 version = "7.1.3"

@@ -4242,16 +4270,18 @@ dependencies = [
 ]

 [[package]]
-name = "serde_yaml"
-version = "0.9.34+deprecated"
+name = "serde_yml"
+version = "0.0.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
+checksum = "59e2dd588bf1597a252c3b920e0143eb99b0f76e4e082f4c92ce34fbc9e71ddd"
 dependencies = [
  "indexmap 2.3.0",
  "itoa",
+ "libyml",
+ "memchr",
  "ryu",
  "serde",
- "unsafe-libyaml",
+ "version_check",
 ]

 [[package]]

@@ -5209,12 +5239,6 @@ dependencies = [
  "subtle",
 ]

-[[package]]
-name = "unsafe-libyaml"
-version = "0.2.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861"
-
 [[package]]
 name = "untrusted"
 version = "0.7.1"

@@ -5989,6 +6013,7 @@ dependencies = [
  "chrono",
  "color-eyre",
  "criterion",
+ "dirs",
  "ed25519-zebra",
  "equihash 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "futures",

@@ -6021,6 +6046,7 @@ dependencies = [
  "sha2",
  "spandoc",
  "static_assertions",
+ "tempfile",
  "thiserror",
  "tinyvec",
  "tokio",

@@ -6171,6 +6197,7 @@ dependencies = [
  "jsonrpc-core",
  "jsonrpc-derive",
  "jsonrpc-http-server",
+ "nix",
  "proptest",
  "prost 0.13.1",
  "rand 0.8.5",

@@ -6341,7 +6368,7 @@ dependencies = [
  "reqwest",
  "serde",
  "serde_json",
- "serde_yaml",
+ "serde_yml",
  "structopt",
  "syn 2.0.72",
  "thiserror",
@@ -1,16 +1,22 @@
 # System Requirements

-We recommend the following requirements for compiling and running `zebrad`:
+Zebra has the following hardware requirements.
+
+## Recommended Requirements

 - 4 CPU cores
 - 16 GB RAM
-- 300 GB available disk space for building binaries and storing cached chain
-  state
+- 300 GB available disk space
 - 100 Mbps network connection, with 300 GB of uploads and downloads per month

-Zebra's tests can take over an hour, depending on your machine. Note that you
-might be able to build and run Zebra on slower systems — we haven't tested its
-exact limits yet.
+## Minimum Hardware Requirements
+
+- 2 CPU cores
+- 4 GB RAM
+- 300 GB available disk space
+
+[Zebra has successfully run on an Orange Pi Zero 2W with a 512 GB microSD card
+without any issues.](https://x.com/Zerodartz/status/1811460885996798159)

 ## Disk Requirements

@@ -48,9 +54,6 @@ networks.
 - Ongoing updates: 10 MB - 10 GB upload and download per day, depending on
   user-created transaction size and peer requests.

-Zebra performs an initial sync every time its internal database version changes,
-so some version upgrades might require a full download of the whole chain.
-
 Zebra needs some peers which have a round-trip latency of 2 seconds or less. If
 this is a problem for you, please [open a
 ticket.](https://github.com/ZcashFoundation/zebra/issues/new/choose)
@@ -1,12 +1,13 @@
 # syntax=docker/dockerfile:1
+# check=skip=UndefinedVar

 # If you want to include a file in the Docker image, add it to .dockerignore.
 #
-# We are using five stages:
-# - chef: installs cargo-chef
-# - planner: computes the recipe file
-# - deps: caches our dependencies and sets the needed variables
-# - tests: builds tests
-# - release: builds release binary
-# - runtime: is our runtime environment
+# We are using 4 stages:
+# - deps: install build dependencies and sets the needed variables
+# - tests: builds tests binaries
+# - release: builds release binaries
+# - runtime: runs the release binaries
 #
 # We first set default values for build arguments used across the stages.
+# Each stage must define the build arguments (ARGs) it uses.

@@ -19,26 +20,19 @@ ARG FEATURES="default-release-binaries"
 ARG TEST_FEATURES="lightwalletd-grpc-tests zebra-checkpoints"
 ARG EXPERIMENTAL_FEATURES=""

-# This stage implements cargo-chef for docker layer caching
-FROM rust:bookworm as chef
-RUN cargo install cargo-chef --locked
-WORKDIR /opt/zebrad
-
-# Analyze the current project to determine the minimum subset of files
-# (Cargo.lock and Cargo.toml manifests) required to build it and cache dependencies
-#
-# The recipe.json is the equivalent of the Python requirements.txt file
-FROM chef AS planner
-COPY . .
-RUN cargo chef prepare --recipe-path recipe.json
+ARG APP_HOME="/opt/zebrad"
+ARG RUST_VERSION=1.79.0

 # In this stage we download all system requirements to build the project
 #
 # It also captures all the build arguments to be used as environment variables.
 # We set defaults for the arguments, in case the build does not include this information.
-FROM chef AS deps
+FROM rust:${RUST_VERSION}-bookworm AS deps
 SHELL ["/bin/bash", "-xo", "pipefail", "-c"]
-COPY --from=planner /opt/zebrad/recipe.json recipe.json

+# Set the default path for the zebrad binary
+ARG APP_HOME
+ENV APP_HOME=${APP_HOME}
+WORKDIR ${APP_HOME}

 # Install zebra build deps and Dockerfile deps
 RUN apt-get -qq update && \

@@ -48,27 +42,8 @@ RUN apt-get -qq update && \
     clang \
     ca-certificates \
     protobuf-compiler \
-    rsync \
+    rocksdb-tools \
-    ; \
-    rm -rf /var/lib/apt/lists/* /tmp/*
-
-# Install google OS Config agent to be able to get information from the VMs being deployed
-# into GCP for integration testing purposes, and as Mainnet nodes
-# TODO: this shouldn't be a hardcoded requirement for everyone
-RUN if [ "$(uname -m)" != "aarch64" ]; then \
-    apt-get -qq update && \
-    apt-get -qq install -y --no-install-recommends \
-    curl \
-    lsb-release \
-    && \
-    echo "deb http://packages.cloud.google.com/apt google-compute-engine-$(lsb_release -cs)-stable main" > /etc/apt/sources.list.d/google-compute-engine.list && \
-    curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \
-    apt-get -qq update && \
-    apt-get -qq install -y --no-install-recommends google-osconfig-agent; \
-    fi \
-    && \
-    rm -rf /var/lib/apt/lists/* /tmp/*
+    && rm -rf /var/lib/apt/lists/* /tmp/*

 # Build arguments and variables set for tracelog levels and debug information
 #

@@ -86,11 +61,14 @@ ARG COLORBT_SHOW_HIDDEN
 ENV COLORBT_SHOW_HIDDEN=${COLORBT_SHOW_HIDDEN:-1}

 ARG SHORT_SHA
-# If this is not set, it must be the empty string, so Zebra can try an alternative git commit source:
+# If this is not set, it must be an empty string, so Zebra can try an alternative git commit source:
 # https://github.com/ZcashFoundation/zebra/blob/9ebd56092bcdfc1a09062e15a0574c94af37f389/zebrad/src/application.rs#L179-L182
 ENV SHORT_SHA=${SHORT_SHA:-}

-ENV CARGO_HOME="/opt/zebrad/.cargo/"
+ENV CARGO_HOME="${APP_HOME}/.cargo/"
+
+# Copy the entrypoint script to be used on both images
+COPY ./docker/entrypoint.sh /etc/zebrad/entrypoint.sh

 # In this stage we build tests (without running then)
 #

@@ -98,12 +76,6 @@ ENV CARGO_HOME="/opt/zebrad/.cargo/"
 # An entrypoint.sh is only available in this step for easier test handling with variables.
 FROM deps AS tests

-COPY --from=electriccoinco/lightwalletd:latest /usr/local/bin/lightwalletd /usr/local/bin/
-
-# cargo uses timestamps for its cache, so they need to be in this order:
-# unmodified source files < previous build cache < modified source files
-COPY . .
-
 # Skip IPv6 tests by default, as some CI environment don't have IPv6 available
 ARG ZEBRA_SKIP_IPV6_TESTS
 ENV ZEBRA_SKIP_IPV6_TESTS=${ZEBRA_SKIP_IPV6_TESTS:-1}

@@ -116,66 +88,81 @@ ARG EXPERIMENTAL_FEATURES
 # TODO: add empty $EXPERIMENTAL_FEATURES when we can avoid adding an extra space to the end of the string
 ARG ENTRYPOINT_FEATURES="${FEATURES} ${TEST_FEATURES}"

-# Re-hydrate the minimum project skeleton identified by `cargo chef prepare` in the planner stage,
-# over the top of the original source files,
-# and build it to cache all possible sentry and test dependencies.
-#
-# This is the caching Docker layer for Rust tests!
-# It creates fake empty test binaries so dependencies are built, but Zebra is not fully built.
-#
-# TODO: add --locked when cargo-chef supports it
-RUN cargo chef cook --tests --release --features "${ENTRYPOINT_FEATURES}" --workspace --recipe-path recipe.json
-
-# Undo the source file changes made by cargo-chef.
-# rsync invalidates the cargo cache for the changed files only, by updating their timestamps.
-# This makes sure the fake empty binaries created by cargo-chef are rebuilt.
-COPY --from=planner /opt/zebrad zebra-original
-RUN rsync --recursive --checksum --itemize-changes --verbose zebra-original/ .
-RUN rm -r zebra-original
-
-# Build Zebra test binaries, but don't run them
-RUN cargo test --locked --release --features "${ENTRYPOINT_FEATURES}" --workspace --no-run
-RUN cp /opt/zebrad/target/release/zebrad /usr/local/bin
-RUN cp /opt/zebrad/target/release/zebra-checkpoints /usr/local/bin
-
-COPY ./docker/entrypoint.sh /
-RUN chmod u+x /entrypoint.sh
+# Leverage a cache mount to /usr/local/cargo/registry/
+# for downloaded dependencies, a cache mount to /usr/local/cargo/git/db
+# for git repository dependencies, and a cache mount to ${APP_HOME}/target/ for
+# compiled dependencies which will speed up subsequent builds.
+# Leverage a bind mount to each crate directory to avoid having to copy the
+# source code into the container. Once built, copy the executable to an
+# output directory before the cache mounted ${APP_HOME}/target/ is unmounted.
+RUN --mount=type=bind,source=zebrad,target=zebrad \
+    --mount=type=bind,source=zebra-chain,target=zebra-chain \
+    --mount=type=bind,source=zebra-network,target=zebra-network \
+    --mount=type=bind,source=zebra-state,target=zebra-state \
+    --mount=type=bind,source=zebra-script,target=zebra-script \
+    --mount=type=bind,source=zebra-consensus,target=zebra-consensus \
+    --mount=type=bind,source=zebra-rpc,target=zebra-rpc \
+    --mount=type=bind,source=zebra-node-services,target=zebra-node-services \
+    --mount=type=bind,source=zebra-test,target=zebra-test \
+    --mount=type=bind,source=zebra-utils,target=zebra-utils \
+    --mount=type=bind,source=zebra-scan,target=zebra-scan \
+    --mount=type=bind,source=zebra-grpc,target=zebra-grpc \
+    --mount=type=bind,source=tower-batch-control,target=tower-batch-control \
+    --mount=type=bind,source=tower-fallback,target=tower-fallback \
+    --mount=type=bind,source=Cargo.toml,target=Cargo.toml \
+    --mount=type=bind,source=Cargo.lock,target=Cargo.lock \
+    --mount=type=cache,target=${APP_HOME}/target/ \
+    --mount=type=cache,target=/usr/local/cargo/git/db \
+    --mount=type=cache,target=/usr/local/cargo/registry/ \
+    cargo test --locked --release --features "${ENTRYPOINT_FEATURES}" --workspace --no-run && \
+    cp ${APP_HOME}/target/release/zebrad /usr/local/bin && \
+    cp ${APP_HOME}/target/release/zebra-checkpoints /usr/local/bin
+
+# Copy the lightwalletd binary and source files to be able to run tests
+COPY --from=electriccoinco/lightwalletd:latest /usr/local/bin/lightwalletd /usr/local/bin/
+COPY ./ ./

 # Entrypoint environment variables
 ENV ENTRYPOINT_FEATURES=${ENTRYPOINT_FEATURES}
 # We repeat the ARGs here, so they are available in the entrypoint.sh script for $RUN_ALL_EXPERIMENTAL_TESTS
-ARG EXPERIMENTAL_FEATURES="shielded-scan journald prometheus filter-reload"
+ARG EXPERIMENTAL_FEATURES="journald prometheus filter-reload"
 ENV ENTRYPOINT_FEATURES_EXPERIMENTAL="${ENTRYPOINT_FEATURES} ${EXPERIMENTAL_FEATURES}"

 # By default, runs the entrypoint tests specified by the environmental variables (if any are set)
-ENTRYPOINT [ "/entrypoint.sh" ]
+ENTRYPOINT [ "/etc/zebrad/entrypoint.sh" ]

 # In this stage we build a release (generate the zebrad binary)
 #
-# This step also adds `cargo chef` as this stage is completely independent from the
+# This step also adds `cache mounts` as this stage is completely independent from the
 # `test` stage. This step is a dependency for the `runtime` stage, which uses the resulting
 # zebrad binary from this step.
 FROM deps AS release

-COPY . .
-
 ARG FEATURES

-# This is the caching layer for Rust zebrad builds.
-# It creates a fake empty zebrad binary, see above for details.
-#
-# TODO: add --locked when cargo-chef supports it
-RUN cargo chef cook --release --features "${FEATURES}" --package zebrad --bin zebrad --recipe-path recipe.json
-
-# Undo the source file changes made by cargo-chef, so the fake empty zebrad binary is rebuilt.
-COPY --from=planner /opt/zebrad zebra-original
-RUN rsync --recursive --checksum --itemize-changes --verbose zebra-original/ .
-RUN rm -r zebra-original
-
-# Build zebrad
-RUN cargo build --locked --release --features "${FEATURES}" --package zebrad --bin zebrad
-
-COPY ./docker/entrypoint.sh /
-RUN chmod u+x /entrypoint.sh
+RUN --mount=type=bind,source=tower-batch-control,target=tower-batch-control \
+    --mount=type=bind,source=tower-fallback,target=tower-fallback \
+    --mount=type=bind,source=zebra-chain,target=zebra-chain \
+    --mount=type=bind,source=zebra-consensus,target=zebra-consensus \
+    --mount=type=bind,source=zebra-grpc,target=zebra-grpc \
+    --mount=type=bind,source=zebra-network,target=zebra-network \
+    --mount=type=bind,source=zebra-node-services,target=zebra-node-services \
+    --mount=type=bind,source=zebra-rpc,target=zebra-rpc \
+    --mount=type=bind,source=zebra-scan,target=zebra-scan \
+    --mount=type=bind,source=zebra-script,target=zebra-script \
+    --mount=type=bind,source=zebra-state,target=zebra-state \
+    --mount=type=bind,source=zebra-test,target=zebra-test \
+    --mount=type=bind,source=zebra-utils,target=zebra-utils \
+    --mount=type=bind,source=zebrad,target=zebrad \
+    --mount=type=bind,source=Cargo.toml,target=Cargo.toml \
+    --mount=type=bind,source=Cargo.lock,target=Cargo.lock \
+    --mount=type=cache,target=${APP_HOME}/target/ \
+    --mount=type=cache,target=/usr/local/cargo/git/db \
+    --mount=type=cache,target=/usr/local/cargo/registry/ \
+    cargo build --locked --release --features "${FEATURES}" --package zebrad --bin zebrad && \
+    cp ${APP_HOME}/target/release/zebrad /usr/local/bin

 # This stage is only used when deploying nodes or when only the resulting zebrad binary is needed
 #

@@ -183,14 +170,18 @@ RUN chmod u+x /entrypoint.sh
 # binary from the `release` stage
 FROM debian:bookworm-slim AS runtime

+# Set the default path for the zebrad binary
+ARG APP_HOME
+ENV APP_HOME=${APP_HOME}
+WORKDIR ${APP_HOME}
+
 RUN apt-get update && \
     apt-get install -y --no-install-recommends \
     ca-certificates \
     curl \
     rocksdb-tools \
     gosu \
-    && \
-    rm -rf /var/lib/apt/lists/* /tmp/*
+    && rm -rf /var/lib/apt/lists/* /tmp/*

 # Create a non-privileged user that the app will run under.
 # Running as root inside the container is running as root in the Docker host

@@ -208,6 +199,7 @@ RUN addgroup --system --gid ${GID} ${USER} \
     --system \
     --disabled-login \
     --shell /bin/bash \
+    --home ${APP_HOME} \
     --uid "${UID}" \
     --gid "${GID}" \
     ${USER}

@@ -217,16 +209,20 @@ ARG FEATURES
 ENV FEATURES=${FEATURES}

 # Path and name of the config file
 # These are set to a default value when not defined in the environment
 ENV ZEBRA_CONF_DIR=${ZEBRA_CONF_DIR:-/etc/zebrad}
 ENV ZEBRA_CONF_FILE=${ZEBRA_CONF_FILE:-zebrad.toml}

-COPY --from=release /opt/zebrad/target/release/zebrad /usr/local/bin
-COPY --from=release /entrypoint.sh /
+RUN mkdir -p ${ZEBRA_CONF_DIR} && chown ${UID}:${UID} ${ZEBRA_CONF_DIR} \
+    && chown ${UID}:${UID} ${APP_HOME}
+
+COPY --from=release /usr/local/bin/zebrad /usr/local/bin
+COPY --from=release /etc/zebrad/entrypoint.sh /etc/zebrad

 # Expose configured ports
 EXPOSE 8233 18233

 # Update the config file based on the Docker run variables,
 # and launch zebrad with it
-ENTRYPOINT [ "/entrypoint.sh" ]
+ENTRYPOINT [ "/etc/zebrad/entrypoint.sh" ]
 CMD ["zebrad"]
@@ -357,11 +357,15 @@ case "$1" in
             exec cargo test --locked --release --features "zebra-test" --package zebra-scan -- --nocapture --include-ignored scan_task_commands

         else
-            exec gosu "$USER" "$@"
+            exec "$@"
         fi
         fi
         ;;
     *)
-        exec gosu "$USER" "$@"
+        if command -v gosu >/dev/null 2>&1; then
+            exec gosu "$USER" "$@"
+        else
+            exec "$@"
+        fi
         ;;
 esac
openapi.yaml

@@ -28,7 +28,7 @@ paths:
                   default: getinfo
                 id:
                   type: string
-                  default: x2r3lRddGL
+                  default: uf2E54tQkk
                 params:
                   type: array
                   items: {}

@@ -61,7 +61,7 @@ paths:
                   default: getblockchaininfo
                 id:
                   type: string
-                  default: w8Lb0nAvLd
+                  default: Sbre3vivr8
                 params:
                   type: array
                   items: {}

@@ -99,7 +99,7 @@ paths:
                   default: getaddressbalance
                 id:
                   type: string
-                  default: QbTztoTvRo
+                  default: f5qarOBgzK
                 params:
                   type: array
                   items: {}

@@ -147,7 +147,7 @@ paths:
                   default: sendrawtransaction
                 id:
                   type: string
-                  default: aDK5RQWj16
+                  default: IlNHvAcSMS
                 params:
                   type: array
                   items: {}

@@ -196,7 +196,7 @@ paths:
                   default: getblock
                 id:
                   type: string
-                  default: xxCP1d61X0
+                  default: s9678BM3Lc
                 params:
                   type: array
                   items: {}

@@ -239,7 +239,7 @@ paths:
                   default: getbestblockhash
                 id:
                   type: string
-                  default: DoZgd1j7xW
+                  default: FGQPJY8Tp8
                 params:
                   type: array
                   items: {}

@@ -272,7 +272,7 @@ paths:
                   default: getbestblockheightandhash
                 id:
                   type: string
-                  default: 0iUFHsOjk3
+                  default: c2MfkL7xP9
                 params:
                   type: array
                   items: {}

@@ -305,7 +305,7 @@ paths:
                   default: getrawmempool
                 id:
                   type: string
-                  default: WXG2c6FcCK
+                  default: BugnNFhJpA
                 params:
                   type: array
                   items: {}

@@ -343,7 +343,7 @@ paths:
                   default: z_gettreestate
                 id:
                   type: string
-                  default: 38P0xXV0do
+                  default: fCUQvR1BVa
                 params:
                   type: array
                   items: {}

@@ -393,7 +393,7 @@ paths:
                   default: z_getsubtreesbyindex
                 id:
                   type: string
-                  default: 662iR8VZGT
+                  default: TtPnptV6EU
                 params:
                   type: array
                   items: {}

@@ -432,7 +432,7 @@ paths:
                   default: getrawtransaction
                 id:
                   type: string
-                  default: UuvVrzSzqC
+                  default: QqYeOGSzje
                 params:
                   type: array
                   items: {}

@@ -480,7 +480,7 @@ paths:
                   default: getaddresstxids
                 id:
                   type: string
-                  default: KMss2wDMwH
+                  default: AsWWVyqp8x
                 params:
                   type: array
                   items: {}

@@ -528,7 +528,7 @@ paths:
                   default: getaddressutxos
                 id:
                   type: string
-                  default: 4Y6BAhe6Lf
+                  default: Qscn5dUFgD
                 params:
                   type: array
                   items: {}

@@ -554,6 +554,39 @@ paths:
                   error:
                     type: string
                     default: Invalid parameters
+  /stop:
+    post:
+      tags:
+        - control
+      description: Stop the running zebrad process.
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                method:
+                  type: string
+                  default: stop
+                id:
+                  type: string
+                  default: WuIaPXV5fO
+                params:
+                  type: array
+                  items: {}
+                  default: '[]'
+      responses:
+        '200':
+          description: OK
+          content:
+            application/json:
+              schema:
+                type: object
+                properties:
+                  result:
+                    type: object
+                    default: 'null'
   /getblockcount:
     post:
       tags:

@@ -571,7 +604,7 @@ paths:
                   default: getblockcount
                 id:
                   type: string
-                  default: nzPm5W3X1G
+                  default: '5F9M7Wp0oI'
                 params:
                   type: array
                   items: {}

@@ -609,7 +642,7 @@ paths:
                   default: getblockhash
                 id:
                   type: string
-                  default: KLKosq2Z8E
+                  default: f7hdgVjctr
                 params:
                   type: array
                   items: {}

@@ -657,7 +690,7 @@ paths:
                   default: getblocktemplate
                 id:
                   type: string
-                  default: spj7gKe2AA
+                  default: pq0uXn3YGs
                 params:
                   type: array
                   items: {}

@@ -695,7 +728,7 @@ paths:
                   default: submitblock
                 id:
                   type: string
-                  default: QOQsC3nA7z
+                  default: bs4v4JmVw3
                 params:
                   type: array
                   items: {}

@@ -728,7 +761,7 @@ paths:
                   default: getmininginfo
                 id:
                   type: string
-                  default: Si3Sdb9ICT
+                  default: pp5xV6v3pm
                 params:
                   type: array
                   items: {}

@@ -743,7 +776,7 @@ paths:
                properties:
                  result:
                    type: object
-                   default: '{"networksolps":0,"networkhashps":0,"chain":"","testnet":false}'
+                   default: '{"blocks":0,"networksolps":0,"networkhashps":0,"chain":"","testnet":false}'
   /getnetworksolps:
     post:
       tags:

@@ -761,7 +794,7 @@ paths:
                   default: getnetworksolps
                 id:
                   type: string
-                  default: jWvKPdOxDa
+                  default: '7bU98TeCV6'
                 params:
                   type: array
                   items: {}

@@ -794,7 +827,7 @@ paths:
                   default: getnetworkhashps
                 id:
                   type: string
-                  default: wnFwBVFrN0
+                  default: fskOJeXqjo
                 params:
                   type: array
                   items: {}

@@ -827,7 +860,7 @@ paths:
                   default: getpeerinfo
                 id:
                   type: string
-                  default: NpKiq59CE8
+                  default: jPV8ufjDdt
                 params:
                   type: array
                   items: {}

@@ -865,7 +898,7 @@ paths:
                   default: validateaddress
                 id:
                   type: string
-                  default: PDjTChWgFW
+                  default: xOyxICseV9
                 params:
                   type: array
                   items: {}

@@ -903,7 +936,7 @@ paths:
                   default: z_validateaddress
                 id:
                   type: string
-                  default: aCeb6xbIuo
+                  default: xa6PoC4uN6
                 params:
                   type: array
                   items: {}

@@ -941,7 +974,7 @@ paths:
                   default: getblocksubsidy
                 id:
                   type: string
-                  default: EeBvVXCJon
+                  default: vYEVtnVK9o
                 params:
                   type: array
                   items: {}

@@ -984,7 +1017,7 @@ paths:
                   default: getdifficulty
                 id:
                   type: string
-                  default: jg2K8N0ZG4
+                  default: tVzSTZu2sD
                 params:
                   type: array
                   items: {}

@@ -1022,7 +1055,7 @@ paths:
                   default: z_listunifiedreceivers
                 id:
                   type: string
-                  default: Y3gscsg8yT
+                  default: le2NmJBmPt
                 params:
                   type: array
                   items: {}

@@ -1038,3 +1071,51 @@ paths:
                  result:
                    type: object
                    default: '{"orchard":"orchard address if any","sapling":"sapling address if any","p2pkh":"p2pkh address if any","p2sh":"p2sh address if any"}'
+  /generate:
+    post:
+      tags:
+        - generating
+      description: |-
+        Mine blocks immediately. Returns the block hashes of the generated blocks.
+
+        **Request body `params` arguments:**
+
+        - `num_blocks` - Number of blocks to be generated.
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                method:
+                  type: string
+                  default: generate
+                id:
+                  type: string
+                  default: vVVOWxHqlN
+                params:
+                  type: array
+                  items: {}
+                  default: '[1]'
+      responses:
+        '200':
+          description: OK
+          content:
+            application/json:
+              schema:
+                type: object
+                properties:
+                  result:
+                    type: object
+                    default: '{}'
+        '400':
+          description: Bad request
+          content:
+            application/json:
+              schema:
+                type: object
+                properties:
+                  error:
+                    type: string
+                    default: Invalid parameters
@@ -1,4 +1,3 @@
-
 # cargo-vet config file

 [cargo-vet]

@@ -1414,10 +1413,6 @@ criteria = "safe-to-deploy"
 version = "3.8.1"
 criteria = "safe-to-deploy"

-[[exemptions.serde_yaml]]
-version = "0.9.34+deprecated"
-criteria = "safe-to-deploy"
-
 [[exemptions.sha2]]
 version = "0.10.8"
 criteria = "safe-to-deploy"
@@ -81,6 +81,8 @@ group = "0.13.0"
 incrementalmerkletree.workspace = true
 jubjub = "0.10.0"
 lazy_static = "1.4.0"
+tempfile = "3.11.0"
+dirs = "5.0.1"
 num-integer = "0.1.46"
 primitive-types = "0.12.2"
 rand_core = "0.6.4"
@@ -106,7 +106,7 @@ impl ChainTip for MockChainTip {
     }

     fn best_tip_mined_transaction_ids(&self) -> Arc<[transaction::Hash]> {
-        unreachable!("Method not used in tests");
+        Arc::new([])
     }

     fn estimate_distance_to_network_chain_tip(
@@ -0,0 +1,71 @@
+//! Common functions used in Zebra.
+
+use std::{
+    ffi::OsString,
+    fs,
+    io::{self, Write},
+    path::PathBuf,
+};
+
+use tempfile::PersistError;
+
+/// Returns Zebra's default cache directory path.
+pub fn default_cache_dir() -> PathBuf {
+    dirs::cache_dir()
+        .unwrap_or_else(|| std::env::current_dir().unwrap().join("cache"))
+        .join("zebra")
+}
+
+/// Accepts a target file path and a byte-slice.
+///
+/// Atomically writes the byte-slice to a file to avoid corrupting the file if Zebra
+/// panics, crashes, or exits while the file is being written, or if multiple Zebra instances
+/// try to read and write the same file.
+///
+/// Returns the provided file path if successful.
+///
+/// # Concurrency
+///
+/// This function blocks on filesystem operations and should be called in a blocking task
+/// when calling from an async environment.
+///
+/// # Panics
+///
+/// If the provided `file_path` is a directory path.
+pub fn atomic_write(
+    file_path: PathBuf,
+    data: &[u8],
+) -> io::Result<Result<PathBuf, PersistError<fs::File>>> {
+    // Get the file's parent directory, or use Zebra's default cache directory
+    let file_dir = file_path
+        .parent()
+        .map(|p| p.to_owned())
+        .unwrap_or_else(default_cache_dir);
+
+    // Create the directory if needed.
+    fs::create_dir_all(&file_dir)?;
+
+    // Give the temporary file a similar name to the permanent file,
+    // but hide it in directory listings.
+    let mut tmp_file_prefix: OsString = ".tmp.".into();
+    tmp_file_prefix.push(
+        file_path
+            .file_name()
+            .expect("file path must have a file name"),
+    );
+
+    // Create the temporary file in the same directory as the permanent file,
+    // so atomic filesystem operations are possible.
+    let mut tmp_file = tempfile::Builder::new()
+        .prefix(&tmp_file_prefix)
+        .tempfile_in(file_dir)?;
+
+    tmp_file.write_all(data)?;
+
+    // Atomically write the temp file to `file_path`.
+    let persist_result = tmp_file
+        .persist(&file_path)
+        // Drops the temp file and returns the file path.
+        .map(|_| file_path);
+    Ok(persist_result)
+}
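
A minimal usage sketch for the new helper (illustrative, not part of this commit; the path and peer data are made up). The outer `io::Result` carries ordinary I/O errors, and the inner `Result` carries the `tempfile` persist (rename) error, whose public `error` field is the underlying `io::Error`:

```rust
use std::path::PathBuf;

use zebra_chain::common::atomic_write;

fn main() -> std::io::Result<()> {
    // Hypothetical cache file path; any writable location works.
    let path = PathBuf::from("/tmp/zebra-example/peers.cache");

    // The outer `?` propagates I/O errors; the inner `Result` reports a
    // failure to atomically rename the temporary file into place.
    let written = atomic_write(path, b"192.0.2.1:8233\n")?
        .map_err(|persist_error| persist_error.error)?;

    println!("atomically wrote {}", written.display());
    Ok(())
}
```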
@@ -22,6 +22,7 @@ pub mod amount;
 pub mod block;
 pub mod chain_sync_status;
 pub mod chain_tip;
+pub mod common;
 pub mod diagnostic;
 pub mod error;
 pub mod fmt;
@@ -2,7 +2,6 @@

 use std::{
     collections::HashSet,
-    ffi::OsString,
     io::{self, ErrorKind},
     net::{IpAddr, SocketAddr},
     time::Duration,

@@ -10,11 +9,11 @@ use std::{

 use indexmap::IndexSet;
 use serde::{de, Deserialize, Deserializer};
-use tempfile::NamedTempFile;
-use tokio::{fs, io::AsyncWriteExt};
-use tracing::Span;
+use tokio::fs;
+
+use tracing::Span;
 use zebra_chain::{
+    common::atomic_write,
     parameters::{
         testnet::{self, ConfiguredActivationHeights, ConfiguredFundingStreams},
         Magic, Network, NetworkKind,

@@ -503,90 +502,36 @@ impl Config {
         // Make a newline-separated list
         let peer_data = peer_list.join("\n");

-        // Write to a temporary file, so the cache is not corrupted if Zebra shuts down or crashes
-        // at the same time.
-        //
-        // # Concurrency
-        //
-        // We want to use async code to avoid blocking the tokio executor on filesystem operations,
-        // but `tempfile` is implemented using non-asyc methods. So we wrap its filesystem
-        // operations in `tokio::spawn_blocking()`.
-        //
-        // TODO: split this out into an atomic_write_to_tmp_file() method if we need to re-use it
-
-        // Create the peer cache directory if needed
-        let peer_cache_dir = peer_cache_file
-            .parent()
-            .expect("cache path always has a network directory")
-            .to_owned();
-        tokio::fs::create_dir_all(&peer_cache_dir).await?;
-
-        // Give the temporary file a similar name to the permanent cache file,
-        // but hide it in directory listings.
-        let mut tmp_peer_cache_prefix: OsString = ".tmp.".into();
-        tmp_peer_cache_prefix.push(
-            peer_cache_file
-                .file_name()
-                .expect("cache file always has a file name"),
-        );
-
-        // Create the temporary file.
-        // Do blocking filesystem operations on a dedicated thread.
+        // Write the peer cache file atomically so the cache is not corrupted if Zebra shuts down
+        // or crashes.
         let span = Span::current();
-        let tmp_peer_cache_file = tokio::task::spawn_blocking(move || {
-            span.in_scope(move || {
-                // Put the temporary file in the same directory as the permanent file,
-                // so atomic filesystem operations are possible.
-                tempfile::Builder::new()
-                    .prefix(&tmp_peer_cache_prefix)
-                    .tempfile_in(peer_cache_dir)
-            })
+        let write_result = tokio::task::spawn_blocking(move || {
+            span.in_scope(move || atomic_write(peer_cache_file, peer_data.as_bytes()))
         })
         .await
-        .expect("unexpected panic creating temporary peer cache file")?;
+        .expect("could not write the peer cache file")?;

-        // Write the list to the file asynchronously, by extracting the inner file, using it,
-        // then combining it back into a type that will correctly drop the file on error.
-        let (tmp_peer_cache_file, tmp_peer_cache_path) = tmp_peer_cache_file.into_parts();
-        let mut tmp_peer_cache_file = tokio::fs::File::from_std(tmp_peer_cache_file);
-        tmp_peer_cache_file.write_all(peer_data.as_bytes()).await?;
-
-        let tmp_peer_cache_file =
-            NamedTempFile::from_parts(tmp_peer_cache_file, tmp_peer_cache_path);
-
-        // Atomically replace the current cache with the temporary cache.
-        // Do blocking filesystem operations on a dedicated thread.
-        let span = Span::current();
-        tokio::task::spawn_blocking(move || {
-            span.in_scope(move || {
-                let result = tmp_peer_cache_file.persist(&peer_cache_file);
-
-                // Drops the temp file if needed
-                match result {
-                    Ok(_temp_file) => {
-                        info!(
-                            cached_ip_count = ?peer_list.len(),
-                            ?peer_cache_file,
-                            "updated cached peer IP addresses"
-                        );
-
-                        for ip in &peer_list {
-                            metrics::counter!(
-                                "zcash.net.peers.cache",
-                                "cache" => peer_cache_file.display().to_string(),
-                                "remote_ip" => ip.to_string()
-                            )
-                            .increment(1);
-                        }
-
-                        Ok(())
-                    }
-                    Err(error) => Err(error.error),
-                }
-            })
-        })
-        .await
-        .expect("unexpected panic making temporary peer cache file permanent")
+        match write_result {
+            Ok(peer_cache_file) => {
+                info!(
+                    cached_ip_count = ?peer_list.len(),
+                    ?peer_cache_file,
+                    "updated cached peer IP addresses"
+                );
+
+                for ip in &peer_list {
+                    metrics::counter!(
+                        "zcash.net.peers.cache",
+                        "cache" => peer_cache_file.display().to_string(),
+                        "remote_ip" => ip.to_string()
+                    )
+                    .increment(1);
+                }
+
+                Ok(())
+            }
+            Err(error) => Err(error.error),
+        }
     }
 }
@@ -2,7 +2,7 @@

 use std::path::{Path, PathBuf};

-use zebra_chain::parameters::Network;
+use zebra_chain::{common::default_cache_dir, parameters::Network};

 /// A cache directory config field.
 ///

@@ -56,12 +56,7 @@ impl CacheDir {
     /// Returns the `zebra-network` base cache directory, if enabled.
     pub fn cache_dir(&self) -> Option<PathBuf> {
         match self {
-            Self::IsEnabled(is_enabled) => is_enabled.then(|| {
-                dirs::cache_dir()
-                    .unwrap_or_else(|| std::env::current_dir().unwrap().join("cache"))
-                    .join("zebra")
-            }),
-
+            Self::IsEnabled(is_enabled) => is_enabled.then(default_cache_dir),
             Self::CustomPath(cache_dir) => Some(cache_dir.to_owned()),
         }
     }
@@ -34,7 +34,7 @@ rpc-client = [
     "serde_json",
 ]

-shielded-scan = ["tokio"]
+shielded-scan = []

 [dependencies]
 zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.39" }

@@ -48,7 +48,7 @@ jsonrpc-core = { version = "18.0.0", optional = true }
 reqwest = { version = "0.11.26", default-features = false, features = ["rustls-tls"], optional = true }
 serde = { version = "1.0.204", optional = true }
 serde_json = { version = "1.0.122", optional = true }
-tokio = { version = "1.39.2", features = ["time"], optional = true }
+tokio = { version = "1.39.2", features = ["time", "sync"] }

 [dev-dependencies]
@@ -4,6 +4,7 @@

 use std::collections::HashSet;

+use tokio::sync::oneshot;
 use zebra_chain::transaction::{self, UnminedTx, UnminedTxId};

 #[cfg(feature = "getblocktemplate-rpcs")]

@@ -114,13 +115,11 @@ pub enum Response {
     /// Returns matching cached rejected [`UnminedTxId`]s from the mempool,
     RejectedTransactionIds(HashSet<UnminedTxId>),

-    /// Returns a list of queue results.
-    ///
-    /// These are the results of the initial queue checks.
-    /// The transaction may also fail download or verification later.
+    /// Returns a list of initial queue checks results and a oneshot receiver
+    /// for awaiting download and/or verification results.
     ///
     /// Each result matches the request at the corresponding vector index.
-    Queued(Vec<Result<(), BoxError>>),
+    Queued(Vec<Result<oneshot::Receiver<Result<(), BoxError>>, BoxError>>),

     /// Confirms that the mempool has checked for recently verified transactions.
     CheckedForVerifiedTransactions,
@@ -87,6 +87,8 @@ tracing = "0.1.39"
 hex = { version = "0.4.3", features = ["serde"] }
 serde = { version = "1.0.204", features = ["serde_derive"] }

+# For the `stop` RPC method.
+nix = { version = "0.29.0", features = ["signal"] }

 zcash_primitives = { workspace = true, features = ["transparent-inputs"] }
@@ -301,6 +301,19 @@ pub trait Rpc {
         &self,
         address_strings: AddressStrings,
     ) -> BoxFuture<Result<Vec<GetAddressUtxos>>>;
+
+    /// Stop the running zebrad process.
+    ///
+    /// # Notes
+    ///
+    /// - Works for non windows targets only.
+    /// - Works only if the network of the running zebrad process is `Regtest`.
+    ///
+    /// zcashd reference: [`stop`](https://zcash.github.io/rpc/stop.html)
+    /// method: post
+    /// tags: control
+    #[rpc(name = "stop")]
+    fn stop(&self) -> Result<String>;
 }

 /// RPC method implementations.

@@ -664,7 +677,7 @@ where

         let response = mempool.oneshot(request).await.map_server_error()?;

-        let queue_results = match response {
+        let mut queue_results = match response {
             mempool::Response::Queued(results) => results,
             _ => unreachable!("incorrect response variant from mempool service"),
         };

@@ -675,10 +688,17 @@ where
             "mempool service returned more results than expected"
         );

-        tracing::debug!("sent transaction to mempool: {:?}", &queue_results[0]);
+        let queue_result = queue_results
+            .pop()
+            .expect("there should be exactly one item in Vec")
+            .inspect_err(|err| tracing::debug!("sent transaction to mempool: {:?}", &err))
+            .map_server_error()?
+            .await;

-        queue_results[0]
-            .as_ref()
+        tracing::debug!("sent transaction to mempool: {:?}", &queue_result);
+
+        queue_result
+            .map_server_error()?
             .map(|_| SentTransactionHash(transaction_hash))
             .map_server_error()
     }

@@ -1337,6 +1357,32 @@ where
         }
         .boxed()
     }
+
+    fn stop(&self) -> Result<String> {
+        #[cfg(not(target_os = "windows"))]
+        if self.network.is_regtest() {
+            match nix::sys::signal::raise(nix::sys::signal::SIGINT) {
+                Ok(_) => Ok("Zebra server stopping".to_string()),
+                Err(error) => Err(Error {
+                    code: ErrorCode::InternalError,
+                    message: format!("Failed to shut down: {}", error),
+                    data: None,
+                }),
+            }
+        } else {
+            Err(Error {
+                code: ErrorCode::MethodNotFound,
+                message: "stop is only available on regtest networks".to_string(),
+                data: None,
+            })
+        }
+        #[cfg(target_os = "windows")]
+        Err(Error {
+            code: ErrorCode::MethodNotFound,
+            message: "stop is not available in windows targets".to_string(),
+            data: None,
+        })
+    }
 }
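
A usage sketch, not part of this commit: with a Regtest node running and its RPC server enabled, the new `stop` method can be exercised with a plain JSON-RPC POST. This assumes `reqwest` (with its `json` feature), `tokio`, and `serde_json` as client-side dependencies, and an example listen address of `127.0.0.1:18232`; match the address to your actual zebrad config.

```rust
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // Assumed example RPC listen address.
    let body = reqwest::Client::new()
        .post("http://127.0.0.1:18232")
        .json(&json!({ "id": "stop-example", "method": "stop", "params": [] }))
        .send()
        .await?
        .text()
        .await?;

    // On Regtest, the `result` field should be "Zebra server stopping";
    // on other networks this returns a "method not found" style error.
    println!("{body}");
    Ok(())
}
```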

 /// Returns the best chain tip height of `latest_chain_tip`,
@@ -19,7 +19,7 @@ use zebra_chain::{
         Network, NetworkKind, NetworkUpgrade, POW_AVERAGING_WINDOW,
     },
     primitives,
-    serialization::ZcashDeserializeInto,
+    serialization::{ZcashDeserializeInto, ZcashSerialize},
     transparent::{
         self, EXTRA_ZEBRA_COINBASE_DATA, MAX_COINBASE_DATA_LEN, MAX_COINBASE_HEIGHT_DATA_LEN,
     },

@@ -47,7 +47,9 @@ use crate::methods::{
     // TODO: move the types/* modules directly under get_block_template_rpcs,
     // and combine any modules with the same names.
     types::{
-        get_block_template::GetBlockTemplate,
+        get_block_template::{
+            proposal::TimeSource, proposal_block_from_template, GetBlockTemplate,
+        },
         get_mining_info,
         long_poll::LongPollInput,
         peer_info::PeerInfo,

@@ -283,6 +285,22 @@ pub trait GetBlockTemplateRpc {
         &self,
         address: String,
     ) -> BoxFuture<Result<unified_address::Response>>;
+
+    #[rpc(name = "generate")]
+    /// Mine blocks immediately. Returns the block hashes of the generated blocks.
+    ///
+    /// # Parameters
+    ///
+    /// - `num_blocks`: (numeric, required, example=1) Number of blocks to be generated.
+    ///
+    /// # Notes
+    ///
+    /// Only works if the network of the running zebrad process is `Regtest`.
+    ///
+    /// zcashd reference: [`generate`](https://zcash.github.io/rpc/generate.html)
+    /// method: post
+    /// tags: generating
+    fn generate(&self, num_blocks: u32) -> BoxFuture<Result<Vec<GetBlockHash>>>;
 }

 /// RPC method implementations.

@@ -994,9 +1012,39 @@ where
     fn get_mining_info(&self) -> BoxFuture<Result<get_mining_info::Response>> {
         let network = self.network.clone();
+        let mut state = self.state.clone();
+
+        let chain_tip = self.latest_chain_tip.clone();
+        let tip_height = chain_tip.best_tip_height().unwrap_or(Height(0)).0;
+
+        let mut current_block_tx = None;
+        if tip_height > 0 {
+            let mined_tx_ids = chain_tip.best_tip_mined_transaction_ids();
+            current_block_tx =
+                (!mined_tx_ids.is_empty()).then(|| mined_tx_ids.len().saturating_sub(1));
+        }
+
         let solution_rate_fut = self.get_network_sol_ps(None, None);
         async move {
+            // Get the current block size.
+            let mut current_block_size = None;
+            if tip_height > 0 {
+                let request = zebra_state::ReadRequest::TipBlockSize;
+                let response: zebra_state::ReadResponse = state
+                    .ready()
+                    .and_then(|service| service.call(request))
+                    .await
+                    .map_server_error()?;
+                current_block_size = match response {
+                    zebra_state::ReadResponse::TipBlockSize(Some(block_size)) => Some(block_size),
+                    _ => None,
+                };
+            }
+
             Ok(get_mining_info::Response::new(
+                tip_height,
+                current_block_size,
+                current_block_tx,
                 network,
                 solution_rate_fut.await?,
             ))

@@ -1357,6 +1405,61 @@ where
         }
         .boxed()
     }
+
+    fn generate(&self, num_blocks: u32) -> BoxFuture<Result<Vec<GetBlockHash>>> {
+        let rpc: GetBlockTemplateRpcImpl<
+            Mempool,
+            State,
+            Tip,
+            BlockVerifierRouter,
+            SyncStatus,
+            AddressBook,
+        > = self.clone();
+        let network = self.network.clone();
+
+        async move {
+            if !network.is_regtest() {
+                return Err(Error {
+                    code: ErrorCode::ServerError(0),
+                    message: "generate is only supported on regtest".to_string(),
+                    data: None,
+                });
+            }
+
+            let mut block_hashes = Vec::new();
+            for _ in 0..num_blocks {
+                let block_template = rpc.get_block_template(None).await.map_server_error()?;
+
+                let get_block_template::Response::TemplateMode(block_template) = block_template
+                else {
+                    return Err(Error {
+                        code: ErrorCode::ServerError(0),
+                        message: "error generating block template".to_string(),
+                        data: None,
+                    });
+                };
+
+                let proposal_block = proposal_block_from_template(
+                    &block_template,
+                    TimeSource::CurTime,
+                    NetworkUpgrade::current(&network, Height(block_template.height)),
+                )
+                .map_server_error()?;
+                let hex_proposal_block =
+                    HexData(proposal_block.zcash_serialize_to_vec().map_server_error()?);
+
+                let _submit = rpc
+                    .submit_block(hex_proposal_block, None)
+                    .await
+                    .map_server_error()?;
+
+                block_hashes.push(GetBlockHash(proposal_block.hash()));
+            }
+
+            Ok(block_hashes)
+        }
+        .boxed()
+    }
 }
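
The same kind of client sketch as for `stop` works for the new `generate` method (again an assumption, not part of the commit): only the method name and the `params` array change.

```rust
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // Mine 3 Regtest blocks; the listen address is an assumed example.
    let response: serde_json::Value = reqwest::Client::new()
        .post("http://127.0.0.1:18232")
        .json(&json!({ "id": "generate-example", "method": "generate", "params": [3] }))
        .send()
        .await?
        .json()
        .await?;

    // `result` should hold the hashes of the generated blocks.
    println!("{response}");
    Ok(())
}
```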

 // Put support functions in a submodule, to keep this file small.
@@ -5,6 +5,18 @@ use zebra_chain::parameters::Network;
 /// Response to a `getmininginfo` RPC request.
 #[derive(Debug, Default, PartialEq, Eq, serde::Serialize)]
 pub struct Response {
+    /// The current tip height.
+    #[serde(rename = "blocks")]
+    tip_height: u32,
+
+    /// The size of the last mined block if any.
+    #[serde(rename = "currentblocksize", skip_serializing_if = "Option::is_none")]
+    current_block_size: Option<usize>,
+
+    /// The number of transactions in the last mined block if any.
+    #[serde(rename = "currentblocktx", skip_serializing_if = "Option::is_none")]
+    current_block_tx: Option<usize>,
+
     /// The estimated network solution rate in Sol/s.
     networksolps: u64,

@@ -20,8 +32,17 @@ pub struct Response {

 impl Response {
     /// Creates a new `getmininginfo` response
-    pub fn new(network: Network, networksolps: u64) -> Self {
+    pub fn new(
+        tip_height: u32,
+        current_block_size: Option<usize>,
+        current_block_tx: Option<usize>,
+        network: Network,
+        networksolps: u64,
+    ) -> Self {
         Self {
+            tip_height,
+            current_block_size,
+            current_block_tx,
             networksolps,
             networkhashps: networksolps,
             chain: network.bip70_network_name(),
@@ -7,6 +7,7 @@ use hex::ToHex;
 use jsonrpc_core::{Error, ErrorCode};
 use proptest::{collection::vec, prelude::*};
 use thiserror::Error;
+use tokio::sync::oneshot;
 use tower::buffer::Buffer;

 use zebra_chain::{

@@ -61,7 +62,9 @@ proptest! {

         let unmined_transaction = UnminedTx::from(transaction);
         let expected_request = mempool::Request::Queue(vec![unmined_transaction.into()]);
-        let response = mempool::Response::Queued(vec![Ok(())]);
+        let (rsp_tx, rsp_rx) = oneshot::channel();
+        let _ = rsp_tx.send(Ok(()));
+        let response = mempool::Response::Queued(vec![Ok(rsp_rx)]);

         mempool
             .expect_request(expected_request)

@@ -111,10 +114,10 @@ proptest! {
             .expect("Transaction serializes successfully");
         let transaction_hex = hex::encode(&transaction_bytes);

-        let send_task = tokio::spawn(rpc.send_raw_transaction(transaction_hex));
+        let send_task = tokio::spawn(rpc.send_raw_transaction(transaction_hex.clone()));

         let unmined_transaction = UnminedTx::from(transaction);
-        let expected_request = mempool::Request::Queue(vec![unmined_transaction.into()]);
+        let expected_request = mempool::Request::Queue(vec![unmined_transaction.clone().into()]);

         mempool
             .expect_request(expected_request)

@@ -138,6 +141,32 @@ proptest! {
             "Result is not a server error: {result:?}"
         );

+        let send_task = tokio::spawn(rpc.send_raw_transaction(transaction_hex));
+
+        let expected_request = mempool::Request::Queue(vec![unmined_transaction.clone().into()]);
+
+        let (rsp_tx, rsp_rx) = oneshot::channel();
+        let _ = rsp_tx.send(Err("any verification error".into()));
+        mempool
+            .expect_request(expected_request)
+            .await?
+            .respond(Ok::<_, BoxError>(mempool::Response::Queued(vec![Ok(rsp_rx)])));
+
+        let result = send_task
+            .await
+            .expect("Sending raw transactions should not panic");
+
+        prop_assert!(
+            matches!(
+                result,
+                Err(Error {
+                    code: ErrorCode::ServerError(_),
+                    ..
+                })
+            ),
+            "Result is not a server error: {result:?}"
+        );
+
         // The queue task should continue without errors or panics
         let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never();
         prop_assert!(rpc_tx_queue_task_result.is_none());

@@ -897,7 +926,9 @@ proptest! {
             // now a retry will be sent to the mempool
             let expected_request =
                 mempool::Request::Queue(vec![mempool::Gossip::Tx(tx_unmined.clone())]);
-            let response = mempool::Response::Queued(vec![Ok(())]);
+            let (rsp_tx, rsp_rx) = oneshot::channel();
+            let _ = rsp_tx.send(Ok(()));
+            let response = mempool::Response::Queued(vec![Ok(rsp_rx)]);

             mempool
                 .expect_request(expected_request)

@@ -997,7 +1028,9 @@ proptest! {
         for tx in txs.clone() {
             let expected_request =
                 mempool::Request::Queue(vec![mempool::Gossip::Tx(UnminedTx::from(tx))]);
-            let response = mempool::Response::Queued(vec![Ok(())]);
+            let (rsp_tx, rsp_rx) = oneshot::channel();
+            let _ = rsp_tx.send(Ok(()));
+            let response = mempool::Response::Queued(vec![Ok(rsp_rx)]);

             mempool
                 .expect_request(expected_request)
@@ -3,6 +3,8 @@ source: zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs
 expression: get_mining_info
 ---
 {
+  "blocks": 1687104,
+  "currentblocksize": 1617,
   "networksolps": 2,
   "networkhashps": 2,
   "chain": "main",

@@ -3,6 +3,8 @@ source: zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs
 expression: get_mining_info
 ---
 {
+  "blocks": 1842420,
+  "currentblocksize": 1618,
   "networksolps": 0,
   "networkhashps": 0,
   "chain": "test",
@@ -5,7 +5,7 @@ use std::{collections::HashSet, env, sync::Arc};
 use proptest::prelude::*;

 use chrono::Duration;
-use tokio::time;
+use tokio::{sync::oneshot, time};
 use tower::ServiceExt;

 use zebra_chain::{

@@ -196,7 +196,9 @@ proptest! {
         let request = Request::Queue(vec![Gossip::Tx(unmined_transaction.clone())]);
         let expected_request = Request::Queue(vec![Gossip::Tx(unmined_transaction.clone())]);
         let send_task = tokio::spawn(mempool.clone().oneshot(request));
-        let response = Response::Queued(vec![Ok(())]);
+        let (rsp_tx, rsp_rx) = oneshot::channel();
+        let _ = rsp_tx.send(Ok(()));
+        let response = Response::Queued(vec![Ok(rsp_rx)]);

         mempool
             .expect_request(expected_request)

@@ -337,7 +339,9 @@ proptest! {
         // retry will queue the transaction to mempool
         let gossip = Gossip::Tx(UnminedTx::from(transaction.clone()));
         let expected_request = Request::Queue(vec![gossip]);
-        let response = Response::Queued(vec![Ok(())]);
+        let (rsp_tx, rsp_rx) = oneshot::channel();
+        let _ = rsp_tx.send(Ok(()));
+        let response = Response::Queued(vec![Ok(rsp_rx)]);

         mempool
             .expect_request(expected_request)
@@ -12,7 +12,7 @@ use serde::{Deserialize, Serialize};
 use tokio::task::{spawn_blocking, JoinHandle};
 use tracing::Span;

-use zebra_chain::parameters::Network;
+use zebra_chain::{common::default_cache_dir, parameters::Network};

 use crate::{
     constants::{DATABASE_FORMAT_VERSION_FILE_NAME, RESTORABLE_DB_VERSIONS, STATE_DATABASE_KIND},

@@ -173,12 +173,8 @@ impl Config {

 impl Default for Config {
     fn default() -> Self {
-        let cache_dir = dirs::cache_dir()
-            .unwrap_or_else(|| std::env::current_dir().unwrap().join("cache"))
-            .join("zebra");
-
         Self {
-            cache_dir,
+            cache_dir: default_cache_dir(),
             ephemeral: false,
             delete_old_database: true,
             debug_stop_at_height: None,

@@ -471,6 +467,8 @@ pub(crate) use hidden::{
 pub(crate) mod hidden {
     #![allow(dead_code)]

+    use zebra_chain::common::atomic_write;
+
     use super::*;

     /// Writes `changed_version` to the on-disk state database after the format is changed.

@@ -512,10 +510,9 @@ pub(crate) mod hidden {

         let version = format!("{}.{}", changed_version.minor, changed_version.patch);

-        // # Concurrency
-        //
-        // The caller handles locking for this file write.
-        fs::write(version_path, version.as_bytes())?;
+        // Write the version file atomically so the cache is not corrupted if Zebra shuts down or
+        // crashes.
+        atomic_write(version_path, version.as_bytes())??;

         Ok(())
     }
@@ -1063,6 +1063,11 @@ pub enum ReadRequest {
     /// Returns [`ReadResponse::ValidBlockProposal`] when successful, or an error if
     /// the block fails contextual validation.
     CheckBlockProposalValidity(SemanticallyVerifiedBlock),
+
+    #[cfg(feature = "getblocktemplate-rpcs")]
+    /// Returns [`ReadResponse::TipBlockSize(usize)`](ReadResponse::TipBlockSize)
+    /// with the current best chain tip block size in bytes.
+    TipBlockSize,
 }

 impl ReadRequest {

@@ -1098,6 +1103,8 @@ impl ReadRequest {
             ReadRequest::SolutionRate { .. } => "solution_rate",
             #[cfg(feature = "getblocktemplate-rpcs")]
             ReadRequest::CheckBlockProposalValidity(_) => "check_block_proposal_validity",
+            #[cfg(feature = "getblocktemplate-rpcs")]
+            ReadRequest::TipBlockSize => "tip_block_size",
         }
     }
@@ -229,6 +229,10 @@ pub enum ReadResponse {
     #[cfg(feature = "getblocktemplate-rpcs")]
     /// Response to [`ReadRequest::CheckBlockProposalValidity`]
     ValidBlockProposal,
+
+    #[cfg(feature = "getblocktemplate-rpcs")]
+    /// Response to [`ReadRequest::TipBlockSize`]
+    TipBlockSize(Option<usize>),
 }

 /// A structure with the information needed from the state to build a `getblocktemplate` RPC response.

@@ -315,7 +319,7 @@ impl TryFrom<ReadResponse> for Response {
             ReadResponse::ValidBlockProposal => Ok(Response::ValidBlockProposal),

             #[cfg(feature = "getblocktemplate-rpcs")]
-            ReadResponse::ChainInfo(_) | ReadResponse::SolutionRate(_) => {
+            ReadResponse::ChainInfo(_) | ReadResponse::SolutionRate(_) | ReadResponse::TipBlockSize(_) => {
                 Err("there is no corresponding Response for this ReadResponse")
             }
         }
@@ -39,6 +39,9 @@ use zebra_chain::{
     subtree::NoteCommitmentSubtreeIndex,
 };

+#[cfg(feature = "getblocktemplate-rpcs")]
+use zebra_chain::{block::Height, serialization::ZcashSerialize};
+
 use crate::{
     constants::{
         MAX_FIND_BLOCK_HASHES_RESULTS, MAX_FIND_BLOCK_HEADERS_RESULTS_FOR_ZEBRA,
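The new `ZcashSerialize` import is what lets the handler below measure the tip block: serialize the block to bytes and take the length. A sketch of that one idea, compiling against `zebra_chain` (the helper name is illustrative):

```rust
use zebra_chain::serialization::ZcashSerialize;

/// Sketch: the consensus-serialized size of any `ZcashSerialize` value,
/// computed the same way the new request handler measures the tip block.
fn serialized_size<T: ZcashSerialize>(value: &T) -> Result<usize, std::io::Error> {
    Ok(value.zcash_serialize_to_vec()?.len())
}
```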
@@ -1905,6 +1908,46 @@ impl Service<ReadRequest> for ReadStateService {
                 })
                 .wait_for_panics()
             }
+
+            #[cfg(feature = "getblocktemplate-rpcs")]
+            ReadRequest::TipBlockSize => {
+                let state = self.clone();
+
+                tokio::task::spawn_blocking(move || {
+                    span.in_scope(move || {
+                        // Get the best chain tip height.
+                        let tip_height = state
+                            .non_finalized_state_receiver
+                            .with_watch_data(|non_finalized_state| {
+                                read::tip_height(non_finalized_state.best_chain(), &state.db)
+                            })
+                            .unwrap_or(Height(0));
+
+                        // Get the block at the best chain tip height.
+                        let block = state.non_finalized_state_receiver.with_watch_data(
+                            |non_finalized_state| {
+                                read::block(
+                                    non_finalized_state.best_chain(),
+                                    &state.db,
+                                    tip_height.into(),
+                                )
+                            },
+                        );
+
+                        // The work is done in the future.
+                        timer.finish(module_path!(), line!(), "ReadRequest::TipBlockSize");
+
+                        // Respond with the length of the obtained block if any.
+                        match block {
+                            Some(b) => Ok(ReadResponse::TipBlockSize(Some(
+                                b.zcash_serialize_to_vec()?.len(),
+                            ))),
+                            None => Ok(ReadResponse::TipBlockSize(None)),
+                        }
+                    })
+                })
+                .wait_for_panics()
+            }
         }
     }
 }
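A hedged sketch of the calling side, as an RPC method might use it. The function name and service bound are illustrative; `ReadRequest`, `ReadResponse`, and `BoxError` are the types from the hunks above, so this compiles against the state crate rather than standing alone:

```rust
use tower::{Service, ServiceExt};

/// Hypothetical caller: fetch the serialized size of the best chain tip
/// block. `None` means the state has no tip block yet.
async fn tip_block_size<S>(mut read_state: S) -> Result<Option<usize>, BoxError>
where
    S: Service<ReadRequest, Response = ReadResponse, Error = BoxError>,
{
    let response = read_state
        .ready()
        .await?
        .call(ReadRequest::TipBlockSize)
        .await?;

    match response {
        ReadResponse::TipBlockSize(size) => Ok(size),
        _ => unreachable!("the state service answers TipBlockSize in kind"),
    }
}
```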
@@ -290,6 +290,7 @@ impl ZebraDb {
     ///
     /// - Propagates any errors from writing to the DB
+    /// - Propagates any errors from updating history and note commitment trees
     #[allow(clippy::unwrap_in_result)]
     pub(in super::super) fn write_block(
         &mut self,
         finalized: FinalizedBlock,
@@ -77,7 +77,7 @@ openapi-generator = [
     "zebra-rpc",
     "syn",
     "quote",
-    "serde_yaml",
+    "serde_yml",
     "serde"
 ]
@@ -121,7 +121,7 @@ zcash_protocol.workspace = true
 rand = "0.8.5"
 syn = { version = "2.0.72", features = ["full"], optional = true }
 quote = { version = "1.0.36", optional = true }
-serde_yaml = { version = "0.9.34+deprecated", optional = true }
+serde_yml = { version = "0.0.12", optional = true }
 serde = { version = "1.0.204", features = ["serde_derive"], optional = true }
 indexmap = "2.3.0"
@@ -112,7 +112,7 @@ This program is commonly used as part of `zebrad-log-filter` where hashes will b

 The program is designed to filter the output from the zebra terminal or log file. Each time a hash is seen the script will capture it and get the additional information using `zebrad-hash-lookup`.

-Assuming `zebrad`, `zclash-cli`, `zebrad-hash-lookup` and `zebrad-log-filter` are in your path the program can be used as:
+Assuming `zebrad`, `zcash-cli`, `zebrad-hash-lookup` and `zebrad-log-filter` are in your path the program can be used as:

 ```sh
 $ zebrad -v start | zebrad-log-filter
@@ -174,9 +174,9 @@ fn main() -> Result<(), Box<dyn Error>> {
     let all_methods = Methods { paths: methods };

     // Add openapi header and write to file
-    let yaml_string = serde_yaml::to_string(&all_methods)?;
+    let yml_string = serde_yml::to_string(&all_methods)?;
     let mut w = File::create("openapi.yaml")?;
-    w.write_all(format!("{}{}", create_yaml(), yaml_string).as_bytes())?;
+    w.write_all(format!("{}{}", create_yaml(), yml_string).as_bytes())?;

     Ok(())
 }
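`serde_yaml` is deprecated upstream, and this commit swaps in the `serde_yml` fork; the one-token change above suggests the fork keeps the same entry points. A small round-trip sketch under that assumption (the `Method` struct is hypothetical):

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct Method {
    name: String,
    params: Vec<String>,
}

fn main() -> Result<(), serde_yml::Error> {
    let method = Method {
        name: "getinfo".into(),
        params: vec![],
    };

    // `to_string` and `from_str` mirror the old `serde_yaml` API.
    let yaml = serde_yml::to_string(&method)?;
    let parsed: Method = serde_yml::from_str(&yaml)?;
    assert_eq!(method, parsed);

    Ok(())
}
```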
@@ -543,6 +543,7 @@ fn get_default_properties(method_name: &str) -> Result<IndexMap<String, Property
         )?,
         // control
         "getinfo" => default_property(type_, items.clone(), GetInfo::default())?,
+        "stop" => default_property(type_, items.clone(), ())?,
         // transaction
         "sendrawtransaction" => {
             default_property(type_, items.clone(), SentTransactionHash::default())?
@@ -27,7 +27,7 @@ use std::{
 };

 use futures::{future::FutureExt, stream::Stream};
-use tokio::sync::broadcast;
+use tokio::sync::{broadcast, oneshot};
 use tokio_stream::StreamExt;
 use tower::{buffer::Buffer, timeout::Timeout, util::BoxService, Service};
@@ -560,7 +560,7 @@ impl Service<Request> for Mempool {
                 for tx in tx_retries {
                     // This is just an efficiency optimisation, so we don't care if queueing
                     // transaction requests fails.
-                    let _result = tx_downloads.download_if_needed_and_verify(tx);
+                    let _result = tx_downloads.download_if_needed_and_verify(tx, None);
                 }
             }
@@ -608,8 +608,8 @@ impl Service<Request> for Mempool {
                         tracing::trace!("chain grew during tx verification, retrying ..",);

                         // We don't care if re-queueing the transaction request fails.
-                        let _result =
-                            tx_downloads.download_if_needed_and_verify(tx.transaction.into());
+                        let _result = tx_downloads
+                            .download_if_needed_and_verify(tx.transaction.into(), None);
                     }
                 }
                 Ok(Err((txid, error))) => {
@@ -758,16 +758,24 @@ impl Service<Request> for Mempool {
             Request::Queue(gossiped_txs) => {
                 trace!(req_count = ?gossiped_txs.len(), "got mempool Queue request");

-                let rsp: Vec<Result<(), BoxError>> = gossiped_txs
-                    .into_iter()
-                    .map(|gossiped_tx| -> Result<(), MempoolError> {
-                        storage.should_download_or_verify(gossiped_tx.id())?;
-                        tx_downloads.download_if_needed_and_verify(gossiped_tx)?;
-
-                        Ok(())
-                    })
-                    .map(|result| result.map_err(BoxError::from))
-                    .collect();
+                let rsp: Vec<Result<oneshot::Receiver<Result<(), BoxError>>, BoxError>> =
+                    gossiped_txs
+                        .into_iter()
+                        .map(
+                            |gossiped_tx| -> Result<
+                                oneshot::Receiver<Result<(), BoxError>>,
+                                MempoolError,
+                            > {
+                                let (rsp_tx, rsp_rx) = oneshot::channel();
+                                storage.should_download_or_verify(gossiped_tx.id())?;
+                                tx_downloads
+                                    .download_if_needed_and_verify(gossiped_tx, Some(rsp_tx))?;
+
+                                Ok(rsp_rx)
+                            },
+                        )
+                        .map(|result| result.map_err(BoxError::from))
+                        .collect();

                 // We've added transactions to the queue
                 self.update_metrics();
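With this rewrite, each entry in `Response::Queued` carries a `oneshot::Receiver` instead of a bare `Result`, so callers can wait for the eventual download-and-verify outcome rather than only the queueing outcome. A hedged sketch of the calling side (`queue_and_wait` is hypothetical; `Request`, `Response`, `Gossip`, and `BoxError` are the surrounding mempool types):

```rust
use tower::{Service, ServiceExt};

/// Hypothetical caller: queue one gossiped transaction, then await its
/// verification result.
async fn queue_and_wait<S>(mut mempool: S, gossiped_tx: Gossip) -> Result<(), BoxError>
where
    S: Service<Request, Response = Response, Error = BoxError>,
{
    let response = mempool
        .ready()
        .await?
        .call(Request::Queue(vec![gossiped_tx]))
        .await?;

    let mut receivers = match response {
        Response::Queued(receivers) => receivers,
        _ => unreachable!("Queue requests are answered with Queued"),
    };

    // The outer `Result` is the queueing outcome; awaiting the receiver
    // yields the eventual download-and-verify outcome.
    let receiver = receivers
        .pop()
        .expect("one receiver per queued transaction")?;
    receiver.await?
}
```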
@@ -6,7 +6,7 @@ use proptest::{
     collection::{hash_set, vec},
     prelude::*,
 };
-use tokio::time;
+use tokio::{sync::oneshot, time};

 use zebra_chain::{
     chain_sync_status::ChainSyncStatus, parameters::Network, transaction::UnminedTxId,
@@ -317,9 +317,17 @@ async fn respond_to_queue_request(
     expected_transaction_ids: HashSet<UnminedTxId>,
     response: impl IntoIterator<Item = Result<(), MempoolError>>,
 ) -> Result<(), TestCaseError> {
-    let response = response
+    let response: Vec<Result<oneshot::Receiver<Result<(), BoxError>>, BoxError>> = response
         .into_iter()
-        .map(|result| result.map_err(BoxError::from))
+        .map(|result| {
+            result
+                .map(|_| {
+                    let (rsp_tx, rsp_rx) = oneshot::channel();
+                    let _ = rsp_tx.send(Ok(()));
+                    rsp_rx
+                })
+                .map_err(BoxError::from)
+        })
         .collect();

     mempool
@@ -51,7 +51,7 @@ use zebra_chain::{
 use zebra_consensus::transaction as tx;
 use zebra_network as zn;
 use zebra_node_services::mempool::Gossip;
-use zebra_state as zs;
+use zebra_state::{self as zs, CloneError};

 use crate::components::sync::{BLOCK_DOWNLOAD_TIMEOUT, BLOCK_VERIFY_TIMEOUT};
@@ -105,17 +105,17 @@ pub const MAX_INBOUND_CONCURRENCY: usize = 25;
 struct CancelDownloadAndVerify;

 /// Errors that can occur while downloading and verifying a transaction.
-#[derive(Error, Debug)]
+#[derive(Error, Debug, Clone)]
 #[allow(dead_code)]
 pub enum TransactionDownloadVerifyError {
     #[error("transaction is already in state")]
     InState,

     #[error("error in state service")]
-    StateError(#[source] BoxError),
+    StateError(#[source] CloneError),

     #[error("error downloading transaction")]
-    DownloadFailed(#[source] BoxError),
+    DownloadFailed(#[source] CloneError),

     #[error("transaction download / verification was cancelled")]
     Cancelled,
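Making the error enum `Clone` is what lets one verification result be both returned from the task and sent over the responder channel, but `BoxError` is not `Clone`. `zebra_state::CloneError` fills that gap; the standard technique, sketched below as an illustration rather than the real type's definition, is to share the boxed error behind an `Arc`. It also explains the `BoxError::from("no transactions returned").into()` change further down: the `&str` is boxed first, then converted into the cloneable wrapper.

```rust
use std::{error::Error, fmt, sync::Arc};

type BoxError = Box<dyn Error + Send + Sync + 'static>;

/// Illustrative cloneable wrapper around a non-`Clone` boxed error: cloning
/// the wrapper only bumps the `Arc` reference count.
#[derive(Clone, Debug)]
struct CloneableError(Arc<BoxError>);

impl fmt::Display for CloneableError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl Error for CloneableError {
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        // The wrapped error is this error's source.
        Some(&**self.0)
    }
}

impl From<BoxError> for CloneableError {
    fn from(err: BoxError) -> Self {
        CloneableError(Arc::new(err))
    }
}
```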
@@ -240,9 +240,11 @@ where
     ///
     /// Returns the action taken in response to the queue request.
     #[instrument(skip(self, gossiped_tx), fields(txid = %gossiped_tx.id()))]
+    #[allow(clippy::unwrap_in_result)]
     pub fn download_if_needed_and_verify(
         &mut self,
         gossiped_tx: Gossip,
+        rsp_tx: Option<oneshot::Sender<Result<(), BoxError>>>,
     ) -> Result<(), MempoolError> {
         let txid = gossiped_tx.id();
@@ -295,7 +297,7 @@ where
             Ok((Some(height), next_height))
         }
         Ok(_) => unreachable!("wrong response"),
-        Err(e) => Err(TransactionDownloadVerifyError::StateError(e)),
+        Err(e) => Err(TransactionDownloadVerifyError::StateError(e.into())),
     }?;

     trace!(?txid, ?next_height, "got next height");
@@ -307,11 +309,12 @@ where
     let tx = match network
         .oneshot(req)
         .await
+        .map_err(CloneError::from)
         .map_err(TransactionDownloadVerifyError::DownloadFailed)?
     {
         zn::Response::Transactions(mut txs) => txs.pop().ok_or_else(|| {
             TransactionDownloadVerifyError::DownloadFailed(
-                "no transactions returned".into(),
+                BoxError::from("no transactions returned").into(),
             )
         })?,
         _ => unreachable!("wrong response to transaction request"),
@@ -373,7 +376,7 @@ where

         let task = tokio::spawn(async move {
             // Prefer the cancel handle if both are ready.
-            tokio::select! {
+            let result = tokio::select! {
                 biased;
                 _ = &mut cancel_rx => {
                     trace!("task cancelled prior to completion");
@@ -381,7 +384,19 @@ where
                     Err((TransactionDownloadVerifyError::Cancelled, txid))
                 }
                 verification = fut => verification,
-            }
+            };
+
+            // Send the result to responder channel if one was provided.
+            if let Some(rsp_tx) = rsp_tx {
+                let _ = rsp_tx.send(
+                    result
+                        .as_ref()
+                        .map(|_| ())
+                        .map_err(|(err, _)| err.clone().into()),
+                );
+            }
+
+            result
         });

         self.pending.push(task);
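Binding the `select!` result lets the spawned task do two things with one outcome: notify the optional responder channel and still return the result to the `pending` task queue. A reduced, runnable sketch of that shape (the error type is simplified to `String`, and the verification future is stubbed out):

```rust
use tokio::sync::oneshot;

#[tokio::main]
async fn main() {
    let (_cancel_tx, mut cancel_rx) = oneshot::channel::<()>();
    let (rsp_tx, rsp_rx) = oneshot::channel::<Result<(), String>>();
    let rsp_tx = Some(rsp_tx);

    let task = tokio::spawn(async move {
        // Prefer the cancel handle if both are ready, as in the diff.
        let result: Result<(), String> = tokio::select! {
            biased;
            _ = &mut cancel_rx => Err("cancelled".to_string()),
            verification = async { Ok::<(), String>(()) } => verification,
        };

        // Send the result to the responder channel if one was provided,
        // ignoring a dropped receiver, then still return it to the caller.
        if let Some(rsp_tx) = rsp_tx {
            let _ = rsp_tx.send(result.clone());
        }

        result
    });

    assert_eq!(task.await.expect("task should not panic"), Ok(()));
    assert_eq!(rsp_rx.await.expect("sender was used"), Ok(()));
}
```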
@@ -458,6 +473,7 @@ where
         match state
             .ready()
             .await
+            .map_err(CloneError::from)
             .map_err(TransactionDownloadVerifyError::StateError)?
             .call(zs::Request::Transaction(txid.mined_id()))
             .await
@@ -465,7 +481,7 @@ where
         Ok(zs::Response::Transaction(None)) => Ok(()),
         Ok(zs::Response::Transaction(Some(_))) => Err(TransactionDownloadVerifyError::InState),
         Ok(_) => unreachable!("wrong response"),
-        Err(e) => Err(TransactionDownloadVerifyError::StateError(e)),
+        Err(e) => Err(TransactionDownloadVerifyError::StateError(e.into())),
     }?;

     Ok(())
@@ -445,12 +445,17 @@ async fn mempool_cancel_mined() -> Result<(), Report> {
         .call(Request::Queue(vec![txid.into()]))
         .await
         .unwrap();
-    let queued_responses = match response {
+    let mut queued_responses = match response {
         Response::Queued(queue_responses) => queue_responses,
         _ => unreachable!("will never happen in this test"),
     };
     assert_eq!(queued_responses.len(), 1);
-    assert!(queued_responses[0].is_ok());
+
+    let queued_response = queued_responses
+        .pop()
+        .expect("already checked that there is exactly 1 item in Vec")
+        .expect("initial queue checks result should be Ok");

     assert_eq!(mempool.tx_downloads().in_flight(), 1);

     // Push block 2 to the state
@@ -489,6 +494,14 @@ async fn mempool_cancel_mined() -> Result<(), Report> {
     // Check if download was cancelled.
     assert_eq!(mempool.tx_downloads().in_flight(), 0);

+    assert!(
+        queued_response
+            .await
+            .expect("channel should not be closed")
+            .is_err(),
+        "queued tx should fail to download and verify due to chain tip change"
+    );
+
     Ok(())
 }
@@ -136,7 +136,7 @@ pub async fn run(network: Network) -> Result<()> {
         ?zebra_rpc_address,
         "waiting for zebrad to open its RPC port...",
     );
-    zebrad.expect_stdout_line_matches(&format!("Opened RPC endpoint at {zebra_rpc_address}"))?;
+    zebrad.expect_stdout_line_matches(format!("Opened RPC endpoint at {zebra_rpc_address}"))?;

     tracing::info!(
         ?network,
@@ -34,7 +34,7 @@ pub(crate) async fn run() -> Result<()> {
     let rpc_address = zebra_rpc_address.expect("getpeerinfo test must have RPC port");

     // Wait until port is open.
-    zebrad.expect_stdout_line_matches(&format!("Opened RPC endpoint at {rpc_address}"))?;
+    zebrad.expect_stdout_line_matches(format!("Opened RPC endpoint at {rpc_address}"))?;

     tracing::info!(?rpc_address, "zebrad opened its RPC port",);
@@ -59,7 +59,7 @@ pub(crate) async fn run() -> Result<()> {
         ?rpc_address,
         "spawned isolated zebrad with shorter chain, waiting for zebrad to open its RPC port..."
     );
-    zebrad.expect_stdout_line_matches(&format!("Opened RPC endpoint at {rpc_address}"))?;
+    zebrad.expect_stdout_line_matches(format!("Opened RPC endpoint at {rpc_address}"))?;

     tracing::info!(?rpc_address, "zebrad opened its RPC port",);
@@ -118,7 +118,7 @@ pub async fn run() -> Result<()> {
         ?zebra_rpc_address,
         "spawned isolated zebrad with shorter chain, waiting for zebrad to open its RPC port..."
    );
-    zebrad.expect_stdout_line_matches(&format!("Opened RPC endpoint at {zebra_rpc_address}"))?;
+    zebrad.expect_stdout_line_matches(format!("Opened RPC endpoint at {zebra_rpc_address}"))?;

     tracing::info!(
         ?zebra_rpc_address,
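The last four hunks all drop a `&` from `&format!(..)`, which suggests `expect_stdout_line_matches` now takes its pattern by value (for example as `impl ToString`) rather than as `&str`. A sketch of why such a signature accepts both forms (the function here is illustrative, not the real test-harness API):

```rust
/// Illustrative signature: `impl ToString` accepts `String`, `&str`, and
/// anything else that formats to a string, so `format!(..)` can be passed
/// without a borrow.
fn expect_line(pattern: impl ToString) {
    let pattern = pattern.to_string();
    println!("waiting for a log line matching: {pattern}");
}

fn main() {
    let addr = "127.0.0.1:8232";
    expect_line(format!("Opened RPC endpoint at {addr}")); // owned String
    expect_line("Opened RPC endpoint"); // &str still works
}
```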