refactor(build): deprecate gcr.io as a registry and build faster (#4298)

* refactor(build): use better stage naming and document it

* refactor(build): use multiple cache sources

* docs(build): add a comment for cache

* fix(build): remove gcr.io as it does not support OCI images
Gustavo Valverde 2022-05-10 17:00:09 -04:00 committed by GitHub
parent ed1bde1b8d
commit 93e158936e
5 changed files with 70 additions and 40 deletions

View File

@@ -63,7 +63,6 @@ jobs:
# list of Docker images to use as base name for tags
images: |
us-docker.pkg.dev/zealous-zebra/zebra/${{ inputs.image_name }}
gcr.io/zealous-zebra/zcashfoundation-zebra/${{ inputs.image_name }}
# generate Docker tags based on the following events/attributes
tags: |
type=schedule
@@ -95,14 +94,6 @@ jobs:
password: ${{ steps.auth.outputs.access_token }}
logout: false
- name: Login to Google Container Registry
uses: docker/login-action@v2.0.0
with:
registry: gcr.io
username: oauth2accesstoken
password: ${{ steps.auth.outputs.access_token }}
logout: false
# Build and push image to Google Artifact Registry
- name: Build & push
id: docker_build
@@ -123,5 +114,12 @@ jobs:
CHECKPOINT_SYNC=${{ inputs.checkpoint_sync }}
RUST_LOG=${{ inputs.rust_log }}
push: true
cache-from: type=gha,scope=${{ inputs.image_name }}
cache-to: type=gha,mode=max,scope=${{ inputs.image_name }}
# To improve build speeds, for each branch we push an additional image to the registry,
# to be used as the caching layer, using the `max` caching mode.
#
# We use multiple cache sources to increase the chance of a cache hit, starting from the
# `main` branch cache, and if there's no hit, continuing with a cache scoped per branch.
cache-from: |
type=registry,ref=us-docker.pkg.dev/zealous-zebra/zebra/${{ inputs.image_name }}:main-cache
type=registry,ref=us-docker.pkg.dev/zealous-zebra/zebra/${{ inputs.image_name }}:${{ env.GITHUB_REF_SLUG_URL }}-cache
cache-to: type=registry,ref=us-docker.pkg.dev/zealous-zebra/zebra/${{ inputs.image_name }}:${{ env.GITHUB_REF_SLUG_URL }}-cache,mode=max
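
For reference, a rough local equivalent of this registry cache setup with `docker buildx`; the image name and branch cache tag below are illustrative, since the workflow fills them in from its inputs and the slugified branch name:

docker buildx build . --push \
  --cache-from type=registry,ref=us-docker.pkg.dev/zealous-zebra/zebra/zebrad:main-cache \
  --cache-to type=registry,ref=us-docker.pkg.dev/zealous-zebra/zebra/zebrad:my-branch-cache,mode=max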

View File

@@ -88,7 +88,7 @@ jobs:
uses: ./.github/workflows/build-docker-image.yml
with:
dockerfile_path: ./docker/Dockerfile
dockerfile_target: tester
dockerfile_target: tests
image_name: zebrad-test
# TODO: validate how to use variable/conditional values for Testnet
network: Mainnet

View File

@@ -20,7 +20,7 @@ jobs:
uses: ./.github/workflows/build-docker-image.yml
with:
dockerfile_path: ./docker/zcash-params/Dockerfile
dockerfile_target: builder
dockerfile_target: release
image_name: zcash-params
rust_backtrace: full
rust_lib_backtrace: full

View File

@@ -1,19 +1,29 @@
# These steps implement cargo-chef for docker layer caching
# We are using four stages:
# We are using five stages:
# - chef: installs cargo-chef
# - planner: computes the recipe file
# - builder: caches our dependencies and builds the binary
# - tester: builds and runs tests
# - deps: caches our dependencies and sets the needed variables
# - tests: builds tests
# - release: builds release binary
# - runtime: is our runtime environment
#
# This stage implements cargo-chef for docker layer caching
FROM rust:bullseye as chef
RUN cargo install cargo-chef --locked
WORKDIR /app
# Analyze the current project to determine the minimum subset of files
# (Cargo.lock and Cargo.toml manifests) required to build it and cache dependencies
#
# The recipe.json is the equivalent of the Python requirements.txt file
FROM chef AS planner
COPY . .
RUN cargo chef prepare --recipe-path recipe.json
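
For reference, the same cargo-chef flow can be run outside Docker; a minimal sketch from the repository root, mirroring the commands used in this Dockerfile:

cargo install cargo-chef --locked
cargo chef prepare --recipe-path recipe.json   # write the dependency-only skeleton
cargo chef cook --release --recipe-path recipe.json   # build just the dependencies described in recipe.json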
FROM chef AS builder
# In this stage we download all system requirements to build the project
#
# It also captures all the build arguments to be used as environment variables.
# We set defaults for the arguments, in case the build does not include this information.
FROM chef AS deps
SHELL ["/bin/bash", "-xo", "pipefail", "-c"]
COPY --from=planner /app/recipe.json recipe.json
@@ -29,7 +39,9 @@ RUN apt-get -qq update && \
; \
rm -rf /var/lib/apt/lists/* /tmp/*
# Install google OS Config agent
# Install the Google OS Config agent to be able to get information from the VMs being deployed
# into GCP for integration testing purposes, and as Mainnet nodes
# TODO: this shouldn't be a hardcoded requirement for everyone
RUN if [ "$(uname -m)" != "aarch64" ]; then \
apt-get -qq update && \
apt-get -qq install -y --no-install-recommends \
@@ -44,10 +56,10 @@ RUN if [ "$(uname -m)" != "aarch64" ]; then \
&& \
rm -rf /var/lib/apt/lists/* /tmp/*
ENV CARGO_HOME /app/.cargo/
# Build dependencies - this is the caching Docker layer!
RUN cargo chef cook --release --features enable-sentry --recipe-path recipe.json
# Build arguments and variables used to change how tests are run, the tracelog levels,
# and the network to be used (Mainnet or Testnet)
#
# We set defaults for all variables.
ARG RUST_BACKTRACE
ENV RUST_BACKTRACE ${RUST_BACKTRACE:-0}
@@ -71,41 +83,61 @@ ENV CHECKPOINT_SYNC ${CHECKPOINT_SYNC:-true}
ARG NETWORK
ENV NETWORK ${NETWORK:-Mainnet}
COPY . .
# Build zebra
RUN cargo build --locked --release --features enable-sentry --bin zebrad
ENV CARGO_HOME /app/.cargo/
FROM builder AS tester
# Pre-download Zcash Sprout and Sapling parameters
# In this stage we build tests (without running them)
#
# We also download the dependencies needed for tests to work, from other images.
# An entrypoint.sh is only available in this stage, for easier test handling with variables.
FROM deps AS tests
# TODO: do not hardcode the user /root/ even though it is a safe assumption
# Pre-download Zcash Sprout, Sapling parameters and Lightwalletd binary
COPY --from=us-docker.pkg.dev/zealous-zebra/zebra/zcash-params /root/.zcash-params /root/.zcash-params
COPY --from=us-docker.pkg.dev/zealous-zebra/zebra/lightwalletd /lightwalletd /usr/local/bin
RUN cargo test --locked --release --features enable-sentry --workspace --no-run
# Re-hydrate the minimum project skeleton identified by `cargo chef prepare` in the planner stage,
# and build it to cache dependencies.
# This is the caching Docker layer for Rust!
RUN cargo chef cook --release --workspace --recipe-path recipe.json
COPY . .
RUN cargo test --locked --release --workspace --no-run
COPY ./docker/entrypoint.sh /
RUN chmod u+x /entrypoint.sh
ARG CHECKPOINT_SYNC=true
ARG NETWORK=Mainnet
ARG TEST_FULL_SYNC
ENV TEST_FULL_SYNC ${TEST_FULL_SYNC:-0}
ARG RUN_ALL_TESTS
ENV RUN_ALL_TESTS ${RUN_ALL_TESTS:-0}
ENTRYPOINT ["/entrypoint.sh"]
CMD [ "cargo"]
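
As an illustration, the resulting tests image could be run with the test variables overridden at runtime; the image reference below is an example, and CI sets these values through the workflow inputs instead:

docker run -e RUN_ALL_TESTS=1 -e TEST_FULL_SYNC=0 us-docker.pkg.dev/zealous-zebra/zebra/zebrad-test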
# Runner image
# In this stage we build a release (generate the zebrad binary)
#
# This step also runs `cargo chef cook` again, as this stage is completely independent from the
# `tests` stage. This step is a dependency for the `runtime` stage, which uses the resulting
# zebrad binary from this step.
FROM deps AS release
RUN cargo chef cook --release --features enable-sentry --recipe-path recipe.json
COPY . .
# Build zebra
RUN cargo build --locked --release --features enable-sentry --bin zebrad
# This stage is only used when deploying nodes, or when only the resulting zebrad binary is needed
#
# To save space, this step starts from a fresh Debian image, and only adds the resulting
# binary from the `release` stage, plus the Zcash Sprout & Sapling parameters from the zcash-params image
FROM debian:bullseye-slim AS runtime
COPY --from=builder /app/target/release/zebrad /usr/local/bin
COPY --from=release /app/target/release/zebrad /usr/local/bin
COPY --from=us-docker.pkg.dev/zealous-zebra/zebra/zcash-params /root/.zcash-params /root/.zcash-params
RUN apt-get update && \
apt-get install -y --no-install-recommends \
ca-certificates
ARG CHECKPOINT_SYNC=true
ARG NETWORK=Mainnet
# Build the `zebrad.toml` before starting the container, using the arguments from build
# time, or using the default values set just above.
RUN set -ex; \
{ \
echo "[consensus]"; \

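For reference, a sketch of overriding these defaults when building the runtime image locally; the tag and values below are illustrative:

docker build -f ./docker/Dockerfile --target runtime \
  --build-arg NETWORK=Testnet \
  --build-arg CHECKPOINT_SYNC=false \
  -t zebrad:local .
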
View File

@@ -8,7 +8,7 @@ FROM chef AS planner
COPY . .
RUN cargo chef prepare --recipe-path recipe.json
FROM chef AS builder
FROM chef AS release
COPY --from=planner /app/recipe.json recipe.json
# Install zebra build deps