From d85b010cf94186de636839193f01246b72cb60d7 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 12 Dec 2023 23:51:28 +0000 Subject: [PATCH] ref(workflow): move most scripts to their own executables (#8005) * ref(workflow): move most scripts to their own executable * debug: JSON value * fix(scripts): move remaining script to its own file * fix(script): revert to the correct disk search logic * fix(scripts) * fix(scripts): use correct NETWORK with lowercase * fix: typo * fix(script): wrong variable assignment * fix(script): use correct return values inside a function * fix(script): fix value assigment * test: debug * fix(script): make disk conditions simpler * fix(script): export variables to the `shell` executing the script * fix(script): do not fail on expected unbound variables * test: output * fix(scripts): do not `echo` a variable more than once * fix(scripts): typo * docs(workflow): adds a description at the top of each file (#8009) Co-authored-by: Marek Co-authored-by: teor --------- Co-authored-by: teor Co-authored-by: Alfredo Garcia Co-authored-by: Marek --- .github/workflows/cd-deploy-nodes-gcp.yml | 12 +- .../workflows/chore-delete-gcp-resources.yml | 162 ++---------------- .../workflows/chore-project-management.yml | 8 +- .github/workflows/ci-build-crates.yml | 10 ++ .github/workflows/ci-coverage.yml | 8 + .../workflows/ci-integration-tests-gcp.yml | 3 + .github/workflows/ci-lint.yml | 7 + .github/workflows/ci-unit-tests-docker.yml | 11 ++ .github/workflows/ci-unit-tests-os.yml | 6 + .github/workflows/docs-deploy-firebase.yml | 4 + .github/workflows/manual-zcashd-deploy.yml | 7 + .github/workflows/release-crates-io.yml | 19 +- .github/workflows/release-drafter.yml | 6 +- .../scripts/gcp-delete-old-cache-images.sh | 45 +++++ .../workflows/scripts/gcp-delete-old-disks.sh | 49 ++++++ .../scripts/gcp-delete-old-instances.sh | 42 +++++ .../scripts/gcp-delete-old-templates.sh | 33 ++++ .../scripts/gcp-get-available-disks.sh | 42 +++++ 
.../workflows/scripts/gcp-get-cached-disks.sh | 72 ++++++++ .../scripts/gcp-vm-startup-script.sh | 2 +- .../scripts/release-crates-dry-run.sh | 31 ++++ .github/workflows/sub-build-docker-image.yml | 6 + .../sub-deploy-integration-tests-gcp.yml | 82 ++------- .github/workflows/sub-find-cached-disks.yml | 48 ++---- .github/workflows/sub-test-zebra-config.yml | 4 + 25 files changed, 446 insertions(+), 273 deletions(-) create mode 100755 .github/workflows/scripts/gcp-delete-old-cache-images.sh create mode 100755 .github/workflows/scripts/gcp-delete-old-disks.sh create mode 100755 .github/workflows/scripts/gcp-delete-old-instances.sh create mode 100755 .github/workflows/scripts/gcp-delete-old-templates.sh create mode 100755 .github/workflows/scripts/gcp-get-available-disks.sh create mode 100755 .github/workflows/scripts/gcp-get-cached-disks.sh create mode 100755 .github/workflows/scripts/release-crates-dry-run.sh diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index 9515a4244..e77302a73 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -1,6 +1,16 @@ # Google Cloud node deployments and tests that run when Rust code or dependencies are modified, # but only on PRs from the ZcashFoundation/zebra repository. -# (External PRs are tested/deployed by mergify.) +# (External PRs are tested/deployed by mergify.) +# +# 1. `versioning`: Extracts the major version from the release semver. Useful for segregating instances based on major versions. +# 2. `build`: Builds a Docker image named `zebrad` with the necessary tags derived from Git. +# 3. `test-configuration-file`: Validates Zebra using the default config with the latest version. +# 4. `test-configuration-file-testnet`: Tests the Docker image for the testnet configuration. +# 5. `test-zebra-conf-path`: Verifies Zebra with a custom Docker config file. +# 6. 
`deploy-nodes`: Deploys Managed Instance Groups (MiGs) for Mainnet and Testnet. If triggered by main branch pushes, it always replaces the MiG. For releases, MiGs are replaced only if deploying the same major version; otherwise, a new major version is deployed. +# 7. `deploy-instance`: Deploys a single node in a specified GCP zone for testing specific commits. Instances from this job aren't auto-replaced or deleted. +# +# The overall goal is to ensure that Zebra nodes are consistently deployed, tested, and managed on GCP. name: Deploy Nodes to GCP # Ensures that only one workflow task will run at a time. Previous deployments, if diff --git a/.github/workflows/chore-delete-gcp-resources.yml b/.github/workflows/chore-delete-gcp-resources.yml index 9291796a0..60d5fd748 100644 --- a/.github/workflows/chore-delete-gcp-resources.yml +++ b/.github/workflows/chore-delete-gcp-resources.yml @@ -1,4 +1,13 @@ -# TODO: rename this action name and filename to Delete infra resources +# This workflow is designed to delete old Google Cloud Platform (GCP) resources to save on costs. +# +# 1. Deletes specific instances in GCP older than a defined number of days. +# 2. Deletes instance templates older than a set number of days. +# 3. Deletes older disks not currently in use, with certain ones prefixed by commit hashes or "zebrad-". +# 4. Deletes cache images from GCP, retaining a specified number of the latest images for certain types like zebrad checkpoint cache, zebrad tip cache, and lightwalletd + zebrad tip cache. +# 5. Deletes unused artifacts from Google Artifact Registry older than a defined number of hours while retaining the latest few. +# +# It uses the gcloud CLI for most of its operations and also leverages specific GitHub Actions like the gcr-cleaner for deleting old images from the Google Artifact Registry. +# The workflow is scheduled to run daily at 0700 UTC. name: Delete GCP resources on: @@ -56,29 +65,11 @@ jobs: # so it can't be shell-quoted. 
- name: Delete old instances run: | - DELETE_BEFORE_DATE=$(date --date="$DELETE_INSTANCE_DAYS days ago" '+%Y%m%d') - - IFS=$'\n' - INSTANCES=$(gcloud compute instances list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME,ZONE)' | \ - sed 's/\(.*\)\t\(.*\)/\1 --zone=\2/') - - for INSTANCE_AND_ZONE in $INSTANCES - do - IFS=$' ' - gcloud compute instances delete --verbosity=info ${INSTANCE_AND_ZONE} --delete-disks=all || continue - IFS=$'\n' - done - + ./.github/workflows/scripts/gcp-delete-old-instances.sh # Deletes all the instance templates older than $DELETE_AGE_DAYS days. - name: Delete old instance templates run: | - DELETE_BEFORE_DATE=$(date --date="$DELETE_AGE_DAYS days ago" '+%Y%m%d') - TEMPLATES=$(gcloud compute instance-templates list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)') - - for TEMPLATE in $TEMPLATES - do - gcloud compute instance-templates delete "${TEMPLATE}" || continue - done + ./.github/workflows/scripts/gcp-delete-old-templates.sh # Deletes all mainnet and testnet disks older than $DELETE_AGE_DAYS days. # @@ -89,31 +80,7 @@ jobs: # so it can't be shell-quoted. 
- name: Delete old disks run: | - DELETE_BEFORE_DATE=$(date --date="$DELETE_AGE_DAYS days ago" '+%Y%m%d') - - IFS=$'\n' - # Disks created by PR jobs, and other jobs that use a commit hash - COMMIT_DISKS=$(gcloud compute disks list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME,LOCATION,LOCATION_SCOPE)' | \ - sed 's/\(.*\)\t\(.*\)\t\(.*\)/\1 --\3=\2/') - - for DISK_AND_LOCATION in $COMMIT_DISKS - do - IFS=$' ' - gcloud compute disks delete --verbosity=info ${DISK_AND_LOCATION} || continue - IFS=$'\n' - done - - IFS=$'\n' - # Disks created by managed instance groups, and other jobs that start with "zebrad-" - ZEBRAD_DISKS=$(gcloud compute disks list --sort-by=creationTimestamp --filter="name~^zebrad- AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME,LOCATION,LOCATION_SCOPE)' | \ - sed 's/\(.*\)\t\(.*\)\t\(.*\)/\1 --\3=\2/') - - for DISK_AND_LOCATION in $ZEBRAD_DISKS - do - IFS=$' ' - gcloud compute disks delete --verbosity=info ${DISK_AND_LOCATION} || continue - IFS=$'\n' - done + ./.github/workflows/scripts/gcp-delete-old-disks.sh # Deletes mainnet and testnet cache images older than $DELETE_AGE_DAYS days. 
# @@ -125,108 +92,9 @@ jobs: # # TODO: # - refactor out repeated shell script code - - name: Delete old cache disks + - name: Delete old cache images run: | - DELETE_BEFORE_DATE=$(date --date="$DELETE_AGE_DAYS days ago" '+%Y%m%d') - - # As of April 2023, these disk names look like: - # zebrad-cache-6039-merge-62c8ecc-v25-mainnet-checkpoint-053559 - # - # Mainnet zebrad checkpoint - ZEBRAD_MAINNET_CHECKPOINT_IMAGES=$(gcloud compute images list --sort-by=~creationTimestamp --filter="name~^zebrad-cache-.*-mainnet-checkpoint AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)') - KEPT_IMAGES=0 - for IMAGE in $ZEBRAD_MAINNET_CHECKPOINT_IMAGES - do - if [[ "$KEPT_IMAGES" -lt "$KEEP_LATEST_IMAGE_COUNT" ]]; - then - KEPT_IMAGES=$((KEPT_IMAGES+1)) - echo "Keeping image $KEPT_IMAGES named $IMAGE" - continue - fi - - gcloud compute images delete "${IMAGE}" || continue - done - - # Testnet zebrad checkpoint - ZEBRAD_TESTNET_CHECKPOINT_IMAGES=$(gcloud compute images list --sort-by=~creationTimestamp --filter="name~^zebrad-cache-.*-testnet-checkpoint AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)') - KEPT_IMAGES=0 - for IMAGE in $ZEBRAD_TESTNET_CHECKPOINT_IMAGES - do - if [[ "$KEPT_IMAGES" -lt "$KEEP_LATEST_IMAGE_COUNT" ]]; - then - KEPT_IMAGES=$((KEPT_IMAGES+1)) - echo "Keeping image $KEPT_IMAGES named $IMAGE" - continue - fi - - gcloud compute images delete "${IMAGE}" || continue - done - - # As of April 2023, these disk names look like: - # zebrad-cache-6556-merge-a2ca4de-v25-mainnet-tip(-u)?-140654 - # - # Mainnet zebrad tip - ZEBRAD_MAINNET_TIP_IMAGES=$(gcloud compute images list --sort-by=~creationTimestamp --filter="name~^zebrad-cache-.*-mainnet-tip AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)') - KEPT_IMAGES=0 - for IMAGE in $ZEBRAD_MAINNET_TIP_IMAGES - do - if [[ "$KEPT_IMAGES" -lt "$KEEP_LATEST_IMAGE_COUNT" ]]; - then - KEPT_IMAGES=$((KEPT_IMAGES+1)) - echo "Keeping image $KEPT_IMAGES named $IMAGE" - continue - 
fi - - gcloud compute images delete "${IMAGE}" || continue - done - - # Testnet zebrad tip - ZEBRAD_TESTNET_TIP_IMAGES=$(gcloud compute images list --sort-by=~creationTimestamp --filter="name~^zebrad-cache-.*-testnet-tip AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)') - KEPT_IMAGES=0 - for IMAGE in $ZEBRAD_TESTNET_TIP_IMAGES - do - if [[ "$KEPT_IMAGES" -lt "$KEEP_LATEST_IMAGE_COUNT" ]]; - then - KEPT_IMAGES=$((KEPT_IMAGES+1)) - echo "Keeping image $KEPT_IMAGES named $IMAGE" - continue - fi - - gcloud compute images delete "${IMAGE}" || continue - done - - # As of April 2023, these disk names look like: - # lwd-cache-main-fb3fec0-v25-mainnet-tip(-u)?-061314 - # - # Mainnet lightwalletd tip - LWD_MAINNET_TIP_IMAGES=$(gcloud compute images list --sort-by=~creationTimestamp --filter="name~^lwd-cache-.*-mainnet-tip AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)') - KEPT_IMAGES=0 - for IMAGE in $LWD_MAINNET_TIP_IMAGES - do - if [[ "$KEPT_IMAGES" -lt "$KEEP_LATEST_IMAGE_COUNT" ]]; - then - KEPT_IMAGES=$((KEPT_IMAGES+1)) - echo "Keeping image $KEPT_IMAGES named $IMAGE" - continue - fi - - gcloud compute images delete "${IMAGE}" || continue - done - - # Testnet lightwalletd tip - LWD_TESTNET_TIP_IMAGES=$(gcloud compute images list --sort-by=~creationTimestamp --filter="name~^lwd-cache-.*-testnet-tip AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)') - KEPT_IMAGES=0 - for IMAGE in $LWD_TESTNET_TIP_IMAGES - do - if [[ "$KEPT_IMAGES" -lt "$KEEP_LATEST_IMAGE_COUNT" ]]; - then - KEPT_IMAGES=$((KEPT_IMAGES+1)) - echo "Keeping image $KEPT_IMAGES named $IMAGE" - continue - fi - - gcloud compute images delete "${IMAGE}" || continue - done + ./.github/workflows/scripts/gcp-delete-old-cache-images.sh # We're using a generic approach here, which allows multiple registries to be included, # even those not related to GCP. Enough reason to create a separate job. 
diff --git a/.github/workflows/chore-project-management.yml b/.github/workflows/chore-project-management.yml index b9c4e1a8b..4825e353c 100644 --- a/.github/workflows/chore-project-management.yml +++ b/.github/workflows/chore-project-management.yml @@ -1,6 +1,12 @@ +# This workflow manages the automatic addition of new issues to specific GitHub projects. +# +# 1. Newly opened issues are added to the "Zebra Backlog" Github project. +# 2. They are also added to the "ZF Engineering Backlog" Github project. +# +# The action makes use of the `add-to-project` action and requires a Github token +# (currently sourced from secrets) to authenticate and perform the addition. name: Add new issues to GitHub projects -# Configuration for automatically adding issues to various Github projects for Project Management purposes on: issues: types: diff --git a/.github/workflows/ci-build-crates.yml b/.github/workflows/ci-build-crates.yml index 823ac95f0..1eecbbbfe 100644 --- a/.github/workflows/ci-build-crates.yml +++ b/.github/workflows/ci-build-crates.yml @@ -1,3 +1,13 @@ +# This workflow facilitates the individual building of Rust crates present in the repository. +# 1. A matrix is generated dynamically to identify each crate in the repository. +# 2. This matrix is checked for validity. +# 3. Each identified crate undergoes three build processes: +# - With no features. +# - With the default features. +# - With all the features enabled. +# 4. In case of build failures outside of pull requests, an issue is either opened or updated +# in the repository to report the failure. +# Throughout the workflow, various setup steps ensure the correct environment and tools are present. name: Build crates individually # Ensures that only one workflow task will run at a time. 
Previous builds, if diff --git a/.github/workflows/ci-coverage.yml b/.github/workflows/ci-coverage.yml index 5d6179fed..482b0d7db 100644 --- a/.github/workflows/ci-coverage.yml +++ b/.github/workflows/ci-coverage.yml @@ -1,3 +1,11 @@ +# This workflow calculates the test coverage for the Rust codebase. +# 1. The code is checked out. +# 2. Rust with the stable toolchain, minimal profile, and llvm-tools-preview component is set up. +# 3. Necessary tools like 'cargo-llvm-cov' are installed. +# 4. Proptest is minimized for efficient coverage test runs. +# 5. Tests are run without producing a report to gather coverage information. +# 6. A coverage report (lcov format) is generated based on the gathered information. +# 7. Finally, this report is uploaded to Codecov for visualization and analysis. name: Coverage # Ensures that only one workflow task will run at a time. Previous builds, if diff --git a/.github/workflows/ci-integration-tests-gcp.yml b/.github/workflows/ci-integration-tests-gcp.yml index 902575c56..e0d2e07e6 100644 --- a/.github/workflows/ci-integration-tests-gcp.yml +++ b/.github/workflows/ci-integration-tests-gcp.yml @@ -1,5 +1,8 @@ # Google Cloud integration tests that run when Rust code or dependencies are modified, # but only on PRs from the ZcashFoundation/zebra repository. (External PRs are tested by mergify.) +# +# Specific conditions and dependencies are set for each job to ensure they are executed in the correct sequence and under the right circumstances. +# Each test has a description of the conditions under which it runs. name: Integration Tests on GCP # Ensures that only one workflow task will run at a time. Previous builds, if diff --git a/.github/workflows/ci-lint.yml b/.github/workflows/ci-lint.yml index 50296e738..d72a53395 100644 --- a/.github/workflows/ci-lint.yml +++ b/.github/workflows/ci-lint.yml @@ -1,3 +1,10 @@ +# This workflow conducts various linting checks for a Rust-based project. +# 1. 
Determines if Rust or workflow files have been modified. +# 2. Runs the Clippy linter on Rust files, producing annotations and failing on warnings. +# 3. Ensures Rust code formatting complies with 'rustfmt' standards. +# 4. Lints GitHub Actions workflow files for common issues. +# 5. Checks for common spelling errors in the codebase. +# The workflow is designed to maintain code quality and consistency, running checks conditionally based on the changed files. name: Lint # Ensures that only one workflow task will run at a time. Previous builds, if diff --git a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml index 62a2514f4..799068af4 100644 --- a/.github/workflows/ci-unit-tests-docker.yml +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -1,5 +1,16 @@ # Google Cloud unit tests that run when Rust code or dependencies are modified, # but only on PRs from the ZcashFoundation/zebra repository. (External PRs are tested by mergify.) +# +# This workflow is designed for running various unit tests within Docker containers. +# Jobs: +# 1. Builds a Docker image for tests, adaptable to the specified network (Mainnet or Testnet). +# 2. 'test-all': Executes all Zebra tests, including normally ignored ones, in a Docker environment. +# 3. 'test-fake-activation-heights': Runs state tests with fake activation heights, isolating its build products. +# 4. 'test-empty-sync': Tests Zebra's ability to sync and checkpoint from an empty state. +# 5. 'test-lightwalletd-integration': Validates integration with 'lightwalletd' starting from an empty state. +# 6. 'test-configuration-file': Assesses the default Docker configuration for Zebra. +# 7. 'test-configuration-file-testnet': Checks the Docker image reconfiguration for the Testnet. +# 8. 'test-zebra-conf-path': Tests Zebra using a custom Docker configuration. name: Docker Unit Tests # Ensures that only one workflow task will run at a time. 
Previous builds, if diff --git a/.github/workflows/ci-unit-tests-os.yml b/.github/workflows/ci-unit-tests-os.yml index 4075cbd20..b02c36adf 100644 --- a/.github/workflows/ci-unit-tests-os.yml +++ b/.github/workflows/ci-unit-tests-os.yml @@ -1,3 +1,9 @@ +# This workflow performs unit tests across different operating systems and Rust versions. It includes steps for: +# - Testing on Ubuntu and macOS with stable and beta Rust toolchains. +# - Installing Zebra from the lockfile without cache on Ubuntu. +# - Verifying that Cargo.lock is up-to-date with Cargo.toml changes. +# - Running cargo-deny checks for dependencies. +# - Checking for unused dependencies in the code. name: Multi-OS Unit Tests # Ensures that only one workflow task will run at a time. Previous builds, if diff --git a/.github/workflows/docs-deploy-firebase.yml b/.github/workflows/docs-deploy-firebase.yml index a1095f59f..21930a1b9 100644 --- a/.github/workflows/docs-deploy-firebase.yml +++ b/.github/workflows/docs-deploy-firebase.yml @@ -1,5 +1,9 @@ # Google Cloud docs updates that run when docs, Rust code, or dependencies are modified, # but only on PRs from the ZcashFoundation/zebra repository. (External PRs are deployed by mergify.) + +# - Builds and deploys Zebra Book Docs using mdBook, setting up necessary tools and deploying to Firebase. +# - Compiles and deploys external documentation, setting up Rust with the beta toolchain and default profile, building the docs, and deploying them to Firebase. +# - Assembles and deploys internal documentation with similar steps, including private items in the documentation, and deploys to Firebase. name: Docs # Ensures that only one workflow task will run at a time. 
Previous deployments, if diff --git a/.github/workflows/manual-zcashd-deploy.yml b/.github/workflows/manual-zcashd-deploy.yml index 7bfb5cf70..158f919c9 100644 --- a/.github/workflows/manual-zcashd-deploy.yml +++ b/.github/workflows/manual-zcashd-deploy.yml @@ -1,3 +1,10 @@ +# This workflow is designed for manually deploying zcashd nodes to Google Cloud Platform (GCP) based on user inputs. +# - Allows selection of network type (Mainnet or Testnet) and instance group size. +# - Converts network name to lowercase to comply with GCP labeling requirements. +# - Authenticates with Google Cloud using provided credentials. +# - Creates a GCP instance template from a container image of zcashd. +# - Checks if the specified instance group already exists. +# - Depending on the existence check, either creates a new managed instance group or updates the existing one with the new template. name: Zcashd Manual Deploy on: diff --git a/.github/workflows/release-crates-io.yml b/.github/workflows/release-crates-io.yml index 8546ee774..304f8a152 100644 --- a/.github/workflows/release-crates-io.yml +++ b/.github/workflows/release-crates-io.yml @@ -93,26 +93,9 @@ jobs: # # These steps should be kept up to date with the release checklist. # - # TODO: move these steps into a script which is run in the release checklist and CI - name: Crate release dry run run: | - set -ex - git config --global user.email "release-tests-no-reply@zfnd.org" - git config --global user.name "Automated Release Test" - # This script must be the same as: - # https://github.com/ZcashFoundation/zebra/blob/main/.github/PULL_REQUEST_TEMPLATE/release-checklist.md#update-crate-versions - # with an extra `--no-confirm` argument for non-interactive testing. 
- cargo release version --verbose --execute --no-confirm --allow-branch '*' --workspace --exclude zebrad beta - cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebrad patch - cargo release replace --verbose --execute --no-confirm --allow-branch '*' --package zebrad - cargo release commit --verbose --execute --no-confirm --allow-branch '*' - # Check the release will work using a dry run - # - # Workaround unpublished dependency version errors by skipping those crates: - # https://github.com/crate-ci/cargo-release/issues/691 - # - # TODO: check all crates after fixing these errors - cargo release publish --verbose --dry-run --allow-branch '*' --workspace --exclude zebra-consensus --exclude zebra-utils --exclude zebrad + ./.github/workflows/scripts/release-crates-dry-run.sh # TODO: actually do the release here #release-crates: diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml index 39e1ba7bd..c2d2d8119 100644 --- a/.github/workflows/release-drafter.yml +++ b/.github/workflows/release-drafter.yml @@ -1,6 +1,10 @@ -# Creates a draft release with all the PR names since the last release. +# This workflow automates the creation and updating of draft releases. It compiles PR titles into the draft release notes. # https://github.com/ZcashFoundation/zebra/releases # +# - Updates the draft release upon each merge into 'main'. +# - Utilizes the release-drafter GitHub Action to accumulate PR titles since the last release into a draft release note. +# - Suitable permissions are set for creating releases and handling pull requests. 
+# # Workflow is based on: # https://github.com/marketplace/actions/release-drafter#usage name: Release Drafter diff --git a/.github/workflows/scripts/gcp-delete-old-cache-images.sh b/.github/workflows/scripts/gcp-delete-old-cache-images.sh new file mode 100755 index 000000000..a614e5fa5 --- /dev/null +++ b/.github/workflows/scripts/gcp-delete-old-cache-images.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +# Function to handle image deletion logic +delete_images() { + local image_type="$1" + local filter="$2" + local kept_images=0 + + echo "Processing ${image_type} images" + images=$(gcloud compute images list --sort-by=~creationTimestamp --filter="${filter} AND creationTimestamp < ${DELETE_BEFORE_DATE}" --format='value(NAME)') + + for image in ${images}; do + if [[ "${kept_images}" -lt "${KEEP_LATEST_IMAGE_COUNT}" ]]; then + ((kept_images++)) + echo "Keeping image ${kept_images} named ${image}" + else + echo "Deleting image: ${image}" + gcloud compute images delete "${image}" || echo "Failed to delete image: ${image}" + fi + done +} + +# Check if necessary variables are set +if ! 
[[ "${DELETE_AGE_DAYS}" =~ ^[0-9]+$ && "${KEEP_LATEST_IMAGE_COUNT}" =~ ^[0-9]+$ ]]; then + echo "ERROR: One or more required variables are not set or not numeric" + exit 1 +fi + +# Set pipefail +set -o pipefail + +# Calculate the date before which images should be deleted +DELETE_BEFORE_DATE=$(date --date="${DELETE_AGE_DAYS} days ago" '+%Y%m%d') + +# Mainnet and Testnet zebrad checkpoint +delete_images "Mainnet zebrad checkpoint" "name~^zebrad-cache-.*-mainnet-checkpoint" # As of April 2023, these disk names look like: zebrad-cache-6556-merge-a2ca4de-v25-mainnet-tip(-u)?-140654 +delete_images "Testnet zebrad checkpoint" "name~^zebrad-cache-.*-testnet-checkpoint" + +# Mainnet and Testnet zebrad tip +delete_images "Mainnet zebrad tip" "name~^zebrad-cache-.*-mainnet-tip" +delete_images "Testnet zebrad tip" "name~^zebrad-cache-.*-testnet-tip" + +# Mainnet and Testnet lightwalletd tip +delete_images "Mainnet lightwalletd tip" "name~^lwd-cache-.*-mainnet-tip" +delete_images "Testnet lightwalletd tip" "name~^lwd-cache-.*-testnet-tip" diff --git a/.github/workflows/scripts/gcp-delete-old-disks.sh b/.github/workflows/scripts/gcp-delete-old-disks.sh new file mode 100755 index 000000000..1a6dd8305 --- /dev/null +++ b/.github/workflows/scripts/gcp-delete-old-disks.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +# Check if DELETE_AGE_DAYS is set and is a number +if ! [[ "${DELETE_AGE_DAYS}" =~ ^[0-9]+$ ]]; then + echo "ERROR: DELETE_AGE_DAYS is not set or not a number" + exit 1 +fi + +# Set pipefail to catch errors in pipelines +set -o pipefail + +# Calculate the date before which disks should be deleted +DELETE_BEFORE_DATE=$(date --date="${DELETE_AGE_DAYS} days ago" '+%Y%m%d') + +# Fetch disks created by PR jobs, and other jobs that use a commit hash +if ! 
COMMIT_DISKS=$(gcloud compute disks list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < ${DELETE_BEFORE_DATE}" --format='value(NAME,LOCATION,LOCATION_SCOPE)' | sed 's/\(.*\)\t\(.*\)\t\(.*\)/\1 --\3=\2/'); then + echo "Error fetching COMMIT_DISKS." + exit 1 +fi + +# Delete commit disks if any are found +IFS=$'\n' +for DISK_AND_LOCATION in ${COMMIT_DISKS}; do + IFS=$' ' + echo "Deleting disk: ${DISK_AND_LOCATION}" + if ! gcloud compute disks delete --verbosity=info "${DISK_AND_LOCATION}"; then + echo "Failed to delete disk: ${DISK_AND_LOCATION}" + fi + IFS=$'\n' +done +IFS=$' \t\n' # Reset IFS to its default value + +# Fetch disks created by managed instance groups, and other jobs that start with "zebrad-" +if ! ZEBRAD_DISKS=$(gcloud compute disks list --sort-by=creationTimestamp --filter="name~^zebrad- AND creationTimestamp < ${DELETE_BEFORE_DATE}" --format='value(NAME,LOCATION,LOCATION_SCOPE)' | sed 's/\(.*\)\t\(.*\)\t\(.*\)/\1 --\3=\2/'); then + echo "Error fetching ZEBRAD_DISKS." + exit 1 +fi + +# Delete zebrad disks if any are found +IFS=$'\n' +for DISK_AND_LOCATION in ${ZEBRAD_DISKS}; do + IFS=$' ' + echo "Deleting disk: ${DISK_AND_LOCATION}" + if ! gcloud compute disks delete --verbosity=info "${DISK_AND_LOCATION}"; then + echo "Failed to delete disk: ${DISK_AND_LOCATION}" + fi + IFS=$'\n' +done +IFS=$' \t\n' # Reset IFS to its default value diff --git a/.github/workflows/scripts/gcp-delete-old-instances.sh b/.github/workflows/scripts/gcp-delete-old-instances.sh new file mode 100755 index 000000000..12ea0d1c3 --- /dev/null +++ b/.github/workflows/scripts/gcp-delete-old-instances.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +# Check if DELETE_INSTANCE_DAYS is set and is a number +if ! 
[[ "${DELETE_INSTANCE_DAYS}" =~ ^[0-9]+$ ]]; then + echo "ERROR: DELETE_INSTANCE_DAYS is not set or not a number" + exit 1 +fi + +# Set pipefail to catch errors in pipelines +set -o pipefail + +# Calculate the date before which instances should be deleted +DELETE_BEFORE_DATE=$(date --date="${DELETE_INSTANCE_DAYS} days ago" '+%Y%m%d') + +# Check if gcloud command is available +if ! command -v gcloud &> /dev/null; then + echo "ERROR: gcloud command not found" + exit 1 +fi + +# Fetch the list of instances to delete +if ! INSTANCES=$(gcloud compute instances list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < ${DELETE_BEFORE_DATE}" --format='value(NAME,ZONE)' | sed 's/\(.*\)\t\(.*\)/\1 --zone=\2/'); then + echo "Error fetching instances." + exit 1 +fi + +# Delete instances if any are found +if [[ -n "${INSTANCES}" ]]; then + IFS=$'\n' + for INSTANCE_AND_ZONE in ${INSTANCES}; do + IFS=$' ' + echo "Deleting instance: ${INSTANCE_AND_ZONE}" + gcloud compute instances delete --verbosity=info "${INSTANCE_AND_ZONE}" --delete-disks=all || { + echo "Failed to delete instance: ${INSTANCE_AND_ZONE}" + continue + } + IFS=$'\n' + done + IFS=$' \t\n' # Reset IFS to its default value +else + echo "No instances to delete." +fi diff --git a/.github/workflows/scripts/gcp-delete-old-templates.sh b/.github/workflows/scripts/gcp-delete-old-templates.sh new file mode 100755 index 000000000..29898dc21 --- /dev/null +++ b/.github/workflows/scripts/gcp-delete-old-templates.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +# Check if DELETE_AGE_DAYS is set and is a number +if ! [[ "${DELETE_AGE_DAYS}" =~ ^[0-9]+$ ]]; then + echo "ERROR: DELETE_AGE_DAYS is not set or not a number" + exit 1 +fi + +# Set pipefail to catch errors in pipelines +set -o pipefail + +# Calculate the date before which templates should be deleted +DELETE_BEFORE_DATE=$(date --date="${DELETE_AGE_DAYS} days ago" '+%Y%m%d') + +# Check if gcloud command is available +if ! 
command -v gcloud &> /dev/null; then + echo "ERROR: gcloud command not found" + exit 1 +fi + +# Fetch the list of instance templates to delete +if ! TEMPLATES=$(gcloud compute instance-templates list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < ${DELETE_BEFORE_DATE}" --format='value(NAME)'); then + echo "Error fetching instance templates." + exit 1 +fi + +# Delete templates if any are found +for TEMPLATE in ${TEMPLATES}; do + echo "Deleting template: ${TEMPLATE}" + if ! gcloud compute instance-templates delete "${TEMPLATE}"; then + echo "Failed to delete template: ${TEMPLATE}" + fi +done diff --git a/.github/workflows/scripts/gcp-get-available-disks.sh b/.github/workflows/scripts/gcp-get-available-disks.sh new file mode 100755 index 000000000..667c6f36c --- /dev/null +++ b/.github/workflows/scripts/gcp-get-available-disks.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +# Description: +# Check if there are cached state disks available for subsequent jobs to use. +# +# This lookup uses the state version from constants.rs. +# It accepts disks generated by any branch, including draft and unmerged PRs. 
+# +# If the disk exists, sets the corresponding output to "true": +# - lwd_tip_disk +# - zebra_tip_disk +# - zebra_checkpoint_disk + +set -euxo pipefail + + +LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "${GITHUB_WORKSPACE}/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1) +echo "STATE_VERSION: ${LOCAL_STATE_VERSION}" + +# Function to find a disk image and output its name +find_disk_image() { +local base_name="${1}" +local disk_type="${2}" +local disk_pattern="${base_name}-cache" +local output_var="${base_name}_${disk_type}_disk" +local disk_image + +disk_image=$(gcloud compute images list --filter="status=READY AND name~${disk_pattern}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${disk_type}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) + +if [[ -z "${disk_image}" ]]; then + echo "No ${disk_type^^} disk found for ${base_name^^} on network: ${NETWORK}" + echo "${output_var}=false" >> "${GITHUB_OUTPUT}" +else + echo "Disk: ${disk_image}" + echo "${output_var}=true" >> "${GITHUB_OUTPUT}" +fi +} + +# Find and output LWD and Zebra disks +find_disk_image "lwd" "tip" +find_disk_image "zebrad" "tip" +find_disk_image "zebrad" "checkpoint" diff --git a/.github/workflows/scripts/gcp-get-cached-disks.sh b/.github/workflows/scripts/gcp-get-cached-disks.sh new file mode 100755 index 000000000..9b05c2570 --- /dev/null +++ b/.github/workflows/scripts/gcp-get-cached-disks.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash + +# Description: +# This script finds a cached Google Cloud Compute image based on specific criteria. +# It prioritizes images from the current commit, falls back to the main branch, +# and finally checks other branches if needed. The selected image is used for +# setting up the environment in a CI/CD pipeline. 
+ +set -eo pipefail + +# Function to find and report a cached disk image +find_cached_disk_image() { + local search_pattern="${1}" + local git_source="${2}" + local disk_name + + disk_name=$(gcloud compute images list --filter="status=READY AND name~${search_pattern}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) + + # Use >&2 to redirect to stderr and avoid sending wrong assignments to stdout + if [[ -n "${disk_name}" ]]; then + echo "Found ${git_source} Disk: ${disk_name}" >&2 + disk_description=$(gcloud compute images describe "${disk_name}" --format="value(DESCRIPTION)") + echo "Description: ${disk_description}" >&2 + echo "${disk_name}" # This is the actual return value when a disk is found + else + echo "No ${git_source} disk found." >&2 + fi +} + +# Extract local state version +echo "Extracting local state version..." +LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "${GITHUB_WORKSPACE}/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1) +echo "STATE_VERSION: ${LOCAL_STATE_VERSION}" + +# Define DISK_PREFIX based on the requiring state directory +if [[ "${NEEDS_LWD_STATE}" == "true" ]]; then + DISK_PREFIX="${LWD_STATE_DIR}" +else + DISK_PREFIX="${ZEBRA_STATE_DIR:-${DISK_PREFIX}}" +fi + +# Find the most suitable cached disk image +echo "Finding the most suitable cached disk image..." 
+if [[ -z "${CACHED_DISK_NAME}" ]]; then
+    # Try to find a cached disk image from the current commit
+    COMMIT_DISK_PREFIX="${DISK_PREFIX}-.+-${GITHUB_SHA_SHORT}-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}"
+    CACHED_DISK_NAME=$(find_cached_disk_image "${COMMIT_DISK_PREFIX}" "commit")
+    # If not found and the main branch is preferred, try main; then fall back to any branch
+    if [[ -z "${CACHED_DISK_NAME}" && "${PREFER_MAIN_CACHED_STATE}" == "true" ]]; then
+        MAIN_DISK_PREFIX="${DISK_PREFIX}-main-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}"
+        CACHED_DISK_NAME=$(find_cached_disk_image "${MAIN_DISK_PREFIX}" "main branch")
+    fi
+    if [[ -z "${CACHED_DISK_NAME}" ]]; then
+        ANY_DISK_PREFIX="${DISK_PREFIX}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}"
+        CACHED_DISK_NAME=$(find_cached_disk_image "${ANY_DISK_PREFIX}" "any branch")
+    fi
+fi
+
+# Handle case where no suitable disk image is found
+if [[ -z "${CACHED_DISK_NAME}" ]]; then
+    echo "No suitable cached state disk available."
+    echo "Expected pattern: ${COMMIT_DISK_PREFIX}"
+    echo "Cached state test jobs must depend on the cached state rebuild job."
+    exit 1
+fi
+
+echo "Selected Disk: ${CACHED_DISK_NAME}"
+
+# Exporting variables for subsequent steps
+echo "Exporting variables for subsequent steps..."
+export CACHED_DISK_NAME="${CACHED_DISK_NAME}"
+export LOCAL_STATE_VERSION="${LOCAL_STATE_VERSION}"
diff --git a/.github/workflows/scripts/gcp-vm-startup-script.sh b/.github/workflows/scripts/gcp-vm-startup-script.sh
index da65ff267..7098ed898 100755
--- a/.github/workflows/scripts/gcp-vm-startup-script.sh
+++ b/.github/workflows/scripts/gcp-vm-startup-script.sh
@@ -1,4 +1,4 @@
-#! 
/bin/bash +#!/usr/bin/env bash # Increase the Google Cloud instance sshd connection limit # # This script appends 'MaxStartups 500' to /etc/ssh/sshd_config allowing up to 500 diff --git a/.github/workflows/scripts/release-crates-dry-run.sh b/.github/workflows/scripts/release-crates-dry-run.sh new file mode 100755 index 000000000..cee45b94f --- /dev/null +++ b/.github/workflows/scripts/release-crates-dry-run.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +set -ex + +# Check if necessary tools are installed +if ! command -v git &> /dev/null || ! command -v cargo &> /dev/null; then + echo "ERROR: Required tools (git, cargo) are not installed." + exit 1 +fi + +git config --global user.email "release-tests-no-reply@zfnd.org" +git config --global user.name "Automated Release Test" + +# Ensure cargo-release is installed +if ! cargo release --version &> /dev/null; then + echo "ERROR: cargo release must be installed." + exit 1 +fi + +# Release process +# Ensure to have an extra `--no-confirm` argument for non-interactive testing. +cargo release version --verbose --execute --no-confirm --allow-branch '*' --workspace --exclude zebrad beta +cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebrad patch +cargo release replace --verbose --execute --no-confirm --allow-branch '*' --package zebrad +cargo release commit --verbose --execute --no-confirm --allow-branch '*' + +# Dry run to check the release +# Workaround for unpublished dependency version errors: https://github.com/crate-ci/cargo-release/issues/691 +# TODO: check all crates after fixing these errors +cargo release publish --verbose --dry-run --allow-branch '*' --workspace --exclude zebra-consensus --exclude zebra-utils --exclude zebrad + +echo "Release process completed." 
diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index 19912f40b..1ac07708a 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -1,3 +1,9 @@ +# This workflow automates the building and pushing of Docker images based on user-defined inputs. It includes: +# - Accepting various inputs like image name, Dockerfile path, target, and additional Rust-related parameters. +# - Authenticates with Google Cloud and logs into Google Artifact Registry and DockerHub. +# - Uses Docker Buildx for improved build performance and caching. +# - Builds the Docker image and pushes it to both Google Artifact Registry and potentially DockerHub, depending on release type. +# - Manages caching strategies to optimize build times across different branches. name: Build docker image on: diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 8cec4b302..355701a15 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -183,74 +183,22 @@ jobs: - name: Find ${{ inputs.test_id }} cached state disk id: get-disk-name if: ${{ inputs.needs_zebra_state || inputs.needs_lwd_state }} + env: + GITHUB_SHA_SHORT: ${{ env.GITHUB_SHA_SHORT }} + NEEDS_LWD_STATE: ${{ inputs.needs_lwd_state }} + LWD_STATE_DIR: ${{ inputs.lwd_state_dir }} + ZEBRA_STATE_DIR: ${{ inputs.zebra_state_dir }} + DISK_PREFIX: ${{ inputs.disk_prefix }} + NETWORK: ${{ env.NETWORK }} # use lowercase version from env, not input + DISK_SUFFIX: ${{ inputs.disk_suffix }} + PREFER_MAIN_CACHED_STATE: ${{ inputs.prefer_main_cached_state }} run: | - set -x - LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "$GITHUB_WORKSPACE/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1) - echo "STATE_VERSION: $LOCAL_STATE_VERSION" + source 
./.github/workflows/scripts/gcp-get-cached-disks.sh + echo "cached_disk_name=${CACHED_DISK_NAME}" >> "${GITHUB_OUTPUT}" + echo "STATE_VERSION=${LOCAL_STATE_VERSION}" >> "${GITHUB_ENV}" + echo "CACHED_DISK_NAME=${CACHED_DISK_NAME}" >> "${GITHUB_ENV}" + echo "DISK_OPTION=image=${CACHED_DISK_NAME}" >> "${GITHUB_ENV}" - if [[ "${{ inputs.needs_lwd_state }}" == "true" ]]; then - DISK_PREFIX=${{ inputs.lwd_state_dir }} - else - DISK_PREFIX=${{ inputs.zebra_state_dir || inputs.disk_prefix }} - fi - - # Try to find an image generated from a previous step or run of this commit. - # Fields are listed in the "Create image from state disk" step. - # - # We don't want to match the full branch name here, because: - # - we want to ignore the different GITHUB_REFs across manually triggered jobs, - # pushed branches, and PRs, - # - previous commits might have been buggy, - # or they might have worked and hide bugs in this commit - # (we can't avoid this issue entirely, but we don't want to make it more likely), and - # - the branch name might have been shortened for the image. - # - # The probability of two matching short commit hashes within the same month is very low. 
- COMMIT_DISK_PREFIX="${DISK_PREFIX}-.+-${{ env.GITHUB_SHA_SHORT }}-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" - COMMIT_CACHED_DISK_NAME=$(gcloud compute images list --filter="status=READY AND name~${COMMIT_DISK_PREFIX}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - echo "${GITHUB_REF_SLUG_URL}-${{ env.GITHUB_SHA_SHORT }} Disk: $COMMIT_CACHED_DISK_NAME" - if [[ -n "$COMMIT_CACHED_DISK_NAME" ]]; then - echo "Description: $(gcloud compute images describe $COMMIT_CACHED_DISK_NAME --format='value(DESCRIPTION)')" - fi - - # Try to find an image generated from the main branch - MAIN_CACHED_DISK_NAME=$(gcloud compute images list --filter="status=READY AND name~${DISK_PREFIX}-main-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - echo "main Disk: $MAIN_CACHED_DISK_NAME" - if [[ -n "$MAIN_CACHED_DISK_NAME" ]]; then - echo "Description: $(gcloud compute images describe $MAIN_CACHED_DISK_NAME --format='value(DESCRIPTION)')" - fi - - # Try to find an image generated from any other branch - ANY_CACHED_DISK_NAME=$(gcloud compute images list --filter="status=READY AND name~${DISK_PREFIX}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - echo "any branch Disk: $ANY_CACHED_DISK_NAME" - if [[ -n "$ANY_CACHED_DISK_NAME" ]]; then - echo "Description: $(gcloud compute images describe $ANY_CACHED_DISK_NAME --format='value(DESCRIPTION)')" - fi - - # Select a cached disk based on the job settings - CACHED_DISK_NAME="$COMMIT_CACHED_DISK_NAME" - if [[ -z "$CACHED_DISK_NAME" ]] && [[ "${{ inputs.prefer_main_cached_state }}" == "true" ]]; then - echo "Preferring main branch cached state to other branches..." 
- CACHED_DISK_NAME="$MAIN_CACHED_DISK_NAME" - fi - if [[ -z "$CACHED_DISK_NAME" ]]; then - CACHED_DISK_NAME="$ANY_CACHED_DISK_NAME" - fi - - if [[ -z "$CACHED_DISK_NAME" ]]; then - echo "No cached state disk available" - echo "Expected ${COMMIT_DISK_PREFIX}" - echo "Also searched for cached disks from other branches" - echo "Cached state test jobs must depend on the cached state rebuild job" - exit 1 - fi - - echo "Selected Disk: $CACHED_DISK_NAME" - echo "cached_disk_name=$CACHED_DISK_NAME" >> "$GITHUB_OUTPUT" - - echo "STATE_VERSION=$LOCAL_STATE_VERSION" >> "$GITHUB_ENV" - echo "CACHED_DISK_NAME=$CACHED_DISK_NAME" >> "$GITHUB_ENV" - echo "DISK_OPTION=image=$CACHED_DISK_NAME," >> "$GITHUB_ENV" # Create a Compute Engine virtual machine and attach a cached state disk using the # $CACHED_DISK_NAME variable as the source image to populate the disk cached state @@ -264,7 +212,7 @@ jobs: --boot-disk-type pd-ssd \ --image-project=cos-cloud \ --image-family=cos-stable \ - --create-disk=${DISK_OPTION}name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=400GB,type=pd-ssd \ + --create-disk=${DISK_OPTION},name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=400GB,type=pd-ssd \ --container-image=gcr.io/google-containers/busybox \ --machine-type ${{ vars.GCP_LARGE_MACHINE }} \ --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ diff --git a/.github/workflows/sub-find-cached-disks.yml b/.github/workflows/sub-find-cached-disks.yml index 2e81f760b..5ca797201 100644 --- a/.github/workflows/sub-find-cached-disks.yml +++ b/.github/workflows/sub-find-cached-disks.yml @@ -1,3 +1,10 @@ +# Check if Cached State Disks Exist Workflow +# This workflow is designed to check the availability of cached state disks in Google Cloud Platform (GCP) for different types of Zcash applications. 
+# - Accepts network type as input to determine which disks to search for. +# - Checks for the existence of three types of disks: lightwalletd tip, Zebra tip, and Zebra checkpoint. +# - Uses Google Cloud SDK to query and identify available disks based on network and version. +# - Outputs the availability of each disk type, which can be utilized in subsequent workflows. +# The workflow streamlines the process of verifying disk availability, crucial for optimizing and speeding up integration tests and deployments. name: Check if cached state disks exist on: @@ -57,43 +64,10 @@ jobs: echo "NETWORK=${NETWORK_CAPS,,}" >> $GITHUB_ENV # Check if there are cached state disks available for subsequent jobs to use. - # - # This lookup uses the state version from constants.rs. - # It accepts disks generated by any branch, including draft and unmerged PRs. - # - # If the disk exists, sets the corresponding output to "true": - # - lwd_tip_disk - # - zebra_tip_disk - # - zebra_checkpoint_disk - name: Check if cached state disks exist id: get-available-disks + env: + GITHUB_WORKSPACE: ${{ env.GITHUB_WORKSPACE }} + NETWORK: ${{ env.NETWORK }} # use lowercase version from env, not input run: | - LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "$GITHUB_WORKSPACE/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1) - echo "STATE_VERSION: $LOCAL_STATE_VERSION" - - LWD_TIP_DISK=$(gcloud compute images list --filter="status=READY AND name~lwd-cache-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-tip" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - if [[ -z "$LWD_TIP_DISK" ]]; then - echo "No TIP disk found for lightwalletd on network: ${NETWORK}" - echo "lwd_tip_disk=${{ toJSON(false) }}" >> "$GITHUB_OUTPUT" - else - echo "Disk: $LWD_TIP_DISK" - echo "lwd_tip_disk=${{ toJSON(true) }}" >> "$GITHUB_OUTPUT" - fi - - ZEBRA_TIP_DISK=$(gcloud compute images list --filter="status=READY AND 
name~zebrad-cache-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-tip" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - if [[ -z "$ZEBRA_TIP_DISK" ]]; then - echo "No TIP disk found for Zebra on network: ${NETWORK}" - echo "zebra_tip_disk=${{ toJSON(false) }}" >> "$GITHUB_OUTPUT" - else - echo "Disk: $ZEBRA_TIP_DISK" - echo "zebra_tip_disk=${{ toJSON(true) }}" >> "$GITHUB_OUTPUT" - fi - - ZEBRA_CHECKPOINT_DISK=$(gcloud compute images list --filter="status=READY AND name~zebrad-cache-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-checkpoint" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - if [[ -z "$ZEBRA_CHECKPOINT_DISK" ]]; then - echo "No CHECKPOINT disk found for Zebra on network: ${NETWORK}" - echo "zebra_checkpoint_disk=${{ toJSON(false) }}" >> "$GITHUB_OUTPUT" - else - echo "Disk: $ZEBRA_CHECKPOINT_DISK" - echo "zebra_checkpoint_disk=${{ toJSON(true) }}" >> "$GITHUB_OUTPUT" - fi + ./.github/workflows/scripts/gcp-get-available-disks.sh diff --git a/.github/workflows/sub-test-zebra-config.yml b/.github/workflows/sub-test-zebra-config.yml index 5fd840120..6ccd8574f 100644 --- a/.github/workflows/sub-test-zebra-config.yml +++ b/.github/workflows/sub-test-zebra-config.yml @@ -1,3 +1,7 @@ +# This workflow is designed to test Zebra configuration files using Docker containers. +# - Runs a specified Docker image with the provided test variables and network settings. +# - Monitors and analyzes container logs for specific patterns to determine test success. +# - Provides flexibility in testing various configurations and networks by dynamically adjusting input parameters. name: Test Zebra Config Files on: