Revert "ref(workflow): move most scripts to their own executables (#8005)"

This reverts commit d85b010cf9.
teor 2024-01-02 17:01:51 +10:00 committed by GitHub
parent f21d7c6934
commit 15693567f3
25 changed files with 273 additions and 446 deletions

View File

@@ -1,16 +1,6 @@
# Google Cloud node deployments and tests that run when Rust code or dependencies are modified,
# but only on PRs from the ZcashFoundation/zebra repository.
# (External PRs are tested/deployed by mergify.)
#
# 1. `versioning`: Extracts the major version from the release semver (sketched below). Useful for segregating instances based on major versions.
# 2. `build`: Builds a Docker image named `zebrad` with the necessary tags derived from Git.
# 3. `test-configuration-file`: Validates Zebra using the default config with the latest version.
# 4. `test-configuration-file-testnet`: Tests the Docker image for the testnet configuration.
# 5. `test-zebra-conf-path`: Verifies Zebra with a custom Docker config file.
# 6. `deploy-nodes`: Deploys Managed Instance Groups (MIGs) for Mainnet and Testnet. If triggered by main branch pushes, it always replaces the MIG. For releases, MIGs are replaced only if deploying the same major version; otherwise, a new major version is deployed.
# 7. `deploy-instance`: Deploys a single node in a specified GCP zone for testing specific commits. Instances from this job aren't auto-replaced or deleted.
#
# The overall goal is to ensure that Zebra nodes are consistently deployed, tested, and managed on GCP.
# (External PRs are tested/deployed by mergify.)
name: Deploy Nodes to GCP
# Ensures that only one workflow task will run at a time. Previous deployments, if
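The `versioning` job described in the header comments extracts only the major number from a release semver tag. A minimal sketch of that extraction in bash, assuming a tag shape like `v1.5.0` (the tag value is illustrative, not taken from the workflow):

RELEASE_TAG="v1.5.0"                 # illustrative release tag
MAJOR_VERSION="${RELEASE_TAG#v}"     # strip an optional leading "v"
MAJOR_VERSION="${MAJOR_VERSION%%.*}" # keep everything before the first dot
echo "${MAJOR_VERSION}"              # prints: 1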

View File

@@ -1,13 +1,4 @@
# This workflow is designed to delete old Google Cloud Platform (GCP) resources to save on costs.
#
# 1. Deletes specific instances in GCP older than a defined number of days.
# 2. Deletes instance templates older than a set number of days.
# 3. Deletes older disks not currently in use, with certain ones prefixed by commit hashes or "zebrad-".
# 4. Deletes cache images from GCP, retaining a specified number of the latest images for certain types like zebrad checkpoint cache, zebrad tip cache, and lightwalletd + zebrad tip cache.
# 5. Deletes unused artifacts from Google Artifact Registry older than a defined number of hours while retaining the latest few.
#
# It uses the gcloud CLI for most of its operations and also leverages specific GitHub Actions like the gcr-cleaner for deleting old images from the Google Artifact Registry.
# The workflow is scheduled to run daily at 0700 UTC.
# TODO: rename this action and its filename to "Delete infra resources"
name: Delete GCP resources
on:
@@ -65,11 +56,29 @@ jobs:
# so it can't be shell-quoted.
- name: Delete old instances
run: |
./.github/workflows/scripts/gcp-delete-old-instances.sh
DELETE_BEFORE_DATE=$(date --date="$DELETE_INSTANCE_DAYS days ago" '+%Y%m%d')
IFS=$'\n'
INSTANCES=$(gcloud compute instances list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME,ZONE)' | \
sed 's/\(.*\)\t\(.*\)/\1 --zone=\2/')
for INSTANCE_AND_ZONE in $INSTANCES
do
IFS=$' '
gcloud compute instances delete --verbosity=info ${INSTANCE_AND_ZONE} --delete-disks=all || continue
IFS=$'\n'
done
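The `sed` expression in the step above rewrites each tab-separated `NAME ZONE` row from `gcloud` into ready-to-use delete arguments. A quick illustration with a made-up instance name:

# Input row ('value(NAME,ZONE)' output is tab-separated):
#   instance-abc1234<TAB>us-central1-a
# After sed 's/\(.*\)\t\(.*\)/\1 --zone=\2/':
#   instance-abc1234 --zone=us-central1-a
# The unquoted ${INSTANCE_AND_ZONE} then word-splits into the instance
# name and its --zone flag for 'gcloud compute instances delete'.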
# Deletes all the instance templates older than $DELETE_AGE_DAYS days.
- name: Delete old instance templates
run: |
./.github/workflows/scripts/gcp-delete-old-templates.sh
DELETE_BEFORE_DATE=$(date --date="$DELETE_AGE_DAYS days ago" '+%Y%m%d')
TEMPLATES=$(gcloud compute instance-templates list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)')
for TEMPLATE in $TEMPLATES
do
gcloud compute instance-templates delete "${TEMPLATE}" || continue
done
# Deletes all mainnet and testnet disks older than $DELETE_AGE_DAYS days.
#
@@ -80,7 +89,31 @@ jobs:
# so it can't be shell-quoted.
- name: Delete old disks
run: |
./.github/workflows/scripts/gcp-delete-old-disks.sh
DELETE_BEFORE_DATE=$(date --date="$DELETE_AGE_DAYS days ago" '+%Y%m%d')
IFS=$'\n'
# Disks created by PR jobs, and other jobs that use a commit hash
COMMIT_DISKS=$(gcloud compute disks list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME,LOCATION,LOCATION_SCOPE)' | \
sed 's/\(.*\)\t\(.*\)\t\(.*\)/\1 --\3=\2/')
for DISK_AND_LOCATION in $COMMIT_DISKS
do
IFS=$' '
gcloud compute disks delete --verbosity=info ${DISK_AND_LOCATION} || continue
IFS=$'\n'
done
IFS=$'\n'
# Disks created by managed instance groups, and other jobs that start with "zebrad-"
ZEBRAD_DISKS=$(gcloud compute disks list --sort-by=creationTimestamp --filter="name~^zebrad- AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME,LOCATION,LOCATION_SCOPE)' | \
sed 's/\(.*\)\t\(.*\)\t\(.*\)/\1 --\3=\2/')
for DISK_AND_LOCATION in $ZEBRAD_DISKS
do
IFS=$' '
gcloud compute disks delete --verbosity=info ${DISK_AND_LOCATION} || continue
IFS=$'\n'
done
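The `IFS` switching in the loops above is what keeps the two-field rows intact: newline splitting selects whole rows, and space splitting separates each row into its name and location arguments. A hedged sketch of the same pattern in isolation (`ROWS` and `some-command` are illustrative):

IFS=$'\n'                 # split command output into whole lines
for ROW in ${ROWS}; do    # one "name --zone=..." pair per line
  IFS=$' '                # split the current row into separate words
  some-command ${ROW}     # unquoted on purpose: name and flag stay separate arguments
  IFS=$'\n'               # back to line splitting for the next iteration
done
IFS=$' \t\n'              # restore the default IFS afterwards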
# Deletes mainnet and testnet cache images older than $DELETE_AGE_DAYS days.
#
@@ -92,9 +125,108 @@
#
# TODO:
# - refactor out repeated shell script code
- name: Delete old cache images
- name: Delete old cache disks
run: |
./.github/workflows/scripts/gcp-delete-old-cache-images.sh
DELETE_BEFORE_DATE=$(date --date="$DELETE_AGE_DAYS days ago" '+%Y%m%d')
# As of April 2023, these disk names look like:
# zebrad-cache-6039-merge-62c8ecc-v25-mainnet-checkpoint-053559
#
# Mainnet zebrad checkpoint
ZEBRAD_MAINNET_CHECKPOINT_IMAGES=$(gcloud compute images list --sort-by=~creationTimestamp --filter="name~^zebrad-cache-.*-mainnet-checkpoint AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)')
KEPT_IMAGES=0
for IMAGE in $ZEBRAD_MAINNET_CHECKPOINT_IMAGES
do
if [[ "$KEPT_IMAGES" -lt "$KEEP_LATEST_IMAGE_COUNT" ]];
then
KEPT_IMAGES=$((KEPT_IMAGES+1))
echo "Keeping image $KEPT_IMAGES named $IMAGE"
continue
fi
gcloud compute images delete "${IMAGE}" || continue
done
# Testnet zebrad checkpoint
ZEBRAD_TESTNET_CHECKPOINT_IMAGES=$(gcloud compute images list --sort-by=~creationTimestamp --filter="name~^zebrad-cache-.*-testnet-checkpoint AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)')
KEPT_IMAGES=0
for IMAGE in $ZEBRAD_TESTNET_CHECKPOINT_IMAGES
do
if [[ "$KEPT_IMAGES" -lt "$KEEP_LATEST_IMAGE_COUNT" ]];
then
KEPT_IMAGES=$((KEPT_IMAGES+1))
echo "Keeping image $KEPT_IMAGES named $IMAGE"
continue
fi
gcloud compute images delete "${IMAGE}" || continue
done
# As of April 2023, these disk names look like:
# zebrad-cache-6556-merge-a2ca4de-v25-mainnet-tip(-u)?-140654
#
# Mainnet zebrad tip
ZEBRAD_MAINNET_TIP_IMAGES=$(gcloud compute images list --sort-by=~creationTimestamp --filter="name~^zebrad-cache-.*-mainnet-tip AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)')
KEPT_IMAGES=0
for IMAGE in $ZEBRAD_MAINNET_TIP_IMAGES
do
if [[ "$KEPT_IMAGES" -lt "$KEEP_LATEST_IMAGE_COUNT" ]];
then
KEPT_IMAGES=$((KEPT_IMAGES+1))
echo "Keeping image $KEPT_IMAGES named $IMAGE"
continue
fi
gcloud compute images delete "${IMAGE}" || continue
done
# Testnet zebrad tip
ZEBRAD_TESTNET_TIP_IMAGES=$(gcloud compute images list --sort-by=~creationTimestamp --filter="name~^zebrad-cache-.*-testnet-tip AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)')
KEPT_IMAGES=0
for IMAGE in $ZEBRAD_TESTNET_TIP_IMAGES
do
if [[ "$KEPT_IMAGES" -lt "$KEEP_LATEST_IMAGE_COUNT" ]];
then
KEPT_IMAGES=$((KEPT_IMAGES+1))
echo "Keeping image $KEPT_IMAGES named $IMAGE"
continue
fi
gcloud compute images delete "${IMAGE}" || continue
done
# As of April 2023, these disk names look like:
# lwd-cache-main-fb3fec0-v25-mainnet-tip(-u)?-061314
#
# Mainnet lightwalletd tip
LWD_MAINNET_TIP_IMAGES=$(gcloud compute images list --sort-by=~creationTimestamp --filter="name~^lwd-cache-.*-mainnet-tip AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)')
KEPT_IMAGES=0
for IMAGE in $LWD_MAINNET_TIP_IMAGES
do
if [[ "$KEPT_IMAGES" -lt "$KEEP_LATEST_IMAGE_COUNT" ]];
then
KEPT_IMAGES=$((KEPT_IMAGES+1))
echo "Keeping image $KEPT_IMAGES named $IMAGE"
continue
fi
gcloud compute images delete "${IMAGE}" || continue
done
# Testnet lightwalletd tip
LWD_TESTNET_TIP_IMAGES=$(gcloud compute images list --sort-by=~creationTimestamp --filter="name~^lwd-cache-.*-testnet-tip AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)')
KEPT_IMAGES=0
for IMAGE in $LWD_TESTNET_TIP_IMAGES
do
if [[ "$KEPT_IMAGES" -lt "$KEEP_LATEST_IMAGE_COUNT" ]];
then
KEPT_IMAGES=$((KEPT_IMAGES+1))
echo "Keeping image $KEPT_IMAGES named $IMAGE"
continue
fi
gcloud compute images delete "${IMAGE}" || continue
done
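All six loops above share the same cutoff computation. As a worked example of what `DELETE_BEFORE_DATE` holds (the dates are illustrative):

# With DELETE_AGE_DAYS=30 and a current date of 2024-01-02:
DELETE_BEFORE_DATE=$(date --date="30 days ago" '+%Y%m%d')
echo "${DELETE_BEFORE_DATE}"   # prints: 20231203
# gcloud then evaluates 'creationTimestamp < 20231203' as a date
# comparison, so only images created before that day match the filter.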
# We're using a generic approach here, which allows multiple registries to be included,
# even those not related to GCP. That is reason enough to keep this as a separate job.

View File

@@ -1,12 +1,6 @@
# This workflow manages the automatic addition of new issues to specific GitHub projects.
#
# 1. Newly opened issues are added to the "Zebra Backlog" GitHub project.
# 2. They are also added to the "ZF Engineering Backlog" GitHub project.
#
# The workflow uses the `add-to-project` action and requires a GitHub token
# (currently sourced from secrets) to authenticate and perform the addition.
name: Add new issues to GitHub projects
# Configuration for automatically adding issues to various GitHub projects for project management purposes
on:
issues:
types:

View File

@@ -1,13 +1,3 @@
# This workflow builds each Rust crate in the repository individually.
# 1. A matrix is generated dynamically to identify each crate in the repository (see the sketch below).
# 2. This matrix is checked for validity.
# 3. Each identified crate undergoes three build processes:
# - With no features.
# - With the default features.
# - With all the features enabled.
# 4. In case of build failures outside of pull requests, an issue is either opened or updated
# in the repository to report the failure.
# Throughout the workflow, various setup steps ensure the correct environment and tools are present.
name: Build crates individually
# Ensures that only one workflow task will run at a time. Previous builds, if
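A hedged sketch of how a per-crate matrix like the one described in the header comments can be generated and consumed; the `cargo metadata`/`jq` pipeline and the `CRATE` variable are assumptions for illustration, not necessarily the workflow's exact commands:

# List workspace crate names as a JSON array for a GitHub Actions matrix:
CRATES=$(cargo metadata --no-deps --format-version 1 | jq -c '[.packages[].name]')
echo "matrix={\"crate\":${CRATES}}" >> "${GITHUB_OUTPUT}"
# Each matrix job then builds its crate three ways:
cargo build --package "${CRATE}" --no-default-features
cargo build --package "${CRATE}"
cargo build --package "${CRATE}" --all-features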

View File

@@ -1,11 +1,3 @@
# This workflow calculates the test coverage for the Rust codebase.
# 1. The code is checked out.
# 2. Rust with the stable toolchain, minimal profile, and llvm-tools-preview component is set up.
# 3. Necessary tools like 'cargo-llvm-cov' are installed.
# 4. Proptest is minimized for efficient coverage test runs.
# 5. Tests are run without producing a report to gather coverage information.
# 6. A coverage report (lcov format) is generated based on the gathered information.
# 7. Finally, this report is uploaded to Codecov for visualization and analysis. (The main commands are sketched below.)
name: Coverage
# Ensures that only one workflow task will run at a time. Previous builds, if
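A minimal sketch of the coverage steps listed in the header comments, assuming cargo-llvm-cov's standard two-phase usage (the workflow's exact flags may differ):

# Run the tests while collecting coverage, without emitting a report yet:
cargo llvm-cov --no-report
# Then turn the gathered profiles into an lcov file for Codecov:
cargo llvm-cov report --lcov --output-path lcov.info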

View File

@@ -1,8 +1,5 @@
# Google Cloud integration tests that run when Rust code or dependencies are modified,
# but only on PRs from the ZcashFoundation/zebra repository. (External PRs are tested by mergify.)
#
# Specific conditions and dependencies are set for each job to ensure the jobs run in the correct sequence and under the right circumstances.
# Each test has a description of the conditions under which it runs.
name: Integration Tests on GCP
# Ensures that only one workflow task will run at a time. Previous builds, if

View File

@@ -1,10 +1,3 @@
# This workflow conducts various linting checks for a Rust-based project.
# 1. Determines if Rust or workflow files have been modified.
# 2. Runs the Clippy linter on Rust files, producing annotations and failing on warnings.
# 3. Ensures Rust code formatting complies with 'rustfmt' standards (both checks are sketched below).
# 4. Lints GitHub Actions workflow files for common issues.
# 5. Checks for common spelling errors in the codebase.
# The workflow is designed to maintain code quality and consistency, running checks conditionally based on the changed files.
name: Lint
# Ensures that only one workflow task will run at a time. Previous builds, if
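Hedged sketches of the two Rust checks named in the header comments (the workflow's exact flags may differ):

# Clippy, failing the job on any warning:
cargo clippy --all-features --all-targets -- -D warnings
# rustfmt in check-only mode, failing on unformatted code:
cargo fmt --all -- --check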

View File

@@ -1,16 +1,5 @@
# Google Cloud unit tests that run when Rust code or dependencies are modified,
# but only on PRs from the ZcashFoundation/zebra repository. (External PRs are tested by mergify.)
#
# This workflow is designed for running various unit tests within Docker containers.
# Jobs:
# 1. Builds a Docker image for tests, adaptable to the specified network (Mainnet or Testnet).
# 2. 'test-all': Executes all Zebra tests, including normally ignored ones, in a Docker environment.
# 3. 'test-fake-activation-heights': Runs state tests with fake activation heights, isolating its build products.
# 4. 'test-empty-sync': Tests Zebra's ability to sync and checkpoint from an empty state.
# 5. 'test-lightwalletd-integration': Validates integration with 'lightwalletd' starting from an empty state.
# 6. 'test-configuration-file': Assesses the default Docker configuration for Zebra.
# 7. 'test-configuration-file-testnet': Checks the Docker image reconfiguration for the Testnet.
# 8. 'test-zebra-conf-path': Tests Zebra using a custom Docker configuration.
name: Docker Unit Tests
# Ensures that only one workflow task will run at a time. Previous builds, if

View File

@@ -1,9 +1,3 @@
# This workflow performs unit tests across different operating systems and Rust versions. It includes steps for:
# - Testing on Ubuntu and macOS with stable and beta Rust toolchains.
# - Installing Zebra from the lockfile without cache on Ubuntu.
# - Verifying that Cargo.lock is up-to-date with Cargo.toml changes.
# - Running cargo-deny checks for dependencies.
# - Checking for unused dependencies in the code. (These checks are sketched below.)
name: Multi-OS Unit Tests
# Ensures that only one workflow task will run at a time. Previous builds, if
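Hedged sketches of the lockfile and dependency checks described in the header comments. `cargo-deny` is named in the list; the unused-dependency tool is not, so that line is an assumption:

# Fails if Cargo.lock is not up to date with Cargo.toml:
cargo check --locked --all-targets
# cargo-deny's advisory, license, and bans checks:
cargo deny check
# One common unused-dependency checker (an assumption; needs nightly):
cargo +nightly udeps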

View File

@@ -1,9 +1,5 @@
# Google Cloud docs updates that run when docs, Rust code, or dependencies are modified,
# but only on PRs from the ZcashFoundation/zebra repository. (External PRs are deployed by mergify.)
# - Builds and deploys the Zebra Book docs using mdBook, setting up necessary tools and deploying to Firebase.
# - Compiles and deploys the external documentation, setting up Rust with the beta toolchain and default profile, building the docs, and deploying them to Firebase.
# - Assembles the internal documentation with similar steps, additionally including private items, and deploys it to Firebase. (The builds are sketched below.)
name: Docs
# Ensures that only one workflow task will run at a time. Previous deployments, if
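A rough sketch of the three documentation builds described in the header comments; the book path and the bare `firebase deploy` invocation are assumptions:

# Zebra Book via mdBook (assumed book directory):
mdbook build book/
# External docs with the beta toolchain:
cargo +beta doc --no-deps
# Internal docs additionally include private items:
cargo +beta doc --no-deps --document-private-items
# Each output is then deployed, e.g.:
firebase deploy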

View File

@@ -1,10 +1,3 @@
# This workflow is designed for manually deploying zcashd nodes to Google Cloud Platform (GCP) based on user inputs.
# - Allows selection of network type (Mainnet or Testnet) and instance group size.
# - Converts network name to lowercase to comply with GCP labeling requirements.
# - Authenticates with Google Cloud using provided credentials.
# - Creates a GCP instance template from a container image of zcashd.
# - Checks if the specified instance group already exists.
# - Depending on the existence check, either creates a new managed instance group or updates the existing one with the new template.
name: Zcashd Manual Deploy
on:

View File

@@ -93,9 +93,26 @@ jobs:
#
# These steps should be kept up to date with the release checklist.
#
# TODO: move these steps into a script which is run in the release checklist and CI
- name: Crate release dry run
run: |
./.github/workflows/scripts/release-crates-dry-run.sh
set -ex
git config --global user.email "release-tests-no-reply@zfnd.org"
git config --global user.name "Automated Release Test"
# This script must be the same as:
# https://github.com/ZcashFoundation/zebra/blob/main/.github/PULL_REQUEST_TEMPLATE/release-checklist.md#update-crate-versions
# with an extra `--no-confirm` argument for non-interactive testing.
cargo release version --verbose --execute --no-confirm --allow-branch '*' --workspace --exclude zebrad beta
cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebrad patch
cargo release replace --verbose --execute --no-confirm --allow-branch '*' --package zebrad
cargo release commit --verbose --execute --no-confirm --allow-branch '*'
# Check the release will work using a dry run
#
# Workaround unpublished dependency version errors by skipping those crates:
# https://github.com/crate-ci/cargo-release/issues/691
#
# TODO: check all crates after fixing these errors
cargo release publish --verbose --dry-run --allow-branch '*' --workspace --exclude zebra-consensus --exclude zebra-utils --exclude zebrad
# TODO: actually do the release here
#release-crates:
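For readers unfamiliar with cargo-release's bump levels, the `version` commands above behave roughly as follows (the version numbers are illustrative):

# 'beta' bumps the pre-release component of the workspace crates:
#   1.0.0-beta.33  ->  1.0.0-beta.34
# 'patch' bumps zebrad's patch component:
#   1.5.0  ->  1.5.1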

View File

@@ -1,10 +1,6 @@
# This workflow automates the creation and updating of draft releases. It compiles PR titles into the draft release notes.
# Creates a draft release with all the PR names since the last release.
# https://github.com/ZcashFoundation/zebra/releases
#
# - Updates the draft release upon each merge into 'main'.
# - Utilizes the release-drafter GitHub Action to accumulate PR titles since the last release into a draft release note.
# - Suitable permissions are set for creating releases and handling pull requests.
#
# Workflow is based on:
# https://github.com/marketplace/actions/release-drafter#usage
name: Release Drafter

View File

@@ -1,45 +0,0 @@
#!/usr/bin/env bash
# Function to handle image deletion logic
delete_images() {
local image_type="$1"
local filter="$2"
local kept_images=0
echo "Processing ${image_type} images"
images=$(gcloud compute images list --sort-by=~creationTimestamp --filter="${filter} AND creationTimestamp < ${DELETE_BEFORE_DATE}" --format='value(NAME)')
for image in ${images}; do
if [[ "${kept_images}" -lt "${KEEP_LATEST_IMAGE_COUNT}" ]]; then
((kept_images++))
echo "Keeping image ${kept_images} named ${image}"
else
echo "Deleting image: ${image}"
gcloud compute images delete "${image}" || echo "Failed to delete image: ${image}"
fi
done
}
# Check if necessary variables are set
if ! [[ "${DELETE_AGE_DAYS}" =~ ^[0-9]+$ && "${KEEP_LATEST_IMAGE_COUNT}" =~ ^[0-9]+$ ]]; then
echo "ERROR: One or more required variables are not set or not numeric"
exit 1
fi
# Set pipefail
set -o pipefail
# Calculate the date before which images should be deleted
DELETE_BEFORE_DATE=$(date --date="${DELETE_AGE_DAYS} days ago" '+%Y%m%d')
# Mainnet and Testnet zebrad checkpoint
delete_images "Mainnet zebrad checkpoint" "name~^zebrad-cache-.*-mainnet-checkpoint" # As of April 2023, these disk names look like: zebrad-cache-6556-merge-a2ca4de-v25-mainnet-tip(-u)?-140654
delete_images "Testnet zebrad checkpoint" "name~^zebrad-cache-.*-testnet-checkpoint"
# Mainnet and Testnet zebrad tip
delete_images "Mainnet zebrad tip" "name~^zebrad-cache-.*-mainnet-tip"
delete_images "Testnet zebrad tip" "name~^zebrad-cache-.*-testnet-tip"
# Mainnet and Testnet lightwalletd tip
delete_images "Mainnet lightwalletd tip" "name~^lwd-cache-.*-mainnet-tip"
delete_images "Testnet lightwalletd tip" "name~^lwd-cache-.*-testnet-tip"

View File

@@ -1,49 +0,0 @@
#!/usr/bin/env bash
# Check if DELETE_AGE_DAYS is set and is a number
if ! [[ "${DELETE_AGE_DAYS}" =~ ^[0-9]+$ ]]; then
echo "ERROR: DELETE_AGE_DAYS is not set or not a number"
exit 1
fi
# Set pipefail to catch errors in pipelines
set -o pipefail
# Calculate the date before which disks should be deleted
DELETE_BEFORE_DATE=$(date --date="${DELETE_AGE_DAYS} days ago" '+%Y%m%d')
# Fetch disks created by PR jobs, and other jobs that use a commit hash
if ! COMMIT_DISKS=$(gcloud compute disks list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < ${DELETE_BEFORE_DATE}" --format='value(NAME,LOCATION,LOCATION_SCOPE)' | sed 's/\(.*\)\t\(.*\)\t\(.*\)/\1 --\3=\2/'); then
echo "Error fetching COMMIT_DISKS."
exit 1
fi
# Delete commit disks if any are found
IFS=$'\n'
for DISK_AND_LOCATION in ${COMMIT_DISKS}; do
IFS=$' '
echo "Deleting disk: ${DISK_AND_LOCATION}"
if ! gcloud compute disks delete --verbosity=info "${DISK_AND_LOCATION}"; then
echo "Failed to delete disk: ${DISK_AND_LOCATION}"
fi
IFS=$'\n'
done
IFS=$' \t\n' # Reset IFS to its default value
# Fetch disks created by managed instance groups, and other jobs that start with "zebrad-"
if ! ZEBRAD_DISKS=$(gcloud compute disks list --sort-by=creationTimestamp --filter="name~^zebrad- AND creationTimestamp < ${DELETE_BEFORE_DATE}" --format='value(NAME,LOCATION,LOCATION_SCOPE)' | sed 's/\(.*\)\t\(.*\)\t\(.*\)/\1 --\3=\2/'); then
echo "Error fetching ZEBRAD_DISKS."
exit 1
fi
# Delete zebrad disks if any are found
IFS=$'\n'
for DISK_AND_LOCATION in ${ZEBRAD_DISKS}; do
IFS=$' '
echo "Deleting disk: ${DISK_AND_LOCATION}"
if ! gcloud compute disks delete --verbosity=info "${DISK_AND_LOCATION}"; then
echo "Failed to delete disk: ${DISK_AND_LOCATION}"
fi
IFS=$'\n'
done
IFS=$' \t\n' # Reset IFS to its default value

View File

@@ -1,42 +0,0 @@
#!/usr/bin/env bash
# Check if DELETE_INSTANCE_DAYS is set and is a number
if ! [[ "${DELETE_INSTANCE_DAYS}" =~ ^[0-9]+$ ]]; then
echo "ERROR: DELETE_INSTANCE_DAYS is not set or not a number"
exit 1
fi
# Set pipefail to catch errors in pipelines
set -o pipefail
# Calculate the date before which instances should be deleted
DELETE_BEFORE_DATE=$(date --date="${DELETE_INSTANCE_DAYS} days ago" '+%Y%m%d')
# Check if gcloud command is available
if ! command -v gcloud &> /dev/null; then
echo "ERROR: gcloud command not found"
exit 1
fi
# Fetch the list of instances to delete
if ! INSTANCES=$(gcloud compute instances list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < ${DELETE_BEFORE_DATE}" --format='value(NAME,ZONE)' | sed 's/\(.*\)\t\(.*\)/\1 --zone=\2/'); then
echo "Error fetching instances."
exit 1
fi
# Delete instances if any are found
if [[ -n "${INSTANCES}" ]]; then
IFS=$'\n'
for INSTANCE_AND_ZONE in ${INSTANCES}; do
IFS=$' '
echo "Deleting instance: ${INSTANCE_AND_ZONE}"
gcloud compute instances delete --verbosity=info "${INSTANCE_AND_ZONE}" --delete-disks=all || {
echo "Failed to delete instance: ${INSTANCE_AND_ZONE}"
continue
}
IFS=$'\n'
done
IFS=$' \t\n' # Reset IFS to its default value
else
echo "No instances to delete."
fi

View File

@@ -1,33 +0,0 @@
#!/usr/bin/env bash
# Check if DELETE_AGE_DAYS is set and is a number
if ! [[ "${DELETE_AGE_DAYS}" =~ ^[0-9]+$ ]]; then
echo "ERROR: DELETE_AGE_DAYS is not set or not a number"
exit 1
fi
# Set pipefail to catch errors in pipelines
set -o pipefail
# Calculate the date before which templates should be deleted
DELETE_BEFORE_DATE=$(date --date="${DELETE_AGE_DAYS} days ago" '+%Y%m%d')
# Check if gcloud command is available
if ! command -v gcloud &> /dev/null; then
echo "ERROR: gcloud command not found"
exit 1
fi
# Fetch the list of instance templates to delete
if ! TEMPLATES=$(gcloud compute instance-templates list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < ${DELETE_BEFORE_DATE}" --format='value(NAME)'); then
echo "Error fetching instance templates."
exit 1
fi
# Delete templates if any are found
for TEMPLATE in ${TEMPLATES}; do
echo "Deleting template: ${TEMPLATE}"
if ! gcloud compute instance-templates delete "${TEMPLATE}"; then
echo "Failed to delete template: ${TEMPLATE}"
fi
done

View File

@@ -1,42 +0,0 @@
#!/usr/bin/env bash
# Description:
# Check if there are cached state disks available for subsequent jobs to use.
#
# This lookup uses the state version from constants.rs.
# It accepts disks generated by any branch, including draft and unmerged PRs.
#
# If the disk exists, sets the corresponding output to "true":
# - lwd_tip_disk
# - zebra_tip_disk
# - zebra_checkpoint_disk
set -euxo pipefail
LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "${GITHUB_WORKSPACE}/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1)
echo "STATE_VERSION: ${LOCAL_STATE_VERSION}"
# Function to find a disk image and output its name
find_disk_image() {
local base_name="${1}"
local disk_type="${2}"
local disk_pattern="${base_name}-cache"
local output_var="${base_name}_${disk_type}_disk"
local disk_image
disk_image=$(gcloud compute images list --filter="status=READY AND name~${disk_pattern}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${disk_type}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)
if [[ -z "${disk_image}" ]]; then
echo "No ${disk_type^^} disk found for ${base_name^^} on network: ${NETWORK}"
echo "${output_var}=false" >> "${GITHUB_OUTPUT}"
else
echo "Disk: ${disk_image}"
echo "${output_var}=true" >> "${GITHUB_OUTPUT}"
fi
}
# Find and output LWD and Zebra disks
find_disk_image "lwd" "tip"
find_disk_image "zebrad" "tip"
find_disk_image "zebrad" "checkpoint"

View File

@@ -1,72 +0,0 @@
#!/usr/bin/env bash
# Description:
# This script finds a cached Google Cloud Compute image based on specific criteria.
# It prioritizes images from the current commit, falls back to the main branch,
# and finally checks other branches if needed. The selected image is used for
# setting up the environment in a CI/CD pipeline.
set -eo pipefail
# Function to find and report a cached disk image
find_cached_disk_image() {
local search_pattern="${1}"
local git_source="${2}"
local disk_name
disk_name=$(gcloud compute images list --filter="status=READY AND name~${search_pattern}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)
# Use >&2 to redirect to stderr and avoid sending wrong assignments to stdout
if [[ -n "${disk_name}" ]]; then
echo "Found ${git_source} Disk: ${disk_name}" >&2
disk_description=$(gcloud compute images describe "${disk_name}" --format="value(DESCRIPTION)")
echo "Description: ${disk_description}" >&2
echo "${disk_name}" # This is the actual return value when a disk is found
else
echo "No ${git_source} disk found." >&2
fi
}
# Extract local state version
echo "Extracting local state version..."
LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "${GITHUB_WORKSPACE}/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1)
echo "STATE_VERSION: ${LOCAL_STATE_VERSION}"
# Define DISK_PREFIX based on which state directory the job requires
if [[ "${NEEDS_LWD_STATE}" == "true" ]]; then
DISK_PREFIX="${LWD_STATE_DIR}"
else
DISK_PREFIX="${ZEBRA_STATE_DIR:-${DISK_PREFIX}}"
fi
# Find the most suitable cached disk image
echo "Finding the most suitable cached disk image..."
if [[ -z "${CACHED_DISK_NAME}" ]]; then
# Try to find a cached disk image from the current commit
COMMIT_DISK_PREFIX="${DISK_PREFIX}-.+-${GITHUB_SHA_SHORT}-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}"
CACHED_DISK_NAME=$(find_cached_disk_image "${COMMIT_DISK_PREFIX}" "commit")
# If no cached disk image is found, try to find one from the main branch
if [[ "${PREFER_MAIN_CACHED_STATE}" == "true" ]]; then
MAIN_DISK_PREFIX="${DISK_PREFIX}-main-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}"
CACHED_DISK_NAME=$(find_cached_disk_image "${MAIN_DISK_PREFIX}" "main branch")
# Else, try to find one from any branch
else
ANY_DISK_PREFIX="${DISK_PREFIX}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}"
CACHED_DISK_NAME=$(find_cached_disk_image "${ANY_DISK_PREFIX}" "any branch")
fi
fi
# Handle case where no suitable disk image is found
if [[ -z "${CACHED_DISK_NAME}" ]]; then
echo "No suitable cached state disk available."
echo "Expected pattern: ${COMMIT_DISK_PREFIX}"
echo "Cached state test jobs must depend on the cached state rebuild job."
exit 1
fi
echo "Selected Disk: ${CACHED_DISK_NAME}"
# Exporting variables for subsequent steps
echo "Exporting variables for subsequent steps..."
export CACHED_DISK_NAME="${CACHED_DISK_NAME}"
export LOCAL_STATE_VERSION="${LOCAL_STATE_VERSION}"
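A hedged sketch of how this script is driven from the workflow step that sources it (see the env block later in this diff); the variable values are illustrative, and `GITHUB_WORKSPACE`/`GITHUB_OUTPUT` are assumed to be set by the runner:

export NETWORK=mainnet DISK_SUFFIX=tip DISK_PREFIX=zebrad-cache
export NEEDS_LWD_STATE=false PREFER_MAIN_CACHED_STATE=true
export GITHUB_SHA_SHORT=a2ca4de CACHED_DISK_NAME=""
source ./.github/workflows/scripts/gcp-get-cached-disks.sh
echo "cached_disk_name=${CACHED_DISK_NAME}" >> "${GITHUB_OUTPUT}"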

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env bash
#! /bin/bash
# Increase the Google Cloud instance sshd connection limit
#
# This script appends 'MaxStartups 500' to /etc/ssh/sshd_config allowing up to 500

View File

@@ -1,31 +0,0 @@
#!/usr/bin/env bash
set -ex
# Check if necessary tools are installed
if ! command -v git &> /dev/null || ! command -v cargo &> /dev/null; then
echo "ERROR: Required tools (git, cargo) are not installed."
exit 1
fi
git config --global user.email "release-tests-no-reply@zfnd.org"
git config --global user.name "Automated Release Test"
# Ensure cargo-release is installed
if ! cargo release --version &> /dev/null; then
echo "ERROR: cargo release must be installed."
exit 1
fi
# Release process
# Ensure to have an extra `--no-confirm` argument for non-interactive testing.
cargo release version --verbose --execute --no-confirm --allow-branch '*' --workspace --exclude zebrad beta
cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebrad patch
cargo release replace --verbose --execute --no-confirm --allow-branch '*' --package zebrad
cargo release commit --verbose --execute --no-confirm --allow-branch '*'
# Dry run to check the release
# Workaround for unpublished dependency version errors: https://github.com/crate-ci/cargo-release/issues/691
# TODO: check all crates after fixing these errors
cargo release publish --verbose --dry-run --allow-branch '*' --workspace --exclude zebra-consensus --exclude zebra-utils --exclude zebrad
echo "Release process completed."

View File

@@ -1,9 +1,3 @@
# This workflow automates the building and pushing of Docker images based on user-defined inputs. It:
# - Accepts various inputs like image name, Dockerfile path, target, and additional Rust-related parameters.
# - Authenticates with Google Cloud and logs into Google Artifact Registry and DockerHub.
# - Uses Docker Buildx for improved build performance and caching.
# - Builds the Docker image and pushes it to Google Artifact Registry and, depending on release type, to DockerHub.
# - Manages caching strategies to optimize build times across different branches.
name: Build docker image
on:

View File

@@ -183,22 +183,74 @@ jobs:
- name: Find ${{ inputs.test_id }} cached state disk
id: get-disk-name
if: ${{ inputs.needs_zebra_state || inputs.needs_lwd_state }}
env:
GITHUB_SHA_SHORT: ${{ env.GITHUB_SHA_SHORT }}
NEEDS_LWD_STATE: ${{ inputs.needs_lwd_state }}
LWD_STATE_DIR: ${{ inputs.lwd_state_dir }}
ZEBRA_STATE_DIR: ${{ inputs.zebra_state_dir }}
DISK_PREFIX: ${{ inputs.disk_prefix }}
NETWORK: ${{ env.NETWORK }} # use lowercase version from env, not input
DISK_SUFFIX: ${{ inputs.disk_suffix }}
PREFER_MAIN_CACHED_STATE: ${{ inputs.prefer_main_cached_state }}
run: |
source ./.github/workflows/scripts/gcp-get-cached-disks.sh
echo "cached_disk_name=${CACHED_DISK_NAME}" >> "${GITHUB_OUTPUT}"
echo "STATE_VERSION=${LOCAL_STATE_VERSION}" >> "${GITHUB_ENV}"
echo "CACHED_DISK_NAME=${CACHED_DISK_NAME}" >> "${GITHUB_ENV}"
echo "DISK_OPTION=image=${CACHED_DISK_NAME}" >> "${GITHUB_ENV}"
set -x
LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "$GITHUB_WORKSPACE/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1)
echo "STATE_VERSION: $LOCAL_STATE_VERSION"
if [[ "${{ inputs.needs_lwd_state }}" == "true" ]]; then
DISK_PREFIX=${{ inputs.lwd_state_dir }}
else
DISK_PREFIX=${{ inputs.zebra_state_dir || inputs.disk_prefix }}
fi
# Try to find an image generated from a previous step or run of this commit.
# Fields are listed in the "Create image from state disk" step.
#
# We don't want to match the full branch name here, because:
# - we want to ignore the different GITHUB_REFs across manually triggered jobs,
# pushed branches, and PRs,
# - previous commits might have been buggy,
# or they might have worked and hide bugs in this commit
# (we can't avoid this issue entirely, but we don't want to make it more likely), and
# - the branch name might have been shortened for the image.
#
# The probability of two matching short commit hashes within the same month is very low.
COMMIT_DISK_PREFIX="${DISK_PREFIX}-.+-${{ env.GITHUB_SHA_SHORT }}-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}"
COMMIT_CACHED_DISK_NAME=$(gcloud compute images list --filter="status=READY AND name~${COMMIT_DISK_PREFIX}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)
echo "${GITHUB_REF_SLUG_URL}-${{ env.GITHUB_SHA_SHORT }} Disk: $COMMIT_CACHED_DISK_NAME"
if [[ -n "$COMMIT_CACHED_DISK_NAME" ]]; then
echo "Description: $(gcloud compute images describe $COMMIT_CACHED_DISK_NAME --format='value(DESCRIPTION)')"
fi
# Try to find an image generated from the main branch
MAIN_CACHED_DISK_NAME=$(gcloud compute images list --filter="status=READY AND name~${DISK_PREFIX}-main-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)
echo "main Disk: $MAIN_CACHED_DISK_NAME"
if [[ -n "$MAIN_CACHED_DISK_NAME" ]]; then
echo "Description: $(gcloud compute images describe $MAIN_CACHED_DISK_NAME --format='value(DESCRIPTION)')"
fi
# Try to find an image generated from any other branch
ANY_CACHED_DISK_NAME=$(gcloud compute images list --filter="status=READY AND name~${DISK_PREFIX}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)
echo "any branch Disk: $ANY_CACHED_DISK_NAME"
if [[ -n "$ANY_CACHED_DISK_NAME" ]]; then
echo "Description: $(gcloud compute images describe $ANY_CACHED_DISK_NAME --format='value(DESCRIPTION)')"
fi
# Select a cached disk based on the job settings
CACHED_DISK_NAME="$COMMIT_CACHED_DISK_NAME"
if [[ -z "$CACHED_DISK_NAME" ]] && [[ "${{ inputs.prefer_main_cached_state }}" == "true" ]]; then
echo "Preferring main branch cached state to other branches..."
CACHED_DISK_NAME="$MAIN_CACHED_DISK_NAME"
fi
if [[ -z "$CACHED_DISK_NAME" ]]; then
CACHED_DISK_NAME="$ANY_CACHED_DISK_NAME"
fi
if [[ -z "$CACHED_DISK_NAME" ]]; then
echo "No cached state disk available"
echo "Expected ${COMMIT_DISK_PREFIX}"
echo "Also searched for cached disks from other branches"
echo "Cached state test jobs must depend on the cached state rebuild job"
exit 1
fi
echo "Selected Disk: $CACHED_DISK_NAME"
echo "cached_disk_name=$CACHED_DISK_NAME" >> "$GITHUB_OUTPUT"
echo "STATE_VERSION=$LOCAL_STATE_VERSION" >> "$GITHUB_ENV"
echo "CACHED_DISK_NAME=$CACHED_DISK_NAME" >> "$GITHUB_ENV"
echo "DISK_OPTION=image=$CACHED_DISK_NAME," >> "$GITHUB_ENV"
# Create a Compute Engine virtual machine and attach a cached state disk using the
# $CACHED_DISK_NAME variable as the source image to populate the disk cached state
@@ -212,7 +264,7 @@ jobs:
--boot-disk-type pd-ssd \
--image-project=cos-cloud \
--image-family=cos-stable \
--create-disk=${DISK_OPTION},name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=400GB,type=pd-ssd \
--create-disk=${DISK_OPTION}name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=400GB,type=pd-ssd \
--container-image=gcr.io/google-containers/busybox \
--machine-type ${{ vars.GCP_LARGE_MACHINE }} \
--network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \

View File

@@ -1,10 +1,3 @@
# Check if Cached State Disks Exist Workflow
# This workflow is designed to check the availability of cached state disks in Google Cloud Platform (GCP) for different types of Zcash applications.
# - Accepts network type as input to determine which disks to search for.
# - Checks for the existence of three types of disks: lightwalletd tip, Zebra tip, and Zebra checkpoint.
# - Uses Google Cloud SDK to query and identify available disks based on network and version.
# - Outputs the availability of each disk type, which can be utilized in subsequent workflows.
# The workflow streamlines verifying disk availability, which is crucial for optimizing and speeding up integration tests and deployments.
name: Check if cached state disks exist
on:
@@ -64,10 +57,43 @@ jobs:
echo "NETWORK=${NETWORK_CAPS,,}" >> $GITHUB_ENV
# Check if there are cached state disks available for subsequent jobs to use.
#
# This lookup uses the state version from constants.rs.
# It accepts disks generated by any branch, including draft and unmerged PRs.
#
# If the disk exists, sets the corresponding output to "true":
# - lwd_tip_disk
# - zebra_tip_disk
# - zebra_checkpoint_disk
- name: Check if cached state disks exist
id: get-available-disks
env:
GITHUB_WORKSPACE: ${{ env.GITHUB_WORKSPACE }}
NETWORK: ${{ env.NETWORK }} # use lowercase version from env, not input
run: |
./.github/workflows/scripts/gcp-get-available-disks.sh
LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "$GITHUB_WORKSPACE/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1)
echo "STATE_VERSION: $LOCAL_STATE_VERSION"
LWD_TIP_DISK=$(gcloud compute images list --filter="status=READY AND name~lwd-cache-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-tip" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)
if [[ -z "$LWD_TIP_DISK" ]]; then
echo "No TIP disk found for lightwalletd on network: ${NETWORK}"
echo "lwd_tip_disk=${{ toJSON(false) }}" >> "$GITHUB_OUTPUT"
else
echo "Disk: $LWD_TIP_DISK"
echo "lwd_tip_disk=${{ toJSON(true) }}" >> "$GITHUB_OUTPUT"
fi
ZEBRA_TIP_DISK=$(gcloud compute images list --filter="status=READY AND name~zebrad-cache-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-tip" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)
if [[ -z "$ZEBRA_TIP_DISK" ]]; then
echo "No TIP disk found for Zebra on network: ${NETWORK}"
echo "zebra_tip_disk=${{ toJSON(false) }}" >> "$GITHUB_OUTPUT"
else
echo "Disk: $ZEBRA_TIP_DISK"
echo "zebra_tip_disk=${{ toJSON(true) }}" >> "$GITHUB_OUTPUT"
fi
ZEBRA_CHECKPOINT_DISK=$(gcloud compute images list --filter="status=READY AND name~zebrad-cache-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-checkpoint" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)
if [[ -z "$ZEBRA_CHECKPOINT_DISK" ]]; then
echo "No CHECKPOINT disk found for Zebra on network: ${NETWORK}"
echo "zebra_checkpoint_disk=${{ toJSON(false) }}" >> "$GITHUB_OUTPUT"
else
echo "Disk: $ZEBRA_CHECKPOINT_DISK"
echo "zebra_checkpoint_disk=${{ toJSON(true) }}" >> "$GITHUB_OUTPUT"
fi

View File

@@ -1,7 +1,3 @@
# This workflow is designed to test Zebra configuration files using Docker containers.
# - Runs a specified Docker image with the provided test variables and network settings.
# - Monitors and analyzes container logs for specific patterns to determine test success (sketched below).
# - Provides flexibility in testing various configurations and networks by dynamically adjusting input parameters.
name: Test Zebra Config Files
on:
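A minimal sketch of the log-monitoring approach described in the header comment; the container name, image variable, sleep duration, and success pattern are all hypothetical:

# Run the image, then scan its logs for an expected startup line:
docker run --detach --name zebra-test "${DOCKER_IMAGE}"
sleep 30
if docker logs zebra-test 2>&1 | grep -qE "${SUCCESS_PATTERN}"; then
  echo "Test passed: found the expected log pattern"
else
  echo "Test failed: pattern not found" >&2
  exit 1
fi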