2023-10-17 23:16:02 -07:00
name : Deploy Tests to GCP
2022-05-04 15:55:02 -07:00
on :
workflow_call :
inputs :
2022-06-23 16:22:52 -07:00
# Status and logging
2022-05-04 15:55:02 -07:00
test_id :
required : true
type : string
2022-06-23 16:22:52 -07:00
description : 'Unique identifier for the test'
2022-05-04 15:55:02 -07:00
test_description :
required : true
type : string
2022-06-23 16:22:52 -07:00
description : 'Explains what the test does'
2022-11-07 14:29:37 -08:00
height_grep_text :
required : false
type : string
description : 'Regular expression to find the tip height in test logs, and add it to newly created cached state image metadata'
2022-06-23 16:22:52 -07:00
# Test selection and parameters
2022-05-04 15:55:02 -07:00
test_variables :
required : true
type : string
2022-06-23 16:22:52 -07:00
description : 'Environmental variables used to select and configure the test'
network :
required : false
type : string
default : Mainnet
description : 'Zcash network to test against'
2022-11-07 14:29:37 -08:00
is_long_test :
required : false
type : boolean
default : false
description : 'Does this test need multiple run jobs? (Does it run longer than 6 hours?)'
2022-06-23 16:22:52 -07:00
# Cached state
#
2022-05-13 08:20:17 -07:00
# TODO: find a better name
2022-05-05 22:30:38 -07:00
root_state_path :
2022-05-04 15:55:02 -07:00
required : false
type : string
default : '/zebrad-cache'
2022-06-23 16:22:52 -07:00
description : 'Cached state base directory path'
2022-05-13 08:20:17 -07:00
# TODO: find a better name
2022-05-05 22:30:38 -07:00
zebra_state_dir :
required : false
type : string
default : ''
2022-06-23 16:22:52 -07:00
description : 'Zebra cached state directory and input image prefix to search in GCP'
2022-05-13 08:20:17 -07:00
# TODO: find a better name
lwd_state_dir :
required : false
type : string
default : ''
2022-06-23 16:22:52 -07:00
description : 'Lightwalletd cached state directory and input image prefix to search in GCP'
2022-05-04 15:55:02 -07:00
disk_prefix :
required : false
type : string
default : 'zebrad-cache'
2022-06-23 16:22:52 -07:00
description : 'Image name prefix, and `zebra_state_dir` name for newly created cached states'
2022-05-04 15:55:02 -07:00
disk_suffix :
required : false
type : string
2022-06-23 16:22:52 -07:00
description : 'Image name suffix'
2022-05-04 15:55:02 -07:00
needs_zebra_state :
required : true
type : boolean
2022-06-23 16:22:52 -07:00
description : 'Does the test use Zebra cached state?'
2022-05-13 15:02:05 -07:00
needs_lwd_state :
required : false
type : boolean
2022-06-23 16:22:52 -07:00
description : 'Does the test use Lightwalletd and Zebra cached state?'
2022-08-25 06:09:20 -07:00
# main branch states can be outdated and slower, but they can also be more reliable
prefer_main_cached_state :
required : false
type : boolean
default : false
description : 'Does the test prefer to use a main branch cached state?'
2022-05-04 15:55:02 -07:00
saves_to_disk :
required : true
type : boolean
2023-07-13 14:36:15 -07:00
description : 'Can this test create new or updated cached state disks?'
force_save_to_disk :
required : false
type : boolean
default : false
description : 'Force this test to create a new or updated cached state disk'
2022-06-23 16:22:52 -07:00
app_name :
required : false
type : string
default : 'zebra'
2022-09-08 13:25:00 -07:00
description : 'Application name, used to work out when a job is an update job'
2022-05-04 15:55:02 -07:00
env :
2022-06-30 03:33:01 -07:00
# How many previous log lines we show at the start of each new log job.
# Increase this number if some log lines are skipped between jobs
#
# We want to show all the logs since the last job finished,
# but we don't know how long it will be between jobs.
# 200 lines is about 6-15 minutes of sync logs, or one panic log.
EXTRA_LOG_LINES : 200
2022-09-08 13:25:00 -07:00
# How many blocks to wait before creating an updated cached state image.
# 1 day is approximately 1152 blocks.
2022-09-27 17:33:15 -07:00
CACHED_STATE_UPDATE_LIMIT : 576
2022-05-04 15:55:02 -07:00
jobs :
2023-10-19 04:52:14 -07:00
# Show all the test logs, then follow the logs of the test we just launched, until it finishes.
# Then check the result of the test.
#
# If `inputs.is_long_test` is `true`, the timeout is 5 days, otherwise it's 3 hours.
test-result :
name : Run ${{ inputs.test_id }} test
2023-09-12 23:07:29 -07:00
runs-on : zfnd-runners
2023-10-19 04:52:14 -07:00
timeout-minutes : ${{ inputs.is_long_test && 7200 || 180 }}
2022-09-08 13:25:00 -07:00
outputs :
cached_disk_name : ${{ steps.get-disk-name.outputs.cached_disk_name }}
2022-05-04 15:55:02 -07:00
permissions :
contents : 'read'
id-token : 'write'
steps :
2023-10-09 14:26:08 -07:00
- uses : actions/checkout@v4.1.0
2022-05-04 15:55:02 -07:00
with :
persist-credentials : false
fetch-depth : '2'
2023-08-11 11:34:49 -07:00
- uses : r7kamura/rust-problem-matchers@v1.4.0
2022-05-04 15:55:02 -07:00
- name : Inject slug/short variables
uses : rlespinasse/github-slug-action@v4
with :
short-length : 7
2022-11-25 13:11:22 -08:00
- name : Downcase network name for disks and labels
2022-05-04 15:55:02 -07:00
run : |
2022-11-25 13:11:22 -08:00
NETWORK_CAPS="${{ inputs.network }}"
echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV"
2022-05-04 15:55:02 -07:00
2022-11-21 10:18:26 -08:00
# Install our SSH secret
- name : Install private SSH key
2023-03-27 18:40:04 -07:00
uses : shimataro/ssh-key-action@v2.5.1
2022-11-21 10:18:26 -08:00
with :
key : ${{ secrets.GCP_SSH_PRIVATE_KEY }}
name : google_compute_engine
known_hosts : unnecessary
- name : Generate public SSH key
2023-09-12 23:07:29 -07:00
run : |
sudo apt-get update && sudo apt-get -qq install -y --no-install-recommends openssh-client
ssh-keygen -y -f ~/.ssh/google_compute_engine > ~/.ssh/google_compute_engine.pub
2022-11-21 10:18:26 -08:00
2022-05-04 15:55:02 -07:00
# Setup gcloud CLI
- name : Authenticate to Google Cloud
id : auth
2023-05-10 19:39:36 -07:00
uses : google-github-actions/auth@v1.1.1
2022-05-04 15:55:02 -07:00
with :
2022-08-23 20:49:55 -07:00
retries : '3'
2023-04-12 23:56:21 -07:00
workload_identity_provider : '${{ vars.GCP_WIF }}'
service_account : '${{ vars.GCP_DEPLOYMENTS_SA }}'
2022-11-09 22:32:21 -08:00
- name : Set up Cloud SDK
2023-05-13 18:34:22 -07:00
uses : google-github-actions/setup-gcloud@v1.1.1
2022-05-04 15:55:02 -07:00
2022-05-12 20:07:37 -07:00
# Find a cached state disk for this job, matching all of:
2022-05-13 08:20:17 -07:00
# - disk cached state (lwd_state_dir/zebra_state_dir or disk_prefix) - zebrad-cache or lwd-cache
2022-05-12 20:07:37 -07:00
# - state version (from the source code) - v{N}
# - network (network) - mainnet or testnet
# - disk target height kind (disk_suffix) - checkpoint or tip
#
2022-05-13 15:02:05 -07:00
# If the test needs a lightwalletd state (needs_lwd_state) set the variable DISK_PREFIX accordingly
# - To ${{ inputs.lwd_state_dir }} if needed
# - To ${{ inputs.zebra_state_dir || inputs.disk_prefix }} if not
#
2022-05-12 20:07:37 -07:00
# If there are multiple disks:
2022-08-28 22:29:38 -07:00
# - prefer images generated from the same commit, then
2022-08-25 06:09:20 -07:00
# - if prefer_main_cached_state is true, prefer images from the `main` branch, then
2022-08-28 22:29:38 -07:00
# - use any images from any other branch or commit.
2022-08-25 06:09:20 -07:00
# Within each of these categories:
2022-05-12 20:07:37 -07:00
# - prefer newer images to older images
2022-05-04 15:55:02 -07:00
#
2022-05-13 15:02:05 -07:00
# Passes the disk name to subsequent steps using $CACHED_DISK_NAME env variable
# Passes the state version to subsequent steps using $STATE_VERSION env variable
2023-09-18 02:02:00 -07:00
#
2023-10-17 23:16:02 -07:00
# TODO: move this script into a file, and call it from sub-find-cached-disks.yml as well.
2022-06-23 16:22:52 -07:00
- name : Find ${{ inputs.test_id }} cached state disk
2022-05-04 15:55:02 -07:00
id : get-disk-name
2023-10-19 04:52:14 -07:00
if : ${{ inputs.needs_zebra_state || inputs.needs_lwd_state }}
2022-05-04 15:55:02 -07:00
run : |
2023-10-06 06:00:57 -07:00
set -x
2022-05-04 15:55:02 -07:00
LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "$GITHUB_WORKSPACE/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1)
2022-05-13 08:20:17 -07:00
echo "STATE_VERSION: $LOCAL_STATE_VERSION"
2022-05-04 15:55:02 -07:00
2022-05-13 15:02:05 -07:00
if [[ "${{ inputs.needs_lwd_state }}" == "true" ]]; then
DISK_PREFIX=${{ inputs.lwd_state_dir }}
else
DISK_PREFIX=${{ inputs.zebra_state_dir || inputs.disk_prefix }}
fi
2022-08-28 02:47:42 -07:00
# Try to find an image generated from a previous step or run of this commit.
# Fields are listed in the "Create image from state disk" step.
#
2022-08-28 22:29:38 -07:00
# We don't want to match the full branch name here, because:
# - we want to ignore the different GITHUB_REFs across manually triggered jobs,
# pushed branches, and PRs,
# - previous commits might have been buggy,
# or they might have worked and hide bugs in this commit
# (we can't avoid this issue entirely, but we don't want to make it more likely), and
# - the branch name might have been shortened for the image.
2022-08-28 02:47:42 -07:00
#
# The probability of two matching short commit hashes within the same month is very low.
COMMIT_DISK_PREFIX="${DISK_PREFIX}-.+-${{ env.GITHUB_SHA_SHORT }}-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}"
2022-11-22 19:53:49 -08:00
COMMIT_CACHED_DISK_NAME=$(gcloud compute images list --filter="status=READY AND name~${COMMIT_DISK_PREFIX}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)
2022-08-28 02:47:42 -07:00
echo "${GITHUB_REF_SLUG_URL}-${{ env.GITHUB_SHA_SHORT }} Disk: $COMMIT_CACHED_DISK_NAME"
2022-08-25 06:09:20 -07:00
if [[ -n "$COMMIT_CACHED_DISK_NAME" ]]; then
echo "Description: $(gcloud compute images describe $COMMIT_CACHED_DISK_NAME --format='value(DESCRIPTION)')"
fi
2022-07-28 14:06:18 -07:00
2022-08-25 06:09:20 -07:00
# Try to find an image generated from the main branch
2022-11-22 19:53:49 -08:00
MAIN_CACHED_DISK_NAME=$(gcloud compute images list --filter="status=READY AND name~${DISK_PREFIX}-main-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)
2022-08-25 06:09:20 -07:00
echo "main Disk: $MAIN_CACHED_DISK_NAME"
if [[ -n "$MAIN_CACHED_DISK_NAME" ]]; then
echo "Description: $(gcloud compute images describe $MAIN_CACHED_DISK_NAME --format='value(DESCRIPTION)')"
2022-07-28 14:06:18 -07:00
fi
2022-05-04 15:55:02 -07:00
2022-08-25 06:09:20 -07:00
# Try to find an image generated from any other branch
2022-11-22 19:53:49 -08:00
ANY_CACHED_DISK_NAME=$(gcloud compute images list --filter="status=READY AND name~${DISK_PREFIX}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)
2022-08-25 06:09:20 -07:00
echo "any branch Disk: $ANY_CACHED_DISK_NAME"
if [[ -n "$ANY_CACHED_DISK_NAME" ]]; then
echo "Description: $(gcloud compute images describe $ANY_CACHED_DISK_NAME --format='value(DESCRIPTION)')"
fi
# Select a cached disk based on the job settings
CACHED_DISK_NAME="$COMMIT_CACHED_DISK_NAME"
if [[ -z "$CACHED_DISK_NAME" ]] && [[ "${{ inputs.prefer_main_cached_state }}" == "true" ]]; then
echo "Preferring main branch cached state to other branches..."
CACHED_DISK_NAME="$MAIN_CACHED_DISK_NAME"
fi
2022-05-12 20:07:37 -07:00
if [[ -z "$CACHED_DISK_NAME" ]]; then
2022-08-25 06:09:20 -07:00
CACHED_DISK_NAME="$ANY_CACHED_DISK_NAME"
2022-05-12 20:07:37 -07:00
fi
if [[ -z "$CACHED_DISK_NAME" ]]; then
echo "No cached state disk available"
2022-08-25 06:09:20 -07:00
echo "Expected ${COMMIT_DISK_PREFIX}"
echo "Also searched for cached disks from other branches"
2022-05-12 20:07:37 -07:00
echo "Cached state test jobs must depend on the cached state rebuild job"
exit 1
fi
2022-05-04 15:55:02 -07:00
2022-08-25 06:09:20 -07:00
echo "Selected Disk: $CACHED_DISK_NAME"
2023-01-31 12:40:05 -08:00
echo "cached_disk_name=$CACHED_DISK_NAME" >> "$GITHUB_OUTPUT"
2022-05-04 15:55:02 -07:00
2022-11-25 13:11:22 -08:00
echo "STATE_VERSION=$LOCAL_STATE_VERSION" >> "$GITHUB_ENV"
echo "CACHED_DISK_NAME=$CACHED_DISK_NAME" >> "$GITHUB_ENV"
2023-10-19 04:52:14 -07:00
echo "DISK_OPTION=image=$CACHED_DISK_NAME," >> "$GITHUB_ENV"
2022-05-04 15:55:02 -07:00
2022-06-23 16:22:52 -07:00
# Create a Compute Engine virtual machine and attach a cached state disk using the
2022-05-13 08:20:17 -07:00
# $CACHED_DISK_NAME variable as the source image to populate the disk cached state
2023-10-19 04:52:14 -07:00
# if the test needs it.
2022-06-23 16:22:52 -07:00
- name : Create ${{ inputs.test_id }} GCP compute instance
2022-05-04 15:55:02 -07:00
id : create-instance
2023-10-19 04:52:14 -07:00
shell : /usr/bin/bash -x {0}
2022-05-04 15:55:02 -07:00
run : |
gcloud compute instances create-with-container "${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \
2023-10-08 20:10:08 -07:00
--boot-disk-size 50GB \
2022-05-04 15:55:02 -07:00
--boot-disk-type pd-ssd \
2022-11-16 15:08:28 -08:00
--image-project=cos-cloud \
--image-family=cos-stable \
2023-10-19 04:52:14 -07:00
--create-disk=${DISK_OPTION}name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=400GB,type=pd-ssd \
2023-10-06 06:00:57 -07:00
--container-image=gcr.io/google-containers/busybox \
2023-04-12 23:56:21 -07:00
--machine-type ${{ vars.GCP_LARGE_MACHINE }} \
2023-05-09 17:45:32 -07:00
--network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \
2022-05-04 15:55:02 -07:00
--scopes cloud-platform \
2022-11-21 10:18:26 -08:00
--metadata=google-monitoring-enabled=TRUE,google-logging-enabled=TRUE \
2022-10-10 17:11:49 -07:00
--metadata-from-file=startup-script=.github/workflows/scripts/gcp-vm-startup-script.sh \
2022-11-25 13:11:22 -08:00
--labels=app=${{ inputs.app_name }},environment=test,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }},test=${{ inputs.test_id }} \
2022-05-04 15:55:02 -07:00
--tags ${{ inputs.app_name }} \
2023-04-12 23:56:21 -07:00
--zone ${{ vars.GCP_ZONE }}
2022-05-04 15:55:02 -07:00
2023-10-19 04:52:14 -07:00
# Format the mounted disk if the test doesn't use a cached state.
- name : Format ${{ inputs.test_id }} volume
if : ${{ !inputs.needs_zebra_state && !inputs.needs_lwd_state }}
shell : /usr/bin/bash -ex {0}
2022-10-10 17:11:49 -07:00
run : |
2022-11-16 06:27:09 -08:00
gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
2023-04-12 23:56:21 -07:00
--zone ${{ vars.GCP_ZONE }} \
2022-10-10 17:11:49 -07:00
--ssh-flag="-o ServerAliveInterval=5" \
--ssh-flag="-o ConnectionAttempts=20" \
--ssh-flag="-o ConnectTimeout=5" \
2023-10-09 10:59:59 -07:00
--command=' \
set -ex;
# Extract the correct disk name based on the device-name
2023-10-19 04:52:14 -07:00
DISK_NAME=$(ls -l /dev/disk/by-id | grep -oE "google-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} -> ../../[^ ]+" | grep -oE "/[^/]+$" | cut -c 2-);
sudo mkfs.ext4 -v /dev/$DISK_NAME \
2023-10-09 10:59:59 -07:00
'
2022-05-13 08:20:17 -07:00
2023-10-19 04:52:14 -07:00
# Launch the test with the previously created disk or cached state.
#
# This step uses a $MOUNT_FLAGS variable to mount the disk to the docker container.
# If the test needs Lightwalletd state, we add the Lightwalletd state mount to the $MOUNT_FLAGS variable.
2022-06-23 16:22:52 -07:00
#
# SSH into the just created VM, and create a Docker container to run the incoming test
2022-10-10 17:11:49 -07:00
# from ${{ inputs.test_id }}, then mount the sudo docker volume created in the previous job.
2022-05-13 08:20:17 -07:00
#
# In this step we're using the same disk for simplicity, as mounting multiple disks to the
# VM and to the container might require more steps in this workflow, and additional
# considerations.
#
2023-10-19 04:52:14 -07:00
# The disk mounted in the VM is located at /dev/$DISK_NAME, we mount the root `/` of this disk to the docker
# container, and might have two different paths (if lightwalletd state is needed):
2022-05-13 08:20:17 -07:00
# - /var/cache/zebrad-cache -> ${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} -> $ZEBRA_CACHED_STATE_DIR
# - /var/cache/lwd-cache -> ${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }} -> $LIGHTWALLETD_DATA_DIR
#
2023-10-06 06:00:57 -07:00
# Currently we do this by mounting the same disk at both paths.
#
2022-05-13 08:20:17 -07:00
# This doesn't cause any path conflicts, because Zebra and lightwalletd create different
# subdirectories for their data. (But Zebra, lightwalletd, and the test harness must not
# delete the whole cache directory.)
#
2023-10-19 04:52:14 -07:00
# These paths must match the variables used by the tests in Rust, which are also set in
2023-10-17 23:16:02 -07:00
# `ci-unit-tests-docker.yml` to be able to run these tests.
2022-05-13 08:20:17 -07:00
#
# Although we're mounting the disk root to both directories, Zebra and Lightwalletd
# will only respect the values from $ZEBRA_CACHED_STATE_DIR and $LIGHTWALLETD_DATA_DIR,
2023-10-19 04:52:14 -07:00
# the inputs like ${{ inputs.zebra_state_dir }} and ${{ inputs.lwd_state_dir }}
# are only used to match those variables' paths.
2022-06-27 17:36:18 -07:00
- name : Launch ${{ inputs.test_id }} test
2023-10-19 04:52:14 -07:00
id : launch-test
shell : /usr/bin/bash -x {0}
2022-10-10 17:11:49 -07:00
run : |
2022-11-16 06:27:09 -08:00
gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
2023-04-12 23:56:21 -07:00
--zone ${{ vars.GCP_ZONE }} \
2022-10-10 17:11:49 -07:00
--ssh-flag="-o ServerAliveInterval=5" \
--ssh-flag="-o ConnectionAttempts=20" \
--ssh-flag="-o ConnectTimeout=5" \
2023-10-09 10:59:59 -07:00
--command=' \
2023-10-19 04:52:14 -07:00
2023-10-09 10:59:59 -07:00
# Extract the correct disk name based on the device-name
2023-10-19 04:52:14 -07:00
DISK_NAME=$(ls -l /dev/disk/by-id | grep -oE "google-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} -> ../../[^ ]+" | grep -oE "/[^/]+$" | cut -c 2-)
MOUNT_FLAGS="--mount type=volume,volume-driver=local,volume-opt=device=/dev/$DISK_NAME,volume-opt=type=ext4,dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }}"
# Check if we need to mount for Lightwalletd state
# lightwalletd-full-sync reads Zebra and writes lwd, so it is handled specially.
if [[ "${{ inputs.needs_lwd_state }}" == "true" || "${{ inputs.test_id }}" == "lwd-full-sync" ]]; then
MOUNT_FLAGS="$MOUNT_FLAGS --mount type=volume,volume-driver=local,volume-opt=device=/dev/$DISK_NAME,volume-opt=type=ext4,dst=${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }}"
fi
2023-10-09 10:59:59 -07:00
2022-10-10 17:11:49 -07:00
sudo docker run \
--name ${{ inputs.test_id }} \
--tty \
--detach \
${{ inputs.test_variables }} \
2023-10-19 04:52:14 -07:00
${MOUNT_FLAGS} \
2023-04-12 23:56:21 -07:00
${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \
2023-10-09 10:59:59 -07:00
'
# Show debug logs if previous job failed
- name : Show debug logs if previous job failed
2023-10-19 04:52:14 -07:00
if : ${{ failure() }}
shell : /usr/bin/bash -x {0}
2023-10-09 10:59:59 -07:00
run : |
gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
--zone ${{ vars.GCP_ZONE }} \
--ssh-flag="-o ServerAliveInterval=5" \
--ssh-flag="-o ConnectionAttempts=20" \
--ssh-flag="-o ConnectTimeout=5" \
--command=' \
lsblk;
sudo lsof /dev/$DISK_NAME;
sudo dmesg;
sudo journalctl -b \
'
2022-10-10 17:11:49 -07:00
2022-12-05 17:36:05 -08:00
# Show all the logs since the container launched,
2023-07-10 11:20:50 -07:00
# following until we see zebrad startup messages.
#
# This check limits the number of log lines, so tests running on the wrong network don't
# run until the job timeout. If Zebra does a complete recompile, there are a few hundred log
# lines before the startup logs. So that's what we use here.
2022-12-05 17:36:05 -08:00
#
# The log pipeline ignores the exit status of `docker logs`.
# It also ignores the expected 'broken pipe' error from `tee`,
# which happens when `grep` finds a matching output and moves on to the next job.
#
# Errors in the tests are caught by the final test status job.
2023-07-10 11:20:50 -07:00
- name : Check startup logs for ${{ inputs.test_id }}
2023-10-19 04:52:14 -07:00
shell : /usr/bin/bash -x {0}
2023-07-10 11:20:50 -07:00
run : |
gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
--zone ${{ vars.GCP_ZONE }} \
--ssh-flag="-o ServerAliveInterval=5" \
--ssh-flag="-o ConnectionAttempts=20" \
--ssh-flag="-o ConnectTimeout=5" \
2023-10-09 10:59:59 -07:00
--command=' \
2023-07-10 11:20:50 -07:00
sudo docker logs \
--tail all \
--follow \
${{ inputs.test_id }} | \
head -700 | \
2023-10-09 10:59:59 -07:00
tee --output-error=exit-nopipe /dev/stderr | \
2023-07-10 11:20:50 -07:00
grep --max-count=1 --extended-regexp --color=always \
2023-10-09 17:49:06 -07:00
"Zcash network: ${{ inputs.network }}" ; \
2023-10-09 10:59:59 -07:00
'
2023-07-10 11:20:50 -07:00
2023-09-12 23:07:29 -07:00
# Check that the container executed at least 1 Rust test harness test, and that all tests passed.
# Then wait for the container to finish, and exit with the test's exit status.
# Also shows all the test logs.
#
# If the container has already finished, `docker wait` should return its status.
# But sometimes this doesn't work, so we use `docker inspect` as a fallback.
#
# `docker wait` prints the container exit status as a string, but we need to exit the `ssh` command
# with that status.
# (`docker wait` can also wait for multiple containers, but we only ever wait for a single container.)
- name : Result of ${{ inputs.test_id }} test
2023-10-19 04:52:14 -07:00
shell : /usr/bin/bash -x {0}
2022-10-10 17:11:49 -07:00
run : |
2022-11-16 06:27:09 -08:00
gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
2023-04-12 23:56:21 -07:00
--zone ${{ vars.GCP_ZONE }} \
2022-10-10 17:11:49 -07:00
--ssh-flag="-o ServerAliveInterval=5" \
--ssh-flag="-o ConnectionAttempts=20" \
--ssh-flag="-o ConnectTimeout=5" \
2023-09-12 23:07:29 -07:00
--command=' \
2022-10-10 17:11:49 -07:00
sudo docker logs \
--tail all \
--follow \
${{ inputs.test_id }} | \
2023-10-09 10:59:59 -07:00
tee --output-error=exit-nopipe /dev/stderr | \
2022-10-10 17:11:49 -07:00
grep --max-count=1 --extended-regexp --color=always \
2023-10-09 10:59:59 -07:00
"test result: .*ok.* [1-9][0-9]* passed.*finished in" ;
LOGS_EXIT_STATUS=$?;
EXIT_STATUS=$(sudo docker wait ${{ inputs.test_id }} || echo "Error retrieving exit status");
echo "sudo docker exit status: $EXIT_STATUS";
2023-09-20 11:58:18 -07:00
2023-10-09 10:59:59 -07:00
# If grep found the pattern, exit with the Docker container"s exit status
if [ $LOGS_EXIT_STATUS -eq 0 ]; then
exit $EXIT_STATUS;
fi
2023-09-20 11:58:18 -07:00
2023-10-09 10:59:59 -07:00
# Handle other potential errors here
echo "An error occurred while processing the logs.";
exit 1; \
2023-09-12 23:07:29 -07:00
'
2022-06-30 03:33:01 -07:00
2023-09-12 23:07:29 -07:00
# create a state image from the instance's state disk, if requested by the caller
create-state-image :
name : Create ${{ inputs.test_id }} cached state image
2022-07-28 14:07:29 -07:00
runs-on : ubuntu-latest
2023-10-19 04:52:14 -07:00
needs : [ test-result ]
2023-09-12 23:07:29 -07:00
# We run exactly one of without-cached-state or with-cached-state, and we always skip the other one.
# Normally, if a job is skipped, all the jobs that depend on it are also skipped.
# So we need to override the default success() check to make this job run.
if : ${{ !cancelled() && !failure() && (inputs.saves_to_disk || inputs.force_save_to_disk) }}
2022-07-28 14:07:29 -07:00
permissions :
contents : 'read'
id-token : 'write'
steps :
2023-10-09 14:26:08 -07:00
- uses : actions/checkout@v4.1.0
2022-07-28 14:07:29 -07:00
with :
persist-credentials : false
fetch-depth : '2'
2023-09-12 23:07:29 -07:00
- uses : r7kamura/rust-problem-matchers@v1.4.0
2022-07-28 14:07:29 -07:00
- name : Inject slug/short variables
uses : rlespinasse/github-slug-action@v4
with :
short-length : 7
2023-09-12 23:07:29 -07:00
# Performs formatting on disk name components.
#
# Disk images in GCP are required to be in lowercase, but the blockchain network
# uses sentence case, so we need to downcase ${{ inputs.network }}.
#
# Disk image names in GCP are limited to 63 characters, so we need to limit
# branch names to 12 characters.
#
# Passes ${{ inputs.network }} to subsequent steps using $NETWORK env variable.
# Passes ${{ env.GITHUB_REF_SLUG_URL }} to subsequent steps using $SHORT_GITHUB_REF env variable.
- name : Format network name and branch name for disks
2022-10-10 17:11:49 -07:00
run : |
2023-09-12 23:07:29 -07:00
NETWORK_CAPS="${{ inputs.network }}"
echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV"
LONG_GITHUB_REF="${{ env.GITHUB_REF_SLUG_URL }}"
echo "SHORT_GITHUB_REF=${LONG_GITHUB_REF:0:12}" >> "$GITHUB_ENV"
2022-07-28 17:43:47 -07:00
2022-11-21 10:18:26 -08:00
# Install our SSH secret
- name : Install private SSH key
2023-03-27 18:40:04 -07:00
uses : shimataro/ssh-key-action@v2.5.1
2022-11-21 10:18:26 -08:00
with :
key : ${{ secrets.GCP_SSH_PRIVATE_KEY }}
name : google_compute_engine
known_hosts : unnecessary
- name : Generate public SSH key
2022-10-10 17:11:49 -07:00
run : |
2023-09-12 23:07:29 -07:00
sudo apt-get update && sudo apt-get -qq install -y --no-install-recommends openssh-client
ssh-keygen -y -f ~/.ssh/google_compute_engine > ~/.ssh/google_compute_engine.pub
2022-11-21 10:18:26 -08:00
2022-05-25 23:12:45 -07:00
# Setup gcloud CLI
- name : Authenticate to Google Cloud
id : auth
2023-05-10 19:39:36 -07:00
uses : google-github-actions/auth@v1.1.1
2022-05-25 23:12:45 -07:00
with :
2023-04-12 23:56:21 -07:00
workload_identity_provider : '${{ vars.GCP_WIF }}'
service_account : '${{ vars.GCP_DEPLOYMENTS_SA }}'
2022-11-09 22:32:21 -08:00
- name : Set up Cloud SDK
2023-05-13 18:34:22 -07:00
uses : google-github-actions/setup-gcloud@v1.1.1
2022-05-25 23:12:45 -07:00
# Get the state version from the local constants.rs file to be used in the image creation,
# as the state version is part of the disk image name.
#
# Passes the state version to subsequent steps using $STATE_VERSION env variable
- name : Get state version from constants.rs
run : |
LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" $GITHUB_WORKSPACE/zebra-state/src/constants.rs | grep -oE "[0-9]+" | tail -n1)
echo "STATE_VERSION: $LOCAL_STATE_VERSION"
2022-11-25 13:11:22 -08:00
echo "STATE_VERSION=$LOCAL_STATE_VERSION" >> "$GITHUB_ENV"
2022-09-12 14:28:21 -07:00
2022-09-08 13:25:00 -07:00
# Sets the $UPDATE_SUFFIX env var to "-u" if updating a previous cached state,
# and the empty string otherwise.
#
# Also sets a unique date and time suffix $TIME_SUFFIX.
- name : Set update and time suffixes
run : |
UPDATE_SUFFIX=""
if [[ "${{ inputs.needs_zebra_state }}" == "true" ]] && [[ "${{ inputs.app_name }}" == "zebrad" ]]; then
UPDATE_SUFFIX="-u"
fi
# TODO: find a better logic for the lwd-full-sync case
if [[ "${{ inputs.needs_lwd_state }}" == "true" ]] && [[ "${{ inputs.app_name }}" == "lightwalletd" ]] && [[ "${{ inputs.test_id }}" != 'lwd-full-sync' ]]; then
UPDATE_SUFFIX="-u"
fi
2022-09-12 14:28:21 -07:00
# We're going to delete old images after a few days, so we only need the time here
TIME_SUFFIX=$(date '+%H%M%S' --utc)
2022-09-08 13:25:00 -07:00
2022-11-25 13:11:22 -08:00
echo "UPDATE_SUFFIX=$UPDATE_SUFFIX" >> "$GITHUB_ENV"
echo "TIME_SUFFIX=$TIME_SUFFIX" >> "$GITHUB_ENV"
2022-05-25 23:12:45 -07:00
2023-07-13 14:36:15 -07:00
# Get the full initial and running database versions from the test logs.
# These versions are used as part of the disk description and labels.
#
# If these versions are missing from the logs, the job fails.
#
# Typically, the database versions are around line 20 in the logs.
# But we check the first 1000 log lines, just in case the test harness recompiles all the
# dependencies before running the test. (This can happen if the cache is invalid.)
#
# Passes the versions to subsequent steps using the $INITIAL_DISK_DB_VERSION,
# $RUNNING_DB_VERSION, and $DB_VERSION_SUMMARY env variables.
- name : Get database versions from logs
2023-10-19 04:52:14 -07:00
shell : /usr/bin/bash -x {0}
2023-07-13 14:36:15 -07:00
run : |
INITIAL_DISK_DB_VERSION=""
RUNNING_DB_VERSION=""
DB_VERSION_SUMMARY=""
DOCKER_LOGS=$( \
gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
--zone ${{ vars.GCP_ZONE }} \
--ssh-flag="-o ServerAliveInterval=5" \
--ssh-flag="-o ConnectionAttempts=20" \
--ssh-flag="-o ConnectTimeout=5" \
2023-10-09 10:59:59 -07:00
--command=' \
2023-07-13 14:36:15 -07:00
sudo docker logs ${{ inputs.test_id }} | head -1000 \
2023-10-09 10:59:59 -07:00
')
2023-07-13 14:36:15 -07:00
# either a semantic version or "creating new database"
INITIAL_DISK_DB_VERSION=$( \
echo "$DOCKER_LOGS" | \
grep --extended-regexp --only-matching 'initial disk state version : [ 0 -9a-z\.]+' | \
grep --extended-regexp --only-matching '[0-9a-z\.]+' | \
tail -1 || \
[ [ $? == 1 ]] \
)
if [[ -z "$INITIAL_DISK_DB_VERSION" ]]; then
echo "Checked logs:"
echo ""
echo "$DOCKER_LOGS"
echo ""
echo "Missing initial disk database version in logs: $INITIAL_DISK_DB_VERSION"
# Fail the tests, because Zebra didn't log the initial disk database version,
# or the regex in this step is wrong.
false
fi
if [[ "$INITIAL_DISK_DB_VERSION" = "creating.new.database" ]]; then
INITIAL_DISK_DB_VERSION="new"
else
INITIAL_DISK_DB_VERSION="v${INITIAL_DISK_DB_VERSION//./-}"
fi
echo "Found initial disk database version in logs: $INITIAL_DISK_DB_VERSION"
echo "INITIAL_DISK_DB_VERSION=$INITIAL_DISK_DB_VERSION" >> "$GITHUB_ENV"
RUNNING_DB_VERSION=$( \
echo "$DOCKER_LOGS" | \
grep --extended-regexp --only-matching 'running state version : [ 0 -9 \.]+' | \
grep --extended-regexp --only-matching '[0-9\.]+' | \
tail -1 || \
[ [ $? == 1 ]] \
)
if [[ -z "$RUNNING_DB_VERSION" ]]; then
echo "Checked logs:"
echo ""
echo "$DOCKER_LOGS"
echo ""
echo "Missing running database version in logs: $RUNNING_DB_VERSION"
# Fail the tests, because Zebra didn't log the running database version,
# or the regex in this step is wrong.
false
fi
RUNNING_DB_VERSION="v${RUNNING_DB_VERSION//./-}"
echo "Found running database version in logs: $RUNNING_DB_VERSION"
echo "RUNNING_DB_VERSION=$RUNNING_DB_VERSION" >> "$GITHUB_ENV"
if [[ "$INITIAL_DISK_DB_VERSION" = "$RUNNING_DB_VERSION" ]]; then
DB_VERSION_SUMMARY="$RUNNING_DB_VERSION"
elif [[ "$INITIAL_DISK_DB_VERSION" = "new" ]]; then
DB_VERSION_SUMMARY="$RUNNING_DB_VERSION in new database"
else
DB_VERSION_SUMMARY="$INITIAL_DISK_DB_VERSION changing to $RUNNING_DB_VERSION"
fi
echo "Summarised database versions from logs: $DB_VERSION_SUMMARY"
echo "DB_VERSION_SUMMARY=$DB_VERSION_SUMMARY" >> "$GITHUB_ENV"
2022-05-25 23:12:45 -07:00
# Get the sync height from the test logs, which is later used as part of the
2022-09-08 13:25:00 -07:00
# disk description and labels.
2022-10-10 17:11:49 -07:00
#
2022-05-25 23:12:45 -07:00
# The regex used to grep the sync height is provided by ${{ inputs.height_grep_text }},
2022-05-31 20:53:51 -07:00
# this allows us to dynamically change the height as needed by different situations or
2022-09-08 13:25:00 -07:00
# based on the logs output from different tests.
2022-05-25 23:12:45 -07:00
#
2022-09-08 13:25:00 -07:00
# If the sync height is missing from the logs, the job fails.
#
2023-07-13 14:36:15 -07:00
# Passes the sync height to subsequent steps using the $SYNC_HEIGHT env variable.
2022-05-25 23:12:45 -07:00
- name : Get sync height from logs
2023-10-19 04:52:14 -07:00
shell : /usr/bin/bash -x {0}
2022-05-25 23:12:45 -07:00
run : |
SYNC_HEIGHT=""
2022-10-10 17:11:49 -07:00
DOCKER_LOGS=$( \
2022-11-16 06:27:09 -08:00
gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
2023-04-12 23:56:21 -07:00
--zone ${{ vars.GCP_ZONE }} \
2022-10-10 17:11:49 -07:00
--ssh-flag="-o ServerAliveInterval=5" \
--ssh-flag="-o ConnectionAttempts=20" \
--ssh-flag="-o ConnectTimeout=5" \
2023-10-09 10:59:59 -07:00
--command=' \
2022-11-16 06:27:09 -08:00
sudo docker logs ${{ inputs.test_id }} --tail 200 \
2023-10-09 10:59:59 -07:00
')
2022-10-10 17:11:49 -07:00
2022-09-08 13:25:00 -07:00
SYNC_HEIGHT=$( \
2022-10-10 17:11:49 -07:00
echo "$DOCKER_LOGS" | \
2022-09-08 13:25:00 -07:00
grep --extended-regexp --only-matching '${{ inputs.height_grep_text }}[0-9]+' | \
2023-07-13 14:36:15 -07:00
grep --extended-regexp --only-matching '[0-9]+' | \
2022-09-08 13:25:00 -07:00
tail -1 || \
[ [ $? == 1 ]] \
)
2022-09-12 14:28:21 -07:00
2022-09-08 13:25:00 -07:00
if [[ -z "$SYNC_HEIGHT" ]]; then
2023-07-13 14:36:15 -07:00
echo "Checked logs:"
echo ""
echo "$DOCKER_LOGS"
echo ""
2022-09-08 13:25:00 -07:00
echo "Missing sync height in logs: $SYNC_HEIGHT"
# Fail the tests, because Zebra and lightwalletd didn't log their sync heights,
# or the CI workflow sync height regex is wrong.
false
fi
2022-09-12 14:28:21 -07:00
2022-09-08 13:25:00 -07:00
echo "Found sync height in logs: $SYNC_HEIGHT"
2022-11-25 13:11:22 -08:00
echo "SYNC_HEIGHT=$SYNC_HEIGHT" >> "$GITHUB_ENV"
2022-05-25 23:12:45 -07:00
2022-09-08 13:25:00 -07:00
# Get the original cached state height from google cloud.
2022-08-25 06:09:20 -07:00
#
2022-09-08 13:25:00 -07:00
# If the height is missing from the image labels, uses zero instead.
#
# TODO: fail the job if needs_zebra_state but the height is missing
# we can make this change after all the old images have been deleted, this should happen around 15 September 2022
# we'll also need to do a manual checkpoint rebuild before opening the PR for this change
#
# Passes the original height to subsequent steps using $ORIGINAL_HEIGHT env variable.
- name : Get original cached state height from google cloud
2022-08-25 06:09:20 -07:00
run : |
2022-09-08 13:25:00 -07:00
ORIGINAL_HEIGHT="0"
2023-10-19 04:52:14 -07:00
ORIGINAL_DISK_NAME="${{ format('{0}', needs.test-result.outputs.cached_disk_name) }}"
2022-08-25 06:09:20 -07:00
2023-09-18 02:02:00 -07:00
if [[ -n "$ORIGINAL_DISK_NAME" ]]; then
ORIGINAL_HEIGHT=$(gcloud compute images list --filter="status=READY AND name=$ORIGINAL_DISK_NAME" --format="value(labels.height)")
2022-09-08 13:25:00 -07:00
ORIGINAL_HEIGHT=${ORIGINAL_HEIGHT:-0}
2023-09-18 02:02:00 -07:00
echo "$ORIGINAL_DISK_NAME height: $ORIGINAL_HEIGHT"
else
ORIGINAL_DISK_NAME="new-disk"
echo "newly created disk, original height set to 0"
2022-08-25 06:09:20 -07:00
fi
2022-09-12 14:28:21 -07:00
2022-11-25 13:11:22 -08:00
echo "ORIGINAL_HEIGHT=$ORIGINAL_HEIGHT" >> "$GITHUB_ENV"
2023-09-18 02:02:00 -07:00
echo "ORIGINAL_DISK_NAME=$ORIGINAL_DISK_NAME" >> "$GITHUB_ENV"
2022-09-12 14:28:21 -07:00
2022-08-28 22:29:38 -07:00
# Create an image from the state disk, which will be used for any tests that start
# after it is created. These tests can be in the same workflow, or in a different PR.
2022-08-28 02:47:42 -07:00
#
2022-08-28 22:29:38 -07:00
# Using the newest image makes future jobs faster, because it is closer to the chain tip.
#
2022-09-08 13:25:00 -07:00
# Skips creating updated images if the original image is less than $CACHED_STATE_UPDATE_LIMIT behind the current tip.
# Full sync images are always created.
#
2022-08-28 22:29:38 -07:00
# The image can contain:
# - Zebra cached state, or
# - Zebra + lightwalletd cached state.
2022-08-28 02:47:42 -07:00
# Which cached state is being saved to the disk is defined by ${{ inputs.disk_prefix }}.
#
2022-08-28 22:29:38 -07:00
# Google Cloud doesn't have an atomic image replacement operation.
# We don't want to delete and re-create the image, because that causes a ~5 minute
# window where might be no recent image. So we add an extra image with a unique name,
# which gets selected because it has a later creation time.
# This also simplifies the process of deleting old images,
# because we don't have to worry about accidentally deleting all the images.
#
2022-08-28 02:47:42 -07:00
# The timestamp makes images from the same commit unique,
# as long as they don't finish in the same second.
2022-08-28 22:29:38 -07:00
# (This is unlikely, because each image created by a workflow has a different name.)
#
# The image name must also be 63 characters or less.
2022-05-13 08:20:17 -07:00
#
# Force the image creation (--force) as the disk is still attached even though is not being
2022-08-28 02:47:42 -07:00
# used by the container.
2022-05-13 08:20:17 -07:00
- name : Create image from state disk
run : |
2022-09-08 13:25:00 -07:00
MINIMUM_UPDATE_HEIGHT=$((ORIGINAL_HEIGHT+CACHED_STATE_UPDATE_LIMIT))
2023-07-13 14:36:15 -07:00
if [[ -z "$UPDATE_SUFFIX" ]] || [[ "$SYNC_HEIGHT" -gt "$MINIMUM_UPDATE_HEIGHT" ]] || [[ "${{ inputs.force_save_to_disk }}" == "true" ]]; then
2022-09-08 13:25:00 -07:00
gcloud compute images create \
2023-04-12 23:56:21 -07:00
"${{ inputs.disk_prefix }}-${SHORT_GITHUB_REF}-${{ env.GITHUB_SHA_SHORT }}-v${{ env.STATE_VERSION }}-${NETWORK}-${{ inputs.disk_suffix }}${UPDATE_SUFFIX}-${TIME_SUFFIX}" \
2022-09-08 13:25:00 -07:00
--force \
--source-disk=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \
2023-04-12 23:56:21 -07:00
--source-disk-zone=${{ vars.GCP_ZONE }} \
2022-09-08 13:25:00 -07:00
--storage-location=us \
2023-07-13 14:36:15 -07:00
--description="Created from commit ${{ env.GITHUB_SHA_SHORT }} with height ${{ env.SYNC_HEIGHT }} and database format ${{ env.DB_VERSION_SUMMARY }}" \
2023-09-18 02:02:00 -07:00
--labels="height=${{ env.SYNC_HEIGHT }},purpose=${{ inputs.disk_prefix }},commit=${{ env.GITHUB_SHA_SHORT }},state-version=${{ env.STATE_VERSION }},state-running-version=${RUNNING_DB_VERSION},initial-state-disk-version=${INITIAL_DISK_DB_VERSION},network=${NETWORK},target-height-kind=${{ inputs.disk_suffix }},update-flag=${UPDATE_SUFFIX},force-save=${{ inputs.force_save_to_disk }},updated-from-height=${ORIGINAL_HEIGHT},updated-from-disk=${ORIGINAL_DISK_NAME},test-id=${{ inputs.test_id }},app-name=${{ inputs.app_name }}"
2022-09-08 13:25:00 -07:00
else
2023-09-18 02:02:00 -07:00
echo "Skipped cached state update because the new sync height $SYNC_HEIGHT was less than $CACHED_STATE_UPDATE_LIMIT blocks above the original height $ORIGINAL_HEIGHT of $ORIGINAL_DISK_NAME"
2022-09-08 13:25:00 -07:00
fi
2022-05-13 08:20:17 -07:00
2022-06-23 16:22:52 -07:00
# delete the Google Cloud instance for this test
2022-05-25 23:12:45 -07:00
delete-instance :
name : Delete ${{ inputs.test_id }} instance
runs-on : ubuntu-latest
needs : [ create-state-image ]
# If a disk generation step timeouts (+6 hours) the previous job (creating the image) will be skipped.
# Even if the instance continues running, no image will be created, so it's better to delete it.
if : always()
continue-on-error : true
permissions :
contents : 'read'
id-token : 'write'
steps :
2023-10-09 14:26:08 -07:00
- uses : actions/checkout@v4.1.0
2022-06-30 03:33:01 -07:00
with :
persist-credentials : false
fetch-depth : '2'
2023-08-11 11:34:49 -07:00
- uses : r7kamura/rust-problem-matchers@v1.4.0
2022-06-30 03:33:01 -07:00
2022-05-25 23:12:45 -07:00
- name : Inject slug/short variables
uses : rlespinasse/github-slug-action@v4
with :
short-length : 7
# Setup gcloud CLI
- name : Authenticate to Google Cloud
id : auth
2023-05-10 19:39:36 -07:00
uses : google-github-actions/auth@v1.1.1
2022-05-25 23:12:45 -07:00
with :
2023-04-12 23:56:21 -07:00
workload_identity_provider : '${{ vars.GCP_WIF }}'
service_account : '${{ vars.GCP_DEPLOYMENTS_SA }}'
2022-11-09 22:32:21 -08:00
- name : Set up Cloud SDK
2023-05-13 18:34:22 -07:00
uses : google-github-actions/setup-gcloud@v1.1.1
2022-05-25 23:12:45 -07:00
# Deletes the instances that has been recently deployed in the actual commit after all
# previous jobs have run, no matter the outcome of the job.
2022-05-04 15:55:02 -07:00
- name : Delete test instance
continue-on-error : true
run : |
INSTANCE=$(gcloud compute instances list --filter=${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} --format='value(NAME)')
if [ -z "${INSTANCE}" ]; then
echo "No instance to delete"
else
2023-04-12 23:56:21 -07:00
gcloud compute instances delete "${INSTANCE}" --zone "${{ vars.GCP_ZONE }}" --delete-disks all --quiet
2022-05-04 15:55:02 -07:00
fi