refactor: use gcloud to search for cached disk state (#3775)

Gustavo Valverde 2022-03-07 23:19:04 -04:00 committed by GitHub
parent c724c354d7
commit 4a5f0c25ce
1 changed file with 11 additions and 39 deletions

@@ -194,7 +194,6 @@ jobs:
runs-on: ubuntu-latest
needs: build
outputs:
disk_short_sha: ${{ steps.disk-short-sha.outputs.disk_short_sha }}
any_changed: ${{ steps.changed-files-specific.outputs.any_changed }}
steps:
- uses: actions/checkout@v2.4.0
@@ -306,22 +305,6 @@ jobs:
--storage-location=us \
--description="Created from head branch ${{ env.GITHUB_HEAD_REF_SLUG_URL }} targeting ${{ env.GITHUB_BASE_REF_SLUG }} from PR ${{ env.GITHUB_REF_SLUG_URL }} with commit ${{ env.GITHUB_EVENT_PULL_REQUEST_HEAD_SHA }}"
- name: Output and write the disk SHORT_SHA to a txt
if: steps.sync-to-checkpoint.outcome == 'success'
id: disk-short-sha
run: |
short_sha=$(echo "${{ env.GITHUB_SHA_SHORT }}")
echo "$short_sha" > latest-disk-state-sha.txt
echo "::set-output name=disk_short_sha::$short_sha"
- name: Upload the disk state txt
if: steps.sync-to-checkpoint.outcome == 'success'
uses: actions/upload-artifact@v2.3.1
with:
name: latest-disk-state-sha
path: latest-disk-state-sha.txt
retention-days: 1095
- name: Delete test instance
# Do not delete the instance if the sync times out in GitHub
if: ${{ steps.sync-to-checkpoint.outcome == 'success' || steps.sync-to-checkpoint.outcome == 'failure' }}
@@ -346,26 +329,6 @@ jobs:
run: |
echo LOWER_NET_NAME="${{ github.event.inputs.network || env.NETWORK }}" | awk '{print tolower($0)}' >> $GITHUB_ENV
# Get the latest uploaded txt with the disk SHORT_SHA from this workflow
- name: Download latest disk state SHORT_SHA
uses: dawidd6/action-download-artifact@v2.17.0
# Just search for the latest uploaded artifact if the previous disk regeneration job was skipped,
# otherwise use the output from ${{ needs.regenerate-stateful-disks.outputs.disk_short_sha }}
if: ${{ needs.regenerate-stateful-disks.outputs.any_changed != 'true' || github.event.inputs.regenerate-disks != 'true'}}
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
workflow: test.yml
workflow_conclusion: ''
name: latest-disk-state-sha
check_artifacts: true
- name: Get disk state SHA from txt
id: get-disk-sha
if: ${{ needs.regenerate-stateful-disks.outputs.any_changed != 'true' || github.event.inputs.regenerate-disks != 'true'}}
run: |
output=$(cat latest-disk-state-sha.txt)
echo "::set-output name=sha::$output"
# Setup gcloud CLI
- name: Authenticate to Google Cloud
id: auth
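
For reference, the LOWER_NET_NAME line kept as context above pipes the whole KEY=value string through awk's tolower(), so both the key and the value end up lowercase; that is why later steps read env.lower_net_name rather than env.LOWER_NET_NAME. A minimal standalone sketch, with "Mainnet" assumed as a placeholder network value:

    # Runnable in any bash shell; NETWORK stands in for the workflow's network input/env var.
    NETWORK="Mainnet"
    # awk lowercases the entire KEY=value string before the workflow appends it to $GITHUB_ENV.
    echo LOWER_NET_NAME="${NETWORK}" | awk '{print tolower($0)}'
    # Prints: lower_net_name=mainnet
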
@@ -373,6 +336,15 @@ jobs:
with:
credentials_json: ${{ secrets.GOOGLE_CREDENTIALS }}
- name: Get disk state name from gcloud
id: get-disk-name
if: ${{ needs.regenerate-stateful-disks.outputs.any_changed != 'true' || github.event.inputs.regenerate-disks != 'true'}}
run: |
output=$(gcloud compute images list --filter="zebrad-cache" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)
echo "Disk: $output"
echo "Description: $(gcloud compute images describe $output --format='value(DESCRIPTION)')"
echo "::set-output name=sha::$output"
# Creates Compute Engine virtual machine instance w/ disks
- name: Create GCP compute instance
id: create-instance
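
The added step above replaces the old artifact download with a direct query against Compute Engine images. A standalone sketch of that lookup, assuming gcloud is already authenticated against the project that holds the zebrad-cache images:

    # Newest image whose name matches "zebrad-cache" (the ~ prefix sorts newest first).
    CACHED_DISK_NAME=$(gcloud compute images list \
      --filter="zebrad-cache" \
      --format="value(NAME)" \
      --sort-by=~creationTimestamp \
      --limit=1)
    echo "Disk: ${CACHED_DISK_NAME}"
    # The image description records which branch, PR, and commit produced the disk,
    # because the sync job passes --description when it creates the image.
    gcloud compute images describe "${CACHED_DISK_NAME}" --format='value(DESCRIPTION)'
    # In the workflow, this name is published with ::set-output and later consumed
    # as steps.get-disk-name.outputs.sha by the instance-creation step.
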
@@ -380,7 +352,7 @@ jobs:
gcloud compute instances create-with-container "sync-checkpoint-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \
--boot-disk-size 100GB \
--boot-disk-type pd-ssd \
--create-disk=image=zebrad-cache-${{ env.DISK_SHORT_SHA }}-${{ env.lower_net_name }}-canopy,name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${{ env.lower_net_name }}-canopy,size=100GB,type=pd-ssd \
--create-disk=image=${{ env.CACHED_DISK_NAME }},name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${{ env.lower_net_name }}-canopy,size=100GB,type=pd-ssd \
--container-mount-disk=mount-path='/zebrad-cache',name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${{ env.lower_net_name }}-canopy \
--container-image ${{ env.GAR_BASE }}/${{ env.IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \
--container-restart-policy=never \
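
For clarity, here is the same instance creation with the GitHub expressions replaced by plain shell variables; every value below is an illustrative placeholder, not the workflow's real configuration:

    # The cached image found by the lookup step seeds a fresh per-run disk,
    # which is then mounted into the container at /zebrad-cache.
    CACHED_DISK_NAME="zebrad-cache-abc1234-mainnet-canopy"   # from the gcloud lookup
    GITHUB_SHA_SHORT="def5678"                               # placeholder commit short SHA
    lower_net_name="mainnet"                                 # placeholder network name
    gcloud compute instances create-with-container "sync-checkpoint-example-${GITHUB_SHA_SHORT}" \
      --boot-disk-size 100GB \
      --boot-disk-type pd-ssd \
      --create-disk=image="${CACHED_DISK_NAME}",name="zebrad-cache-${GITHUB_SHA_SHORT}-${lower_net_name}-canopy",size=100GB,type=pd-ssd \
      --container-mount-disk=mount-path='/zebrad-cache',name="zebrad-cache-${GITHUB_SHA_SHORT}-${lower_net_name}-canopy" \
      --container-image "us-docker.pkg.dev/example-project/zebra/zebrad:sha-${GITHUB_SHA_SHORT}" \
      --container-restart-policy=never \
      --zone "us-central1-a"
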
@@ -402,7 +374,7 @@ jobs:
--tags zebrad \
--zone "${{ env.ZONE }}"
env:
DISK_SHORT_SHA: ${{ needs.regenerate-stateful-disks.outputs.disk_short_sha || steps.get-disk-sha.outputs.sha }}
CACHED_DISK_NAME: ${{ steps.get-disk-name.outputs.sha }}
# TODO: this approach is very messy, but getting the just-created container name is very error-prone, and GCP doesn't have a workaround for this without requiring a TTY
# This TODO relates to the following issues: