fix(test): evaluate "if" conditions correctly and use last disk SHA (#3556)
* fix(test): use the short SHA from actual run if valid * fix(test): if condition must evaluate to a single false * fix(test): do not run logs and upload if not needed * imp(test): allow test stateful sync after disk regeneration This task is fast enough, so it shouldn't do any harm if run just after a ~3 hours test Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
This commit is contained in:
parent
8c07d3906d
commit
2e61998182
|
@ -163,9 +163,11 @@ jobs:
|
|||
docker run -e ZEBRA_SKIP_IPV6_TESTS --name zebrad-tests -t ${{ env.GAR_BASE }}/${{ env.IMAGE_NAME }}:${{ env.GITHUB_EVENT_PULL_REQUEST_HEAD_SHA_SHORT || env.GITHUB_SHA_SHORT }} cargo test --locked --release --features enable-sentry --workspace -- -Zunstable-options --include-ignored
|
||||
|
||||
regenerate-stateful-disks:
|
||||
name: Renerate state disks
|
||||
name: Regenerate stateful disks
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
outputs:
|
||||
disk_short_sha: ${{ steps.disk-short-sha.outputs.disk_short_sha }}
|
||||
steps:
|
||||
- uses: actions/checkout@v2.4.0
|
||||
with:
|
||||
|
@ -197,7 +199,7 @@ jobs:
|
|||
|
||||
- name: Create GCP compute instance
|
||||
id: create-instance
|
||||
if: ${{ steps.changed-files-specific.outputs.any_changed == 'true' }} || ${{ github.event.inputs.regenerate-disks == 'true' }}
|
||||
if: ${{ steps.changed-files-specific.outputs.any_changed == 'true' || github.event.inputs.regenerate-disks == 'true' }}
|
||||
run: |
|
||||
gcloud compute instances create-with-container "zebrad-tests-${{ env.GITHUB_HEAD_REF_SLUG_URL || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_EVENT_PULL_REQUEST_HEAD_SHA_SHORT || env.GITHUB_SHA_SHORT }}" \
|
||||
--boot-disk-size 100GB \
|
||||
|
@ -244,6 +246,7 @@ jobs:
|
|||
|
||||
- name: Regenerate stateful disks logs
|
||||
id: sync-to-checkpoint
|
||||
if: steps.create-instance.outcome == 'success'
|
||||
run: |
|
||||
gcloud compute ssh \
|
||||
zebrad-tests-${{ env.GITHUB_HEAD_REF_SLUG_URL || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_EVENT_PULL_REQUEST_HEAD_SHA_SHORT || env.GITHUB_SHA_SHORT }} \
|
||||
|
@ -267,13 +270,16 @@ jobs:
|
|||
--storage-location=us \
|
||||
--description="Created from head branch ${{ env.GITHUB_HEAD_REF_SLUG_URL }} targeting ${{ env.GITHUB_BASE_REF_SLUG }} from PR ${{ env.GITHUB_REF_SLUG_URL }} with commit ${{ env.GITHUB_EVENT_PULL_REQUEST_HEAD_SHA }}"
|
||||
|
||||
- name: Write the disk SHORT_SHA to a txt
|
||||
- name: Output and write the disk SHORT_SHA to a txt
|
||||
id: disk-short-sha
|
||||
if: steps.sync-to-checkpoint.outcome == 'success'
|
||||
run: |
|
||||
short_sha=$(echo "${{ env.GITHUB_EVENT_PULL_REQUEST_HEAD_SHA_SHORT || env.GITHUB_SHA_SHORT }}")
|
||||
echo "$short_sha" > latest-disk-state-sha.txt
|
||||
echo "::set-output name=disk_short_sha::$short_sha"
|
||||
|
||||
- name: Upload the disk state txt
|
||||
if: steps.sync-to-checkpoint.outcome == 'success'
|
||||
uses: actions/upload-artifact@v2.3.1
|
||||
with:
|
||||
name: latest-disk-state-sha
|
||||
|
@ -291,7 +297,6 @@ jobs:
|
|||
name: Test blocks sync
|
||||
runs-on: ubuntu-latest
|
||||
needs: [ build, regenerate-stateful-disks]
|
||||
if: ${{ github.event.inputs.regenerate-disks != 'true' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v2.4.0
|
||||
with:
|
||||
|
@ -310,6 +315,7 @@ jobs:
|
|||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
workflow: test.yml
|
||||
workflow_conclusion: ''
|
||||
name: latest-disk-state-sha
|
||||
check_artifacts: true
|
||||
|
||||
|
@ -355,7 +361,7 @@ jobs:
|
|||
--tags zebrad \
|
||||
--zone "${{ env.ZONE }}"
|
||||
env:
|
||||
DISK_SHORT_SHA: ${{ steps.get-disk-sha.outputs.sha }}
|
||||
DISK_SHORT_SHA: ${{ needs.regenerate-stateful-disks.outputs.disk_short_sha || steps.get-disk-sha.outputs.sha }}
|
||||
|
||||
# TODO: this approach is very mesy, but getting the just created container name is very error prone and GCP doesn't have a workaround for this without requiring a TTY
|
||||
# This TODO relates to the following issues:
|
||||
|
|
Loading…
Reference in New Issue