change(scan): Remove support for starting the scanner at `zebrad` startup (#8594)

* Remove code that starts the scanner

* Update CHANGELOG

* Disable tests of the scanner

* Disable scanning tests in CI

---------

Co-authored-by: Alfredo Garcia <oxarbitrage@gmail.com>
Marek 2024-06-12 20:39:00 +02:00 committed by GitHub
parent 1b5f9426f9
commit 736f5f5415
4 changed files with 238 additions and 259 deletions


@@ -9,7 +9,7 @@ on:
workflow_call:
inputs:
network:
default: 'Mainnet'
default: "Mainnet"
type: string
regenerate-disks:
default: false
@@ -59,7 +59,7 @@ jobs:
if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }}
uses: ./.github/workflows/sub-find-cached-disks.yml
with:
network: 'Testnet'
network: "Testnet"
# zebrad cached checkpoint state tests
@@ -72,14 +72,14 @@ jobs:
# Note: the output from get-available-disks should match with the caller workflow inputs
regenerate-stateful-disks:
name: Zebra checkpoint
needs: [ get-available-disks ]
needs: [get-available-disks]
uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
if: ${{ !fromJSON(needs.get-available-disks.outputs.zebra_checkpoint_disk) || github.event.inputs.regenerate-disks == 'true' }}
with:
app_name: zebrad
test_id: sync-to-checkpoint
test_description: Test sync up to mandatory checkpoint
test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_DISK_REBUILD=1 -e ZEBRA_FORCE_USE_COLOR=1'
test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_DISK_REBUILD=1 -e ZEBRA_FORCE_USE_COLOR=1"
needs_zebra_state: false
saves_to_disk: true
force_save_to_disk: ${{ inputs.force_save_to_disk || false }}
@@ -101,14 +101,14 @@ jobs:
# Otherwise, if the state rebuild was skipped, runs immediately after the build job.
test-stateful-sync:
name: Zebra checkpoint update
needs: [ regenerate-stateful-disks, get-available-disks ]
needs: [regenerate-stateful-disks, get-available-disks]
uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_checkpoint_disk) || needs.regenerate-stateful-disks.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
with:
app_name: zebrad
test_id: sync-past-checkpoint
test_description: Test full validation sync from a cached state
test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_CHECKPOINT_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1'
test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_CHECKPOINT_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1"
needs_zebra_state: true
saves_to_disk: false
disk_suffix: checkpoint
@@ -129,7 +129,7 @@ jobs:
# Note: the output from get-available-disks should match with the caller workflow inputs
test-full-sync:
name: Zebra tip
needs: [ get-available-disks ]
needs: [get-available-disks]
uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
if: ${{ github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet') }}
with:
@@ -138,7 +138,7 @@ jobs:
test_description: Test a full sync up to the tip
# The value of FULL_SYNC_MAINNET_TIMEOUT_MINUTES is currently ignored.
# TODO: update the test to use {{ input.network }} instead?
test_variables: '-e NETWORK=Mainnet -e FULL_SYNC_MAINNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1'
test_variables: "-e NETWORK=Mainnet -e FULL_SYNC_MAINNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1"
# This test runs for longer than 6 hours, so it needs multiple jobs
is_long_test: true
needs_zebra_state: false
@@ -172,21 +172,21 @@ jobs:
# Otherwise, if the state rebuild was skipped, runs immediately after the build job.
test-update-sync:
name: Zebra tip update
needs: [ test-full-sync, get-available-disks ]
needs: [test-full-sync, get-available-disks]
uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
with:
app_name: zebrad
test_id: update-to-tip
test_description: Test syncing to tip with a Zebra tip state
test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_UPDATE_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache'
test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_UPDATE_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache"
needs_zebra_state: true
# update the disk on every PR, to increase CI speed
saves_to_disk: true
force_save_to_disk: ${{ inputs.force_save_to_disk || false }}
disk_suffix: tip
root_state_path: '/var/cache'
zebra_state_dir: 'zebrad-cache'
root_state_path: "/var/cache"
zebra_state_dir: "zebrad-cache"
height_grep_text: 'current_height.*=.*Height.*\('
secrets: inherit
@@ -205,7 +205,7 @@ jobs:
# Otherwise, if the state rebuild was skipped, runs immediately after the build job.
checkpoints-mainnet:
name: Generate checkpoints mainnet
needs: [ test-full-sync, get-available-disks ]
needs: [test-full-sync, get-available-disks]
uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
with:
@@ -213,13 +213,13 @@ jobs:
test_id: checkpoints-mainnet
test_description: Generate Zebra checkpoints on mainnet
# TODO: update the test to use {{ input.network }} instead?
test_variables: '-e NETWORK=Mainnet -e GENERATE_CHECKPOINTS_MAINNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache'
test_variables: "-e NETWORK=Mainnet -e GENERATE_CHECKPOINTS_MAINNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache"
needs_zebra_state: true
# test-update-sync updates the disk on every PR, so we don't need to do it here
saves_to_disk: false
disk_suffix: tip
root_state_path: '/var/cache'
zebra_state_dir: 'zebrad-cache'
root_state_path: "/var/cache"
zebra_state_dir: "zebrad-cache"
height_grep_text: 'current_height.*=.*Height.*\('
secrets: inherit
@@ -240,7 +240,7 @@ jobs:
# Note: the output from get-available-disks-testnet should match with the caller workflow inputs
test-full-sync-testnet:
name: Zebra tip on testnet
needs: [ get-available-disks-testnet ]
needs: [get-available-disks-testnet]
uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
if: ${{ (github.event_name == 'schedule' && vars.SCHEDULE_TESTNET_FULL_SYNC == 'true') || !fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Testnet') }}
with:
@@ -248,7 +248,7 @@ jobs:
test_id: full-sync-testnet
test_description: Test a full sync up to the tip on testnet
# The value of FULL_SYNC_TESTNET_TIMEOUT_MINUTES is currently ignored.
test_variables: '-e NETWORK=Testnet -e FULL_SYNC_TESTNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1'
test_variables: "-e NETWORK=Testnet -e FULL_SYNC_TESTNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1"
network: "Testnet"
# A full testnet sync could take 2-10 hours in April 2023.
# The time varies a lot due to the small number of nodes.
@@ -286,14 +286,14 @@ jobs:
# Otherwise, if the state rebuild was skipped, runs immediately after the build job.
checkpoints-testnet:
name: Generate checkpoints testnet
needs: [ test-full-sync-testnet, get-available-disks-testnet ]
needs: [test-full-sync-testnet, get-available-disks-testnet]
uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || needs.test-full-sync-testnet.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
with:
app_name: zebrad
test_id: checkpoints-testnet
test_description: Generate Zebra checkpoints on testnet
test_variables: '-e NETWORK=Testnet -e GENERATE_CHECKPOINTS_TESTNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache'
test_variables: "-e NETWORK=Testnet -e GENERATE_CHECKPOINTS_TESTNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache"
network: "Testnet"
needs_zebra_state: true
# update the disk on every PR, to increase CI speed
@@ -301,8 +301,8 @@ jobs:
saves_to_disk: true
force_save_to_disk: ${{ inputs.force_save_to_disk || false }}
disk_suffix: tip
root_state_path: '/var/cache'
zebra_state_dir: 'zebrad-cache'
root_state_path: "/var/cache"
zebra_state_dir: "zebrad-cache"
height_grep_text: 'zebra_tip_height.*=.*Height.*\('
secrets: inherit
@@ -319,7 +319,7 @@ jobs:
# Otherwise, if the state rebuild was skipped, runs immediately after the build job.
lightwalletd-full-sync:
name: lightwalletd tip
needs: [ test-full-sync, get-available-disks ]
needs: [test-full-sync, get-available-disks]
uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
# Currently the lightwalletd tests only work on Mainnet
if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && (github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || github.event.inputs.run-lwd-sync == 'true' ) }}
@@ -327,7 +327,7 @@ jobs:
app_name: lightwalletd
test_id: lwd-full-sync
test_description: Test lightwalletd full sync
test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_FULL_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache'
test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_FULL_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache"
# This test runs for longer than 6 hours, so it needs multiple jobs
is_long_test: true
needs_zebra_state: true
@@ -336,10 +336,10 @@ jobs:
force_save_to_disk: ${{ inputs.force_save_to_disk || false }}
disk_prefix: lwd-cache
disk_suffix: tip
root_state_path: '/var/cache'
zebra_state_dir: 'zebrad-cache'
lwd_state_dir: 'lwd-cache'
height_grep_text: 'Waiting for block: '
root_state_path: "/var/cache"
zebra_state_dir: "zebrad-cache"
lwd_state_dir: "lwd-cache"
height_grep_text: "Waiting for block: "
secrets: inherit
# We want to prevent multiple lightwalletd full syncs running at the same time,
# but we don't want to cancel running syncs on `main` if a new PR gets merged,
@@ -359,24 +359,24 @@ jobs:
# Otherwise, if the state rebuild was skipped, runs immediately after the build job.
lightwalletd-update-sync:
name: lightwalletd tip update
needs: [ lightwalletd-full-sync, get-available-disks ]
needs: [lightwalletd-full-sync, get-available-disks]
uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
with:
app_name: lightwalletd
test_id: lwd-update-sync
test_description: Test lightwalletd update sync with both states
test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_UPDATE_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache'
test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_UPDATE_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache"
needs_zebra_state: true
needs_lwd_state: true
saves_to_disk: true
force_save_to_disk: ${{ inputs.force_save_to_disk || false }}
disk_prefix: lwd-cache
disk_suffix: tip
root_state_path: '/var/cache'
zebra_state_dir: 'zebrad-cache'
lwd_state_dir: 'lwd-cache'
height_grep_text: 'Waiting for block: '
root_state_path: "/var/cache"
zebra_state_dir: "zebrad-cache"
lwd_state_dir: "lwd-cache"
height_grep_text: "Waiting for block: "
secrets: inherit
# Test that Zebra can answer a synthetic RPC call, using a cached Zebra tip state
@@ -391,19 +391,19 @@ jobs:
# TODO: move this job below the rest of the mainnet jobs that just use Zebra cached state
lightwalletd-rpc-test:
name: Zebra tip JSON-RPC
needs: [ test-full-sync, get-available-disks ]
needs: [test-full-sync, get-available-disks]
uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
with:
app_name: lightwalletd
test_id: fully-synced-rpc
test_description: Test lightwalletd RPC with a Zebra tip state
test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_RPC_CALL=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache'
test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_RPC_CALL=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache"
needs_zebra_state: true
saves_to_disk: false
disk_suffix: tip
root_state_path: '/var/cache'
zebra_state_dir: 'zebrad-cache'
root_state_path: "/var/cache"
zebra_state_dir: "zebrad-cache"
secrets: inherit
# Test that Zebra can handle a lightwalletd send transaction RPC call, using a cached Zebra tip state
@@ -416,21 +416,21 @@ jobs:
# Otherwise, if the state rebuild was skipped, runs immediately after the build job.
lightwalletd-transactions-test:
name: lightwalletd tip send
needs: [ lightwalletd-full-sync, get-available-disks ]
needs: [lightwalletd-full-sync, get-available-disks]
uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
with:
app_name: lightwalletd
test_id: lwd-send-transactions
test_description: Test sending transactions via lightwalletd
test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_TRANSACTIONS=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache'
test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_TRANSACTIONS=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache"
needs_zebra_state: true
needs_lwd_state: true
saves_to_disk: false
disk_suffix: tip
root_state_path: '/var/cache'
zebra_state_dir: 'zebrad-cache'
lwd_state_dir: 'lwd-cache'
root_state_path: "/var/cache"
zebra_state_dir: "zebrad-cache"
lwd_state_dir: "lwd-cache"
secrets: inherit
# Test that Zebra can handle gRPC wallet calls, using a cached Zebra tip state
@@ -443,21 +443,21 @@ jobs:
# Otherwise, if the state rebuild was skipped, runs immediately after the build job.
lightwalletd-grpc-test:
name: lightwalletd GRPC tests
needs: [ lightwalletd-full-sync, get-available-disks ]
needs: [lightwalletd-full-sync, get-available-disks]
uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
with:
app_name: lightwalletd
test_id: lwd-grpc-wallet
test_description: Test gRPC calls via lightwalletd
test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_GRPC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache'
test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_GRPC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache"
needs_zebra_state: true
needs_lwd_state: true
saves_to_disk: false
disk_suffix: tip
root_state_path: '/var/cache'
zebra_state_dir: 'zebrad-cache'
lwd_state_dir: 'lwd-cache'
root_state_path: "/var/cache"
zebra_state_dir: "zebrad-cache"
lwd_state_dir: "lwd-cache"
secrets: inherit
## getblocktemplate-rpcs using cached Zebra state on mainnet
@@ -474,20 +474,20 @@ jobs:
# Otherwise, if the state rebuild was skipped, runs immediately after the build job.
get-block-template-test:
name: get block template
needs: [ test-full-sync, get-available-disks ]
needs: [test-full-sync, get-available-disks]
uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
with:
app_name: zebrad
test_id: get-block-template
test_description: Test getblocktemplate RPC method via Zebra's rpc server
test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_GET_BLOCK_TEMPLATE=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache'
test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_GET_BLOCK_TEMPLATE=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache"
needs_zebra_state: true
needs_lwd_state: false
saves_to_disk: false
disk_suffix: tip
root_state_path: '/var/cache'
zebra_state_dir: 'zebrad-cache'
root_state_path: "/var/cache"
zebra_state_dir: "zebrad-cache"
secrets: inherit
# Test that Zebra can handle a submit block RPC call, using a cached Zebra tip state
@@ -516,31 +516,32 @@ jobs:
zebra_state_dir: "zebrad-cache"
secrets: inherit
# Test that the scanner can continue scanning where it was left when zebrad restarts.
#
# Runs:
# - after every PR is merged to `main`
# - on every PR update
#
# If the state version has changed, waits for the new cached states to be created.
# Otherwise, if the state rebuild was skipped, runs immediately after the build job.
scan-start-where-left-test:
name: Scan starts where left
needs: [test-full-sync, get-available-disks]
uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
with:
app_name: zebrad
test_id: scan-start-where-left
test_description: Test that the scanner can continue scanning where it was left when zebrad restarts.
test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SCAN_START_WHERE_LEFT=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache"
needs_zebra_state: true
needs_lwd_state: false
saves_to_disk: true
disk_suffix: tip
root_state_path: "/var/cache"
zebra_state_dir: "zebrad-cache"
secrets: inherit
# TODO: Move this test once we have the new scanner binary.
# # Test that the scanner can continue scanning where it was left when zebrad restarts.
# #
# # Runs:
# # - after every PR is merged to `main`
# # - on every PR update
# #
# # If the state version has changed, waits for the new cached states to be created.
# # Otherwise, if the state rebuild was skipped, runs immediately after the build job.
# scan-start-where-left-test:
# name: Scan starts where left
# needs: [test-full-sync, get-available-disks]
# uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
# if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
# with:
# app_name: zebrad
# test_id: scan-start-where-left
# test_description: Test that the scanner can continue scanning where it was left when zebrad restarts.
# test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SCAN_START_WHERE_LEFT=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache"
# needs_zebra_state: true
# needs_lwd_state: false
# saves_to_disk: true
# disk_suffix: tip
# root_state_path: "/var/cache"
# zebra_state_dir: "zebrad-cache"
# secrets: inherit
# Test that the scan task registers keys, deletes keys, and subscribes to results for keys while running.
#
@@ -589,7 +590,7 @@ jobs:
get-block-template-test,
submit-block-test,
scan-start-where-left-test,
scan-task-commands-test
scan-task-commands-test,
]
# Only open tickets for failed scheduled jobs, manual workflow runs, or `main` branch merges.
# (PR statuses are already reported in the PR jobs list, and checked by Mergify.)


@@ -9,7 +9,11 @@ and this project adheres to [Semantic Versioning](https://semver.org).
### Changed
- We realized that a longer than `zcashd` end of support could be problematic in some cases so we reverted back from 20 to 16 weeks ([#8530](https://github.com/ZcashFoundation/zebra/pull/8530))
- We realized that a longer than `zcashd` end of support could be problematic in
some cases so we reverted back from 20 to 16 weeks
([#8530](https://github.com/ZcashFoundation/zebra/pull/8530))
- The `zebrad` binary no longer contains the scanner of shielded transactions.
This means `zebrad` no longer contains users' viewing keys.
- We're no longer using general conditional compilation attributes for `tor`,
but only feature flags instead.


@@ -45,11 +45,6 @@
//! * Progress Task
//! * logs progress towards the chain tip
//!
//! Shielded Scanning:
//! * Shielded Scanner Task
//! * if the user has configured Zebra with their shielded viewing keys, scans new and existing
//! blocks for transactions that use those keys
//!
//! Block Mining:
//! * Internal Miner Task
//! * if the user has configured Zebra to mine blocks, spawns tasks to generate new blocks,
@@ -339,24 +334,6 @@ impl StartCmd {
tokio::spawn(syncer.sync().in_current_span())
};
#[cfg(feature = "shielded-scan")]
// Spawn never ending scan task only if we have keys to scan for.
let scan_task_handle = {
// TODO: log the number of keys and update the scan_task_starts() test
info!("spawning shielded scanner with configured viewing keys");
zebra_scan::spawn_init(
config.shielded_scan.clone(),
config.network.network.clone(),
state,
chain_tip_change,
)
};
#[cfg(not(feature = "shielded-scan"))]
// Spawn a dummy scan task which doesn't do anything and never finishes.
let scan_task_handle: tokio::task::JoinHandle<Result<(), Report>> =
tokio::spawn(std::future::pending().in_current_span());
// And finally, spawn the internal Zcash miner, if it is enabled.
//
// TODO: add a config to enable the miner rather than a feature.
@@ -398,7 +375,6 @@ impl StartCmd {
pin!(tx_gossip_task_handle);
pin!(progress_task_handle);
pin!(end_of_support_task_handle);
pin!(scan_task_handle);
pin!(miner_task_handle);
// startup tasks
@@ -487,10 +463,6 @@ impl StartCmd {
Ok(())
}
scan_result = &mut scan_task_handle => scan_result
.expect("unexpected panic in the scan task")
.map(|_| info!("scan task exited")),
miner_result = &mut miner_task_handle => miner_result
.expect("unexpected panic in the miner task")
.map(|_| info!("miner task exited")),
@@ -519,7 +491,6 @@ impl StartCmd {
tx_gossip_task_handle.abort();
progress_task_handle.abort();
end_of_support_task_handle.abort();
scan_task_handle.abort();
miner_task_handle.abort();
// startup tasks
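
Aside from the feature-gated spawn, the hunks above also delete a small pattern worth noting: when the `shielded-scan` feature was off, `zebrad` spawned a placeholder task that never finishes, so the shutdown `select!` loop could poll one handle per optional subsystem without extra `cfg` branches. The sketch below is a minimal, self-contained illustration of that pattern with generic types and names, not zebrad's actual ones.

    use std::time::Duration;
    use tokio::task::JoinHandle;

    #[tokio::main]
    async fn main() {
        // A placeholder task that never completes, mirroring the removed
        // `tokio::spawn(std::future::pending())` branch above.
        let placeholder: JoinHandle<Result<(), ()>> = tokio::spawn(std::future::pending());

        // The real code pins this handle and polls it alongside every other task
        // handle; here we only show that it stays pending while other work finishes.
        tokio::select! {
            _ = placeholder => unreachable!("a pending task never completes"),
            _ = tokio::time::sleep(Duration::from_millis(10)) => {
                println!("other tasks finished; the placeholder is still pending");
            }
        }
    }

The payoff of the pattern is that the shutdown path (the `abort()` calls removed in the last hunk) stays identical whether or not the feature is compiled in.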


@@ -2903,195 +2903,198 @@ async fn fully_synced_rpc_z_getsubtreesbyindex_snapshot_test() -> Result<()> {
Ok(())
}
/// Test that the scanner task gets started when the node starts.
#[test]
#[cfg(feature = "shielded-scan")]
fn scan_task_starts() -> Result<()> {
use indexmap::IndexMap;
use zebra_scan::tests::ZECPAGES_SAPLING_VIEWING_KEY;
// TODO: Move this test once we have the new scanner binary.
// /// Test that the scanner task gets started when the node starts.
// #[test]
// #[cfg(feature = "shielded-scan")]
// fn scan_task_starts() -> Result<()> {
// use indexmap::IndexMap;
// use zebra_scan::tests::ZECPAGES_SAPLING_VIEWING_KEY;
let _init_guard = zebra_test::init();
// let _init_guard = zebra_test::init();
let test_type = TestType::LaunchWithEmptyState {
launches_lightwalletd: false,
};
let mut config = default_test_config(&Mainnet)?;
let mut keys = IndexMap::new();
keys.insert(ZECPAGES_SAPLING_VIEWING_KEY.to_string(), 1);
config.shielded_scan.sapling_keys_to_scan = keys;
// let test_type = TestType::LaunchWithEmptyState {
// launches_lightwalletd: false,
// };
// let mut config = default_test_config(&Mainnet)?;
// let mut keys = IndexMap::new();
// keys.insert(ZECPAGES_SAPLING_VIEWING_KEY.to_string(), 1);
// config.shielded_scan.sapling_keys_to_scan = keys;
// Start zebra with the config.
let mut zebrad = testdir()?
.with_exact_config(&config)?
.spawn_child(args!["start"])?
.with_timeout(test_type.zebrad_timeout());
// // Start zebra with the config.
// let mut zebrad = testdir()?
// .with_exact_config(&config)?
// .spawn_child(args!["start"])?
// .with_timeout(test_type.zebrad_timeout());
// Check scanner was started.
zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?;
// // Check scanner was started.
// zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?;
// Look for 2 scanner notices indicating we are below sapling activation.
zebrad.expect_stdout_line_matches("scanner is waiting for Sapling activation. Current tip: [0-9]{1,4}, Sapling activation: 419200")?;
zebrad.expect_stdout_line_matches("scanner is waiting for Sapling activation. Current tip: [0-9]{1,4}, Sapling activation: 419200")?;
// // Look for 2 scanner notices indicating we are below sapling activation.
// zebrad.expect_stdout_line_matches("scanner is waiting for Sapling activation. Current tip: [0-9]{1,4}, Sapling activation: 419200")?;
// zebrad.expect_stdout_line_matches("scanner is waiting for Sapling activation. Current tip: [0-9]{1,4}, Sapling activation: 419200")?;
// Kill the node.
zebrad.kill(false)?;
// // Kill the node.
// zebrad.kill(false)?;
// Check that scan task started and the first scanning is done.
let output = zebrad.wait_with_output()?;
// // Check that scan task started and the first scanning is done.
// let output = zebrad.wait_with_output()?;
// Make sure the command was killed
output.assert_was_killed()?;
output.assert_failure()?;
// // Make sure the command was killed
// output.assert_was_killed()?;
// output.assert_failure()?;
Ok(())
}
// Ok(())
// }
/// Test that the scanner gRPC server starts when the node starts.
#[tokio::test]
#[cfg(all(feature = "shielded-scan", not(target_os = "windows")))]
async fn scan_rpc_server_starts() -> Result<()> {
use zebra_grpc::scanner::{scanner_client::ScannerClient, Empty};
// TODO: Move this test once we have the new scanner binary.
// /// Test that the scanner gRPC server starts when the node starts.
// #[tokio::test]
// #[cfg(all(feature = "shielded-scan", not(target_os = "windows")))]
// async fn scan_rpc_server_starts() -> Result<()> {
// use zebra_grpc::scanner::{scanner_client::ScannerClient, Empty};
let _init_guard = zebra_test::init();
// let _init_guard = zebra_test::init();
let test_type = TestType::LaunchWithEmptyState {
launches_lightwalletd: false,
};
// let test_type = TestType::LaunchWithEmptyState {
// launches_lightwalletd: false,
// };
let port = random_known_port();
let listen_addr = format!("127.0.0.1:{port}");
let mut config = default_test_config(&Mainnet)?;
config.shielded_scan.listen_addr = Some(listen_addr.parse()?);
// let port = random_known_port();
// let listen_addr = format!("127.0.0.1:{port}");
// let mut config = default_test_config(&Mainnet)?;
// config.shielded_scan.listen_addr = Some(listen_addr.parse()?);
// Start zebra with the config.
let mut zebrad = testdir()?
.with_exact_config(&config)?
.spawn_child(args!["start"])?
.with_timeout(test_type.zebrad_timeout());
// // Start zebra with the config.
// let mut zebrad = testdir()?
// .with_exact_config(&config)?
// .spawn_child(args!["start"])?
// .with_timeout(test_type.zebrad_timeout());
// Wait until gRPC server is starting.
tokio::time::sleep(LAUNCH_DELAY).await;
zebrad.expect_stdout_line_matches("starting scan gRPC server")?;
tokio::time::sleep(Duration::from_secs(1)).await;
// // Wait until gRPC server is starting.
// tokio::time::sleep(LAUNCH_DELAY).await;
// zebrad.expect_stdout_line_matches("starting scan gRPC server")?;
// tokio::time::sleep(Duration::from_secs(1)).await;
let mut client = ScannerClient::connect(format!("http://{listen_addr}")).await?;
// let mut client = ScannerClient::connect(format!("http://{listen_addr}")).await?;
let request = tonic::Request::new(Empty {});
// let request = tonic::Request::new(Empty {});
client.get_info(request).await?;
// client.get_info(request).await?;
// Kill the node.
zebrad.kill(false)?;
// // Kill the node.
// zebrad.kill(false)?;
// Check that scan task started and the first scanning is done.
let output = zebrad.wait_with_output()?;
// // Check that scan task started and the first scanning is done.
// let output = zebrad.wait_with_output()?;
// Make sure the command was killed
output.assert_was_killed()?;
output.assert_failure()?;
// // Make sure the command was killed
// output.assert_was_killed()?;
// output.assert_failure()?;
Ok(())
}
// Ok(())
// }
/// Test that the scanner can continue scanning where it was left when zebrad restarts.
///
/// Needs a cache state close to the tip. A possible way to run it locally is:
///
/// export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state"
/// cargo test scan_start_where_left --features="shielded-scan" -- --ignored --nocapture
///
/// The test will run zebrad with a key to scan, scan the first few blocks after sapling and then stops.
/// Then it will restart zebrad and check that it resumes scanning where it was left.
///
/// Note: This test will remove all the contents you may have in the ZEBRA_CACHED_STATE_DIR/private-scan directory
/// so it can start with an empty scanning state.
#[ignore]
#[test]
#[cfg(feature = "shielded-scan")]
fn scan_start_where_left() -> Result<()> {
use indexmap::IndexMap;
use zebra_scan::{storage::db::SCANNER_DATABASE_KIND, tests::ZECPAGES_SAPLING_VIEWING_KEY};
// TODO: Move this test once we have the new scanner binary.
// /// Test that the scanner can continue scanning where it was left when zebrad restarts.
// ///
// /// Needs a cache state close to the tip. A possible way to run it locally is:
// ///
// /// export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state"
// /// cargo test scan_start_where_left --features="shielded-scan" -- --ignored --nocapture
// ///
// /// The test will run zebrad with a key to scan, scan the first few blocks after sapling and then stops.
// /// Then it will restart zebrad and check that it resumes scanning where it was left.
// ///
// /// Note: This test will remove all the contents you may have in the ZEBRA_CACHED_STATE_DIR/private-scan directory
// /// so it can start with an empty scanning state.
// #[ignore]
// #[test]
// #[cfg(feature = "shielded-scan")]
// fn scan_start_where_left() -> Result<()> {
// use indexmap::IndexMap;
// use zebra_scan::{storage::db::SCANNER_DATABASE_KIND, tests::ZECPAGES_SAPLING_VIEWING_KEY};
let _init_guard = zebra_test::init();
// let _init_guard = zebra_test::init();
// use `UpdateZebraCachedStateNoRpc` as the test type to make sure a zebrad cache state is available.
let test_type = TestType::UpdateZebraCachedStateNoRpc;
if let Some(cache_dir) = test_type.zebrad_state_path("scan test") {
// Add a key to the config
let mut config = default_test_config(&Mainnet)?;
let mut keys = IndexMap::new();
keys.insert(ZECPAGES_SAPLING_VIEWING_KEY.to_string(), 1);
config.shielded_scan.sapling_keys_to_scan = keys;
// // use `UpdateZebraCachedStateNoRpc` as the test type to make sure a zebrad cache state is available.
// let test_type = TestType::UpdateZebraCachedStateNoRpc;
// if let Some(cache_dir) = test_type.zebrad_state_path("scan test") {
// // Add a key to the config
// let mut config = default_test_config(&Mainnet)?;
// let mut keys = IndexMap::new();
// keys.insert(ZECPAGES_SAPLING_VIEWING_KEY.to_string(), 1);
// config.shielded_scan.sapling_keys_to_scan = keys;
// Add the cache dir to shielded scan, make it the same as the zebrad cache state.
config
.shielded_scan
.db_config_mut()
.cache_dir
.clone_from(&cache_dir);
config.shielded_scan.db_config_mut().ephemeral = false;
// // Add the cache dir to shielded scan, make it the same as the zebrad cache state.
// config
// .shielded_scan
// .db_config_mut()
// .cache_dir
// .clone_from(&cache_dir);
// config.shielded_scan.db_config_mut().ephemeral = false;
// Add the cache dir to state.
config.state.cache_dir.clone_from(&cache_dir);
config.state.ephemeral = false;
// // Add the cache dir to state.
// config.state.cache_dir.clone_from(&cache_dir);
// config.state.ephemeral = false;
// Remove the scan directory before starting.
let scan_db_path = cache_dir.join(SCANNER_DATABASE_KIND);
fs::remove_dir_all(std::path::Path::new(&scan_db_path)).ok();
// // Remove the scan directory before starting.
// let scan_db_path = cache_dir.join(SCANNER_DATABASE_KIND);
// fs::remove_dir_all(std::path::Path::new(&scan_db_path)).ok();
// Start zebra with the config.
let mut zebrad = testdir()?
.with_exact_config(&config)?
.spawn_child(args!["start"])?
.with_timeout(test_type.zebrad_timeout());
// // Start zebra with the config.
// let mut zebrad = testdir()?
// .with_exact_config(&config)?
// .spawn_child(args!["start"])?
// .with_timeout(test_type.zebrad_timeout());
// Check scanner was started.
zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?;
// // Check scanner was started.
// zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?;
// The first time
zebrad.expect_stdout_line_matches(
r"Scanning the blockchain for key 0, started at block 419200, now at block 420000",
)?;
// // The first time
// zebrad.expect_stdout_line_matches(
// r"Scanning the blockchain for key 0, started at block 419200, now at block 420000",
// )?;
// Make sure scanner scans a few blocks.
zebrad.expect_stdout_line_matches(
r"Scanning the blockchain for key 0, started at block 419200, now at block 430000",
)?;
zebrad.expect_stdout_line_matches(
r"Scanning the blockchain for key 0, started at block 419200, now at block 440000",
)?;
// // Make sure scanner scans a few blocks.
// zebrad.expect_stdout_line_matches(
// r"Scanning the blockchain for key 0, started at block 419200, now at block 430000",
// )?;
// zebrad.expect_stdout_line_matches(
// r"Scanning the blockchain for key 0, started at block 419200, now at block 440000",
// )?;
// Kill the node.
zebrad.kill(false)?;
let output = zebrad.wait_with_output()?;
// // Kill the node.
// zebrad.kill(false)?;
// let output = zebrad.wait_with_output()?;
// Make sure the command was killed
output.assert_was_killed()?;
output.assert_failure()?;
// // Make sure the command was killed
// output.assert_was_killed()?;
// output.assert_failure()?;
// Start the node again.
let mut zebrad = testdir()?
.with_exact_config(&config)?
.spawn_child(args!["start"])?
.with_timeout(test_type.zebrad_timeout());
// // Start the node again.
// let mut zebrad = testdir()?
// .with_exact_config(&config)?
// .spawn_child(args!["start"])?
// .with_timeout(test_type.zebrad_timeout());
// Resuming message.
zebrad.expect_stdout_line_matches(
"Last scanned height for key number 0 is 439000, resuming at 439001",
)?;
zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?;
// // Resuming message.
// zebrad.expect_stdout_line_matches(
// "Last scanned height for key number 0 is 439000, resuming at 439001",
// )?;
// zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?;
// Start scanning where it was left.
zebrad.expect_stdout_line_matches(
r"Scanning the blockchain for key 0, started at block 439001, now at block 440000",
)?;
zebrad.expect_stdout_line_matches(
r"Scanning the blockchain for key 0, started at block 439001, now at block 450000",
)?;
}
// // Start scanning where it was left.
// zebrad.expect_stdout_line_matches(
// r"Scanning the blockchain for key 0, started at block 439001, now at block 440000",
// )?;
// zebrad.expect_stdout_line_matches(
// r"Scanning the blockchain for key 0, started at block 439001, now at block 450000",
// )?;
// }
Ok(())
}
// Ok(())
// }
// TODO: Add this test to CI (#8236)
/// Tests successful: