//! Acceptance test: runs zebrad as a subprocess and asserts its
//! output for given argument combinations matches what is expected.
//!
//! ## Note on port conflict
//!
//! If the test child has a cache or port conflict with another test, or a
//! running zebrad or zcashd, then it will panic. But the acceptance tests
//! expect it to run until it is killed.
//!
//! If these conflicts cause test failures:
//! - run the tests in an isolated environment,
//! - run zebrad on a custom cache path and port (see the example config below),
//! - run zcashd on a custom port.
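//!
//! For example, a config could point Zebra at a custom port and cache path
//! (illustrative values; any unused port and writable path work):
//!
//! ```toml
//! [network]
//! listen_addr = "127.0.0.1:28233"
//!
//! [state]
//! cache_dir = "/tmp/zebrad-test-cache"
//! ```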
//!
//! ## Failures due to Configured Network Interfaces or Network Connectivity
//!
//! If your test environment does not have any IPv6 interfaces configured, skip IPv6 tests
//! by setting the `ZEBRA_SKIP_IPV6_TESTS` environment variable.
//!
//! If it does not have any IPv4 interfaces, IPv4 localhost is not on `127.0.0.1`,
//! or you have poor network connectivity,
//! skip all the network tests by setting the `ZEBRA_SKIP_NETWORK_TESTS` environment variable.
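//!
//! For example:
//!
//! ```console
//! $ export ZEBRA_SKIP_IPV6_TESTS=1
//! $ export ZEBRA_SKIP_NETWORK_TESTS=1
//! $ cargo test
//! ```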
//!
//! ## Large/full sync tests
//!
//! This file has sync tests that are marked as ignored because they take too much time to run.
//! Some of them require environment variables or directories to be present:
//!
//! - `FULL_SYNC_MAINNET_TIMEOUT_MINUTES` env variable: The maximum number of minutes
//! we allow the Mainnet full sync tests to run before giving up.
//! - `FULL_SYNC_TESTNET_TIMEOUT_MINUTES` env variable: The maximum number of minutes
//! we allow the Testnet full sync tests to run before giving up.
//! - `/zebrad-cache` directory: For some sync tests, this directory needs to be created
//! in the file system with write permissions.
//!
//! Here are some examples of how to run each of the tests:
//!
//! ```console
//! $ cargo test sync_large_checkpoints_mainnet -- --ignored --nocapture
//!
//! $ cargo test sync_large_checkpoints_mempool_mainnet -- --ignored --nocapture
//!
//! $ sudo mkdir /zebrad-cache
//! $ sudo chmod 777 /zebrad-cache
//! $ export FULL_SYNC_MAINNET_TIMEOUT_MINUTES=600
//! $ cargo test full_sync_mainnet -- --ignored --nocapture
//!
//! $ sudo mkdir /zebrad-cache
//! $ sudo chmod 777 /zebrad-cache
//! $ export FULL_SYNC_TESTNET_TIMEOUT_MINUTES=600
//! $ cargo test full_sync_testnet -- --ignored --nocapture
//! ```
//!
//! Please refer to the documentation of each test for more information.
//!
//! ## Lightwalletd tests
//!
//! The lightwalletd software is an interface service that uses zebrad or zcashd RPC methods
//! to efficiently serve wallets and other applications with blockchain content.
//! There are several versions of lightwalletd in the form of different forks. The original
//! repo is <https://github.com/zcash/lightwalletd>, and Zecwallet Lite uses a custom fork: <https://github.com/adityapk00/lightwalletd>.
//! These tests were initially written against the `adityapk00/lightwalletd` fork, but the
//! changes for fast spendability support were only made to `zcash/lightwalletd`.
//!
//! We expect `adityapk00/lightwalletd` to keep working with Zebra, but these tests use `zcash/lightwalletd`.
//!
//! Not all of the Zebra lightwalletd tests are marked as ignored, but none of them will run
//! unless at least the `ZEBRA_TEST_LIGHTWALLETD` environment variable is present:
//!
//! - `ZEBRA_TEST_LIGHTWALLETD` env variable: Needs to be present to run any of the lightwalletd tests.
//! - `ZEBRA_CACHED_STATE_DIR` env variable: The path to a Zebra blockchain database.
//! - `LIGHTWALLETD_DATA_DIR` env variable: The path to a lightwalletd database.
//! - `--features lightwalletd-grpc-tests` cargo flag: The flag given to cargo to build the gRPC test code.
//!
//! Here are some examples of running each test:
//!
//! ```console
//! $ export ZEBRA_TEST_LIGHTWALLETD=true
//! $ cargo test lightwalletd_integration -- --nocapture
//!
//! $ export ZEBRA_TEST_LIGHTWALLETD=true
//! $ export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state"
//! $ export LIGHTWALLETD_DATA_DIR="/path/to/lightwalletd/database"
//! $ cargo test lightwalletd_update_sync -- --nocapture
//!
//! $ export ZEBRA_TEST_LIGHTWALLETD=true
//! $ export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state"
//! $ cargo test lightwalletd_full_sync -- --ignored --nocapture
//!
//! $ export ZEBRA_TEST_LIGHTWALLETD=true
//! $ cargo test lightwalletd_test_suite -- --ignored --nocapture
//!
//! $ export ZEBRA_TEST_LIGHTWALLETD=true
//! $ export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state"
//! $ cargo test fully_synced_rpc_test -- --ignored --nocapture
//!
//! $ export ZEBRA_TEST_LIGHTWALLETD=true
//! $ export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state"
//! $ export LIGHTWALLETD_DATA_DIR="/path/to/lightwalletd/database"
//! $ cargo test sending_transactions_using_lightwalletd --features lightwalletd-grpc-tests -- --ignored --nocapture
//!
//! $ export ZEBRA_TEST_LIGHTWALLETD=true
//! $ export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state"
//! $ export LIGHTWALLETD_DATA_DIR="/path/to/lightwalletd/database"
//! $ cargo test lightwalletd_wallet_grpc_tests --features lightwalletd-grpc-tests -- --ignored --nocapture
//! ```
//!
//! ## Getblocktemplate tests
//!
//! Example of how to run the get_block_template test:
//!
//! ```console
//! ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state cargo test get_block_template --features getblocktemplate-rpcs --release -- --ignored --nocapture
//! ```
//!
//! Example of how to run the submit_block test:
//!
//! ```console
//! ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state cargo test submit_block --features getblocktemplate-rpcs --release -- --ignored --nocapture
//! ```
//!
//! Please refer to the documentation of each test for more information.
//!
//! ## Checkpoint Generation Tests
//!
//! Generate checkpoints on mainnet and testnet using a cached state:
//! ```console
//! GENERATE_CHECKPOINTS_MAINNET=1 ENTRYPOINT_FEATURES=zebra-checkpoints ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state docker/entrypoint.sh
//! GENERATE_CHECKPOINTS_TESTNET=1 ENTRYPOINT_FEATURES=zebra-checkpoints ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state docker/entrypoint.sh
//! ```
//!
//! ## Disk Space for Testing
//!
//! The full sync and lightwalletd tests with cached state expect a temporary directory with
//! at least 300 GB of disk space (2 copies of the full chain). To use another disk for the
//! temporary test files:
//!
//! ```sh
//! export TMPDIR=/path/to/disk/directory
//! ```

use std::{
    cmp::Ordering,
    collections::HashSet,
    env, fs, panic,
    path::PathBuf,
    time::{Duration, Instant},
};

use color_eyre::{
    eyre::{eyre, Result, WrapErr},
    Help,
};
use semver::Version;
use serde_json::Value;

use zebra_chain::{
    block::{self, Height},
    parameters::Network::{self, *},
};

use zebra_network::constants::PORT_IN_USE_ERROR;
use zebra_node_services::rpc_client::RpcRequestClient;
use zebra_state::{constants::LOCK_FILE_ERROR, database_format_version_in_code};

use zebra_test::{
    args,
    command::{to_regex::CollectRegexSet, ContextFrom},
    net::random_known_port,
    prelude::*,
};

mod common;

use common::{
    check::{is_zebrad_version, EphemeralCheck, EphemeralConfig},
    config::random_known_rpc_port_config,
    config::{
        config_file_full_path, configs_dir, default_test_config, persistent_test_config, testdir,
    },
    launch::{
        spawn_zebrad_for_rpc, spawn_zebrad_without_rpc, ZebradTestDirExt, BETWEEN_NODES_DELAY,
        EXTENDED_LAUNCH_DELAY, LAUNCH_DELAY,
    },
    lightwalletd::{can_spawn_lightwalletd_for_rpc, spawn_lightwalletd_for_rpc},
    sync::{
        create_cached_database_height, sync_until, MempoolBehavior, LARGE_CHECKPOINT_TEST_HEIGHT,
        LARGE_CHECKPOINT_TIMEOUT, MEDIUM_CHECKPOINT_TEST_HEIGHT, STOP_AT_HEIGHT_REGEX,
        STOP_ON_LOAD_TIMEOUT, SYNC_FINISHED_REGEX, TINY_CHECKPOINT_TEST_HEIGHT,
        TINY_CHECKPOINT_TIMEOUT,
    },
    test_type::TestType::{self, *},
};

use crate::common::cached_state::{
    wait_for_state_version_message, wait_for_state_version_upgrade, DATABASE_FORMAT_UPGRADE_IS_LONG,
};

/// The maximum amount of time that we allow the creation of a future to block the `tokio` executor.
///
/// This should be larger than the amount of time between thread time slices on a busy test VM.
///
/// This limit only applies to some tests.
pub const MAX_ASYNC_BLOCKING_TIME: Duration = zebra_test::mock_service::DEFAULT_MAX_REQUEST_DELAY;
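
// A minimal sketch (not used by any test) of the pattern this limit guards:
// time a potentially blocking setup step, then fail fast instead of letting
// the `tokio` executor stall. The `setup` closure is a hypothetical stand-in
// for future creation, like the `zebra_state::spawn_init()` call tested below.
#[allow(dead_code)]
fn assert_setup_does_not_block(setup: impl FnOnce()) {
    let start = Instant::now();
    setup();
    let blocked = start.elapsed();
    assert!(
        blocked <= MAX_ASYNC_BLOCKING_TIME,
        "futures executor was blocked longer than expected ({blocked:?})",
    );
}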

/// The test config file prefix for `--features getblocktemplate-rpcs` configs.
pub const GET_BLOCK_TEMPLATE_CONFIG_PREFIX: &str = "getblocktemplate-";

#[test]
fn generate_no_args() -> Result<()> {
    let _init_guard = zebra_test::init();

    let child = testdir()?
        .with_config(&mut default_test_config(Mainnet)?)?
        .spawn_child(args!["generate"])?;

    let output = child.wait_with_output()?;
    let output = output.assert_success()?;

    // First line
    output.stdout_line_contains("# Default configuration for zebrad")?;

    Ok(())
}

#[test]
fn generate_args() -> Result<()> {
    let _init_guard = zebra_test::init();

    let testdir = testdir()?;
    let testdir = &testdir;

    // unexpected free argument `argument`
    let child = testdir.spawn_child(args!["generate", "argument"])?;
    let output = child.wait_with_output()?;
    output.assert_failure()?;

    // unrecognized option `-f`
    let child = testdir.spawn_child(args!["generate", "-f"])?;
    let output = child.wait_with_output()?;
    output.assert_failure()?;

    // missing argument to option `-o`
    let child = testdir.spawn_child(args!["generate", "-o"])?;
    let output = child.wait_with_output()?;
    output.assert_failure()?;

    // Add a config file name to tempdir path
    let generated_config_path = testdir.path().join("zebrad.toml");

    // Valid
    let child =
        testdir.spawn_child(args!["generate", "-o": generated_config_path.to_str().unwrap()])?;

    let output = child.wait_with_output()?;
    let output = output.assert_success()?;

    assert_with_context!(
        testdir.path().exists(),
        &output,
        "test temp directory not found"
    );
    assert_with_context!(
        generated_config_path.exists(),
        &output,
        "generated config file not found"
    );

    Ok(())
}

#[test]
fn help_no_args() -> Result<()> {
    let _init_guard = zebra_test::init();

    let testdir = testdir()?.with_config(&mut default_test_config(Mainnet)?)?;

    let child = testdir.spawn_child(args!["help"])?;
    let output = child.wait_with_output()?;
    let output = output.assert_success()?;

    // The first line should have the version
    output.any_output_line(
        is_zebrad_version,
        &output.output.stdout,
        "stdout",
        "are valid zebrad semantic versions",
    )?;

    // Make sure we are in help by looking for the usage string
    output.stdout_line_contains("Usage:")?;

    Ok(())
}

#[test]
fn help_args() -> Result<()> {
    let _init_guard = zebra_test::init();

    let testdir = testdir()?;
    let testdir = &testdir;

    // The subcommand "argument" wasn't recognized.
    let child = testdir.spawn_child(args!["help", "argument"])?;
    let output = child.wait_with_output()?;
    output.assert_failure()?;

    // option `-f` does not accept an argument
    let child = testdir.spawn_child(args!["help", "-f"])?;
    let output = child.wait_with_output()?;
    output.assert_failure()?;

    Ok(())
}

#[test]
fn start_no_args() -> Result<()> {
    let _init_guard = zebra_test::init();

    // start caches state, so run one of the start tests with persistent state
    let testdir = testdir()?.with_config(&mut persistent_test_config(Mainnet)?)?;

    let mut child = testdir.spawn_child(args!["-v", "start"])?;

    // Run the program and kill it after a few seconds
    std::thread::sleep(LAUNCH_DELAY);
    child.kill(false)?;

    let output = child.wait_with_output()?;
    let output = output.assert_failure()?;

    output.stdout_line_contains("Starting zebrad")?;

    // Make sure the command passed the legacy chain check
    output.stdout_line_contains("starting legacy chain check")?;
    output.stdout_line_contains("no legacy chain found")?;

    // Make sure the command was killed
    output.assert_was_killed()?;

    Ok(())
}

#[test]
fn start_args() -> Result<()> {
    let _init_guard = zebra_test::init();

    let testdir = testdir()?.with_config(&mut default_test_config(Mainnet)?)?;
    let testdir = &testdir;

    let mut child = testdir.spawn_child(args!["start"])?;

    // Run the program and kill it after a few seconds
    std::thread::sleep(LAUNCH_DELAY);
    child.kill(false)?;

    let output = child.wait_with_output()?;

    // Make sure the command was killed
    output.assert_was_killed()?;

    output.assert_failure()?;

    // unrecognized option `-f`
    let child = testdir.spawn_child(args!["start", "-f"])?;
    let output = child.wait_with_output()?;
    output.assert_failure()?;

    Ok(())
}

#[tokio::test]
async fn db_init_outside_future_executor() -> Result<()> {
    let _init_guard = zebra_test::init();
    let config = default_test_config(Mainnet)?;

    let start = Instant::now();

    // This test doesn't need UTXOs to be verified efficiently, because it uses an empty state.
    let db_init_handle =
        zebra_state::spawn_init(config.state.clone(), config.network.network, Height::MAX, 0);

    // it's faster to panic if it takes longer than expected, since the executor
    // will wait indefinitely for a blocking operation to finish once it has started
    let block_duration = start.elapsed();
    assert!(
        block_duration <= MAX_ASYNC_BLOCKING_TIME,
        "futures executor was blocked longer than expected ({block_duration:?})",
    );

    db_init_handle.await?;

    Ok(())
}

/// Check that the block state and peer list caches are written to disk.
#[test]
fn persistent_mode() -> Result<()> {
    let _init_guard = zebra_test::init();

    let testdir = testdir()?.with_config(&mut persistent_test_config(Mainnet)?)?;
    let testdir = &testdir;

    let mut child = testdir.spawn_child(args!["-v", "start"])?;

    // Run the program and kill it after a few seconds
    std::thread::sleep(EXTENDED_LAUNCH_DELAY);
    child.kill(false)?;
    let output = child.wait_with_output()?;

    // Make sure the command was killed
    output.assert_was_killed()?;

    let cache_dir = testdir.path().join("state");
    assert_with_context!(
        cache_dir.read_dir()?.count() > 0,
        &output,
        "state directory empty despite persistent state config"
    );

    let cache_dir = testdir.path().join("network");
    assert_with_context!(
        cache_dir.read_dir()?.count() > 0,
        &output,
        "network directory empty despite persistent network config"
    );

    Ok(())
}

#[test]
fn ephemeral_existing_directory() -> Result<()> {
    ephemeral(EphemeralConfig::Default, EphemeralCheck::ExistingDirectory)
}

#[test]
fn ephemeral_missing_directory() -> Result<()> {
    ephemeral(EphemeralConfig::Default, EphemeralCheck::MissingDirectory)
}

#[test]
fn misconfigured_ephemeral_existing_directory() -> Result<()> {
    ephemeral(
        EphemeralConfig::MisconfiguredCacheDir,
        EphemeralCheck::ExistingDirectory,
    )
}

#[test]
fn misconfigured_ephemeral_missing_directory() -> Result<()> {
    ephemeral(
        EphemeralConfig::MisconfiguredCacheDir,
        EphemeralCheck::MissingDirectory,
    )
}

/// Check that the state directory created on disk matches the state config.
///
/// TODO: do a similar test for `network.cache_dir`
#[tracing::instrument]
fn ephemeral(cache_dir_config: EphemeralConfig, cache_dir_check: EphemeralCheck) -> Result<()> {
    use std::io::ErrorKind;

    let _init_guard = zebra_test::init();

    let mut config = default_test_config(Mainnet)?;
    let run_dir = testdir()?;

    let ignored_cache_dir = run_dir.path().join("state");
    if cache_dir_config == EphemeralConfig::MisconfiguredCacheDir {
        // Write a configuration that sets both the cache_dir and ephemeral options
        config.state.cache_dir = ignored_cache_dir.clone();
    }

    if cache_dir_check == EphemeralCheck::ExistingDirectory {
        // We set the cache_dir config to a newly created empty temp directory,
        // then make sure that it is empty after the test
        fs::create_dir(&ignored_cache_dir)?;
    }

    let mut child = run_dir
        .path()
        .with_config(&mut config)?
        .spawn_child(args!["start"])?;

    // Run the program and kill it after a few seconds
    std::thread::sleep(EXTENDED_LAUNCH_DELAY);
    child.kill(false)?;
    let output = child.wait_with_output()?;

    // Make sure the command was killed
    output.assert_was_killed()?;

    let expected_run_dir_file_names = match cache_dir_check {
        // we created the state directory, so it should still exist
        EphemeralCheck::ExistingDirectory => {
            assert_with_context!(
                ignored_cache_dir
                    .read_dir()
                    .expect("ignored_cache_dir should still exist")
                    .count()
                    == 0,
                &output,
                "ignored_cache_dir not empty for ephemeral {:?} {:?}: {:?}",
                cache_dir_config,
                cache_dir_check,
                ignored_cache_dir.read_dir().unwrap().collect::<Vec<_>>()
            );

            ["state", "network", "zebrad.toml"].iter()
        }

        // we didn't create the state directory, so it should not exist
        EphemeralCheck::MissingDirectory => {
            assert_with_context!(
                ignored_cache_dir
                    .read_dir()
                    .expect_err("ignored_cache_dir should not exist")
                    .kind()
                    == ErrorKind::NotFound,
                &output,
                "unexpected creation of ignored_cache_dir for ephemeral {:?} {:?}: the cache dir exists and contains these files: {:?}",
                cache_dir_config,
                cache_dir_check,
                ignored_cache_dir.read_dir().unwrap().collect::<Vec<_>>()
            );

            ["network", "zebrad.toml"].iter()
        }
    };

    let expected_run_dir_file_names = expected_run_dir_file_names.map(Into::into).collect();
    let run_dir_file_names = run_dir
        .path()
        .read_dir()
        .expect("run_dir should still exist")
        .map(|dir_entry| dir_entry.expect("run_dir is readable").file_name())
        // ignore directory list order, because it can vary based on the OS and filesystem
        .collect::<HashSet<_>>();

    assert_with_context!(
        run_dir_file_names == expected_run_dir_file_names,
        &output,
        "run_dir not empty for ephemeral {:?} {:?}: expected {:?}, actual: {:?}",
        cache_dir_config,
        cache_dir_check,
        expected_run_dir_file_names,
        run_dir_file_names
    );

    Ok(())
}

#[test]
fn version_no_args() -> Result<()> {
    let _init_guard = zebra_test::init();

    let testdir = testdir()?.with_config(&mut default_test_config(Mainnet)?)?;

    let child = testdir.spawn_child(args!["--version"])?;
    let output = child.wait_with_output()?;
    let output = output.assert_success()?;

    // The output should only contain the version
    output.output_check(
        is_zebrad_version,
        &output.output.stdout,
        "stdout",
        "a valid zebrad semantic version",
    )?;

    Ok(())
}

#[test]
fn version_args() -> Result<()> {
    let _init_guard = zebra_test::init();

    let testdir = testdir()?.with_config(&mut default_test_config(Mainnet)?)?;
    let testdir = &testdir;

    // unrecognized option `-f`
    let child = testdir.spawn_child(args!["tip-height", "-f"])?;
    let output = child.wait_with_output()?;
    output.assert_failure()?;

    // unrecognized option `-f` is ignored
    let child = testdir.spawn_child(args!["--version", "-f"])?;
    let output = child.wait_with_output()?;
    let output = output.assert_success()?;

    // The output should only contain the version
    output.output_check(
        is_zebrad_version,
        &output.output.stdout,
        "stdout",
        "a valid zebrad semantic version",
    )?;

    Ok(())
}

/// Run config tests that use the default ports and paths.
///
/// Unlike the other tests, these tests cannot be run in parallel, because
/// they use the generated config. So parallel execution can cause port and
/// cache conflicts.
#[test]
fn config_tests() -> Result<()> {
    valid_generated_config("start", "Starting zebrad")?;

    // Check what happens when Zebra parses an invalid config
    invalid_generated_config()?;

    // Check that we have a current version of the config stored
    last_config_is_stored()?;

    // Check that Zebra's previous configurations still work
    stored_configs_work()?;

    // We run the `zebrad` app test after the config tests, to avoid potential port conflicts
    app_no_args()?;

    Ok(())
}

/// Test that `zebrad` runs the start command with no args
#[tracing::instrument]
fn app_no_args() -> Result<()> {
    let _init_guard = zebra_test::init();

    // start caches state, so run one of the start tests with persistent state
    let testdir = testdir()?.with_config(&mut persistent_test_config(Mainnet)?)?;

    tracing::info!(?testdir, "running zebrad with no config (default settings)");

    let mut child = testdir.spawn_child(args![])?;

    // Run the program and kill it after a few seconds
    std::thread::sleep(LAUNCH_DELAY);
    child.kill(true)?;
    let output = child.wait_with_output()?;
    let output = output.assert_failure()?;

    output.stdout_line_contains("Starting zebrad")?;

    // Make sure the command passed the legacy chain check
    output.stdout_line_contains("starting legacy chain check")?;
    output.stdout_line_contains("no legacy chain found")?;

    // Make sure the command was killed
    output.assert_was_killed()?;

    Ok(())
}

/// Test that `zebrad start` can parse the output from `zebrad generate`.
#[tracing::instrument]
fn valid_generated_config(command: &str, expect_stdout_line_contains: &str) -> Result<()> {
    let _init_guard = zebra_test::init();

    let testdir = testdir()?;
    let testdir = &testdir;

    // Add a config file name to tempdir path
    let generated_config_path = testdir.path().join("zebrad.toml");

    tracing::info!(?generated_config_path, "generating valid config");

    // Generate configuration in temp dir path
    let child =
        testdir.spawn_child(args!["generate", "-o": generated_config_path.to_str().unwrap()])?;

    let output = child.wait_with_output()?;
    let output = output.assert_success()?;

    assert_with_context!(
        generated_config_path.exists(),
        &output,
        "generated config file not found"
    );

    tracing::info!(?generated_config_path, "testing valid config parsing");

    // Run command using temp dir and kill it after a few seconds
    let mut child = testdir.spawn_child(args![command])?;
    std::thread::sleep(LAUNCH_DELAY);
    child.kill(false)?;

    let output = child.wait_with_output()?;
    let output = output.assert_failure()?;

    output.stdout_line_contains(expect_stdout_line_contains)?;

    // [Note on port conflict](#Note on port conflict)
    output.assert_was_killed().wrap_err("Possible port or cache conflict. Are there other acceptance test, zebrad, or zcashd processes running?")?;

    assert_with_context!(
        testdir.path().exists(),
        &output,
        "test temp directory not found"
    );
    assert_with_context!(
        generated_config_path.exists(),
        &output,
        "generated config file not found"
    );

    Ok(())
}

/// Check if the config produced by current zebrad is stored.
#[tracing::instrument]
#[allow(clippy::print_stdout)]
fn last_config_is_stored() -> Result<()> {
    let _init_guard = zebra_test::init();

    let testdir = testdir()?;

    // Add a config file name to tempdir path
    let generated_config_path = testdir.path().join("zebrad.toml");

    tracing::info!(?generated_config_path, "generated current config");

    // Generate configuration in temp dir path
    let child =
        testdir.spawn_child(args!["generate", "-o": generated_config_path.to_str().unwrap()])?;
    let output = child.wait_with_output()?;
    let output = output.assert_success()?;

    assert_with_context!(
        generated_config_path.exists(),
        &output,
        "generated config file not found"
    );

    tracing::info!(
        ?generated_config_path,
        "testing current config is in stored configs"
    );

    // Get the contents of the generated config file
    let generated_content =
        fs::read_to_string(generated_config_path).expect("Should have been able to read the file");

    // We need to replace the cache dir path, as stored configs have a dummy `cache_dir` string there.
    let processed_generated_content = generated_content
        .replace(
            zebra_state::Config::default()
                .cache_dir
                .to_str()
                .expect("a valid cache dir"),
            "cache_dir",
        )
        .trim()
        .to_string();

    // Loop all the stored configs
    //
    // TODO: use the same filename list code in last_config_is_stored() and stored_configs_work()
    for config_file in configs_dir()
        .read_dir()
        .expect("read_dir call failed")
        .flatten()
    {
        let config_file_path = config_file.path();
        let config_file_name = config_file_path
            .file_name()
            .expect("config files must have a file name")
            .to_string_lossy();

        if config_file_name.as_ref().starts_with('.') || config_file_name.as_ref().starts_with('#')
        {
            // Skip editor files and other invalid config paths
            tracing::info!(
                ?config_file_path,
                "skipping hidden/temporary config file path"
            );
            continue;
        }

        // Read stored config
        let stored_content = fs::read_to_string(config_file_full_path(config_file_path))
            .expect("Should have been able to read the file")
            .trim()
            .to_string();

        // If any stored config is equal to the generated one, we are good.
        if stored_content.eq(&processed_generated_content) {
            return Ok(());
        }
    }
    println!(
        "\n\
         Here is the missing config file:\n\
         \n\
         {processed_generated_content}\n"
    );

    Err(eyre!(
        "latest zebrad config is not being tested for compatibility.\n\
         \n\
         Take the missing config file logged above,\n\
         and commit it to Zebra's git repository as:\n\
         zebrad/tests/common/configs/{}<next-release-tag>.toml\n\
         \n\
         Or run:\n\
         cargo build {}--bin zebrad && \n\
         zebrad generate |\n\
         sed 's/cache_dir = \".*\"/cache_dir = \"cache_dir\"/' >\n\
         zebrad/tests/common/configs/{}<next-release-tag>.toml",
        if cfg!(feature = "getblocktemplate-rpcs") {
            GET_BLOCK_TEMPLATE_CONFIG_PREFIX
        } else {
            ""
        },
        if cfg!(feature = "getblocktemplate-rpcs") {
            "--features=getblocktemplate-rpcs "
        } else {
            ""
        },
        if cfg!(feature = "getblocktemplate-rpcs") {
            GET_BLOCK_TEMPLATE_CONFIG_PREFIX
        } else {
            ""
        },
    ))
}
/// Checks that Zebra prints an informative message when it cannot parse the
/// config file.
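///
/// The test generates a valid config, replaces the `mempool.eviction_memory_time` setting
/// with a deprecated struct format, then checks that `zebrad start` exits with an
/// informative parse error.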
#[tracing::instrument]
fn invalid_generated_config() -> Result<()> {
    let _init_guard = zebra_test::init();

    let testdir = &testdir()?;

    // Add a config file name to tempdir path.
    let config_path = testdir.path().join("zebrad.toml");

    tracing::info!(
        ?config_path,
        "testing invalid config parsing: generating valid config"
    );

    // Generate a valid config file in the temp dir.
    let child = testdir.spawn_child(args!["generate", "-o": config_path.to_str().unwrap()])?;

    let output = child.wait_with_output()?;
    let output = output.assert_success()?;

    assert_with_context!(
        config_path.exists(),
        &output,
        "generated config file not found"
    );

    // Load the valid config file that Zebra generated.
    let mut config_file = fs::read_to_string(config_path.to_str().unwrap()).unwrap();

    // Let's now alter the config file so that it contains a deprecated format
    // of `mempool.eviction_memory_time`.
    config_file = config_file
        .lines()
        // Remove the valid `eviction_memory_time` key/value pair from the
        // config.
        .filter(|line| !line.contains("eviction_memory_time"))
        .map(|line| line.to_owned() + "\n")
        .collect();

    // Append the `eviction_memory_time` key/value pair in a deprecated format.
    config_file += r"
[mempool.eviction_memory_time]
nanos = 0
secs = 3600
";

    tracing::info!(?config_path, "writing invalid config");

    // Write the altered config file so that Zebra can pick it up.
    fs::write(config_path.to_str().unwrap(), config_file.as_bytes())
        .expect("Could not write the altered config file.");

    tracing::info!(?config_path, "testing invalid config parsing");

    // Run Zebra in a temp dir so that it loads the config.
    let mut child = testdir.spawn_child(args!["start"])?;

    // Return an error if Zebra is running for more than two seconds.
    //
    // Since the config is invalid, Zebra should terminate instantly after its
    // start. Two seconds should be sufficient for Zebra to read the config file
    // and terminate.
    std::thread::sleep(Duration::from_secs(2));
    if child.is_running() {
        // We're going to error anyway, so return an error that makes sense to the developer.
        child.kill(true)?;
        return Err(eyre!(
            "Zebra should have exited after reading the invalid config"
        ));
    }

    let output = child.wait_with_output()?;

    // Check that Zebra produced an informative message.
    output.stderr_contains("Zebra could not parse the provided config file. This might mean you are using a deprecated format of the file.")?;

    Ok(())
}
/// Test that all versions of `zebrad.toml` we have stored can be parsed by the latest `zebrad`.
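///
/// For example, you can run just this test with:
///
/// ```console
/// $ cargo test stored_configs_work -- --nocapture
/// ```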
#[tracing::instrument]
fn stored_configs_work() -> Result<()> {
    let old_configs_dir = configs_dir();

    tracing::info!(?old_configs_dir, "testing older config parsing");

    for config_file in old_configs_dir
        .read_dir()
        .expect("read_dir call failed")
        .flatten()
    {
        let config_file_path = config_file.path();
        let config_file_name = config_file_path
            .file_name()
            .expect("config files must have a file name")
            .to_str()
            .expect("config file names are valid unicode");

        if config_file_name.starts_with('.') || config_file_name.starts_with('#') {
            // Skip editor files and other invalid config paths
            tracing::info!(
                ?config_file_path,
                "skipping hidden/temporary config file path"
            );
            continue;
        }

        // ignore files starting with getblocktemplate prefix
        // if we were not built with the getblocktemplate-rpcs feature.
        #[cfg(not(feature = "getblocktemplate-rpcs"))]
        if config_file_name.starts_with(GET_BLOCK_TEMPLATE_CONFIG_PREFIX) {
            tracing::info!(
                ?config_file_path,
                "skipping getblocktemplate-rpcs config file path"
            );

            continue;
        }

        let run_dir = testdir()?;
        let stored_config_path = config_file_full_path(config_file.path());

        tracing::info!(
            ?stored_config_path,
            "testing old config can be parsed by current zebrad"
        );

        // run zebra with stored config
        let mut child =
            run_dir.spawn_child(args!["-c", stored_config_path.to_str().unwrap(), "start"])?;

        let success_regexes = [
            // When logs are sent to the terminal, we see the config loading message and path.
            format!(
                "loaded zebrad config.*config_path.*=.*{}",
                regex::escape(config_file_name)
            ),
            // If they are sent to a file, we see a log file message on stdout,
            // and a logo, welcome message, and progress bar on stderr.
            "Sending logs to".to_string(),
            // TODO: add expect_stdout_or_stderr_line_matches() and check for this instead:
            //"Thank you for running a mainnet zebrad".to_string(),
        ];

        tracing::info!(
            ?stored_config_path,
            ?success_regexes,
            "waiting for zebrad to parse config and start logging"
        );

        let success_regexes = success_regexes
            .iter()
            .collect_regex_set()
            .expect("regexes are valid");

        // Zebra was able to start with the stored config.
        child.expect_stdout_line_matches(success_regexes)?;

        // finish
        child.kill(false)?;

        let output = child.wait_with_output()?;
        let output = output.assert_failure()?;

        // [Note on port conflict](#Note on port conflict)
        output
            .assert_was_killed()
            .wrap_err("Possible port conflict. Are there other acceptance tests running?")?;
    }

    Ok(())
}

/// Test if `zebrad` can sync the first checkpoint on mainnet.
///
/// The first checkpoint contains a single genesis block.
#[test]
fn sync_one_checkpoint_mainnet() -> Result<()> {
    sync_until(
        TINY_CHECKPOINT_TEST_HEIGHT,
        Mainnet,
        STOP_AT_HEIGHT_REGEX,
        TINY_CHECKPOINT_TIMEOUT,
        None,
        MempoolBehavior::ShouldNotActivate,
        // checkpoint sync is irrelevant here - all tested checkpoints are mandatory
        true,
        true,
    )
    .map(|_tempdir| ())
}

/// Test if `zebrad` can sync the first checkpoint on testnet.
///
/// The first checkpoint contains a single genesis block.
// TODO: disabled because testnet is not currently reliable
// #[test]
#[allow(dead_code)]
fn sync_one_checkpoint_testnet() -> Result<()> {
    sync_until(
        TINY_CHECKPOINT_TEST_HEIGHT,
        Testnet,
        STOP_AT_HEIGHT_REGEX,
        TINY_CHECKPOINT_TIMEOUT,
        None,
        MempoolBehavior::ShouldNotActivate,
        // checkpoint sync is irrelevant here - all tested checkpoints are mandatory
        true,
        true,
    )
    .map(|_tempdir| ())
}

/// Test if `zebrad` can sync the first checkpoint, restart, and stop on load.
#[test]
fn restart_stop_at_height() -> Result<()> {
    let _init_guard = zebra_test::init();

    restart_stop_at_height_for_network(Network::Mainnet, TINY_CHECKPOINT_TEST_HEIGHT)?;
    // TODO: disabled because testnet is not currently reliable
    // restart_stop_at_height_for_network(Network::Testnet, TINY_CHECKPOINT_TEST_HEIGHT)?;

    Ok(())
}
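/// Sync `network` until `height`, restart `zebrad` on the same state, and check that it
/// stops immediately because the state is already at the configured height.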
fn restart_stop_at_height_for_network(network: Network, height: block::Height) -> Result<()> {
    let reuse_tempdir = sync_until(
        height,
        network,
        STOP_AT_HEIGHT_REGEX,
        TINY_CHECKPOINT_TIMEOUT,
        None,
        MempoolBehavior::ShouldNotActivate,
        // checkpoint sync is irrelevant here - all tested checkpoints are mandatory
        true,
        true,
    )?;

    // if stopping corrupts the rocksdb database, zebrad might hang or crash here
    // if stopping does not write the rocksdb database to disk, Zebra will
    // sync, rather than stopping immediately at the configured height
    sync_until(
        height,
        network,
        "state is already at the configured height",
        STOP_ON_LOAD_TIMEOUT,
        reuse_tempdir,
        MempoolBehavior::ShouldNotActivate,
        // checkpoint sync is irrelevant here - all tested checkpoints are mandatory
        true,
        false,
    )?;

    Ok(())
}

/// Test if `zebrad` can activate the mempool on mainnet.
/// Debug activation happens after committing the genesis block.
#[test]
fn activate_mempool_mainnet() -> Result<()> {
    sync_until(
        block::Height(TINY_CHECKPOINT_TEST_HEIGHT.0 + 1),
        Mainnet,
        STOP_AT_HEIGHT_REGEX,
        TINY_CHECKPOINT_TIMEOUT,
        None,
        MempoolBehavior::ForceActivationAt(TINY_CHECKPOINT_TEST_HEIGHT),
        // checkpoint sync is irrelevant here - all tested checkpoints are mandatory
        true,
        true,
    )
    .map(|_tempdir| ())
}

/// Test if `zebrad` can sync some larger checkpoints on mainnet.
///
/// This test might fail or timeout on slow or unreliable networks,
/// so we don't run it by default. It also takes a lot longer than
/// our 10 second target time for default tests.
#[test]
#[ignore]
fn sync_large_checkpoints_mainnet() -> Result<()> {
    let reuse_tempdir = sync_until(
        LARGE_CHECKPOINT_TEST_HEIGHT,
        Mainnet,
        STOP_AT_HEIGHT_REGEX,
        LARGE_CHECKPOINT_TIMEOUT,
        None,
        MempoolBehavior::ShouldNotActivate,
        // checkpoint sync is irrelevant here - all tested checkpoints are mandatory
        true,
        true,
    )?;

    // if this sync fails, see the failure notes in `restart_stop_at_height`
    sync_until(
        (LARGE_CHECKPOINT_TEST_HEIGHT - 1).unwrap(),
        Mainnet,
        "previous state height is greater than the stop height",
        STOP_ON_LOAD_TIMEOUT,
        reuse_tempdir,
        MempoolBehavior::ShouldNotActivate,
        // checkpoint sync is irrelevant here - all tested checkpoints are mandatory
        true,
        false,
    )?;

    Ok(())
}

// TODO: We had `sync_large_checkpoints_testnet` and `sync_large_checkpoints_mempool_testnet`,
// but they were removed because the testnet is unreliable (#1222).
// We should re-add them after we have more testnet instances (#1791).

/// Test if `zebrad` can run side by side with the mempool.
/// This is done by running the mempool and syncing some checkpoints.
#[test]
#[ignore]
fn sync_large_checkpoints_mempool_mainnet() -> Result<()> {
    sync_until(
        MEDIUM_CHECKPOINT_TEST_HEIGHT,
        Mainnet,
        STOP_AT_HEIGHT_REGEX,
        LARGE_CHECKPOINT_TIMEOUT,
        None,
        MempoolBehavior::ForceActivationAt(TINY_CHECKPOINT_TEST_HEIGHT),
        // checkpoint sync is irrelevant here - all tested checkpoints are mandatory
        true,
        true,
    )
    .map(|_tempdir| ())
}
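/// Sync `network` until the mandatory checkpoint height, then stop, leaving a cached
/// state directory behind for the `sync_past_mandatory_checkpoint` tests.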
#[tracing::instrument]
fn create_cached_database(network: Network) -> Result<()> {
    let height = network.mandatory_checkpoint_height();
    let checkpoint_stop_regex =
        format!("{STOP_AT_HEIGHT_REGEX}.*commit checkpoint-verified request");

    create_cached_database_height(
        network,
        height,
        // Use checkpoints to increase sync performance while caching the database
        true,
        // Check that we're still using checkpoints when we finish the cached sync
        &checkpoint_stop_regex,
    )
}
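/// Sync `network` from a cached state to 1200 blocks past the mandatory checkpoint,
/// using full validation instead of checkpoints.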
#[tracing::instrument]
fn sync_past_mandatory_checkpoint(network: Network) -> Result<()> {
    let height = network.mandatory_checkpoint_height() + 1200;
    let full_validation_stop_regex =
        format!("{STOP_AT_HEIGHT_REGEX}.*commit contextually-verified request");

    create_cached_database_height(
        network,
        height.unwrap(),
        // Test full validation by turning checkpoints off
        false,
        // Check that we're doing full validation when we finish the cached sync
        &full_validation_stop_regex,
    )
}
/// Sync `network` until the chain tip is reached, or a timeout elapses.
///
/// The timeout is specified using an environment variable, with the name configured by the
/// `timeout_argument_name` parameter. The value of the environment variable must be the
/// number of minutes, specified as an integer.
#[allow(clippy::print_stderr)]
#[tracing::instrument]
fn full_sync_test(network: Network, timeout_argument_name: &str) -> Result<()> {
    let timeout_argument: Option<u64> = env::var(timeout_argument_name)
        .ok()
        .and_then(|timeout_string| timeout_string.parse().ok());

    // # TODO
    //
    // Replace hard-coded values in create_cached_database_height with:
    // - the timeout in the environmental variable
    // - the path from ZEBRA_CACHED_STATE_DIR
    if let Some(_timeout_minutes) = timeout_argument {
        create_cached_database_height(
            network,
            // Just keep going until we reach the chain tip
            block::Height::MAX,
            // Use the checkpoints to sync quickly, then do full validation until the chain tip
            true,
            // Finish when we reach the chain tip
            SYNC_FINISHED_REGEX,
        )
    } else {
        eprintln!(
            "Skipped full sync test for {network}, \
             set the {timeout_argument_name:?} environmental variable to run the test",
        );

        Ok(())
    }
}

// These tests are ignored because they're too long running to run during our
// traditional CI, and they depend on persistent state that cannot be made
// available in github actions or google cloud build. Instead we run these tests
// directly in a vm we spin up on google compute engine, where we can mount
// drives populated by the sync_to_mandatory_checkpoint tests, snapshot those drives,
// and then use them to more quickly run the sync_past_mandatory_checkpoint tests.

/// Sync up to the mandatory checkpoint height on mainnet and stop.
#[allow(dead_code)]
#[cfg_attr(feature = "test_sync_to_mandatory_checkpoint_mainnet", test)]
fn sync_to_mandatory_checkpoint_mainnet() -> Result<()> {
    let _init_guard = zebra_test::init();
    let network = Mainnet;
    create_cached_database(network)
}

/// Sync to the mandatory checkpoint height on testnet and stop.
#[allow(dead_code)]
#[cfg_attr(feature = "test_sync_to_mandatory_checkpoint_testnet", test)]
fn sync_to_mandatory_checkpoint_testnet() -> Result<()> {
    let _init_guard = zebra_test::init();
    let network = Testnet;
    create_cached_database(network)
}

/// Test syncing 1200 blocks (3 checkpoints) past the mandatory checkpoint on mainnet.
///
/// This assumes that the config'd state is already synced at or near the mandatory checkpoint
/// activation on mainnet. If the state has already synced past the mandatory checkpoint
/// activation by 1200 blocks, it will fail.
#[allow(dead_code)]
#[cfg_attr(feature = "test_sync_past_mandatory_checkpoint_mainnet", test)]
fn sync_past_mandatory_checkpoint_mainnet() -> Result<()> {
    let _init_guard = zebra_test::init();
    let network = Mainnet;
    sync_past_mandatory_checkpoint(network)
}

/// Test syncing 1200 blocks (3 checkpoints) past the mandatory checkpoint on testnet.
///
/// This assumes that the config'd state is already synced at or near the mandatory checkpoint
/// activation on testnet. If the state has already synced past the mandatory checkpoint
/// activation by 1200 blocks, it will fail.
#[allow(dead_code)]
#[cfg_attr(feature = "test_sync_past_mandatory_checkpoint_testnet", test)]
fn sync_past_mandatory_checkpoint_testnet() -> Result<()> {
    let _init_guard = zebra_test::init();
    let network = Testnet;
    sync_past_mandatory_checkpoint(network)
}

/// Test if `zebrad` can fully sync the chain on mainnet.
///
/// This test takes a long time to run, so we don't run it by default. This test is only executed
/// if there is an environment variable named `FULL_SYNC_MAINNET_TIMEOUT_MINUTES` set with the number
/// of minutes to wait for synchronization to complete before considering that the test failed.
#[test]
#[ignore]
fn full_sync_mainnet() -> Result<()> {
    // TODO: add "ZEBRA" at the start of this env var, to avoid clashes
    full_sync_test(Mainnet, "FULL_SYNC_MAINNET_TIMEOUT_MINUTES")
}

/// Test if `zebrad` can fully sync the chain on testnet.
///
/// This test takes a long time to run, so we don't run it by default. This test is only executed
/// if there is an environment variable named `FULL_SYNC_TESTNET_TIMEOUT_MINUTES` set with the number
/// of minutes to wait for synchronization to complete before considering that the test failed.
#[test]
#[ignore]
fn full_sync_testnet() -> Result<()> {
    // TODO: add "ZEBRA" at the start of this env var, to avoid clashes
    full_sync_test(Testnet, "FULL_SYNC_TESTNET_TIMEOUT_MINUTES")
}
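/// Test that the Prometheus metrics endpoint responds to an HTTP request when
/// `metrics.endpoint_addr` is configured.
///
/// A rough manual equivalent of this check, assuming a hypothetical port 9999:
///
/// ```console
/// $ curl http://127.0.0.1:9999
/// # TYPE zebrad_build_info counter
/// ...
/// ```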
#[cfg(feature = "prometheus")]
#[tokio::test]
async fn metrics_endpoint() -> Result<()> {
    use hyper::Client;

    let _init_guard = zebra_test::init();

    // [Note on port conflict](#Note on port conflict)
    let port = random_known_port();
    let endpoint = format!("127.0.0.1:{port}");
    let url = format!("http://{endpoint}");

    // Write a configuration that has metrics endpoint_addr set
    let mut config = default_test_config(Mainnet)?;
    config.metrics.endpoint_addr = Some(endpoint.parse().unwrap());

    let dir = testdir()?.with_config(&mut config)?;
    let child = dir.spawn_child(args!["start"])?;

    // Run `zebrad` for a few seconds before testing the endpoint
    // Since we're an async function, we have to use a sleep future, not thread sleep.
    tokio::time::sleep(LAUNCH_DELAY).await;

    // Create an http client
    let client = Client::new();

    // Test metrics endpoint
    let res = client.get(url.try_into().expect("url is valid")).await;
    let (res, child) = child.kill_on_error(res)?;
    assert!(res.status().is_success());
    let body = hyper::body::to_bytes(res).await;
    let (body, mut child) = child.kill_on_error(body)?;

    child.kill(false)?;

    let output = child.wait_with_output()?;
    let output = output.assert_failure()?;

    output.any_output_line_contains(
        "# TYPE zebrad_build_info counter",
        &body,
        "metrics exporter response",
        "the metrics response header",
    )?;
    std::str::from_utf8(&body).expect("unexpected invalid UTF-8 in metrics exporter response");

    // Make sure metrics was started
    output.stdout_line_contains(format!("Opened metrics endpoint at {endpoint}").as_str())?;

    // [Note on port conflict](#Note on port conflict)
    output
        .assert_was_killed()
        .wrap_err("Possible port conflict. Are there other acceptance tests running?")?;

    Ok(())
}
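/// Test that the tracing filter endpoint responds to HTTP requests when
/// `tracing.endpoint_addr` is configured, and that posting a new filter changes it.
///
/// A rough manual equivalent of this check, assuming a hypothetical port 3000:
///
/// ```console
/// $ curl -X POST --data 'zebrad=debug' http://127.0.0.1:3000/filter
/// $ curl http://127.0.0.1:3000/filter
/// zebrad=debug
/// ```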
#[cfg(feature = "filter-reload")]
#[tokio::test]
async fn tracing_endpoint() -> Result<()> {
    use hyper::{Body, Client, Request};

    let _init_guard = zebra_test::init();

    // [Note on port conflict](#Note on port conflict)
    let port = random_known_port();
    let endpoint = format!("127.0.0.1:{port}");
    let url_default = format!("http://{endpoint}");
    let url_filter = format!("{url_default}/filter");

    // Write a configuration that has tracing endpoint_addr option set
    let mut config = default_test_config(Mainnet)?;
    config.tracing.endpoint_addr = Some(endpoint.parse().unwrap());

    let dir = testdir()?.with_config(&mut config)?;
    let child = dir.spawn_child(args!["start"])?;

    // Run `zebrad` for a few seconds before testing the endpoint
    // Since we're an async function, we have to use a sleep future, not thread sleep.
    tokio::time::sleep(LAUNCH_DELAY).await;

    // Create an http client
    let client = Client::new();

    // Test tracing endpoint
    let res = client
        .get(url_default.try_into().expect("url_default is valid"))
        .await;
    let (res, child) = child.kill_on_error(res)?;
    assert!(res.status().is_success());
    let body = hyper::body::to_bytes(res).await;
    let (body, child) = child.kill_on_error(body)?;

    // Set a filter and make sure it was changed
    let request = Request::post(url_filter.clone())
        .body(Body::from("zebrad=debug"))
        .unwrap();
    let post = client.request(request).await;
    let (_post, child) = child.kill_on_error(post)?;

    let tracing_res = client
        .get(url_filter.try_into().expect("url_filter is valid"))
        .await;
    let (tracing_res, child) = child.kill_on_error(tracing_res)?;
    assert!(tracing_res.status().is_success());
    let tracing_body = hyper::body::to_bytes(tracing_res).await;
    let (tracing_body, mut child) = child.kill_on_error(tracing_body)?;

    child.kill(false)?;

    let output = child.wait_with_output()?;
    let output = output.assert_failure()?;

    // Make sure tracing endpoint was started
    output.stdout_line_contains(format!("Opened tracing endpoint at {endpoint}").as_str())?;
    // TODO: Match some trace level messages from output

    // Make sure the endpoint header is correct
    // The header is split over two lines. But we don't want to require line
    // breaks at a specific word, so we run two checks for different substrings.
    output.any_output_line_contains(
        "HTTP endpoint allows dynamic control of the filter",
        &body,
        "tracing filter endpoint response",
        "the tracing response header",
    )?;
    output.any_output_line_contains(
        "tracing events",
        &body,
        "tracing filter endpoint response",
        "the tracing response header",
    )?;
    std::str::from_utf8(&body).expect("unexpected invalid UTF-8 in tracing filter response");

    // Make sure endpoint requests change the filter
    output.any_output_line_contains(
        "zebrad=debug",
        &tracing_body,
        "tracing filter endpoint response",
        "the modified tracing filter",
    )?;
    std::str::from_utf8(&tracing_body)
        .expect("unexpected invalid UTF-8 in modified tracing filter response");

    // [Note on port conflict](#Note on port conflict)
    output
        .assert_was_killed()
        .wrap_err("Possible port conflict. Are there other acceptance tests running?")?;

    Ok(())
}
/// Test that the JSON-RPC endpoint responds to a request,
/// when configured with a single thread.
#[tokio::test]
async fn rpc_endpoint_single_thread() -> Result<()> {
    rpc_endpoint(false).await
}

/// Test that the JSON-RPC endpoint responds to a request,
/// when configured with multiple threads.
#[tokio::test]
async fn rpc_endpoint_parallel_threads() -> Result<()> {
    rpc_endpoint(true).await
}

/// Test that the JSON-RPC endpoint responds to a request.
///
/// Set `parallel_cpu_threads` to true to auto-configure based on the number of CPU cores.
#[tracing::instrument]
async fn rpc_endpoint(parallel_cpu_threads: bool) -> Result<()> {
    let _init_guard = zebra_test::init();
    if zebra_test::net::zebra_skip_network_tests() {
        return Ok(());
    }

    // Write a configuration that has RPC listen_addr set
    // [Note on port conflict](#Note on port conflict)
    let mut config = random_known_rpc_port_config(parallel_cpu_threads, Mainnet)?;

    let dir = testdir()?.with_config(&mut config)?;
    let mut child = dir.spawn_child(args!["start"])?;

    // Wait until port is open.
    child.expect_stdout_line_matches(
        format!("Opened RPC endpoint at {}", config.rpc.listen_addr.unwrap()).as_str(),
    )?;

    // Create an http client
    let client = RpcRequestClient::new(config.rpc.listen_addr.unwrap());

    // Make the call to the `getinfo` RPC method
    let res = client.call("getinfo", "[]".to_string()).await?;

    // Test rpc endpoint response
    assert!(res.status().is_success());

    let body = res.bytes().await;
    let (body, mut child) = child.kill_on_error(body)?;

    let parsed: Value = serde_json::from_slice(&body)?;

    // Check that we have at least 4 characters in the `build` field.
    let build = parsed["result"]["build"].as_str().unwrap();
    assert!(build.len() > 4, "Got {build}");

    // Check that the `subversion` field has "Zebra" in it.
    let subversion = parsed["result"]["subversion"].as_str().unwrap();
    assert!(subversion.contains("Zebra"), "Got {subversion}");

    child.kill(false)?;

    let output = child.wait_with_output()?;
    let output = output.assert_failure()?;

    // [Note on port conflict](#Note on port conflict)
    output
        .assert_was_killed()
        .wrap_err("Possible port conflict. Are there other acceptance tests running?")?;

    Ok(())
}
/// Test that the JSON-RPC endpoint responds to requests with different content types.
///
/// This test ensures that the curl examples of zcashd RPC methods will also work in Zebra.
///
/// <https://zcash.github.io/rpc/getblockchaininfo.html>
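///
/// A zcashd-style request of the kind this test emulates, with a hypothetical port:
///
/// ```console
/// $ curl --data-binary '{"jsonrpc": "1.0", "id": "curltest", "method": "getinfo", "params": []}' \
///     -H 'content-type: text/plain;' http://127.0.0.1:8232/
/// ```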
#[tokio::test]
async fn rpc_endpoint_client_content_type() -> Result<()> {
    let _init_guard = zebra_test::init();
    if zebra_test::net::zebra_skip_network_tests() {
        return Ok(());
    }

    // Write a configuration that has RPC listen_addr set
    // [Note on port conflict](#Note on port conflict)
    let mut config = random_known_rpc_port_config(true, Mainnet)?;

    let dir = testdir()?.with_config(&mut config)?;
    let mut child = dir.spawn_child(args!["start"])?;

    // Wait until port is open.
    child.expect_stdout_line_matches(
        format!("Opened RPC endpoint at {}", config.rpc.listen_addr.unwrap()).as_str(),
    )?;

    // Create an http client
    let client = RpcRequestClient::new(config.rpc.listen_addr.unwrap());

    // Call the `getinfo` RPC method with no content type.
    let res = client
        .call_with_no_content_type("getinfo", "[]".to_string())
        .await?;

    // Zebra will insert a valid `application/json` content type and succeed.
    assert!(res.status().is_success());

    // Call the `getinfo` RPC method with a `text/plain` content type.
    let res = client
        .call_with_content_type("getinfo", "[]".to_string(), "text/plain".to_string())
        .await?;

    // Zebra will replace it with the valid `application/json` content type and succeed.
    assert!(res.status().is_success());

    // Call the `getinfo` RPC method with a `text/plain;` content type, as in the zcashd rpc docs.
    let res = client
        .call_with_content_type("getinfo", "[]".to_string(), "text/plain;".to_string())
        .await?;

    // Zebra will replace it with the valid `application/json` content type and succeed.
    assert!(res.status().is_success());

    // Call the `getinfo` RPC method with a `text/plain; other string` content type.
    let res = client
        .call_with_content_type(
            "getinfo",
            "[]".to_string(),
            "text/plain; other string".to_string(),
        )
        .await?;

    // Zebra will replace it with the valid `application/json` content type and succeed.
    assert!(res.status().is_success());

    // Call the `getinfo` RPC method with a valid `application/json` content type.
    let res = client
        .call_with_content_type("getinfo", "[]".to_string(), "application/json".to_string())
        .await?;

    // Zebra will not replace the valid content type, and the call will succeed.
    assert!(res.status().is_success());

    // Call the `getinfo` RPC method with an invalid string as the content type.
    let res = client
        .call_with_content_type("getinfo", "[]".to_string(), "whatever".to_string())
        .await?;

    // Zebra will not replace the unrecognized content type, and the call will fail.
    assert!(res.status().is_client_error());

    Ok(())
}
/// Test that Zebra's non-blocking logger works, by creating lots of debug output, but not reading the logs.
/// Then make sure Zebra drops excess log lines. (Previously, it would block waiting for logs to be read.)
///
/// This test is unreliable and sometimes hangs on macOS.
#[test]
#[cfg(not(target_os = "macos"))]
fn non_blocking_logger() -> Result<()> {
    use futures::FutureExt;
    use std::{sync::mpsc, time::Duration};

    let rt = tokio::runtime::Runtime::new().unwrap();
    let (done_tx, done_rx) = mpsc::channel();

    let test_task_handle: tokio::task::JoinHandle<Result<()>> = rt.spawn(async move {
        let _init_guard = zebra_test::init();

        // Write a configuration that has RPC listen_addr set
        // [Note on port conflict](#Note on port conflict)
        let mut config = random_known_rpc_port_config(false, Mainnet)?;
        config.tracing.filter = Some("trace".to_string());
        config.tracing.buffer_limit = 100;
        let zebra_rpc_address = config.rpc.listen_addr.unwrap();

        let dir = testdir()?.with_config(&mut config)?;
        let mut child = dir
            .spawn_child(args!["start"])?
            .with_timeout(TINY_CHECKPOINT_TIMEOUT);

        // Wait until port is open.
        child.expect_stdout_line_matches(
            format!("Opened RPC endpoint at {}", config.rpc.listen_addr.unwrap()).as_str(),
        )?;

        // Create an http client
        let client = RpcRequestClient::new(zebra_rpc_address);

        // Most of Zebra's lines are 100-200 characters long, so 500 requests should print enough to fill the unix pipe,
        // fill the channel that tracing logs are queued onto, and drop logs rather than block execution.
        for _ in 0..500 {
            let res = client.call("getinfo", "[]".to_string()).await?;

            // Test that zebrad rpc endpoint is still responding to requests
            assert!(res.status().is_success());
        }

        child.kill(false)?;

        let output = child.wait_with_output()?;
        let output = output.assert_failure()?;

        // [Note on port conflict](#Note on port conflict)
        output
            .assert_was_killed()
            .wrap_err("Possible port conflict. Are there other acceptance tests running?")?;

        done_tx.send(())?;

        Ok(())
    });

    // Wait until the spawned task finishes or return an error in 45 seconds
    if done_rx.recv_timeout(Duration::from_secs(45)).is_err() {
        return Err(eyre!("unexpected test task hang"));
    }

    rt.shutdown_timeout(Duration::from_secs(3));

    match test_task_handle.now_or_never() {
        Some(Ok(result)) => result,
        Some(Err(error)) => Err(eyre!("join error: {:?}", error)),
        None => Err(eyre!("unexpected test task hang")),
    }
}

/// Make sure `lightwalletd` works with Zebra, when both their states are empty.
///
/// This test only runs when the `ZEBRA_TEST_LIGHTWALLETD` env var is set.
///
/// This test doesn't work on Windows, so it is always skipped on that platform.
#[test]
#[cfg(not(target_os = "windows"))]
fn lightwalletd_integration() -> Result<()> {
    lightwalletd_integration_test(LaunchWithEmptyState {
        launches_lightwalletd: true,
    })
}
/// Make sure `zebrad` can sync from peers, but don't actually launch `lightwalletd`.
///
/// This test only runs when the `ZEBRA_CACHED_STATE_DIR` env var is set.
///
/// This test might work on Windows.
#[test]
fn zebrad_update_sync() -> Result<()> {
    lightwalletd_integration_test(UpdateZebraCachedStateNoRpc)
}

/// Make sure `lightwalletd` can sync from Zebra, in update sync mode.
///
/// This test only runs when:
/// - the `ZEBRA_TEST_LIGHTWALLETD`, `ZEBRA_CACHED_STATE_DIR`, and
///   `LIGHTWALLETD_DATA_DIR` env vars are set, and
/// - Zebra is compiled with `--features=lightwalletd-grpc-tests`.
///
/// This test doesn't work on Windows, so it is always skipped on that platform.
#[test]
#[cfg(not(target_os = "windows"))]
#[cfg(feature = "lightwalletd-grpc-tests")]
fn lightwalletd_update_sync() -> Result<()> {
    lightwalletd_integration_test(UpdateCachedState)
}
/// Make sure `lightwalletd` can fully sync from genesis using Zebra.
///
/// This test only runs when:
/// - the `ZEBRA_TEST_LIGHTWALLETD` and `ZEBRA_CACHED_STATE_DIR` env vars are set, and
/// - Zebra is compiled with `--features=lightwalletd-grpc-tests`.
///
/// This test doesn't work on Windows, so it is always skipped on that platform.
#[test]
#[ignore]
#[cfg(not(target_os = "windows"))]
#[cfg(feature = "lightwalletd-grpc-tests")]
fn lightwalletd_full_sync() -> Result<()> {
    lightwalletd_integration_test(FullSyncFromGenesis {
        allow_lightwalletd_cached_state: false,
    })
}

/// Make sure `lightwalletd` can sync from Zebra, in all available modes.
///
/// Runs the tests in this order:
/// - launch lightwalletd with empty states,
/// - if `ZEBRA_CACHED_STATE_DIR` is set:
///   - run a full sync
/// - if `ZEBRA_CACHED_STATE_DIR` and `LIGHTWALLETD_DATA_DIR` are set:
///   - run a quick update sync,
///   - run a send transaction gRPC test,
///   - run read-only gRPC tests.
///
/// The lightwalletd full, update, and gRPC tests only run with `--features=lightwalletd-grpc-tests`.
///
/// These tests don't work on Windows, so they are always skipped on that platform.
#[tokio::test]
#[ignore]
#[cfg(not(target_os = "windows"))]
async fn lightwalletd_test_suite() -> Result<()> {
    lightwalletd_integration_test(LaunchWithEmptyState {
        launches_lightwalletd: true,
    })?;

    // Only runs when ZEBRA_CACHED_STATE_DIR is set.
    lightwalletd_integration_test(UpdateZebraCachedStateNoRpc)?;

    // These tests need the compile-time gRPC feature
    #[cfg(feature = "lightwalletd-grpc-tests")]
    {
        // Do the quick tests first

        // Only runs when LIGHTWALLETD_DATA_DIR and ZEBRA_CACHED_STATE_DIR are set
        lightwalletd_integration_test(UpdateCachedState)?;

        // Only runs when LIGHTWALLETD_DATA_DIR and ZEBRA_CACHED_STATE_DIR are set
        common::lightwalletd::wallet_grpc_test::run().await?;

        // Then do the slow tests

        // Only runs when ZEBRA_CACHED_STATE_DIR is set.
        // When manually running the test suite, allow cached state in the full sync test.
        lightwalletd_integration_test(FullSyncFromGenesis {
            allow_lightwalletd_cached_state: true,
        })?;

        // Only runs when LIGHTWALLETD_DATA_DIR and ZEBRA_CACHED_STATE_DIR are set
        common::lightwalletd::send_transaction_test::run().await?;
    }

    Ok(())
}

/// Run a lightwalletd integration test with a configuration for `test_type`.
///
/// Tests that sync `lightwalletd` to the chain tip require the `lightwalletd-grpc-tests` feature:
/// - [`FullSyncFromGenesis`]
/// - [`UpdateCachedState`]
///
/// Set `FullSyncFromGenesis { allow_lightwalletd_cached_state: true }` to speed up manual full sync tests.
///
/// # Reliability
///
/// The random ports in this test can cause [rare port conflicts](#Note on port conflict).
///
/// # Panics
///
/// If the `test_type` requires `--features=lightwalletd-grpc-tests`,
/// but Zebra was not compiled with that feature.
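///
/// Usage sketch (mirrors the calls in the tests above):
///
/// ```ignore
/// lightwalletd_integration_test(UpdateZebraCachedStateNoRpc)?;
/// lightwalletd_integration_test(FullSyncFromGenesis {
///     allow_lightwalletd_cached_state: false,
/// })?;
/// ```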
#[tracing::instrument]
fn lightwalletd_integration_test(test_type: TestType) -> Result<()> {
    let _init_guard = zebra_test::init();

    // We run these sync tests with a network connection, for better test coverage.
    let use_internet_connection = true;
    let network = Mainnet;
    let test_name = "lightwalletd_integration_test";

    if test_type.launches_lightwalletd() && !can_spawn_lightwalletd_for_rpc(test_name, test_type) {
        tracing::info!("skipping test due to missing lightwalletd network or cached state");

        return Ok(());
    }

    // Launch zebra with peers and using a predefined zebrad state path.
    let (mut zebrad, zebra_rpc_address) = if let Some(zebrad_and_address) =
        spawn_zebrad_for_rpc(network, test_name, test_type, use_internet_connection)?
    {
        tracing::info!(
            ?test_type,
            "running lightwalletd & zebrad integration test, launching zebrad...",
        );

        zebrad_and_address
    } else {
        // Skip the test, we don't have the required cached state
        return Ok(());
    };

    // Store the state version message so we can wait for the upgrade later if needed.
    let state_version_message = wait_for_state_version_message(&mut zebrad)?;

    if test_type.needs_zebra_cached_state() {
        zebrad
            .expect_stdout_line_matches(r"loaded Zebra state cache .*tip.*=.*Height\([0-9]{7}\)")?;
    } else {
        // Timeout the test if we're somehow accidentally using a cached state
        zebrad.expect_stdout_line_matches("loaded Zebra state cache .*tip.*=.*None")?;
    }

    // Wait for the state to upgrade and the RPC port, if the upgrade is short.
    //
    // If incompletely upgraded states get written to the CI cache,
    // change DATABASE_FORMAT_UPGRADE_IS_LONG to true.
    if test_type.launches_lightwalletd() && !DATABASE_FORMAT_UPGRADE_IS_LONG {
        tracing::info!(
            ?test_type,
            ?zebra_rpc_address,
            "waiting for zebrad to open its RPC port..."
        );

        wait_for_state_version_upgrade(
            &mut zebrad,
            &state_version_message,
            database_format_version_in_code(),
            [format!(
                "Opened RPC endpoint at {}",
                zebra_rpc_address.expect("lightwalletd test must have RPC port")
            )],
        )?;
    } else {
        wait_for_state_version_upgrade(
            &mut zebrad,
            &state_version_message,
            database_format_version_in_code(),
            None,
        )?;
    }

    // Launch lightwalletd, if needed
    let lightwalletd_and_port = if test_type.launches_lightwalletd() {
        tracing::info!(
            ?zebra_rpc_address,
            "launching lightwalletd connected to zebrad",
        );

        // Launch lightwalletd
        let (mut lightwalletd, lightwalletd_rpc_port) = spawn_lightwalletd_for_rpc(
            network,
            test_name,
            test_type,
            zebra_rpc_address.expect("lightwalletd test must have RPC port"),
        )?
        .expect("already checked for lightwalletd cached state and network");

        tracing::info!(
            ?lightwalletd_rpc_port,
            "spawned lightwalletd connected to zebrad",
        );

        // Check that `lightwalletd` is calling the expected Zebra RPCs

        // getblockchaininfo
        if test_type.needs_zebra_cached_state() {
            lightwalletd.expect_stdout_line_matches(
                "Got sapling height 419200 block height [0-9]{7} chain main branchID [0-9a-f]{8}",
            )?;
        } else {
            // Timeout the test if we're somehow accidentally using a cached state in our temp dir
            lightwalletd.expect_stdout_line_matches(
                "Got sapling height 419200 block height [0-9]{1,6} chain main branchID 00000000",
            )?;
        }

        if test_type.needs_lightwalletd_cached_state() {
            lightwalletd
                .expect_stdout_line_matches("Done reading [0-9]{7} blocks from disk cache")?;
        } else if !test_type.allow_lightwalletd_cached_state() {
            // Timeout the test if we're somehow accidentally using a cached state in our temp dir
            lightwalletd.expect_stdout_line_matches("Done reading 0 blocks from disk cache")?;
        }

        // getblock with the first Sapling block in Zebra's state
        //
        // zcash/lightwalletd calls getbestblockhash here, but
        // adityapk00/lightwalletd calls getblock
        //
        // The log also depends on what is in Zebra's state:
        //
        // # Cached Zebra State
        //
        // lightwalletd ingests blocks into its cache.
        //
        // # Empty Zebra State
        //
        // lightwalletd tries to download the Sapling activation block, but it's not in the state.
        //
        // Until the Sapling activation block has been downloaded,
        // lightwalletd will keep retrying getblock.
        if !test_type.allow_lightwalletd_cached_state() {
            if test_type.needs_zebra_cached_state() {
                lightwalletd.expect_stdout_line_matches(
                    "([Aa]dding block to cache)|([Ww]aiting for block)",
                )?;
            } else {
                lightwalletd.expect_stdout_line_matches(regex::escape(
                    "Waiting for zcashd height to reach Sapling activation height (419200)",
                ))?;
            }
        }

        Some((lightwalletd, lightwalletd_rpc_port))
    } else {
        None
    };

    // Wait for zebrad and lightwalletd to sync, if needed.
    let (mut zebrad, lightwalletd) = if test_type.needs_zebra_cached_state() {
        if let Some((lightwalletd, lightwalletd_rpc_port)) = lightwalletd_and_port {
            #[cfg(feature = "lightwalletd-grpc-tests")]
            {
                use common::lightwalletd::sync::wait_for_zebrad_and_lightwalletd_sync;

                tracing::info!(
                    ?lightwalletd_rpc_port,
                    "waiting for zebrad and lightwalletd to sync...",
                );

                let (lightwalletd, mut zebrad) = wait_for_zebrad_and_lightwalletd_sync(
                    lightwalletd,
                    lightwalletd_rpc_port,
                    zebrad,
                    zebra_rpc_address.expect("lightwalletd test must have RPC port"),
                    test_type,
                    // We want to wait for the mempool and network for better coverage
                    true,
                    use_internet_connection,
                )?;

                // Wait for the state to upgrade, if the upgrade is long.
                // If this line hangs, change DATABASE_FORMAT_UPGRADE_IS_LONG to false,
                // or combine "wait for sync" with "wait for state version upgrade".
                if DATABASE_FORMAT_UPGRADE_IS_LONG {
                    wait_for_state_version_upgrade(
                        &mut zebrad,
                        &state_version_message,
                        database_format_version_in_code(),
                        None,
                    )?;
                }

                (zebrad, Some(lightwalletd))
            }

            #[cfg(not(feature = "lightwalletd-grpc-tests"))]
            panic!(
                "the {test_type:?} test requires `cargo test --features lightwalletd-grpc-tests`\n\
                 zebrad: {zebrad:?}\n\
                 lightwalletd: {lightwalletd:?}\n\
                 lightwalletd_rpc_port: {lightwalletd_rpc_port:?}"
            );
        } else {
            // We're just syncing Zebra, so there's no lightwalletd to check
            tracing::info!(?test_type, "waiting for zebrad to sync to the tip");
            zebrad.expect_stdout_line_matches(SYNC_FINISHED_REGEX)?;

            // Wait for the state to upgrade, if the upgrade is long.
            // If this line hangs, change DATABASE_FORMAT_UPGRADE_IS_LONG to false.
            if DATABASE_FORMAT_UPGRADE_IS_LONG {
                wait_for_state_version_upgrade(
                    &mut zebrad,
                    &state_version_message,
                    database_format_version_in_code(),
                    None,
                )?;
            }

            (zebrad, None)
        }
    } else {
        let lightwalletd = lightwalletd_and_port.map(|(lightwalletd, _port)| lightwalletd);

        // We don't have a cached state, so we don't do any tip checks for Zebra or lightwalletd
        (zebrad, lightwalletd)
    };

    tracing::info!(
        ?test_type,
        "cleaning up child processes and checking for errors",
    );

    // Cleanup both processes
    //
    // If the test fails here, see the [note on port conflict](#Note on port conflict)
    //
    // zcash/lightwalletd exits by itself, but
    // adityapk00/lightwalletd keeps on going, so it gets killed by the test harness.
    zebrad.kill(false)?;

    if let Some(mut lightwalletd) = lightwalletd {
        lightwalletd.kill(false)?;

        let lightwalletd_output = lightwalletd.wait_with_output()?.assert_failure()?;

        lightwalletd_output
            .assert_was_killed()
            .wrap_err("Possible port conflict. Are there other acceptance tests running?")?;
    }

    let zebrad_output = zebrad.wait_with_output()?.assert_failure()?;

    zebrad_output
        .assert_was_killed()
        .wrap_err("Possible port conflict. Are there other acceptance tests running?")?;

    Ok(())
}

/// This test starts 2 zebrad nodes one after the other using the same Zcash listener port.
/// The first node spawned is expected to get exclusive use of the port.
/// The second node will panic with the Zcash listener conflict hint added in #1535.
#[test]
fn zebra_zcash_listener_conflict() -> Result<()> {
    let _init_guard = zebra_test::init();

    // [Note on port conflict](#Note on port conflict)
    let port = random_known_port();
    let listen_addr = format!("127.0.0.1:{port}");

    // Write a configuration that has our created network listen_addr
    let mut config = default_test_config(Mainnet)?;
    config.network.listen_addr = listen_addr.parse().unwrap();
    let dir1 = testdir()?.with_config(&mut config)?;
    let regex1 = regex::escape(&format!("Opened Zcash protocol endpoint at {listen_addr}"));

    // From another folder create a configuration with the same listener.
    // `network.listen_addr` will be the same in the 2 nodes.
    // (But since the config is ephemeral, they will have different state paths.)
    let dir2 = testdir()?.with_config(&mut config)?;

    check_config_conflict(dir1, regex1.as_str(), dir2, PORT_IN_USE_ERROR.as_str())?;

    Ok(())
}

/// Start 2 zebrad nodes using the same metrics listener port, but different
/// state directories and Zcash listener ports. The first node should get
/// exclusive use of the port. The second node will panic with the Zcash metrics
/// conflict hint added in #1535.
#[test]
#[cfg(feature = "prometheus")]
fn zebra_metrics_conflict() -> Result<()> {
    let _init_guard = zebra_test::init();

    // [Note on port conflict](#Note on port conflict)
    let port = random_known_port();
    let listen_addr = format!("127.0.0.1:{port}");

    // Write a configuration that has our created metrics endpoint_addr
    let mut config = default_test_config(Mainnet)?;
    config.metrics.endpoint_addr = Some(listen_addr.parse().unwrap());
    let dir1 = testdir()?.with_config(&mut config)?;
    let regex1 = regex::escape(&format!(r"Opened metrics endpoint at {listen_addr}"));

    // From another folder create a configuration with the same endpoint.
    // `metrics.endpoint_addr` will be the same in the 2 nodes.
    // But they will have different Zcash listeners (auto port) and states (ephemeral)
    let dir2 = testdir()?.with_config(&mut config)?;

    check_config_conflict(dir1, regex1.as_str(), dir2, PORT_IN_USE_ERROR.as_str())?;

    Ok(())
}

/// Start 2 zebrad nodes using the same tracing listener port, but different
/// state directories and Zcash listener ports. The first node should get
/// exclusive use of the port. The second node will panic with the Zcash tracing
/// conflict hint added in #1535.
#[test]
#[cfg(feature = "filter-reload")]
fn zebra_tracing_conflict() -> Result<()> {
    let _init_guard = zebra_test::init();

    // [Note on port conflict](#Note on port conflict)
    let port = random_known_port();
    let listen_addr = format!("127.0.0.1:{port}");

    // Write a configuration that has our created tracing endpoint_addr
    let mut config = default_test_config(Mainnet)?;
    config.tracing.endpoint_addr = Some(listen_addr.parse().unwrap());
    let dir1 = testdir()?.with_config(&mut config)?;
    let regex1 = regex::escape(&format!(r"Opened tracing endpoint at {listen_addr}"));

    // From another folder create a configuration with the same endpoint.
    // `tracing.endpoint_addr` will be the same in the 2 nodes.
    // But they will have different Zcash listeners (auto port) and states (ephemeral)
    let dir2 = testdir()?.with_config(&mut config)?;

    check_config_conflict(dir1, regex1.as_str(), dir2, PORT_IN_USE_ERROR.as_str())?;

    Ok(())
}

/// Start 2 zebrad nodes using the same RPC listener port, but different
/// state directories and Zcash listener ports. The first node should get
/// exclusive use of the port. The second node will panic.
///
/// This test is sometimes unreliable on Windows, and hangs on macOS.
/// We believe this is a CI infrastructure issue, not a platform-specific issue.
#[test]
#[cfg(not(any(target_os = "windows", target_os = "macos")))]
fn zebra_rpc_conflict() -> Result<()> {
    let _init_guard = zebra_test::init();

    if zebra_test::net::zebra_skip_network_tests() {
        return Ok(());
    }

    // Write a configuration that has RPC listen_addr set
    // [Note on port conflict](#Note on port conflict)
    //
    // This is the required setting to detect port conflicts.
    let mut config = random_known_rpc_port_config(false, Mainnet)?;

    let dir1 = testdir()?.with_config(&mut config)?;
    let regex1 = regex::escape(&format!(
        r"Opened RPC endpoint at {}",
        config.rpc.listen_addr.unwrap(),
    ));

    // From another folder create a configuration with the same endpoint.
    // `rpc.listen_addr` will be the same in the 2 nodes.
    // But they will have different Zcash listeners (auto port) and states (ephemeral)
    let dir2 = testdir()?.with_config(&mut config)?;

    check_config_conflict(dir1, regex1.as_str(), dir2, "Unable to start RPC server")?;

    Ok(())
}

/// Start 2 zebrad nodes using the same state directory, but different Zcash
/// listener ports. The first node should get exclusive access to the database.
/// The second node will panic with the Zcash state conflict hint added in #1535.
#[test]
fn zebra_state_conflict() -> Result<()> {
    let _init_guard = zebra_test::init();

    // A persistent config has a fixed temp state directory, but asks the OS to
    // automatically choose an unused port
    let mut config = persistent_test_config(Mainnet)?;
    let dir_conflict = testdir()?.with_config(&mut config)?;

    // Windows problems with this match will be worked on in #1654.
    // For now, we only match the whole opened path on unix.
    let contains = if cfg!(unix) {
        let mut dir_conflict_full = PathBuf::new();
        dir_conflict_full.push(dir_conflict.path());
        dir_conflict_full.push("state");
        dir_conflict_full.push(format!(
            "v{}",
            zebra_state::database_format_version_in_code().major,
        ));
        dir_conflict_full.push(config.network.network.to_string().to_lowercase());
        format!(
            "Opened Zebra state cache at {}",
            dir_conflict_full.display()
        )
    } else {
        String::from("Opened Zebra state cache at ")
    };

    check_config_conflict(
        dir_conflict.path(),
        regex::escape(&contains).as_str(),
        dir_conflict.path(),
        LOCK_FILE_ERROR.as_str(),
    )?;

    Ok(())
}

/// Launch a node in `first_dir`, wait a few seconds, then launch a node in
/// `second_dir`. Check that the first node's stdout contains
/// `first_stdout_regex`, and the second node's stderr contains
/// `second_stderr_regex`.
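///
/// Usage sketch (mirrors the conflict tests above; `config` is a test config
/// whose conflicting resource is already set, and `regex1` is the expected
/// stdout line for the first node):
///
/// ```ignore
/// let dir1 = testdir()?.with_config(&mut config)?;
/// let dir2 = testdir()?.with_config(&mut config)?;
/// check_config_conflict(dir1, regex1.as_str(), dir2, PORT_IN_USE_ERROR.as_str())?;
/// ```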
#[tracing::instrument]
fn check_config_conflict<T, U>(
    first_dir: T,
    first_stdout_regex: &str,
    second_dir: U,
    second_stderr_regex: &str,
) -> Result<()>
where
    T: ZebradTestDirExt + std::fmt::Debug,
    U: ZebradTestDirExt + std::fmt::Debug,
{
    // Start the first node
    let mut node1 = first_dir.spawn_child(args!["start"])?;

    // Wait until node1 has used the conflicting resource.
    node1.expect_stdout_line_matches(first_stdout_regex)?;

    // Wait a bit before launching the second node.
    std::thread::sleep(BETWEEN_NODES_DELAY);

    // Spawn the second node
    let node2 = second_dir.spawn_child(args!["start"]);
    let (node2, mut node1) = node1.kill_on_error(node2)?;

    // Wait a few seconds and kill first node.
    // Second node is terminated by panic, no need to kill.
    std::thread::sleep(LAUNCH_DELAY);
    let node1_kill_res = node1.kill(false);
    let (_, mut node2) = node2.kill_on_error(node1_kill_res)?;

    // node2 should have panicked due to a conflict. Kill it here anyway, so it
    // doesn't outlive the test on error.
    //
    // This code doesn't work on Windows or macOS. It's cleanup code that only
    // runs when node2 doesn't panic as expected. So it's ok to skip it.
    // See #1781.
    #[cfg(target_os = "linux")]
    if node2.is_running() {
        return node2
            .kill_on_error::<(), _>(Err(eyre!(
                "conflicted node2 was still running, but the test expected a panic"
            )))
            .context_from(&mut node1)
            .map(|_| ());
    }

    // Now we're sure both nodes are dead, and we have both their outputs
    let output1 = node1.wait_with_output().context_from(&mut node2)?;
    let output2 = node2.wait_with_output().context_from(&output1)?;

    // Make sure the first node was killed, rather than exiting with an error.
    output1
        .assert_was_killed()
        .warning("Possible port conflict. Are there other acceptance tests running?")
        .context_from(&output2)?;

    // Make sure node2 has the expected resource conflict.
    output2
        .stderr_line_matches(second_stderr_regex)
        .context_from(&output1)?;
    output2
        .assert_was_not_killed()
        .warning("Possible port conflict. Are there other acceptance tests running?")
        .context_from(&output1)?;

    Ok(())
}

#[tokio::test]
#[ignore]
async fn fully_synced_rpc_test() -> Result<()> {
    let _init_guard = zebra_test::init();

    // We're only using cached Zebra state here, so this test type is the most similar
    let test_type = TestType::UpdateCachedState;
    let network = Network::Mainnet;

    let (mut zebrad, zebra_rpc_address) = if let Some(zebrad_and_address) =
        spawn_zebrad_for_rpc(network, "fully_synced_rpc_test", test_type, false)?
    {
        tracing::info!("running fully synced zebrad RPC test");

        zebrad_and_address
    } else {
        // Skip the test, we don't have the required cached state
        return Ok(());
    };

    let zebra_rpc_address = zebra_rpc_address.expect("lightwalletd test must have RPC port");

    zebrad.expect_stdout_line_matches(format!("Opened RPC endpoint at {zebra_rpc_address}"))?;

    let client = RpcRequestClient::new(zebra_rpc_address);

    // Make a getblock test that works only on a synced node (high block number).
    // The block is before the mandatory checkpoint, so the checkpoint cached state can be used
    // if desired.
    let res = client
        .text_from_call("getblock", r#"["1180900", 0]"#.to_string())
        .await?;

    // Simple textual check to avoid fully parsing the response, for simplicity
    let expected_bytes = zebra_test::vectors::MAINNET_BLOCKS
        .get(&1_180_900)
        .expect("test block must exist");
    let expected_hex = hex::encode(expected_bytes);
    assert!(
        res.contains(&expected_hex),
        "response did not contain the desired block: {res}"
    );

    Ok(())
}

#[tokio::test]
async fn delete_old_databases() -> Result<()> {
    use std::fs::{canonicalize, create_dir};

    let _init_guard = zebra_test::init();

    // Skip this test because it can be very slow without a network.
    //
    // The delete databases task is launched last during startup, after network setup.
    // If there is no network, network setup can take a long time to timeout,
    // so the task takes a long time to launch, slowing down this test.
    if zebra_test::net::zebra_skip_network_tests() {
        return Ok(());
    }

    let mut config = default_test_config(Mainnet)?;
    let run_dir = testdir()?;
    let cache_dir = run_dir.path().join("state");

    // create cache dir
    create_dir(cache_dir.clone())?;

    // create a v1 dir outside cache dir that should not be deleted
    let outside_dir = run_dir.path().join("v1");
    create_dir(&outside_dir)?;
    assert!(outside_dir.as_path().exists());

    // create a `v1` dir inside cache dir that should be deleted
    let inside_dir = cache_dir.join("v1");
    create_dir(&inside_dir)?;
    let canonicalized_inside_dir = canonicalize(inside_dir.clone()).ok().unwrap();
    assert!(inside_dir.as_path().exists());

    // modify the config to use our cache dir with a non-ephemeral state
    // (the delete old databases function will not run when ephemeral = true)
    config.state.cache_dir = cache_dir;
    config.state.ephemeral = false;

    // run zebra with our config
    let mut child = run_dir
        .with_config(&mut config)?
        .spawn_child(args!["start"])?;

    // delete checker running
    child.expect_stdout_line_matches("checking for old database versions".to_string())?;

    // inside dir was deleted
    child.expect_stdout_line_matches(format!(
        "deleted outdated state directory deleted_state={canonicalized_inside_dir:?}"
    ))?;
    assert!(!inside_dir.as_path().exists());

    // deleting old databases task ended
    child.expect_stdout_line_matches("finished old database version cleanup task".to_string())?;

    // outside dir was not deleted
    assert!(outside_dir.as_path().exists());

    // finish
    child.kill(false)?;
    let output = child.wait_with_output()?;
    let output = output.assert_failure()?;

    // [Note on port conflict](#Note on port conflict)
    output
        .assert_was_killed()
        .wrap_err("Possible port conflict. Are there other acceptance tests running?")?;

    Ok(())
}

/// Test sending transactions using a lightwalletd instance connected to a zebrad instance.
///
/// See [`common::lightwalletd::send_transaction_test`] for more information.
///
/// This test doesn't work on Windows, so it is always skipped on that platform.
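///
/// Example invocation (a sketch; assumes the lightwalletd env vars described
/// in the module docs are set):
///
/// ```console
/// $ cargo test sending_transactions_using_lightwalletd --features lightwalletd-grpc-tests -- --ignored --nocapture
/// ```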
#[tokio::test]
#[ignore]
#[cfg(feature = "lightwalletd-grpc-tests")]
#[cfg(not(target_os = "windows"))]
async fn sending_transactions_using_lightwalletd() -> Result<()> {
    common::lightwalletd::send_transaction_test::run().await
}

/// Test all the rpc methods a wallet connected to lightwalletd can call.
///
/// See [`common::lightwalletd::wallet_grpc_test`] for more information.
///
/// This test doesn't work on Windows, so it is always skipped on that platform.
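///
/// Example invocation (a sketch; assumes the lightwalletd env vars described
/// in the module docs are set):
///
/// ```console
/// $ cargo test lightwalletd_wallet_grpc_tests --features lightwalletd-grpc-tests -- --ignored --nocapture
/// ```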
#[tokio::test]
#[ignore]
#[cfg(feature = "lightwalletd-grpc-tests")]
#[cfg(not(target_os = "windows"))]
async fn lightwalletd_wallet_grpc_tests() -> Result<()> {
    common::lightwalletd::wallet_grpc_test::run().await
}

/// Test successful getpeerinfo rpc call
///
/// See [`common::get_block_template_rpcs::get_peer_info`] for more information.
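///
/// Example invocation (a sketch; assumes a reachable network):
///
/// ```console
/// $ cargo test get_peer_info --features getblocktemplate-rpcs -- --nocapture
/// ```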
#[tokio::test]
#[cfg(feature = "getblocktemplate-rpcs")]
async fn get_peer_info() -> Result<()> {
    common::get_block_template_rpcs::get_peer_info::run().await
}

/// Test successful getblocktemplate rpc call
///
/// See [`common::get_block_template_rpcs::get_block_template`] for more information.
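///
/// Example invocation (a sketch; assumes any required cached state is available):
///
/// ```console
/// $ cargo test get_block_template --features getblocktemplate-rpcs -- --ignored --nocapture
/// ```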
#[tokio::test]
#[ignore]
#[cfg(feature = "getblocktemplate-rpcs")]
async fn get_block_template() -> Result<()> {
    common::get_block_template_rpcs::get_block_template::run().await
}

/// Test successful submitblock rpc call
///
/// See [`common::get_block_template_rpcs::submit_block`] for more information.
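///
/// Example invocation (a sketch; assumes any required cached state is available):
///
/// ```console
/// $ cargo test submit_block --features getblocktemplate-rpcs -- --ignored --nocapture
/// ```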
#[tokio::test]
#[ignore]
#[cfg(feature = "getblocktemplate-rpcs")]
async fn submit_block() -> Result<()> {
    common::get_block_template_rpcs::submit_block::run().await
}

/// Check that the end of support code is called at least once.
#[test]
fn end_of_support_is_checked_at_start() -> Result<()> {
    let _init_guard = zebra_test::init();
    let testdir = testdir()?.with_config(&mut default_test_config(Mainnet)?)?;
    let mut child = testdir.spawn_child(args!["start"])?;

    // Give enough time to start up the end of support task.
    std::thread::sleep(Duration::from_secs(30));

    child.kill(false)?;
    let output = child.wait_with_output()?;
    let output = output.assert_failure()?;

    // Zebra started
    output.stdout_line_contains("Starting zebrad")?;

    // End of support task started.
    output.stdout_line_contains("Starting end of support task")?;

    // Make sure the command was killed
    output.assert_was_killed()?;

    Ok(())
}

/// Test `zebra-checkpoints` on mainnet.
///
/// If you want to run this test individually, see the module documentation.
/// See [`common::checkpoints`] for more information.
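///
/// Example invocation (a sketch; see the module documentation for required setup):
///
/// ```console
/// $ cargo test generate_checkpoints_mainnet --features zebra-checkpoints -- --ignored --nocapture
/// ```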
#[tokio::test]
#[ignore]
#[cfg(feature = "zebra-checkpoints")]
async fn generate_checkpoints_mainnet() -> Result<()> {
    common::checkpoints::run(Mainnet).await
}

/// Test `zebra-checkpoints` on testnet.
/// This test might fail if testnet is unstable.
///
/// If you want to run this test individually, see the module documentation.
/// See [`common::checkpoints`] for more information.
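///
/// Example invocation (a sketch; see the module documentation for required setup):
///
/// ```console
/// $ cargo test generate_checkpoints_testnet --features zebra-checkpoints -- --ignored --nocapture
/// ```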
#[tokio::test]
#[ignore]
#[cfg(feature = "zebra-checkpoints")]
async fn generate_checkpoints_testnet() -> Result<()> {
    common::checkpoints::run(Testnet).await
}

/// Check that new states are created with the current state format version,
/// and that restarting `zebrad` doesn't change the format version.
#[tokio::test]
async fn new_state_format() -> Result<()> {
    for network in [Mainnet, Testnet] {
        state_format_test("new_state_format_test", network, 2, None).await?;
    }

    Ok(())
}

/// Check that outdated states are updated to the current state format version,
/// and that restarting `zebrad` doesn't change the updated format version.
///
/// TODO: test partial updates, once we have some updates that take a while.
/// (or just add a delay during tests)
#[tokio::test]
async fn update_state_format() -> Result<()> {
    let mut fake_version = database_format_version_in_code();
    fake_version.minor = 0;
    fake_version.patch = 0;

    for network in [Mainnet, Testnet] {
        state_format_test("update_state_format_test", network, 3, Some(&fake_version)).await?;
    }

    Ok(())
}

/// Check that newer state formats are downgraded to the current state format version,
/// and that restarting `zebrad` doesn't change the format version.
///
/// Future version compatibility is a best-effort attempt, this test can be disabled if it fails.
#[tokio::test]
async fn downgrade_state_format() -> Result<()> {
    let mut fake_version = database_format_version_in_code();
    fake_version.minor = u16::MAX.into();
    fake_version.patch = 0;

    for network in [Mainnet, Testnet] {
        state_format_test(
            "downgrade_state_format_test",
            network,
            3,
            Some(&fake_version),
        )
        .await?;
    }

    Ok(())
}

/// Test state format changes, see calling tests for details.
async fn state_format_test(
    base_test_name: &str,
    network: Network,
    reopen_count: usize,
    fake_version: Option<&Version>,
) -> Result<()> {
    let _init_guard = zebra_test::init();

    let test_name = &format!("{base_test_name}/new");

    // # Create a new state and check it has the current version

    let zebrad = spawn_zebrad_without_rpc(network, test_name, false, false, None, false)?;

    // Skip the test unless it has the required state and environmental variables.
    let Some(mut zebrad) = zebrad else {
        return Ok(());
    };

    tracing::info!(?network, "running {test_name} using zebrad");

    zebrad.expect_stdout_line_matches("creating new database with the current format")?;
    zebrad.expect_stdout_line_matches("loaded Zebra state cache")?;

    // Give Zebra enough time to actually write the database to disk.
    tokio::time::sleep(Duration::from_secs(1)).await;

    let logs = zebrad.kill_and_return_output(false)?;

    assert!(
        !logs.contains("marked database format as upgraded"),
        "unexpected format upgrade in logs:\n\
         {logs}"
    );
    assert!(
        !logs.contains("marked database format as downgraded"),
        "unexpected format downgrade in logs:\n\
         {logs}"
    );

    let output = zebrad.wait_with_output()?;
    let mut output = output.assert_failure()?;

    let mut dir = output
        .take_dir()
        .expect("dir should not already have been taken");

    // [Note on port conflict](#Note on port conflict)
    output
        .assert_was_killed()
        .wrap_err("Possible port conflict. Are there other acceptance tests running?")?;

    // # Apply the fake version if needed

    let mut expect_older_version = false;
    let mut expect_newer_version = false;

    if let Some(fake_version) = fake_version {
        let test_name = &format!("{base_test_name}/apply_fake_version/{fake_version}");
        tracing::info!(?network, "running {test_name} using zebra-state");

        let config = UseAnyState
            .zebrad_config(test_name, false, Some(dir.path()), network)
            .expect("already checked config")?;
        zebra_state::write_database_format_version_to_disk(fake_version, &config.state, network)
            .expect("can't write fake database version to disk");

        // Give zebra_state enough time to actually write the database version to disk.
        tokio::time::sleep(Duration::from_secs(1)).await;

        let running_version = database_format_version_in_code();

        match fake_version.cmp(&running_version) {
            Ordering::Less => expect_older_version = true,
            Ordering::Equal => {}
            Ordering::Greater => expect_newer_version = true,
        }
    }

    // # Reopen that state and check the version hasn't changed

    for reopened in 0..reopen_count {
        let test_name = &format!("{base_test_name}/reopen/{reopened}");

        if reopened > 0 {
            expect_older_version = false;
            expect_newer_version = false;
        }

        let mut zebrad = spawn_zebrad_without_rpc(network, test_name, false, false, dir, false)?
            .expect("unexpectedly missing required state or env vars");

        tracing::info!(?network, "running {test_name} using zebrad");

        if expect_older_version {
            zebrad.expect_stdout_line_matches("trying to open older database format")?;
            zebrad.expect_stdout_line_matches("marked database format as upgraded")?;
            zebrad.expect_stdout_line_matches("database is fully upgraded")?;
        } else if expect_newer_version {
            zebrad.expect_stdout_line_matches("trying to open newer database format")?;
            zebrad.expect_stdout_line_matches("marked database format as downgraded")?;
        } else {
            zebrad.expect_stdout_line_matches("trying to open current database format")?;
            zebrad.expect_stdout_line_matches("loaded Zebra state cache")?;
        }

        // Give Zebra enough time to actually write the database to disk.
        tokio::time::sleep(Duration::from_secs(1)).await;

        let logs = zebrad.kill_and_return_output(false)?;

        if !expect_older_version {
            assert!(
                !logs.contains("marked database format as upgraded"),
                "unexpected format upgrade in logs:\n\
                 {logs}"
            );
        }
        if !expect_newer_version {
            assert!(
                !logs.contains("marked database format as downgraded"),
                "unexpected format downgrade in logs:\n\
                 {logs}"
            );
        }

        let output = zebrad.wait_with_output()?;
        let mut output = output.assert_failure()?;

        dir = output
            .take_dir()
            .expect("dir should not already have been taken");

        // [Note on port conflict](#Note on port conflict)
        output
            .assert_was_killed()
            .wrap_err("Possible port conflict. Are there other acceptance tests running?")?;
    }

    Ok(())
}

/// Snapshot the `z_getsubtreesbyindex` method in a synchronized chain.
///
/// This test name must have the same prefix as the `fully_synced_rpc_test`, so they can be run in the same test job.
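///
/// Example invocation (a sketch; assumes a fully synced cached state, the
/// path below is a placeholder):
///
/// ```console
/// $ export ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state
/// $ cargo test fully_synced_rpc_z_getsubtreesbyindex_snapshot_test -- --ignored --nocapture
/// ```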
#[tokio::test]
#[ignore]
async fn fully_synced_rpc_z_getsubtreesbyindex_snapshot_test() -> Result<()> {
    let _init_guard = zebra_test::init();

    // We're only using cached Zebra state here, so this test type is the most similar
    let test_type = TestType::UpdateZebraCachedStateWithRpc;
    let network = Network::Mainnet;

    let (mut zebrad, zebra_rpc_address) = if let Some(zebrad_and_address) = spawn_zebrad_for_rpc(
        network,
        "rpc_z_getsubtreesbyindex_sync_snapshots",
        test_type,
        true,
    )? {
        tracing::info!("running fully synced zebrad z_getsubtreesbyindex RPC test");

        zebrad_and_address
    } else {
        // Skip the test, we don't have the required cached state
        return Ok(());
    };

    // Store the state version message so we can wait for the upgrade later if needed.
    let state_version_message = wait_for_state_version_message(&mut zebrad)?;

    // It doesn't matter how long the state version upgrade takes,
    // because the sync finished regex is repeated every minute.
    wait_for_state_version_upgrade(
        &mut zebrad,
        &state_version_message,
        database_format_version_in_code(),
        None,
    )?;

    // Wait for zebrad to load the full cached blockchain.
    zebrad.expect_stdout_line_matches(SYNC_FINISHED_REGEX)?;

    // Create an http client
    let client =
        RpcRequestClient::new(zebra_rpc_address.expect("already checked that address is valid"));

    // Create test vector matrix
    let zcashd_test_vectors = vec![
        (
            "z_getsubtreesbyindex_mainnet_sapling_0_1".to_string(),
            r#"["sapling", 0, 1]"#.to_string(),
        ),
        (
            "z_getsubtreesbyindex_mainnet_sapling_0_11".to_string(),
            r#"["sapling", 0, 11]"#.to_string(),
        ),
        (
            "z_getsubtreesbyindex_mainnet_sapling_17_1".to_string(),
            r#"["sapling", 17, 1]"#.to_string(),
        ),
        (
            "z_getsubtreesbyindex_mainnet_sapling_1090_6".to_string(),
            r#"["sapling", 1090, 6]"#.to_string(),
        ),
        (
            "z_getsubtreesbyindex_mainnet_orchard_0_1".to_string(),
            r#"["orchard", 0, 1]"#.to_string(),
        ),
        (
            "z_getsubtreesbyindex_mainnet_orchard_338_1".to_string(),
            r#"["orchard", 338, 1]"#.to_string(),
        ),
        (
            "z_getsubtreesbyindex_mainnet_orchard_585_1".to_string(),
            r#"["orchard", 585, 1]"#.to_string(),
        ),
    ];

    for i in zcashd_test_vectors {
        let res = client.call("z_getsubtreesbyindex", i.1).await?;
        let body = res.bytes().await;
        let parsed: Value = serde_json::from_slice(&body.expect("Response is valid json"))?;
        insta::assert_json_snapshot!(i.0, parsed);
    }

    zebrad.kill(false)?;

    let output = zebrad.wait_with_output()?;
    let output = output.assert_failure()?;

    // [Note on port conflict](#Note on port conflict)
    output
        .assert_was_killed()
        .wrap_err("Possible port conflict. Are there other acceptance tests running?")?;

    Ok(())
}