[workspace]
members = [
    "zebrad",
    "zebra-chain",
    "zebra-network",
    "zebra-state",
    "zebra-script",
    "zebra-consensus",
    "zebra-rpc",
    "zebra-node-services",
    "zebra-test",
    "zebra-utils",
    "zebra-scan",
    "zebra-grpc",
    "tower-batch-control",
    "tower-fallback",
]

# Use the edition 2021 dependency resolver in the workspace, to match the crates
resolver = "2"

# Shared dependency versions, inherited by member crates via
# `dependency = { workspace = true }`.
[workspace.dependencies]
incrementalmerkletree = { version = "0.7.0", features = ["legacy-api"] }
orchard = "0.10.0"
sapling-crypto = "0.3.0"
zcash_address = "0.6.0"
zcash_client_backend = "0.14.0"
zcash_encoding = "0.2.1"
zcash_history = "0.4.0"
zcash_keys = "0.4.0"
zcash_primitives = "0.19.0"
zcash_proofs = "0.19.0"
zcash_protocol = "0.4.0"

# `cargo release` settings
[workspace.metadata.release]

# We always do releases from the main branch
allow-branch = ["main"]

# Compilation settings

[profile.dev]
panic = "abort"

# Speed up tests by optimizing performance-critical crates

# Cryptographic crates

[profile.dev.package.blake2b_simd]
opt-level = 3

[profile.dev.package.ff]
opt-level = 3

[profile.dev.package.group]
opt-level = 3

[profile.dev.package.pasta_curves]
opt-level = 3

[profile.dev.package.halo2_proofs]
opt-level = 3

[profile.dev.package.halo2_gadgets]
opt-level = 3

[profile.dev.package.bls12_381]
opt-level = 3

[profile.dev.package.byteorder]
opt-level = 3

[profile.dev.package.equihash]
opt-level = 3

[profile.dev.package.zcash_proofs]
opt-level = 3

[profile.dev.package.ring]
opt-level = 3

[profile.dev.package.spin]
opt-level = 3

[profile.dev.package.untrusted]
opt-level = 3

[profile.release]
panic = "abort"

# Speed up release builds and sync tests using link-time optimization.
# Some of Zebra's code is CPU-intensive, and needs extra optimizations for peak performance.
#
# TODO:
# - add "-Clinker-plugin-lto" in .cargo/config.toml to speed up release builds
# - add "-Clinker=clang -Clink-arg=-fuse-ld=lld" in .cargo/config.toml
# - also use LTO on C/C++ code:
#   - use clang to compile all C/C++ code
#   - add "-flto=thin" to all C/C++ code builds
#   - see https://doc.rust-lang.org/rustc/linker-plugin-lto.html#cc-code-as-a-dependency-in-rust
lto = "thin"