//! Errors that can occur when checking consensus rules.
//!
//! Each error variant corresponds to a consensus rule, so enumerating
//! all possible verification failures enumerates the consensus rules we
//! implement, and ensures that we don't reject blocks or transactions
//! for a non-enumerated reason.
|
2020-09-21 11:54:06 -07:00
|
|
|
|
2021-11-22 21:53:53 -08:00
|
|
|
use chrono::{DateTime, Utc};
|
2020-09-21 11:54:06 -07:00
|
|
|
use thiserror::Error;
|
|
|
|
|
2023-04-17 20:43:39 -07:00
|
|
|
use zebra_chain::{
|
|
|
|
amount, block, orchard, sapling, sprout,
|
|
|
|
transparent::{self, MIN_TRANSPARENT_COINBASE_MATURITY},
|
|
|
|
};
|
2022-11-29 20:40:15 -08:00
|
|
|
use zebra_state::ValidateContextError;
|
2021-10-27 19:49:28 -07:00
|
|
|
|
2021-11-15 12:55:32 -08:00
|
|
|
use crate::{block::MAX_BLOCK_SIGOPS, BoxError};
|
2020-10-26 23:42:27 -07:00
|
|
|
|
Send crawled transaction IDs to downloader (#2801)
* Rename type parameter to be more explicit
Replace the single letter with a proper name.
* Remove imports for `Request` and `Response`
The type names will conflict with the ones for the mempool service.
* Attach `Mempool` service to the `Crawler`
Add a field to the `Crawler` type to store a way to access the `Mempool`
service.
* Forward crawled transactions to downloader
The crawled transactions are now sent to the transaction downloader and
verifier, to be included in the mempool.
* Derive `Eq` and `PartialEq` for `mempool::Request`
Make it simpler to use the `MockService::expect_request` method.
* Test if crawled transactions are downloaded
Create some dummy crawled transactions, and let the crawler discover
them. Then check if they are forwarded to the mempool to be downloaded
and verified.
* Don't send empty transaction ID list to downloader
Ignore response from peers that don't provide any crawled transactions.
* Log errors when forwarding crawled transaction IDs
Calling the Mempool service should not fail, so if an error happens it
should be visible. However, errors when downloading individual
transactions can happen from time to time, so there's no need for them
to be very visible.
* Document existing `mempool::Crawler` test
Provide some depth as to what the test expect from the crawler's
behavior.
* Refactor to create `setup_crawler` helper function
Make it easier to reuse the common test setup code.
* Simplify code to expect requests
Now that `zebra_network::Request` implement `Eq`, the call can be
simplified into `expect_request`.
* Refactor to create `respond_with_transaction_ids`
A helper function that checks for a network crawl request and responds
with the given list of crawled transaction IDs.
* Refactor to create `crawler_iterator` helper
A function to intercept and respond to the fanned-out requests sent
during a single crawl iteration.
* Refactor to create `respond_to_queue_request`
Reduce the repeated code necessary to intercept and reply to a request
for queuing transactions to be downloaded.
* Add `respond_to_queue_request_with_error` helper
Intercepts a mempool request to queue transactions to be downloaded, and
responds with an error, simulating an internal problem in the mempool
service implementation.
* Derive `Arbitrary` for `NetworkUpgrade`
This is required for deriving `Arbitrary` for some error types.
* Derive `Arbitrary` for `TransactionError`
Allow random transaction errors to be generated for property tests.
* Derive `Arbitrary` for `MempoolError`
Allow random Mempool errors to be generated for property tests.
* Test if errors don't stop the mempool crawler
The crawler should be robust enough to continue operating even if the
mempool service fails to download transactions or even fails to handle
requests to enqueue transactions.
* Reduce the log level for download errors
They should happen regularly, so there's no need to have them with a
high visibility level.
Co-authored-by: teor <teor@riseup.net>
* Stop crawler if service stops
If `Mempool::poll_ready` returns an error, it's because the mempool
service has stopped and can't handle any requests, so the crawler should
stop as well.
Co-authored-by: teor <teor@riseup.net>
Co-authored-by: Conrado Gouvea <conrado@zfnd.org>
2021-10-04 17:55:42 -07:00
|
|
|
#[cfg(any(test, feature = "proptest-impl"))]
|
|
|
|
use proptest_derive::Arbitrary;
|
|
|
|
|
2021-11-25 16:37:24 -08:00
|
|
|
/// Workaround for format string identifier rules.
///
/// Inline format-string identifiers (e.g. `{MAX_EXPIRY_HEIGHT:?}` in the
/// `#[error(...)]` attributes below) must be plain names, not paths, so the
/// associated constant is re-bound to a local `const` that error format
/// strings can reference directly.
const MAX_EXPIRY_HEIGHT: block::Height = block::Height::MAX_EXPIRY_HEIGHT;
|
|
|
|
|
2021-10-06 18:20:38 -07:00
|
|
|
/// Errors that can occur while checking the block subsidy consensus rules:
/// the coinbase transaction's funding stream outputs and miner fees.
#[derive(Error, Copy, Clone, Debug, PartialEq, Eq)]
#[allow(missing_docs)]
pub enum SubsidyError {
    /// The block has no coinbase transaction, so subsidy rules can't be checked.
    #[error("no coinbase transaction in block")]
    NoCoinbase,

    /// The coinbase transaction is missing an output required by a funding stream.
    #[error("funding stream expected output not found")]
    FundingStreamNotFound,

    /// The miner fees in the coinbase transaction violate consensus rules.
    #[error("miner fees are invalid")]
    InvalidMinerFees,

    /// An amount sum overflowed while calculating the subsidy or fees.
    #[error("a sum of amounts overflowed")]
    SumOverflow,
}
|
|
|
|
|
2021-10-06 18:20:38 -07:00
|
|
|
#[derive(Error, Clone, Debug, PartialEq, Eq)]
|
Send crawled transaction IDs to downloader (#2801)
* Rename type parameter to be more explicit
Replace the single letter with a proper name.
* Remove imports for `Request` and `Response`
The type names will conflict with the ones for the mempool service.
* Attach `Mempool` service to the `Crawler`
Add a field to the `Crawler` type to store a way to access the `Mempool`
service.
* Forward crawled transactions to downloader
The crawled transactions are now sent to the transaction downloader and
verifier, to be included in the mempool.
* Derive `Eq` and `PartialEq` for `mempool::Request`
Make it simpler to use the `MockService::expect_request` method.
* Test if crawled transactions are downloaded
Create some dummy crawled transactions, and let the crawler discover
them. Then check if they are forwarded to the mempool to be downloaded
and verified.
* Don't send empty transaction ID list to downloader
Ignore response from peers that don't provide any crawled transactions.
* Log errors when forwarding crawled transaction IDs
Calling the Mempool service should not fail, so if an error happens it
should be visible. However, errors when downloading individual
transactions can happen from time to time, so there's no need for them
to be very visible.
* Document existing `mempool::Crawler` test
Provide some depth as to what the test expect from the crawler's
behavior.
* Refactor to create `setup_crawler` helper function
Make it easier to reuse the common test setup code.
* Simplify code to expect requests
Now that `zebra_network::Request` implement `Eq`, the call can be
simplified into `expect_request`.
* Refactor to create `respond_with_transaction_ids`
A helper function that checks for a network crawl request and responds
with the given list of crawled transaction IDs.
* Refactor to create `crawler_iterator` helper
A function to intercept and respond to the fanned-out requests sent
during a single crawl iteration.
* Refactor to create `respond_to_queue_request`
Reduce the repeated code necessary to intercept and reply to a request
for queuing transactions to be downloaded.
* Add `respond_to_queue_request_with_error` helper
Intercepts a mempool request to queue transactions to be downloaded, and
responds with an error, simulating an internal problem in the mempool
service implementation.
* Derive `Arbitrary` for `NetworkUpgrade`
This is required for deriving `Arbitrary` for some error types.
* Derive `Arbitrary` for `TransactionError`
Allow random transaction errors to be generated for property tests.
* Derive `Arbitrary` for `MempoolError`
Allow random Mempool errors to be generated for property tests.
* Test if errors don't stop the mempool crawler
The crawler should be robust enough to continue operating even if the
mempool service fails to download transactions or even fails to handle
requests to enqueue transactions.
* Reduce the log level for download errors
They should happen regularly, so there's no need to have them with a
high visibility level.
Co-authored-by: teor <teor@riseup.net>
* Stop crawler if service stops
If `Mempool::poll_ready` returns an error, it's because the mempool
service has stopped and can't handle any requests, so the crawler should
stop as well.
Co-authored-by: teor <teor@riseup.net>
Co-authored-by: Conrado Gouvea <conrado@zfnd.org>
2021-10-04 17:55:42 -07:00
|
|
|
#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))]
|
2023-02-20 21:30:29 -08:00
|
|
|
#[allow(missing_docs)]
|
2020-09-21 11:54:06 -07:00
|
|
|
pub enum TransactionError {
|
|
|
|
#[error("first transaction must be coinbase")]
|
|
|
|
CoinbasePosition,
|
|
|
|
|
|
|
|
#[error("coinbase input found in non-coinbase transaction")]
|
2021-04-27 17:43:00 -07:00
|
|
|
CoinbaseAfterFirst,
|
|
|
|
|
2020-11-19 19:06:10 -08:00
|
|
|
#[error("coinbase transaction MUST NOT have any JoinSplit descriptions")]
|
|
|
|
CoinbaseHasJoinSplit,
|
|
|
|
|
|
|
|
#[error("coinbase transaction MUST NOT have any Spend descriptions")]
|
|
|
|
CoinbaseHasSpend,
|
|
|
|
|
|
|
|
#[error("coinbase transaction MUST NOT have any Output descriptions pre-Heartwood")]
|
|
|
|
CoinbaseHasOutputPreHeartwood,
|
2020-10-26 23:42:27 -07:00
|
|
|
|
2021-06-02 18:54:08 -07:00
|
|
|
#[error("coinbase transaction MUST NOT have the EnableSpendsOrchard flag set")]
|
|
|
|
CoinbaseHasEnableSpendsOrchard,
|
|
|
|
|
2021-11-11 14:18:37 -08:00
|
|
|
#[error("coinbase transaction Sapling or Orchard outputs MUST be decryptable with an all-zero outgoing viewing key")]
|
|
|
|
CoinbaseOutputsNotDecryptable,
|
|
|
|
|
2021-09-01 17:06:20 -07:00
|
|
|
#[error("coinbase inputs MUST NOT exist in mempool")]
|
|
|
|
CoinbaseInMempool,
|
|
|
|
|
2022-04-20 02:31:12 -07:00
|
|
|
#[error("non-coinbase transactions MUST NOT have coinbase inputs")]
|
|
|
|
NonCoinbaseHasCoinbaseInput,
|
|
|
|
|
2021-11-22 21:53:53 -08:00
|
|
|
#[error("transaction is locked until after block height {}", _0.0)]
|
|
|
|
LockedUntilAfterBlockHeight(block::Height),
|
|
|
|
|
|
|
|
#[error("transaction is locked until after block time {0}")]
|
|
|
|
#[cfg_attr(any(test, feature = "proptest-impl"), proptest(skip))]
|
|
|
|
LockedUntilAfterBlockTime(DateTime<Utc>),
|
|
|
|
|
2021-11-25 16:37:24 -08:00
|
|
|
#[error(
|
|
|
|
"coinbase expiry {expiry_height:?} must be the same as the block {block_height:?} \
|
|
|
|
after NU5 activation, failing transaction: {transaction_hash:?}"
|
|
|
|
)]
|
|
|
|
CoinbaseExpiryBlockHeight {
|
|
|
|
expiry_height: Option<zebra_chain::block::Height>,
|
|
|
|
block_height: zebra_chain::block::Height,
|
|
|
|
transaction_hash: zebra_chain::transaction::Hash,
|
|
|
|
},
|
|
|
|
|
|
|
|
#[error(
|
|
|
|
"expiry {expiry_height:?} must be less than the maximum {MAX_EXPIRY_HEIGHT:?} \
|
|
|
|
coinbase: {is_coinbase}, block: {block_height:?}, failing transaction: {transaction_hash:?}"
|
|
|
|
)]
|
|
|
|
MaximumExpiryHeight {
|
|
|
|
expiry_height: zebra_chain::block::Height,
|
|
|
|
is_coinbase: bool,
|
|
|
|
block_height: zebra_chain::block::Height,
|
|
|
|
transaction_hash: zebra_chain::transaction::Hash,
|
|
|
|
},
|
|
|
|
|
|
|
|
#[error(
|
|
|
|
"transaction must not be mined at a block {block_height:?} \
|
|
|
|
greater than its expiry {expiry_height:?}, failing transaction {transaction_hash:?}"
|
|
|
|
)]
|
|
|
|
ExpiredTransaction {
|
|
|
|
expiry_height: zebra_chain::block::Height,
|
|
|
|
block_height: zebra_chain::block::Height,
|
|
|
|
transaction_hash: zebra_chain::transaction::Hash,
|
|
|
|
},
|
2021-11-22 21:17:05 -08:00
|
|
|
|
2020-10-12 13:54:48 -07:00
|
|
|
#[error("coinbase transaction failed subsidy validation")]
|
Send crawled transaction IDs to downloader (#2801)
* Rename type parameter to be more explicit
Replace the single letter with a proper name.
* Remove imports for `Request` and `Response`
The type names will conflict with the ones for the mempool service.
* Attach `Mempool` service to the `Crawler`
Add a field to the `Crawler` type to store a way to access the `Mempool`
service.
* Forward crawled transactions to downloader
The crawled transactions are now sent to the transaction downloader and
verifier, to be included in the mempool.
* Derive `Eq` and `PartialEq` for `mempool::Request`
Make it simpler to use the `MockService::expect_request` method.
* Test if crawled transactions are downloaded
Create some dummy crawled transactions, and let the crawler discover
them. Then check if they are forwarded to the mempool to be downloaded
and verified.
* Don't send empty transaction ID list to downloader
Ignore response from peers that don't provide any crawled transactions.
* Log errors when forwarding crawled transaction IDs
Calling the Mempool service should not fail, so if an error happens it
should be visible. However, errors when downloading individual
transactions can happen from time to time, so there's no need for them
to be very visible.
* Document existing `mempool::Crawler` test
Provide some depth as to what the test expect from the crawler's
behavior.
* Refactor to create `setup_crawler` helper function
Make it easier to reuse the common test setup code.
* Simplify code to expect requests
Now that `zebra_network::Request` implement `Eq`, the call can be
simplified into `expect_request`.
* Refactor to create `respond_with_transaction_ids`
A helper function that checks for a network crawl request and responds
with the given list of crawled transaction IDs.
* Refactor to create `crawler_iterator` helper
A function to intercept and respond to the fanned-out requests sent
during a single crawl iteration.
* Refactor to create `respond_to_queue_request`
Reduce the repeated code necessary to intercept and reply to a request
for queuing transactions to be downloaded.
* Add `respond_to_queue_request_with_error` helper
Intercepts a mempool request to queue transactions to be downloaded, and
responds with an error, simulating an internal problem in the mempool
service implementation.
* Derive `Arbitrary` for `NetworkUpgrade`
This is required for deriving `Arbitrary` for some error types.
* Derive `Arbitrary` for `TransactionError`
Allow random transaction errors to be generated for property tests.
* Derive `Arbitrary` for `MempoolError`
Allow random Mempool errors to be generated for property tests.
* Test if errors don't stop the mempool crawler
The crawler should be robust enough to continue operating even if the
mempool service fails to download transactions or even fails to handle
requests to enqueue transactions.
* Reduce the log level for download errors
They should happen regularly, so there's no need to have them with a
high visibility level.
Co-authored-by: teor <teor@riseup.net>
* Stop crawler if service stops
If `Mempool::poll_ready` returns an error, it's because the mempool
service has stopped and can't handle any requests, so the crawler should
stop as well.
Co-authored-by: teor <teor@riseup.net>
Co-authored-by: Conrado Gouvea <conrado@zfnd.org>
2021-10-04 17:55:42 -07:00
|
|
|
#[cfg_attr(any(test, feature = "proptest-impl"), proptest(skip))]
|
2020-10-12 13:54:48 -07:00
|
|
|
Subsidy(#[from] SubsidyError),
|
2020-10-26 23:42:27 -07:00
|
|
|
|
|
|
|
#[error("transaction version number MUST be >= 4")]
|
|
|
|
WrongVersion,
|
|
|
|
|
2021-06-14 17:15:59 -07:00
|
|
|
#[error("transaction version {0} not supported by the network upgrade {1:?}")]
|
|
|
|
UnsupportedByNetworkUpgrade(u32, zebra_chain::parameters::NetworkUpgrade),
|
|
|
|
|
2020-11-19 19:13:52 -08:00
|
|
|
#[error("must have at least one input: transparent, shielded spend, or joinsplit")]
|
|
|
|
NoInputs,
|
|
|
|
|
|
|
|
#[error("must have at least one output: transparent, shielded output, or joinsplit")]
|
|
|
|
NoOutputs,
|
2020-10-26 23:42:27 -07:00
|
|
|
|
|
|
|
#[error("if there are no Spends or Outputs, the value balance MUST be 0.")]
|
|
|
|
BadBalance,
|
|
|
|
|
2020-10-28 17:22:25 -07:00
|
|
|
#[error("could not verify a transparent script")]
|
Send crawled transaction IDs to downloader (#2801)
* Rename type parameter to be more explicit
Replace the single letter with a proper name.
* Remove imports for `Request` and `Response`
The type names will conflict with the ones for the mempool service.
* Attach `Mempool` service to the `Crawler`
Add a field to the `Crawler` type to store a way to access the `Mempool`
service.
* Forward crawled transactions to downloader
The crawled transactions are now sent to the transaction downloader and
verifier, to be included in the mempool.
* Derive `Eq` and `PartialEq` for `mempool::Request`
Make it simpler to use the `MockService::expect_request` method.
* Test if crawled transactions are downloaded
Create some dummy crawled transactions, and let the crawler discover
them. Then check if they are forwarded to the mempool to be downloaded
and verified.
* Don't send empty transaction ID list to downloader
Ignore response from peers that don't provide any crawled transactions.
* Log errors when forwarding crawled transaction IDs
Calling the Mempool service should not fail, so if an error happens it
should be visible. However, errors when downloading individual
transactions can happen from time to time, so there's no need for them
to be very visible.
* Document existing `mempool::Crawler` test
Provide some depth as to what the test expect from the crawler's
behavior.
* Refactor to create `setup_crawler` helper function
Make it easier to reuse the common test setup code.
* Simplify code to expect requests
Now that `zebra_network::Request` implement `Eq`, the call can be
simplified into `expect_request`.
* Refactor to create `respond_with_transaction_ids`
A helper function that checks for a network crawl request and responds
with the given list of crawled transaction IDs.
* Refactor to create `crawler_iterator` helper
A function to intercept and respond to the fanned-out requests sent
during a single crawl iteration.
* Refactor to create `respond_to_queue_request`
Reduce the repeated code necessary to intercept and reply to a request
for queuing transactions to be downloaded.
* Add `respond_to_queue_request_with_error` helper
Intercepts a mempool request to queue transactions to be downloaded, and
responds with an error, simulating an internal problem in the mempool
service implementation.
* Derive `Arbitrary` for `NetworkUpgrade`
This is required for deriving `Arbitrary` for some error types.
* Derive `Arbitrary` for `TransactionError`
Allow random transaction errors to be generated for property tests.
* Derive `Arbitrary` for `MempoolError`
Allow random Mempool errors to be generated for property tests.
* Test if errors don't stop the mempool crawler
The crawler should be robust enough to continue operating even if the
mempool service fails to download transactions or even fails to handle
requests to enqueue transactions.
* Reduce the log level for download errors
They should happen regularly, so there's no need to have them with a
high visibility level.
Co-authored-by: teor <teor@riseup.net>
* Stop crawler if service stops
If `Mempool::poll_ready` returns an error, it's because the mempool
service has stopped and can't handle any requests, so the crawler should
stop as well.
Co-authored-by: teor <teor@riseup.net>
Co-authored-by: Conrado Gouvea <conrado@zfnd.org>
2021-10-04 17:55:42 -07:00
|
|
|
#[cfg_attr(any(test, feature = "proptest-impl"), proptest(skip))]
|
2020-10-26 23:42:27 -07:00
|
|
|
Script(#[from] zebra_script::Error),
|
|
|
|
|
2021-03-24 09:28:25 -07:00
|
|
|
#[error("spend description cv and rk MUST NOT be of small order")]
|
|
|
|
SmallOrder,
|
|
|
|
|
2023-03-27 21:13:04 -07:00
|
|
|
// TODO: the underlying error is bellman::VerificationError, but it does not implement
|
2021-12-13 11:50:49 -08:00
|
|
|
// Arbitrary as required here.
|
2020-10-26 23:42:27 -07:00
|
|
|
#[error("spend proof MUST be valid given a primary input formed from the other fields except spendAuthSig")]
|
2021-12-13 11:50:49 -08:00
|
|
|
Groth16(String),
|
|
|
|
|
2023-03-27 21:13:04 -07:00
|
|
|
// TODO: the underlying error is io::Error, but it does not implement Clone as required here.
|
2021-12-13 11:50:49 -08:00
|
|
|
#[error("Groth16 proof is malformed")]
|
|
|
|
MalformedGroth16(String),
|
2020-10-26 23:42:27 -07:00
|
|
|
|
|
|
|
#[error(
|
2021-07-01 23:50:23 -07:00
|
|
|
"Sprout joinSplitSig MUST represent a valid signature under joinSplitPubKey of dataToBeSigned"
|
2020-10-26 23:42:27 -07:00
|
|
|
)]
|
Send crawled transaction IDs to downloader (#2801)
* Rename type parameter to be more explicit
Replace the single letter with a proper name.
* Remove imports for `Request` and `Response`
The type names will conflict with the ones for the mempool service.
* Attach `Mempool` service to the `Crawler`
Add a field to the `Crawler` type to store a way to access the `Mempool`
service.
* Forward crawled transactions to downloader
The crawled transactions are now sent to the transaction downloader and
verifier, to be included in the mempool.
* Derive `Eq` and `PartialEq` for `mempool::Request`
Make it simpler to use the `MockService::expect_request` method.
* Test if crawled transactions are downloaded
Create some dummy crawled transactions, and let the crawler discover
them. Then check if they are forwarded to the mempool to be downloaded
and verified.
* Don't send empty transaction ID list to downloader
Ignore response from peers that don't provide any crawled transactions.
* Log errors when forwarding crawled transaction IDs
Calling the Mempool service should not fail, so if an error happens it
should be visible. However, errors when downloading individual
transactions can happen from time to time, so there's no need for them
to be very visible.
* Document existing `mempool::Crawler` test
Provide some depth as to what the test expect from the crawler's
behavior.
* Refactor to create `setup_crawler` helper function
Make it easier to reuse the common test setup code.
* Simplify code to expect requests
Now that `zebra_network::Request` implement `Eq`, the call can be
simplified into `expect_request`.
* Refactor to create `respond_with_transaction_ids`
A helper function that checks for a network crawl request and responds
with the given list of crawled transaction IDs.
* Refactor to create `crawler_iterator` helper
A function to intercept and respond to the fanned-out requests sent
during a single crawl iteration.
* Refactor to create `respond_to_queue_request`
Reduce the repeated code necessary to intercept and reply to a request
for queuing transactions to be downloaded.
* Add `respond_to_queue_request_with_error` helper
Intercepts a mempool request to queue transactions to be downloaded, and
responds with an error, simulating an internal problem in the mempool
service implementation.
* Derive `Arbitrary` for `NetworkUpgrade`
This is required for deriving `Arbitrary` for some error types.
* Derive `Arbitrary` for `TransactionError`
Allow random transaction errors to be generated for property tests.
* Derive `Arbitrary` for `MempoolError`
Allow random Mempool errors to be generated for property tests.
* Test if errors don't stop the mempool crawler
The crawler should be robust enough to continue operating even if the
mempool service fails to download transactions or even fails to handle
requests to enqueue transactions.
* Reduce the log level for download errors
They should happen regularly, so there's no need to have them with a
high visibility level.
Co-authored-by: teor <teor@riseup.net>
* Stop crawler if service stops
If `Mempool::poll_ready` returns an error, it's because the mempool
service has stopped and can't handle any requests, so the crawler should
stop as well.
Co-authored-by: teor <teor@riseup.net>
Co-authored-by: Conrado Gouvea <conrado@zfnd.org>
2021-10-04 17:55:42 -07:00
|
|
|
#[cfg_attr(any(test, feature = "proptest-impl"), proptest(skip))]
|
2021-03-24 09:28:25 -07:00
|
|
|
Ed25519(#[from] zebra_chain::primitives::ed25519::Error),
|
2020-10-26 23:42:27 -07:00
|
|
|
|
2021-07-01 23:50:23 -07:00
|
|
|
#[error("Sapling bindingSig MUST represent a valid signature under the transaction binding validating key bvk of SigHash")]
|
Send crawled transaction IDs to downloader (#2801)
* Rename type parameter to be more explicit
Replace the single letter with a proper name.
* Remove imports for `Request` and `Response`
The type names will conflict with the ones for the mempool service.
* Attach `Mempool` service to the `Crawler`
Add a field to the `Crawler` type to store a way to access the `Mempool`
service.
* Forward crawled transactions to downloader
The crawled transactions are now sent to the transaction downloader and
verifier, to be included in the mempool.
* Derive `Eq` and `PartialEq` for `mempool::Request`
Make it simpler to use the `MockService::expect_request` method.
* Test if crawled transactions are downloaded
Create some dummy crawled transactions, and let the crawler discover
them. Then check if they are forwarded to the mempool to be downloaded
and verified.
* Don't send empty transaction ID list to downloader
Ignore response from peers that don't provide any crawled transactions.
* Log errors when forwarding crawled transaction IDs
Calling the Mempool service should not fail, so if an error happens it
should be visible. However, errors when downloading individual
transactions can happen from time to time, so there's no need for them
to be very visible.
* Document existing `mempool::Crawler` test
Provide some depth as to what the test expect from the crawler's
behavior.
* Refactor to create `setup_crawler` helper function
Make it easier to reuse the common test setup code.
* Simplify code to expect requests
Now that `zebra_network::Request` implement `Eq`, the call can be
simplified into `expect_request`.
* Refactor to create `respond_with_transaction_ids`
A helper function that checks for a network crawl request and responds
with the given list of crawled transaction IDs.
* Refactor to create `crawler_iterator` helper
A function to intercept and respond to the fanned-out requests sent
during a single crawl iteration.
* Refactor to create `respond_to_queue_request`
Reduce the repeated code necessary to intercept and reply to a request
for queuing transactions to be downloaded.
* Add `respond_to_queue_request_with_error` helper
Intercepts a mempool request to queue transactions to be downloaded, and
responds with an error, simulating an internal problem in the mempool
service implementation.
* Derive `Arbitrary` for `NetworkUpgrade`
This is required for deriving `Arbitrary` for some error types.
* Derive `Arbitrary` for `TransactionError`
Allow random transaction errors to be generated for property tests.
* Derive `Arbitrary` for `MempoolError`
Allow random Mempool errors to be generated for property tests.
* Test if errors don't stop the mempool crawler
The crawler should be robust enough to continue operating even if the
mempool service fails to download transactions or even fails to handle
requests to enqueue transactions.
* Reduce the log level for download errors
They should happen regularly, so there's no need to have them with a
high visibility level.
Co-authored-by: teor <teor@riseup.net>
* Stop crawler if service stops
If `Mempool::poll_ready` returns an error, it's because the mempool
service has stopped and can't handle any requests, so the crawler should
stop as well.
Co-authored-by: teor <teor@riseup.net>
Co-authored-by: Conrado Gouvea <conrado@zfnd.org>
2021-10-04 17:55:42 -07:00
|
|
|
#[cfg_attr(any(test, feature = "proptest-impl"), proptest(skip))]
|
2021-03-24 09:28:25 -07:00
|
|
|
RedJubjub(zebra_chain::primitives::redjubjub::Error),
|
2020-11-23 22:46:02 -08:00
|
|
|
|
2021-07-01 23:50:23 -07:00
|
|
|
#[error("Orchard bindingSig MUST represent a valid signature under the transaction binding validating key bvk of SigHash")]
|
Send crawled transaction IDs to downloader (#2801)
* Rename type parameter to be more explicit
Replace the single letter with a proper name.
* Remove imports for `Request` and `Response`
The type names will conflict with the ones for the mempool service.
* Attach `Mempool` service to the `Crawler`
Add a field to the `Crawler` type to store a way to access the `Mempool`
service.
* Forward crawled transactions to downloader
The crawled transactions are now sent to the transaction downloader and
verifier, to be included in the mempool.
* Derive `Eq` and `PartialEq` for `mempool::Request`
Make it simpler to use the `MockService::expect_request` method.
* Test if crawled transactions are downloaded
Create some dummy crawled transactions, and let the crawler discover
them. Then check if they are forwarded to the mempool to be downloaded
and verified.
* Don't send empty transaction ID list to downloader
Ignore response from peers that don't provide any crawled transactions.
* Log errors when forwarding crawled transaction IDs
Calling the Mempool service should not fail, so if an error happens it
should be visible. However, errors when downloading individual
transactions can happen from time to time, so there's no need for them
to be very visible.
* Document existing `mempool::Crawler` test
Provide some depth as to what the test expect from the crawler's
behavior.
* Refactor to create `setup_crawler` helper function
Make it easier to reuse the common test setup code.
* Simplify code to expect requests
Now that `zebra_network::Request` implement `Eq`, the call can be
simplified into `expect_request`.
* Refactor to create `respond_with_transaction_ids`
A helper function that checks for a network crawl request and responds
with the given list of crawled transaction IDs.
* Refactor to create `crawler_iterator` helper
A function to intercept and respond to the fanned-out requests sent
during a single crawl iteration.
* Refactor to create `respond_to_queue_request`
Reduce the repeated code necessary to intercept and reply to a request
for queuing transactions to be downloaded.
* Add `respond_to_queue_request_with_error` helper
Intercepts a mempool request to queue transactions to be downloaded, and
responds with an error, simulating an internal problem in the mempool
service implementation.
* Derive `Arbitrary` for `NetworkUpgrade`
This is required for deriving `Arbitrary` for some error types.
* Derive `Arbitrary` for `TransactionError`
Allow random transaction errors to be generated for property tests.
* Derive `Arbitrary` for `MempoolError`
Allow random Mempool errors to be generated for property tests.
* Test if errors don't stop the mempool crawler
The crawler should be robust enough to continue operating even if the
mempool service fails to download transactions or even fails to handle
requests to enqueue transactions.
* Reduce the log level for download errors
They should happen regularly, so there's no need to have them with a
high visibility level.
Co-authored-by: teor <teor@riseup.net>
* Stop crawler if service stops
If `Mempool::poll_ready` returns an error, it's because the mempool
service has stopped and can't handle any requests, so the crawler should
stop as well.
Co-authored-by: teor <teor@riseup.net>
Co-authored-by: Conrado Gouvea <conrado@zfnd.org>
2021-10-04 17:55:42 -07:00
|
|
|
#[cfg_attr(any(test, feature = "proptest-impl"), proptest(skip))]
|
2023-02-01 15:27:28 -08:00
|
|
|
RedPallas(zebra_chain::primitives::reddsa::Error),
|
2021-07-01 23:50:23 -07:00
|
|
|
|
2020-11-23 22:46:02 -08:00
|
|
|
// temporary error type until #1186 is fixed
|
|
|
|
#[error("Downcast from BoxError to redjubjub::Error failed")]
|
|
|
|
InternalDowncastError(String),
|
2021-07-01 16:03:34 -07:00
|
|
|
|
2021-12-13 11:50:49 -08:00
|
|
|
#[error("either vpub_old or vpub_new must be zero")]
|
|
|
|
BothVPubsNonZero,
|
|
|
|
|
2021-07-01 16:03:34 -07:00
|
|
|
#[error("adding to the sprout pool is disabled after Canopy")]
|
|
|
|
DisabledAddToSproutPool,
|
2021-10-14 14:15:10 -07:00
|
|
|
|
|
|
|
#[error("could not calculate the transaction fee")]
|
|
|
|
IncorrectFee,
|
2021-10-27 19:49:28 -07:00
|
|
|
|
|
|
|
#[error("transparent double-spend: {_0:?} is spent twice")]
|
|
|
|
DuplicateTransparentSpend(transparent::OutPoint),
|
|
|
|
|
|
|
|
#[error("sprout double-spend: duplicate nullifier: {_0:?}")]
|
|
|
|
DuplicateSproutNullifier(sprout::Nullifier),
|
|
|
|
|
|
|
|
#[error("sapling double-spend: duplicate nullifier: {_0:?}")]
|
|
|
|
DuplicateSaplingNullifier(sapling::Nullifier),
|
|
|
|
|
|
|
|
#[error("orchard double-spend: duplicate nullifier: {_0:?}")]
|
|
|
|
DuplicateOrchardNullifier(orchard::Nullifier),
|
2021-11-08 13:45:54 -08:00
|
|
|
|
|
|
|
#[error("must have at least one active orchard flag")]
|
|
|
|
NotEnoughFlags,
|
2022-11-10 22:40:35 -08:00
|
|
|
|
|
|
|
#[error("could not find a mempool transaction input UTXO in the best chain")]
|
|
|
|
TransparentInputNotFound,
|
2022-11-29 20:40:15 -08:00
|
|
|
|
|
|
|
#[error("could not validate nullifiers and anchors on best chain")]
|
|
|
|
#[cfg_attr(any(test, feature = "proptest-impl"), proptest(skip))]
|
2022-12-07 22:11:33 -08:00
|
|
|
// This error variant is at least 128 bytes
|
2023-04-17 20:43:39 -07:00
|
|
|
ValidateContextError(Box<ValidateContextError>),
|
2023-01-27 13:46:51 -08:00
|
|
|
|
|
|
|
#[error("could not validate mempool transaction lock time on best chain")]
|
|
|
|
#[cfg_attr(any(test, feature = "proptest-impl"), proptest(skip))]
|
|
|
|
// TODO: turn this into a typed error
|
|
|
|
ValidateMempoolLockTimeError(String),
|
2023-04-17 20:43:39 -07:00
|
|
|
|
|
|
|
#[error(
|
|
|
|
"immature transparent coinbase spend: \
|
|
|
|
attempt to spend {outpoint:?} at {spend_height:?}, \
|
|
|
|
but spends are invalid before {min_spend_height:?}, \
|
|
|
|
which is {MIN_TRANSPARENT_COINBASE_MATURITY:?} blocks \
|
|
|
|
after it was created at {created_height:?}"
|
|
|
|
)]
|
|
|
|
#[non_exhaustive]
|
|
|
|
ImmatureTransparentCoinbaseSpend {
|
|
|
|
outpoint: transparent::OutPoint,
|
|
|
|
spend_height: block::Height,
|
|
|
|
min_spend_height: block::Height,
|
|
|
|
created_height: block::Height,
|
|
|
|
},
|
|
|
|
|
|
|
|
#[error(
|
|
|
|
"unshielded transparent coinbase spend: {outpoint:?} \
|
|
|
|
must be spent in a transaction which only has shielded outputs"
|
|
|
|
)]
|
|
|
|
#[non_exhaustive]
|
|
|
|
UnshieldedTransparentCoinbaseSpend {
|
|
|
|
outpoint: transparent::OutPoint,
|
|
|
|
min_spend_height: block::Height,
|
|
|
|
},
|
2023-05-01 17:13:33 -07:00
|
|
|
|
|
|
|
#[error("failed to verify ZIP-317 transaction rules, transaction was not inserted to mempool")]
|
|
|
|
#[cfg_attr(any(test, feature = "proptest-impl"), proptest(skip))]
|
|
|
|
Zip317(#[from] zebra_chain::transaction::zip317::Error),
|
2020-10-26 23:42:27 -07:00
|
|
|
}
|
|
|
|
|
2022-12-07 22:11:33 -08:00
|
|
|
impl From<ValidateContextError> for TransactionError {
    /// Wraps a contextual validation error into a [`TransactionError`].
    fn from(err: ValidateContextError) -> Self {
        // Boxing keeps `TransactionError` small: the unboxed
        // `ValidateContextError` variant is at least 128 bytes.
        Self::ValidateContextError(Box::new(err))
    }
}
|
|
|
|
|
|
|
|
// TODO: use a dedicated variant and From impl for each concrete type, and update callers (#5732)
|
2020-10-26 23:42:27 -07:00
|
|
|
impl From<BoxError> for TransactionError {
|
2021-08-25 08:07:26 -07:00
|
|
|
fn from(mut err: BoxError) -> Self {
|
|
|
|
// TODO: handle redpallas::Error, ScriptInvalid, InvalidSignature
|
2021-03-24 09:28:25 -07:00
|
|
|
match err.downcast::<zebra_chain::primitives::redjubjub::Error>() {
|
2021-08-25 08:07:26 -07:00
|
|
|
Ok(e) => return TransactionError::RedJubjub(*e),
|
|
|
|
Err(e) => err = e,
|
2020-10-26 23:42:27 -07:00
|
|
|
}
|
2021-08-25 08:07:26 -07:00
|
|
|
|
2022-11-29 20:40:15 -08:00
|
|
|
match err.downcast::<ValidateContextError>() {
|
|
|
|
Ok(e) => return (*e).into(),
|
|
|
|
Err(e) => err = e,
|
|
|
|
}
|
|
|
|
|
2021-08-25 08:07:26 -07:00
|
|
|
// buffered transaction verifier service error
|
|
|
|
match err.downcast::<TransactionError>() {
|
|
|
|
Ok(e) => return *e,
|
|
|
|
Err(e) => err = e,
|
|
|
|
}
|
|
|
|
|
|
|
|
TransactionError::InternalDowncastError(format!(
|
2022-12-07 17:05:57 -08:00
|
|
|
"downcast to known transaction error type failed, original error: {err:?}",
|
2021-08-25 08:07:26 -07:00
|
|
|
))
|
2020-10-26 23:42:27 -07:00
|
|
|
}
|
2020-10-12 13:54:48 -07:00
|
|
|
}
|
|
|
|
|
2021-10-06 18:20:38 -07:00
|
|
|
#[derive(Error, Clone, Debug, PartialEq, Eq)]
|
2023-02-20 21:30:29 -08:00
|
|
|
#[allow(missing_docs)]
|
2020-09-21 11:54:06 -07:00
|
|
|
pub enum BlockError {
|
|
|
|
#[error("block contains invalid transactions")]
|
|
|
|
Transaction(#[from] TransactionError),
|
|
|
|
|
2020-11-06 11:07:30 -08:00
|
|
|
#[error("block has no transactions")]
|
2020-09-21 11:54:06 -07:00
|
|
|
NoTransactions,
|
|
|
|
|
2020-11-24 19:55:15 -08:00
|
|
|
#[error("block has mismatched merkle root")]
|
|
|
|
BadMerkleRoot {
|
2021-03-24 09:28:25 -07:00
|
|
|
actual: zebra_chain::block::merkle::Root,
|
|
|
|
expected: zebra_chain::block::merkle::Root,
|
2020-11-24 19:55:15 -08:00
|
|
|
},
|
|
|
|
|
2020-11-25 12:17:16 -08:00
|
|
|
#[error("block contains duplicate transactions")]
|
|
|
|
DuplicateTransaction,
|
|
|
|
|
2023-03-24 00:10:07 -07:00
|
|
|
#[error("block {0:?} is already in present in the state {1:?}")]
|
|
|
|
AlreadyInChain(zebra_chain::block::Hash, zebra_state::KnownBlock),
|
2020-09-21 11:54:06 -07:00
|
|
|
|
|
|
|
#[error("invalid block {0:?}: missing block height")]
|
|
|
|
MissingHeight(zebra_chain::block::Hash),
|
|
|
|
|
|
|
|
#[error("invalid block height {0:?} in {1:?}: greater than the maximum height {2:?}")]
|
|
|
|
MaxHeight(
|
|
|
|
zebra_chain::block::Height,
|
|
|
|
zebra_chain::block::Hash,
|
|
|
|
zebra_chain::block::Height,
|
|
|
|
),
|
|
|
|
|
|
|
|
#[error("invalid difficulty threshold in block header {0:?} {1:?}")]
|
|
|
|
InvalidDifficulty(zebra_chain::block::Height, zebra_chain::block::Hash),
|
|
|
|
|
2020-10-13 18:35:45 -07:00
|
|
|
#[error("block {0:?} has a difficulty threshold {2:?} that is easier than the {3:?} difficulty limit {4:?}, hash: {1:?}")]
|
2020-10-12 15:17:40 -07:00
|
|
|
TargetDifficultyLimit(
|
|
|
|
zebra_chain::block::Height,
|
|
|
|
zebra_chain::block::Hash,
|
|
|
|
zebra_chain::work::difficulty::ExpandedDifficulty,
|
|
|
|
zebra_chain::parameters::Network,
|
|
|
|
zebra_chain::work::difficulty::ExpandedDifficulty,
|
|
|
|
),
|
|
|
|
|
2020-11-04 18:57:31 -08:00
|
|
|
#[error(
|
|
|
|
"block {0:?} on {3:?} has a hash {1:?} that is easier than its difficulty threshold {2:?}"
|
|
|
|
)]
|
2020-09-21 11:54:06 -07:00
|
|
|
DifficultyFilter(
|
|
|
|
zebra_chain::block::Height,
|
|
|
|
zebra_chain::block::Hash,
|
|
|
|
zebra_chain::work::difficulty::ExpandedDifficulty,
|
2020-11-04 18:57:31 -08:00
|
|
|
zebra_chain::parameters::Network,
|
2020-09-21 11:54:06 -07:00
|
|
|
),
|
2021-05-09 18:31:45 -07:00
|
|
|
|
|
|
|
#[error("transaction has wrong consensus branch id for block network upgrade")]
|
|
|
|
WrongTransactionConsensusBranchId,
|
2021-11-15 12:55:32 -08:00
|
|
|
|
|
|
|
#[error(
|
|
|
|
"block {height:?} {hash:?} has {legacy_sigop_count} legacy transparent signature operations, \
|
|
|
|
but the limit is {MAX_BLOCK_SIGOPS}"
|
|
|
|
)]
|
|
|
|
TooManyTransparentSignatureOperations {
|
|
|
|
height: zebra_chain::block::Height,
|
|
|
|
hash: zebra_chain::block::Hash,
|
|
|
|
legacy_sigop_count: u64,
|
|
|
|
},
|
2021-11-23 07:31:56 -08:00
|
|
|
|
|
|
|
#[error("summing miner fees for block {height:?} {hash:?} failed: {source:?}")]
|
|
|
|
SummingMinerFees {
|
|
|
|
height: zebra_chain::block::Height,
|
|
|
|
hash: zebra_chain::block::Hash,
|
|
|
|
source: amount::Error,
|
|
|
|
},
|
2020-09-21 11:54:06 -07:00
|
|
|
}
|
2022-12-07 22:11:33 -08:00
|
|
|
|
|
|
|
impl From<SubsidyError> for BlockError {
|
|
|
|
fn from(err: SubsidyError) -> BlockError {
|
|
|
|
BlockError::Transaction(TransactionError::Subsidy(err))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl BlockError {
|
|
|
|
/// Returns `true` if this is definitely a duplicate request.
|
|
|
|
/// Some duplicate requests might not be detected, and therefore return `false`.
|
|
|
|
pub fn is_duplicate_request(&self) -> bool {
|
|
|
|
matches!(self, BlockError::AlreadyInChain(..))
|
|
|
|
}
|
|
|
|
}
|