Re-use some block checks in the CheckpointVerifier
parent 252affdf84
commit c1a910942d
@@ -33,7 +33,7 @@ use zebra_state as zs;
 use crate::{error::*, transaction as tx};
 use crate::{script, BoxError};

-mod check;
+pub mod check;
 mod subsidy;
 #[cfg(test)]
 mod tests;
@@ -183,10 +183,22 @@ pub fn merkle_root_validity(

     // Bitcoin's transaction Merkle trees are malleable, allowing blocks with
     // duplicate transactions to have the same Merkle root as blocks without
-    // duplicate transactions. Duplicate transactions should cause a block to be
+    // duplicate transactions.
+    //
+    // Collecting into a HashSet deduplicates, so this checks that there are no
+    // duplicate transaction hashes, preventing Merkle root malleability.
+    //
+    // ## Full Block Validation
+    //
+    // Duplicate transactions should cause a block to be
     // rejected, as duplicate transactions imply that the block contains a
     // double-spend. As a defense-in-depth, however, we also check that there
-    // are no duplicate transaction hashes, by collecting into a HashSet.
+    // are no duplicate transaction hashes.
+    //
+    // ## Checkpoint Validation
+    //
+    // To prevent malleability (CVE-2012-2459), we also need to check
+    // whether the transaction hashes are unique.
     use std::collections::HashSet;
     if transaction_hashes.len() != transaction_hashes.iter().collect::<HashSet<_>>().len() {
         return Err(BlockError::DuplicateTransaction);
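The check above is the shared defense against CVE-2012-2459: Bitcoin-style transaction Merkle trees are malleable, so a block with duplicated transactions can present the same Merkle root as the block without them. Comparing the number of transaction hashes with the size of a deduplicated set rejects any block that repeats a transaction. A minimal, self-contained sketch of the same length-comparison technique, using stand-in [u8; 32] hashes instead of Zebra's transaction::Hash:

    // Illustrative sketch only; not part of the diff.
    use std::collections::HashSet;

    /// Returns true if any transaction hash appears more than once.
    /// Collecting into a HashSet drops duplicates, so a length mismatch
    /// proves the slice contains a repeated hash.
    fn has_duplicate_hashes(transaction_hashes: &[[u8; 32]]) -> bool {
        transaction_hashes.len() != transaction_hashes.iter().collect::<HashSet<_>>().len()
    }

    fn main() {
        let unique = [[1u8; 32], [2u8; 32]];
        let duplicated = [[1u8; 32], [1u8; 32]];
        assert!(!has_duplicate_hashes(&unique));
        assert!(has_duplicate_hashes(&duplicated));
    }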
@@ -14,7 +14,7 @@
 //! block for the configured network.

 use std::{
-    collections::{BTreeMap, HashSet},
+    collections::BTreeMap,
     ops::{Bound, Bound::*},
     pin::Pin,
     sync::Arc,
@@ -30,10 +30,11 @@ use tracing::instrument;
 use zebra_chain::{
     block::{self, Block},
     parameters::{Network, GENESIS_PREVIOUS_BLOCK_HASH},
+    work::equihash,
 };
 use zebra_state as zs;

-use crate::BoxError;
+use crate::{block::VerifyBlockError, error::BlockError, BoxError};

 pub(crate) mod list;
 mod types;
@@ -453,23 +454,8 @@ where
             .iter()
             .map(|tx| tx.hash())
             .collect::<Vec<_>>();
-        let merkle_root = transaction_hashes.iter().cloned().collect();
-
-        if block.header.merkle_root != merkle_root {
-            return Err(VerifyCheckpointError::BadMerkleRoot {
-                expected: block.header.merkle_root,
-                actual: merkle_root,
-            });
-        }
-
-        // To prevent malleability (CVE-2012-2459), we also need to check
-        // whether the transaction hashes are unique. Collecting into a HashSet
-        // deduplicates, so this checks that there are no duplicate transaction
-        // hashes, preventing Merkle root malleability.
-        if transaction_hashes.len() != transaction_hashes.iter().collect::<HashSet<_>>().len() {
-            return Err(VerifyCheckpointError::DuplicateTransaction);
-        }
+
+        // Check that the Merkle root is valid.
+        crate::block::check::merkle_root_validity(&block, &transaction_hashes)?;

         Ok(block_height)
     }
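After this change the checkpoint verifier no longer recomputes the Merkle root or runs its own duplicate-hash check inline; both checks live in the shared crate::block::check::merkle_root_validity, and the single ? converts the shared check's BlockError into a VerifyCheckpointError through the From impls added below. A self-contained sketch of that pattern, with toy stand-in types (a XOR "root" instead of the real Merkle tree, u64 instead of transaction hashes), to show why one ? line replaces the two removed checks; none of these names are Zebra's actual definitions:

    // Stand-ins for illustration only; the real types are Zebra's BlockError,
    // VerifyCheckpointError, Block, and transaction::Hash.
    use std::collections::HashSet;

    #[derive(Debug)]
    enum BlockError {
        BadMerkleRoot,
        DuplicateTransaction,
    }

    #[derive(Debug)]
    enum VerifyCheckpointError {
        VerifyBlock(BlockError),
    }

    impl From<BlockError> for VerifyCheckpointError {
        fn from(err: BlockError) -> VerifyCheckpointError {
            VerifyCheckpointError::VerifyBlock(err)
        }
    }

    // The shared block-level check: recompute the root, then reject duplicates.
    fn merkle_root_validity(header_root: u64, tx_hashes: &[u64]) -> Result<(), BlockError> {
        // Toy "Merkle root": XOR of the hashes, standing in for the real tree.
        let computed = tx_hashes.iter().fold(0u64, |acc, h| acc ^ h);
        if computed != header_root {
            return Err(BlockError::BadMerkleRoot);
        }
        if tx_hashes.len() != tx_hashes.iter().collect::<HashSet<_>>().len() {
            return Err(BlockError::DuplicateTransaction);
        }
        Ok(())
    }

    // The checkpoint-level caller: `?` converts BlockError into
    // VerifyCheckpointError through the From impl above.
    fn check_block(header_root: u64, tx_hashes: &[u64]) -> Result<(), VerifyCheckpointError> {
        merkle_root_validity(header_root, tx_hashes)?;
        Ok(())
    }

    fn main() {
        assert!(check_block(1 ^ 2, &[1, 2]).is_ok());
        assert!(matches!(
            check_block(0, &[1, 1]),
            Err(VerifyCheckpointError::VerifyBlock(BlockError::DuplicateTransaction))
        ));
    }

Keeping the duplicate-hash check next to the Merkle root comparison in one shared function means full block validation and checkpoint validation reject malleated blocks the same way.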
@@ -831,6 +817,8 @@ pub enum VerifyCheckpointError {
     CommitFinalized(BoxError),
     #[error(transparent)]
     CheckpointList(BoxError),
+    #[error(transparent)]
+    VerifyBlock(BoxError),
     #[error("too many queued blocks at this height")]
     QueuedLimit,
     #[error("the block hash does not match the chained checkpoint hash, expected {expected:?} found {found:?}")]
@@ -842,6 +830,18 @@ pub enum VerifyCheckpointError {
     ShuttingDown,
 }

+impl From<VerifyBlockError> for VerifyCheckpointError {
+    fn from(err: VerifyBlockError) -> VerifyCheckpointError {
+        VerifyCheckpointError::VerifyBlock(err.into())
+    }
+}
+
+impl From<BlockError> for VerifyCheckpointError {
+    fn from(err: BlockError) -> VerifyCheckpointError {
+        VerifyCheckpointError::VerifyBlock(err.into())
+    }
+}
+
 /// The CheckpointVerifier service implementation.
 ///
 /// After verification, the block futures resolve to their hashes.
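The new VerifyBlock(BoxError) variant uses thiserror's #[error(transparent)], so the checkpoint error reports the wrapped block error's message and source unchanged, and the two From impls above let both VerifyBlockError and BlockError flow into it via err.into(), which boxes the error. A standalone sketch of the transparent behavior using the thiserror crate; the names mirror the diff but this is not Zebra's actual code, and the BoxError alias is an assumption about how such an alias is typically defined:

    use thiserror::Error;

    // A boxed dynamic error alias, assumed to match the usual definition.
    type BoxError = Box<dyn std::error::Error + Send + Sync + 'static>;

    #[derive(Error, Debug)]
    enum VerifyCheckpointError {
        // `transparent` forwards Display and source() to the wrapped error,
        // so callers see the underlying block error unchanged.
        #[error(transparent)]
        VerifyBlock(BoxError),
        #[error("too many queued blocks at this height")]
        QueuedLimit,
    }

    impl From<BoxError> for VerifyCheckpointError {
        fn from(err: BoxError) -> VerifyCheckpointError {
            VerifyCheckpointError::VerifyBlock(err)
        }
    }

    fn main() {
        let inner: BoxError = "duplicate transaction in block".into();
        let err: VerifyCheckpointError = inner.into();
        // Prints the inner message, thanks to #[error(transparent)].
        println!("{}", err);
    }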