Implement Expanded to Compact Difficulty Conversion (#1196)
* Implement Expanded to Compact Difficulty
* Implement Arbitrary for CompactDifficulty
  Remove the derive, and generate values from random block hashes.
* Implement Arbitrary for ExpandedDifficulty and Work
* Use Arbitrary for CompactDifficulty in Arbitrary for Block
* Test difficulty on all block test vectors
  And clean up some duplicate test code
* Round-trip tests for compact test cases
* Round-trip tests for compact difficulty in block test vectors
* Make Add for Work return PartialCumulativeWork
  Remove AddAssign for Work
  Rewrite a proptest using Sub for PartialCumulativeWork
  Use Arbitrary for Work
* Add roundtrip work sum tests
* Add roundtrip comparison difficulty tests
* Add failing proptest cases due to test bugs
* Use Some(_) rather than _.into()
* Reduce visibility of difficulty type inner values
* Split work and other difficulty proptests
  This change makes sure that rejected work values don't disable property tests on other types.
parent f338048012
commit 1c31225aac
@@ -0,0 +1,7 @@
+# Seeds for failure cases proptest has generated in the past. It is
+# automatically read and these particular cases re-run before any
+# novel cases are generated.
+#
+# It is recommended to check this file in to source control so that
+# everyone who runs the test benefits from these saved cases.
+cc 8e9b7658e31f20a01083e3b065f8ca0cdc98fedaf3058405e9e9fb59fd90b570 # shrinks to expanded_seed = block::Hash("0000000000000000000000000000000000000000000000000000000000000000")
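The saved case above shrinks to the all-zeroes hash. As a sketch of why, using the types added in this commit: `from_hash` maps the zero hash to a zero `ExpandedDifficulty`, and `to_compact` panics on zero, so the round-trip property test has to skip that seed with `prop_assume!`.

    // A sketch, assuming the difficulty types from this commit: the
    // all-zeroes hash expands to zero difficulty, which `to_compact()`
    // rejects with "Zero difficulty values are invalid".
    let zero_seed = ExpandedDifficulty::from_hash(&block::Hash([0; 32]));
    assert_eq!(zero_seed, ExpandedDifficulty(U256::zero()));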
@@ -73,6 +73,7 @@ impl Arbitrary for Header {
             any::<[u8; 32]>(),
             // time is interpreted as u32 in the spec, but rust timestamps are i64
             (0i64..(u32::MAX as i64)),
+            any::<CompactDifficulty>(),
             any::<[u8; 32]>(),
             any::<equihash::Solution>(),
         )
@@ -80,19 +81,19 @@ impl Arbitrary for Header {
             |(
                 version,
                 previous_block_hash,
-                merkle_root_hash,
+                merkle_root,
                 root_bytes,
                 timestamp,
+                difficulty_threshold,
                 nonce,
                 solution,
             )| Header {
                 version,
                 previous_block_hash,
-                merkle_root: merkle_root_hash,
+                merkle_root,
                 root_bytes,
                 time: Utc.timestamp(timestamp, 0),
-                // TODO: replace with `ExpandedDifficulty.to_compact` when that method is implemented
-                difficulty_threshold: CompactDifficulty(545259519),
+                difficulty_threshold,
                 nonce,
                 solution,
             },
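Since `difficulty_threshold` now comes from `any::<CompactDifficulty>()`, downstream property tests can rely on it being valid. A hypothetical consumer, as a sketch (the test name and property are illustrative, not part of this change):

    use proptest::prelude::*;

    proptest! {
        // Arbitrary headers now carry a difficulty threshold produced by
        // `to_compact()`, so expanding it never hits the zero/invalid cases.
        #[test]
        fn arbitrary_header_difficulty_is_valid(header in any::<Header>()) {
            prop_assert!(header.difficulty_threshold.to_expanded().is_some());
        }
    }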
@@ -13,14 +13,16 @@
 use crate::{block, parameters::Network};
 
-use std::cmp::{Ordering, PartialEq, PartialOrd};
-use std::{fmt, ops::Add, ops::AddAssign};
+use std::{
+    cmp::{Ordering, PartialEq, PartialOrd},
+    convert::TryFrom,
+    fmt,
+};
 
 use primitive_types::U256;
 
-#[cfg(any(test, feature = "proptest-impl"))]
-use proptest_derive::Arbitrary;
 
+mod arbitrary;
 #[cfg(test)]
 mod tests;
 
@@ -53,8 +55,7 @@ mod tests;
 /// multiple equivalent `CompactDifficulty` values, due to redundancy in the
 /// floating-point format.
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize)]
-#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))]
-pub struct CompactDifficulty(pub u32);
+pub struct CompactDifficulty(pub(crate) u32);
 
 impl fmt::Debug for CompactDifficulty {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
@@ -65,6 +66,9 @@ impl fmt::Debug for CompactDifficulty {
     }
 }
 
+/// An invalid CompactDifficulty value, for testing.
+pub const INVALID_COMPACT_DIFFICULTY: CompactDifficulty = CompactDifficulty(u32::MAX);
+
 /// A 256-bit unsigned "expanded difficulty" value.
 ///
 /// Used as a target threshold for the difficulty of a `block::Hash`.
@@ -74,13 +78,19 @@ impl fmt::Debug for CompactDifficulty {
 /// The precise bit pattern of an `ExpandedDifficulty` value is
 /// consensus-critical, because it is compared with the `block::Hash`.
 ///
-/// Note that each `CompactDifficulty` value represents a range of
-/// `ExpandedDifficulty` values, because the precision of the
-/// floating-point format requires rounding on conversion.
+/// Note that each `CompactDifficulty` value can be converted from a
+/// range of `ExpandedDifficulty` values, because the precision of
+/// the floating-point format requires rounding on conversion.
 ///
+/// Therefore, consensus-critical code must perform the specified
+/// conversions to `CompactDifficulty`, even if the original
+/// `ExpandedDifficulty` values are known.
 ///
+/// Callers should avoid constructing `ExpandedDifficulty` zero
+/// values, because they are rejected by the consensus rules,
+/// and cause some conversion functions to panic.
 //
 // TODO: Use NonZeroU256, when available
 #[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd)]
 pub struct ExpandedDifficulty(U256);
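The redundancy mentioned above is easy to see with plain integers: the compact format stores `mantissa * 256^(exponent - 3)`, so shifting the mantissa while adjusting the exponent yields distinct encodings of the same target. A standalone sketch, not part of the diff (it assumes `OFFSET == 3`, as the one-valued cases in the tests below imply):

    // Decode an nBits-style compact value into a small target.
    fn to_target(nbits: u32) -> u128 {
        let exponent = nbits >> 24;
        let mantissa = (nbits & 0x007f_ffff) as u128;
        if exponent <= 3 {
            mantissa >> (8 * (3 - exponent))
        } else {
            mantissa << (8 * (exponent - 3))
        }
    }

    fn main() {
        // Two distinct encodings of a target of one; `to_compact` always
        // produces the canonical 0x01010000 form (see the vectors tests).
        assert_eq!(to_target(0x01010000), 1);
        assert_eq!(to_target(0x03000001), 1);
    }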
@@ -236,7 +246,7 @@ impl CompactDifficulty {
         // `((2^256 - expanded - 1) / (expanded + 1)) + 1`, or
         let result = (!expanded.0 / (expanded.0 + 1)) + 1;
         if result <= u128::MAX.into() {
-            Work(result.as_u128()).into()
+            Some(Work(result.as_u128()))
         } else {
             None
         }
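A standalone check of the identity in the comment above: since `!x` is `U256::MAX - x`, the expression computes `floor(2^256 / (expanded + 1))` without needing a 257-bit numerator. A sketch against the Bitcoin mainnet minimum difficulty, which also appears in the test vectors below (assumes the `primitive-types` crate):

    use primitive_types::U256;

    fn main() {
        // The Bitcoin mainnet minimum difficulty target: 0xffff * 256^26.
        let expanded = U256::from(0xffff) << 208;

        // `!expanded` is `2^256 - expanded - 1`, so this is
        // `floor(2^256 / (expanded + 1))` with no overflow.
        let work = (!expanded / (expanded + 1)) + 1;

        // Matches the `work_btc_main` test vector below: 2^32 + 2^16 + 1.
        assert_eq!(work, U256::from(0x0001_0001_0001u64));
    }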
@@ -255,7 +265,7 @@ impl ExpandedDifficulty {
     ///
     /// Hashes are not used to calculate the difficulties of future blocks, so
     /// users of this module should avoid converting hashes into difficulties.
-    fn from_hash(hash: &block::Hash) -> ExpandedDifficulty {
+    pub(super) fn from_hash(hash: &block::Hash) -> ExpandedDifficulty {
         U256::from_little_endian(&hash.0).into()
     }
 
@@ -272,6 +282,73 @@ impl ExpandedDifficulty {
 
         limit.into()
     }
+
+    /// Calculate the CompactDifficulty for an expanded difficulty.
+    ///
+    /// See `ToCompact()` in the Zcash Specification, and `GetCompact()`
+    /// in zcashd.
+    ///
+    /// Panics:
+    ///
+    /// If `self` is zero.
+    ///
+    /// `ExpandedDifficulty` values are generated in two ways:
+    /// * conversion from `CompactDifficulty` values, which rejects zeroes, and
+    /// * difficulty adjustment calculations, which impose a non-zero minimum
+    ///   `target_difficulty_limit`.
+    ///
+    /// Neither of these methods yield zero values.
+    pub fn to_compact(&self) -> CompactDifficulty {
+        // The zcashd implementation supports negative and zero compact values.
+        // These values are rejected by the protocol rules. Zebra is designed so
+        // that invalid states are not representable. Therefore, this function
+        // does not produce negative compact values, and panics on zero compact
+        // values. (The negative compact value code in zcashd is unused.)
+        assert!(self.0 > 0.into(), "Zero difficulty values are invalid");
+
+        // The constants for this floating-point representation.
+        // Alias the constants here, so the code is easier to read.
+        const UNSIGNED_MANTISSA_MASK: u32 = CompactDifficulty::UNSIGNED_MANTISSA_MASK;
+        const OFFSET: i32 = CompactDifficulty::OFFSET;
+
+        // Calculate the final size, accounting for the sign bit.
+        // This is the size *after* applying the sign bit adjustment in `ToCompact()`.
+        let size = self.0.bits() / 8 + 1;
+
+        // Make sure the mantissa is non-negative, by shifting down values that
+        // would otherwise overflow into the sign bit
+        let mantissa = if self.0 <= UNSIGNED_MANTISSA_MASK.into() {
+            // Value is small, shift up if needed
+            self.0 << (8 * (3 - size))
+        } else {
+            // Value is large, shift down
+            self.0 >> (8 * (size - 3))
+        };
+
+        // This assertion also makes sure that size fits in its 8 bit compact field
+        assert!(
+            size < (31 + OFFSET) as _,
+            format!(
+                "256^size (256^{}) must fit in a u256, after the sign bit adjustment and offset",
+                size
+            )
+        );
+        let size = u32::try_from(size).expect("a 0-6 bit value fits in a u32");
+
+        assert!(
+            mantissa <= UNSIGNED_MANTISSA_MASK.into(),
+            format!("mantissa {:x?} must fit in its compact field", mantissa)
+        );
+        let mantissa = u32::try_from(mantissa).expect("a 0-23 bit value fits in a u32");
+
+        if mantissa > 0 {
+            CompactDifficulty(mantissa + (size << 24))
+        } else {
+            // This check catches invalid mantissas. Overflows and underflows
+            // should also be unreachable, but they aren't caught here.
+            unreachable!("converted CompactDifficulty values must be valid")
+        }
+    }
 }
 
 impl From<U256> for ExpandedDifficulty {
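A worked instance of the algorithm above, outside the type (a sketch assuming the `primitive-types` crate): for the Bitcoin mainnet minimum target, the same steps reproduce the familiar nBits value `0x1d00ffff`.

    use primitive_types::U256;

    fn main() {
        let expanded = U256::from(0xffff) << 208;

        // bits() == 224, so the adjusted size is 224 / 8 + 1 == 29 (0x1d).
        let size = expanded.bits() / 8 + 1;
        assert_eq!(size, 29);

        // The value is larger than the mantissa mask, so shift down
        // by 8 * (29 - 3) == 208 bits, leaving a mantissa of 0xffff.
        let mantissa = (expanded >> (8 * (size - 3))).as_u32();
        assert_eq!(mantissa, 0xffff);

        // mantissa + (size << 24) == 0x1d00ffff.
        assert_eq!(mantissa + ((size as u32) << 24), 0x1d00ffff);
    }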
@@ -328,21 +405,11 @@ impl PartialOrd<ExpandedDifficulty> for block::Hash {
     }
 }
 
-impl Add for Work {
-    type Output = Self;
+impl std::ops::Add for Work {
+    type Output = PartialCumulativeWork;
 
-    fn add(self, rhs: Work) -> Self {
-        let result = self
-            .0
-            .checked_add(rhs.0)
-            .expect("Work values do not overflow");
-        Work(result)
-    }
-}
-
-impl AddAssign for Work {
-    fn add_assign(&mut self, rhs: Work) {
-        *self = *self + rhs;
+    fn add(self, rhs: Work) -> PartialCumulativeWork {
+        PartialCumulativeWork::from(self) + rhs
     }
 }
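Returning `PartialCumulativeWork` from `Work + Work` forces callers to accumulate in the cumulative type. A sketch of the resulting pattern (the helper name is hypothetical; `Default` and `+=` for `PartialCumulativeWork` are taken from the tests in this diff):

    // Accumulate per-block work into a chain total: `+=` takes a `Work`,
    // and the running total is always a `PartialCumulativeWork`.
    fn chain_work(block_works: &[Work]) -> PartialCumulativeWork {
        let mut total = PartialCumulativeWork::default();
        for &work in block_works {
            total += work;
        }
        total
    }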
@@ -350,6 +417,12 @@ impl AddAssign for Work {
 /// Partial work used to track relative work in non-finalized chains
 pub struct PartialCumulativeWork(u128);
 
+impl From<Work> for PartialCumulativeWork {
+    fn from(work: Work) -> Self {
+        PartialCumulativeWork(work.0)
+    }
+}
+
 impl std::ops::Add<Work> for PartialCumulativeWork {
     type Output = PartialCumulativeWork;
 
@@ -0,0 +1,83 @@
+use super::*;
+
+use crate::block;
+
+use proptest::{arbitrary::Arbitrary, collection::vec, prelude::*};
+
+impl Arbitrary for CompactDifficulty {
+    type Parameters = ();
+
+    fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
+        (vec(any::<u8>(), 32))
+            .prop_filter_map("zero CompactDifficulty values are invalid", |v| {
+                let mut bytes = [0; 32];
+                bytes.copy_from_slice(v.as_slice());
+                if bytes == [0; 32] {
+                    return None;
+                }
+                // In the Zcash protocol, a CompactDifficulty is generated using the difficulty
+                // adjustment functions. Instead of using those functions, we make a random
+                // ExpandedDifficulty, then convert it to a CompactDifficulty.
+                ExpandedDifficulty::from_hash(&block::Hash(bytes))
+                    .to_compact()
+                    .into()
+            })
+            .boxed()
+    }
+
+    type Strategy = BoxedStrategy<Self>;
+}
+
+impl Arbitrary for ExpandedDifficulty {
+    type Parameters = ();
+
+    fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
+        any::<CompactDifficulty>()
+            .prop_map(|d| {
+                // In the Zcash protocol, an ExpandedDifficulty is converted from a CompactDifficulty,
+                // or generated using the difficulty adjustment functions. We use the conversion in
+                // our proptest strategy.
+                d.to_expanded()
+                    .expect("arbitrary CompactDifficulty is valid")
+            })
+            .boxed()
+    }
+
+    type Strategy = BoxedStrategy<Self>;
+}
+
+impl Arbitrary for Work {
+    type Parameters = ();
+
+    fn arbitrary_with(_args: ()) -> Self::Strategy {
+        // In the Zcash protocol, a Work is converted from an ExpandedDifficulty.
+        // But some randomised difficulties are impractically large, and will
+        // never appear in any real-world block. So we just use a random Work value.
+        (any::<u128>())
+            .prop_filter_map("zero Work values are invalid", |w| {
+                if w == 0 {
+                    None
+                } else {
+                    Work(w).into()
+                }
+            })
+            .boxed()
+    }
+
+    type Strategy = BoxedStrategy<Self>;
+}
+
+impl Arbitrary for PartialCumulativeWork {
+    type Parameters = ();
+
+    fn arbitrary_with(_args: ()) -> Self::Strategy {
+        // In Zebra's implementation, a PartialCumulativeWork is the sum of 0..100 Work values.
+        // But our Work values are randomised, rather than being derived from real-world
+        // difficulties. So we don't need to sum multiple Work values here.
+        (any::<Work>())
+            .prop_map(PartialCumulativeWork::from)
+            .boxed()
+    }
+
+    type Strategy = BoxedStrategy<Self>;
+}
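These impls plug straight into `any::<...>()`. A minimal sketch of a test that consumes them (the property only restates what the `Work` strategy already guarantees):

    use proptest::prelude::*;

    proptest! {
        // The Work strategy filters out zero, so conversions that assume
        // non-zero work never panic under these generators.
        #[test]
        fn arbitrary_work_is_nonzero(work in any::<Work>()) {
            prop_assert!(work > Work(0));
        }
    }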
@@ -1,458 +1,2 @@
-use super::*;
-
-use crate::block::Block;
-use crate::serialization::ZcashDeserialize;
-
-use color_eyre::eyre::Report;
-use proptest::prelude::*;
-use std::sync::Arc;
-
-// Alias the struct constants here, so the code is easier to read.
-const PRECISION: u32 = CompactDifficulty::PRECISION;
-const SIGN_BIT: u32 = CompactDifficulty::SIGN_BIT;
-const UNSIGNED_MANTISSA_MASK: u32 = CompactDifficulty::UNSIGNED_MANTISSA_MASK;
-const OFFSET: i32 = CompactDifficulty::OFFSET;
-
-impl Arbitrary for ExpandedDifficulty {
-    type Parameters = ();
-
-    fn arbitrary_with(_args: ()) -> Self::Strategy {
-        (any::<[u8; 32]>())
-            .prop_map(|v| ExpandedDifficulty(U256::from_little_endian(&v)))
-            .boxed()
-    }
-
-    type Strategy = BoxedStrategy<Self>;
-}
-
-impl Arbitrary for Work {
-    type Parameters = ();
-
-    fn arbitrary_with(_args: ()) -> Self::Strategy {
-        (any::<u128>()).prop_map(Work).boxed()
-    }
-
-    type Strategy = BoxedStrategy<Self>;
-}
-
-/// Test debug formatting.
-#[test]
-fn debug_format() {
-    zebra_test::init();
-
-    assert_eq!(
-        format!("{:?}", CompactDifficulty(0)),
-        "CompactDifficulty(0x00000000)"
-    );
-    assert_eq!(
-        format!("{:?}", CompactDifficulty(1)),
-        "CompactDifficulty(0x00000001)"
-    );
-    assert_eq!(
-        format!("{:?}", CompactDifficulty(u32::MAX)),
-        "CompactDifficulty(0xffffffff)"
-    );
-
-    assert_eq!(
-        format!("{:?}", ExpandedDifficulty(U256::zero())),
-        "ExpandedDifficulty(\"0000000000000000000000000000000000000000000000000000000000000000\")"
-    );
-    assert_eq!(
-        format!("{:?}", ExpandedDifficulty(U256::one())),
-        "ExpandedDifficulty(\"0100000000000000000000000000000000000000000000000000000000000000\")"
-    );
-    assert_eq!(
-        format!("{:?}", ExpandedDifficulty(U256::MAX)),
-        "ExpandedDifficulty(\"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\")"
-    );
-
-    assert_eq!(format!("{:?}", Work(0)), "Work(0x0, 0, -inf)");
-    assert_eq!(
-        format!("{:?}", Work(u8::MAX as u128)),
-        "Work(0xff, 255, 7.99435)"
-    );
-    assert_eq!(
-        format!("{:?}", Work(u64::MAX as u128)),
-        "Work(0xffffffffffffffff, 18446744073709551615, 64.00000)"
-    );
-    assert_eq!(
-        format!("{:?}", Work(u128::MAX)),
-        "Work(0xffffffffffffffffffffffffffffffff, 340282366920938463463374607431768211455, 128.00000)"
-    );
-}
-
-/// Test zero values for CompactDifficulty.
-#[test]
-fn compact_zero() {
-    zebra_test::init();
-
-    let natural_zero = CompactDifficulty(0);
-    assert_eq!(natural_zero.to_expanded(), None);
-    assert_eq!(natural_zero.to_work(), None);
-
-    // Small value zeroes
-    let small_zero_1 = CompactDifficulty(1);
-    assert_eq!(small_zero_1.to_expanded(), None);
-    assert_eq!(small_zero_1.to_work(), None);
-    let small_zero_max = CompactDifficulty(UNSIGNED_MANTISSA_MASK);
-    assert_eq!(small_zero_max.to_expanded(), None);
-    assert_eq!(small_zero_max.to_work(), None);
-
-    // Special-cased zeroes, negative in the floating-point representation
-    let sc_zero = CompactDifficulty(SIGN_BIT);
-    assert_eq!(sc_zero.to_expanded(), None);
-    assert_eq!(sc_zero.to_work(), None);
-    let sc_zero_next = CompactDifficulty(SIGN_BIT + 1);
-    assert_eq!(sc_zero_next.to_expanded(), None);
-    assert_eq!(sc_zero_next.to_work(), None);
-    let sc_zero_high = CompactDifficulty((1 << PRECISION) - 1);
-    assert_eq!(sc_zero_high.to_expanded(), None);
-    assert_eq!(sc_zero_high.to_work(), None);
-    let sc_zero_max = CompactDifficulty(u32::MAX);
-    assert_eq!(sc_zero_max.to_expanded(), None);
-    assert_eq!(sc_zero_max.to_work(), None);
-}
-
-/// Test extreme values for CompactDifficulty.
-#[test]
-fn compact_extremes() {
-    zebra_test::init();
-
-    // Values equal to one
-    let expanded_one = Some(ExpandedDifficulty(U256::one()));
-    let work_one = None;
-
-    let one = CompactDifficulty(OFFSET as u32 * (1 << PRECISION) + 1);
-    assert_eq!(one.to_expanded(), expanded_one);
-    assert_eq!(one.to_work(), work_one);
-    let another_one = CompactDifficulty((1 << PRECISION) + (1 << 16));
-    assert_eq!(another_one.to_expanded(), expanded_one);
-    assert_eq!(another_one.to_work(), work_one);
-
-    // Maximum mantissa
-    let expanded_mant = Some(ExpandedDifficulty(UNSIGNED_MANTISSA_MASK.into()));
-    let work_mant = None;
-
-    let mant = CompactDifficulty(OFFSET as u32 * (1 << PRECISION) + UNSIGNED_MANTISSA_MASK);
-    assert_eq!(mant.to_expanded(), expanded_mant);
-    assert_eq!(mant.to_work(), work_mant);
-
-    // Maximum valid exponent
-    let exponent: U256 = (31 * 8).into();
-    let u256_exp = U256::from(2).pow(exponent);
-    let expanded_exp = Some(ExpandedDifficulty(u256_exp));
-    let work_exp = Some(Work(
-        ((U256::MAX - u256_exp) / (u256_exp + 1) + 1).as_u128(),
-    ));
-
-    let exp = CompactDifficulty((31 + OFFSET as u32) * (1 << PRECISION) + 1);
-    assert_eq!(exp.to_expanded(), expanded_exp);
-    assert_eq!(exp.to_work(), work_exp);
-
-    // Maximum valid mantissa and exponent
-    let exponent: U256 = (29 * 8).into();
-    let u256_me = U256::from(UNSIGNED_MANTISSA_MASK) * U256::from(2).pow(exponent);
-    let expanded_me = Some(ExpandedDifficulty(u256_me));
-    let work_me = Some(Work((!u256_me / (u256_me + 1) + 1).as_u128()));
-
-    let me = CompactDifficulty((31 + 1) * (1 << PRECISION) + UNSIGNED_MANTISSA_MASK);
-    assert_eq!(me.to_expanded(), expanded_me);
-    assert_eq!(me.to_work(), work_me);
-
-    // Maximum value, at least according to the spec
-    //
-    // According to ToTarget() in the spec, this value is
-    // `(2^23 - 1) * 256^253`, which is larger than the maximum expanded
-    // value. Therefore, a block can never pass with this threshold.
-    //
-    // zcashd rejects these blocks without comparing the hash.
-    let difficulty_max = CompactDifficulty(u32::MAX & !SIGN_BIT);
-    assert_eq!(difficulty_max.to_expanded(), None);
-    assert_eq!(difficulty_max.to_work(), None);
-
-    // Bitcoin test vectors for CompactDifficulty
-    // See https://developer.bitcoin.org/reference/block_chain.html#target-nbits
-    // These values are not in the table below, because they do not fit in u128
-    //
-    // The minimum difficulty on the bitcoin mainnet and testnet
-    let difficulty_btc_main = CompactDifficulty(0x1d00ffff);
-    let u256_btc_main = U256::from(0xffff) << 208;
-    let expanded_btc_main = Some(ExpandedDifficulty(u256_btc_main));
-    let work_btc_main = Some(Work(0x100010001));
-    assert_eq!(difficulty_btc_main.to_expanded(), expanded_btc_main);
-    assert_eq!(difficulty_btc_main.to_work(), work_btc_main);
-
-    // The minimum difficulty in bitcoin regtest
-    // This is also the easiest representable difficulty
-    let difficulty_btc_reg = CompactDifficulty(0x207fffff);
-    let u256_btc_reg = U256::from(0x7fffff) << 232;
-    let expanded_btc_reg = Some(ExpandedDifficulty(u256_btc_reg));
-    let work_btc_reg = Some(Work(0x2));
-    assert_eq!(difficulty_btc_reg.to_expanded(), expanded_btc_reg);
-    assert_eq!(difficulty_btc_reg.to_work(), work_btc_reg);
-}
-
-/// Bitcoin test vectors for CompactDifficulty, and their corresponding
-/// ExpandedDifficulty and Work values.
-/// See https://developer.bitcoin.org/reference/block_chain.html#target-nbits
-static COMPACT_DIFFICULTY_CASES: &[(u32, Option<u128>, Option<u128>)] = &[
-    // These Work values will never happen in practice, because the corresponding
-    // difficulties are extremely high. So it is ok for us to reject them.
-    (0x01003456, None /* 0x00 */, None),
-    (0x01123456, Some(0x12), None),
-    (0x02008000, Some(0x80), None),
-    (0x05009234, Some(0x92340000), None),
-    (0x04923456, None /* -0x12345600 */, None),
-    (0x04123456, Some(0x12345600), None),
-];
-
-/// Test Bitcoin test vectors for CompactDifficulty.
-#[test]
-#[spandoc::spandoc]
-fn compact_bitcoin_test_vectors() {
-    zebra_test::init();
-
-    // We use two spans, so we can diagnose conversion panics, and mismatching results
-    for (compact, expected_expanded, expected_work) in COMPACT_DIFFICULTY_CASES.iter().cloned() {
-        /// SPANDOC: Convert compact to expanded and work {?compact, ?expected_expanded, ?expected_work}
-        {
-            let expected_expanded = expected_expanded.map(U256::from).map(ExpandedDifficulty);
-            let expected_work = expected_work.map(Work);
-
-            let compact = CompactDifficulty(compact);
-            let actual_expanded = compact.to_expanded();
-            let actual_work = compact.to_work();
-
-            /// SPANDOC: Test that compact produces the expected expanded and work {?compact, ?expected_expanded, ?actual_expanded, ?expected_work, ?actual_work}
-            {
-                assert_eq!(actual_expanded, expected_expanded);
-                assert_eq!(actual_work, expected_work);
-            }
-        }
-    }
-}
-
-/// Test blocks using CompactDifficulty.
-#[test]
-#[spandoc::spandoc]
-fn block_difficulty() -> Result<(), Report> {
-    zebra_test::init();
-
-    let mut blockchain = Vec::new();
-    for b in &[
-        &zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES[..],
-        &zebra_test::vectors::BLOCK_MAINNET_1_BYTES[..],
-        &zebra_test::vectors::BLOCK_MAINNET_2_BYTES[..],
-        &zebra_test::vectors::BLOCK_MAINNET_3_BYTES[..],
-        &zebra_test::vectors::BLOCK_MAINNET_4_BYTES[..],
-        &zebra_test::vectors::BLOCK_MAINNET_5_BYTES[..],
-        &zebra_test::vectors::BLOCK_MAINNET_6_BYTES[..],
-        &zebra_test::vectors::BLOCK_MAINNET_7_BYTES[..],
-        &zebra_test::vectors::BLOCK_MAINNET_8_BYTES[..],
-        &zebra_test::vectors::BLOCK_MAINNET_9_BYTES[..],
-        &zebra_test::vectors::BLOCK_MAINNET_10_BYTES[..],
-    ] {
-        let block = Arc::<Block>::zcash_deserialize(*b)?;
-        let hash = block.hash();
-        blockchain.push((block.clone(), block.coinbase_height().unwrap(), hash));
-    }
-
-    let diff_zero = ExpandedDifficulty(U256::zero());
-    let diff_one = ExpandedDifficulty(U256::one());
-    let diff_max = ExpandedDifficulty(U256::MAX);
-
-    let work_zero = Work(0);
-    let work_max = Work(u128::MAX);
-
-    let mut cumulative_work = Work::default();
-    let mut previous_cumulative_work = Work::default();
-    for (block, height, hash) in blockchain {
-        /// SPANDOC: Calculate the threshold for mainnet block {?height}
-        let threshold = block
-            .header
-            .difficulty_threshold
-            .to_expanded()
-            .expect("Chain blocks have valid difficulty thresholds.");
-
-        /// SPANDOC: Check the difficulty for mainnet block {?height, ?threshold, ?hash}
-        {
-            assert!(hash <= threshold);
-            // also check the comparison operators work
-            assert!(hash > diff_zero);
-            assert!(hash > diff_one);
-            assert!(hash < diff_max);
-        }
-
-        /// SPANDOC: Check the work for mainnet block {?height}
-        {
-            let work = block
-                .header
-                .difficulty_threshold
-                .to_work()
-                .expect("Chain blocks have valid work.");
-            // also check the comparison operators work
-            assert!(work > work_zero);
-            assert!(work < work_max);
-
-            cumulative_work += work;
-            assert!(cumulative_work > work_zero);
-            assert!(cumulative_work < work_max);
-
-            assert!(cumulative_work > previous_cumulative_work);
-
-            previous_cumulative_work = cumulative_work;
-        }
-
-        /// SPANDOC: Calculate the work for mainnet block {?height}
-        let _work = block
-            .header
-            .difficulty_threshold
-            .to_work()
-            .expect("Chain blocks have valid work.");
-
-        // TODO: check work comparison operators and cumulative work addition
-    }
-
-    Ok(())
-}
-
-/// Test ExpandedDifficulty ordering
-#[test]
-#[spandoc::spandoc]
-#[allow(clippy::eq_op)]
-fn expanded_order() -> Result<(), Report> {
-    zebra_test::init();
-
-    let zero = ExpandedDifficulty(U256::zero());
-    let one = ExpandedDifficulty(U256::one());
-    let max_value = ExpandedDifficulty(U256::MAX);
-
-    assert!(zero < one);
-    assert!(zero < max_value);
-    assert!(one < max_value);
-
-    assert_eq!(zero, zero);
-    assert!(zero <= one);
-    assert!(one >= zero);
-    assert!(one > zero);
-
-    Ok(())
-}
-
-/// Test ExpandedDifficulty and block::Hash ordering
-#[test]
-#[spandoc::spandoc]
-fn expanded_hash_order() -> Result<(), Report> {
-    zebra_test::init();
-
-    let ex_zero = ExpandedDifficulty(U256::zero());
-    let ex_one = ExpandedDifficulty(U256::one());
-    let ex_max = ExpandedDifficulty(U256::MAX);
-    let hash_zero = block::Hash([0; 32]);
-    let hash_max = block::Hash([0xff; 32]);
-
-    assert_eq!(hash_zero, ex_zero);
-    assert!(hash_zero < ex_one);
-    assert!(hash_zero < ex_max);
-
-    assert!(hash_max > ex_zero);
-    assert!(hash_max > ex_one);
-    assert_eq!(hash_max, ex_max);
-
-    assert!(ex_one > hash_zero);
-    assert!(ex_one < hash_max);
-
-    assert!(hash_zero >= ex_zero);
-    assert!(ex_zero >= hash_zero);
-    assert!(hash_zero <= ex_zero);
-    assert!(ex_zero <= hash_zero);
-
-    Ok(())
-}
-
-proptest! {
-    /// Check that CompactDifficulty expands, and converts to work.
-    ///
-    /// Make sure the conversions don't panic, and that they compare correctly.
-    #[test]
-    fn prop_compact_expand_work(compact in any::<CompactDifficulty>()) {
-        // TODO: use random ExpandedDifficulties, once we have ExpandedDifficulty::to_compact()
-        //
-        // This change will increase the number of valid random work values.
-        let expanded = compact.to_expanded();
-        let work = compact.to_work();
-
-        let hash_zero = block::Hash([0; 32]);
-        let hash_max = block::Hash([0xff; 32]);
-
-        let work_zero = Work(0);
-        let work_max = Work(u128::MAX);
-
-        if let Some(expanded) = expanded {
-            prop_assert!(expanded >= hash_zero);
-            prop_assert!(expanded <= hash_max);
-        }
-
-        if let Some(work) = work {
-            prop_assert!(work > work_zero);
-            prop_assert!(work < work_max);
-        }
-    }
-
-    /// Check that a random ExpandedDifficulty compares correctly with fixed block::Hash
-    #[test]
-    fn prop_expanded_order(expanded in any::<ExpandedDifficulty>()) {
-        // TODO: round-trip test, once we have ExpandedDifficulty::to_compact()
-        let hash_zero = block::Hash([0; 32]);
-        let hash_max = block::Hash([0xff; 32]);
-
-        prop_assert!(expanded >= hash_zero);
-        prop_assert!(expanded <= hash_max);
-    }
-
-    /// Check that ExpandedDifficulty compares correctly with a random block::Hash.
-    #[test]
-    fn prop_hash_order(hash in any::<block::Hash>()) {
-        let ex_zero = ExpandedDifficulty(U256::zero());
-        let ex_one = ExpandedDifficulty(U256::one());
-        let ex_max = ExpandedDifficulty(U256::MAX);
-
-        prop_assert!(hash >= ex_zero);
-        prop_assert!(hash <= ex_max);
-        prop_assert!(hash >= ex_one || hash == ex_zero);
-    }
-
-    /// Check that a random ExpandedDifficulty and block::Hash compare correctly.
-    #[test]
-    #[allow(clippy::double_comparisons)]
-    fn prop_expanded_hash_order(expanded in any::<ExpandedDifficulty>(), hash in any::<block::Hash>()) {
-        prop_assert!(expanded < hash || expanded > hash || expanded == hash);
-    }
-
-    /// Check that the work values for two random ExpandedDifficulties add
-    /// correctly.
-    #[test]
-    fn prop_work(compact1 in any::<CompactDifficulty>(), compact2 in any::<CompactDifficulty>()) {
-        // TODO: use random ExpandedDifficulties, once we have ExpandedDifficulty::to_compact()
-        //
-        // This change will increase the number of valid random work values.
-        let work1 = compact1.to_work();
-        let work2 = compact2.to_work();
-
-        if let (Some(work1), Some(work2)) = (work1, work2) {
-            let work_limit = Work(u128::MAX / 2);
-            if work1 < work_limit && work2 < work_limit {
-                let work_total = work1 + work2;
-                prop_assert!(work_total >= work1);
-                prop_assert!(work_total >= work2);
-            } else if work1 < work_limit {
-                let work_total = work1 + work1;
-                prop_assert!(work_total >= work1);
-            } else if work2 < work_limit {
-                let work_total = work2 + work2;
-                prop_assert!(work_total >= work2);
-            }
-        }
-    }
-}
+mod prop;
+mod vectors;
@@ -0,0 +1,228 @@
+use primitive_types::U256;
+use proptest::{arbitrary::any, prelude::*};
+
+use std::cmp::Ordering;
+
+use crate::block;
+
+use super::super::*;
+
+proptest! {
+    /// Check Expanded, Compact, and Work conversions.
+    ///
+    /// Make sure the conversions don't panic, and that they round-trip and compare
+    /// correctly.
+    #[test]
+    fn prop_difficulty_conversion(expanded_seed in any::<block::Hash>()) {
+        let expanded_seed = ExpandedDifficulty::from_hash(&expanded_seed);
+
+        let hash_zero = block::Hash([0; 32]);
+        let hash_max = block::Hash([0xff; 32]);
+        prop_assert!(expanded_seed >= hash_zero);
+        prop_assert!(expanded_seed <= hash_max);
+
+        // Skip invalid seeds
+        prop_assume!(expanded_seed != hash_zero);
+
+        let compact = expanded_seed.to_compact();
+        let expanded_trunc = compact.to_expanded();
+        let work = compact.to_work();
+
+        if let Some(expanded_trunc) = expanded_trunc {
+            // zero compact values are invalid, and return None on conversion
+            prop_assert!(expanded_trunc > hash_zero);
+            // the maximum compact value has less precision than a hash
+            prop_assert!(expanded_trunc < hash_max);
+
+            // the truncated value should be less than or equal to the seed
+            prop_assert!(expanded_trunc <= expanded_seed);
+
+            // roundtrip
+            let compact_trip = expanded_trunc.to_compact();
+            prop_assert_eq!(compact, compact_trip);
+
+            let expanded_trip = compact_trip.to_expanded().expect("roundtrip expanded is valid");
+            prop_assert_eq!(expanded_trunc, expanded_trip);
+
+            // Some impossibly hard compact values are not valid work values in Zebra
+            if let Some(work) = work {
+                // roundtrip
+                let work_trip = compact_trip.to_work().expect("roundtrip work is valid if work is valid");
+                prop_assert_eq!(work, work_trip);
+            }
+        }
+    }
+
+    /// Check Work and PartialCumulativeWork conversions.
+    ///
+    /// Make sure the conversions don't panic, and that they compare correctly.
+    #[test]
+    fn prop_work_conversion(work in any::<Work>()) {
+        let work_zero = Work(0);
+        let work_max = Work(u128::MAX);
+
+        prop_assert!(work > work_zero);
+        // similarly, the maximum compact value has less precision than work
+        prop_assert!(work < work_max);
+
+        let partial_work = PartialCumulativeWork::from(work);
+        prop_assert!(partial_work > PartialCumulativeWork::from(work_zero));
+        prop_assert!(partial_work < PartialCumulativeWork::from(work_max));
+
+        // Now try adding zero to convert to PartialCumulativeWork
+        prop_assert!(partial_work > work_zero + work_zero);
+        prop_assert!(partial_work < work_max + work_zero);
+    }
+
+    /// Check that a random ExpandedDifficulty compares correctly with fixed block::Hash
+    #[test]
+    fn prop_expanded_cmp(expanded in any::<ExpandedDifficulty>()) {
+        let hash_zero = block::Hash([0; 32]);
+        let hash_max = block::Hash([0xff; 32]);
+
+        // zero compact values are invalid, and return None on conversion
+        prop_assert!(expanded > hash_zero);
+        // the maximum compact value has less precision than a hash
+        prop_assert!(expanded < hash_max);
+    }
+
+    /// Check that ExpandedDifficulty compares correctly with a random block::Hash.
+    #[test]
+    fn prop_hash_cmp(hash in any::<block::Hash>()) {
+        let ex_zero = ExpandedDifficulty(U256::zero());
+        let ex_one = ExpandedDifficulty(U256::one());
+        let ex_max = ExpandedDifficulty(U256::MAX);
+
+        prop_assert!(hash >= ex_zero);
+        prop_assert!(hash <= ex_max);
+        prop_assert!(hash >= ex_one || hash == ex_zero);
+    }
+
+    /// Check that a random ExpandedDifficulty and block::Hash compare without panicking.
+    #[test]
+    #[allow(clippy::double_comparisons)]
+    fn prop_expanded_hash_cmp(expanded in any::<ExpandedDifficulty>(), hash in any::<block::Hash>()) {
+        prop_assert!(expanded < hash || expanded > hash || expanded == hash);
+    }
+
+    /// Check that two random CompactDifficulty values compare and round-trip correctly.
+    #[test]
+    #[allow(clippy::double_comparisons)]
+    fn prop_compact_roundtrip(compact1 in any::<CompactDifficulty>(), compact2 in any::<CompactDifficulty>()) {
+        prop_assert!(compact1 != compact2 || compact1 == compact2);
+
+        let expanded1 = compact1.to_expanded().expect("arbitrary compact values are valid");
+        let expanded2 = compact2.to_expanded().expect("arbitrary compact values are valid");
+
+        if expanded1 != expanded2 {
+            prop_assert!(compact1 != compact2);
+        } else {
+            prop_assert!(compact1 == compact2);
+        }
+
+        let compact1_trip = expanded1.to_compact();
+        let compact2_trip = expanded2.to_compact();
+
+        if compact1_trip != compact2_trip {
+            prop_assert!(compact1 != compact2);
+        } else {
+            prop_assert!(compact1 == compact2);
+        }
+    }
+
+    /// Check that two random ExpandedDifficulty values compare and round-trip correctly.
+    #[test]
+    fn prop_expanded_roundtrip(expanded1_seed in any::<block::Hash>(), expanded2_seed in any::<block::Hash>()) {
+        let expanded1_seed = ExpandedDifficulty::from_hash(&expanded1_seed);
+        let expanded2_seed = ExpandedDifficulty::from_hash(&expanded2_seed);
+
+        // Skip invalid seeds
+        let expanded_zero = ExpandedDifficulty(U256::zero());
+        prop_assume!(expanded1_seed != expanded_zero);
+        prop_assume!(expanded2_seed != expanded_zero);
+
+        let compact1 = expanded1_seed.to_compact();
+        let compact2 = expanded2_seed.to_compact();
+        let expanded1_trunc = compact1.to_expanded();
+        let expanded2_trunc = compact2.to_expanded();
+        let work1 = compact1.to_work();
+        let work2 = compact2.to_work();
+
+        match expanded1_seed.cmp(&expanded2_seed) {
+            Ordering::Greater => {
+                // seed to compact truncation can turn expanded and work inequalities into equalities
+                prop_assert!(expanded1_trunc >= expanded2_trunc);
+            }
+            Ordering::Equal => {
+                prop_assert!(compact1 == compact2);
+                prop_assert!(expanded1_trunc == expanded2_trunc);
+            }
+            Ordering::Less => {
+                prop_assert!(expanded1_trunc <= expanded2_trunc);
+            }
+        }
+
+        if expanded1_trunc == expanded2_trunc {
+            prop_assert!(compact1 == compact2);
+        }
+
+        // Skip impossibly hard work values
+        prop_assume!(work1.is_some());
+        prop_assume!(work2.is_some());
+
+        match expanded1_seed.cmp(&expanded2_seed) {
+            Ordering::Greater => {
+                // work comparisons are reversed, because conversion involves division
+                prop_assert!(work1 <= work2);
+            }
+            Ordering::Equal => {
+                prop_assert!(work1 == work2);
+            }
+            Ordering::Less => {
+                prop_assert!(work1 >= work2);
+            }
+        }
+
+        match expanded1_trunc.cmp(&expanded2_trunc) {
+            Ordering::Greater => {
+                // expanded to work truncation can turn work inequalities into equalities
+                prop_assert!(work1 <= work2);
+            }
+            Ordering::Equal => {
+                prop_assert!(work1 == work2);
+            }
+            Ordering::Less => {
+                prop_assert!(work1 >= work2);
+            }
+        }
+    }
+
+    /// Check that two random Work values compare, add, and subtract correctly.
+    #[test]
+    fn prop_work_sum(work1 in any::<Work>(), work2 in any::<Work>()) {
+        // If the sum won't panic
+        if work1.0.checked_add(work2.0).is_some() {
+            let work_total = work1 + work2;
+            prop_assert!(work_total >= PartialCumulativeWork::from(work1));
+            prop_assert!(work_total >= PartialCumulativeWork::from(work2));
+
+            prop_assert_eq!(work_total - work1, PartialCumulativeWork::from(work2));
+            prop_assert_eq!(work_total - work2, PartialCumulativeWork::from(work1));
+        }
+
+        if work1.0.checked_add(work1.0).is_some() {
+            let work_total = work1 + work1;
+            prop_assert!(work_total >= PartialCumulativeWork::from(work1));
+
+            prop_assert_eq!(work_total - work1, PartialCumulativeWork::from(work1));
+        }
+
+        if work2.0.checked_add(work2.0).is_some() {
+            let work_total = work2 + work2;
+            prop_assert!(work_total >= PartialCumulativeWork::from(work2));
+
+            prop_assert_eq!(work_total - work2, PartialCumulativeWork::from(work2));
+        }
+    }
+}
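`prop_work_sum` relies on `Sub<Work>` for `PartialCumulativeWork` (per the commit message, a proptest was rewritten to use it). A sketch of the pattern it protects (the helper name is hypothetical):

    // Undo a single block's contribution to a cumulative total, mirroring
    // the `work_total - work1` assertions in `prop_work_sum` above.
    fn remove_block_work(
        total: PartialCumulativeWork,
        block_work: Work,
    ) -> PartialCumulativeWork {
        total - block_work
    }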
@@ -0,0 +1,372 @@
+use color_eyre::eyre::Report;
+use std::sync::Arc;
+
+use crate::block::Block;
+use crate::serialization::ZcashDeserialize;
+
+use super::super::*;
+
+// Alias the struct constants here, so the code is easier to read.
+const PRECISION: u32 = CompactDifficulty::PRECISION;
+const SIGN_BIT: u32 = CompactDifficulty::SIGN_BIT;
+const UNSIGNED_MANTISSA_MASK: u32 = CompactDifficulty::UNSIGNED_MANTISSA_MASK;
+const OFFSET: i32 = CompactDifficulty::OFFSET;
+
+/// Test debug formatting.
+#[test]
+fn debug_format() {
+    zebra_test::init();
+
+    assert_eq!(
+        format!("{:?}", CompactDifficulty(0)),
+        "CompactDifficulty(0x00000000)"
+    );
+    assert_eq!(
+        format!("{:?}", CompactDifficulty(1)),
+        "CompactDifficulty(0x00000001)"
+    );
+    assert_eq!(
+        format!("{:?}", CompactDifficulty(u32::MAX)),
+        "CompactDifficulty(0xffffffff)"
+    );
+
+    assert_eq!(
+        format!("{:?}", ExpandedDifficulty(U256::zero())),
+        "ExpandedDifficulty(\"0000000000000000000000000000000000000000000000000000000000000000\")"
+    );
+    assert_eq!(
+        format!("{:?}", ExpandedDifficulty(U256::one())),
+        "ExpandedDifficulty(\"0100000000000000000000000000000000000000000000000000000000000000\")"
+    );
+    assert_eq!(
+        format!("{:?}", ExpandedDifficulty(U256::MAX)),
+        "ExpandedDifficulty(\"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\")"
+    );
+
+    assert_eq!(format!("{:?}", Work(0)), "Work(0x0, 0, -inf)");
+    assert_eq!(
+        format!("{:?}", Work(u8::MAX as u128)),
+        "Work(0xff, 255, 7.99435)"
+    );
+    assert_eq!(
+        format!("{:?}", Work(u64::MAX as u128)),
+        "Work(0xffffffffffffffff, 18446744073709551615, 64.00000)"
+    );
+    assert_eq!(
+        format!("{:?}", Work(u128::MAX)),
+        "Work(0xffffffffffffffffffffffffffffffff, 340282366920938463463374607431768211455, 128.00000)"
+    );
+}
+
+/// Test zero values for CompactDifficulty.
+#[test]
+fn compact_zero() {
+    zebra_test::init();
+
+    let natural_zero = CompactDifficulty(0);
+    assert_eq!(natural_zero.to_expanded(), None);
+    assert_eq!(natural_zero.to_work(), None);
+
+    // Small value zeroes
+    let small_zero_1 = CompactDifficulty(1);
+    assert_eq!(small_zero_1.to_expanded(), None);
+    assert_eq!(small_zero_1.to_work(), None);
+    let small_zero_max = CompactDifficulty(UNSIGNED_MANTISSA_MASK);
+    assert_eq!(small_zero_max.to_expanded(), None);
+    assert_eq!(small_zero_max.to_work(), None);
+
+    // Special-cased zeroes, negative in the floating-point representation
+    let sc_zero = CompactDifficulty(SIGN_BIT);
+    assert_eq!(sc_zero.to_expanded(), None);
+    assert_eq!(sc_zero.to_work(), None);
+    let sc_zero_next = CompactDifficulty(SIGN_BIT + 1);
+    assert_eq!(sc_zero_next.to_expanded(), None);
+    assert_eq!(sc_zero_next.to_work(), None);
+    let sc_zero_high = CompactDifficulty((1 << PRECISION) - 1);
+    assert_eq!(sc_zero_high.to_expanded(), None);
+    assert_eq!(sc_zero_high.to_work(), None);
+    let sc_zero_max = CompactDifficulty(u32::MAX);
+    assert_eq!(sc_zero_max.to_expanded(), None);
+    assert_eq!(sc_zero_max.to_work(), None);
+}
+
+/// Test extreme values for CompactDifficulty.
+#[test]
+fn compact_extremes() {
+    zebra_test::init();
+
+    // Values equal to one
+    let expanded_one = Some(ExpandedDifficulty(U256::one()));
+    let work_one = None;
+
+    let canonical_one = CompactDifficulty((1 << PRECISION) + (1 << 16));
+    assert_eq!(canonical_one.to_expanded(), expanded_one);
+    assert_eq!(
+        canonical_one.to_expanded().unwrap().to_compact(),
+        canonical_one
+    );
+    assert_eq!(canonical_one.to_work(), work_one);
+
+    let another_one = CompactDifficulty(OFFSET as u32 * (1 << PRECISION) + 1);
+    assert_eq!(another_one.to_expanded(), expanded_one);
+    assert_eq!(
+        another_one.to_expanded().unwrap().to_compact(),
+        canonical_one
+    );
+    assert_eq!(another_one.to_work(), work_one);
+
+    // Maximum mantissa
+    let expanded_mant = Some(ExpandedDifficulty(UNSIGNED_MANTISSA_MASK.into()));
+    let work_mant = None;
+
+    let mant = CompactDifficulty(OFFSET as u32 * (1 << PRECISION) + UNSIGNED_MANTISSA_MASK);
+    assert_eq!(mant.to_expanded(), expanded_mant);
+    assert_eq!(mant.to_expanded().unwrap().to_compact(), mant);
+    assert_eq!(mant.to_work(), work_mant);
+
+    // Maximum valid exponent
+    let exponent: U256 = (31 * 8).into();
+    let u256_exp = U256::from(2).pow(exponent);
+    let expanded_exp = Some(ExpandedDifficulty(u256_exp));
+    let work_exp = Some(Work(
+        ((U256::MAX - u256_exp) / (u256_exp + 1) + 1).as_u128(),
+    ));
+
+    let canonical_exp =
+        CompactDifficulty(((31 + OFFSET - 2) as u32) * (1 << PRECISION) + (1 << 16));
+    let another_exp = CompactDifficulty((31 + OFFSET as u32) * (1 << PRECISION) + 1);
+    assert_eq!(canonical_exp.to_expanded(), expanded_exp);
+    assert_eq!(another_exp.to_expanded(), expanded_exp);
+    assert_eq!(
+        canonical_exp.to_expanded().unwrap().to_compact(),
+        canonical_exp
+    );
+    assert_eq!(
+        another_exp.to_expanded().unwrap().to_compact(),
+        canonical_exp
+    );
+    assert_eq!(canonical_exp.to_work(), work_exp);
+    assert_eq!(another_exp.to_work(), work_exp);
+
+    // Maximum valid mantissa and exponent
+    let exponent: U256 = (29 * 8).into();
+    let u256_me = U256::from(UNSIGNED_MANTISSA_MASK) * U256::from(2).pow(exponent);
+    let expanded_me = Some(ExpandedDifficulty(u256_me));
+    let work_me = Some(Work((!u256_me / (u256_me + 1) + 1).as_u128()));
+
+    let me = CompactDifficulty((31 + 1) * (1 << PRECISION) + UNSIGNED_MANTISSA_MASK);
+    assert_eq!(me.to_expanded(), expanded_me);
+    assert_eq!(me.to_expanded().unwrap().to_compact(), me);
+    assert_eq!(me.to_work(), work_me);
+
+    // Maximum value, at least according to the spec
+    //
+    // According to ToTarget() in the spec, this value is
+    // `(2^23 - 1) * 256^253`, which is larger than the maximum expanded
+    // value. Therefore, a block can never pass with this threshold.
+    //
+    // zcashd rejects these blocks without comparing the hash.
+    let difficulty_max = CompactDifficulty(u32::MAX & !SIGN_BIT);
+    assert_eq!(difficulty_max.to_expanded(), None);
+    assert_eq!(difficulty_max.to_work(), None);
+
+    // Bitcoin test vectors for CompactDifficulty
+    // See https://developer.bitcoin.org/reference/block_chain.html#target-nbits
+    // These values are not in the table below, because they do not fit in u128
+    //
+    // The minimum difficulty on the bitcoin mainnet and testnet
+    let difficulty_btc_main = CompactDifficulty(0x1d00ffff);
+    let u256_btc_main = U256::from(0xffff) << 208;
+    let expanded_btc_main = Some(ExpandedDifficulty(u256_btc_main));
+    let work_btc_main = Some(Work(0x100010001));
+    assert_eq!(difficulty_btc_main.to_expanded(), expanded_btc_main);
+    assert_eq!(
+        difficulty_btc_main.to_expanded().unwrap().to_compact(),
+        difficulty_btc_main
+    );
+    assert_eq!(difficulty_btc_main.to_work(), work_btc_main);
+
+    // The minimum difficulty in bitcoin regtest
+    // This is also the easiest representable difficulty
+    let difficulty_btc_reg = CompactDifficulty(0x207fffff);
+    let u256_btc_reg = U256::from(0x7fffff) << 232;
+    let expanded_btc_reg = Some(ExpandedDifficulty(u256_btc_reg));
+    let work_btc_reg = Some(Work(0x2));
+    assert_eq!(difficulty_btc_reg.to_expanded(), expanded_btc_reg);
+    assert_eq!(
+        difficulty_btc_reg.to_expanded().unwrap().to_compact(),
+        difficulty_btc_reg
+    );
+    assert_eq!(difficulty_btc_reg.to_work(), work_btc_reg);
+}
+
+/// Bitcoin test vectors for CompactDifficulty, and their corresponding
+/// ExpandedDifficulty and Work values.
+/// See https://developer.bitcoin.org/reference/block_chain.html#target-nbits
+static COMPACT_DIFFICULTY_CASES: &[(u32, Option<u128>, Option<u128>)] = &[
+    // These Work values will never happen in practice, because the corresponding
+    // difficulties are extremely high. So it is ok for us to reject them.
+    (0x01003456, None /* 0x00 */, None),
+    (0x01123456, Some(0x12), None),
+    (0x02008000, Some(0x80), None),
+    (0x05009234, Some(0x92340000), None),
+    (0x04923456, None /* -0x12345600 */, None),
+    (0x04123456, Some(0x12345600), None),
+];
+
+/// Test Bitcoin test vectors for CompactDifficulty.
+#[test]
+#[spandoc::spandoc]
+fn compact_bitcoin_test_vectors() {
+    zebra_test::init();
+
+    // We use two spans, so we can diagnose conversion panics, and mismatching results
+    for (compact, expected_expanded, expected_work) in COMPACT_DIFFICULTY_CASES.iter().cloned() {
+        /// SPANDOC: Convert compact to expanded and work {?compact, ?expected_expanded, ?expected_work}
+        {
+            let expected_expanded = expected_expanded.map(U256::from).map(ExpandedDifficulty);
+            let expected_work = expected_work.map(Work);
+
+            let compact = CompactDifficulty(compact);
+            let actual_expanded = compact.to_expanded();
+            let actual_work = compact.to_work();
+            let canonical_compact = actual_expanded.map(|e| e.to_compact());
+            let round_trip_expanded = canonical_compact.map(|c| c.to_expanded());
+
+            /// SPANDOC: Test that compact produces the expected expanded and work {?compact, ?expected_expanded, ?actual_expanded, ?expected_work, ?actual_work, ?canonical_compact, ?round_trip_expanded}
+            {
+                assert_eq!(actual_expanded, expected_expanded);
+                if expected_expanded.is_some() {
+                    assert_eq!(round_trip_expanded.unwrap(), actual_expanded);
+                }
+                assert_eq!(actual_work, expected_work);
+            }
+        }
+    }
+}
+
+/// Test blocks using CompactDifficulty.
+#[test]
+#[spandoc::spandoc]
+fn block_difficulty() -> Result<(), Report> {
+    zebra_test::init();
+
+    let mut blockchain = Vec::new();
+    for b in zebra_test::vectors::BLOCKS.iter() {
+        let block = Arc::<Block>::zcash_deserialize(*b)?;
+        let hash = block.hash();
+        blockchain.push((block.clone(), block.coinbase_height().unwrap(), hash));
+    }
+
+    let diff_zero = ExpandedDifficulty(U256::zero());
+    let diff_one = ExpandedDifficulty(U256::one());
+    let diff_max = ExpandedDifficulty(U256::MAX);
+
+    let work_zero = PartialCumulativeWork(0);
+    let work_max = PartialCumulativeWork(u128::MAX);
+
+    let mut cumulative_work = PartialCumulativeWork::default();
+    let mut previous_cumulative_work = PartialCumulativeWork::default();
+    for (block, height, hash) in blockchain {
+        /// SPANDOC: Calculate the threshold for block {?height}
+        let threshold = block
+            .header
+            .difficulty_threshold
+            .to_expanded()
+            .expect("Chain blocks have valid difficulty thresholds.");
+
+        /// SPANDOC: Check the difficulty for block {?height, ?threshold, ?hash}
+        {
+            assert!(hash <= threshold);
+            // also check the comparison operators work
+            assert!(hash > diff_zero);
+            assert!(hash > diff_one);
+            assert!(hash < diff_max);
+        }
+
+        /// SPANDOC: Check compact round-trip for block {?height}
+        {
+            let canonical_compact = threshold.to_compact();
+
+            assert_eq!(block.header.difficulty_threshold, canonical_compact);
+        }
+
+        /// SPANDOC: Check the work for block {?height}
+        {
+            let work = block
+                .header
+                .difficulty_threshold
+                .to_work()
+                .expect("Chain blocks have valid work.");
+
+            // also check the comparison operators work
+            assert!(PartialCumulativeWork::from(work) > work_zero);
+            assert!(PartialCumulativeWork::from(work) < work_max);
+
+            cumulative_work += work;
+            assert!(cumulative_work > work_zero);
+            assert!(cumulative_work < work_max);
+
+            assert!(cumulative_work > previous_cumulative_work);
+
+            previous_cumulative_work = cumulative_work;
+        }
+    }
+
+    Ok(())
+}
+
+/// Test ExpandedDifficulty ordering
+#[test]
+#[spandoc::spandoc]
+#[allow(clippy::eq_op)]
+fn expanded_order() -> Result<(), Report> {
+    zebra_test::init();
+
+    let zero = ExpandedDifficulty(U256::zero());
+    let one = ExpandedDifficulty(U256::one());
+    let max_value = ExpandedDifficulty(U256::MAX);
+
+    assert!(zero < one);
+    assert!(zero < max_value);
+    assert!(one < max_value);
+
+    assert_eq!(zero, zero);
+    assert!(zero <= one);
+    assert!(one >= zero);
+    assert!(one > zero);
+
+    Ok(())
+}
+
+/// Test ExpandedDifficulty and block::Hash ordering
+#[test]
+#[spandoc::spandoc]
+fn expanded_hash_order() -> Result<(), Report> {
+    zebra_test::init();
+
+    let ex_zero = ExpandedDifficulty(U256::zero());
+    let ex_one = ExpandedDifficulty(U256::one());
+    let ex_max = ExpandedDifficulty(U256::MAX);
+    let hash_zero = block::Hash([0; 32]);
+    let hash_max = block::Hash([0xff; 32]);
+
+    assert_eq!(hash_zero, ex_zero);
+    assert!(hash_zero < ex_one);
+    assert!(hash_zero < ex_max);
+
+    assert!(hash_max > ex_zero);
+    assert!(hash_max > ex_one);
+    assert_eq!(hash_max, ex_max);
+
+    assert!(ex_one > hash_zero);
+    assert!(ex_one < hash_max);
+
+    assert!(hash_zero >= ex_zero);
+    assert!(ex_zero >= hash_zero);
+    assert!(hash_zero <= ex_zero);
+    assert!(ex_zero <= hash_zero);
+
+    Ok(())
+}
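A hand decoding of one `COMPACT_DIFFICULTY_CASES` entry, as a standalone sketch: the high byte is the base-256 exponent and the low 23 bits are the mantissa, so `0x05009234` expands to `0x9234 * 256^(5 - 3)`.

    fn main() {
        let nbits: u32 = 0x05009234;

        let exponent = nbits >> 24; // 0x05
        let mantissa = (nbits & 0x007f_ffff) as u128; // 0x9234

        // target = mantissa * 256^(exponent - 3)
        let target = mantissa << (8 * (exponent - 3));
        assert_eq!(target, 0x9234_0000); // the Some(0x92340000) case above
    }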
@@ -15,7 +15,7 @@ use zebra_chain::{
     block::{self, Block, Height},
     parameters::{Network, NetworkUpgrade},
     serialization::{ZcashDeserialize, ZcashDeserializeInto},
-    work::difficulty::{CompactDifficulty, ExpandedDifficulty},
+    work::difficulty::{ExpandedDifficulty, INVALID_COMPACT_DIFFICULTY},
 };
 use zebra_test::transcript::{TransError, Transcript};
 
@@ -188,7 +188,7 @@ fn difficulty_validation_failure() -> Result<(), Report> {
     let hash = block.hash();
 
     // Set the difficulty field to an invalid value
-    block.header.difficulty_threshold = CompactDifficulty(u32::MAX);
+    block.header.difficulty_threshold = INVALID_COMPACT_DIFFICULTY;
 
     // Validate the block
     let result =
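`INVALID_COMPACT_DIFFICULTY` is `u32::MAX`, whose set sign bit makes it a special-cased zero in the compact format, so both conversions reject it; that is what `difficulty_validation_failure` depends on. A sketch restating the `sc_zero_max` vector test:

    // u32::MAX has the sign bit set, so it decodes as a special-cased
    // negative/zero value: both conversions return None, and validation fails.
    assert_eq!(INVALID_COMPACT_DIFFICULTY.to_expanded(), None);
    assert_eq!(INVALID_COMPACT_DIFFICULTY.to_work(), None);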