change(rpc): Simplify `getdifficulty` RPC implementation (#6105)

* Use existing Work type to calculate get_difficulty RPC, round in f64 tests

* Use a valid difficulty in all snapshots

* Explain compatibility TODO

* Fix typo

* Document consensus rules in the difficulty module

* Calculate the difficulty using the high 128 bits of the U256 values

* Require 6 significant figures of accuracy in the difficulty unit tests

* fixup! Calculate the difficulty using the high 128 bits of the U256 values

* Update snapshots
teor 2023-02-09 09:41:41 +10:00 committed by GitHub
parent a7c784f21d
commit 4f289299fd
10 changed files with 195 additions and 48 deletions

View File

@ -37,7 +37,7 @@ mod tests;
/// with the block header hash, and
/// - calculating the block work.
///
/// Details:
/// # Consensus
///
/// This is a floating-point encoding, with a 24-bit signed mantissa,
/// an 8-bit exponent, an offset of 3, and a radix of 256.
@ -56,6 +56,12 @@ mod tests;
/// Without these consensus rules, some `ExpandedDifficulty` values would have
/// multiple equivalent `CompactDifficulty` values, due to redundancy in the
/// floating-point format.
///
/// > Deterministic conversions between a target threshold and a “compact” nBits value
/// > are not fully defined in the Bitcoin documentation, and so we define them here:
/// > (see equations in the Zcash Specification [section 7.7.4])
///
/// [section 7.7.4]: https://zips.z.cash/protocol/protocol.pdf#nbits
#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize)]
pub struct CompactDifficulty(pub(crate) u32);
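
A minimal sketch of the decoding this format implies, using a 32-byte big-endian array for the target. The name `to_target_bytes` and the byte-array representation are illustrative only, not Zebra's API; the real `to_expanded()` documented further down returns an `ExpandedDifficulty` wrapping a `U256`.

// Illustrative sketch, not Zebra's API: decode a compact nBits value into a
// 32-byte big-endian target, following the mantissa/exponent layout above.
fn to_target_bytes(n_bits: u32) -> Option<[u8; 32]> {
    let exponent = (n_bits >> 24) as i32;       // 8-bit exponent
    let negative = n_bits & 0x0080_0000 != 0;   // sign bit of the 24-bit mantissa
    let mantissa = n_bits & 0x007f_ffff;        // mantissa magnitude

    if negative || mantissa == 0 {
        return None; // negative and zero targets are rejected
    }

    // target = mantissa * 256^(exponent - 3), laid out in big-endian bytes
    let mantissa_bytes = [(mantissa >> 16) as u8, (mantissa >> 8) as u8, mantissa as u8];
    let mut target = [0u8; 32];

    for (i, &byte) in mantissa_bytes.iter().enumerate() {
        // this byte has weight 256^(exponent - 3 + 2 - i),
        // so it lives at big-endian index 31 minus that power
        let index = 31 - (exponent - 3 + 2 - i as i32);
        if index < 0 && byte != 0 {
            return None; // a significant byte overflows 256 bits
        }
        if (0..32).contains(&index) {
            target[index as usize] = byte;
        }
        // bytes below 256^0 are truncated away, matching integer division
    }

    if target == [0u8; 32] { None } else { Some(target) }
}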
@ -66,7 +72,7 @@ pub const INVALID_COMPACT_DIFFICULTY: CompactDifficulty = CompactDifficulty(u32:
///
/// Used as a target threshold for the difficulty of a `block::Hash`.
///
/// Details:
/// # Consensus
///
/// The precise bit pattern of an `ExpandedDifficulty` value is
/// consensus-critical, because it is compared with the `block::Hash`.
@ -82,6 +88,15 @@ pub const INVALID_COMPACT_DIFFICULTY: CompactDifficulty = CompactDifficulty(u32:
/// Callers should avoid constructing `ExpandedDifficulty` zero
/// values, because they are rejected by the consensus rules,
/// and cause some conversion functions to panic.
///
/// > The difficulty filter is unchanged from Bitcoin, and is calculated using SHA-256d on the
/// > whole block header (including solutionSize and solution). The result is interpreted as a
/// > 256-bit integer represented in little-endian byte order, which MUST be less than or equal
/// > to the target threshold given by ToTarget(nBits).
///
/// Zcash Specification [section 7.7.2].
///
/// [section 7.7.2]: https://zips.z.cash/protocol/protocol.pdf#difficulty
//
// TODO: Use NonZeroU256, when available
#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd)]
@ -91,7 +106,7 @@ pub struct ExpandedDifficulty(U256);
///
/// Used to calculate the total work for each chain of blocks.
///
/// Details:
/// # Consensus
///
/// The relative value of `Work` is consensus-critical, because it is used to
/// choose the best chain. But its precise value and bit pattern are not
@ -102,6 +117,14 @@ pub struct ExpandedDifficulty(U256);
/// work to ever exceed 2^128. The current total chain work for Zcash is 2^58,
/// and Bitcoin adds around 2^91 work per year. (Each extra bit represents twice
/// as much work.)
///
/// > a node chooses the “best” block chain visible to it by finding the chain of valid blocks
/// > with the greatest total work. The work of a block with value nBits for the nBits field in
/// > its block header is defined as `floor(2^256 / (ToTarget(nBits) + 1))`.
///
/// Zcash Specification [section 7.7.5].
///
/// [section 7.7.5]: https://zips.z.cash/protocol/protocol.pdf#workdef
#[derive(Clone, Copy, Default, Eq, PartialEq, Ord, PartialOrd)]
pub struct Work(u128);
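
As a rough sanity check of that 128-bit bound, using only values that appear in this diff: at the Mainnet PoWLimit target of 2^243 - 1, each block contributes floor(2^256 / 2^243) = 2^13 work, so even a chain made entirely of easiest-possible blocks would need on the order of 2^128 / 2^13 = 2^115 blocks before a u128 accumulator could overflow.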
@ -148,7 +171,8 @@ impl CompactDifficulty {
/// Calculate the ExpandedDifficulty for a compact representation.
///
/// See `ToTarget()` in the Zcash Specification, and `CheckProofOfWork()` in
/// zcashd.
/// zcashd:
/// <https://zips.z.cash/protocol/protocol.pdf#nbits>
///
/// Returns None for negative, zero, and overflow values. (zcashd rejects
/// these values, before comparing the hash.)
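
Continuing the hypothetical `to_target_bytes` sketch above, the accepted and rejected cases it models line up with this description and with the snapshot values later in this diff:

fn main() {
    // decode the Mainnet snapshot nBits value "1f055554" (hypothetical helper above)
    let mainnet_target = to_target_bytes(0x1f05_5554).expect("valid snapshot difficulty");
    // "00055554..." as in the updated block template snapshots
    assert_eq!(&mainnet_target[..4], &[0x00u8, 0x05, 0x55, 0x54][..]);

    // cases rejected by the sketch, matching the rules described above
    assert_eq!(to_target_bytes(0x0180_1234), None); // sign bit set: negative
    assert_eq!(to_target_bytes(0x2100_0000), None); // zero mantissa
    assert_eq!(to_target_bytes(0xff7f_ffff), None); // overflows 256 bits
}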
@ -315,6 +339,10 @@ impl TryFrom<ExpandedDifficulty> for Work {
type Error = ();
fn try_from(expanded: ExpandedDifficulty) -> Result<Self, Self::Error> {
// Consensus:
//
// <https://zips.z.cash/protocol/protocol.pdf#workdef>
//
// We need to compute `2^256 / (expanded + 1)`, but we can't represent
// 2^256, as it's too large for a u256. However, as 2^256 is at least as
// large as `expanded + 1`, it is equal to
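
The identity behind this conversion can be sketched with u128 standing in for U256: since !d equals 2^128 - 1 - d for an unsigned 128-bit integer, floor(2^128 / (d + 1)) equals (!d / (d + 1)) + 1 for any d > 0. A minimal illustration (not Zebra's actual code, which applies the same trick to U256):

// Sketch: compute floor(2^128 / (d + 1)) without ever forming 2^128,
// valid for 0 < d < u128::MAX.
fn work_u128(d: u128) -> u128 {
    (!d / (d + 1)) + 1
}

fn main() {
    assert_eq!(work_u128(1), 1u128 << 127);                // 2^128 / 2
    assert_eq!(work_u128((1u128 << 64) - 1), 1u128 << 64); // 2^128 / 2^64
}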
@ -352,7 +380,10 @@ impl ExpandedDifficulty {
/// Returns the easiest target difficulty allowed on `network`.
///
/// See `PoWLimit` in the Zcash specification.
/// # Consensus
///
/// See `PoWLimit` in the Zcash specification:
/// <https://zips.z.cash/protocol/protocol.pdf#constants>
pub fn target_difficulty_limit(network: Network) -> ExpandedDifficulty {
let limit: U256 = match network {
/* 2^243 - 1 */
@ -375,10 +406,13 @@ impl ExpandedDifficulty {
/// Calculate the CompactDifficulty for an expanded difficulty.
///
/// See `ToCompact()` in the Zcash Specification, and `GetCompact()`
/// in zcashd.
/// # Consensus
///
/// Panics:
/// See `ToCompact()` in the Zcash Specification, and `GetCompact()`
/// in zcashd:
/// <https://zips.z.cash/protocol/protocol.pdf#nbits>
///
/// # Panics
///
/// If `self` is zero.
///
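
For the opposite direction, a hedged sketch of a `ToCompact()`-style encoder over a 32-byte big-endian target. The name and representation are illustrative; Zebra's real `to_compact()` works on the `U256` inside `ExpandedDifficulty` and panics on zero, as documented above.

// Illustrative sketch: encode a big-endian 256-bit target as a compact nBits value.
fn to_compact_sketch(target: &[u8; 32]) -> u32 {
    // exponent = number of significant bytes in the target
    let leading_zeros = target.iter().take_while(|&&b| b == 0).count();
    let mut size = (32 - leading_zeros) as u32;

    // mantissa = the three most significant bytes, zero-padded at the low end
    let mut mantissa: u32 = 0;
    for i in 0..3 {
        let byte = target.get(leading_zeros + i).copied().unwrap_or(0);
        mantissa = (mantissa << 8) | byte as u32;
    }

    // keep the 24-bit mantissa non-negative: if its sign bit is set, shift it
    // down a byte and bump the exponent (this removes the redundant encodings
    // mentioned in the CompactDifficulty docs)
    if mantissa & 0x0080_0000 != 0 {
        mantissa >>= 8;
        size += 1;
    }

    (size << 24) | mantissa
}

fn main() {
    // round-trips the Mainnet snapshot value: target "00055554..." <-> bits "1f055554"
    let mut target = [0u8; 32];
    target[1..4].copy_from_slice(&[0x05, 0x55, 0x54]);
    assert_eq!(to_compact_sketch(&target), 0x1f05_5554);
}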
@ -561,11 +595,15 @@ impl PartialEq<block::Hash> for ExpandedDifficulty {
}
impl PartialOrd<block::Hash> for ExpandedDifficulty {
/// # Consensus
///
/// `block::Hash`es are compared with `ExpandedDifficulty` thresholds by
/// converting the hash to a 256-bit integer in little-endian order.
///
/// Greater values represent *less* work. This matches the convention in
/// zcashd and Bitcoin.
///
/// <https://zips.z.cash/protocol/protocol.pdf#workdef>
fn partial_cmp(&self, other: &block::Hash) -> Option<Ordering> {
self.partial_cmp(&ExpandedDifficulty::from_hash(other))
}
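
A minimal sketch of this comparison with plain 32-byte arrays standing in for `block::Hash` and the expanded target (illustrative names, not Zebra's types): reversing the little-endian hash bytes gives a big-endian array whose lexicographic order matches numeric order, so the difficulty filter becomes a byte comparison.

// Sketch of the difficulty filter: does this little-endian hash meet
// a big-endian target threshold?
fn passes_difficulty_filter(hash_le: &[u8; 32], target_be: &[u8; 32]) -> bool {
    let mut hash_be = *hash_le;
    hash_be.reverse();
    // big-endian byte arrays compare lexicographically == numerically;
    // a *smaller* hash value represents *more* work
    hash_be <= *target_be
}

fn main() {
    let easiest_target = [0xff_u8; 32];
    let zero_hash = [0_u8; 32];
    assert!(passes_difficulty_filter(&zero_hash, &easiest_target));
    assert!(!passes_difficulty_filter(&[0xff_u8; 32], &[0x7f_u8; 32]));
}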
@ -584,6 +622,8 @@ impl PartialEq<ExpandedDifficulty> for block::Hash {
impl PartialOrd<ExpandedDifficulty> for block::Hash {
/// How does `self` compare to `other`?
///
/// # Consensus
///
/// See `<ExpandedDifficulty as PartialOrd<block::Hash>>::partial_cmp`
/// for details.
#[allow(clippy::unwrap_in_result)]
@ -606,8 +646,17 @@ impl std::ops::Add for Work {
}
}
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
/// Partial work used to track relative work in non-finalized chains
///
/// # Consensus
///
/// Used to choose the best chain with the most work.
///
/// Since it is only relative values that matter, Zebra uses the partial work from a shared
/// fork root block to find the best chain.
///
/// See [`Work`] for details.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct PartialCumulativeWork(u128);
impl PartialCumulativeWork {

View File

@ -18,6 +18,7 @@ use zebra_chain::{
primitives,
serialization::ZcashDeserializeInto,
transparent,
work::difficulty::{ExpandedDifficulty, U256},
};
use zebra_consensus::{
funding_stream_address, funding_stream_values, height_for_first_halving, miner_subsidy,
@ -918,6 +919,11 @@ where
async move {
let request = ReadRequest::ChainInfo;
// # TODO
// - add a separate request like BestChainNextMedianTimePast, but skipping the
// consistency check, because any block's difficulty is ok for display
// - return 1.0 for a "not enough blocks in the state" error, like `zcashd`:
// <https://github.com/zcash/zcash/blob/7b28054e8b46eb46a9589d0bdc8e29f9fa1dc82d/src/rpc/blockchain.cpp#L40-L41>
let response = state
.ready()
.and_then(|service| service.call(request))
@ -933,32 +939,46 @@ where
_ => unreachable!("unmatched response to a chain info request"),
};
// The following code is ported from zcashd implementation.
// https://github.com/zcash/zcash/blob/v5.4.0-rc4/src/rpc/blockchain.cpp#L46-L73
// This RPC is typically used for display purposes, so it is not consensus-critical.
// But it uses the difficulty consensus rules for its calculations.
//
// Consensus:
// https://zips.z.cash/protocol/protocol.pdf#nbits
//
// The zcashd implementation performs to_expanded() on f64,
// and then does an inverse division:
// https://github.com/zcash/zcash/blob/d6e2fada844373a8554ee085418e68de4b593a6c/src/rpc/blockchain.cpp#L46-L73
//
// But in Zebra we divide the high 128 bits of each expanded difficulty. This gives
// a similar result, because the lower 128 bits are insignificant after conversion
// to `f64` with a 53-bit mantissa.
//
// `pow_limit >> 128 / difficulty >> 128` gives the same ratio as the work-based
// calculation `(2^256 / difficulty) / (2^256 / pow_limit)`, but it's a bit more accurate.
//
// To simplify the calculation, we don't scale for leading zeroes. (Bitcoin's
// difficulty currently uses 68 bits, so even it would still have full precision
// using this calculation.)
let pow_limit = u32::from_be_bytes(
zebra_chain::work::difficulty::ExpandedDifficulty::target_difficulty_limit(network)
.to_compact()
.bytes_in_display_order(),
);
let bits = u32::from_be_bytes(chain_info.expected_difficulty.bytes_in_display_order());
// Get expanded difficulties (256 bits), these are the inverse of the work
let pow_limit: U256 = ExpandedDifficulty::target_difficulty_limit(network).into();
let difficulty: U256 = chain_info
.expected_difficulty
.to_expanded()
.expect("valid blocks have valid difficulties")
.into();
let mut n_shift = (bits >> 24) & 0xff;
let n_shift_amount = (pow_limit >> 24) & 0xff;
// Shift out the lower 128 bits (256 bits, but the top 128 are all zeroes)
let pow_limit = pow_limit >> 128;
let difficulty = difficulty >> 128;
let mut d_diff: f64 = (pow_limit & 0x00ffffff) as f64 / (bits & 0x00ffffff) as f64;
// Convert to u128 then f64.
// We could also convert U256 to String, then parse as f64, but that's slower.
let pow_limit = pow_limit.as_u128() as f64;
let difficulty = difficulty.as_u128() as f64;
while n_shift < n_shift_amount {
d_diff *= 256.0;
n_shift += 1;
}
while n_shift > n_shift_amount {
d_diff /= 256.0;
n_shift -= 1;
}
Ok(d_diff)
// Invert the division to give approximately: `work(difficulty) / work(pow_limit)`
Ok(pow_limit / difficulty)
}
.boxed()
}
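
A toy check of the high-128-bit division above, with u128 standing in for the already-shifted U256 values. This is illustrative only: it ignores the small rounding from the compact encoding, which is why the updated snapshots read 1.5000028610338632 rather than exactly 1.5.

fn main() {
    // Mainnet PoWLimit is 2^243 - 1, so its high 128 bits are 2^115 - 1
    let pow_limit_high: u128 = (1u128 << 115) - 1;
    // a fake difficulty of pow_limit * 2 / 3, also shifted down by 128 bits
    let difficulty_high: u128 = pow_limit_high / 3 * 2;

    // the RPC's f64 division: approximately work(difficulty) / work(pow_limit)
    let get_difficulty = pow_limit_high as f64 / difficulty_high as f64;
    assert!((get_difficulty - 1.5).abs() < 1e-6);
}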

View File

@ -19,7 +19,7 @@ use zebra_chain::{
serialization::{DateTime32, ZcashDeserializeInto},
transaction::Transaction,
transparent,
work::difficulty::{CompactDifficulty, ExpandedDifficulty, U256},
work::difficulty::{CompactDifficulty, ExpandedDifficulty},
};
use zebra_network::{address_book_peers::MockAddressBookPeers, types::MetaAddr};
use zebra_node_services::mempool;
@ -112,7 +112,11 @@ pub async fn test_responses<State, ReadState>(
let fake_cur_time = DateTime32::from(1654008617);
// nu5 block time + 123
let fake_max_time = DateTime32::from(1654008728);
let fake_difficulty = CompactDifficulty::from(ExpandedDifficulty::from(U256::one()));
// Use a valid fractional difficulty for snapshots
let pow_limit = ExpandedDifficulty::target_difficulty_limit(network);
let fake_difficulty = pow_limit * 2 / 3;
let fake_difficulty = CompactDifficulty::from(fake_difficulty);
let (mock_chain_tip, mock_chain_tip_sender) = MockChainTip::new();
mock_chain_tip_sender.send_best_tip_height(fake_tip_height);

View File

@ -28,7 +28,7 @@ expression: block_template
"required": true
},
"longpollid": "00016871043eab7f731654008728000000000000000000",
"target": "0000000000000000000000000000000000000000000000000000000000000001",
"target": "0005555400000000000000000000000000000000000000000000000000000000",
"mintime": 1654008606,
"mutable": [
"time",
@ -39,7 +39,7 @@ expression: block_template
"sigoplimit": 20000,
"sizelimit": 2000000,
"curtime": 1654008617,
"bits": "01010000",
"bits": "1f055554",
"height": 1687105,
"maxtime": 1654008728
}

View File

@ -28,7 +28,7 @@ expression: block_template
"required": true
},
"longpollid": "00018424203eab7f731654008728000000000000000000",
"target": "0000000000000000000000000000000000000000000000000000000000000001",
"target": "0555540000000000000000000000000000000000000000000000000000000000",
"mintime": 1654008606,
"mutable": [
"time",
@ -39,7 +39,7 @@ expression: block_template
"sigoplimit": 20000,
"sizelimit": 2000000,
"curtime": 1654008617,
"bits": "01010000",
"bits": "20055554",
"height": 1842421,
"maxtime": 1654008728
}

View File

@ -28,7 +28,7 @@ expression: block_template
"required": true
},
"longpollid": "00016871043eab7f731654008728000000000000000000",
"target": "0000000000000000000000000000000000000000000000000000000000000001",
"target": "0005555400000000000000000000000000000000000000000000000000000000",
"mintime": 1654008606,
"mutable": [
"time",
@ -39,7 +39,7 @@ expression: block_template
"sigoplimit": 20000,
"sizelimit": 2000000,
"curtime": 1654008617,
"bits": "01010000",
"bits": "1f055554",
"height": 1687105,
"maxtime": 1654008728,
"submitold": false

View File

@ -28,7 +28,7 @@ expression: block_template
"required": true
},
"longpollid": "00018424203eab7f731654008728000000000000000000",
"target": "0000000000000000000000000000000000000000000000000000000000000001",
"target": "0555540000000000000000000000000000000000000000000000000000000000",
"mintime": 1654008606,
"mutable": [
"time",
@ -39,7 +39,7 @@ expression: block_template
"sigoplimit": 20000,
"sizelimit": 2000000,
"curtime": 1654008617,
"bits": "01010000",
"bits": "20055554",
"height": 1842421,
"maxtime": 1654008728,
"submitold": false

View File

@ -2,4 +2,4 @@
source: zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs
expression: difficulty
---
14134749558280407000000000000000000000000000000000000000000000000000000000.0
1.5000028610338632

View File

@ -2,4 +2,4 @@
source: zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs
expression: difficulty
---
3618495886919784300000000000000000000000000000000000000000000000000000000000.0
1.5000028610338632

View File

@ -1457,7 +1457,7 @@ async fn rpc_getdifficulty() {
let mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests();
let mut read_state = MockService::build().for_unit_tests();
let read_state = MockService::build().for_unit_tests();
let chain_verifier = MockService::build().for_unit_tests();
let mut mock_sync_status = MockSyncStatus::default();
@ -1478,7 +1478,6 @@ async fn rpc_getdifficulty() {
let fake_cur_time = DateTime32::from(1654008617);
// nu5 block time + 123
let fake_max_time = DateTime32::from(1654008728);
let fake_difficulty = CompactDifficulty::from(ExpandedDifficulty::from(U256::MAX));
let (mock_chain_tip, mock_chain_tip_sender) = MockChainTip::new();
mock_chain_tip_sender.send_best_tip_height(fake_tip_height);
@ -1497,9 +1496,12 @@ async fn rpc_getdifficulty() {
MockAddressBookPeers::default(),
);
// Fake the ChainInfo response
// Fake the ChainInfo response: smallest numeric difficulty
// (this is invalid on mainnet and testnet under the consensus rules)
let fake_difficulty = CompactDifficulty::from(ExpandedDifficulty::from(U256::MAX));
let mut read_state1 = read_state.clone();
let mock_read_state_request_handler = async move {
read_state
read_state1
.expect_request_that(|req| matches!(req, ReadRequest::ChainInfo))
.await
.respond(ReadResponse::ChainInfo(GetBlockTemplateChainInfo {
@ -1516,5 +1518,77 @@ async fn rpc_getdifficulty() {
let get_difficulty_fut = get_block_template_rpc.get_difficulty();
let (get_difficulty, ..) = tokio::join!(get_difficulty_fut, mock_read_state_request_handler,);
assert_eq!(get_difficulty.unwrap(), 0.00012207194233937495);
// Our implementation is slightly different to `zcashd`, so we require 6 significant figures
// of accuracy in our unit tests. (Most clients will hide more than 2-3.)
assert_eq!(format!("{:.9}", get_difficulty.unwrap()), "0.000122072");
// Fake the ChainInfo response: the difficulty limit, the smallest valid difficulty (1.0)
let pow_limit = ExpandedDifficulty::target_difficulty_limit(Mainnet);
let fake_difficulty = pow_limit.into();
let mut read_state2 = read_state.clone();
let mock_read_state_request_handler = async move {
read_state2
.expect_request_that(|req| matches!(req, ReadRequest::ChainInfo))
.await
.respond(ReadResponse::ChainInfo(GetBlockTemplateChainInfo {
expected_difficulty: fake_difficulty,
tip_height: fake_tip_height,
tip_hash: fake_tip_hash,
cur_time: fake_cur_time,
min_time: fake_min_time,
max_time: fake_max_time,
history_tree: fake_history_tree(Mainnet),
}));
};
let get_difficulty_fut = get_block_template_rpc.get_difficulty();
let (get_difficulty, ..) = tokio::join!(get_difficulty_fut, mock_read_state_request_handler,);
assert_eq!(format!("{:.5}", get_difficulty.unwrap()), "1.00000");
// Fake the ChainInfo response: fractional difficulty
let fake_difficulty = pow_limit * 2 / 3;
let mut read_state3 = read_state.clone();
let mock_read_state_request_handler = async move {
read_state3
.expect_request_that(|req| matches!(req, ReadRequest::ChainInfo))
.await
.respond(ReadResponse::ChainInfo(GetBlockTemplateChainInfo {
expected_difficulty: fake_difficulty.into(),
tip_height: fake_tip_height,
tip_hash: fake_tip_hash,
cur_time: fake_cur_time,
min_time: fake_min_time,
max_time: fake_max_time,
history_tree: fake_history_tree(Mainnet),
}));
};
let get_difficulty_fut = get_block_template_rpc.get_difficulty();
let (get_difficulty, ..) = tokio::join!(get_difficulty_fut, mock_read_state_request_handler,);
assert_eq!(format!("{:.5}", get_difficulty.unwrap()), "1.50000");
// Fake the ChainInfo response: large integer difficulty
let fake_difficulty = pow_limit / 4096;
let mut read_state4 = read_state.clone();
let mock_read_state_request_handler = async move {
read_state4
.expect_request_that(|req| matches!(req, ReadRequest::ChainInfo))
.await
.respond(ReadResponse::ChainInfo(GetBlockTemplateChainInfo {
expected_difficulty: fake_difficulty.into(),
tip_height: fake_tip_height,
tip_hash: fake_tip_hash,
cur_time: fake_cur_time,
min_time: fake_min_time,
max_time: fake_max_time,
history_tree: fake_history_tree(Mainnet),
}));
};
let get_difficulty_fut = get_block_template_rpc.get_difficulty();
let (get_difficulty, ..) = tokio::join!(get_difficulty_fut, mock_read_state_request_handler,);
assert_eq!(format!("{:.2}", get_difficulty.unwrap()), "4096.00");
}