Merge pull request #26 from paritytech/fix_work_on_testnet

Fix required work calculation on testnet
This commit is contained in:
Svyatoslav Nikolsky 2018-12-26 16:07:23 +03:00 committed by GitHub
commit 300945807e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 263 additions and 237 deletions

View File

@ -25,6 +25,7 @@ pub fn main(benchmark: &mut Benchmark) {
// test setup
let genesis = test_data::genesis();
let consensus = ConsensusParams::new(Network::Unitest);
let mut rolling_hash = genesis.hash();
let mut blocks: Vec<IndexedBlock> = Vec::new();
@ -44,6 +45,7 @@ pub fn main(benchmark: &mut Benchmark) {
.merkled_header()
.parent(rolling_hash.clone())
.nonce((x as u8).into())
.time(consensus.pow_target_spacing * 7 * (x as u32))
.build()
.build();
rolling_hash = next_block.hash();
@ -88,6 +90,7 @@ pub fn main(benchmark: &mut Benchmark) {
builder
.merkled_header()
.parent(rolling_hash.clone())
.time(consensus.pow_target_spacing * 7 * ((b + BLOCKS_INITIAL) as u32))
.build()
.build()
.into());
@ -96,7 +99,7 @@ pub fn main(benchmark: &mut Benchmark) {
assert_eq!(store.best_block().hash, rolling_hash);
let chain_verifier = ChainVerifier::new(store.clone(), ConsensusParams::new(Network::Unitest));
let chain_verifier = ChainVerifier::new(store.clone(), consensus);
// bench
benchmark.start();

View File

@ -252,7 +252,7 @@ impl BlockAssembler {
let best_block = store.best_block();
let previous_header_hash = best_block.hash;
let height = best_block.number + 1;
let bits = work_required(previous_header_hash.clone(), height, store.as_block_header_provider(), consensus);
let bits = work_required(previous_header_hash.clone(), time, height, store.as_block_header_provider(), consensus);
let version = BLOCK_VERSION;
// TODO: sync with ZCash RPC - need to return founder reward?

View File

@ -47,6 +47,8 @@ pub struct ConsensusParams {
pub pow_max_adjust_up: u32,
/// Optimal blocks interval (in seconds).
pub pow_target_spacing: u32,
/// Allow minimal difficulty after block at given height.
pub pow_allow_min_difficulty_after_height: Option<u32>,
/// 'Slow start' interval parameter.
///
@ -161,6 +163,7 @@ impl ConsensusParams {
pow_max_adjust_down: 32,
pow_max_adjust_up: 16,
pow_target_spacing: (2.5 * 60.0) as u32,
pow_allow_min_difficulty_after_height: None,
subsidy_slow_start_interval: 20_000,
subsidy_halving_interval: 840_000,
@ -239,6 +242,7 @@ impl ConsensusParams {
pow_max_adjust_down: 32,
pow_max_adjust_up: 16,
pow_target_spacing: (2.5 * 60.0) as u32,
pow_allow_min_difficulty_after_height: Some(299187),
subsidy_slow_start_interval: 20_000,
subsidy_halving_interval: 840_000,
@ -317,6 +321,7 @@ impl ConsensusParams {
pow_max_adjust_down: 0,
pow_max_adjust_up: 0,
pow_target_spacing: (2.5 * 60.0) as u32,
pow_allow_min_difficulty_after_height: Some(0),
subsidy_slow_start_interval: 0,
subsidy_halving_interval: 150,
@ -348,6 +353,7 @@ impl ConsensusParams {
pow_max_adjust_down: 0,
pow_max_adjust_up: 0,
pow_target_spacing: (2.5 * 60.0) as u32,
pow_allow_min_difficulty_after_height: Some(0),
subsidy_slow_start_interval: 0,
subsidy_halving_interval: 150,

View File

@ -75,7 +75,7 @@ impl Network {
pub fn genesis_block(&self) -> Block {
match *self {
Network::Mainnet | Network::Other(_) => "040000000000000000000000000000000000000000000000000000000000000000000000db4d7a85b768123f1dff1d4c4cece70083b2d27e117b4ac2e31d087988a5eac4000000000000000000000000000000000000000000000000000000000000000090041358ffff071f5712000000000000000000000000000000000000000000000000000000000000fd4005000a889f00854b8665cd555f4656f68179d31ccadc1b1f7fb0952726313b16941da348284d67add4686121d4e3d930160c1348d8191c25f12b267a6a9c131b5031cbf8af1f79c9d513076a216ec87ed045fa966e01214ed83ca02dc1797270a454720d3206ac7d931a0a680c5c5e099057592570ca9bdf6058343958b31901fce1a15a4f38fd347750912e14004c73dfe588b903b6c03166582eeaf30529b14072a7b3079e3a684601b9b3024054201f7440b0ee9eb1a7120ff43f713735494aa27b1f8bab60d7f398bca14f6abb2adbf29b04099121438a7974b078a11635b594e9170f1086140b4173822dd697894483e1c6b4e8b8dcd5cb12ca4903bc61e108871d4d915a9093c18ac9b02b6716ce1013ca2c1174e319c1a570215bc9ab5f7564765f7be20524dc3fdf8aa356fd94d445e05ab165ad8bb4a0db096c097618c81098f91443c719416d39837af6de85015dca0de89462b1d8386758b2cf8a99e00953b308032ae44c35e05eb71842922eb69797f68813b59caf266cb6c213569ae3280505421a7e3a0a37fdf8e2ea354fc5422816655394a9454bac542a9298f176e211020d63dee6852c40de02267e2fc9d5e1ff2ad9309506f02a1a71a0501b16d0d36f70cdfd8de78116c0c506ee0b8ddfdeb561acadf31746b5a9dd32c21930884397fb1682164cb565cc14e089d66635a32618f7eb05fe05082b8a3fae620571660a6b89886eac53dec109d7cbb6930ca698a168f301a950be152da1be2b9e07516995e20baceebecb5579d7cdbc16d09f3a50cb3c7dffe33f26686d4ff3f8946ee6475e98cf7b3cf9062b6966e838f865ff3de5fb064a37a21da7bb8dfd2501a29e184f207caaba364f36f2329a77515dcb710e29ffbf73e2bbd773fab1f9a6b005567affff605c132e4e4dd69f36bd201005458cfbd2c658701eb2a700251cefd886b1e674ae816d3f719bac64be649c172ba27a4fd55947d95d53ba4cbc73de97b8af5ed4840b659370c556e7376457f51e5ebb66018849923db82c1c9a819f173cccdb8f3324b239609a300018d0fb094adf5bd7cbb3834c69e6d0b3798065c525b20f040e965e1a161af78ff7561cd874f5f1b75aa0bc77f720589e1b810f831eac5073e6dd46d00a2793f70f7427f0f798f2f53a67e615e65d356e66fe406
09a958a05edb4c175bcc383ea0530e67ddbe479a898943c6e3074c6fcc252d6014de3a3d292b03f0d88d312fe221be7be7e3c59d07fa0f2f4029e364f1f355c5d01fa53770d0cd76d82bf7e60f6903bc1beb772e6fde4a70be51d9c7e03c8d6d8dfb361a234ba47c470fe630820bbd920715621b9fbedb49fcee165ead0875e6c2b1af16f50b5d6140cc981122fcbcf7c5a4e3772b3661b628e08380abc545957e59f634705b1bbde2f0b4e055a5ec5676d859be77e20962b645e051a880fddb0180b4555789e1f9344a436a84dc5579e2553f1e5fb0a599c137be36cabbed0319831fea3fddf94ddc7971e4bcf02cdc93294a9aab3e3b13e3b058235b4f4ec06ba4ceaa49d675b4ba80716f3bc6976b1fbf9c8bf1f3e3a4dc1cd83ef9cf816667fb94f1e923ff63fef072e6a19321e4812f96cb0ffa864da50ad74deb76917a336f31dce03ed5f0303aad5e6a83634f9fcc371096f8288b8f02ddded5ff1bb9d49331e4a84dbe1543164438fde9ad71dab024779dcdde0b6602b5ae0a6265c14b94edd83b37403f4b78fcd2ed555b596402c28ee81d87a909c4e8722b30c71ecdd861b05f61f8b1231795c76adba2fdefa451b283a5d527955b9f3de1b9828e7b2e74123dd47062ddcc09b05e7fa13cb2212a6fdbc65d7e852cec463ec6fd929f5b8483cf3052113b13dac91b69f49d1b7d1aec01c4a68e41ce1570101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff071f0104455a6361736830623963346565663862376363343137656535303031653335303039383462366665613335363833613763616331343161303433633432303634383335643334ffffffff010000000000000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
Network::Testnet => "040000000000000000000000000000000000000000000000000000000000000000000000db4d7a85b768123f1dff1d4c4cece70083b2d27e117b4ac2e31d087988a5eac4000000000000000000000000000000000000000000000000000000000000000090041358ffff071f5712000000000000000000000000000000000000000000000000000000000000fd4005000a889f00854b8665cd555f4656f68179d31ccadc1b1f7fb0952726313b16941da348284d67add4686121d4e3d930160c1348d8191c25f12b267a6a9c131b5031cbf8af1f79c9d513076a216ec87ed045fa966e01214ed83ca02dc1797270a454720d3206ac7d931a0a680c5c5e099057592570ca9bdf6058343958b31901fce1a15a4f38fd347750912e14004c73dfe588b903b6c03166582eeaf30529b14072a7b3079e3a684601b9b3024054201f7440b0ee9eb1a7120ff43f713735494aa27b1f8bab60d7f398bca14f6abb2adbf29b04099121438a7974b078a11635b594e9170f1086140b4173822dd697894483e1c6b4e8b8dcd5cb12ca4903bc61e108871d4d915a9093c18ac9b02b6716ce1013ca2c1174e319c1a570215bc9ab5f7564765f7be20524dc3fdf8aa356fd94d445e05ab165ad8bb4a0db096c097618c81098f91443c719416d39837af6de85015dca0de89462b1d8386758b2cf8a99e00953b308032ae44c35e05eb71842922eb69797f68813b59caf266cb6c213569ae3280505421a7e3a0a37fdf8e2ea354fc5422816655394a9454bac542a9298f176e211020d63dee6852c40de02267e2fc9d5e1ff2ad9309506f02a1a71a0501b16d0d36f70cdfd8de78116c0c506ee0b8ddfdeb561acadf31746b5a9dd32c21930884397fb1682164cb565cc14e089d66635a32618f7eb05fe05082b8a3fae620571660a6b89886eac53dec109d7cbb6930ca698a168f301a950be152da1be2b9e07516995e20baceebecb5579d7cdbc16d09f3a50cb3c7dffe33f26686d4ff3f8946ee6475e98cf7b3cf9062b6966e838f865ff3de5fb064a37a21da7bb8dfd2501a29e184f207caaba364f36f2329a77515dcb710e29ffbf73e2bbd773fab1f9a6b005567affff605c132e4e4dd69f36bd201005458cfbd2c658701eb2a700251cefd886b1e674ae816d3f719bac64be649c172ba27a4fd55947d95d53ba4cbc73de97b8af5ed4840b659370c556e7376457f51e5ebb66018849923db82c1c9a819f173cccdb8f3324b239609a300018d0fb094adf5bd7cbb3834c69e6d0b3798065c525b20f040e965e1a161af78ff7561cd874f5f1b75aa0bc77f720589e1b810f831eac5073e6dd46d00a2793f70f7427f0f798f2f53a67e615e65d356e66fe40609a958a05edb4c175bcc
383ea0530e67ddbe479a898943c6e3074c6fcc252d6014de3a3d292b03f0d88d312fe221be7be7e3c59d07fa0f2f4029e364f1f355c5d01fa53770d0cd76d82bf7e60f6903bc1beb772e6fde4a70be51d9c7e03c8d6d8dfb361a234ba47c470fe630820bbd920715621b9fbedb49fcee165ead0875e6c2b1af16f50b5d6140cc981122fcbcf7c5a4e3772b3661b628e08380abc545957e59f634705b1bbde2f0b4e055a5ec5676d859be77e20962b645e051a880fddb0180b4555789e1f9344a436a84dc5579e2553f1e5fb0a599c137be36cabbed0319831fea3fddf94ddc7971e4bcf02cdc93294a9aab3e3b13e3b058235b4f4ec06ba4ceaa49d675b4ba80716f3bc6976b1fbf9c8bf1f3e3a4dc1cd83ef9cf816667fb94f1e923ff63fef072e6a19321e4812f96cb0ffa864da50ad74deb76917a336f31dce03ed5f0303aad5e6a83634f9fcc371096f8288b8f02ddded5ff1bb9d49331e4a84dbe1543164438fde9ad71dab024779dcdde0b6602b5ae0a6265c14b94edd83b37403f4b78fcd2ed555b596402c28ee81d87a909c4e8722b30c71ecdd861b05f61f8b1231795c76adba2fdefa451b283a5d527955b9f3de1b9828e7b2e74123dd47062ddcc09b05e7fa13cb2212a6fdbc65d7e852cec463ec6fd929f5b8483cf3052113b13dac91b69f49d1b7d1aec01c4a68e41ce1570101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff071f0104455a6361736830623963346565663862376363343137656535303031653335303039383462366665613335363833613763616331343161303433633432303634383335643334ffffffff010000000000000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
Network::Testnet => "040000000000000000000000000000000000000000000000000000000000000000000000db4d7a85b768123f1dff1d4c4cece70083b2d27e117b4ac2e31d087988a5eac40000000000000000000000000000000000000000000000000000000000000000a11e1358ffff07200600000000000000000000000000000000000000000000000000000000000000fd400500a6a51259c3f6732481e2d035197218b7a69504461d04335503cd69759b2d02bd2b53a9653f42cb33c608511c953673fa9da76170958115fe92157ad3bb5720d927f18e09459bf5c6072973e143e20f9bdf0584058c96b7c2234c7565f100d5eea083ba5d3dbaff9f0681799a113e7beff4a611d2b49590563109962baa149b628aae869af791f2f70bb041bd7ebfa658570917f6654a142b05e7ec0289a4f46470be7be5f693b90173eaaa6e84907170f32602204f1f4e1c04b1830116ffd0c54f0b1caa9a5698357bd8aa1f5ac8fc93b405265d824ba0e49f69dab5446653927298e6b7bdc61ee86ff31c07bde86331b4e500d42e4e50417e285502684b7966184505b885b42819a88469d1e9cf55072d7f3510f85580db689302eab377e4e11b14a91fdd0df7627efc048934f0aff8e7eb77eb17b3a95de13678004f2512293891d8baf8dde0ef69be520a58bbd6038ce899c9594cf3e30b8c3d9c7ecc832d4c19a6212747b50724e6f70f6451f78fd27b58ce43ca33b1641304a916186cfbe7dbca224f55d08530ba851e4df22baf7ab7078e9cbea46c0798b35a750f54103b0cdd08c81a6505c4932f6bfbd492a9fced31d54e98b6370d4c96600552fcf5b37780ed18c8787d03200963600db297a8f05dfa551321d17b9917edadcda51e274830749d133ad226f8bb6b94f13b4f77e67b35b71f52112ce9ba5da706ad9573584a2570a4ff25d29ab9761a06bdcf2c33638bf9baf2054825037881c14adf3816ba0cbd0fca689aad3ce16f2fe362c98f48134a9221765d939f0b49677d1c2447e56b46859f1810e2cf23e82a53e0d44f34dae932581b3b7f49eaec59af872cf9de757a964f7b33d143a36c270189508fcafe19398e4d2966948164d40556b05b7ff532f66f5d1edc41334ef742f78221dfe0c7ae2275bb3f24c89ae35f00afeea4e6ed187b866b209dc6e83b660593fce7c40e143beb07ac86c56f39e895385924667efe3a3f031938753c7764a2dbeb0a643fd359c46e614873fd0424e435fa7fac083b9a41a9d6bf7e284eee537ea7c50dd239f359941a43dc982745184bf3ee31a8dc850316aa9c6b66d6985acee814373be3458550659e1a06287c3b3b76a185c5cb93e38c1eebcf34ff072894b6430aed8d34122dafd925c46a515cca79b0269c92b301890ca6b0dc8
b679cdac0f23318c105de73d7a46d16d2dad988d49c22e9963c117960bdc70ef0db6b091cf09445a516176b7f6d58ec29539166cc8a38bbff387acefffab2ea5faad0e8bb70625716ef0edf61940733c25993ea3de9f0be23d36e7cb8da10505f9dc426cd0e6e5b173ab4fff8c37e1f1fb56d1ea372013d075e0934c6919393cfc21395eea20718fad03542a4162a9ded66c814ad8320b2d7c2da3ecaf206da34c502db2096d1c46699a91dd1c432f019ad434e2c1ce507f91104f66f491fed37b225b8e0b2888c37276cfa0468fc13b8d593fd9a2675f0f5b20b8a15f8fa7558176a530d6865738ddb25d3426dab905221681cf9da0e0200eea5b2eba3ad3a5237d2a391f9074bf1779a2005cee43eec2b058511532635e0fea61664f531ac2b356f40db5c5d275a4cf5c82d468976455af4e3362cc8f71aa95e71d394aff3ead6f7101279f95bcd8a0fedce1d21cb3c9f6dd3b182fce0db5d6712981b651f29178a24119968b14783cafa713bc5f2a65205a42e4ce9dc7ba462bdb1f3e4553afc15f5f39998fdb53e7e231e3e520a46943734a007c2daa1eda9f495791657eefcac5c32833936e568d06187857ed04d7b97167ae207c5c5ae54e528c36016a984235e9c5b2f0718d7b3aa93c7822ccc772580b6599671b3c02ece8a21399abd33cfd3028790133167d0a97e7de53dc8ff0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff071f0104455a6361736830623963346565663862376363343137656535303031653335303039383462366665613335363833613763616331343161303433633432303634383335643334ffffffff010000000000000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
Network::Regtest | Network::Unitest => "TODO".into(),
}
}

View File

@ -386,6 +386,7 @@ pub mod tests {
.merkled_header()
.parent(rolling_hash.clone())
.bits(Network::Unitest.max_bits().into())
.time(consensus.pow_target_spacing * 7 * i)
.build()
.build();
rolling_hash = next_block.hash();
@ -408,6 +409,7 @@ pub mod tests {
.merkled_header()
.parent(last_block_hash)
.bits(Network::Unitest.max_bits().into())
.time(consensus.pow_target_spacing * 7 * 102)
.build()
.build().into();

View File

@ -17,14 +17,14 @@ pub struct ChainAcceptor<'a> {
}
impl<'a> ChainAcceptor<'a> {
pub fn new(store: &'a Store, consensus: &'a ConsensusParams, verification_level: VerificationLevel, block: CanonBlock<'a>, height: u32, deployments: &'a BlockDeployments) -> Self {
pub fn new(store: &'a Store, consensus: &'a ConsensusParams, verification_level: VerificationLevel, block: CanonBlock<'a>, height: u32, time: u32, deployments: &'a BlockDeployments) -> Self {
trace!(target: "verification", "Block verification {}", block.hash().to_reversed_str());
let output_store = DuplexTransactionOutputProvider::new(store.as_transaction_output_provider(), block.raw());
let headers = store.as_block_header_provider();
ChainAcceptor {
block: BlockAcceptor::new(store.as_transaction_output_provider(), consensus, block, height, deployments, headers),
header: HeaderAcceptor::new(headers, consensus, block.header(), height, deployments),
header: HeaderAcceptor::new(headers, consensus, block.header(), height, time, deployments),
transactions: block.transactions()
.into_iter()
.enumerate()

View File

@ -18,11 +18,12 @@ impl<'a> HeaderAcceptor<'a> {
consensus: &'a ConsensusParams,
header: CanonHeader<'a>,
height: u32,
time: u32,
deployments: D,
) -> Self {
let csv_active = deployments.as_ref().csv(height, store, consensus);
HeaderAcceptor {
work: HeaderWork::new(header, store, height, consensus),
work: HeaderWork::new(header, store, height, time, consensus),
median_timestamp: HeaderMedianTimestamp::new(header, store, csv_active),
version: HeaderVersion::new(header, height, consensus),
}
@ -66,22 +67,24 @@ pub struct HeaderWork<'a> {
header: CanonHeader<'a>,
store: &'a BlockHeaderProvider,
height: u32,
time: u32,
consensus: &'a ConsensusParams,
}
impl<'a> HeaderWork<'a> {
fn new(header: CanonHeader<'a>, store: &'a BlockHeaderProvider, height: u32, consensus: &'a ConsensusParams) -> Self {
fn new(header: CanonHeader<'a>, store: &'a BlockHeaderProvider, height: u32, time: u32, consensus: &'a ConsensusParams) -> Self {
HeaderWork {
header: header,
store: store,
height: height,
time: time,
consensus: consensus,
}
}
fn check(&self) -> Result<(), Error> {
let previous_header_hash = self.header.raw.previous_header_hash.clone();
let work = work_required(previous_header_hash, self.height, self.store, self.consensus);
let work = work_required(previous_header_hash, self.time, self.height, self.store, self.consensus);
if work == self.header.raw.bits {
Ok(())
} else {

View File

@ -54,7 +54,7 @@ impl BackwardsCompatibleChainVerifier {
let deployments = BlockDeployments::new(&self.deployments, block_number, header_provider, &self.consensus);
let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(self.store.as_store(), &self.consensus, verification_level,
canon_block, block_number, &deployments);
canon_block, block_number, block.header.raw.time, &deployments);
chain_acceptor.check()?;
},
BlockOrigin::SideChain(origin) => {
@ -64,7 +64,7 @@ impl BackwardsCompatibleChainVerifier {
let fork = self.store.fork(origin)?;
let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(fork.store(), &self.consensus, verification_level, canon_block,
block_number, &deployments);
block_number, block.header.raw.time, &deployments);
chain_acceptor.check()?;
},
BlockOrigin::SideChainBecomingCanonChain(origin) => {
@ -74,7 +74,7 @@ impl BackwardsCompatibleChainVerifier {
let fork = self.store.fork(origin)?;
let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(fork.store(), &self.consensus, verification_level, canon_block,
block_number, &deployments);
block_number, block.header.raw.time, &deployments);
chain_acceptor.check()?;
},
}

View File

@ -88,7 +88,6 @@ mod sapling;
mod sigops;
mod timestamp;
mod work;
mod work_zcash;
// pre-verification
mod verify_block;

View File

@ -1,10 +1,9 @@
use primitives::compact::Compact;
use primitives::hash::H256;
use primitives::bigint::U256;
use chain::IndexedBlockHeader;
use primitives::bigint::{Uint, U256};
use network::ConsensusParams;
use storage::BlockHeaderProvider;
use work_zcash::work_required_zcash;
use storage::{BlockHeaderProvider, BlockAncestors};
use timestamp::median_timestamp_inclusive;
/// Returns true if hash is lower or equal than target represented by compact bits
pub fn is_valid_proof_of_work_hash(bits: Compact, hash: &H256) -> bool {
@ -35,16 +34,246 @@ pub fn is_valid_proof_of_work(max_work_bits: Compact, bits: Compact, hash: &H256
}
/// Returns work required for given header
pub fn work_required(parent_hash: H256, height: u32, store: &BlockHeaderProvider, consensus: &ConsensusParams) -> Compact {
pub fn work_required(parent_hash: H256, time: u32, height: u32, store: &BlockHeaderProvider, consensus: &ConsensusParams) -> Compact {
let max_bits = consensus.network.max_bits().into();
// chain starts with has minimal difficulty
if height == 0 {
return max_bits;
}
let parent_header = store.block_header(parent_hash.clone().into()).expect("self.height != 0; qed");
work_required_zcash(IndexedBlockHeader {
hash: parent_hash,
raw: parent_header
}, store, consensus, max_bits)
// Special difficulty rule for testnet:
// If the new block's timestamp is more than 6 * 2.5 minutes
// then allow mining of a min-difficulty block.
if let Some(allow_min_difficulty_after_height) = consensus.pow_allow_min_difficulty_after_height {
if height >= allow_min_difficulty_after_height {
if time > parent_header.time + consensus.pow_target_spacing * 6 {
return max_bits;
}
}
}
// Find the first block in the averaging interval + calculate total difficulty for blocks in the interval
let (count, oldest_hash, bits_total) = BlockAncestors::new(parent_header.previous_header_hash.into(), store)
.take(consensus.pow_averaging_window as usize - 1)
.fold((1, Default::default(), U256::from(parent_header.bits)), |(count, _, bits_total), header|
(count + 1, header.previous_header_hash, bits_total.overflowing_add(header.bits.into()).0));
if count != consensus.pow_averaging_window {
return max_bits;
}
let bits_avg = bits_total / consensus.pow_averaging_window.into();
let parent_mtp = median_timestamp_inclusive(parent_header.hash(), store);
let oldest_mtp = median_timestamp_inclusive(oldest_hash, store);
calculate_work_required(bits_avg, parent_mtp, oldest_mtp, consensus, max_bits)
}
fn calculate_work_required(bits_avg: U256, parent_mtp: u32, oldest_mtp: u32, consensus: &ConsensusParams, max_bits: Compact) -> Compact {
// Limit adjustment step
// Use medians to prevent time-warp attacks
let actual_timespan = parent_mtp - oldest_mtp;
let mut actual_timespan = consensus.averaging_window_timespan() as i64 +
(actual_timespan as i64 - consensus.averaging_window_timespan() as i64) / 4;
if actual_timespan < consensus.min_actual_timespan() as i64 {
actual_timespan = consensus.min_actual_timespan() as i64;
}
if actual_timespan > consensus.max_actual_timespan() as i64 {
actual_timespan = consensus.max_actual_timespan() as i64;
}
// Retarget
let actual_timespan = actual_timespan as u32;
let mut bits_new = bits_avg / consensus.averaging_window_timespan().into();
bits_new = bits_new * actual_timespan.into();
if bits_new > max_bits.into() {
return max_bits;
}
bits_new.into()
}
#[cfg(test)]
mod tests {
extern crate test_data;
use std::collections::HashMap;
use primitives::bytes::Bytes;
use primitives::compact::Compact;
use primitives::bigint::U256;
use primitives::hash::H256;
use network::{Network, ConsensusParams};
use chain::BlockHeader;
use storage::{BlockHeaderProvider, BlockRef};
use timestamp::median_timestamp_inclusive;
use super::{work_required, calculate_work_required};
#[derive(Default)]
pub struct MemoryBlockHeaderProvider {
pub by_height: Vec<BlockHeader>,
pub by_hash: HashMap<H256, usize>,
}
impl MemoryBlockHeaderProvider {
pub fn last(&self) -> &BlockHeader {
self.by_height.last().unwrap()
}
pub fn insert(&mut self, header: BlockHeader) {
self.by_hash.insert(header.hash(), self.by_height.len());
self.by_height.push(header);
}
pub fn replace_last(&mut self, header: BlockHeader) {
let idx = self.by_height.len() - 1;
self.by_hash.remove(&self.by_height[idx].hash());
self.by_hash.insert(header.hash(), idx);
self.by_height[idx] = header;
}
pub fn next_height(&self) -> u32 {
self.by_height.len() as u32
}
pub fn next_time(&self) -> u32 {
self.last().time + (self.last().time - self.by_height[self.by_height.len() - 2].time)
}
}
impl BlockHeaderProvider for MemoryBlockHeaderProvider {
fn block_header_bytes(&self, _block_ref: BlockRef) -> Option<Bytes> {
unimplemented!()
}
fn block_header(&self, block_ref: BlockRef) -> Option<BlockHeader> {
match block_ref {
BlockRef::Hash(ref hash) => self.by_hash.get(hash).map(|h| &self.by_height[*h]).cloned(),
BlockRef::Number(height) => self.by_height.get(height as usize).cloned(),
}
}
}
#[test]
fn main_chain_required_work_works() {
let consensus = ConsensusParams::new(Network::Mainnet);
// insert genesis block
let mut header_provider = MemoryBlockHeaderProvider::default();
let genesis = test_data::genesis().block_header;
header_provider.insert(genesis.clone());
// assert block#1 work
let h1 = test_data::block_h1();
let expected = h1.block_header.bits;
let actual = work_required(genesis.hash(), h1.block_header.time, 1, &header_provider, &consensus);
assert_eq!(expected, actual);
}
// original test link:
// https://github.com/Bitcoin-ABC/bitcoin-abc/blob/d8eac91f8d16716eed0ad11ccac420122280bb13/src/test/pow_tests.cpp#L193
#[test]
fn work_required_works() {
let consensus = ConsensusParams::new(Network::Mainnet);
let max_bits = Network::Mainnet.max_bits();
let last_block = 2 * consensus.pow_averaging_window;
let first_block = last_block - consensus.pow_averaging_window;
// insert genesis block
let mut header_provider = MemoryBlockHeaderProvider::default();
header_provider.insert(BlockHeader {
time: 1269211443,
bits: Compact::new(0x1e7fffff),
version: 0,
previous_header_hash: 0.into(),
merkle_root_hash: 0.into(),
nonce: 0.into(),
reserved_hash: Default::default(),
solution: Default::default(),
});
// Start with blocks evenly-spaced and equal difficulty
for i in 1..last_block+1 {
let header = BlockHeader {
time: header_provider.last().time + consensus.pow_target_spacing,
bits: Compact::new(0x1e7fffff),
version: 0,
previous_header_hash: header_provider.by_height[i as usize - 1].hash(),
merkle_root_hash: 0.into(),
nonce: 0.into(),
reserved_hash: Default::default(),
solution: Default::default(),
};
header_provider.insert(header);
}
// Result should be the same as if last difficulty was used
let bits_avg: U256 = header_provider.by_height[last_block as usize].bits.into();
let expected = calculate_work_required(bits_avg,
median_timestamp_inclusive(header_provider.by_height[last_block as usize].hash(), &header_provider),
median_timestamp_inclusive(header_provider.by_height[first_block as usize].hash(), &header_provider),
&consensus, max_bits.into());
let actual = work_required(header_provider.last().hash(), header_provider.next_time(), header_provider.next_height(),
&header_provider, &consensus);
assert_eq!(actual, expected);
// Result should be unchanged, modulo integer division precision loss
let mut bits_expected: U256 = Compact::new(0x1e7fffff).into();
bits_expected = bits_expected / consensus.averaging_window_timespan().into();
bits_expected = bits_expected * consensus.averaging_window_timespan().into();
assert_eq!(work_required(header_provider.last().hash(), header_provider.next_time(), header_provider.next_height(),
&header_provider, &consensus),
bits_expected.into());
// Randomise the final block time (plus 1 to ensure it is always different)
use rand::{thread_rng, Rng};
let mut last_header = header_provider.by_height[last_block as usize].clone();
last_header.time += thread_rng().gen_range(1, consensus.pow_target_spacing / 2);
header_provider.replace_last(last_header);
// Result should be the same as if last difficulty was used
let bits_avg: U256 = header_provider.by_height[last_block as usize].bits.into();
let expected = calculate_work_required(bits_avg,
median_timestamp_inclusive(header_provider.by_height[last_block as usize].hash(), &header_provider),
median_timestamp_inclusive(header_provider.by_height[first_block as usize].hash(), &header_provider),
&consensus, max_bits.into());
let actual = work_required(header_provider.last().hash(), header_provider.next_time(), header_provider.next_height(),
&header_provider, &consensus);
assert_eq!(actual, expected);
// Result should not be unchanged
let bits_expected = Compact::new(0x1e7fffff);
assert!(work_required(header_provider.last().hash(), header_provider.next_time(), header_provider.next_height(),
&header_provider, &consensus) != bits_expected);
// Change the final block difficulty
let mut last_header = header_provider.by_height[last_block as usize].clone();
last_header.bits = Compact::new(0x1e0fffff);
header_provider.replace_last(last_header);
// Result should not be the same as if last difficulty was used
let bits_avg = header_provider.by_height[last_block as usize].bits;
let expected = calculate_work_required(bits_avg.into(),
median_timestamp_inclusive(header_provider.by_height[last_block as usize].hash(), &header_provider),
median_timestamp_inclusive(header_provider.by_height[first_block as usize].hash(), &header_provider),
&consensus, max_bits.into());
let actual = work_required(header_provider.last().hash(), header_provider.next_time(), header_provider.next_height(),
&header_provider, &consensus);
assert!(actual != expected);
// Result should be the same as if the average difficulty was used
let bits_avg = "0000796968696969696969696969696969696969696969696969696969696969".parse().unwrap();
let expected = calculate_work_required(bits_avg,
median_timestamp_inclusive(header_provider.by_height[last_block as usize].hash(), &header_provider),
median_timestamp_inclusive(header_provider.by_height[first_block as usize].hash(), &header_provider),
&consensus, max_bits.into());
let actual = work_required(header_provider.last().hash(), header_provider.next_time(), header_provider.next_height(),
&header_provider, &consensus);
assert_eq!(actual, expected);
}
}

View File

@ -1,216 +0,0 @@
use primitives::compact::Compact;
use primitives::bigint::{U256, Uint};
use chain::IndexedBlockHeader;
use network::ConsensusParams;
use storage::BlockHeaderProvider;
use timestamp::median_timestamp_inclusive;
/// Returns work required for given header for the ZCash block
pub fn work_required_zcash(parent_header: IndexedBlockHeader, store: &BlockHeaderProvider, consensus: &ConsensusParams, max_bits: Compact) -> Compact {
// TODO: special testnet case!
// Find the first block in the averaging interval
let parent_hash = parent_header.hash.clone();
let mut oldest_hash = parent_header.raw.previous_header_hash;
let mut bits_total: U256 = parent_header.raw.bits.into();
for _ in 1..consensus.pow_averaging_window {
let previous_header = match store.block_header(oldest_hash.into()) {
Some(previous_header) => previous_header,
None => return max_bits,
};
// TODO: check this
bits_total = match bits_total.overflowing_add(previous_header.bits.into()) {
(bits_total, false) => bits_total,
(_, true) => return max_bits,
};
oldest_hash = previous_header.previous_header_hash;
}
let bits_avg = bits_total / consensus.pow_averaging_window.into();
let parent_mtp = median_timestamp_inclusive(parent_hash, store);
let oldest_mtp = median_timestamp_inclusive(oldest_hash, store);
calculate_work_required(bits_avg, parent_mtp, oldest_mtp, consensus, max_bits)
}
fn calculate_work_required(bits_avg: U256, parent_mtp: u32, oldest_mtp: u32, consensus: &ConsensusParams, max_bits: Compact) -> Compact {
// Limit adjustment step
// Use medians to prevent time-warp attacks
let actual_timespan = parent_mtp - oldest_mtp;
let mut actual_timespan = consensus.averaging_window_timespan() as i64 +
(actual_timespan as i64 - consensus.averaging_window_timespan() as i64) / 4;
if actual_timespan < consensus.min_actual_timespan() as i64 {
actual_timespan = consensus.min_actual_timespan() as i64;
}
if actual_timespan > consensus.max_actual_timespan() as i64 {
actual_timespan = consensus.max_actual_timespan() as i64;
}
// Retarget
let actual_timespan = actual_timespan as u32;
let mut bits_new = bits_avg / consensus.averaging_window_timespan().into();
bits_new = bits_new * actual_timespan.into();
if bits_new > max_bits.into() {
return max_bits;
}
bits_new.into()
}
#[cfg(test)]
mod tests {
	use std::collections::HashMap;
	use primitives::bytes::Bytes;
	use primitives::compact::Compact;
	use primitives::bigint::U256;
	use primitives::hash::H256;
	use network::{Network, ConsensusParams};
	use chain::BlockHeader;
	use storage::{BlockHeaderProvider, BlockRef};
	use timestamp::median_timestamp_inclusive;
	use super::{work_required_zcash, calculate_work_required};

	/// In-memory `BlockHeaderProvider` that indexes headers both by height
	/// (insertion order) and by hash, so the difficulty functions can look
	/// blocks up either way.
	#[derive(Default)]
	pub struct MemoryBlockHeaderProvider {
		// headers in chain order; index == height
		pub by_height: Vec<BlockHeader>,
		// header hash -> index into `by_height`
		pub by_hash: HashMap<H256, usize>,
	}

	impl MemoryBlockHeaderProvider {
		/// Returns the header of the best (last inserted) block.
		/// Panics if the provider is empty.
		pub fn last(&self) -> &BlockHeader {
			self.by_height.last().unwrap()
		}

		/// Appends a header to the tip of the chain, registering it in the
		/// hash index at its new height.
		pub fn insert(&mut self, header: BlockHeader) {
			self.by_hash.insert(header.hash(), self.by_height.len());
			self.by_height.push(header);
		}

		/// Replaces the best block header in place. The old header's hash is
		/// removed from the index first so both indexes stay consistent.
		pub fn replace_last(&mut self, header: BlockHeader) {
			let idx = self.by_height.len() - 1;
			self.by_hash.remove(&self.by_height[idx].hash());
			self.by_hash.insert(header.hash(), idx);
			self.by_height[idx] = header;
		}
	}

	impl BlockHeaderProvider for MemoryBlockHeaderProvider {
		fn block_header_bytes(&self, _block_ref: BlockRef) -> Option<Bytes> {
			// not needed by the difficulty calculation under test
			unimplemented!()
		}

		fn block_header(&self, block_ref: BlockRef) -> Option<BlockHeader> {
			match block_ref {
				BlockRef::Hash(ref hash) => self.by_hash.get(hash).map(|h| &self.by_height[*h]).cloned(),
				BlockRef::Number(height) => self.by_height.get(height as usize).cloned(),
			}
		}
	}

	/// Builds a test header with the given fields; everything that does not
	/// participate in the difficulty calculation is zeroed/defaulted.
	fn test_header(time: u32, bits: Compact, previous_header_hash: H256) -> BlockHeader {
		BlockHeader {
			time: time,
			bits: bits,
			version: 0,
			previous_header_hash: previous_header_hash,
			merkle_root_hash: 0.into(),
			nonce: 0.into(),
			reserved_hash: Default::default(),
			solution: Default::default(),
		}
	}

	// original test link:
	// https://github.com/Bitcoin-ABC/bitcoin-abc/blob/d8eac91f8d16716eed0ad11ccac420122280bb13/src/test/pow_tests.cpp#L193
	#[test]
	fn zcash_work_required_works() {
		let consensus = ConsensusParams::new(Network::Mainnet);
		let max_bits = Network::Mainnet.max_bits();

		// the averaging window [first_block; last_block] used by the retarget
		let last_block = 2 * consensus.pow_averaging_window;
		let first_block = last_block - consensus.pow_averaging_window;

		// insert genesis block
		let mut header_provider = MemoryBlockHeaderProvider::default();
		header_provider.insert(test_header(1269211443, Compact::new(0x1e7fffff), 0.into()));

		// Start with blocks evenly-spaced and equal difficulty
		for i in 1..last_block+1 {
			let header = test_header(
				header_provider.last().time + consensus.pow_target_spacing,
				Compact::new(0x1e7fffff),
				header_provider.by_height[i as usize - 1].hash(),
			);
			header_provider.insert(header);
		}

		// Result should be the same as if last difficulty was used
		// (all window difficulties are equal, so avg == last)
		let bits_avg: U256 = header_provider.by_height[last_block as usize].bits.into();
		let expected = calculate_work_required(bits_avg,
			median_timestamp_inclusive(header_provider.by_height[last_block as usize].hash(), &header_provider),
			median_timestamp_inclusive(header_provider.by_height[first_block as usize].hash(), &header_provider),
			&consensus, max_bits.into());
		let actual = work_required_zcash(header_provider.last().clone().into(),
			&header_provider, &consensus, max_bits.into());
		assert_eq!(actual, expected);

		// Result should be unchanged, modulo integer division precision loss
		let mut bits_expected: U256 = Compact::new(0x1e7fffff).into();
		bits_expected = bits_expected / consensus.averaging_window_timespan().into();
		bits_expected = bits_expected * consensus.averaging_window_timespan().into();
		assert_eq!(work_required_zcash(header_provider.last().clone().into(),
			&header_provider, &consensus, max_bits.into()),
			bits_expected.into());

		// Randomise the final block time (lower bound 1 ensures it is always different)
		use rand::{thread_rng, Rng};
		let mut last_header = header_provider.by_height[last_block as usize].clone();
		last_header.time += thread_rng().gen_range(1, consensus.pow_target_spacing / 2);
		header_provider.replace_last(last_header);

		// Result should be the same as if last difficulty was used
		let bits_avg: U256 = header_provider.by_height[last_block as usize].bits.into();
		let expected = calculate_work_required(bits_avg,
			median_timestamp_inclusive(header_provider.by_height[last_block as usize].hash(), &header_provider),
			median_timestamp_inclusive(header_provider.by_height[first_block as usize].hash(), &header_provider),
			&consensus, max_bits.into());
		let actual = work_required_zcash(header_provider.last().clone().into(),
			&header_provider, &consensus, max_bits.into());
		assert_eq!(actual, expected);

		// Result should now differ from the original difficulty, since the
		// final block is no longer exactly on the target spacing
		let bits_expected = Compact::new(0x1e7fffff);
		assert!(work_required_zcash(header_provider.last().clone().into(),
			&header_provider, &consensus, max_bits.into()) != bits_expected);

		// Change the final block difficulty
		let mut last_header = header_provider.by_height[last_block as usize].clone();
		last_header.bits = Compact::new(0x1e0fffff);
		header_provider.replace_last(last_header);

		// Result should not be the same as if last difficulty was used
		// (the window average no longer equals the last block's bits)
		let bits_avg = header_provider.by_height[last_block as usize].bits;
		let expected = calculate_work_required(bits_avg.into(),
			median_timestamp_inclusive(header_provider.by_height[last_block as usize].hash(), &header_provider),
			median_timestamp_inclusive(header_provider.by_height[first_block as usize].hash(), &header_provider),
			&consensus, max_bits.into());
		let actual = work_required_zcash(header_provider.last().clone().into(),
			&header_provider, &consensus, max_bits.into());
		assert!(actual != expected);

		// Result should be the same as if the average difficulty was used
		// (precomputed average of the window, ported from the ABC test)
		let bits_avg = "0000796968696969696969696969696969696969696969696969696969696969".parse().unwrap();
		let expected = calculate_work_required(bits_avg,
			median_timestamp_inclusive(header_provider.by_height[last_block as usize].hash(), &header_provider),
			median_timestamp_inclusive(header_provider.by_height[first_block as usize].hash(), &header_provider),
			&consensus, max_bits.into());
		let actual = work_required_zcash(header_provider.last().clone().into(),
			&header_provider, &consensus, max_bits.into());
		assert_eq!(actual, expected);
	}
}