uahf: REQ-7 difficulty adjustment

This commit is contained in:
Svyatoslav Nikolsky 2017-08-08 18:33:55 +03:00
parent a769868139
commit bcdc1e6813
4 changed files with 48 additions and 17 deletions

View File

@ -3,7 +3,7 @@ use primitives::hash::H256;
use primitives::compact::Compact; use primitives::compact::Compact;
use chain::{OutPoint, TransactionOutput, IndexedTransaction}; use chain::{OutPoint, TransactionOutput, IndexedTransaction};
use db::{SharedStore, TransactionOutputProvider}; use db::{SharedStore, TransactionOutputProvider};
use network::Magic; use network::ConsensusParams;
use memory_pool::{MemoryPool, OrderingStrategy, Entry}; use memory_pool::{MemoryPool, OrderingStrategy, Entry};
use verification::{work_required, block_reward_satoshi, transaction_sigops}; use verification::{work_required, block_reward_satoshi, transaction_sigops};
@ -233,13 +233,13 @@ impl<'a, T> Iterator for FittingTransactionsIterator<'a, T> where T: Iterator<It
} }
impl BlockAssembler { impl BlockAssembler {
pub fn create_new_block(&self, store: &SharedStore, mempool: &MemoryPool, time: u32, network: Magic) -> BlockTemplate { pub fn create_new_block(&self, store: &SharedStore, mempool: &MemoryPool, time: u32, consensus: &ConsensusParams) -> BlockTemplate {
// get best block // get best block
// take its hash && height // take its hash && height
let best_block = store.best_block(); let best_block = store.best_block();
let previous_header_hash = best_block.hash; let previous_header_hash = best_block.hash;
let height = best_block.number + 1; let height = best_block.number + 1;
let bits = work_required(previous_header_hash.clone(), time, height, store.as_block_header_provider(), network); let bits = work_required(previous_header_hash.clone(), time, height, store.as_block_header_provider(), consensus);
let version = BLOCK_VERSION; let version = BLOCK_VERSION;
let mut coinbase_value = block_reward_satoshi(height); let mut coinbase_value = block_reward_satoshi(height);

View File

@ -282,7 +282,7 @@ impl<T, U, V> LocalNode<T, U, V> where T: TaskExecutor, U: Server, V: Client {
max_block_sigops: self.consensus.fork.max_block_sigops(height, max_block_size) as u32, max_block_sigops: self.consensus.fork.max_block_sigops(height, max_block_size) as u32,
}; };
let memory_pool = &*self.memory_pool.read(); let memory_pool = &*self.memory_pool.read();
block_assembler.create_new_block(&self.storage, memory_pool, time::get_time().sec as u32, self.consensus.magic) block_assembler.create_new_block(&self.storage, memory_pool, time::get_time().sec as u32, &self.consensus)
} }
/// Install synchronization events listener /// Install synchronization events listener

View File

@ -1,4 +1,4 @@
use network::{Magic, ConsensusParams}; use network::ConsensusParams;
use db::BlockHeaderProvider; use db::BlockHeaderProvider;
use canon::CanonHeader; use canon::CanonHeader;
use error::Error; use error::Error;
@ -21,7 +21,7 @@ impl<'a> HeaderAcceptor<'a> {
deployments: &'a Deployments, deployments: &'a Deployments,
) -> Self { ) -> Self {
HeaderAcceptor { HeaderAcceptor {
work: HeaderWork::new(header, store, height, consensus.magic), work: HeaderWork::new(header, store, height, consensus),
median_timestamp: HeaderMedianTimestamp::new(header, store, height, deployments, consensus), median_timestamp: HeaderMedianTimestamp::new(header, store, height, deployments, consensus),
version: HeaderVersion::new(header, height, consensus), version: HeaderVersion::new(header, height, consensus),
} }
@ -67,23 +67,23 @@ pub struct HeaderWork<'a> {
header: CanonHeader<'a>, header: CanonHeader<'a>,
store: &'a BlockHeaderProvider, store: &'a BlockHeaderProvider,
height: u32, height: u32,
network: Magic, consensus: &'a ConsensusParams,
} }
impl<'a> HeaderWork<'a> { impl<'a> HeaderWork<'a> {
fn new(header: CanonHeader<'a>, store: &'a BlockHeaderProvider, height: u32, network: Magic) -> Self { fn new(header: CanonHeader<'a>, store: &'a BlockHeaderProvider, height: u32, consensus: &'a ConsensusParams) -> Self {
HeaderWork { HeaderWork {
header: header, header: header,
store: store, store: store,
height: height, height: height,
network: network, consensus: consensus,
} }
} }
fn check(&self) -> Result<(), Error> { fn check(&self) -> Result<(), Error> {
let previous_header_hash = self.header.raw.previous_header_hash.clone(); let previous_header_hash = self.header.raw.previous_header_hash.clone();
let time = self.header.raw.time; let time = self.header.raw.time;
let work = work_required(previous_header_hash, time, self.height, self.store, self.network); let work = work_required(previous_header_hash, time, self.height, self.store, self.consensus);
if work == self.header.raw.bits { if work == self.header.raw.bits {
Ok(()) Ok(())
} else { } else {

View File

@ -2,8 +2,9 @@ use std::cmp;
use primitives::compact::Compact; use primitives::compact::Compact;
use primitives::hash::H256; use primitives::hash::H256;
use primitives::bigint::U256; use primitives::bigint::U256;
use network::Magic; use network::{Magic, ConsensusParams, ConsensusFork};
use db::{BlockHeaderProvider, BlockRef}; use db::{BlockHeaderProvider, BlockRef};
use timestamp::median_timestamp;
use constants::{ use constants::{
DOUBLE_SPACING_SECONDS, DOUBLE_SPACING_SECONDS,
@ -55,9 +56,10 @@ pub fn retarget_timespan(retarget_timestamp: u32, last_timestamp: u32) -> u32 {
} }
/// Returns work required for given header /// Returns work required for given header
pub fn work_required(parent_hash: H256, time: u32, height: u32, store: &BlockHeaderProvider, network: Magic) -> Compact { pub fn work_required(parent_hash: H256, time: u32, height: u32, store: &BlockHeaderProvider, consensus: &ConsensusParams) -> Compact {
let max_bits = consensus.magic.max_bits();
if height == 0 { if height == 0 {
return network.max_bits(); return max_bits;
} }
let parent_header = store.block_header(parent_hash.clone().into()).expect("self.height != 0; qed"); let parent_header = store.block_header(parent_hash.clone().into()).expect("self.height != 0; qed");
@ -73,14 +75,43 @@ pub fn work_required(parent_hash: H256, time: u32, height: u32, store: &BlockHea
// bits of last block // bits of last block
let last_bits = parent_header.bits; let last_bits = parent_header.bits;
return work_required_retarget(network.max_bits(), retarget_timestamp, last_timestamp, last_bits); return work_required_retarget(max_bits, retarget_timestamp, last_timestamp, last_bits);
} }
if network == Magic::Testnet { if consensus.magic == Magic::Testnet {
return work_required_testnet(parent_hash, time, height, store, network) return work_required_testnet(parent_hash, time, height, store, Magic::Testnet)
} }
parent_header.bits match consensus.fork {
_ if parent_header.bits == max_bits => parent_header.bits,
ConsensusFork::BitcoinCash(fork_height) if height >= fork_height => {
// REQ-7 Difficulty adjustment in case of hashrate drop
// In case the MTP of the tip of the chain is 12h or more after the MTP of the block 6 blocks before the tip,
// the proof of work target is increased by a quarter, or 25%, which corresponds to a difficulty
// reduction of 20%.
let ancient_block_ref = (height - 6 - 1).into();
let ancient_header = store.block_header(ancient_block_ref)
.expect("parent_header.bits != max_bits; difficulty is max_bits for first RETARGETING_INTERVAL height; RETARGETING_INTERVAL > 7; qed");
let ancient_timestamp = median_timestamp(&ancient_header, store);
let parent_timestamp = median_timestamp(&parent_header, store);
let timestamp_diff = parent_timestamp.checked_sub(ancient_timestamp).unwrap_or_default();
if timestamp_diff < 43_200 {
// less than 12h => no difficulty change needed
return parent_header.bits;
}
let mut new_bits: U256 = parent_header.bits.into();
let max_bits: U256 = max_bits.into();
new_bits = new_bits + (new_bits >> 2);
if new_bits > max_bits {
new_bits = max_bits
}
new_bits.into()
},
_ => parent_header.bits,
}
} }
pub fn work_required_testnet(parent_hash: H256, time: u32, height: u32, store: &BlockHeaderProvider, network: Magic) -> Compact { pub fn work_required_testnet(parent_hash: H256, time: u32, height: u32, store: &BlockHeaderProvider, network: Magic) -> Compact {