Merge pull request #271 from ethcore/block_assembler

Block assembler
Svyatoslav Nikolsky 2016-12-09 16:12:44 +03:00 committed by GitHub
commit 37e4896547
26 changed files with 835 additions and 372 deletions

Cargo.lock (generated)

@ -6,7 +6,6 @@ dependencies = [
"chain 0.1.0",
"db 0.1.0",
"ethcore-devtools 1.3.0",
"linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"network 0.1.0",
"parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@ -450,12 +449,16 @@ dependencies = [
name = "miner"
version = "0.1.0"
dependencies = [
"bitcrypto 0.1.0",
"byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
"chain 0.1.0",
"db 0.1.0",
"heapsize 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"network 0.1.0",
"primitives 0.1.0",
"serialization 0.1.0",
"test-data 0.1.0",
"verification 0.1.0",
]
[[package]]


@ -5,6 +5,7 @@ use ser::{
Serializable, Stream, serialize
};
use crypto::dhash256;
use compact::Compact;
use hash::H256;
#[derive(PartialEq, Clone)]
@ -13,7 +14,7 @@ pub struct BlockHeader {
pub previous_header_hash: H256,
pub merkle_root_hash: H256,
pub time: u32,
pub nbits: u32,
pub bits: Compact,
pub nonce: u32,
}
@ -30,7 +31,7 @@ impl fmt::Debug for BlockHeader {
.field("previous_header_hash", &self.previous_header_hash.reversed())
.field("merkle_root_hash", &self.merkle_root_hash.reversed())
.field("time", &self.time)
.field("nbits", &self.nbits)
.field("bits", &self.bits)
.field("nonce", &self.nonce)
.finish()
}
@ -43,7 +44,7 @@ impl Serializable for BlockHeader {
.append(&self.previous_header_hash)
.append(&self.merkle_root_hash)
.append(&self.time)
.append(&self.nbits)
.append(&self.bits)
.append(&self.nonce);
}
}
@ -55,7 +56,7 @@ impl Deserializable for BlockHeader {
previous_header_hash: try!(reader.read()),
merkle_root_hash: try!(reader.read()),
time: try!(reader.read()),
nbits: try!(reader.read()),
bits: try!(reader.read()),
nonce: try!(reader.read()),
};
@ -81,7 +82,7 @@ mod tests {
previous_header_hash: [2; 32].into(),
merkle_root_hash: [3; 32].into(),
time: 4,
nbits: 5,
bits: 5.into(),
nonce: 6,
};
@ -118,7 +119,7 @@ mod tests {
previous_header_hash: [2; 32].into(),
merkle_root_hash: [3; 32].into(),
time: 4,
nbits: 5,
bits: 5.into(),
nonce: 6,
};


@ -14,7 +14,7 @@ pub trait RepresentH256 {
}
pub use rustc_serialize::hex;
pub use primitives::{hash, bytes, uint};
pub use primitives::{hash, bytes, uint, compact};
pub use self::block::Block;
pub use self::block_header::BlockHeader;


@ -65,6 +65,13 @@ impl Deserializable for OutPoint {
}
impl OutPoint {
pub fn null() -> Self {
OutPoint {
hash: H256::default(),
index: u32::max_value(),
}
}
pub fn hash(&self) -> &H256 {
&self.hash
}


@ -0,0 +1,38 @@
use std::cmp;
use primitives::hash::H256;
use chain::{Transaction, OutPoint, TransactionOutput};
use PreviousTransactionOutputProvider;
#[derive(Debug)]
pub struct IndexedTransaction {
pub transaction: Transaction,
pub hash: H256,
}
impl From<Transaction> for IndexedTransaction {
fn from(t: Transaction) -> Self {
let hash = t.hash();
IndexedTransaction {
transaction: t,
hash: hash,
}
}
}
impl cmp::PartialEq for IndexedTransaction {
fn eq(&self, other: &Self) -> bool {
self.hash == other.hash
}
}
impl<'a> PreviousTransactionOutputProvider for &'a [IndexedTransaction] {
fn previous_transaction_output(&self, prevout: &OutPoint) -> Option<TransactionOutput> {
self.iter()
.find(|tx| tx.hash == prevout.hash)
.map(|tx| tx.transaction.outputs[prevout.index as usize].clone())
}
fn is_spent(&self, _prevout: &OutPoint) -> bool {
unimplemented!();
}
}


@ -28,6 +28,7 @@ mod transaction_meta_provider;
mod error;
mod update_context;
mod indexed_block;
mod indexed_transaction;
#[derive(Debug, Clone)]
pub enum BlockRef {
@ -72,6 +73,7 @@ pub use transaction_meta_provider::TransactionMetaProvider;
pub use block_stapler::{BlockStapler, BlockInsertedChain};
pub use block_provider::{BlockProvider, BlockHeaderProvider, AsBlockHeaderProvider};
pub use indexed_block::{IndexedBlock, IndexedTransactions};
pub use indexed_transaction::{IndexedTransaction};
#[cfg(feature="dev")]
pub use test_storage::TestStorage;


@ -28,5 +28,6 @@ pub trait AsTransactionProvider {
pub trait PreviousTransactionOutputProvider {
fn previous_transaction_output(&self, prevout: &chain::OutPoint) -> Option<chain::TransactionOutput>;
// TODO: this should not be here, because it requires metadata
fn is_spent(&self, prevout: &chain::OutPoint) -> bool;
}


@ -4,9 +4,13 @@ version = "0.1.0"
authors = ["Ethcore <admin@ethcore.io>"]
[dependencies]
byteorder = "0.5"
heapsize = "0.3"
bitcrypto = { path = "../crypto" }
chain = { path = "../chain" }
db = { path = "../db" }
network = { path = "../network" }
primitives = { path = "../primitives" }
serialization = { path = "../serialization" }
verification = { path = "../verification" }
test-data = { path = "../test-data" }


@ -0,0 +1,245 @@
use primitives::hash::H256;
use db::{SharedStore, IndexedTransaction};
use network::Magic;
use memory_pool::{MemoryPool, OrderingStrategy};
use verification::{
work_required, block_reward_satoshi, transaction_sigops,
StoreWithUnretainedOutputs, MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS
};
const BLOCK_VERSION: u32 = 0x20000000;
const BLOCK_HEADER_SIZE: u32 = 4 + 32 + 32 + 4 + 4 + 4;
/// Block template as described in BIP0022
/// Minimal version
/// [BIP0022](https://github.com/bitcoin/bips/blob/master/bip-0022.mediawiki#block-template-request)
pub struct BlockTemplate {
/// Version
pub version: u32,
/// The hash of previous block
pub previous_header_hash: H256,
/// The current time as seen by the server
pub time: u32,
/// The compressed difficulty
pub nbits: u32,
/// Block height
pub height: u32,
/// Block transactions (excluding coinbase)
pub transactions: Vec<IndexedTransaction>,
/// Total funds available for the coinbase (in Satoshis)
pub coinbase_value: u64,
/// Number of bytes allowed in the block
pub size_limit: u32,
/// Number of sigops allowed in the block
pub sigop_limit: u32,
}
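For orientation, these template fields correspond to the BIP 22 `getblocktemplate` keys roughly as follows (an informational sketch, not part of this change):
// version              -> "version"
// previous_header_hash -> "previousblockhash"
// time                 -> "curtime"
// nbits                -> "bits"
// height               -> "height"
// transactions         -> "transactions"
// coinbase_value       -> "coinbasevalue"
// size_limit           -> "sizelimit"
// sigop_limit          -> "sigoplimit"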
/// Block size and number of signature operations are limited.
/// This structure should be used for storing these values.
struct SizePolicy {
/// Current size
current_size: u32,
/// Max size
max_size: u32,
/// When current_size + size_buffer > max_size
/// we need to start finishing the block
size_buffer: u32,
/// Number of transactions checked since finishing started
finish_counter: u32,
/// Number of transactions to check when finishing the block
finish_limit: u32,
}
/// When appending a transaction, the sigop count and block size policies
/// must both agree to append it to the block
#[derive(Debug, PartialEq, Copy, Clone)]
enum NextStep {
/// Append the transaction, check the next one
Append,
/// Append the transaction, do not check the next one
FinishAndAppend,
/// Ignore transaction, check the next one
Ignore,
/// Ignore transaction, do not check the next one
FinishAndIgnore,
}
impl NextStep {
fn and(self, other: NextStep) -> Self {
match (self, other) {
(_, NextStep::FinishAndIgnore) |
(NextStep::FinishAndIgnore, _) |
(NextStep::FinishAndAppend, NextStep::Ignore) |
(NextStep::Ignore, NextStep::FinishAndAppend) => NextStep::FinishAndIgnore,
(NextStep::Ignore, _) |
(_, NextStep::Ignore) => NextStep::Ignore,
(_, NextStep::FinishAndAppend) |
(NextStep::FinishAndAppend, _) => NextStep::FinishAndAppend,
(NextStep::Append, NextStep::Append) => NextStep::Append,
}
}
}
impl SizePolicy {
fn new(current_size: u32, max_size: u32, size_buffer: u32, finish_limit: u32) -> Self {
SizePolicy {
current_size: current_size,
max_size: max_size,
size_buffer: size_buffer,
finish_counter: 0,
finish_limit: finish_limit,
}
}
fn decide(&mut self, size: u32) -> NextStep {
let finishing = self.current_size + self.size_buffer > self.max_size;
let fits = self.current_size + size <= self.max_size;
let finish = self.finish_counter + 1 >= self.finish_limit;
if finishing {
self.finish_counter += 1;
}
if fits {
self.current_size += size;
}
match (fits, finish) {
(true, true) => NextStep::FinishAndAppend,
(true, false) => NextStep::Append,
(false, true) => NextStep::FinishAndIgnore,
(false, false) => NextStep::Ignore,
}
}
}
/// Block assembler
pub struct BlockAssembler {
pub max_block_size: u32,
pub max_block_sigops: u32,
}
impl Default for BlockAssembler {
fn default() -> Self {
BlockAssembler {
max_block_size: MAX_BLOCK_SIZE as u32,
max_block_sigops: MAX_BLOCK_SIGOPS as u32,
}
}
}
impl BlockAssembler {
pub fn create_new_block(&self, store: &SharedStore, mempool: &MemoryPool, time: u32, network: Magic) -> BlockTemplate {
// get best block
// take its hash && height
let best_block = store.best_block().expect("Cannot assemble new block without genesis block");
let previous_header_hash = best_block.hash;
let height = best_block.number + 1;
let nbits = work_required(previous_header_hash.clone(), time, height, store.as_block_header_provider(), network);
let version = BLOCK_VERSION;
let mut block_size = SizePolicy::new(BLOCK_HEADER_SIZE, self.max_block_size, 1_000, 50);
let mut sigops = SizePolicy::new(0, self.max_block_sigops, 8, 50);
let mut coinbase_value = block_reward_satoshi(height);
let mut transactions = Vec::new();
// add priority transactions
BlockAssembler::fill_transactions(store, mempool, &mut block_size, &mut sigops, &mut coinbase_value, &mut transactions, OrderingStrategy::ByTransactionScore);
BlockTemplate {
version: version,
previous_header_hash: previous_header_hash,
time: time,
nbits: nbits.into(),
height: height,
transactions: transactions,
coinbase_value: coinbase_value,
size_limit: self.max_block_size,
sigop_limit: self.max_block_sigops,
}
}
fn fill_transactions(
store: &SharedStore,
mempool: &MemoryPool,
block_size: &mut SizePolicy,
sigops: &mut SizePolicy,
coinbase_value: &mut u64,
transactions: &mut Vec<IndexedTransaction>,
strategy: OrderingStrategy
) {
for entry in mempool.iter(strategy) {
let transaction_size = entry.size as u32;
let sigops_count = {
let txs: &[_] = &*transactions;
let unretained_store = StoreWithUnretainedOutputs::new(store, &txs);
let bip16_active = true;
transaction_sigops(&entry.transaction, &unretained_store, bip16_active) as u32
};
let size_step = block_size.decide(transaction_size);
let sigops_step = sigops.decide(sigops_count);
let transaction = IndexedTransaction {
transaction: entry.transaction.clone(),
hash: entry.hash.clone(),
};
match size_step.and(sigops_step) {
NextStep::Append => {
// miner_fee is i64, but we can safely cast it to u64
// memory pool should restrict miner fee to be positive
*coinbase_value += entry.miner_fee as u64;
transactions.push(transaction);
},
NextStep::FinishAndAppend => {
transactions.push(transaction);
break;
},
NextStep::Ignore => (),
NextStep::FinishAndIgnore => {
break;
},
}
}
}
}
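A hypothetical call site, assuming `store: SharedStore` and `mempool: MemoryPool` are set up elsewhere (a sketch only, not part of this diff):
let assembler = BlockAssembler::default();
let time = 1_481_000_000u32; // example unix timestamp; normally the node's adjusted clock
let template = assembler.create_new_block(&store, &mempool, time, Magic::Mainnet);
assert_eq!(template.version, 0x20000000);
assert_eq!(template.size_limit, 1_000_000);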
#[cfg(test)]
mod tests {
use super::{SizePolicy, NextStep};
#[test]
fn test_size_policy() {
let mut size_policy = SizePolicy::new(0, 1000, 200, 3);
assert_eq!(size_policy.decide(100), NextStep::Append);
assert_eq!(size_policy.decide(500), NextStep::Append);
assert_eq!(size_policy.decide(600), NextStep::Ignore);
assert_eq!(size_policy.decide(200), NextStep::Append);
assert_eq!(size_policy.decide(300), NextStep::Ignore);
assert_eq!(size_policy.decide(300), NextStep::Ignore);
// this transaction will make counter + buffer > max size
assert_eq!(size_policy.decide(1), NextStep::Append);
// so now only 3 more transactions may be accepted / ignored
assert_eq!(size_policy.decide(1), NextStep::Append);
assert_eq!(size_policy.decide(1000), NextStep::Ignore);
assert_eq!(size_policy.decide(1), NextStep::FinishAndAppend);
// we should not call decide again after it returned finish...
// but if we do, let's check that the result is still ok
assert_eq!(size_policy.decide(1000), NextStep::FinishAndIgnore);
}
#[test]
fn test_next_step_and() {
assert_eq!(NextStep::Append.and(NextStep::Append), NextStep::Append);
assert_eq!(NextStep::Ignore.and(NextStep::Append), NextStep::Ignore);
assert_eq!(NextStep::FinishAndIgnore.and(NextStep::Append), NextStep::FinishAndIgnore);
assert_eq!(NextStep::Ignore.and(NextStep::FinishAndIgnore), NextStep::FinishAndIgnore);
assert_eq!(NextStep::FinishAndAppend.and(NextStep::FinishAndIgnore), NextStep::FinishAndIgnore);
assert_eq!(NextStep::FinishAndAppend.and(NextStep::Ignore), NextStep::FinishAndIgnore);
assert_eq!(NextStep::FinishAndAppend.and(NextStep::Append), NextStep::FinishAndAppend);
}
}

miner/src/cpu_miner.rs (new file, 134 lines)

@ -0,0 +1,134 @@
use byteorder::{WriteBytesExt, LittleEndian};
use primitives::bytes::Bytes;
use primitives::hash::H256;
use primitives::uint::U256;
use chain::{merkle_root, Transaction};
use crypto::dhash256;
use ser::Stream;
use block_assembler::BlockTemplate;
use verification::is_valid_proof_of_work_hash;
/// Instead of serializing `BlockHeader` from scratch over and over again,
/// let's keep it serialized in memory and replace needed bytes
struct BlockHeaderBytes {
data: Bytes,
}
impl BlockHeaderBytes {
/// Creates new instance of block header bytes.
fn new(version: u32, previous_header_hash: H256, nbits: u32) -> Self {
let merkle_root_hash = H256::default();
let time = 0u32;
let nonce = 0u32;
let mut stream = Stream::default();
stream
.append(&version)
.append(&previous_header_hash)
.append(&merkle_root_hash)
.append(&time)
.append(&nbits)
.append(&nonce);
BlockHeaderBytes {
data: stream.out(),
}
}
/// Set merkle root hash
fn set_merkle_root_hash(&mut self, hash: &H256) {
let mut merkle_bytes: &mut [u8] = &mut self.data[4 + 32..4 + 32 + 32];
merkle_bytes.copy_from_slice(&**hash);
}
/// Set block header time
fn set_time(&mut self, time: u32) {
let mut time_bytes: &mut [u8] = &mut self.data[4 + 32 + 32..];
time_bytes.write_u32::<LittleEndian>(time).unwrap();
}
/// Set block header nonce
fn set_nonce(&mut self, nonce: u32) {
let mut nonce_bytes: &mut [u8] = &mut self.data[4 + 32 + 32 + 4 + 4..];
nonce_bytes.write_u32::<LittleEndian>(nonce).unwrap();
}
/// Returns block header hash
fn hash(&self) -> H256 {
dhash256(&self.data)
}
}
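The hard-coded offsets above index into the standard 80-byte serialized header (consistent with `BLOCK_HEADER_SIZE` in the block assembler); spelled out for reference:
// [ 0..4)   version               (u32, little-endian)
// [ 4..36)  previous_header_hash  (H256)
// [36..68)  merkle_root_hash      (H256)
// [68..72)  time                  (u32, little-endian)
// [72..76)  bits                  (u32, little-endian)
// [76..80)  nonce                 (u32, little-endian)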
/// This trait should be implemented by the coinbase transaction.
pub trait CoinbaseTransaction {
/// Protocols like stratum limit the number of extranonce bytes.
/// This function informs the miner about the maximum size of the extranonce.
fn max_extranonce(&self) -> U256;
/// Should be used to increase the number of hash possibilities for the miner
fn set_extranonce(&mut self, extranonce: &U256);
/// Returns transaction hash
fn hash(&self) -> H256;
/// Consumes self, returning the underlying transaction
fn drain(self) -> Transaction;
}
/// Cpu miner solution.
pub struct Solution {
/// Block header nonce.
pub nonce: u32,
/// Coinbase transaction extra nonce (modifiable by miner).
pub extranonce: U256,
/// Block header time.
pub time: u32,
/// Coinbase transaction (extranonce is already set).
pub coinbase_transaction: Transaction,
}
/// Simple bitcoin cpu miner.
/// First it tries to find a solution by changing the block header nonce.
/// Once all nonce values have been tried, it increases the extranonce.
/// Once all of those have been tried as well (quite unlikely on cpu ;)
/// and a solution still hasn't been found, it returns None.
/// It's possible to also experiment with time, but I find that pointless
/// to implement on CPU.
pub fn find_solution<T>(block: BlockTemplate, mut coinbase_transaction: T) -> Option<Solution> where T: CoinbaseTransaction {
let max_extranonce = coinbase_transaction.max_extranonce();
let mut extranonce = U256::default();
let mut header_bytes = BlockHeaderBytes::new(block.version, block.previous_header_hash, block.nbits);
// update header with time
header_bytes.set_time(block.time);
while extranonce < max_extranonce {
// update coinbase transaction with new extranonce
coinbase_transaction.set_extranonce(&extranonce);
// recalculate merkle root hash
let mut merkle_tree = vec![coinbase_transaction.hash()];
merkle_tree.extend(block.transactions.iter().map(|tx| tx.hash.clone()));
let merkle_root_hash = merkle_root(&merkle_tree);
// update header with new merkle root hash
header_bytes.set_merkle_root_hash(&merkle_root_hash);
for nonce in 0..(u32::max_value() as u64 + 1) {
// update header with new nonce
header_bytes.set_nonce(nonce as u32);
let hash = header_bytes.hash();
if is_valid_proof_of_work_hash(block.nbits.into(), &hash) {
let solution = Solution {
nonce: nonce as u32,
extranonce: extranonce,
time: block.time,
coinbase_transaction: coinbase_transaction.drain(),
};
return Some(solution);
}
}
extranonce = extranonce + 1.into();
}
None
}


@ -1,20 +1,18 @@
use chain::Transaction;
use ser::Serializable;
use db::TransactionProvider;
pub fn transaction_fee(store: &TransactionProvider, transaction: &Transaction) -> u64 {
let inputs_sum = transaction.inputs.iter()
.fold(0, |accumulator, input| {
let input_transaction = store.transaction(&input.previous_output.hash)
.expect("transaction must be verified by caller");
accumulator + input_transaction.outputs[input.previous_output.index as usize].value
});
let inputs_sum = transaction.inputs.iter().map(|input| {
let input_transaction = store.transaction(&input.previous_output.hash)
.expect("transaction must be verified by caller");
input_transaction.outputs[input.previous_output.index as usize].value
}).sum::<u64>();
let outputs_sum = transaction.outputs.iter().map(|output| output.value).sum();
inputs_sum.saturating_sub(outputs_sum)
}
pub fn transaction_fee_rate(store: &TransactionProvider, transaction: &Transaction) -> u64 {
use ser::Serializable;
transaction_fee(store, transaction) / transaction.serialized_size() as u64
}
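Toy numbers for the two helpers above (a sketch of the arithmetic, not a real store lookup): a transaction claiming 100,000 satoshis of inputs and creating 99,500 of outputs pays a 500-satoshi fee, and at 250 serialized bytes that is a rate of 2 satoshis per byte.
let fee = 100_000u64.saturating_sub(99_500);
assert_eq!(fee, 500);
assert_eq!(fee / 250, 2); // fee / serialized_size, as in transaction_fee_rate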


@ -1,12 +1,21 @@
extern crate byteorder;
extern crate heapsize;
extern crate bitcrypto as crypto;
extern crate chain;
extern crate db;
extern crate heapsize;
extern crate network;
extern crate primitives;
extern crate serialization as ser;
extern crate verification;
extern crate test_data;
mod block_assembler;
mod cpu_miner;
mod fee;
mod memory_pool;
pub use fee::{transaction_fee, transaction_fee_rate};
pub use block_assembler::BlockAssembler;
pub use cpu_miner::find_solution;
pub use memory_pool::{MemoryPool, Information as MemoryPoolInformation, OrderingStrategy as MemoryPoolOrderingStrategy};
pub use fee::{transaction_fee, transaction_fee_rate};


@ -50,25 +50,25 @@ pub struct MemoryPool {
#[derive(Debug)]
pub struct Entry {
/// Transaction
transaction: Transaction,
pub transaction: Transaction,
/// In-pool ancestors hashes for this transaction
ancestors: HashSet<H256>,
pub ancestors: HashSet<H256>,
/// Transaction hash (stored for efficiency)
hash: H256,
pub hash: H256,
/// Transaction size (stored for efficiency)
size: usize,
pub size: usize,
/// Sequential index of this transaction in the memory pool (non-persistent)
storage_index: u64,
pub storage_index: u64,
/// Transaction fee (stored for efficiency)
miner_fee: i64,
pub miner_fee: i64,
/// Virtual transaction fee (a way to prioritize/penalize transaction)
miner_virtual_fee: i64,
pub miner_virtual_fee: i64,
/// size + Sum(size) for all in-pool descendants
package_size: usize,
pub package_size: usize,
/// miner_fee + Sum(miner_fee) for all in-pool descendants
package_miner_fee: i64,
pub package_miner_fee: i64,
/// miner_virtual_fee + Sum(miner_virtual_fee) for all in-pool descendants
package_miner_virtual_fee: i64,
pub package_miner_virtual_fee: i64,
}
/// Multi-index transactions storage
@ -624,7 +624,7 @@ impl MemoryPool {
/// Ancestors are always returned before descendant transactions.
/// Use this function with care, only if really needed (heavy memory usage)
pub fn read_n_with_strategy(&mut self, n: usize, strategy: OrderingStrategy) -> Vec<H256> {
self.iter(strategy).take(n).collect()
self.iter(strategy).map(|entry| entry.hash.clone()).take(n).collect()
}
/// Removes the 'top' transaction from the `MemoryPool` using selected strategy.
@ -772,7 +772,7 @@ impl<'a> MemoryPoolIterator<'a> {
}
impl<'a> Iterator for MemoryPoolIterator<'a> {
type Item = H256;
type Item = &'a Entry;
fn next(&mut self) -> Option<Self::Item> {
let top_hash = match self.strategy {
@ -781,13 +781,12 @@ impl<'a> Iterator for MemoryPoolIterator<'a> {
OrderingStrategy::ByPackageScore => self.references.ordered.by_package_score.iter().map(|entry| entry.hash.clone()).nth(0),
};
if let Some(ref top_hash) = top_hash {
let entry = self.memory_pool.storage.by_hash.get(top_hash).expect("missing hash is a sign of MemoryPool internal inconsistency");
top_hash.map(|top_hash| {
let entry = self.memory_pool.storage.by_hash.get(&top_hash).expect("missing hash is a sign of MemoryPool internal inconsistency");
self.removed.insert(top_hash.clone());
self.references.remove(Some(&self.removed), &self.memory_pool.storage.by_hash, entry);
}
top_hash
entry
})
}
}


@ -5,7 +5,7 @@ extern crate serialization as ser;
mod consensus;
mod magic;
pub use primitives::hash;
pub use primitives::{hash, compact};
pub use consensus::ConsensusParams;
pub use magic::Magic;


@ -1,6 +1,7 @@
//! Bitcoin network magic number
//! https://www.anintegratedworld.com/unravelling-the-mysterious-block-chain-magic-number/
use compact::Compact;
use ser::{Stream, Serializable};
use chain::Block;
use super::ConsensusParams;
@ -9,9 +10,9 @@ const MAGIC_MAINNET: u32 = 0xD9B4BEF9;
const MAGIC_TESTNET: u32 = 0x0709110B;
const MAGIC_REGTEST: u32 = 0xDAB5BFFA;
const MAX_NBITS_MAINNET: u32 = 0x1d00ffff;
const MAX_NBITS_TESTNET: u32 = 0x1d00ffff;
const MAX_NBITS_REGTEST: u32 = 0x207fffff;
const MAX_BITS_MAINNET: u32 = 0x1d00ffff;
const MAX_BITS_TESTNET: u32 = 0x1d00ffff;
const MAX_BITS_REGTEST: u32 = 0x207fffff;
/// Bitcoin network
/// https://bitcoin.org/en/glossary/mainnet
@ -50,12 +51,12 @@ impl From<u32> for Magic {
}
impl Magic {
pub fn max_nbits(&self) -> u32 {
pub fn max_bits(&self) -> Compact {
match *self {
Magic::Mainnet | Magic::Other(_) => MAX_NBITS_MAINNET,
Magic::Testnet => MAX_NBITS_TESTNET,
Magic::Regtest => MAX_NBITS_REGTEST,
}
Magic::Mainnet | Magic::Other(_) => MAX_BITS_MAINNET,
Magic::Testnet => MAX_BITS_TESTNET,
Magic::Regtest => MAX_BITS_REGTEST,
}.into()
}
pub fn port(&self) -> u16 {
@ -97,7 +98,7 @@ impl Serializable for Magic {
mod tests {
use super::{
Magic, MAGIC_MAINNET, MAGIC_TESTNET, MAGIC_REGTEST,
MAX_NBITS_MAINNET, MAX_NBITS_TESTNET, MAX_NBITS_REGTEST,
MAX_BITS_MAINNET, MAX_BITS_TESTNET, MAX_BITS_REGTEST,
};
#[test]
@ -112,10 +113,10 @@ mod tests {
}
#[test]
fn test_network_max_nbits() {
assert_eq!(Magic::Mainnet.max_nbits(), MAX_NBITS_MAINNET);
assert_eq!(Magic::Testnet.max_nbits(), MAX_NBITS_TESTNET);
assert_eq!(Magic::Regtest.max_nbits(), MAX_NBITS_REGTEST);
fn test_network_max_bits() {
assert_eq!(Magic::Mainnet.max_bits(), MAX_BITS_MAINNET.into());
assert_eq!(Magic::Testnet.max_bits(), MAX_BITS_TESTNET.into());
assert_eq!(Magic::Regtest.max_bits(), MAX_BITS_REGTEST.into());
}
#[test]


@ -15,13 +15,26 @@ impl From<Compact> for u32 {
}
}
impl From<U256> for Compact {
fn from(u: U256) -> Self {
Compact::from_u256(u)
}
}
impl From<Compact> for U256 {
fn from(c: Compact) -> Self {
// ignore overflows and negative values
c.to_u256().unwrap_or_else(|x| x)
}
}
impl Compact {
pub fn new(u: u32) -> Self {
Compact(u)
}
/// Computes the target [0, T] that a blockhash must land in to be valid
/// Returns None, if there is an overflow or its negative value
/// Returns the value in the error variant, if there is an overflow or the value is negative
pub fn to_u256(&self) -> Result<U256, U256> {
let size = self.0 >> 24;
let mut word = self.0 & 0x007fffff;
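The rule being applied here: a compact value packs a one-byte base-256 exponent and a three-byte mantissa, so the target is mantissa * 256^(exponent - 3). A minimal self-contained sketch, ignoring the sign bit and the overflow cases that the real `to_u256` reports through its error variant:
fn decode_compact_sketch(bits: u32) -> [u8; 32] {
    let exponent = (bits >> 24) as usize; // number of significant bytes
    let mantissa = bits & 0x007f_ffff;    // 23-bit mantissa, sign bit masked off
    let mut target = [0u8; 32];           // big-endian 256-bit target
    for i in 0..3 {
        // place the mantissa so the value equals mantissa * 256^(exponent - 3)
        target[32 - exponent + i] = ((mantissa >> (8 * (2 - i))) & 0xff) as u8;
    }
    target // assumes 3 <= exponent <= 32
}
// 0x1d00ffff (the mainnet limit) decodes to
// 00000000ffff0000000000000000000000000000000000000000000000000000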


@ -4,6 +4,7 @@
extern crate rustc_serialize;
pub mod bytes;
pub mod compact;
pub mod hash;
pub mod uint;


@ -1,6 +1,7 @@
use std::io;
use bytes::Bytes;
use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian};
use bytes::Bytes;
use compact::Compact;
use hash::{H32, H48, H96, H160, H256, H264, H512, H520};
use compact_integer::CompactInteger;
use {Serializable, Stream, Deserializable, Reader, Error};
@ -219,6 +220,18 @@ impl Deserializable for Bytes {
}
}
impl Serializable for Compact {
fn serialize(&self, stream: &mut Stream) {
stream.append(&u32::from(*self));
}
}
impl Deserializable for Compact {
fn deserialize<T>(reader: &mut Reader<T>) -> Result<Self, Error> where T: io::Read {
reader.read::<u32>().map(Compact::new)
}
}
#[cfg(test)]
mod tests {
use bytes::Bytes;


@ -6,7 +6,7 @@ mod impls;
pub mod reader;
pub mod stream;
pub use primitives::{hash, bytes};
pub use primitives::{hash, bytes, compact};
pub use compact_integer::CompactInteger;
pub use self::reader::{Reader, Deserializable, deserialize, deserialize_iterator, ReadIterator, Error};


@ -4,6 +4,7 @@ use super::genesis;
use chain;
use primitives::hash::H256;
use primitives::bytes::Bytes;
use primitives::compact::Compact;
use invoke::{Invoke, Identity};
use std::cell::Cell;
@ -178,7 +179,7 @@ pub struct BlockHeaderBuilder<F=Identity> {
time: u32,
parent: H256,
nonce: u32,
nbits: u32,
bits: Compact,
version: u32,
merkle_root: H256,
}
@ -189,9 +190,9 @@ impl<F> BlockHeaderBuilder<F> where F: Invoke<chain::BlockHeader> {
callback: callback,
time: TIMESTAMP_COUNTER.with(|counter| { let val = counter.get(); counter.set(val+1); val }),
nonce: 0,
merkle_root: H256::from(0),
parent: H256::from(0),
nbits: 0,
merkle_root: 0.into(),
parent: 0.into(),
bits: 0.into(),
version: 1,
}
}
@ -211,8 +212,8 @@ impl<F> BlockHeaderBuilder<F> where F: Invoke<chain::BlockHeader> {
self
}
pub fn nbits(mut self, nbits: u32) -> Self {
self.nbits = nbits;
pub fn bits(mut self, bits: Compact) -> Self {
self.bits = bits;
self
}
@ -226,7 +227,7 @@ impl<F> BlockHeaderBuilder<F> where F: Invoke<chain::BlockHeader> {
chain::BlockHeader {
time: self.time,
previous_header_hash: self.parent,
nbits: self.nbits,
bits: self.bits,
nonce: self.nonce,
merkle_root_hash: self.merkle_root,
version: self.version,


@ -6,7 +6,6 @@ authors = ["Nikolay Volf <nikvolf@gmail.com>"]
[dependencies]
byteorder = "0.5"
parking_lot = "0.3"
linked-hash-map = "0.3"
time = "0.1"
log = "0.3"
scoped-pool = "1.0"


@ -1,22 +1,36 @@
//! Bitcoin chain verifier
use std::collections::BTreeSet;
use db::{self, BlockLocation, PreviousTransactionOutputProvider, BlockHeaderProvider, BlockRef};
use network::{Magic, ConsensusParams};
use script::Script;
use super::{Verify, VerificationResult, Chain, Error, TransactionError};
use {chain, utils};
use scoped_pool::Pool;
use primitives::hash::H256;
use hash::H256;
use db::{self, BlockLocation, PreviousTransactionOutputProvider, BlockHeaderProvider};
use network::{Magic, ConsensusParams};
use error::{Error, TransactionError};
use sigops::{StoreWithUnretainedOutputs, transaction_sigops};
use {Verify, chain, utils};
const BLOCK_MAX_FUTURE: i64 = 2 * 60 * 60; // 2 hours
const COINBASE_MATURITY: u32 = 100; // 100 blocks
const MAX_BLOCK_SIGOPS: usize = 20000;
const MAX_BLOCK_SIZE: usize = 1000000;
pub const MAX_BLOCK_SIZE: usize = 1_000_000;
pub const MAX_BLOCK_SIGOPS: usize = 20_000;
const TRANSACTIONS_VERIFY_THREADS: usize = 4;
const TRANSACTIONS_VERIFY_PARALLEL_THRESHOLD: usize = 16;
#[derive(PartialEq, Debug)]
/// Block verification chain
pub enum Chain {
/// Main chain
Main,
/// Side chain
Side,
/// Orphan (no known parent)
Orphan,
}
/// Verification result
pub type VerificationResult = Result<Chain, Error>;
pub struct ChainVerifier {
store: db::SharedStore,
skip_pow: bool,
@ -58,49 +72,14 @@ impl ChainVerifier {
height >= self.consensus_params.bip65_height
}
/// Returns previous transaction output.
/// NOTE: This function expects all previous blocks to be already in database.
fn previous_transaction_output<T>(&self, prevout_provider: &T, prevout: &chain::OutPoint) -> Option<chain::TransactionOutput>
where T: PreviousTransactionOutputProvider {
self.store.transaction(&prevout.hash)
.and_then(|tx| tx.outputs.into_iter().nth(prevout.index as usize))
.or_else(|| prevout_provider.previous_transaction_output(prevout))
}
/// Returns number of transaction signature operations.
/// NOTE: This function expects all previous blocks to be already in database.
fn transaction_sigops(&self, block: &db::IndexedBlock, transaction: &chain::Transaction, bip16_active: bool) -> usize {
let output_sigops: usize = transaction.outputs.iter().map(|output| {
let output_script: Script = output.script_pubkey.clone().into();
output_script.sigops_count(false)
}).sum();
if transaction.is_coinbase() {
return output_sigops;
}
let input_sigops: usize = transaction.inputs.iter().map(|input| {
let input_script: Script = input.script_sig.clone().into();
let mut sigops = input_script.sigops_count(false);
if bip16_active {
let previous_output = self.previous_transaction_output(block, &input.previous_output)
.expect("missing tx, out of order verification or malformed db");
let prevout_script: Script = previous_output.script_pubkey.into();
sigops += input_script.pay_to_script_hash_sigops(&prevout_script);
}
sigops
}).sum();
input_sigops + output_sigops
}
/// Returns number of block signature operations.
/// NOTE: This function expects all previous blocks to be already in database.
fn block_sigops(&self, block: &db::IndexedBlock) -> usize {
// strict pay-to-script-hash signature operations count toward block
// signature operations limit is enforced with BIP16
let store = StoreWithUnretainedOutputs::new(&self.store, block);
let bip16_active = self.verify_p2sh(block.header().time);
block.transactions().map(|(_, tx)| self.transaction_sigops(block, tx, bip16_active)).sum()
block.transactions().map(|(_, tx)| transaction_sigops(tx, &store, bip16_active)).sum()
}
fn ordered_verify(&self, block: &db::IndexedBlock, at_height: u32) -> Result<(), Error> {
@ -116,10 +95,18 @@ impl ChainVerifier {
let block_hash = block.hash();
// check that difficulty matches the adjusted level
if let Some(work) = self.work_required(block, at_height) {
if !self.skip_pow && work != block.header().nbits {
//if let Some(work) = self.work_required(block, at_height) {
if at_height != 0 && !self.skip_pow {
let work = utils::work_required(
block.header().previous_header_hash.clone(),
block.header().time,
at_height,
self.store.as_block_header_provider(),
self.network
);
if !self.skip_pow && work != block.header().bits {
trace!(target: "verification", "pow verification error at height: {}", at_height);
trace!(target: "verification", "expected work: {}, got {}", work, block.header().nbits);
trace!(target: "verification", "expected work: {:?}, got {:?}", work, block.header().bits);
return Err(Error::Difficulty);
}
}
@ -139,6 +126,7 @@ impl ChainVerifier {
}
}
let unretained_store = StoreWithUnretainedOutputs::new(&self.store, block);
let mut total_unspent = 0u64;
for (tx_index, (_, tx)) in block.transactions().enumerate().skip(1) {
let mut total_claimed: u64 = 0;
@ -153,7 +141,7 @@ impl ChainVerifier {
}
}
let previous_output = self.previous_transaction_output(block, &input.previous_output)
let previous_output = unretained_store.previous_transaction_output(&input.previous_output)
.expect("missing tx, out of order verification or malformed db");
total_claimed += previous_output.value;
@ -201,10 +189,11 @@ impl ChainVerifier {
// must not be coinbase (sequence = 0 is returned above)
if transaction.is_coinbase() { return Err(TransactionError::MisplacedCoinbase(sequence)); }
let unretained_store = StoreWithUnretainedOutputs::new(&self.store, prevout_provider);
for (input_index, input) in transaction.inputs().iter().enumerate() {
// signature verification
let signer: TransactionInputSigner = transaction.clone().into();
let paired_output = match self.previous_transaction_output(prevout_provider, &input.previous_output) {
let paired_output = match unretained_store.previous_transaction_output(&input.previous_output) {
Some(output) => output,
_ => return Err(TransactionError::UnknownReference(input.previous_output.hash.clone()))
};
@ -246,7 +235,7 @@ impl ChainVerifier {
header: &chain::BlockHeader
) -> Result<(), Error> {
// target difficulty threshold
if !self.skip_pow && !utils::check_nbits(self.network.max_nbits(), hash, header.nbits) {
if !self.skip_pow && !utils::is_valid_proof_of_work(self.network.max_bits(), header.bits, hash) {
return Err(Error::Pow);
}
@ -375,63 +364,6 @@ impl ChainVerifier {
}
else { None }
}
fn work_required_testnet(&self, header: &chain::BlockHeader, height: u32) -> u32 {
let mut bits = Vec::new();
let mut block_ref: BlockRef = header.previous_header_hash.clone().into();
let parent_header = self.store.block_header(block_ref.clone()).expect("can be called only during ordered verification");
let max_time_gap = parent_header.time + utils::DOUBLE_SPACING_SECONDS;
if header.time > max_time_gap {
return self.network.max_nbits();
}
// TODO: optimize it, so it does not make 2016!!! redundant queries each time
for _ in 0..utils::RETARGETING_INTERVAL {
let previous_header = match self.store.block_header(block_ref) {
Some(h) => h,
None => { break; }
};
bits.push(previous_header.nbits);
block_ref = previous_header.previous_header_hash.into();
}
for (index, bit) in bits.into_iter().enumerate() {
if bit != self.network.max_nbits() || utils::is_retarget_height(height - index as u32 - 1) {
return bit;
}
}
self.network.max_nbits()
}
fn work_required(&self, block: &db::IndexedBlock, height: u32) -> Option<u32> {
if height == 0 {
return None;
}
let previous_ref = block.header().previous_header_hash.clone().into();
let previous_header = self.store.block_header(previous_ref).expect("self.height != 0; qed");
if utils::is_retarget_height(height) {
let retarget_ref = (height - utils::RETARGETING_INTERVAL).into();
let retarget_header = self.store.block_header(retarget_ref).expect("self.height != 0 && self.height % RETARGETING_INTERVAL == 0; qed");
// timestamp of block(height - RETARGETING_INTERVAL)
let retarget_timestamp = retarget_header.time;
// timestamp of parent block
let last_timestamp = previous_header.time;
// nbits of last block
let last_nbits = previous_header.nbits;
return Some(utils::work_required_retarget(self.network.max_nbits(), retarget_timestamp, last_timestamp, last_nbits));
}
if let Magic::Testnet = self.network {
return Some(self.work_required_testnet(block.header(), height));
}
Some(previous_header.nbits)
}
}
impl Verify for ChainVerifier {

verification/src/error.rs (new file, 63 lines)

@ -0,0 +1,63 @@
use primitives::hash::H256;
#[derive(Debug, PartialEq)]
/// All possible verification errors
pub enum Error {
/// has an equal duplicate in the chain
Duplicate,
/// No transactions in block
Empty,
/// Invalid proof-of-work (Block hash does not satisfy nBits)
Pow,
/// Futuristic timestamp
FuturisticTimestamp,
/// Invalid timestamp
Timestamp,
/// First transaction is not a coinbase transaction
Coinbase,
/// One of the transactions is invalid (corresponding index and specific transaction error)
Transaction(usize, TransactionError),
/// nBits do not match difficulty rules
Difficulty,
/// Invalid merkle root
MerkleRoot,
/// Coinbase spends too much
CoinbaseOverspend { expected_max: u64, actual: u64 },
/// Maximum number of sigops exceeded - the total is not reported,
/// since counting stops once `MAX_BLOCK_SIGOPS` is reached
MaximumSigops,
/// Coinbase signature is not in the range 2-100
CoinbaseSignatureLength(usize),
/// Block size is invalid
Size(usize),
/// Block transactions are not final.
NonFinalBlock,
}
#[derive(Debug, PartialEq)]
/// Possible transactions verification errors
pub enum TransactionError {
/// No corresponding output found for the transaction input
Input(usize),
/// Referenced coinbase output for the transaction input is not mature enough
Maturity,
/// Signature invalid for given input
Signature(usize),
/// Unknown previous transaction referenced
UnknownReference(H256),
/// Spends more than claims
Overspend,
/// Signature script can't be properly parsed
SignatureMallformed(String),
/// Too many signature operations
Sigops(usize),
/// Too many signature operations once p2sh operations included
SigopsP2SH(usize),
/// Coinbase transaction is found at position that is not 0
MisplacedCoinbase(usize),
/// Not fully spent transaction with the same hash already exists, bip30.
UnspentTransactionWithTheSameHash,
/// Using output that is surely spent
UsingSpentOutput(H256, u32),
}


@ -2,10 +2,10 @@
extern crate byteorder;
extern crate parking_lot;
extern crate linked_hash_map;
extern crate time;
#[macro_use]
extern crate log;
extern crate scoped_pool;
extern crate db;
extern crate chain;
@ -13,7 +13,6 @@ extern crate network;
extern crate primitives;
extern crate serialization;
extern crate script;
extern crate scoped_pool;
#[cfg(test)]
extern crate ethcore_devtools as devtools;
@ -21,100 +20,17 @@ extern crate ethcore_devtools as devtools;
extern crate test_data;
mod chain_verifier;
mod compact;
mod utils;
mod error;
mod sigops;
mod task;
mod utils;
pub use primitives::{uint, hash};
pub use primitives::{uint, hash, compact};
pub use chain_verifier::ChainVerifier;
use primitives::hash::H256;
#[derive(Debug, PartialEq)]
/// All possible verification errors
pub enum Error {
/// has an equal duplicate in the chain
Duplicate,
/// No transactions in block
Empty,
/// Invalid proof-of-work (Block hash does not satisfy nBits)
Pow,
/// Futuristic timestamp
FuturisticTimestamp,
/// Invalid timestamp
Timestamp,
/// First transaction is not a coinbase transaction
Coinbase,
/// One of the transactions is invalid (corresponding index and specific transaction error)
Transaction(usize, TransactionError),
/// nBits do not match difficulty rules
Difficulty,
/// Invalid merkle root
MerkleRoot,
/// Coinbase spends too much
CoinbaseOverspend { expected_max: u64, actual: u64 },
/// Maximum sigops operations exceeded - will not provide how much it was in total
/// since it stops counting once `MAX_BLOCK_SIGOPS` is reached
MaximumSigops,
/// Coinbase signature is not in the range 2-100
CoinbaseSignatureLength(usize),
/// Block size is invalid
Size(usize),
/// Block transactions are not final.
NonFinalBlock,
}
#[derive(Debug, PartialEq)]
/// Possible transactions verification errors
pub enum TransactionError {
/// Not found corresponding output for transaction input
Input(usize),
/// Referenced coinbase output for the transaction input is not mature enough
Maturity,
/// Signature invalid for given input
Signature(usize),
/// Unknown previous transaction referenced
UnknownReference(H256),
/// Spends more than claims
Overspend,
/// Signature script can't be properly parsed
SignatureMallformed(String),
/// Too many signature operations
Sigops(usize),
/// Too many signature operations once p2sh operations included
SigopsP2SH(usize),
/// Coinbase transaction is found at position that is not 0
MisplacedCoinbase(usize),
/// Not fully spent transaction with the same hash already exists, bip30.
UnspentTransactionWithTheSameHash,
/// Using output that is surely spent
UsingSpentOutput(H256, u32),
}
#[derive(PartialEq, Debug)]
/// Block verification chain
pub enum Chain {
/// Main chain
Main,
/// Side chain
Side,
/// Orphan (no known parent)
Orphan,
}
#[derive(PartialEq, Debug)]
/// block status within the queue
pub enum BlockStatus {
Valid,
Invalid,
Pending,
Absent,
Verifying,
}
/// Verification result
pub type VerificationResult = Result<Chain, Error>;
pub use chain_verifier::{Chain, ChainVerifier, VerificationResult, MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS};
pub use error::{Error, TransactionError};
pub use sigops::{transaction_sigops, StoreWithUnretainedOutputs};
pub use utils::{work_required, is_valid_proof_of_work, is_valid_proof_of_work_hash, block_reward_satoshi};
/// Interface for block verification
pub trait Verify : Send + Sync {


@ -0,0 +1,58 @@
use chain::{Transaction, TransactionOutput, OutPoint};
use db::{PreviousTransactionOutputProvider, SharedStore};
use script::Script;
pub struct StoreWithUnretainedOutputs<'a, T> where T: 'a {
store: &'a SharedStore,
outputs: &'a T,
}
impl<'a, T> StoreWithUnretainedOutputs<'a, T> where T: PreviousTransactionOutputProvider {
pub fn new(store: &'a SharedStore, outputs: &'a T) -> Self {
StoreWithUnretainedOutputs {
store: store,
outputs: outputs,
}
}
}
impl<'a, T> PreviousTransactionOutputProvider for StoreWithUnretainedOutputs<'a, T> where T: PreviousTransactionOutputProvider {
fn previous_transaction_output(&self, prevout: &OutPoint) -> Option<TransactionOutput> {
self.store.transaction(&prevout.hash)
.and_then(|tx| tx.outputs.into_iter().nth(prevout.index as usize))
.or_else(|| self.outputs.previous_transaction_output(prevout))
}
fn is_spent(&self, _prevout: &OutPoint) -> bool {
unimplemented!();
}
}
pub fn transaction_sigops(
transaction: &Transaction,
store: &PreviousTransactionOutputProvider,
bip16_active: bool
) -> usize {
let output_sigops: usize = transaction.outputs.iter().map(|output| {
let output_script: Script = output.script_pubkey.clone().into();
output_script.sigops_count(false)
}).sum();
if transaction.is_coinbase() {
return output_sigops;
}
let input_sigops: usize = transaction.inputs.iter().map(|input| {
let input_script: Script = input.script_sig.clone().into();
let mut sigops = input_script.sigops_count(false);
if bip16_active {
let previous_output = store.previous_transaction_output(&input.previous_output)
.expect("missing tx, out of order verification or malformed db");
let prevout_script: Script = previous_output.script_pubkey.into();
sigops += input_script.pay_to_script_hash_sigops(&prevout_script);
}
sigops
}).sum();
input_sigops + output_sigops
}


@ -1,15 +1,13 @@
#![allow(dead_code)]
//! Verification utilities
use std::cmp;
use hash::H256;
use uint::U256;
use byteorder::{BigEndian, ByteOrder};
use compact::Compact;
use primitives::compact::Compact;
use primitives::hash::H256;
use primitives::uint::U256;
use network::Magic;
use db::{BlockHeaderProvider, BlockRef};
// Timespan constants
const RETARGETING_FACTOR: u32 = 4;
const TARGET_SPACING_SECONDS: u32 = 10 * 60;
pub const DOUBLE_SPACING_SECONDS: u32 = 2 * TARGET_SPACING_SECONDS;
const DOUBLE_SPACING_SECONDS: u32 = 2 * TARGET_SPACING_SECONDS;
const TARGET_TIMESPAN_SECONDS: u32 = 2 * 7 * 24 * 60 * 60;
// The upper and lower bounds for retargeting timespan
@ -23,83 +21,117 @@ pub fn is_retarget_height(height: u32) -> bool {
height % RETARGETING_INTERVAL == 0
}
fn retarget_timespan(retarget_timestamp: u32, last_timestamp: u32) -> u32 {
// According to libbitcoin we need to
// Subtract 32 bit numbers in 64 bit space and constrain result to 32 bits.
let timespan = last_timestamp as u64 - retarget_timestamp as u64;
range_constrain(timespan, MIN_TIMESPAN as u64, MAX_TIMESPAN as u64) as u32
}
pub fn work_required_retarget(max_nbits: u32, retarget_timestamp: u32, last_timestamp: u32, last_nbits: u32) -> u32 {
// ignore overflows here
let mut retarget = Compact::new(last_nbits).to_u256().unwrap_or_else(|x| x);
let maximum = Compact::new(max_nbits).to_u256().unwrap_or_else(|x| x);
// multiplication overflow potential
retarget = retarget * U256::from(retarget_timespan(retarget_timestamp, last_timestamp));
retarget = retarget / U256::from(TARGET_TIMESPAN_SECONDS);
if retarget > maximum {
Compact::from_u256(maximum).into()
} else {
Compact::from_u256(retarget).into()
}
}
fn range_constrain(value: u64, min: u64, max: u64) -> u64 {
fn range_constrain(value: i64, min: i64, max: i64) -> i64 {
cmp::min(cmp::max(value, min), max)
}
/// Simple nbits check that does not require 256-bit arithmetic
pub fn check_nbits(max_nbits: u32, hash: &H256, n_bits: u32) -> bool {
if n_bits > max_nbits {
return false;
}
/// Returns true if the hash is less than or equal to the target represented by compact bits
pub fn is_valid_proof_of_work_hash(bits: Compact, hash: &H256) -> bool {
let target = match bits.to_u256() {
Ok(target) => target,
_err => return false,
};
let hash_bytes: &[u8] = &**hash;
let mut nb = [0u8; 4];
BigEndian::write_u32(&mut nb, n_bits);
let shift = match nb[0].checked_sub(3) {
Some(v) => v,
None => return false,
} as usize; // total shift for mantissa
if shift >= 30 { return false; } // invalid shift
let should_be_zero = shift + 3..32;
let should_be_le = shift..shift + 3;
for z_check in should_be_zero {
if hash_bytes[z_check as usize] != 0 { return false; }
}
// making u32 from 3 bytes
let mut order = 0;
let hash_val: u32 = hash_bytes[should_be_le].iter()
.fold(0u32, |s, a| {
let r = s + ((*a as u32) << order);
order += 8;
r
});
// using 3 bytes leftover of nbits
nb[0] = 0;
let threshold = BigEndian::read_u32(&nb);
if hash_val < threshold {
return true;
}
else if hash_val > threshold {
return false;
}
// the case when hash effective bits are equal to nbits
// then the rest of the hash must be zero
hash_bytes[0..shift].iter().all(|&x| x == 0)
let value = U256::from(&*hash.reversed() as &[u8]);
value <= target
}
pub fn age(protocol_time: u32) -> i64 {
::time::get_time().sec - protocol_time as i64
/// Returns true if the hash is less than or equal to the target, and the target is
/// less than or equal to the current network maximum
pub fn is_valid_proof_of_work(max_work_bits: Compact, bits: Compact, hash: &H256) -> bool {
let maximum = match max_work_bits.to_u256() {
Ok(max) => max,
_err => return false,
};
let target = match bits.to_u256() {
Ok(target) => target,
_err => return false,
};
let value = U256::from(&*hash.reversed() as &[u8]);
target <= maximum && value <= target
}
/// Returns constrained number of seconds since last retarget
pub fn retarget_timespan(retarget_timestamp: u32, last_timestamp: u32) -> u32 {
// subtract unsigned 32 bit numbers in signed 64 bit space in
// order to prevent underflow before applying the range constraint.
let timespan = last_timestamp as i64 - retarget_timestamp as i64;
range_constrain(timespan, MIN_TIMESPAN as i64, MAX_TIMESPAN as i64) as u32
}
/// Returns work required for given header
pub fn work_required(parent_hash: H256, time: u32, height: u32, store: &BlockHeaderProvider, network: Magic) -> Compact {
assert!(height != 0, "cannot calculate required work for genesis block");
let parent_header = store.block_header(parent_hash.clone().into()).expect("self.height != 0; qed");
if is_retarget_height(height) {
let retarget_ref = (height - RETARGETING_INTERVAL).into();
let retarget_header = store.block_header(retarget_ref).expect("self.height != 0 && self.height % RETARGETING_INTERVAL == 0; qed");
// timestamp of block(height - RETARGETING_INTERVAL)
let retarget_timestamp = retarget_header.time;
// timestamp of parent block
let last_timestamp = parent_header.time;
// bits of last block
let last_bits = parent_header.bits;
return work_required_retarget(network.max_bits(), retarget_timestamp, last_timestamp, last_bits);
}
if network == Magic::Testnet {
return work_required_testnet(parent_hash, time, height, store, network)
}
parent_header.bits
}
pub fn work_required_testnet(parent_hash: H256, time: u32, height: u32, store: &BlockHeaderProvider, network: Magic) -> Compact {
assert!(height != 0, "cannot calculate required work for genesis block");
let mut bits = Vec::new();
let mut block_ref: BlockRef = parent_hash.into();
let parent_header = store.block_header(block_ref.clone()).expect("height != 0; qed");
let max_time_gap = parent_header.time + DOUBLE_SPACING_SECONDS;
if time > max_time_gap {
return network.max_bits();
}
// TODO: optimize it, so it does not make 2016!!! redundant queries each time
for _ in 0..RETARGETING_INTERVAL {
let previous_header = match store.block_header(block_ref) {
Some(h) => h,
None => { break; }
};
bits.push(previous_header.bits);
block_ref = previous_header.previous_header_hash.into();
}
for (index, bit) in bits.into_iter().enumerate() {
if bit != network.max_bits() || is_retarget_height(height - index as u32 - 1) {
return bit;
}
}
network.max_bits()
}
/// Algorithm used for retargeting work every 2 weeks
pub fn work_required_retarget(max_work_bits: Compact, retarget_timestamp: u32, last_timestamp: u32, last_bits: Compact) -> Compact {
let mut retarget: U256 = last_bits.into();
let maximum: U256 = max_work_bits.into();
retarget = retarget * retarget_timespan(retarget_timestamp, last_timestamp).into();
retarget = retarget / TARGET_TIMESPAN_SECONDS.into();
if retarget > maximum {
max_work_bits
} else {
retarget.into()
}
}
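Worked numbers for the retarget rule above, using u128 in place of U256 and assuming the usual factor-of-four timespan bounds for MIN_TIMESPAN/MAX_TIMESPAN (a sketch, not the production path):
const TARGET_TIMESPAN: u128 = 2 * 7 * 24 * 60 * 60; // two weeks, as above
fn retarget_sketch(old_target: u128, actual_timespan: u128) -> u128 {
    // clamp the measured timespan to [expected / 4, expected * 4], then scale
    let constrained = actual_timespan
        .max(TARGET_TIMESPAN / 4)
        .min(TARGET_TIMESPAN * 4);
    old_target * constrained / TARGET_TIMESPAN
}
// blocks arrived twice as fast as planned: the target halves (difficulty doubles)
// retarget_sketch(1_000_000, TARGET_TIMESPAN / 2) == 500_000
// ten times too fast: clamped at 4x, so the target only quarters
// retarget_sketch(1_000_000, TARGET_TIMESPAN / 10) == 250_000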
pub fn block_reward_satoshi(block_height: u32) -> u64 {
@ -108,11 +140,34 @@ pub fn block_reward_satoshi(block_height: u32) -> u64 {
res
}
pub fn age(protocol_time: u32) -> i64 {
::time::get_time().sec - protocol_time as i64
}
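The subsidy schedule that `block_reward_satoshi` implements (and the `reward` test below exercises) halves a 50 BTC base every 210,000 blocks; a minimal sketch of the same arithmetic:
fn block_reward_satoshi_sketch(height: u32) -> u64 {
    let halvings = height / 210_000;
    if halvings >= 64 {
        return 0; // the reward has been shifted out entirely
    }
    (50 * 100_000_000u64) >> halvings
}
// block_reward_satoshi_sketch(630_000) == 625_000_000, matching the test below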
#[cfg(test)]
mod tests {
use primitives::hash::H256;
use primitives::compact::Compact;
use network::Magic;
use super::{block_reward_satoshi, check_nbits};
use hash::H256;
use super::{is_valid_proof_of_work_hash, is_valid_proof_of_work, block_reward_satoshi};
fn is_valid_pow(max: Compact, bits: u32, hash: &'static str) -> bool {
is_valid_proof_of_work_hash(bits.into(), &H256::from_reversed_str(hash)) &&
is_valid_proof_of_work(max.into(), bits.into(), &H256::from_reversed_str(hash))
}
#[test]
fn test_is_valid_proof_of_work() {
// block 2
assert!(is_valid_pow(Magic::Mainnet.max_bits(), 486604799u32, "000000006a625f06636b8bb6ac7b960a8d03705d1ace08b1a19da3fdcc99ddbd"));
// block 400_000
assert!(is_valid_pow(Magic::Mainnet.max_bits(), 403093919u32, "000000000000000004ec466ce4732fe6f1ed1cddc2ed4b328fff5224276e3f6f"));
// other random tests
assert!(is_valid_pow(Magic::Regtest.max_bits(), 0x181bc330u32, "00000000000000001bc330000000000000000000000000000000000000000000"));
assert!(!is_valid_pow(Magic::Regtest.max_bits(), 0x181bc330u32, "00000000000000001bc330000000000000000000000000000000000000000001"));
assert!(!is_valid_pow(Magic::Regtest.max_bits(), 0x181bc330u32, "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"));
}
#[test]
fn reward() {
@ -125,34 +180,4 @@ mod tests {
assert_eq!(block_reward_satoshi(630000), 625000000);
assert_eq!(block_reward_satoshi(630001), 625000000);
}
#[test]
fn nbits() {
let max_nbits = Magic::Regtest.max_nbits();
// strictly equal
let hash = H256::from_reversed_str("00000000000000001bc330000000000000000000000000000000000000000000");
let nbits = 0x181bc330u32;
assert!(check_nbits(max_nbits, &hash, nbits));
// nbits match but not equal (greater)
let hash = H256::from_reversed_str("00000000000000001bc330000000000000000000000000000000000000000001");
let nbits = 0x181bc330u32;
assert!(!check_nbits(max_nbits, &hash, nbits));
// greater
let hash = H256::from_reversed_str("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
let nbits = 0x181bc330u32;
assert!(!check_nbits(max_nbits, &hash, nbits));
// some real examples
let hash = H256::from_reversed_str("000000000000000001f942eb4bfa0aeccb6a14c268f4c72d5fff17270da771b9");
let nbits = 404129525;
assert!(check_nbits(max_nbits, &hash, nbits));
let hash = H256::from_reversed_str("00000000000000000e753ef636075711efd2cbf5a8473c7c5b67755a3701e0c2");
let nbits = 404129525;
assert!(check_nbits(max_nbits, &hash, nbits));
}
}