fixed a couple of TODOs in block assembler

This commit is contained in:
debris 2016-12-08 20:33:10 +01:00
parent f5b1d95d2d
commit fc41aba63d
5 changed files with 115 additions and 19 deletions

1
Cargo.lock generated
View File

@ -353,6 +353,7 @@ dependencies = [
"chain 0.1.0", "chain 0.1.0",
"db 0.1.0", "db 0.1.0",
"heapsize 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "heapsize 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"network 0.1.0",
"primitives 0.1.0", "primitives 0.1.0",
"serialization 0.1.0", "serialization 0.1.0",
"test-data 0.1.0", "test-data 0.1.0",

View File

@ -9,6 +9,7 @@ heapsize = "0.3"
bitcrypto = { path = "../crypto" } bitcrypto = { path = "../crypto" }
chain = { path = "../chain" } chain = { path = "../chain" }
db = { path = "../db" } db = { path = "../db" }
network = { path = "../network" }
primitives = { path = "../primitives" } primitives = { path = "../primitives" }
serialization = { path = "../serialization" } serialization = { path = "../serialization" }
test-data = { path = "../test-data" } test-data = { path = "../test-data" }

View File

@ -1,6 +1,12 @@
use primitives::hash::H256; use primitives::hash::H256;
use db::{SharedStore, IndexedTransaction}; use db::{SharedStore, IndexedTransaction};
use network::Magic;
use memory_pool::{MemoryPool, OrderingStrategy}; use memory_pool::{MemoryPool, OrderingStrategy};
use pow::{work_required, block_reward_satoshi};
const BLOCK_VERSION: u32 = 0x20000000;
const MAX_BLOCK_SIZE: u32 = 1_000_000;
const MAX_BLOCK_SIGOPS: u32 = 20_000;
/// Block template as described in BIP0022 /// Block template as described in BIP0022
/// Minimal version /// Minimal version
@ -19,7 +25,11 @@ pub struct BlockTemplate {
/// Block transactions (excluding coinbase) /// Block transactions (excluding coinbase)
pub transactions: Vec<IndexedTransaction>, pub transactions: Vec<IndexedTransaction>,
/// Total funds available for the coinbase (in Satoshis) /// Total funds available for the coinbase (in Satoshis)
pub coinbase_value: u32, pub coinbase_value: u64,
/// Number of bytes allowed in the block
pub size_limit: u32,
/// Number of sigops allowed in the block
pub sigop_limit: u32,
} }
/// Block size and number of signatures opcodes is limited /// Block size and number of signatures opcodes is limited
@ -105,28 +115,33 @@ impl SizePolicy {
} }
/// Block assembler /// Block assembler
pub struct BlockAssembler; pub struct BlockAssembler {
max_block_size: u32,
max_block_sigops: u32,
}
impl Default for BlockAssembler {
fn default() -> Self {
BlockAssembler {
max_block_size: MAX_BLOCK_SIZE,
max_block_sigops: MAX_BLOCK_SIGOPS,
}
}
}
impl BlockAssembler { impl BlockAssembler {
pub fn create_new_block(store: &SharedStore, mempool: &MemoryPool, time: u32) -> BlockTemplate { pub fn create_new_block(&self, store: &SharedStore, mempool: &MemoryPool, time: u32, network: Magic) -> BlockTemplate {
// get best block // get best block
// take its hash && height // take its hash && height
let best_block = store.best_block().expect("Cannot assemble new block without genesis block"); let best_block = store.best_block().expect("Cannot assemble new block without genesis block");
let previous_header_hash = best_block.hash; let previous_header_hash = best_block.hash;
let height = best_block.number + 1; let height = best_block.number + 1;
let nbits = work_required(previous_header_hash.clone(), time, height, store.as_block_header_provider(), network);
let version = BLOCK_VERSION;
// TODO: calculate nbits (retarget may be required) let mut block_size = SizePolicy::new(0, self.max_block_size, 1_000, 50);
let nbits = 0; let mut sigops = SizePolicy::new(0, self.max_block_sigops, 8, 50);
let mut coinbase_value = block_reward_satoshi(height);
// TODO: calculate version
let version = 0;
// TODO: use constants and real values
let mut block_size = SizePolicy::new(0, 1_000_000, 100_000, 50);
// TODO: use constants and real values
let mut sigops = SizePolicy::new(0, 2000, 8, 50);
// TODO: calculate coinbase fee
let mut coinbase_value = 0u32;
let mut transactions = Vec::new(); let mut transactions = Vec::new();
// add priority transactions // add priority transactions
@ -138,10 +153,12 @@ impl BlockAssembler {
version: version, version: version,
previous_header_hash: previous_header_hash, previous_header_hash: previous_header_hash,
time: time, time: time,
nbits: nbits, nbits: nbits.into(),
height: height, height: height,
transactions: transactions, transactions: transactions,
coinbase_value: coinbase_value, coinbase_value: coinbase_value,
size_limit: self.max_block_size,
sigop_limit: self.max_block_sigops,
} }
} }
@ -149,7 +166,7 @@ impl BlockAssembler {
mempool: &MemoryPool, mempool: &MemoryPool,
block_size: &mut SizePolicy, block_size: &mut SizePolicy,
sigops: &mut SizePolicy, sigops: &mut SizePolicy,
coinbase_value: &mut u32, coinbase_value: &mut u64,
transactions: &mut Vec<IndexedTransaction>, transactions: &mut Vec<IndexedTransaction>,
strategy: OrderingStrategy strategy: OrderingStrategy
) { ) {
@ -172,9 +189,9 @@ impl BlockAssembler {
match size_step.and(sigops_step) { match size_step.and(sigops_step) {
NextStep::Append => { NextStep::Append => {
// miner_fee is i64, but we can safely cast it to u32 // miner_fee is i64, but we can safely cast it to u64
// memory pool should restrict miner fee to be positive // memory pool should restrict miner fee to be positive
*coinbase_value += entry.miner_fee as u32; *coinbase_value += entry.miner_fee as u64;
transactions.push(transaction); transactions.push(transaction);
}, },
NextStep::FinishAndAppend => { NextStep::FinishAndAppend => {

View File

@ -4,6 +4,7 @@ extern crate heapsize;
extern crate bitcrypto as crypto; extern crate bitcrypto as crypto;
extern crate chain; extern crate chain;
extern crate db; extern crate db;
extern crate network;
extern crate primitives; extern crate primitives;
extern crate serialization as ser; extern crate serialization as ser;
extern crate test_data; extern crate test_data;
@ -16,3 +17,4 @@ mod pow;
pub use fee::{transaction_fee, transaction_fee_rate}; pub use fee::{transaction_fee, transaction_fee_rate};
pub use memory_pool::{MemoryPool, Information as MemoryPoolInformation, OrderingStrategy as MemoryPoolOrderingStrategy}; pub use memory_pool::{MemoryPool, Information as MemoryPoolInformation, OrderingStrategy as MemoryPoolOrderingStrategy};
pub use pow::{work_required, is_valid_proof_of_work, block_reward_satoshi};

View File

@ -2,14 +2,25 @@ use std::cmp;
use primitives::compact::Compact; use primitives::compact::Compact;
use primitives::hash::H256; use primitives::hash::H256;
use primitives::uint::U256; use primitives::uint::U256;
use network::Magic;
use db::{BlockHeaderProvider, BlockRef};
const RETARGETING_FACTOR: u32 = 4; const RETARGETING_FACTOR: u32 = 4;
const TARGET_SPACING_SECONDS: u32 = 10 * 60;
const DOUBLE_SPACING_SECONDS: u32 = 2 * TARGET_SPACING_SECONDS;
const TARGET_TIMESPAN_SECONDS: u32 = 2 * 7 * 24 * 60 * 60; const TARGET_TIMESPAN_SECONDS: u32 = 2 * 7 * 24 * 60 * 60;
// The upper and lower bounds for retargeting timespan // The upper and lower bounds for retargeting timespan
const MIN_TIMESPAN: u32 = TARGET_TIMESPAN_SECONDS / RETARGETING_FACTOR; const MIN_TIMESPAN: u32 = TARGET_TIMESPAN_SECONDS / RETARGETING_FACTOR;
const MAX_TIMESPAN: u32 = TARGET_TIMESPAN_SECONDS * RETARGETING_FACTOR; const MAX_TIMESPAN: u32 = TARGET_TIMESPAN_SECONDS * RETARGETING_FACTOR;
// Target number of blocks per retarget period: 2 weeks of 10-minute blocks, i.e. 2016
pub const RETARGETING_INTERVAL: u32 = TARGET_TIMESPAN_SECONDS / TARGET_SPACING_SECONDS;
/// Returns true when `height` falls on a difficulty-retarget boundary,
/// i.e. every `RETARGETING_INTERVAL` (2016) blocks, including height 0.
pub fn is_retarget_height(height: u32) -> bool {
	let offset_in_period = height % RETARGETING_INTERVAL;
	offset_in_period == 0
}
fn range_constrain(value: i64, min: i64, max: i64) -> i64 { fn range_constrain(value: i64, min: i64, max: i64) -> i64 {
cmp::min(cmp::max(value, min), max) cmp::min(cmp::max(value, min), max)
} }
@ -50,6 +61,64 @@ pub fn retarget_timespan(retarget_timestamp: u32, last_timestamp: u32) -> u32 {
range_constrain(timespan, MIN_TIMESPAN as i64, MAX_TIMESPAN as i64) as u32 range_constrain(timespan, MIN_TIMESPAN as i64, MAX_TIMESPAN as i64) as u32
} }
/// Computes the proof-of-work requirement (compact nbits) for the block at
/// `height` whose parent is `parent_hash`.
///
/// Panics when `height == 0`: required work cannot be derived for the
/// genesis block.
pub fn work_required(parent_hash: H256, time: u32, height: u32, store: &BlockHeaderProvider, network: Magic) -> Compact {
	assert!(height != 0, "cannot calculate required work for genesis block");

	let parent_header = store.block_header(parent_hash.clone().into()).expect("self.height != 0; qed");

	if is_retarget_height(height) {
		// On a retarget boundary the new difficulty is derived from how long
		// the previous RETARGETING_INTERVAL blocks took to mine.
		let retarget_header = store.block_header((height - RETARGETING_INTERVAL).into())
			.expect("self.height != 0 && self.height % RETARGETING_INTERVAL == 0; qed");
		work_required_retarget(
			network.max_nbits().into(),
			// timestamp of block (height - RETARGETING_INTERVAL)
			retarget_header.time,
			// timestamp of the parent block
			parent_header.time,
			// nbits of the last (parent) block
			parent_header.nbits.into(),
		)
	} else if network == Magic::Testnet {
		// Testnet applies a special minimum-difficulty rule between retargets.
		work_required_testnet(parent_hash, time, height, store, network)
	} else {
		// Between retargets the required work is inherited from the parent.
		parent_header.nbits.into()
	}
}
/// Computes the proof-of-work requirement for a testnet block between
/// retarget boundaries.
///
/// Testnet rule: if no block was mined for twice the target spacing
/// (`DOUBLE_SPACING_SECONDS`, i.e. 20 minutes), a minimum-difficulty block
/// (`network.max_nbits()`) is allowed. Otherwise the required work is that of
/// the most recent ancestor which was NOT a minimum-difficulty block, stopping
/// the search at the last retarget block.
///
/// Panics when `height == 0`.
pub fn work_required_testnet(parent_hash: H256, time: u32, height: u32, store: &BlockHeaderProvider, network: Magic) -> Compact {
	assert!(height != 0, "cannot calculate required work for genesis block");

	// nbits of ancestors, newest first (index 0 == parent, at height - 1).
	let mut bits = Vec::new();
	let mut block_ref: BlockRef = parent_hash.into();

	let parent_header = store.block_header(block_ref.clone()).expect("height != 0; qed");

	// 20-minute rule: allow a minimum-difficulty block after a long gap.
	let max_time_gap = parent_header.time + DOUBLE_SPACING_SECONDS;
	if time > max_time_gap {
		return network.max_nbits().into();
	}

	// Walk back at most one retarget interval, collecting each ancestor's
	// nbits; stop early if the chain runs out of stored headers.
	// TODO: optimize it, so it does not make up to 2016 redundant queries each time
	for _ in 0..RETARGETING_INTERVAL {
		let previous_header = match store.block_header(block_ref) {
			Some(h) => h,
			None => { break; }
		};
		bits.push(previous_header.nbits);
		block_ref = previous_header.previous_header_hash.into();
	}

	// Find the newest ancestor whose work was not minimum difficulty, or the
	// last retarget block (height - index - 1 is the height of bits[index]).
	for (index, bit) in bits.into_iter().enumerate() {
		if bit != network.max_nbits() || is_retarget_height(height - index as u32 - 1) {
			return bit.into();
		}
	}

	// Every inspected ancestor was a minimum-difficulty block.
	network.max_nbits().into()
}
/// Algorithm used for retargeting work every 2 weeks /// Algorithm used for retargeting work every 2 weeks
pub fn work_required_retarget(max_work_bits: Compact, retarget_timestamp: u32, last_timestamp: u32, last_bits: Compact) -> Compact { pub fn work_required_retarget(max_work_bits: Compact, retarget_timestamp: u32, last_timestamp: u32, last_bits: Compact) -> Compact {
let mut retarget: U256 = last_bits.into(); let mut retarget: U256 = last_bits.into();
@ -65,6 +134,12 @@ pub fn work_required_retarget(max_work_bits: Compact, retarget_timestamp: u32, l
} }
} }
/// Returns the block subsidy, in satoshis, for a block at `block_height`.
///
/// The initial subsidy is 50 BTC (5_000_000_000 satoshis) and it halves
/// every 210_000 blocks, eventually reaching zero.
pub fn block_reward_satoshi(block_height: u32) -> u64 {
	// Number of completed halving periods.
	let halvings = block_height / 210_000;
	// A shift by >= 64 would be undefined/panicking for u64; the subsidy is
	// already zero long before that (the initial value fits in 33 bits).
	if halvings >= 64 {
		return 0;
	}
	// Right shift halves with the same flooring semantics as repeated
	// integer division by two, in O(1) instead of one division per halving.
	(50 * 100 * 1000 * 1000u64) >> halvings
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::{is_valid_proof_of_work_hash, is_valid_proof_of_work}; use super::{is_valid_proof_of_work_hash, is_valid_proof_of_work};