commit c0c83017d1

@@ -6,6 +6,7 @@ dependencies = [
"db 0.1.0",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"network 0.1.0",
"parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"primitives 0.1.0",
"rayon 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"script 0.1.0",

@@ -193,6 +194,7 @@ dependencies = [
"chain 0.1.0",
"elastic-array 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"primitives 0.1.0",
"rocksdb 0.4.5 (git+https://github.com/ethcore/rust-rocksdb)",

@@ -448,6 +450,11 @@ name = "linked-hash-map"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "linked-hash-map"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "log"
version = "0.3.7"

@@ -463,6 +470,14 @@ dependencies = [
"time 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "lru-cache"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"linked-hash-map 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "matches"
version = "0.1.4"

@@ -1227,7 +1242,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum lazycell 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce12306c4739d86ee97c23139f3a34ddf0387bbf181bc7929d287025a8c3ef6b"
"checksum libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)" = "88ee81885f9f04bff991e306fea7c1c60a5f0f9e409e99f6b40e3311a3363135"
"checksum linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6d262045c5b87c0861b3f004610afd0e2c851e2908d08b6c870cbb9d5f494ecd"
"checksum linked-hash-map 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7860ec297f7008ff7a1e3382d7f7e1dcd69efc94751a2284bafc3d013c2aa939"
"checksum log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "5141eca02775a762cc6cd564d8d2c50f67c0ea3a372cbf1c51592b3e029e10ad"
"checksum lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4d06ff7ff06f729ce5f4e227876cb88d10bc59cd4ae1e09fbb2bde15c850dc21"
"checksum matches 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "efd7622e3022e1a6eaa602c4cea8912254e5582c9c692e9167714182244801b1"
"checksum memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1dbccc0e46f1ea47b9f17e6d67c5a96bd27030519c519c9c91327e31275a47b4"
"checksum mime 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "5514f038123342d01ee5f95129e4ef1e0470c93bc29edf058a46f9ee3ba6737e"

@@ -32,9 +32,10 @@ pub fn fetch(benchmark: &mut Benchmark) {
}

for block in blocks.into_iter() {
let block = block.into();
store.insert(&block).unwrap();
store.canonize(block.hash()).unwrap();
let block: IndexedBlock = block.into();
let hash = block.hash().clone();
store.insert(block).unwrap();
store.canonize(&hash).unwrap();
}

// bench

@@ -57,7 +58,7 @@ pub fn write(benchmark: &mut Benchmark) {

let mut rolling_hash = genesis.hash().clone();

let mut blocks = Vec::new();
let mut blocks: Vec<IndexedBlock> = Vec::new();

for x in 0..BLOCKS {
let next_block = test_data::block_builder()

@@ -73,9 +74,10 @@ pub fn write(benchmark: &mut Benchmark) {

// bench
benchmark.start();
for idx in 0..BLOCKS {
store.insert(&blocks[idx]).unwrap();
store.canonize(blocks[idx].hash()).unwrap();
for block in blocks {
let hash = block.hash().clone();
store.insert(block).unwrap();
store.canonize(&hash).unwrap();
}
benchmark.stop();
}

@@ -144,23 +146,24 @@ pub fn reorg_short(benchmark: &mut Benchmark) {
for idx in 0..BLOCKS {
total += 1;
let block: IndexedBlock = blocks[idx].clone().into();
let hash = block.hash().clone();

match store.block_origin(&block.header).unwrap() {
BlockOrigin::KnownBlock => {
unreachable!();
},
BlockOrigin::CanonChain { .. } => {
store.insert(&block).unwrap();
store.canonize(block.hash()).unwrap();
store.insert(block).unwrap();
store.canonize(&hash).unwrap();
},
BlockOrigin::SideChain(_origin) => {
store.insert(&block).unwrap();
store.insert(block).unwrap();
},
BlockOrigin::SideChainBecomingCanonChain(origin) => {
reorgs += 1;
let fork = store.fork(origin).unwrap();
fork.store().insert(&block).unwrap();
fork.store().canonize(block.hash()).unwrap();
fork.store().insert(block).unwrap();
fork.store().canonize(&hash).unwrap();
store.switch_to_fork(fork).unwrap();
},
}

@@ -223,17 +226,19 @@ pub fn write_heavy(benchmark: &mut Benchmark) {
}

for block in blocks[..BLOCKS_INITIAL].iter() {
let block = block.clone().into();
store.insert(&block).expect("cannot insert initial block");
store.canonize(block.hash()).unwrap();
let block: IndexedBlock = block.clone().into();
let hash = block.hash().clone();
store.insert(block).expect("cannot insert initial block");
store.canonize(&hash).unwrap();
}

// bench
benchmark.start();
for block in blocks[BLOCKS_INITIAL..].iter() {
let block = block.clone().into();
store.insert(&block).expect("cannot insert bench block");
store.canonize(block.hash()).unwrap();
let block: IndexedBlock = block.clone().into();
let hash = block.hash().clone();
store.insert(block).expect("cannot insert bench block");
store.canonize(&hash).unwrap();
}
benchmark.stop();
}

@@ -51,8 +51,9 @@ pub fn main(benchmark: &mut Benchmark) {

let store = Arc::new(BlockChainDatabase::init_test_chain(vec![genesis.clone().into()]));
for block in blocks.iter() {
store.insert(block).unwrap();
store.canonize(block.hash()).unwrap();
let hash = block.hash().clone();
store.insert(block.clone()).unwrap();
store.canonize(&hash).unwrap();
}

let mut verification_blocks: Vec<IndexedBlock> = Vec::new();

@@ -66,10 +66,6 @@ impl Block {
pub fn hash(&self) -> H256 {
self.block_header.hash()
}

pub fn is_final(&self, height: u32) -> bool {
self.transactions.iter().all(|t| t.is_final_in_block(height, self.block_header.time))
}
}

#[cfg(test)]

@@ -9,6 +9,7 @@ elastic-array = "0.6"
parking_lot = "0.4"
log = "0.3"
bit-vec = "0.4"
lru-cache = "0.1"
primitives = { path = "../primitives" }
serialization = { path = "../serialization" }
chain = { path = "../chain" }

@@ -0,0 +1,28 @@
use chain::BlockHeader;
use {BlockRef, BlockHeaderProvider};

pub struct BlockAncestors<'a> {
block: BlockRef,
headers: &'a BlockHeaderProvider,
}

impl<'a> BlockAncestors<'a> {
pub fn new(block: BlockRef, headers: &'a BlockHeaderProvider) -> Self {
BlockAncestors {
block: block,
headers: headers,
}
}
}

impl<'a> Iterator for BlockAncestors<'a> {
type Item = BlockHeader;

fn next(&mut self) -> Option<Self::Item> {
let result = self.headers.block_header(self.block.clone());
if let Some(ref header) = result {
self.block = BlockRef::Hash(header.previous_header_hash.clone());
}
result
}
}
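
A minimal usage sketch of the new BlockAncestors iterator (store and start_hash are assumed names; any BlockHeaderProvider works as the header source). Iteration follows previous_header_hash links newest-first and stops at the first header the provider cannot resolve:

// Sketch: walk up to ten ancestors of a known block, newest first.
let ancestors = BlockAncestors::new(BlockRef::Hash(start_hash), &store);
for header in ancestors.take(10) {
    println!("ancestor: {:?}", header.hash());
}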

@@ -13,7 +13,7 @@ pub trait ForkChain {

pub trait BlockChain {
/// Inserts new block into blockchain
fn insert(&self, block: &IndexedBlock) -> Result<(), Error>;
fn insert(&self, block: IndexedBlock) -> Result<(), Error>;

/// Canonizes block with given hash
fn canonize(&self, block_hash: &H256) -> Result<(), Error>;
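
The signature change means insert now consumes the block instead of borrowing it, so callers that still need the hash for canonize must clone it out first. A sketch of the new calling convention, matching the call sites updated throughout this commit (store and raw_block are assumed names):

// Convert once, keep the hash, then hand ownership to the store.
let block: IndexedBlock = raw_block.into();
let hash = block.hash().clone();
store.insert(block)?;
store.canonize(&hash)?;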

@@ -9,12 +9,15 @@ use chain::{
OutPoint, TransactionOutput
};
use ser::{
deserialize, serialize, serialize_list, Serializable, Deserializable,
DeserializableList
deserialize, serialize, List
};
use kv::{
KeyValueDatabase, OverlayDatabase, Transaction as DBTransaction, Location, Value, DiskDatabase,
DatabaseConfig, MemoryDatabase, AutoFlushingOverlayDatabase
KeyValueDatabase, OverlayDatabase, Transaction as DBTransaction, Value, DiskDatabase,
DatabaseConfig, MemoryDatabase, AutoFlushingOverlayDatabase, KeyValue, Key, KeyState, CacheDatabase
};
use kv::{
COL_COUNT, COL_BLOCK_HASHES, COL_BLOCK_HEADERS, COL_BLOCK_TRANSACTIONS, COL_TRANSACTIONS,
COL_TRANSACTIONS_META, COL_BLOCK_NUMBERS
};
use best_block::BestBlock;
use {

@@ -23,15 +26,6 @@ use {
SideChainOrigin, ForkChain, Forkable, CanonStore
};

const COL_COUNT: u32 = 10;
const COL_META: u32 = 0;
const COL_BLOCK_HASHES: u32 = 1;
const COL_BLOCK_HEADERS: u32 = 2;
const COL_BLOCK_TRANSACTIONS: u32 = 3;
const COL_TRANSACTIONS: u32 = 4;
const COL_TRANSACTIONS_META: u32 = 5;
const COL_BLOCK_NUMBERS: u32 = 6;

const KEY_VERSION: &'static str = "version";
const KEY_BEST_BLOCK_NUMBER: &'static str = "best_block_number";
const KEY_BEST_BLOCK_HASH: &'static str = "best_block_hash";

@@ -58,7 +52,7 @@ impl<'a, T> ForkChain for ForkChainDatabase<'a, T> where T: KeyValueDatabase {
}
}

impl BlockChainDatabase<AutoFlushingOverlayDatabase<DiskDatabase>> {
impl BlockChainDatabase<CacheDatabase<AutoFlushingOverlayDatabase<DiskDatabase>>> {
pub fn open_at_path<P>(path: P, total_cache: usize) -> Result<Self, Error> where P: AsRef<Path> {
fs::create_dir_all(path.as_ref()).map_err(|err| Error::DatabaseError(err.to_string()))?;
let mut cfg = DatabaseConfig::with_columns(Some(COL_COUNT));

@@ -84,17 +78,18 @@ impl BlockChainDatabase<MemoryDatabase> {
pub fn init_test_chain(blocks: Vec<IndexedBlock>) -> Self {
let store = BlockChainDatabase::open(MemoryDatabase::default());

for block in &blocks {
for block in blocks {
let hash = block.hash().clone();
store.insert(block).unwrap();
store.canonize(block.hash()).unwrap();
store.canonize(&hash).unwrap();
}
store
}
}

impl<T> BlockChainDatabase<AutoFlushingOverlayDatabase<T>> where T: KeyValueDatabase {
impl<T> BlockChainDatabase<CacheDatabase<AutoFlushingOverlayDatabase<T>>> where T: KeyValueDatabase {
pub fn open_with_cache(db: T) -> Self {
let db = AutoFlushingOverlayDatabase::new(db, 50);
let db = CacheDatabase::new(AutoFlushingOverlayDatabase::new(db, 50));
let best_block = Self::read_best_block(&db).unwrap_or_default();
BlockChainDatabase {
best_block: RwLock::new(best_block),

@@ -105,14 +100,14 @@ impl<T> BlockChainDatabase<AutoFlushingOverlayDatabase<T>> where T: KeyValueData

impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {
fn read_best_block(db: &T) -> Option<BestBlock> {
let best_number = db.get(COL_META.into(), &serialize(&KEY_BEST_BLOCK_NUMBER));
let best_hash = db.get(COL_META.into(), &serialize(&KEY_BEST_BLOCK_HASH));
let best_number = db.get(&Key::Meta(KEY_BEST_BLOCK_NUMBER)).map(KeyState::into_option).map(|x| x.and_then(Value::as_meta));
let best_hash = db.get(&Key::Meta(KEY_BEST_BLOCK_HASH)).map(KeyState::into_option).map(|x| x.and_then(Value::as_meta));

match (best_number, best_hash) {
(Ok(None), Ok(None)) => None,
(Ok(Some(number)), Ok(Some(hash))) => Some(BestBlock {
number: deserialize(&*number).expect("Inconsistent DB. Invalid best block number."),
hash: deserialize(&*hash).expect("Inconsistent DB. Invalid best block hash."),
number: deserialize(&**number).expect("Inconsistent DB. Invalid best block number."),
hash: deserialize(&**hash).expect("Inconsistent DB. Invalid best block hash."),
}),
_ => panic!("Inconsistent DB"),
}

@@ -184,7 +179,6 @@ impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {
ancestor: number,
canonized_route: sidechain_route.into_iter().rev().collect(),
decanonized_route: (number + 1..best_block.number + 1).into_iter()
//.map(|decanonized_bn| self.block_hash(decanonized_bn + 1).expect("to find block hashes of canon chain blocks; qed"))
.filter_map(|decanonized_bn| self.block_hash(decanonized_bn))
.collect(),
block_number: block_number,

@@ -207,24 +201,25 @@ impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {
Err(Error::AncientFork)
}

pub fn insert(&self, block: &IndexedBlock) -> Result<(), Error> {
pub fn insert(&self, block: IndexedBlock) -> Result<(), Error> {
if self.contains_block(block.hash().clone().into()) {
return Ok(())
}

let parent_hash = &block.header.raw.previous_header_hash;
let parent_hash = block.header.raw.previous_header_hash.clone();
if !self.contains_block(parent_hash.clone().into()) && !parent_hash.is_zero() {
return Err(Error::UnknownParent);
}

let mut update = DBTransaction::new();
update.insert(COL_BLOCK_HEADERS.into(), block.hash(), &block.header.raw);
for tx in &block.transactions {
update.insert(COL_TRANSACTIONS.into(), &tx.hash, &tx.raw);
update.insert(KeyValue::BlockHeader(block.hash().clone(), block.header.raw));
let tx_hashes = block.transactions.iter().map(|tx| tx.hash.clone()).collect::<Vec<_>>();
update.insert(KeyValue::BlockTransactions(block.header.hash.clone(), List::from(tx_hashes)));

for tx in block.transactions.into_iter() {
update.insert(KeyValue::Transaction(tx.hash, tx.raw));
}

let tx_hashes = serialize_list::<H256, &H256>(&block.transactions.iter().map(|tx| &tx.hash).collect::<Vec<_>>());
update.insert_raw(COL_BLOCK_TRANSACTIONS.into(), &**block.hash(), &tx_hashes);
self.db.write(update).map_err(Error::DatabaseError)
}
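
The rewritten insert above is the template for every write in this commit: a typed KeyValue names both the column and the serialization, so call sites no longer juggle (Location, key bytes, value bytes) triples. A reduced sketch of the pattern (hash, header, number, tx_hash and db are placeholder names, not part of the commit):

let mut update = DBTransaction::new();
update.insert(KeyValue::BlockHeader(hash.clone(), header));
update.insert(KeyValue::BlockNumber(hash.clone(), number));
update.delete(Key::TransactionMeta(tx_hash));
db.write(update).map_err(Error::DatabaseError)?;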

@@ -255,10 +250,10 @@ impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {
trace!(target: "db", "canonize {:?}", new_best_block);

let mut update = DBTransaction::new();
update.insert(COL_BLOCK_HASHES.into(), &new_best_block.number, &new_best_block.hash);
update.insert(COL_BLOCK_NUMBERS.into(), &new_best_block.hash, &new_best_block.number);
update.insert(COL_META.into(), &KEY_BEST_BLOCK_HASH, &new_best_block.hash);
update.insert(COL_META.into(), &KEY_BEST_BLOCK_NUMBER, &new_best_block.number);
update.insert(KeyValue::BlockHash(new_best_block.number, new_best_block.hash.clone()));
update.insert(KeyValue::BlockNumber(new_best_block.hash.clone(), new_best_block.number));
update.insert(KeyValue::Meta(KEY_BEST_BLOCK_HASH, serialize(&new_best_block.hash)));
update.insert(KeyValue::Meta(KEY_BEST_BLOCK_NUMBER, serialize(&new_best_block.number)));

let mut modified_meta: HashMap<H256, TransactionMeta> = HashMap::new();
if let Some(tx) = block.transactions.first() {

@@ -287,8 +282,8 @@ impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {
}
}

for (hash, meta) in modified_meta.iter() {
update.insert(COL_TRANSACTIONS_META.into(), hash, meta);
for (hash, meta) in modified_meta.into_iter() {
update.insert(KeyValue::TransactionMeta(hash, meta));
}

self.db.write(update).map_err(Error::DatabaseError)?;

@@ -318,10 +313,10 @@ impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {
trace!(target: "db", "decanonize, new best: {:?}", new_best_block);

let mut update = DBTransaction::new();
update.delete(COL_BLOCK_HASHES.into(), &block_number);
update.delete(COL_BLOCK_NUMBERS.into(), &block_hash);
update.insert(COL_META.into(), &KEY_BEST_BLOCK_HASH, &new_best_block.hash);
update.insert(COL_META.into(), &KEY_BEST_BLOCK_NUMBER, &new_best_block.number);
update.delete(Key::BlockHash(block_number));
update.delete(Key::BlockNumber(block_hash.clone()));
update.insert(KeyValue::Meta(KEY_BEST_BLOCK_HASH, serialize(&new_best_block.hash)));
update.insert(KeyValue::Meta(KEY_BEST_BLOCK_NUMBER, serialize(&new_best_block.number)));

let mut modified_meta: HashMap<H256, TransactionMeta> = HashMap::new();
for tx in block.transactions.iter().skip(1) {

@@ -343,12 +338,12 @@ impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {
}
}

for (hash, meta) in modified_meta.iter() {
update.insert(COL_TRANSACTIONS_META.into(), hash, meta);
for (hash, meta) in modified_meta {
update.insert(KeyValue::TransactionMeta(hash, meta));
}

for tx in &block.transactions {
update.delete(COL_TRANSACTIONS_META.into(), &tx.hash);
for tx in block.transactions {
update.delete(Key::TransactionMeta(tx.hash));
}

self.db.write(update).map_err(Error::DatabaseError)?;

@@ -356,12 +351,8 @@ impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {
Ok(block_hash)
}

fn get_raw<K>(&self, location: Location, key: &K) -> Option<Value> where K: Serializable {
self.db.get(location, &serialize(key)).expect("db query to be fine")
}

fn get<K, V>(&self, location: Location, key: &K) -> Option<V> where K: Serializable, V: Deserializable {
self.get_raw(location, key).map(|val| deserialize(&*val).expect("db value to be fine"))
fn get(&self, key: Key) -> Option<Value> {
self.db.get(&key).expect("db value to be fine").into_option()
}

fn resolve_hash(&self, block_ref: BlockRef) -> Option<H256> {

@@ -374,24 +365,25 @@ impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {

impl<T> BlockHeaderProvider for BlockChainDatabase<T> where T: KeyValueDatabase {
fn block_header_bytes(&self, block_ref: BlockRef) -> Option<Bytes> {
self.resolve_hash(block_ref)
.and_then(|hash| self.get_raw(COL_BLOCK_HEADERS.into(), &hash))
.map(|raw| (&*raw).into())
self.block_header(block_ref).map(|header| serialize(&header))
}

fn block_header(&self, block_ref: BlockRef) -> Option<BlockHeader> {
self.resolve_hash(block_ref)
.and_then(|hash| self.get(COL_BLOCK_HEADERS.into(), &hash))
.and_then(|hash| self.get(Key::BlockHeader(hash)))
.and_then(Value::as_block_header)
}
}

impl<T> BlockProvider for BlockChainDatabase<T> where T: KeyValueDatabase {
fn block_number(&self, hash: &H256) -> Option<u32> {
self.get(COL_BLOCK_NUMBERS.into(), hash)
self.get(Key::BlockNumber(hash.clone()))
.and_then(Value::as_block_number)
}

fn block_hash(&self, number: u32) -> Option<H256> {
self.get(COL_BLOCK_HASHES.into(), &number)
self.get(Key::BlockHash(number))
.and_then(Value::as_block_hash)
}

fn block(&self, block_ref: BlockRef) -> Option<Block> {

@@ -407,21 +399,23 @@ impl<T> BlockProvider for BlockChainDatabase<T> where T: KeyValueDatabase {

fn contains_block(&self, block_ref: BlockRef) -> bool {
self.resolve_hash(block_ref)
.and_then(|block_hash| self.get_raw(COL_BLOCK_HEADERS.into(), &block_hash))
.and_then(|hash| self.get(Key::BlockHeader(hash)))
.is_some()
}

fn block_transaction_hashes(&self, block_ref: BlockRef) -> Vec<H256> {
self.resolve_hash(block_ref)
.and_then(|block_hash| self.get(COL_BLOCK_TRANSACTIONS.into(), &block_hash))
.map(|hashes: DeserializableList<H256>| hashes.into())
.and_then(|hash| self.get(Key::BlockTransactions(hash)))
.and_then(Value::as_block_transactions)
.map(List::into)
.unwrap_or_default()
}

fn block_transactions(&self, block_ref: BlockRef) -> Vec<Transaction> {
self.block_transaction_hashes(block_ref)
.iter()
.filter_map(|hash| self.get(COL_TRANSACTIONS.into(), hash))
.into_iter()
.filter_map(|hash| self.get(Key::Transaction(hash)))
.filter_map(Value::as_transaction)
.collect()
}
}

@@ -430,7 +424,8 @@ impl<T> IndexedBlockProvider for BlockChainDatabase<T> where T: KeyValueDatabase
fn indexed_block_header(&self, block_ref: BlockRef) -> Option<IndexedBlockHeader> {
self.resolve_hash(block_ref)
.and_then(|block_hash| {
self.get(COL_BLOCK_HEADERS.into(), &block_hash)
self.get(Key::BlockHeader(block_hash.clone()))
.and_then(Value::as_block_header)
.map(|header| IndexedBlockHeader::new(block_hash, header))
})
}

@@ -450,7 +445,8 @@ impl<T> IndexedBlockProvider for BlockChainDatabase<T> where T: KeyValueDatabase
self.block_transaction_hashes(block_ref)
.into_iter()
.filter_map(|hash| {
self.get(COL_TRANSACTIONS.into(), &hash)
self.get(Key::Transaction(hash.clone()))
.and_then(Value::as_transaction)
.map(|tx| IndexedTransaction::new(hash, tx))
})
.collect()

@@ -459,18 +455,19 @@ impl<T> IndexedBlockProvider for BlockChainDatabase<T> where T: KeyValueDatabase

impl<T> TransactionMetaProvider for BlockChainDatabase<T> where T: KeyValueDatabase {
fn transaction_meta(&self, hash: &H256) -> Option<TransactionMeta> {
self.get(COL_TRANSACTIONS_META.into(), hash)
self.get(Key::TransactionMeta(hash.clone()))
.and_then(Value::as_transaction_meta)
}
}

impl<T> TransactionProvider for BlockChainDatabase<T> where T: KeyValueDatabase {
fn transaction_bytes(&self, hash: &H256) -> Option<Bytes> {
self.get_raw(COL_TRANSACTIONS.into(), hash)
.map(|raw| (&*raw).into())
self.transaction(hash).map(|tx| serialize(&tx))
}

fn transaction(&self, hash: &H256) -> Option<Transaction> {
self.get(COL_TRANSACTIONS.into(), hash)
self.get(Key::Transaction(hash.clone()))
.and_then(Value::as_transaction)
}
}

@@ -490,7 +487,7 @@ impl<T> TransactionOutputProvider for BlockChainDatabase<T> where T: KeyValueDat
}

impl<T> BlockChain for BlockChainDatabase<T> where T: KeyValueDatabase {
fn insert(&self, block: &IndexedBlock) -> Result<(), Error> {
fn insert(&self, block: IndexedBlock) -> Result<(), Error> {
BlockChainDatabase::insert(self, block)
}

@@ -0,0 +1,29 @@
use chain::BlockHeader;
use {BlockRef, BlockHeaderProvider};

pub struct BlockIterator<'a> {
block: u32,
period: u32,
headers: &'a BlockHeaderProvider,
}

impl<'a> BlockIterator<'a> {
pub fn new(block: u32, period: u32, headers: &'a BlockHeaderProvider) -> Self {
BlockIterator {
block: block,
period: period,
headers: headers,
}
}
}

impl<'a> Iterator for BlockIterator<'a> {
type Item = (u32, BlockHeader);

fn next(&mut self) -> Option<Self::Item> {
let result = self.headers.block_header(BlockRef::Number(self.block));
let block = self.block;
self.block += self.period;
result.map(|header| (block, header))
}
}
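
A usage sketch for BlockIterator (store is an assumed BlockHeaderProvider): stepping by a fixed period yields (height, header) pairs until a number is not on the canon chain, for example visiting every 2016th block the way a difficulty-retarget walk would:

for (number, header) in BlockIterator::new(0, 2016, &store) {
    println!("block {} at time {}", number, header.time);
}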

@@ -0,0 +1,47 @@
use lru_cache::LruCache;
use parking_lot::Mutex;
use hash::H256;
use chain::BlockHeader;
use kv::{KeyValueDatabase, KeyState, Operation, KeyValue, Key, Value, Transaction};

pub struct CacheDatabase<T> where T: KeyValueDatabase {
db: T,
header: Mutex<LruCache<H256, KeyState<BlockHeader>>>,
}

impl<T> CacheDatabase<T> where T: KeyValueDatabase {
pub fn new(db: T) -> Self {
CacheDatabase {
db: db,
// 144 (blocks per day) * 14 (days) + 100 (arbitrary number)
header: Mutex::new(LruCache::new(2116)),
}
}
}

impl<T> KeyValueDatabase for CacheDatabase<T> where T: KeyValueDatabase {
fn write(&self, tx: Transaction) -> Result<(), String> {
for op in &tx.operations {
match *op {
Operation::Insert(KeyValue::BlockHeader(ref hash, ref header)) => {
self.header.lock().insert(hash.clone(), KeyState::Insert(header.clone()));
},
Operation::Delete(Key::BlockHeader(ref hash)) => {
self.header.lock().insert(hash.clone(), KeyState::Delete);
},
_ => (),
}
}
self.db.write(tx)
}

fn get(&self, key: &Key) -> Result<KeyState<Value>, String> {
if let Key::BlockHeader(ref hash) = *key {
let mut header = self.header.lock();
if let Some(state) = header.get_mut(hash) {
return Ok(state.clone().map(Value::BlockHeader))
}
}
self.db.get(key)
}
}
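
A sketch of composing the new cache layer directly (open_with_cache earlier in this commit does the same wrapping around an AutoFlushingOverlayDatabase). Because the cache stores KeyState rather than plain values, even a cached deletion is answered without touching the backing store; some_hash is an assumed H256:

let db = CacheDatabase::new(MemoryDatabase::default());
// Block-header reads are answered from the LRU once the header was written:
let header = db.get(&Key::BlockHeader(some_hash.clone()))?
    .into_option()
    .and_then(Value::as_block_header);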

@@ -1,7 +1,7 @@
use kv::{Location, Transaction, Value};
use kv::{Transaction, KeyState, Key, Value};

pub trait KeyValueDatabase: Send + Sync {
fn write(&self, tx: Transaction) -> Result<(), String>;

fn get(&self, location: Location, key: &[u8]) -> Result<Option<Value>, String>;
fn get(&self, key: &Key) -> Result<KeyState<Value>, String>;
}
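
The contract change here is that a backend now distinguishes three outcomes instead of two: a live value, an explicit deletion, and no knowledge at all. A minimal sketch of an implementor under that reading (NullDatabase is a hypothetical name, not part of the commit):

struct NullDatabase;

impl KeyValueDatabase for NullDatabase {
    fn write(&self, _tx: Transaction) -> Result<(), String> {
        Ok(()) // discard all operations
    }

    fn get(&self, _key: &Key) -> Result<KeyState<Value>, String> {
        // Knowing nothing is KeyState::Unknown, not Delete; the overlay
        // databases later in this commit only fall through on Unknown.
        Ok(KeyState::Unknown)
    }
}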

@@ -6,7 +6,9 @@ use rocksdb::{
DB, Writable, WriteBatch, WriteOptions, IteratorMode, DBIterator,
Options, DBCompactionStyle, BlockBasedOptions, Cache, Column, ReadOptions
};
use kv::{Transaction, Operation, Location, Value, KeyValueDatabase};
use bytes::Bytes;
use kv::{Transaction, RawTransaction, RawOperation, Location, Value, KeyValueDatabase, Key, KeyState, RawKeyValue,
RawKey};

const DB_BACKGROUND_FLUSHES: i32 = 2;
const DB_BACKGROUND_COMPACTIONS: i32 = 2;

@@ -121,11 +123,14 @@ pub struct Database {

impl KeyValueDatabase for Database {
fn write(&self, tx: Transaction) -> Result<(), String> {
Database::write(self, tx)
Database::write(self, (&tx).into())
}

fn get(&self, location: Location, key: &[u8]) -> Result<Option<Value>, String> {
Database::get(self, location, key)
fn get(&self, key: &Key) -> Result<KeyState<Value>, String> {
match Database::get(self, &key.into())? {
Some(value) => Ok(KeyState::Insert(Value::for_key(key, &value)?)),
None => Ok(KeyState::Unknown)
}
}
}

@@ -239,16 +244,16 @@ impl Database {
}

/// Commit transaction to database.
pub fn write(&self, tx: Transaction) -> Result<(), String> {
pub fn write(&self, tx: RawTransaction) -> Result<(), String> {
let DBAndColumns { ref db, ref cfs } = self.db;
let batch = WriteBatch::new();
for op in tx.operations.into_iter() {
match op {
Operation::Insert { location, key, value } => match location {
RawOperation::Insert(RawKeyValue { location, key, value }) => match location {
Location::DB => batch.put(&key, &value)?,
Location::Column(col) => batch.put_cf(cfs[col as usize], &key, &value)?,
},
Operation::Delete { location, key } => match location {
RawOperation::Delete(RawKey { location, key }) => match location {
Location::DB => batch.delete(&key)?,
Location::Column(col) => batch.delete_cf(cfs[col as usize], &key)?,
},

@@ -258,16 +263,16 @@ impl Database {
}

/// Get value by key.
pub fn get(&self, location: Location, key: &[u8]) -> Result<Option<Value>, String> {
pub fn get(&self, key: &RawKey) -> Result<Option<Bytes>, String> {
let DBAndColumns { ref db, ref cfs } = self.db;
match location {
match key.location {
Location::DB => {
let value = db.get_opt(key, &self.read_opts)?;
Ok(value.map(|v| Value::from_slice(&v)))
let value = db.get_opt(&key.key, &self.read_opts)?;
Ok(value.map(|v| (&*v).into()))
},
Location::Column(col) => {
let value = db.get_cf_opt(cfs[col as usize], key, &self.read_opts)?;
Ok(value.map(|v| Value::from_slice(&v)))
let value = db.get_cf_opt(cfs[col as usize], &key.key, &self.read_opts)?;
Ok(value.map(|v| (&*v).into()))
}
}
}

@@ -294,7 +299,7 @@ mod tests {
extern crate tempdir;

use self::tempdir::TempDir;
use kv::{Transaction, Location};
use kv::{RawTransaction, Location};
use super::*;

fn test_db(config: DatabaseConfig) {

@@ -305,12 +310,12 @@ mod tests {
let key2 = b"key2";
let key3 = b"key3";

let mut batch = Transaction::default();
let mut batch = RawTransaction::default();
batch.insert_raw(Location::DB, key1, b"cat");
batch.insert_raw(Location::DB, key2, b"dog");
db.write(batch).unwrap();

assert_eq!(&*db.get(Location::DB, key1).unwrap().unwrap(), b"cat");
assert_eq!(&*db.get(&RawKey::new(Location::DB, key1 as &[u8])).unwrap().unwrap(), b"cat");

let contents: Vec<_> = db.iter(Location::DB).collect();
assert_eq!(contents.len(), 2);

@@ -319,21 +324,21 @@ mod tests {
assert_eq!(&*contents[1].0, &*key2);
assert_eq!(&*contents[1].1, b"dog");

let mut batch = Transaction::default();
let mut batch = RawTransaction::default();
batch.delete_raw(Location::DB, key1);
db.write(batch).unwrap();

assert_eq!(db.get(Location::DB, key1).unwrap(), None);
assert_eq!(db.get(&RawKey::new(Location::DB, key1 as &[u8])).unwrap(), None);

let mut batch = Transaction::default();
let mut batch = RawTransaction::default();
batch.insert_raw(Location::DB, key1, b"cat");
db.write(batch).unwrap();

let mut transaction = Transaction::default();
let mut transaction = RawTransaction::default();
transaction.insert_raw(Location::DB, key3, b"elephant");
transaction.delete_raw(Location::DB, key1);
db.write(transaction).unwrap();
assert_eq!(&*db.get(Location::DB, key3).unwrap().unwrap(), b"elephant");
assert_eq!(&*db.get(&RawKey::new(Location::DB, key3 as &[u8])).unwrap().unwrap(), b"elephant");
}

#[test]

@@ -1,47 +1,64 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::mem::replace;
use parking_lot::RwLock;
use kv::{Transaction, Key, KeyState, Location, Operation, Value, KeyValueDatabase};
use hash::H256;
use bytes::Bytes;
use ser::List;
use chain::{Transaction as ChainTransaction, BlockHeader};
use kv::{Transaction, Key, KeyState, Operation, Value, KeyValueDatabase, KeyValue};
use {TransactionMeta};

#[derive(Debug)]
pub struct MemoryDatabase {
db: RwLock<HashMap<Location, HashMap<Key, KeyState>>>,
#[derive(Default, Debug)]
struct InnerDatabase {
meta: HashMap<&'static str, KeyState<Bytes>>,
block_hash: HashMap<u32, KeyState<H256>>,
block_header: HashMap<H256, KeyState<BlockHeader>>,
block_transactions: HashMap<H256, KeyState<List<H256>>>,
transaction: HashMap<H256, KeyState<ChainTransaction>>,
transaction_meta: HashMap<H256, KeyState<TransactionMeta>>,
block_number: HashMap<H256, KeyState<u32>>,
}

impl Default for MemoryDatabase {
fn default() -> Self {
MemoryDatabase {
db: RwLock::default(),
}
}
#[derive(Default, Debug)]
pub struct MemoryDatabase {
db: RwLock<InnerDatabase>,
}

impl MemoryDatabase {
pub fn drain_transaction(&self) -> Transaction {
let mut db = self.db.write();
let operations = db.drain()
.flat_map(|(location, action)| {
action.into_iter().map(|(key, state)| match state {
KeyState::Insert(value) => Operation::Insert {
location: location,
key: key,
value: value,
},
KeyState::Delete => Operation::Delete {
location: location,
key: key,
}
})
.collect::<Vec<_>>()
})
.collect();
Transaction {
operations: operations,
}
}
let meta = replace(&mut db.meta, HashMap::default()).into_iter()
.flat_map(|(key, state)| state.into_operation(key, KeyValue::Meta, Key::Meta));

pub fn is_known(&self, location: Location, key: &[u8]) -> bool {
self.db.read().get(&location).and_then(|db| db.get(key)).is_some()
let block_hash = replace(&mut db.block_hash, HashMap::default()).into_iter()
.flat_map(|(key, state)| state.into_operation(key, KeyValue::BlockHash, Key::BlockHash));

let block_header = replace(&mut db.block_header, HashMap::default()).into_iter()
.flat_map(|(key, state)| state.into_operation(key, KeyValue::BlockHeader, Key::BlockHeader));

let block_transactions = replace(&mut db.block_transactions, HashMap::default()).into_iter()
.flat_map(|(key, state)| state.into_operation(key, KeyValue::BlockTransactions, Key::BlockTransactions));

let transaction = replace(&mut db.transaction, HashMap::default()).into_iter()
.flat_map(|(key, state)| state.into_operation(key, KeyValue::Transaction, Key::Transaction));

let transaction_meta = replace(&mut db.transaction_meta, HashMap::default()).into_iter()
.flat_map(|(key, state)| state.into_operation(key, KeyValue::TransactionMeta, Key::TransactionMeta));

let block_number = replace(&mut db.block_number, HashMap::default()).into_iter()
.flat_map(|(key, state)| state.into_operation(key, KeyValue::BlockNumber, Key::BlockNumber));

Transaction {
operations: meta
.chain(block_hash)
.chain(block_header)
.chain(block_transactions)
.chain(transaction)
.chain(transaction_meta)
.chain(block_number)
.collect()
}
}
}

@@ -50,24 +67,42 @@ impl KeyValueDatabase for MemoryDatabase {
let mut db = self.db.write();
for op in tx.operations.into_iter() {
match op {
Operation::Insert { location, key, value } => {
let db = db.entry(location).or_insert_with(HashMap::default);
db.insert(key, KeyState::Insert(value));
},
Operation::Delete { location, key } => {
let db = db.entry(location).or_insert_with(HashMap::default);
db.insert(key, KeyState::Delete);
Operation::Insert(insert) => match insert {
KeyValue::Meta(key, value) => { db.meta.insert(key, KeyState::Insert(value)); },
KeyValue::BlockHash(key, value) => { db.block_hash.insert(key, KeyState::Insert(value)); },
KeyValue::BlockHeader(key, value) => { db.block_header.insert(key, KeyState::Insert(value)); },
KeyValue::BlockTransactions(key, value) => { db.block_transactions.insert(key, KeyState::Insert(value)); },
KeyValue::Transaction(key, value) => { db.transaction.insert(key, KeyState::Insert(value)); },
KeyValue::TransactionMeta(key, value) => { db.transaction_meta.insert(key, KeyState::Insert(value)); },
KeyValue::BlockNumber(key, value) => { db.block_number.insert(key, KeyState::Insert(value)); },
},
Operation::Delete(delete) => match delete {
Key::Meta(key) => { db.meta.insert(key, KeyState::Delete); }
Key::BlockHash(key) => { db.block_hash.insert(key, KeyState::Delete); }
Key::BlockHeader(key) => { db.block_header.insert(key, KeyState::Delete); }
Key::BlockTransactions(key) => { db.block_transactions.insert(key, KeyState::Delete); }
Key::Transaction(key) => { db.transaction.insert(key, KeyState::Delete); }
Key::TransactionMeta(key) => { db.transaction_meta.insert(key, KeyState::Delete); }
Key::BlockNumber(key) => { db.block_number.insert(key, KeyState::Delete); }
}
}
}
Ok(())
}

fn get(&self, location: Location, key: &[u8]) -> Result<Option<Value>, String> {
match self.db.read().get(&location).and_then(|db| db.get(key)) {
Some(&KeyState::Insert(ref value)) => Ok(Some(value.clone())),
Some(&KeyState::Delete) | None => Ok(None),
}
fn get(&self, key: &Key) -> Result<KeyState<Value>, String> {
let db = self.db.read();
let result = match *key {
Key::Meta(ref key) => db.meta.get(key).cloned().unwrap_or_default().map(Value::Meta),
Key::BlockHash(ref key) => db.block_hash.get(key).cloned().unwrap_or_default().map(Value::BlockHash),
Key::BlockHeader(ref key) => db.block_header.get(key).cloned().unwrap_or_default().map(Value::BlockHeader),
Key::BlockTransactions(ref key) => db.block_transactions.get(key).cloned().unwrap_or_default().map(Value::BlockTransactions),
Key::Transaction(ref key) => db.transaction.get(key).cloned().unwrap_or_default().map(Value::Transaction),
Key::TransactionMeta(ref key) => db.transaction_meta.get(key).cloned().unwrap_or_default().map(Value::TransactionMeta),
Key::BlockNumber(ref key) => db.block_number.get(key).cloned().unwrap_or_default().map(Value::BlockNumber),
};

Ok(result)
}
}

@@ -97,7 +132,7 @@ impl KeyValueDatabase for SharedMemoryDatabase {
self.db.write(tx)
}

fn get(&self, location: Location, key: &[u8]) -> Result<Option<Value>, String> {
self.db.get(location, key)
fn get(&self, key: &Key) -> Result<KeyState<Value>, String> {
self.db.get(key)
}
}
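
A sketch of the round trip that drain_transaction enables for the overlay databases: typed writes accumulate in the MemoryDatabase, and flushing converts each touched key back into an Operation for the backing store (hash and backing_db are assumed names):

let memory = MemoryDatabase::default();
let mut tx = Transaction::new();
tx.insert(KeyValue::BlockNumber(hash.clone(), 42));
memory.write(tx)?;
// One Operation per touched key, rebuilt via KeyState::into_operation:
backing_db.write(memory.drain_transaction())?;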

@@ -1,11 +1,18 @@
mod cachedb;
mod db;
mod diskdb;
mod memorydb;
mod overlaydb;
mod transaction;

pub use self::cachedb::CacheDatabase;
pub use self::db::KeyValueDatabase;
pub use self::diskdb::{Database as DiskDatabase, DatabaseConfig, CompactionProfile};
pub use self::memorydb::{MemoryDatabase, SharedMemoryDatabase};
pub use self::overlaydb::{OverlayDatabase, AutoFlushingOverlayDatabase};
pub use self::transaction::{Transaction, Operation, Location, Key, Value, KeyState};
pub use self::transaction::{
RawTransaction, Transaction, RawOperation, Operation, Location, KeyState,
Key, Value, KeyValue, RawKeyValue, RawKey,
COL_COUNT, COL_META, COL_BLOCK_HASHES, COL_BLOCK_HEADERS, COL_BLOCK_TRANSACTIONS,
COL_TRANSACTIONS, COL_TRANSACTIONS_META, COL_BLOCK_NUMBERS
};

@@ -1,5 +1,5 @@
use parking_lot::Mutex;
use kv::{Transaction, Location, Value, KeyValueDatabase, MemoryDatabase};
use kv::{Transaction, Value, KeyValueDatabase, MemoryDatabase, KeyState, Key};

pub struct OverlayDatabase<'a, T> where T: 'a + KeyValueDatabase {
db: &'a T,

@@ -24,11 +24,10 @@ impl<'a, T> KeyValueDatabase for OverlayDatabase<'a, T> where T: 'a + KeyValueDa
self.overlay.write(tx)
}

fn get(&self, location: Location, key: &[u8]) -> Result<Option<Value>, String> {
if self.overlay.is_known(location, key) {
self.overlay.get(location, key)
} else {
self.db.get(location, key)
fn get(&self, key: &Key) -> Result<KeyState<Value>, String> {
match self.overlay.get(key)? {
KeyState::Unknown => self.db.get(key),
exists => Ok(exists)
}
}
}

@@ -67,11 +66,10 @@ impl<T> KeyValueDatabase for AutoFlushingOverlayDatabase<T> where T: KeyValueDat
Ok(())
}

fn get(&self, location: Location, key: &[u8]) -> Result<Option<Value>, String> {
if self.overlay.is_known(location, key) {
self.overlay.get(location, key)
} else {
self.db.get(location, key)
fn get(&self, key: &Key) -> Result<KeyState<Value>, String> {
match self.overlay.get(key)? {
KeyState::Unknown => self.db.get(key),
exists => Ok(exists)
}
}
}
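
This is where the tri-state KeyState pays off: a deletion buffered in the overlay must shadow an older value in the backing database, so the lookup only falls through on Unknown. A sketch under the assumption that overlay wraps a db that already holds BlockHash(1):

let mut tx = Transaction::new();
tx.delete(Key::BlockHash(1));
overlay.write(tx)?;
// The buffered Delete wins over the value still present in the backing db:
assert!(overlay.get(&Key::BlockHash(1))?.into_option().is_none());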

@@ -1,39 +1,161 @@
use elastic_array::{ElasticArray32, ElasticArray128};
use ser::{Serializable, serialize};
use bytes::Bytes;
use hash::H256;
use ser::{serialize, List, deserialize};
use chain::{Transaction as ChainTransaction, BlockHeader};
use {TransactionMeta};

pub type Key = ElasticArray32<u8>;
pub type Value = ElasticArray128<u8>;
pub const COL_COUNT: u32 = 10;
pub const COL_META: u32 = 0;
pub const COL_BLOCK_HASHES: u32 = 1;
pub const COL_BLOCK_HEADERS: u32 = 2;
pub const COL_BLOCK_TRANSACTIONS: u32 = 3;
pub const COL_TRANSACTIONS: u32 = 4;
pub const COL_TRANSACTIONS_META: u32 = 5;
pub const COL_BLOCK_NUMBERS: u32 = 6;

#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
pub enum Location {
DB,
Column(u32),
#[derive(Debug)]
pub enum Operation {
Insert(KeyValue),
Delete(Key),
}

impl From<u32> for Location {
fn from(column: u32) -> Location {
Location::Column(column)
#[derive(Debug)]
pub enum KeyValue {
Meta(&'static str, Bytes),
BlockHash(u32, H256),
BlockHeader(H256, BlockHeader),
BlockTransactions(H256, List<H256>),
Transaction(H256, ChainTransaction),
TransactionMeta(H256, TransactionMeta),
BlockNumber(H256, u32),
}

#[derive(Debug)]
pub enum Key {
Meta(&'static str),
BlockHash(u32),
BlockHeader(H256),
BlockTransactions(H256),
Transaction(H256),
TransactionMeta(H256),
BlockNumber(H256),
}

#[derive(Debug, Clone)]
pub enum Value {
Meta(Bytes),
BlockHash(H256),
BlockHeader(BlockHeader),
BlockTransactions(List<H256>),
Transaction(ChainTransaction),
TransactionMeta(TransactionMeta),
BlockNumber(u32),
}

impl Value {
pub fn for_key(key: &Key, bytes: &[u8]) -> Result<Self, String> {
match *key {
Key::Meta(_) => deserialize(bytes).map(Value::Meta),
Key::BlockHash(_) => deserialize(bytes).map(Value::BlockHash),
Key::BlockHeader(_) => deserialize(bytes).map(Value::BlockHeader),
Key::BlockTransactions(_) => deserialize(bytes).map(Value::BlockTransactions),
Key::Transaction(_) => deserialize(bytes).map(Value::Transaction),
Key::TransactionMeta(_) => deserialize(bytes).map(Value::TransactionMeta),
Key::BlockNumber(_) => deserialize(bytes).map(Value::BlockNumber),
}.map_err(|e| format!("{:?}", e))
}

pub fn as_meta(self) -> Option<Bytes> {
match self {
Value::Meta(bytes) => Some(bytes),
_ => None,
}
}

pub fn as_block_hash(self) -> Option<H256> {
match self {
Value::BlockHash(block_hash) => Some(block_hash),
_ => None,
}
}

pub fn as_block_header(self) -> Option<BlockHeader> {
match self {
Value::BlockHeader(block_header) => Some(block_header),
_ => None,
}
}

pub fn as_block_transactions(self) -> Option<List<H256>> {
match self {
Value::BlockTransactions(list) => Some(list),
_ => None,
}
}

pub fn as_transaction(self) -> Option<ChainTransaction> {
match self {
Value::Transaction(transaction) => Some(transaction),
_ => None,
}
}

pub fn as_transaction_meta(self) -> Option<TransactionMeta> {
match self {
Value::TransactionMeta(meta) => Some(meta),
_ => None,
}
}

pub fn as_block_number(self) -> Option<u32> {
match self {
Value::BlockNumber(number) => Some(number),
_ => None,
}
}
}

pub enum Operation {
Insert {
location: Location,
key: Key,
value: Value,
},
Delete {
location: Location,
key: Key,
#[derive(Debug, Clone)]
pub enum KeyState<V> {
Insert(V),
Delete,
Unknown,
}

impl<V> Default for KeyState<V> {
fn default() -> Self {
KeyState::Unknown
}
}

impl<V> KeyState<V> {
pub fn map<U, F>(self, f: F) -> KeyState<U> where F: FnOnce(V) -> U {
match self {
KeyState::Insert(value) => KeyState::Insert(f(value)),
KeyState::Delete => KeyState::Delete,
KeyState::Unknown => KeyState::Unknown,
}
}

pub fn into_option(self) -> Option<V> {
match self {
KeyState::Insert(value) => Some(value),
KeyState::Delete => None,
KeyState::Unknown => None,
}
}

pub fn into_operation<K, I, D>(self, key: K, insert: I, delete: D) -> Option<Operation>
where I: FnOnce(K, V) -> KeyValue, D: FnOnce(K) -> Key {
match self {
KeyState::Insert(value) => Some(Operation::Insert(insert(key, value))),
KeyState::Delete => Some(Operation::Delete(delete(key))),
KeyState::Unknown => None,
}
}
}

#[derive(Debug)]
pub enum KeyState {
Insert(Value),
Delete,
}

pub struct Transaction {
pub operations: Vec<Operation>,
}

@@ -47,32 +169,143 @@ impl Default for Transaction {
}

impl Transaction {
pub fn new() -> Transaction {
pub fn new() -> Self {
Transaction::default()
}

pub fn insert_raw(&mut self, location: Location, key: &[u8], value: &[u8]) {
let operation = Operation::Insert {
location: location,
key: Key::from_slice(key),
value: Value::from_slice(value),
};
self.operations.push(operation);
pub fn insert(&mut self, insert: KeyValue) {
self.operations.push(Operation::Insert(insert));
}

pub fn insert<K, V>(&mut self, location: Location, key: &K, value: &V) where K: Serializable, V: Serializable {
self.insert_raw(location, &serialize(key), &serialize(value))
pub fn delete(&mut self, delete: Key) {
self.operations.push(Operation::Delete(delete));
}
}

#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
pub enum Location {
DB,
Column(u32),
}

impl From<u32> for Location {
fn from(column: u32) -> Location {
Location::Column(column)
}
}

pub enum RawOperation {
Insert(RawKeyValue),
Delete(RawKey),
}

pub struct RawKeyValue {
pub location: Location,
pub key: Bytes,
pub value: Bytes,
}

impl<'a> From<&'a KeyValue> for RawKeyValue {
fn from(i: &'a KeyValue) -> Self {
let (location, key, value) = match *i {
KeyValue::Meta(ref key, ref value) => (COL_META, serialize(key), serialize(value)),
KeyValue::BlockHash(ref key, ref value) => (COL_BLOCK_HASHES, serialize(key), serialize(value)),
KeyValue::BlockHeader(ref key, ref value) => (COL_BLOCK_HEADERS, serialize(key), serialize(value)),
KeyValue::BlockTransactions(ref key, ref value) => (COL_BLOCK_TRANSACTIONS, serialize(key), serialize(value)),
KeyValue::Transaction(ref key, ref value) => (COL_TRANSACTIONS, serialize(key), serialize(value)),
KeyValue::TransactionMeta(ref key, ref value) => (COL_TRANSACTIONS_META, serialize(key), serialize(value)),
KeyValue::BlockNumber(ref key, ref value) => (COL_BLOCK_NUMBERS, serialize(key), serialize(value)),
};

RawKeyValue {
location: location.into(),
key: key,
value: value,
}
}
}

pub struct RawKey {
pub location: Location,
pub key: Bytes,
}

impl RawKey {
pub fn new<B>(location: Location, key: B) -> Self where B: Into<Bytes> {
RawKey {
location: location,
key: key.into(),
}
}
}

impl<'a> From<&'a Key> for RawKey {
fn from(d: &'a Key) -> Self {
let (location, key) = match *d {
Key::Meta(ref key) => (COL_META, serialize(key)),
Key::BlockHash(ref key) => (COL_BLOCK_HASHES, serialize(key)),
Key::BlockHeader(ref key) => (COL_BLOCK_HEADERS, serialize(key)),
Key::BlockTransactions(ref key) => (COL_BLOCK_TRANSACTIONS, serialize(key)),
Key::Transaction(ref key) => (COL_TRANSACTIONS, serialize(key)),
Key::TransactionMeta(ref key) => (COL_TRANSACTIONS_META, serialize(key)),
Key::BlockNumber(ref key) => (COL_BLOCK_NUMBERS, serialize(key)),
};

RawKey {
location: location.into(),
key: key,
}
}
}

impl<'a> From<&'a Operation> for RawOperation {
fn from(o: &'a Operation) -> Self {
match *o {
Operation::Insert(ref insert) => RawOperation::Insert(insert.into()),
Operation::Delete(ref delete) => RawOperation::Delete(delete.into()),
}
}
}

pub struct RawTransaction {
pub operations: Vec<RawOperation>,
}

impl<'a> From<&'a Transaction> for RawTransaction {
fn from(tx: &'a Transaction) -> Self {
RawTransaction {
operations: tx.operations.iter().map(Into::into).collect()
}
}
}

impl Default for RawTransaction {
fn default() -> Self {
RawTransaction {
operations: Vec::with_capacity(32),
}
}
}

impl RawTransaction {
pub fn new() -> RawTransaction {
RawTransaction::default()
}

pub fn insert_raw(&mut self, location: Location, key: &[u8], value: &[u8]) {
let operation = RawOperation::Insert(RawKeyValue {
location: location,
key: key.into(),
value: value.into(),
});
self.operations.push(operation);
}

pub fn delete_raw(&mut self, location: Location, key: &[u8]) {
let operation = Operation::Delete {
let operation = RawOperation::Delete(RawKey {
location: location,
key: Key::from_slice(key),
};
key: key.into(),
});
self.operations.push(operation);
}

pub fn delete<K>(&mut self, location: Location, key: &K) where K: Serializable {
self.delete_raw(location, &serialize(key))
}
}
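
End-to-end sketch of the typed-to-raw split defined above: callers build a typed Transaction, and the conversion to RawTransaction (the form the RocksDB backend consumes) picks the column and serializes keys and values in one place (hash is an assumed H256):

let mut tx = Transaction::new();
tx.insert(KeyValue::Meta("best_block_number", serialize(&42u32)));
tx.delete(Key::BlockNumber(hash.clone()));
let raw: RawTransaction = (&tx).into();
assert_eq!(raw.operations.len(), 2);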

@@ -4,6 +4,7 @@ extern crate parking_lot;
#[macro_use]
extern crate log;
extern crate bit_vec;
extern crate lru_cache;

extern crate primitives;
extern crate serialization as ser;

@@ -11,13 +12,15 @@ extern crate chain;

pub mod kv;
mod best_block;
mod block_ancestors;
mod block_chain;
mod block_chain_db;
mod block_impls;
mod block_iterator;
mod block_origin;
mod block_provider;
mod block_ref;
mod block_chain;
mod block_chain_db;
mod error;
mod block_impls;
mod store;
mod transaction_meta;
mod transaction_provider;

@@ -25,11 +28,13 @@ mod transaction_provider;

pub use primitives::{hash, bytes};

pub use best_block::BestBlock;
pub use block_ancestors::BlockAncestors;
pub use block_chain::{BlockChain, ForkChain, Forkable};
pub use block_chain_db::{BlockChainDatabase, ForkChainDatabase};
pub use block_iterator::BlockIterator;
pub use block_origin::{BlockOrigin, SideChainOrigin};
pub use block_provider::{BlockHeaderProvider, BlockProvider, IndexedBlockProvider};
pub use block_ref::BlockRef;
pub use block_chain::{BlockChain, ForkChain, Forkable};
pub use block_chain_db::{BlockChainDatabase, ForkChainDatabase};
pub use error::Error;
pub use store::{AsSubstore, Store, SharedStore, CanonStore};
pub use transaction_meta::TransactionMeta;

@@ -13,9 +13,9 @@ fn insert_block() {
let b1: IndexedBlock = test_data::block_h1().into();
let b2: IndexedBlock = test_data::block_h2().into();

store.insert(&b0).unwrap();
store.insert(&b1).unwrap();
store.insert(&b2).unwrap();
store.insert(b0.clone()).unwrap();
store.insert(b1.clone()).unwrap();
store.insert(b2.clone()).unwrap();

assert_eq!(0, store.best_block().number);
assert!(store.best_block().hash.is_zero());

@@ -55,9 +55,9 @@ fn reopen_db() {

{
let store = BlockChainDatabase::open(shared_database.clone());
store.insert(&b0).unwrap();
store.insert(&b1).unwrap();
store.insert(&b2).unwrap();
store.insert(b0.clone()).unwrap();
store.insert(b1.clone()).unwrap();
store.insert(b2.clone()).unwrap();

store.canonize(b0.hash()).unwrap();
store.canonize(b1.hash()).unwrap();

@@ -80,9 +80,9 @@ fn switch_to_simple_fork() {
let b1: IndexedBlock = test_data::block_h1().into();
let b2: IndexedBlock = test_data::block_h2().into();

store.insert(&b0).unwrap();
store.insert(&b1).unwrap();
store.insert(&b2).unwrap();
store.insert(b0.clone()).unwrap();
store.insert(b1.clone()).unwrap();
store.insert(b2.clone()).unwrap();

store.canonize(b0.hash()).unwrap();
store.canonize(b1.hash()).unwrap();

@@ -8,7 +8,7 @@ BIPs that are implemented by pbtc

| BIPs | pbtc | core | unlimited |
| ------ | ------ | ------ | ------ |
| [BIP 9][BIP9] | w | + | ? |
| [BIP 9][BIP9] | + | + | ? |
| [BIP 11][BIP11] | a | + | ? |
| [BIP 13][BIP13] | a | + | ? |
| [BIP 14][BIP14] | - | + | ? |

@@ -26,14 +26,14 @@ BIPs that are implemented by pbtc
| [BIP 61][BIP61] | ? | + | ? |
| [BIP 65][BIP65] | + | + | ? |
| [BIP 66][BIP66] | + | + | ? |
| [BIP 68][BIP68] | w | + | ? |
| [BIP 68][BIP68] | + | + | ? |
| [BIP 70][BIP70] | a | + | ? |
| [BIP 71][BIP71] | a | + | ? |
| [BIP 72][BIP72] | a | + | ? |
| [BIP 90][BIP90] | + | + | ? |
| [BIP 111][BIP111] | ? | + | ? |
| [BIP 112][BIP112] | w | + | ? |
| [BIP 113][BIP113] | w | + | ? |
| [BIP 112][BIP112] | + | + | ? |
| [BIP 113][BIP113] | + | + | ? |
| [BIP 125][BIP125] | a | + | ? |
| [BIP 130][BIP130] | ? | + | ? |
| [BIP 133][BIP133] | ? | + | ? |
|
@@ -1,5 +1,5 @@
use hash::H256;
use super::Magic;
use {Magic, Deployment};

#[derive(Debug, Clone)]
/// Parameters that influence chain consensus.

@@ -16,6 +16,14 @@ pub struct ConsensusParams {
/// Block height at which BIP66 becomes active.
/// See https://github.com/bitcoin/bips/blob/master/bip-0066.mediawiki
pub bip66_height: u32,
/// Version bits activation
pub rule_change_activation_threshold: u32,
/// Number of blocks with the same set of rules
pub miner_confirmation_window: u32,
/// BIP68, BIP112, BIP113 deployment
pub csv_deployment: Option<Deployment>,
/// BIP141, BIP143, BIP147 deployment
pub segwit_deployment: Option<Deployment>,
}

impl ConsensusParams {

@@ -26,18 +34,48 @@ impl ConsensusParams {
bip34_height: 227931, // 000000000000024b89b42a942fe0d9fea3bb44ab7bd1b19115dd6a759c0808b8
bip65_height: 388381, // 000000000000000004c2b624ed5d7756c508d90fd0da2c7c679febfa6c4735f0
bip66_height: 363725, // 00000000000000000379eaa19dce8c9b722d46ae6a57c2f1a988119488b50931
rule_change_activation_threshold: 1916, // 95%
miner_confirmation_window: 2016,
csv_deployment: Some(Deployment {
name: "csv",
bit: 0,
start_time: 1462060800,
timeout: 1493596800,
activation: Some(419328),
}),
segwit_deployment: None,
},
Magic::Testnet => ConsensusParams {
bip16_time: 1333238400, // Apr 1 2012
bip34_height: 21111, // 0000000023b3a96d3484e5abb3755c413e7d41500f8e2a5c3f0dd01299cd8ef8
bip65_height: 581885, // 00000000007f6655f22f98e72ed80d8b06dc761d5da09df0fa1dc4be4f861eb6
bip66_height: 330776, // 000000002104c8c45e99a8853285a3b592602a3ccde2b832481da85e9e4ba182
rule_change_activation_threshold: 1512, // 75%
miner_confirmation_window: 2016,
csv_deployment: Some(Deployment {
name: "csv",
bit: 0,
start_time: 1456790400,
timeout: 1493596800,
activation: Some(770112),
}),
segwit_deployment: None,
},
Magic::Regtest | Magic::Unitest => ConsensusParams {
bip16_time: 1333238400, // Apr 1 2012
bip34_height: 100000000, // not activated on regtest
bip65_height: 1351,
bip66_height: 1251, // used only in rpc tests
rule_change_activation_threshold: 108, // 75%
miner_confirmation_window: 144,
csv_deployment: Some(Deployment {
name: "csv",
bit: 0,
start_time: 0,
timeout: 0,
activation: Some(0),
}),
segwit_deployment: None,
},
}
}

@@ -73,4 +111,18 @@ mod tests {
assert_eq!(ConsensusParams::with_magic(Magic::Testnet).bip66_height, 330776);
assert_eq!(ConsensusParams::with_magic(Magic::Regtest).bip66_height, 1251);
}

#[test]
fn test_consensus_activation_threshold() {
assert_eq!(ConsensusParams::with_magic(Magic::Mainnet).rule_change_activation_threshold, 1916);
assert_eq!(ConsensusParams::with_magic(Magic::Testnet).rule_change_activation_threshold, 1512);
assert_eq!(ConsensusParams::with_magic(Magic::Regtest).rule_change_activation_threshold, 108);
}

#[test]
fn test_consensus_miner_confirmation_window() {
assert_eq!(ConsensusParams::with_magic(Magic::Mainnet).miner_confirmation_window, 2016);
assert_eq!(ConsensusParams::with_magic(Magic::Testnet).miner_confirmation_window, 2016);
assert_eq!(ConsensusParams::with_magic(Magic::Regtest).miner_confirmation_window, 144);
}
}
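
The tests above read the new fields straight off the per-network params. As a quick sanity check of the percentages noted in the comments, a minimal sketch (with_magic, Magic, and the two field names come from this diff; the helper itself is illustrative):

use network::{ConsensusParams, Magic};

// Fraction of a retarget window that must signal before a deployment
// locks in: 1916 / 2016 = 0.95 on mainnet, matching the "95%" comment.
fn signalling_ratio(magic: Magic) -> f64 {
    let params = ConsensusParams::with_magic(magic);
    params.rule_change_activation_threshold as f64 / params.miner_confirmation_window as f64
}
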
@@ -0,0 +1,23 @@
const VERSIONBITS_TOP_MASK: u32 = 0xe0000000;
const VERSIONBITS_TOP_BITS: u32 = 0x20000000;

#[derive(Debug, Clone, Copy)]
pub struct Deployment {
/// Deployment's name
pub name: &'static str,
/// Bit
pub bit: u8,
/// Start time
pub start_time: u32,
/// Timeout
pub timeout: u32,
/// Activation block number (if already activated)
pub activation: Option<u32>,
}

impl Deployment {
pub fn matches(&self, version: u32) -> bool {
(version & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS && (version & (1 << self.bit)) != 0
}
}

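matches is the BIP9 version-bits test: the top three bits of the block version must be the 001 prefix (so the masked value equals 0x20000000) and the deployment's own bit must be set. A worked example against a hypothetical deployment on bit 0, the bit the csv entries in consensus.rs use:

let csv = Deployment { name: "csv", bit: 0, start_time: 0, timeout: 0, activation: None };

assert!(csv.matches(0x2000_0001));  // 001 prefix and bit 0 set
assert!(!csv.matches(0x2000_0002)); // 001 prefix, but bit 0 clear
assert!(!csv.matches(0x4000_0001)); // legacy version 4, prefix is not 001
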
@@ -3,10 +3,12 @@ extern crate primitives;
extern crate serialization as ser;

mod consensus;
mod deployments;
mod magic;

pub use primitives::{hash, compact};

pub use consensus::ConsensusParams;
pub use deployments::Deployment;
pub use magic::Magic;

@@ -30,8 +30,9 @@ pub fn init_db(cfg: &Config, db: &db::SharedStore) -> Result<(), String> {
Some(ref db_genesis_block_hash) if db_genesis_block_hash != genesis_block.hash() => Err("Trying to open database with incompatible genesis block".into()),
Some(_) => Ok(()),
None => {
db.insert(&genesis_block).expect("Failed to insert genesis block to the database");
db.canonize(genesis_block.hash()).expect("Failed to canonize genesis block");
let hash = genesis_block.hash().clone();
db.insert(genesis_block).expect("Failed to insert genesis block to the database");
db.canonize(&hash).expect("Failed to canonize genesis block");
Ok(())
}
}

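The reshuffle above recurs throughout this commit: insert now takes the block by value, so its hash has to be captured before the move, and canonize takes that hash by reference. A minimal sketch of the ownership constraint with simplified stand-in types (the real ones are IndexedBlock and H256):

#[derive(Clone)]
struct Hash([u8; 32]);
struct Block { hash: Hash }

fn insert(_block: Block) {}  // consumes the block
fn canonize(_hash: &Hash) {} // only needs the hash

fn store(block: Block) {
    // Clone the hash first; after insert(block) the block is moved
    // and block.hash can no longer be borrowed.
    let hash = block.hash.clone();
    insert(block);
    canonize(&hash);
}
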
@@ -4,7 +4,7 @@ use std::{ops, str, fmt, io, marker};
use hex::{ToHex, FromHex, FromHexError};

/// Wrapper around `Vec<u8>`
#[derive(Default, PartialEq, Clone)]
#[derive(Default, PartialEq, Clone, Eq, Hash)]
pub struct Bytes(Vec<u8>);

impl Bytes {

@@ -75,11 +75,9 @@ impl BlockChainClientCoreApi for BlockChainClientCore {
None => -1,
};
let block_size = block.size();
// TODO: use real network
let median_time = verification::median_timestamp(
&block.header.raw,
self.storage.as_block_header_provider(),
Magic::Mainnet,
self.storage.as_block_header_provider()
);

VerboseBlock {

@@ -53,12 +53,12 @@ pub struct VerificationFlags {
/// Verify CHECKLOCKTIMEVERIFY
///
/// See BIP65 for details.
pub verify_clocktimeverify: bool,
pub verify_locktime: bool,

/// support CHECKSEQUENCEVERIFY opcode
///
/// See BIP112 for details
pub verify_checksequenceverify: bool,
pub verify_checksequence: bool,

/// Support segregated witness
pub verify_witness: bool,

@@ -73,8 +73,13 @@ impl VerificationFlags {
self
}

pub fn verify_clocktimeverify(mut self, value: bool) -> Self {
self.verify_clocktimeverify = value;
pub fn verify_locktime(mut self, value: bool) -> Self {
self.verify_locktime = value;
self
}

pub fn verify_checksequence(mut self, value: bool) -> Self {
self.verify_checksequence = value;
self
}

@@ -456,7 +456,7 @@ pub fn eval_script(
},
Opcode::OP_NOP => break,
Opcode::OP_CHECKLOCKTIMEVERIFY => {
if flags.verify_clocktimeverify {
if flags.verify_locktime {
// Note that elsewhere numeric opcodes are limited to
// operands in the range -2**31+1 to 2**31-1, however it is
// legal for opcodes to produce results exceeding that

@@ -488,7 +488,7 @@ pub fn eval_script(
}
},
Opcode::OP_CHECKSEQUENCEVERIFY => {
if flags.verify_checksequenceverify {
if flags.verify_checksequence {
let sequence = try!(Num::from_slice(try!(stack.last()), flags.verify_minimaldata, 5));

if sequence.is_negative() {

@@ -1916,7 +1916,7 @@ mod tests {

let flags = VerificationFlags::default()
.verify_p2sh(true)
.verify_clocktimeverify(true);
.verify_locktime(true);
assert_eq!(verify_script(&input, &output, &flags, &checker), Err(Error::NumberOverflow));
}

@@ -10,7 +10,7 @@ mod stream;
pub use primitives::{hash, bytes, compact};

pub use compact_integer::CompactInteger;
pub use list::DeserializableList;
pub use list::List;
pub use reader::{Reader, Deserializable, deserialize, deserialize_iterator, ReadIterator, Error};
pub use stream::{Stream, Serializable, serialize, serialize_list, serialized_list_size};

@@ -1,16 +1,27 @@
use std::io;
use {Deserializable, Error, Reader};
use {Serializable, Deserializable, Error, Reader, Stream};

pub struct DeserializableList<T>(Vec<T>) where T: Deserializable;
#[derive(Debug, Clone)]
pub struct List<T>(Vec<T>);

impl<T> List<T> where T: Serializable + Deserializable {
pub fn from(vec: Vec<T>) -> Self {
List(vec)
}

impl<T> DeserializableList<T> where T: Deserializable{
pub fn into(self) -> Vec<T> {
self.0
}
}

impl<D> Deserializable for DeserializableList<D> where D: Deserializable {
fn deserialize<T>(reader: &mut Reader<T>) -> Result<Self, Error> where T: io::Read {
reader.read_list().map(DeserializableList)
impl<S> Serializable for List<S> where S: Serializable {
fn serialize(&self, s: &mut Stream) {
s.append_list(&self.0);
}
}

impl<D> Deserializable for List<D> where D: Deserializable {
fn deserialize<T>(reader: &mut Reader<T>) -> Result<Self, Error> where T: io::Read {
reader.read_list().map(List)
}
}

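A usage sketch for the renamed List type, assuming chain::Transaction implements Serializable and Deserializable as it does elsewhere in this repository; everything except List::from, into, serialize, and deserialize is an assumption:

use ser::{serialize, deserialize, List};
use chain::Transaction;

fn roundtrip(txs: Vec<Transaction>) -> Vec<Transaction> {
    // serialize goes through the new Serializable impl (append_list),
    // deserialize through the existing read_list path.
    let bytes = serialize(&List::from(txs));
    let decoded: List<Transaction> = deserialize(bytes.as_ref())
        .expect("roundtrip of a freshly serialized buffer");
    decoded.into()
}
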
@@ -85,8 +85,9 @@ impl BlocksWriter {
return Err(err);
}
} else {
self.storage.insert(&block).map_err(Error::Database)?;
self.storage.canonize(block.hash()).map_err(Error::Database)?;
let hash = block.hash().clone();
self.storage.insert(block).map_err(Error::Database)?;
self.storage.canonize(&hash).map_err(Error::Database)?;
}
}

@@ -123,10 +124,11 @@ impl VerificationSink for BlocksWriterSink {

impl BlockVerificationSink for BlocksWriterSink {
fn on_block_verification_success(&self, block: chain::IndexedBlock) -> Option<Vec<VerificationTask>> {
if let Err(err) = self.data.storage.insert(&block) {
let hash = block.hash().clone();
if let Err(err) = self.data.storage.insert(block) {
*self.data.err.lock() = Some(Error::Database(err));
}
if let Err(err) = self.data.storage.canonize(block.hash()) {
if let Err(err) = self.data.storage.canonize(&hash) {
*self.data.err.lock() = Some(Error::Database(err));
}

@@ -333,8 +333,7 @@ impl Chain {
}

/// Insert new best block to storage
pub fn insert_best_block(&mut self, block: &IndexedBlock) -> Result<BlockInsertionResult, db::Error> {
trace!(target: "sync", "insert_best_block {:?} best_block: {:?}", block.hash().reversed(), self.storage.best_block());
pub fn insert_best_block(&mut self, block: IndexedBlock) -> Result<BlockInsertionResult, db::Error> {
assert_eq!(Some(self.storage.best_block().hash), self.storage.block_hash(self.storage.best_block().number));
let block_origin = self.storage.block_origin(&block.header)?;
trace!(target: "sync", "insert_best_block {:?} origin: {:?}", block.hash().reversed(), block_origin);

@@ -345,7 +344,7 @@ impl Chain {
},
// case 1: block has been added to the main branch
db::BlockOrigin::CanonChain { .. } => {
self.storage.insert(block)?;
self.storage.insert(block.clone())?;
self.storage.canonize(block.hash())?;

// remember new best block hash

@@ -379,7 +378,7 @@ impl Chain {
// case 2: block has been added to the side branch with reorganization to this branch
db::BlockOrigin::SideChainBecomingCanonChain(origin) => {
let fork = self.storage.fork(origin.clone())?;
fork.store().insert(block)?;
fork.store().insert(block.clone())?;
fork.store().canonize(block.hash())?;
self.storage.switch_to_fork(fork)?;

@@ -445,11 +444,12 @@ impl Chain {
},
// case 3: block has been added to the side branch without reorganization to this branch
db::BlockOrigin::SideChain(_origin) => {
let block_hash = block.hash().clone();
self.storage.insert(block)?;

// remove inserted block + handle possible reorganization in headers chain
// TODO: mk, not sure if it's needed here at all
self.headers_chain.block_inserted_to_storage(block.hash(), &self.best_storage_block.hash);
self.headers_chain.block_inserted_to_storage(&block_hash, &self.best_storage_block.hash);

// no transactions were accepted
// no transactions to reverify

@@ -791,7 +791,7 @@ mod tests {
assert!(chain.information().scheduled == 3 && chain.information().requested == 1
&& chain.information().verifying == 1 && chain.information().stored == 1);
// insert new best block to the chain
chain.insert_best_block(&test_data::block_h1().into()).expect("Db error");
chain.insert_best_block(test_data::block_h1().into()).expect("Db error");
assert!(chain.information().scheduled == 3 && chain.information().requested == 1
&& chain.information().verifying == 1 && chain.information().stored == 2);
assert_eq!(db.best_block().number, 1);

@@ -807,13 +807,13 @@ mod tests {
let block1 = test_data::block_h1();
let block1_hash = block1.hash();

chain.insert_best_block(&block1.into()).expect("Error inserting new block");
chain.insert_best_block(block1.into()).expect("Error inserting new block");
assert_eq!(chain.block_locator_hashes(), vec![block1_hash.clone(), genesis_hash.clone()]);

let block2 = test_data::block_h2();
let block2_hash = block2.hash();

chain.insert_best_block(&block2.into()).expect("Error inserting new block");
chain.insert_best_block(block2.into()).expect("Error inserting new block");
assert_eq!(chain.block_locator_hashes(), vec![block2_hash.clone(), block1_hash.clone(), genesis_hash.clone()]);

let blocks0 = test_data::build_n_empty_blocks_from_genesis(11, 0);

@@ -930,7 +930,7 @@ mod tests {
assert_eq!(chain.information().transactions.transactions_count, 1);

// when block is inserted to the database => all accepted transactions are removed from mempool && verifying queue
chain.insert_best_block(&b1.into()).expect("block accepted");
chain.insert_best_block(b1.into()).expect("block accepted");

assert_eq!(chain.information().transactions.transactions_count, 0);
assert!(!chain.forget_verifying_transaction(&tx1_hash));

@@ -999,15 +999,15 @@ mod tests {
chain.insert_verified_transaction(tx2.into());

// no reorg
let result = chain.insert_best_block(&b1.into()).expect("no error");
let result = chain.insert_best_block(b1.into()).expect("no error");
assert_eq!(result.transactions_to_reverify.len(), 0);

// no reorg
let result = chain.insert_best_block(&b2.into()).expect("no error");
let result = chain.insert_best_block(b2.into()).expect("no error");
assert_eq!(result.transactions_to_reverify.len(), 0);

// reorg
let result = chain.insert_best_block(&b3.into()).expect("no error");
let result = chain.insert_best_block(b3.into()).expect("no error");
assert_eq!(result.transactions_to_reverify.len(), 2);
assert!(result.transactions_to_reverify.iter().any(|ref tx| &tx.hash == &tx1_hash));
assert!(result.transactions_to_reverify.iter().any(|ref tx| &tx.hash == &tx2_hash));

@@ -1048,18 +1048,18 @@ mod tests {
chain.insert_verified_transaction(tx4.into());
chain.insert_verified_transaction(tx5.into());

assert_eq!(chain.insert_best_block(&b0.clone().into()).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b0.hash()]));
assert_eq!(chain.insert_best_block(b0.clone().into()).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b0.hash()]));
assert_eq!(chain.information().transactions.transactions_count, 3);
assert_eq!(chain.insert_best_block(&b1.clone().into()).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b1.hash()]));
assert_eq!(chain.insert_best_block(b1.clone().into()).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b1.hash()]));
assert_eq!(chain.information().transactions.transactions_count, 3);
assert_eq!(chain.insert_best_block(&b2.clone().into()).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b2.hash()]));
assert_eq!(chain.insert_best_block(b2.clone().into()).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b2.hash()]));
assert_eq!(chain.information().transactions.transactions_count, 3);
assert_eq!(chain.insert_best_block(&b3.clone().into()).expect("block accepted"), BlockInsertionResult::default());
assert_eq!(chain.insert_best_block(b3.clone().into()).expect("block accepted"), BlockInsertionResult::default());
assert_eq!(chain.information().transactions.transactions_count, 3);
assert_eq!(chain.insert_best_block(&b4.clone().into()).expect("block accepted"), BlockInsertionResult::default());
assert_eq!(chain.insert_best_block(b4.clone().into()).expect("block accepted"), BlockInsertionResult::default());
assert_eq!(chain.information().transactions.transactions_count, 3);
// order matters
let insert_result = chain.insert_best_block(&b5.clone().into()).expect("block accepted");
let insert_result = chain.insert_best_block(b5.clone().into()).expect("block accepted");
let transactions_to_reverify_hashes: Vec<_> = insert_result
.transactions_to_reverify
.into_iter()

@@ -1090,7 +1090,7 @@ mod tests {
chain.insert_verified_transaction(tx2.clone().into());
chain.insert_verified_transaction(tx3.clone().into());
// insert verified block with tx1
chain.insert_best_block(&b0.into()).expect("no error");
chain.insert_best_block(b0.into()).expect("no error");
// => tx2 is removed from memory pool, but tx3 remains
assert_eq!(chain.information().transactions.transactions_count, 1);
}

@@ -1013,6 +1013,7 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
// remove flags
let needs_relay = !self.do_not_relay.remove(block.hash());

let block_hash = block.hash().clone();
// insert block to the storage
match {
// remove block from verification queue

@@ -1020,7 +1021,7 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
// or it is removed earlier, when block was removed from the verifying queue
if self.chain.forget_block_with_state_leave_header(block.hash(), BlockState::Verifying) != HashPosition::Missing {
// block was in verification queue => insert to storage
self.chain.insert_best_block(&block)
self.chain.insert_best_block(block)
} else {
Ok(BlockInsertionResult::default())
}

@@ -1037,7 +1038,7 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
}

// awake threads, waiting for this block insertion
self.awake_waiting_threads(block.hash());
self.awake_waiting_threads(&block_hash);

// continue with synchronization
self.execute_synchronization_tasks(None, None);

@@ -1065,7 +1066,7 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
},
Err(e) => {
// process as irrecoverable failure
panic!("Block {} insertion failed with error {:?}", block.hash().to_reversed_str(), e);
panic!("Block {} insertion failed with error {:?}", block_hash.to_reversed_str(), e);
}
}
}

@@ -1715,7 +1716,7 @@ pub mod tests {
fn sync_after_db_insert_nonfatal_fail() {
let block = test_data::block_h2();
let storage = BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]);
assert!(storage.insert(&test_data::block_h2().into()).is_err());
assert!(storage.insert(test_data::block_h2().into()).is_err());
let best_genesis = storage.best_block();

let (_, core, sync) = create_sync(Some(Arc::new(storage)), None);

@@ -563,7 +563,7 @@ pub mod tests {
#[test]
fn server_getblocks_responds_inventory_when_have_unknown_blocks() {
let (storage, _, executor, _, server) = create_synchronization_server();
storage.insert(&test_data::block_h1().into()).expect("Db write error");
storage.insert(test_data::block_h1().into()).expect("Db write error");
storage.canonize(&test_data::block_h1().hash()).unwrap();
// when asking for blocks hashes
server.execute(ServerTask::GetBlocks(0, types::GetBlocks {

@@ -599,7 +599,7 @@ pub mod tests {
#[test]
fn server_getheaders_responds_headers_when_have_unknown_blocks() {
let (storage, _, executor, _, server) = create_synchronization_server();
storage.insert(&test_data::block_h1().into()).expect("Db write error");
storage.insert(test_data::block_h1().into()).expect("Db write error");
storage.canonize(&test_data::block_h1().hash()).unwrap();
// when asking for blocks hashes
let dummy_id = 0;

@@ -743,7 +743,7 @@ pub mod tests {
fn server_responds_with_nonempty_inventory_when_getdata_stop_hash_filled() {
let (storage, _, executor, _, server) = create_synchronization_server();
{
storage.insert(&test_data::block_h1().into()).expect("no error");
storage.insert(test_data::block_h1().into()).expect("no error");
storage.canonize(&test_data::block_h1().hash()).unwrap();
}
// when asking with stop_hash

@@ -765,7 +765,7 @@ pub mod tests {
fn server_responds_with_nonempty_headers_when_getdata_stop_hash_filled() {
let (storage, _, executor, _, server) = create_synchronization_server();
{
storage.insert(&test_data::block_h1().into()).expect("no error");
storage.insert(test_data::block_h1().into()).expect("no error");
storage.canonize(&test_data::block_h1().hash()).unwrap();
}
// when asking with stop_hash

@@ -806,8 +806,8 @@ pub mod tests {
let b2_hash = b2.hash();

// This peer will provide blocks
storage.insert(&b1.clone().into()).expect("no error");
storage.insert(&b2.clone().into()).expect("no error");
storage.insert(b1.clone().into()).expect("no error");
storage.insert(b2.clone().into()).expect("no error");
storage.canonize(&b1.hash()).unwrap();
storage.canonize(&b2.hash()).unwrap();

@@ -902,7 +902,7 @@ pub mod tests {
let b1_hash = b1.hash();

// This peer will provide blocks
storage.insert(&b1.clone().into()).expect("no error");
storage.insert(b1.clone().into()).expect("no error");
storage.canonize(&b1.hash()).unwrap();

// This peer will receive compact block

@@ -7,6 +7,7 @@ authors = ["Nikolay Volf <nikvolf@gmail.com>"]
time = "0.1"
log = "0.3"
rayon = "0.6"
parking_lot = "0.4"
primitives = { path = "../primitives" }
chain = { path = "../chain" }
serialization = { path = "../serialization" }

@@ -1,12 +1,14 @@
use network::{Magic, ConsensusParams};
use db::TransactionOutputProvider;
use db::{TransactionOutputProvider, BlockHeaderProvider};
use script;
use sigops::transaction_sigops;
use work::block_reward_satoshi;
use duplex_store::DuplexTransactionOutputProvider;
use deployments::Deployments;
use canon::CanonBlock;
use constants::MAX_BLOCK_SIGOPS;
use error::{Error, TransactionError};
use timestamp::median_timestamp;

/// Flexible verification of ordered block
pub struct BlockAcceptor<'a> {

@@ -17,10 +19,17 @@ pub struct BlockAcceptor<'a> {
}

impl<'a> BlockAcceptor<'a> {
pub fn new(store: &'a TransactionOutputProvider, network: Magic, block: CanonBlock<'a>, height: u32) -> Self {
pub fn new(
store: &'a TransactionOutputProvider,
network: Magic,
block: CanonBlock<'a>,
height: u32,
deployments: &'a Deployments,
headers: &'a BlockHeaderProvider,
) -> Self {
let params = network.consensus_params();
BlockAcceptor {
finality: BlockFinality::new(block, height),
finality: BlockFinality::new(block, height, deployments, headers, &params),
coinbase_script: BlockCoinbaseScript::new(block, &params, height),
coinbase_claim: BlockCoinbaseClaim::new(block, store, height),
sigops: BlockSigops::new(block, store, params, MAX_BLOCK_SIGOPS),

@@ -39,18 +48,30 @@ impl<'a> BlockAcceptor<'a> {
pub struct BlockFinality<'a> {
block: CanonBlock<'a>,
height: u32,
csv_active: bool,
headers: &'a BlockHeaderProvider,
}

impl<'a> BlockFinality<'a> {
fn new(block: CanonBlock<'a>, height: u32) -> Self {
fn new(block: CanonBlock<'a>, height: u32, deployments: &'a Deployments, headers: &'a BlockHeaderProvider, params: &ConsensusParams) -> Self {
let csv_active = deployments.csv(height, headers, params);

BlockFinality {
block: block,
height: height,
csv_active: csv_active,
headers: headers,
}
}

fn check(&self) -> Result<(), Error> {
if self.block.is_final(self.height) {
let time_cutoff = if self.csv_active {
median_timestamp(&self.block.header.raw, self.headers)
} else {
self.block.header.raw.time
};

if self.block.transactions.iter().all(|tx| tx.raw.is_final_in_block(self.height, time_cutoff)) {
Ok(())
} else {
Err(Error::NonFinalBlock)

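The cutoff selection above is BIP113: once csv is active, time-locked transactions are measured against median-time-past rather than the block's own timestamp. is_final_in_block itself lives in the chain crate; a simplified, hedged sketch of the underlying lock-time rule it applies (ignoring the sequence-number escape hatch):

// Lock-times below this consensus threshold are block heights,
// values at or above it are unix timestamps.
const LOCKTIME_THRESHOLD: u32 = 500_000_000;

fn locktime_is_final(lock_time: u32, height: u32, time_cutoff: u32) -> bool {
    if lock_time == 0 {
        return true; // no lock at all
    }
    if lock_time < LOCKTIME_THRESHOLD {
        lock_time < height       // height-based lock
    } else {
        lock_time < time_cutoff  // time-based lock: block time, or
                                 // median-time-past when csv is active
    }
}
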
@@ -6,6 +6,7 @@ use canon::CanonBlock;
use accept_block::BlockAcceptor;
use accept_header::HeaderAcceptor;
use accept_transaction::TransactionAcceptor;
use deployments::Deployments;
use duplex_store::DuplexTransactionOutputProvider;

pub struct ChainAcceptor<'a> {

@@ -15,12 +16,13 @@ pub struct ChainAcceptor<'a> {
}

impl<'a> ChainAcceptor<'a> {
pub fn new(store: &'a Store, network: Magic, block: CanonBlock<'a>, height: u32) -> Self {
pub fn new(store: &'a Store, network: Magic, block: CanonBlock<'a>, height: u32, deployments: &'a Deployments) -> Self {
trace!(target: "verification", "Block verification {}", block.hash().to_reversed_str());
let output_store = DuplexTransactionOutputProvider::new(store.as_transaction_output_provider(), block.raw());
let headers = store.as_block_header_provider();
ChainAcceptor {
block: BlockAcceptor::new(store.as_transaction_output_provider(), network, block, height),
header: HeaderAcceptor::new(store.as_block_header_provider(), network, block.header(), height),
block: BlockAcceptor::new(store.as_transaction_output_provider(), network, block, height, deployments, headers),
header: HeaderAcceptor::new(headers, network, block.header(), height, deployments),
transactions: block.transactions()
.into_iter()
.enumerate()

@@ -32,7 +34,9 @@ impl<'a> ChainAcceptor<'a> {
block.hash(),
height,
block.header.raw.time,
tx_index
tx_index,
deployments,
headers,
))
.collect(),
}

@@ -3,6 +3,7 @@ use db::BlockHeaderProvider;
use canon::CanonHeader;
use error::Error;
use work::work_required;
use deployments::Deployments;
use timestamp::median_timestamp;

pub struct HeaderAcceptor<'a> {

@@ -12,13 +13,18 @@ pub struct HeaderAcceptor<'a> {
}

impl<'a> HeaderAcceptor<'a> {
pub fn new(store: &'a BlockHeaderProvider, network: Magic, header: CanonHeader<'a>, height: u32) -> Self {
pub fn new(
store: &'a BlockHeaderProvider,
network: Magic,
header: CanonHeader<'a>,
height: u32,
deployments: &'a Deployments,
) -> Self {
let params = network.consensus_params();
HeaderAcceptor {
// TODO: check last 1000 blocks instead of hardcoding the value
version: HeaderVersion::new(header, height, params),
work: HeaderWork::new(header, store, height, network),
median_timestamp: HeaderMedianTimestamp::new(header, store, network),
median_timestamp: HeaderMedianTimestamp::new(header, store, height, deployments, &params),
version: HeaderVersion::new(header, height, params),
}
}

@@ -90,21 +96,21 @@ impl<'a> HeaderWork<'a> {
pub struct HeaderMedianTimestamp<'a> {
header: CanonHeader<'a>,
store: &'a BlockHeaderProvider,
network: Magic,
active: bool,
}

impl<'a> HeaderMedianTimestamp<'a> {
fn new(header: CanonHeader<'a>, store: &'a BlockHeaderProvider, network: Magic) -> Self {
fn new(header: CanonHeader<'a>, store: &'a BlockHeaderProvider, height: u32, deployments: &'a Deployments, params: &ConsensusParams) -> Self {
let active = deployments.csv(height, store, params);
HeaderMedianTimestamp {
header: header,
store: store,
network: network,
active: active,
}
}

fn check(&self) -> Result<(), Error> {
let median = median_timestamp(&self.header.raw, self.store, self.network);
if self.header.raw.time <= median {
if self.active && self.header.raw.time <= median_timestamp(&self.header.raw, self.store) {
Err(Error::Timestamp)
} else {
Ok(())

@@ -1,8 +1,9 @@
use primitives::hash::H256;
use db::{TransactionMetaProvider, TransactionOutputProvider};
use db::{TransactionMetaProvider, TransactionOutputProvider, BlockHeaderProvider};
use network::{Magic, ConsensusParams};
use script::{Script, verify_script, VerificationFlags, TransactionSignatureChecker, TransactionInputSigner};
use duplex_store::DuplexTransactionOutputProvider;
use deployments::Deployments;
use sigops::transaction_sigops;
use canon::CanonTransaction;
use constants::{COINBASE_MATURITY, MAX_BLOCK_SIGOPS};

@@ -30,6 +31,8 @@ impl<'a> TransactionAcceptor<'a> {
height: u32,
time: u32,
transaction_index: usize,
deployments: &'a Deployments,
headers: &'a BlockHeaderProvider,
) -> Self {
trace!(target: "verification", "Tx verification {}", transaction.hash.to_reversed_str());
let params = network.consensus_params();

@@ -39,7 +42,7 @@ impl<'a> TransactionAcceptor<'a> {
maturity: TransactionMaturity::new(transaction, meta_store, height),
overspent: TransactionOverspent::new(transaction, output_store),
double_spent: TransactionDoubleSpend::new(transaction, output_store),
eval: TransactionEval::new(transaction, output_store, params, height, time),
eval: TransactionEval::new(transaction, output_store, &params, height, time, deployments, headers),
}
}

@@ -73,6 +76,8 @@ impl<'a> MemoryPoolTransactionAcceptor<'a> {
transaction: CanonTransaction<'a>,
height: u32,
time: u32,
deployments: &'a Deployments,
headers: &'a BlockHeaderProvider,
) -> Self {
trace!(target: "verification", "Mempool-Tx verification {}", transaction.hash.to_reversed_str());
let params = network.consensus_params();

@@ -83,7 +88,7 @@ impl<'a> MemoryPoolTransactionAcceptor<'a> {
overspent: TransactionOverspent::new(transaction, output_store),
sigops: TransactionSigops::new(transaction, output_store, params.clone(), MAX_BLOCK_SIGOPS, time),
double_spent: TransactionDoubleSpend::new(transaction, output_store),
eval: TransactionEval::new(transaction, output_store, params, height, time),
eval: TransactionEval::new(transaction, output_store, &params, height, time, deployments, headers),
}
}

@@ -269,7 +274,8 @@ pub struct TransactionEval<'a> {
transaction: CanonTransaction<'a>,
store: DuplexTransactionOutputProvider<'a>,
verify_p2sh: bool,
verify_clocktime: bool,
verify_locktime: bool,
verify_checksequence: bool,
verify_dersig: bool,
}

@@ -277,19 +283,24 @@ impl<'a> TransactionEval<'a> {
fn new(
transaction: CanonTransaction<'a>,
store: DuplexTransactionOutputProvider<'a>,
params: ConsensusParams,
params: &ConsensusParams,
height: u32,
time: u32,
deployments: &'a Deployments,
headers: &'a BlockHeaderProvider,
) -> Self {
let verify_p2sh = time >= params.bip16_time;
let verify_clocktime = height >= params.bip65_height;
let verify_locktime = height >= params.bip65_height;
let verify_dersig = height >= params.bip66_height;

let verify_checksequence = deployments.csv(height, headers, params);

TransactionEval {
transaction: transaction,
store: store,
verify_p2sh: verify_p2sh,
verify_clocktime: verify_clocktime,
verify_locktime: verify_locktime,
verify_checksequence: verify_checksequence,
verify_dersig: verify_dersig,
}
}

@@ -317,7 +328,8 @@ impl<'a> TransactionEval<'a> {

let flags = VerificationFlags::default()
.verify_p2sh(self.verify_p2sh)
.verify_clocktimeverify(self.verify_clocktime)
.verify_locktime(self.verify_locktime)
.verify_checksequence(self.verify_checksequence)
.verify_dersig(self.verify_dersig);

try!(verify_script(&input, &output, &flags, &checker).map_err(|_| TransactionError::Signature(index)));

@@ -12,11 +12,13 @@ use verify_header::HeaderVerifier;
use verify_transaction::MemoryPoolTransactionVerifier;
use accept_chain::ChainAcceptor;
use accept_transaction::MemoryPoolTransactionAcceptor;
use deployments::Deployments;
use Verify;

pub struct BackwardsCompatibleChainVerifier {
store: SharedStore,
network: Magic,
deployments: Deployments,
}

impl BackwardsCompatibleChainVerifier {

@@ -24,6 +26,7 @@ impl BackwardsCompatibleChainVerifier {
BackwardsCompatibleChainVerifier {
store: store,
network: network,
deployments: Deployments::new(),
}
}

@@ -43,21 +46,21 @@ impl BackwardsCompatibleChainVerifier {
},
BlockOrigin::CanonChain { block_number } => {
let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(self.store.as_store(), self.network, canon_block, block_number);
let chain_acceptor = ChainAcceptor::new(self.store.as_store(), self.network, canon_block, block_number, &self.deployments);
chain_acceptor.check()?;
},
BlockOrigin::SideChain(origin) => {
let block_number = origin.block_number;
let fork = self.store.fork(origin)?;
let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(fork.store(), self.network, canon_block, block_number);
let chain_acceptor = ChainAcceptor::new(fork.store(), self.network, canon_block, block_number, &self.deployments);
chain_acceptor.check()?;
},
BlockOrigin::SideChainBecomingCanonChain(origin) => {
let block_number = origin.block_number;
let fork = self.store.fork(origin)?;
let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(fork.store(), self.network, canon_block, block_number);
let chain_acceptor = ChainAcceptor::new(fork.store(), self.network, canon_block, block_number, &self.deployments);
chain_acceptor.check()?;
},
}

@@ -102,7 +105,9 @@ impl BackwardsCompatibleChainVerifier {
self.network,
canon_tx,
height,
time
time,
&self.deployments,
self.store.as_block_header_provider()
);
tx_acceptor.check()
}

@@ -322,13 +327,14 @@ mod tests {

// waiting 100 blocks for genesis coinbase to become valid
for _ in 0..100 {
let block = test_data::block_builder()
let block: IndexedBlock = test_data::block_builder()
.transaction().coinbase().build()
.merkled_header().parent(genesis.hash()).build()
.build()
.into();
storage.insert(&block).expect("All dummy blocks should be inserted");
storage.canonize(block.hash()).unwrap();
let hash = block.hash().clone();
storage.insert(block).expect("All dummy blocks should be inserted");
storage.canonize(&hash).unwrap();
}

let best_hash = storage.best_block().hash;

@@ -0,0 +1,222 @@
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use parking_lot::Mutex;
use network::{ConsensusParams, Deployment};
use hash::H256;
use db::{BlockHeaderProvider, BlockRef, BlockAncestors, BlockIterator};
use timestamp::median_timestamp;

#[derive(Debug, Clone, Copy)]
enum ThresholdState {
Defined,
Started,
LockedIn,
Active,
Failed,
}

impl Default for ThresholdState {
fn default() -> Self {
ThresholdState::Defined
}
}

impl ThresholdState {
fn is_final(&self) -> bool {
match *self {
ThresholdState::Active | ThresholdState::Failed => true,
ThresholdState::Defined | ThresholdState::Started | ThresholdState::LockedIn => false,
}
}

fn is_active(&self) -> bool {
match *self {
ThresholdState::Active => true,
_ => false,
}
}
}

/// Threshold state at given point of time
#[derive(Debug, Clone, Default)]
struct DeploymentState {
/// Block number
block_number: u32,
/// Block hash
block_hash: H256,
/// Threshold state for given block
state: ThresholdState,
}

/// Last known deployment states
type DeploymentStateCache = HashMap<&'static str, DeploymentState>;

#[derive(Default)]
pub struct Deployments {
cache: Mutex<DeploymentStateCache>,
}

impl Deployments {
pub fn new() -> Self {
Deployments::default()
}

/// Returns true if csv deployment is active
pub fn csv(&self, number: u32, headers: &BlockHeaderProvider, consensus: &ConsensusParams) -> bool {
match consensus.csv_deployment {
Some(csv) => {
let mut cache = self.cache.lock();
threshold_state(&mut cache, csv, number, headers, consensus).is_active()
},
None => false
}
}
}

/// Calculates threshold state of given deployment
fn threshold_state(cache: &mut DeploymentStateCache, deployment: Deployment, number: u32, headers: &BlockHeaderProvider, consensus: &ConsensusParams) -> ThresholdState {
if let Some(activation) = deployment.activation {
if activation <= number {
return ThresholdState::Active;
} else {
return ThresholdState::Defined;
}
}

// get number of the first block in the period
let number = first_of_the_period(number, consensus.miner_confirmation_window);

let hash = match headers.block_header(BlockRef::Number(number)) {
Some(header) => header.hash(),
None => return ThresholdState::Defined,
};

match cache.entry(deployment.name) {
// by checking hash, we make sure we are on the same branch
Entry::Occupied(ref entry) if entry.get().block_number == number && entry.get().block_hash == hash => {
entry.get().state
},
// otherwise we need to recalculate threshold state
Entry::Occupied(mut entry) => {
let deployment_state = entry.get().clone();
if deployment_state.state.is_final() {
return deployment_state.state
}
let from_block = deployment_state.block_number + consensus.miner_confirmation_window;
let threshold_state = deployment_state.state;
let deployment_iter = ThresholdIterator::new(deployment, headers, from_block, consensus, threshold_state);
let state = deployment_iter.last().expect("iter must have at least one item");
let result = state.state;
entry.insert(state);
result
},
Entry::Vacant(entry) => {
let deployment_iter = ThresholdIterator::new(deployment, headers, 0, consensus, ThresholdState::Defined);
let state = deployment_iter.last().unwrap_or_default();
let result = state.state;
entry.insert(state);
result
},
}
}

fn first_of_the_period(block: u32, miner_confirmation_window: u32) -> u32 {
if block < miner_confirmation_window - 1 {
0
} else {
block - ((block + 1) % miner_confirmation_window)
}
}

fn count_deployment_matches(block_number: u32, blocks: &BlockHeaderProvider, deployment: Deployment, window: u32) -> usize {
BlockAncestors::new(BlockRef::Number(block_number), blocks)
.take(window as usize)
.filter(|header| deployment.matches(header.version))
.count()
}

struct ThresholdIterator<'a> {
deployment: Deployment,
block_iterator: BlockIterator<'a>,
headers: &'a BlockHeaderProvider,
consensus: &'a ConsensusParams,
last_state: ThresholdState,
}

impl<'a> ThresholdIterator<'a> {
fn new(deployment: Deployment, headers: &'a BlockHeaderProvider, to_check: u32, consensus: &'a ConsensusParams, state: ThresholdState) -> Self {
ThresholdIterator {
deployment: deployment,
block_iterator: BlockIterator::new(to_check, consensus.miner_confirmation_window, headers),
headers: headers,
consensus: consensus,
last_state: state,
}
}
}

impl<'a> Iterator for ThresholdIterator<'a> {
type Item = DeploymentState;

fn next(&mut self) -> Option<Self::Item> {
let (block_number, header) = match self.block_iterator.next() {
Some(header) => header,
None => return None,
};

let median = median_timestamp(&header, self.headers);

match self.last_state {
ThresholdState::Defined => {
if median >= self.deployment.timeout {
self.last_state = ThresholdState::Failed;
} else if median >= self.deployment.start_time {
self.last_state = ThresholdState::Started;
}
},
ThresholdState::Started => {
if median >= self.deployment.timeout {
self.last_state = ThresholdState::Failed;
} else {
let count = count_deployment_matches(block_number, self.headers, self.deployment, self.consensus.miner_confirmation_window);
if count >= self.consensus.rule_change_activation_threshold as usize {
self.last_state = ThresholdState::LockedIn;
}
}
},
ThresholdState::LockedIn => {
self.last_state = ThresholdState::Active;
},
ThresholdState::Failed | ThresholdState::Active => {
return None
}
}

let result = DeploymentState {
block_number: block_number,
block_hash: header.hash(),
state: self.last_state,
};

Some(result)
}
}

#[cfg(test)]
mod tests {
use super::first_of_the_period;

#[test]
fn test_first_of_the_period() {
let window = 2016;
assert_eq!(0, first_of_the_period(0, window));
assert_eq!(0, first_of_the_period(1, window));
assert_eq!(0, first_of_the_period(2014, window));
assert_eq!(2015, first_of_the_period(2015, window));
assert_eq!(2015, first_of_the_period(2016, window));
assert_eq!(8063, first_of_the_period(8063, window));
assert_eq!(8063, first_of_the_period(10000, window));
assert_eq!(8063, first_of_the_period(10001, window));
}
}

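ThresholdIterator advances the BIP9 state machine one retarget window at a time: Defined moves to Started once median-time-past reaches start_time (or to Failed at timeout), Started locks in when enough blocks in the window signal, and LockedIn unconditionally becomes Active one window later. A storage-free sketch of that transition table with a made-up deployment (start at t=100, timeout at t=200); signalling stands in for the count_deployment_matches threshold check:

#[derive(Clone, Copy, Debug, PartialEq)]
enum State { Defined, Started, LockedIn, Active, Failed }

fn step(state: State, median: u32, start_time: u32, timeout: u32, signalling: bool) -> State {
    match state {
        State::Defined if median >= timeout => State::Failed,
        State::Defined if median >= start_time => State::Started,
        State::Started if median >= timeout => State::Failed,
        State::Started if signalling => State::LockedIn,
        State::LockedIn => State::Active, // always, one window later
        s => s,                           // Active and Failed are final
    }
}

fn main() {
    let mut state = State::Defined;
    for &(median, signalling) in &[(50, false), (100, false), (130, true), (160, false)] {
        state = step(state, median, 100, 200, signalling);
        println!("median {} -> {:?}", median, state);
    }
    assert_eq!(state, State::Active); // Defined -> Started -> LockedIn -> Active
}
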
@@ -54,6 +54,7 @@
extern crate time;
#[macro_use]
extern crate log;
extern crate parking_lot;
extern crate rayon;

extern crate db;

@@ -65,6 +66,7 @@ extern crate script;

pub mod constants;
mod canon;
mod deployments;
mod duplex_store;
mod error;
mod sigops;

@@ -1,29 +1,15 @@
use std::collections::BTreeSet;
use chain::BlockHeader;
use db::BlockHeaderProvider;
use network::Magic;
use db::{BlockHeaderProvider, BlockAncestors};

/// Returns the median timestamp of the given header's ancestors.
/// The header itself is expected to have a higher timestamp
/// than this median.
pub fn median_timestamp(header: &BlockHeader, store: &BlockHeaderProvider, network: Magic) -> u32 {
// TODO: timestamp validation on testnet is broken
if network == Magic::Testnet {
return 0;
}

let ancestors = 11;
let mut timestamps = BTreeSet::new();
let mut block_ref = header.previous_header_hash.clone().into();

for _ in 0..ancestors {
let previous_header = match store.block_header(block_ref) {
Some(h) => h,
None => break,
};
timestamps.insert(previous_header.time);
block_ref = previous_header.previous_header_hash.into();
}
pub fn median_timestamp(header: &BlockHeader, store: &BlockHeaderProvider) -> u32 {
let timestamps: BTreeSet<_> = BlockAncestors::new(header.previous_header_hash.clone().into(), store)
.take(11)
.map(|header| header.time)
.collect();

if timestamps.is_empty() {
return 0;
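
The hunk is cut off here, before the median itself is taken from the collected timestamps. For reference, BIP113's median-time-past is the middle element of the last 11 block timestamps sorted with duplicates kept; a self-contained sketch of that definition (not this file's code):

fn median_time_past(mut timestamps: Vec<u32>) -> u32 {
    if timestamps.is_empty() {
        return 0;
    }
    timestamps.sort();
    timestamps[timestamps.len() / 2]
}

fn main() {
    let last_11 = vec![9, 3, 5, 7, 5, 1, 8, 2, 6, 4, 5];
    assert_eq!(median_time_past(last_11), 5);
}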