sync refactoring

Author: NikVolf
Date:   2016-11-28 16:07:33 +03:00
Parent: 32e21d6e37
Commit: fb881adb66
8 changed files with 85 additions and 84 deletions
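The changes below thread `db::IndexedBlock` through the sync crate in place of the plain `chain::Block`: storage writes go through `insert_indexed_block`, header and transaction-hash access goes through `header()` and `transaction_hashes()`, and call sites convert with `.into()`. The call sites imply roughly the following shape for `IndexedBlock`; this is a hedged sketch inferred from the diff, not the actual `db` crate definition (field names and the eager caching are assumptions):

// Hedged sketch of the IndexedBlock shape implied by the call sites in this diff.
// Field names and the caching strategy are assumptions, not the real db crate code.
use chain::{Block, BlockHeader, Transaction};
use primitives::hash::H256;

pub struct IndexedBlock {
	header: BlockHeader,
	transactions: Vec<Transaction>,
	hash: H256,                    // cached block hash
	transaction_hashes: Vec<H256>, // cached transaction hashes
}

impl IndexedBlock {
	pub fn header(&self) -> &BlockHeader { &self.header }
	pub fn hash(&self) -> &H256 { &self.hash }
	pub fn transaction_hashes(&self) -> &[H256] { &self.transaction_hashes }
}

impl From<Block> for IndexedBlock {
	fn from(block: Block) -> Self {
		// compute the hashes once, then move the header and transactions out of the block
		let hash = block.hash();
		let transaction_hashes: Vec<H256> = block.transactions.iter().map(|tx| tx.hash()).collect();
		IndexedBlock {
			header: block.block_header,
			transactions: block.transactions,
			hash: hash,
			transaction_hashes: transaction_hashes,
		}
	}
}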

View File

@ -27,7 +27,7 @@ impl BlocksWriter {
match self.verifier.verify(&indexed_block) {
Err(err) => Err(Error::Verification(err)),
- Ok(_chain) => { try!(self.storage.insert_block(&block).map_err(Error::Database)); Ok(()) }
+ Ok(_chain) => { try!(self.storage.insert_indexed_block(&indexed_block).map_err(Error::Database)); Ok(()) }
}
}
}
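Above, the writer now hands the pre-indexed block straight to storage via `insert_indexed_block` instead of re-indexing inside `insert_block`. Only the two method names appear in this diff; the trait name and return type below are placeholders used to illustrate the assumed relationship between the two entry points:

// Placeholder sketch: only `insert_block` / `insert_indexed_block` are taken from the
// diff; the trait name and the return types are assumptions.
pub trait BlockStorage {
	/// Existing entry point: storage indexes the block itself before insertion.
	fn insert_block(&self, block: &Block) -> Result<(), db::Error>;
	/// New entry point: accepts a block the caller has already indexed, which
	/// presumably avoids hashing the block and its transactions a second time.
	fn insert_indexed_block(&self, block: &IndexedBlock) -> Result<(), db::Error>;
}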

View File

@ -145,7 +145,7 @@ impl<T, U, V> LocalNode<T, U, V> where T: SynchronizationTaskExecutor + PeersCon
trace!(target: "sync", "Got `block` message from peer#{}. Block hash: {}", peer_index, message.block.hash().to_reversed_str());
// try to process new block
- self.client.lock().on_peer_block(peer_index, message.block);
+ self.client.lock().on_peer_block(peer_index, message.block.into());
}
pub fn on_peer_headers(&self, peer_index: usize, message: types::Headers) {

View File

@ -44,7 +44,7 @@ impl OrphanBlocksPool {
/// Insert orphaned block, for which we have already requested its parent block
pub fn insert_orphaned_block(&mut self, hash: H256, block: IndexedBlock) {
self.orphaned_blocks
- .entry(block.block_header.previous_header_hash.clone())
+ .entry(block.header().previous_header_hash.clone())
.or_insert_with(HashMap::new)
.insert(hash, block);
}
@ -72,7 +72,7 @@ impl OrphanBlocksPool {
let mut queue: VecDeque<H256> = VecDeque::new();
queue.push_back(hash.clone());
- let mut removed: Vec<(H256, Block)> = Vec::new();
+ let mut removed: Vec<(H256, IndexedBlock)> = Vec::new();
while let Some(parent_hash) = queue.pop_front() {
if let Entry::Occupied(entry) = self.orphaned_blocks.entry(parent_hash) {
let (_, orphaned) = entry.remove_entry();
@ -89,7 +89,7 @@ impl OrphanBlocksPool {
/// Remove blocks with given hashes + all dependent blocks
pub fn remove_blocks(&mut self, hashes: &HashSet<H256>) -> Vec<(H256, IndexedBlock)> {
// TODO: excess clone
- let mut removed: Vec<(H256, Block)> = Vec::new();
+ let mut removed: Vec<(H256, IndexedBlock)> = Vec::new();
let parent_orphan_keys: Vec<_> = self.orphaned_blocks.keys().cloned().collect();
for parent_orphan_key in parent_orphan_keys {
if let Entry::Occupied(mut orphan_entry) = self.orphaned_blocks.entry(parent_orphan_key) {
@ -139,7 +139,7 @@ mod tests {
let b1 = test_data::block_h1();
let b1_hash = b1.hash();
- pool.insert_orphaned_block(b1_hash.clone(), b1);
+ pool.insert_orphaned_block(b1_hash.clone(), b1.into());
assert_eq!(pool.len(), 1);
assert!(!pool.contains_unknown_block(&b1_hash));
@ -152,7 +152,7 @@ mod tests {
let b1 = test_data::block_h1();
let b1_hash = b1.hash();
- pool.insert_unknown_block(b1_hash.clone(), b1);
+ pool.insert_unknown_block(b1_hash.clone(), b1.into());
assert_eq!(pool.len(), 1);
assert!(pool.contains_unknown_block(&b1_hash));
@ -167,8 +167,8 @@ mod tests {
let b2 = test_data::block_h169();
let b2_hash = b2.hash();
- pool.insert_orphaned_block(b1_hash.clone(), b1);
- pool.insert_unknown_block(b2_hash.clone(), b2);
+ pool.insert_orphaned_block(b1_hash.clone(), b1.into());
+ pool.insert_unknown_block(b2_hash.clone(), b2.into());
assert_eq!(pool.len(), 2);
assert!(!pool.contains_unknown_block(&b1_hash));
@ -193,9 +193,9 @@ mod tests {
let b3 = test_data::block_h2();
let b3_hash = b3.hash();
- pool.insert_orphaned_block(b1_hash.clone(), b1);
- pool.insert_unknown_block(b2_hash.clone(), b2);
- pool.insert_orphaned_block(b3_hash.clone(), b3);
+ pool.insert_orphaned_block(b1_hash.clone(), b1.into());
+ pool.insert_unknown_block(b2_hash.clone(), b2.into());
+ pool.insert_orphaned_block(b3_hash.clone(), b3.into());
let removed = pool.remove_blocks_for_parent(&test_data::genesis().hash());
assert_eq!(removed.len(), 2);
@ -223,11 +223,11 @@ mod tests {
let b5 = test_data::block_h181();
let b5_hash = b5.hash();
- pool.insert_orphaned_block(b1_hash.clone(), b1);
- pool.insert_orphaned_block(b2_hash.clone(), b2);
- pool.insert_orphaned_block(b3_hash.clone(), b3);
- pool.insert_orphaned_block(b4_hash.clone(), b4);
- pool.insert_orphaned_block(b5_hash.clone(), b5);
+ pool.insert_orphaned_block(b1_hash.clone(), b1.into());
+ pool.insert_orphaned_block(b2_hash.clone(), b2.into());
+ pool.insert_orphaned_block(b3_hash.clone(), b3.into());
+ pool.insert_orphaned_block(b4_hash.clone(), b4.into());
+ pool.insert_orphaned_block(b5_hash.clone(), b5.into());
let mut blocks_to_remove: HashSet<H256> = HashSet::new();
blocks_to_remove.insert(b1_hash.clone());

View File

@ -310,10 +310,10 @@ impl Chain {
/// Insert new best block to storage
pub fn insert_best_block(&mut self, hash: H256, block: &IndexedBlock) -> Result<BlockInsertionResult, db::Error> {
- let is_appending_to_main_branch = self.best_storage_block.hash == block.block_header.previous_header_hash;
+ let is_appending_to_main_branch = &self.best_storage_block.hash == block.hash();
// insert to storage
- let storage_insertion = try!(self.storage.insert_block(&block));
+ let storage_insertion = try!(self.storage.insert_indexed_block(&block));
// remember new best block hash
self.best_storage_block = self.storage.best_block().expect("Inserted block above");
@ -328,7 +328,7 @@ impl Chain {
// all transactions from this block were accepted
// => delete accepted transactions from verification queue and from the memory pool
- let this_block_transactions_hashes = block.transactions.iter().map(|tx| tx.hash());
+ let this_block_transactions_hashes: Vec<H256> = block.transaction_hashes().iter().cloned().collect();
for transaction_accepted in this_block_transactions_hashes {
self.memory_pool.remove_by_hash(&transaction_accepted);
self.verifying_transactions.remove(&transaction_accepted);
@ -353,7 +353,7 @@ impl Chain {
// all transactions from this block were accepted
// + all transactions from previous blocks of this fork were accepted
// => delete accepted transactions from verification queue and from the memory pool
- let this_block_transactions_hashes = block.transaction_hashes();
+ let this_block_transactions_hashes: Vec<H256> = block.transaction_hashes().iter().cloned().collect();
let mut canonized_blocks_hashes: Vec<H256> = Vec::new();
let mut new_main_blocks_transactions_hashes: Vec<H256> = Vec::new();
while let Some(canonized_block_hash) = reorganization.pop_canonized() {
@ -361,7 +361,7 @@ impl Chain {
new_main_blocks_transactions_hashes.extend(canonized_transactions_hashes);
canonized_blocks_hashes.push(canonized_block_hash);
}
- for transaction_accepted in this_block_transactions_hashes.chain(new_main_blocks_transactions_hashes.into_iter()) {
+ for transaction_accepted in this_block_transactions_hashes.into_iter().chain(new_main_blocks_transactions_hashes.into_iter()) {
self.memory_pool.remove_by_hash(&transaction_accepted);
self.verifying_transactions.remove(&transaction_accepted);
}
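In both the no-reorg and reorg branches above, the accepted transaction hashes are now copied out of the block into an owned `Vec<H256>` before the removal loop; `transaction_hashes()` presumably returns a borrowed slice of the cached hashes, so collecting first keeps the loop free of any borrow on `block`. A condensed view of the resulting pattern, under that assumption:

// Assumes IndexedBlock::transaction_hashes() returns &[H256] (borrowed cached hashes).
let this_block_transactions_hashes: Vec<H256> = block.transaction_hashes().iter().cloned().collect();
for transaction_accepted in this_block_transactions_hashes.into_iter().chain(new_main_blocks_transactions_hashes.into_iter()) {
	self.memory_pool.remove_by_hash(&transaction_accepted);
	self.verifying_transactions.remove(&transaction_accepted);
}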
@ -785,7 +785,7 @@ mod tests {
assert!(chain.information().scheduled == 3 && chain.information().requested == 1
&& chain.information().verifying == 1 && chain.information().stored == 1);
// insert new best block to the chain
- chain.insert_best_block(test_data::block_h1().hash(), &test_data::block_h1()).expect("Db error");
+ chain.insert_best_block(test_data::block_h1().hash(), &test_data::block_h1().into()).expect("Db error");
assert!(chain.information().scheduled == 3 && chain.information().requested == 1
&& chain.information().verifying == 1 && chain.information().stored == 2);
assert_eq!(db.best_block().expect("storage with genesis block is required").number, 1);
@ -800,13 +800,13 @@ mod tests {
let block1 = test_data::block_h1();
let block1_hash = block1.hash();
- chain.insert_best_block(block1_hash.clone(), &block1).expect("Error inserting new block");
+ chain.insert_best_block(block1_hash.clone(), &block1.into()).expect("Error inserting new block");
assert_eq!(chain.block_locator_hashes(), vec![block1_hash.clone(), genesis_hash.clone()]);
let block2 = test_data::block_h2();
let block2_hash = block2.hash();
- chain.insert_best_block(block2_hash.clone(), &block2).expect("Error inserting new block");
+ chain.insert_best_block(block2_hash.clone(), &block2.into()).expect("Error inserting new block");
assert_eq!(chain.block_locator_hashes(), vec![block2_hash.clone(), block1_hash.clone(), genesis_hash.clone()]);
let blocks0 = test_data::build_n_empty_blocks_from_genesis(11, 0);
@ -879,8 +879,8 @@ mod tests {
fn chain_intersect_with_inventory() {
let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block()));
// append 2 db blocks
- chain.insert_best_block(test_data::block_h1().hash(), &test_data::block_h1()).expect("Error inserting new block");
- chain.insert_best_block(test_data::block_h2().hash(), &test_data::block_h2()).expect("Error inserting new block");
+ chain.insert_best_block(test_data::block_h1().hash(), &test_data::block_h1().into()).expect("Error inserting new block");
+ chain.insert_best_block(test_data::block_h2().hash(), &test_data::block_h2().into()).expect("Error inserting new block");
// prepare blocks
let blocks0 = test_data::build_n_empty_blocks_from(9, 0, &test_data::block_h2().block_header);
@ -993,7 +993,7 @@ mod tests {
assert_eq!(chain.information().transactions.transactions_count, 1);
// when block is inserted to the database => all accepted transactions are removed from mempool && verifying queue
- chain.insert_best_block(b1.hash(), &b1).expect("block accepted");
+ chain.insert_best_block(b1.hash(), &b1.into()).expect("block accepted");
assert_eq!(chain.information().transactions.transactions_count, 0);
assert!(!chain.forget_verifying_transaction(&tx1_hash));
@ -1063,15 +1063,15 @@ mod tests {
chain.insert_verified_transaction(tx2);
// no reorg
- let result = chain.insert_best_block(b1.hash(), &b1).expect("no error");
+ let result = chain.insert_best_block(b1.hash(), &b1.into()).expect("no error");
assert_eq!(result.transactions_to_reverify.len(), 0);
// no reorg
- let result = chain.insert_best_block(b2.hash(), &b2).expect("no error");
+ let result = chain.insert_best_block(b2.hash(), &b2.into()).expect("no error");
assert_eq!(result.transactions_to_reverify.len(), 0);
// reorg
- let result = chain.insert_best_block(b3.hash(), &b3).expect("no error");
+ let result = chain.insert_best_block(b3.hash(), &b3.into()).expect("no error");
assert_eq!(result.transactions_to_reverify.len(), 2);
assert!(result.transactions_to_reverify.iter().any(|&(ref h, _)| h == &tx1_hash));
assert!(result.transactions_to_reverify.iter().any(|&(ref h, _)| h == &tx2_hash));
@ -1115,18 +1115,18 @@ mod tests {
chain.insert_verified_transaction(tx4);
chain.insert_verified_transaction(tx5);
- assert_eq!(chain.insert_best_block(b0.hash(), &b0).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b0.hash()]));
+ assert_eq!(chain.insert_best_block(b0.hash(), &b0.clone().into()).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b0.hash()]));
assert_eq!(chain.information().transactions.transactions_count, 3);
- assert_eq!(chain.insert_best_block(b1.hash(), &b1).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b1.hash()]));
+ assert_eq!(chain.insert_best_block(b1.hash(), &b1.clone().into()).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b1.hash()]));
assert_eq!(chain.information().transactions.transactions_count, 3);
- assert_eq!(chain.insert_best_block(b2.hash(), &b2).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b2.hash()]));
+ assert_eq!(chain.insert_best_block(b2.hash(), &b2.clone().into()).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b2.hash()]));
assert_eq!(chain.information().transactions.transactions_count, 3);
- assert_eq!(chain.insert_best_block(b3.hash(), &b3).expect("block accepted"), BlockInsertionResult::default());
+ assert_eq!(chain.insert_best_block(b3.hash(), &b3.clone().into()).expect("block accepted"), BlockInsertionResult::default());
assert_eq!(chain.information().transactions.transactions_count, 3);
- assert_eq!(chain.insert_best_block(b4.hash(), &b4).expect("block accepted"), BlockInsertionResult::default());
+ assert_eq!(chain.insert_best_block(b4.hash(), &b4.clone().into()).expect("block accepted"), BlockInsertionResult::default());
assert_eq!(chain.information().transactions.transactions_count, 3);
// order matters
- let insert_result = chain.insert_best_block(b5.hash(), &b5).expect("block accepted");
+ let insert_result = chain.insert_best_block(b5.hash(), &b5.clone().into()).expect("block accepted");
let transactions_to_reverify_hashes: Vec<_> = insert_result
.transactions_to_reverify
.into_iter()

View File

@ -610,12 +610,12 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
/// Process new block.
fn on_peer_block(&mut self, peer_index: usize, block: IndexedBlock) -> Option<VecDeque<(H256, IndexedBlock)>> {
- let block_hash = block.hash();
+ let block_hash = block.hash().clone();
// update peers to select next tasks
self.peers.on_block_received(peer_index, &block_hash);
- self.process_peer_block(peer_index, block_hash.clone(), block)
+ self.process_peer_block(peer_index, block_hash, block)
}
/// Process new transaction.
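Since `IndexedBlock::hash()` is assumed to return a reference to the cached hash (`&H256`), `on_peer_block` now clones the hash once up front and then moves the block into `process_peer_block` without a second clone. A condensed view of the resulting method, assembled from the changed lines above:

// Assumes IndexedBlock::hash() -> &H256: clone the hash once, then move the block on.
fn on_peer_block(&mut self, peer_index: usize, block: IndexedBlock) -> Option<VecDeque<(H256, IndexedBlock)>> {
	let block_hash = block.hash().clone();
	// update peers to select next tasks
	self.peers.on_block_received(peer_index, &block_hash);
	self.process_peer_block(peer_index, block_hash, block)
}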
@ -1410,7 +1410,7 @@ pub mod tests {
assert_eq!(sync.information().peers.active, 1);
// push unknown block => will be queued as orphan
- sync.on_peer_block(5, block2);
+ sync.on_peer_block(5, block2.into());
assert!(sync.information().state.is_nearly_saturated());
assert_eq!(sync.information().orphaned_blocks, 1);
assert_eq!(sync.information().chain.scheduled, 0);
@ -1420,7 +1420,7 @@ pub mod tests {
assert_eq!(sync.information().peers.active, 1);
// push requested block => should be moved to the test storage && orphan should be moved
- sync.on_peer_block(5, block1);
+ sync.on_peer_block(5, block1.into());
assert!(sync.information().state.is_saturated());
assert_eq!(sync.information().orphaned_blocks, 0);
assert_eq!(sync.information().chain.scheduled, 0);
@ -1437,7 +1437,7 @@ pub mod tests {
let mut sync = sync.lock();
sync.on_new_blocks_headers(5, vec![test_data::block_h1().block_header.clone(), test_data::block_h2().block_header.clone()]);
- sync.on_peer_block(5, test_data::block_h169());
+ sync.on_peer_block(5, test_data::block_h169().into());
// out-of-order block was presented by the peer
assert!(sync.information().state.is_synchronizing());
@ -1485,11 +1485,11 @@ pub mod tests {
{
let mut sync = sync.lock();
// receive block from peer#2
- sync.on_peer_block(2, block2);
+ sync.on_peer_block(2, block2.into());
assert!(sync.information().chain.requested == 2
&& sync.information().orphaned_blocks == 1);
// receive block from peer#1
- sync.on_peer_block(1, block1);
+ sync.on_peer_block(1, block1.into());
assert!(sync.information().chain.requested == 0
&& sync.information().orphaned_blocks == 0
@ -1537,7 +1537,7 @@ pub mod tests {
sync.on_new_blocks_headers(1, vec![block.block_header.clone()]);
sync.on_new_blocks_headers(2, vec![block.block_header.clone()]);
executor.lock().take_tasks();
- sync.on_peer_block(2, block.clone());
+ sync.on_peer_block(2, block.clone().into());
let tasks = executor.lock().take_tasks();
assert_eq!(tasks.len(), 5);
@ -1570,7 +1570,7 @@ pub mod tests {
assert_eq!(chain.information().headers.total, 2);
}
- sync.on_peer_block(1, b1);
+ sync.on_peer_block(1, b1.into());
let tasks = executor.lock().take_tasks();
assert_eq!(tasks, vec![]);
@ -1581,7 +1581,7 @@ pub mod tests {
assert_eq!(chain.information().headers.total, 1);
}
- sync.on_peer_block(1, b2);
+ sync.on_peer_block(1, b2.into());
let tasks = executor.lock().take_tasks();
assert_eq!(tasks, vec![Task::RequestBlocksHeaders(1), Task::RequestMemoryPool(1)]);
@ -1613,7 +1613,7 @@ pub mod tests {
assert_eq!(chain.information().headers.total, 2);
}
- sync.on_peer_block(1, b2);
+ sync.on_peer_block(1, b2.into());
let tasks = executor.lock().take_tasks();
assert_eq!(tasks, vec![]);
@ -1624,7 +1624,7 @@ pub mod tests {
assert_eq!(chain.information().headers.total, 2);
}
- sync.on_peer_block(1, b1);
+ sync.on_peer_block(1, b1.into());
let tasks = executor.lock().take_tasks();
assert_eq!(tasks, vec![Task::RequestBlocksHeaders(1), Task::RequestMemoryPool(1)]);
@ -1674,35 +1674,35 @@ pub mod tests {
Task::RequestBlocks(2, vec![fork2[0].hash(), fork2[1].hash(), fork2[2].hash()]),
]);
- sync.on_peer_block(2, fork2[0].clone());
+ sync.on_peer_block(2, fork2[0].clone().into());
{
let chain = chain.read();
assert_eq!(chain.best_storage_block().hash, fork2[0].hash());
assert_eq!(chain.best_storage_block().number, 1);
}
- sync.on_peer_block(1, fork1[0].clone());
+ sync.on_peer_block(1, fork1[0].clone().into());
{
let chain = chain.read();
assert_eq!(chain.best_storage_block().hash, fork2[0].hash());
assert_eq!(chain.best_storage_block().number, 1);
}
- sync.on_peer_block(1, fork1[1].clone());
+ sync.on_peer_block(1, fork1[1].clone().into());
{
let chain = chain.read();
assert_eq!(chain.best_storage_block().hash, fork1[1].hash());
assert_eq!(chain.best_storage_block().number, 2);
}
- sync.on_peer_block(2, fork2[1].clone());
+ sync.on_peer_block(2, fork2[1].clone().into());
{
let chain = chain.read();
assert_eq!(chain.best_storage_block().hash, fork1[1].hash());
assert_eq!(chain.best_storage_block().number, 2);
}
- sync.on_peer_block(2, fork2[2].clone());
+ sync.on_peer_block(2, fork2[2].clone().into());
{
let chain = chain.read();
assert_eq!(chain.best_storage_block().hash, fork2[2].hash());
@ -1740,12 +1740,12 @@ pub mod tests {
assert_eq!(chain.information().headers.total, 3);
}
- sync.on_peer_block(1, common_block.clone());
- sync.on_peer_block(1, fork1[0].clone());
- sync.on_peer_block(1, fork1[1].clone());
- sync.on_peer_block(2, fork2[0].clone());
- sync.on_peer_block(2, fork2[1].clone());
- sync.on_peer_block(2, fork2[2].clone());
+ sync.on_peer_block(1, common_block.clone().into());
+ sync.on_peer_block(1, fork1[0].clone().into());
+ sync.on_peer_block(1, fork1[1].clone().into());
+ sync.on_peer_block(2, fork2[0].clone().into());
+ sync.on_peer_block(2, fork2[1].clone().into());
+ sync.on_peer_block(2, fork2[2].clone().into());
{
let chain = chain.read();
@ -1759,7 +1759,7 @@ pub mod tests {
let (_, _, _, chain, sync) = create_sync(None, None);
let mut sync = sync.lock();
- sync.on_peer_block(1, test_data::block_h2());
+ sync.on_peer_block(1, test_data::block_h2().into());
assert_eq!(sync.information().orphaned_blocks, 1);
{
@ -1767,7 +1767,7 @@ pub mod tests {
assert_eq!(chain.best_storage_block().number, 0);
}
- sync.on_peer_block(1, test_data::block_h1());
+ sync.on_peer_block(1, test_data::block_h1().into());
assert_eq!(sync.information().orphaned_blocks, 0);
{
@ -1781,7 +1781,7 @@ pub mod tests {
let (_, _, executor, _, sync) = create_sync(None, None);
let mut sync = sync.lock();
- sync.on_peer_block(1, test_data::block_h2());
+ sync.on_peer_block(1, test_data::block_h2().into());
sync.on_new_blocks_inventory(1, vec![test_data::block_h1().hash(), test_data::block_h2().hash()]);
let tasks = executor.lock().take_tasks();
@ -2051,11 +2051,11 @@ pub mod tests {
sync.on_new_blocks_headers(1, vec![b10.block_header.clone(), b11.block_header.clone(), b12.block_header.clone()]);
sync.on_new_blocks_headers(2, vec![b10.block_header.clone(), b21.block_header.clone(), b22.block_header.clone()]);
- sync.on_peer_block(1, b10.clone());
- sync.on_peer_block(1, b11);
- sync.on_peer_block(1, b12);
+ sync.on_peer_block(1, b10.clone().into());
+ sync.on_peer_block(1, b11.into());
+ sync.on_peer_block(1, b12.into());
- sync.on_peer_block(2, b21.clone());
+ sync.on_peer_block(2, b21.clone().into());
// should not panic here
sync.on_new_blocks_headers(2, vec![b10.block_header.clone(), b21.block_header.clone(),
@ -2073,8 +2073,8 @@ pub mod tests {
let mut sync = sync.lock();
sync.on_new_blocks_headers(1, vec![b0.block_header.clone(), b1.block_header.clone()]);
- sync.on_peer_block(1, b0.clone());
- sync.on_peer_block(1, b1.clone());
+ sync.on_peer_block(1, b0.clone().into());
+ sync.on_peer_block(1, b1.clone().into());
// we were in synchronization state => block is not relayed
{
@ -2086,7 +2086,7 @@ pub mod tests {
]);
}
- sync.on_peer_block(2, b2.clone());
+ sync.on_peer_block(2, b2.clone().into());
// we were in saturated state => block is relayed
{
@ -2096,7 +2096,7 @@ pub mod tests {
}
sync.on_new_blocks_headers(1, vec![b3.block_header.clone()]);
- sync.on_peer_block(1, b3.clone());
+ sync.on_peer_block(1, b3.clone().into());
// we were in nearly saturated state => block is relayed
{
@ -2193,7 +2193,7 @@ pub mod tests {
// ignore tasks
{ executor.lock().take_tasks(); }
- sync.on_peer_block(1, b0.clone());
+ sync.on_peer_block(1, b0.clone().into());
let tasks = executor.lock().take_tasks();
let inventory = vec![InventoryVector { inv_type: InventoryType::MessageBlock, hash: b0.hash() }];
@ -2221,7 +2221,7 @@ pub mod tests {
sync.on_peer_connected(3);
sync.on_peer_connected(4);
- sync.on_peer_block(1, b1);
+ sync.on_peer_block(1, b1.into());
{
use miner::transaction_fee_rate;
@ -2260,9 +2260,9 @@ pub mod tests {
let mut sync = sync.lock();
- sync.on_peer_block(1, test_data::block_h2());
+ sync.on_peer_block(1, test_data::block_h2().into());
// should not panic here
- sync.on_peer_block(2, test_data::block_h2());
+ sync.on_peer_block(2, test_data::block_h2().into());
}
#[test]
@ -2280,7 +2280,7 @@ pub mod tests {
// ignore tasks
{ executor.lock().take_tasks(); }
- sync.on_peer_block(1, b0.clone());
+ sync.on_peer_block(1, b0.clone().into());
let tasks = executor.lock().take_tasks();
let inventory = vec![InventoryVector { inv_type: InventoryType::MessageBlock, hash: b0.hash() }];

View File

@ -225,7 +225,7 @@ mod tests {
let config = ManageUnknownBlocksConfig { removal_time_ms: 1000, max_number: 100 };
let mut pool = OrphanBlocksPool::new();
let block = test_data::genesis();
- pool.insert_unknown_block(block.hash(), block);
+ pool.insert_unknown_block(block.hash(), block.into());
assert_eq!(manage_unknown_orphaned_blocks(&config, &mut pool), None);
assert_eq!(pool.len(), 1);
}
@ -238,7 +238,7 @@ mod tests {
let mut pool = OrphanBlocksPool::new();
let block = test_data::genesis();
let block_hash = block.hash();
- pool.insert_unknown_block(block_hash.clone(), block);
+ pool.insert_unknown_block(block_hash.clone(), block.into());
sleep(Duration::from_millis(1));
assert_eq!(manage_unknown_orphaned_blocks(&config, &mut pool), Some(vec![block_hash]));
@ -253,8 +253,8 @@ mod tests {
let block1_hash = block1.hash();
let block2 = test_data::block_h2();
let block2_hash = block2.hash();
- pool.insert_unknown_block(block1_hash.clone(), block1);
- pool.insert_unknown_block(block2_hash.clone(), block2);
+ pool.insert_unknown_block(block1_hash.clone(), block1.into());
+ pool.insert_unknown_block(block2_hash.clone(), block2.into());
assert_eq!(manage_unknown_orphaned_blocks(&config, &mut pool), Some(vec![block1_hash]));
assert_eq!(pool.len(), 1);
}

View File

@ -658,7 +658,7 @@ pub mod tests {
#[test]
fn server_getblocks_responds_inventory_when_have_unknown_blocks() {
let (chain, executor, server) = create_synchronization_server();
- chain.write().insert_best_block(test_data::block_h1().hash(), &test_data::block_h1()).expect("Db write error");
+ chain.write().insert_best_block(test_data::block_h1().hash(), &test_data::block_h1().into()).expect("Db write error");
// when asking for blocks hashes
server.serve_getblocks(0, types::GetBlocks {
version: 0,
@ -693,7 +693,7 @@ pub mod tests {
#[test]
fn server_getheaders_responds_headers_when_have_unknown_blocks() {
let (chain, executor, server) = create_synchronization_server();
- chain.write().insert_best_block(test_data::block_h1().hash(), &test_data::block_h1()).expect("Db write error");
+ chain.write().insert_best_block(test_data::block_h1().hash(), &test_data::block_h1().into()).expect("Db write error");
// when asking for blocks hashes
let dummy_id = 0;
server.serve_getheaders(0, types::GetHeaders {

View File

@ -157,6 +157,7 @@ pub mod tests {
use synchronization_executor::tests::DummyTaskExecutor;
use primitives::hash::H256;
use super::{Verifier, VerificationSink};
+ use db::IndexedBlock;
#[derive(Default)]
pub struct DummyVerifier {
@ -175,7 +176,7 @@ pub mod tests {
}
impl Verifier for DummyVerifier {
- fn verify_block(&self, block: Block) {
+ fn verify_block(&self, block: IndexedBlock) {
match self.sink {
Some(ref sink) => match self.errors.get(&block.hash()) {
Some(err) => sink.lock().on_block_verification_error(&err, &block.hash()),