Merge pull request #62 from ethcore/sync_request_fix

Fixed sync::scheduled queue
This commit is contained in:
Svyatoslav Nikolsky 2016-10-31 16:05:09 +03:00 committed by GitHub
commit f44d4d18a1
5 changed files with 278 additions and 67 deletions

View File

@ -14,12 +14,14 @@ pub enum HashPosition {
Inside,
}
/// Ordered queue with O(1) `contains()` and O(1) random access by index.
#[derive(Clone)]
pub struct HashQueue {
// Preserves insertion order and provides indexed access.
queue: VecDeque<H256>,
// Mirrors the contents of `queue` for O(1) membership tests.
set: HashSet<H256>
}
/// Chain of linked queues. First queue has index zero.
pub struct HashQueueChain {
// Queues ordered by index; elements conceptually flow between adjacent queues.
chain: Vec<HashQueue>,
}
@ -32,22 +34,54 @@ impl HashQueue {
}
}
/// Returns the number of elements in the queue.
pub fn len(&self) -> usize {
	self.queue.len()
}
/// Returns true if the queue holds no elements.
pub fn is_empty(&self) -> bool {
	self.queue.is_empty()
}
pub fn back<'a>(&'a self) -> Option<&'a H256> {
self.queue.back()
/// Returns front element from the given queue.
pub fn front(&self) -> Option<H256> {
self.queue.front().cloned()
}
/// Returns back element from the given queue.
pub fn back(&self) -> Option<H256> {
self.queue.back().cloned()
}
/// Returns the element standing right before the back of the queue,
/// or `None` when the queue holds fewer than two elements.
pub fn pre_back(&self) -> Option<H256> {
	let len = self.queue.len();
	if len < 2 {
		None
	} else {
		self.queue.get(len - 2).cloned()
	}
}
/// Returns the n-th element counting from the back of the queue (n starts at 0).
/// When the queue holds fewer than n + 1 elements, the front element is returned
/// instead; an empty queue yields `None`.
pub fn back_skip_n(&self, n: usize) -> Option<H256> {
	let len = self.queue.len();
	if len == 0 {
		None
	} else {
		// Clamp to index 0 (the front) when skipping past the beginning.
		let index = len.saturating_sub(n + 1);
		self.queue.get(index).cloned()
	}
}
/// Returns true if the queue contains the given hash (O(1) via the mirror set).
pub fn contains(&self, hash: &H256) -> bool {
	self.set.contains(hash)
}
/// Removes element from the front of the queue.
pub fn pop_front(&mut self) -> Option<H256> {
match self.queue.pop_front() {
Some(hash) => {
@ -58,6 +92,7 @@ impl HashQueue {
}
}
/// Removes n elements from the front of the queue.
pub fn pop_front_n(&mut self, n: usize) -> Vec<H256> {
let mut result: Vec<H256> = Vec::new();
for _ in 0..n {
@ -69,6 +104,7 @@ impl HashQueue {
result
}
/// Adds element to the back of the queue.
pub fn push_back(&mut self, hash: H256) {
if !self.set.insert(hash.clone()) {
panic!("must be checked by caller");
@ -76,12 +112,14 @@ impl HashQueue {
self.queue.push_back(hash);
}
/// Adds all given elements to the back of the queue, preserving their order.
/// Panics (via `push_back`) if any hash is already present.
pub fn push_back_n(&mut self, hashes: Vec<H256>) {
	hashes.into_iter().for_each(|hash| self.push_back(hash));
}
/// Removes element from the queue, returning its position.
pub fn remove(&mut self, hash: &H256) -> HashPosition {
if !self.set.remove(hash) {
return HashPosition::Missing;
@ -103,6 +141,7 @@ impl HashQueue {
unreachable!()
}
/// Removes all elements from the queue.
pub fn remove_all(&mut self) {
self.queue.clear();
self.set.clear();
@ -118,6 +157,7 @@ impl Index<usize> for HashQueue {
}
impl HashQueueChain {
/// Creates chain with given number of queues.
pub fn with_number_of_queues(number_of_queues: usize) -> Self {
assert!(number_of_queues != 0);
HashQueueChain {
@ -125,30 +165,54 @@ impl HashQueueChain {
}
}
/// Returns length of the whole chain.
pub fn len(&self) -> usize {
self.chain.iter().fold(0, |total, chain| total + chain.len())
}
/// Returns the length of the queue at `chain_index`.
/// Panics if `chain_index` is out of range.
pub fn len_of(&self, chain_index: usize) -> usize {
	self.chain[chain_index].len()
}
/// Returns true if the queue at `chain_index` is empty.
/// Panics if `chain_index` is out of range.
pub fn is_empty_at(&self, chain_index: usize) -> bool {
	self.chain[chain_index].is_empty()
}
pub fn back_at(&self, chain_index: usize) -> Option<H256> {
/// Returns element at the front of the given queue.
pub fn front_at(&self, chain_index: usize) -> Option<H256> {
let ref queue = self.chain[chain_index];
queue.back().cloned()
queue.front()
}
/// Returns element at the back of the given queue.
pub fn back_at(&self, chain_index: usize) -> Option<H256> {
let ref queue = self.chain[chain_index];
queue.back()
}
/// Returns the previous-to-back element of the queue at `chain_index`, if any.
/// Panics if `chain_index` is out of range.
pub fn pre_back_at(&self, chain_index: usize) -> Option<H256> {
	self.chain[chain_index].pre_back()
}
/// Returns the n-th element from the back (n starts at 0) of the queue at
/// `chain_index`; when fewer than n + 1 elements exist, falls back towards
/// the front element. Panics if `chain_index` is out of range.
pub fn back_skip_n_at(&self, chain_index: usize, n: usize) -> Option<H256> {
	self.chain[chain_index].back_skip_n(n)
}
/// Returns the back of the whole chain.
pub fn back(&self) -> Option<H256> {
let mut queue_index = self.chain.len() - 1;
loop {
let ref queue = self.chain[queue_index];
let queue_back = queue.back();
if queue_back.is_some() {
return queue_back.cloned();
return queue_back;
}
queue_index = queue_index - 1;
@ -158,10 +222,12 @@ impl HashQueueChain {
}
}
/// Returns true if the queue at `queue_index` contains the given hash.
/// Panics if `queue_index` is out of range.
pub fn is_contained_in(&self, queue_index: usize, hash: &H256) -> bool {
	self.chain[queue_index].contains(hash)
}
/// Returns the index of queue, hash is contained in.
pub fn contains_in(&self, hash: &H256) -> Option<usize> {
for i in 0..self.chain.len() {
if self.chain[i].contains(hash) {
@ -171,22 +237,27 @@ impl HashQueueChain {
None
}
/// Removes up to `n` hashes from the front of the queue at `queue_index`,
/// returning them front-first. Panics if `queue_index` is out of range.
pub fn pop_front_n_at(&mut self, queue_index: usize, n: usize) -> Vec<H256> {
	self.chain[queue_index].pop_front_n(n)
}
/// Pushes the hash onto the back of the queue at `queue_index`.
/// Panics (via `push_back`) if the hash is already present in that queue,
/// or if `queue_index` is out of range.
pub fn push_back_at(&mut self, queue_index: usize, hash: H256) {
	self.chain[queue_index].push_back(hash)
}
/// Pushes all given hashes onto the back of the queue at `queue_index`,
/// preserving their order. Panics if `queue_index` is out of range.
pub fn push_back_n_at(&mut self, queue_index: usize, hashes: Vec<H256>) {
	self.chain[queue_index].push_back_n(hashes)
}
/// Removes the hash from the queue at `queue_index`, returning where it was
/// found (`Missing` if absent). Panics if `queue_index` is out of range.
pub fn remove_at(&mut self, queue_index: usize, hash: &H256) -> HashPosition {
	self.chain[queue_index].remove(hash)
}
/// Removes all elements from the queue at `queue_index`.
/// Panics if `queue_index` is out of range.
pub fn remove_all_at(&mut self, queue_index: usize) {
	self.chain[queue_index].remove_all();
}
@ -208,3 +279,100 @@ impl Index<usize> for HashQueueChain {
panic!("invalid index");
}
}
#[cfg(test)]
mod tests {
use super::{HashQueue, HashQueueChain, HashPosition};
#[test]
fn hash_queue_empty() {
	// Every read/remove operation on a freshly created queue must report
	// emptiness or absence without panicking.
	let mut queue = HashQueue::new();
	assert_eq!(queue.len(), 0);
	assert_eq!(queue.is_empty(), true);
	assert_eq!(queue.front(), None);
	assert_eq!(queue.back(), None);
	assert_eq!(queue.pre_back(), None);
	assert_eq!(queue.back_skip_n(100), None);
	assert_eq!(queue.contains(&"000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f".into()), false);
	assert_eq!(queue.pop_front(), None);
	assert_eq!(queue.pop_front_n(100), vec![]);
	assert_eq!(queue.remove(&"000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f".into()), HashPosition::Missing);
}
#[test]
fn hash_queue_chain_empty() {
	// A fresh chain of queues reports emptiness everywhere, for every accessor.
	let mut chain = HashQueueChain::with_number_of_queues(3);
	assert_eq!(chain.len(), 0);
	assert_eq!(chain.len_of(0), 0);
	assert_eq!(chain.is_empty_at(0), true);
	assert_eq!(chain.front_at(0), None);
	assert_eq!(chain.back_at(0), None);
	assert_eq!(chain.pre_back_at(0), None);
	assert_eq!(chain.back_skip_n_at(0, 100), None);
	assert_eq!(chain.back(), None);
	assert_eq!(chain.is_contained_in(0, &"000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f".into()), false);
	assert_eq!(chain.contains_in(&"000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f".into()), None);
	assert_eq!(chain.pop_front_n_at(0, 100), vec![]);
	assert_eq!(chain.remove_at(0, &"000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f".into()), HashPosition::Missing);
}
#[test]
fn hash_queue_chain_not_empty() {
	// Fill three of the four queues; queue #3 stays empty on purpose.
	let mut chain = HashQueueChain::with_number_of_queues(4);
	chain.push_back_n_at(0, vec![
		"0000000000000000000000000000000000000000000000000000000000000000".into(),
		"0000000000000000000000000000000000000000000000000000000000000001".into(),
		"0000000000000000000000000000000000000000000000000000000000000002".into(),
	]);
	chain.push_back_n_at(1, vec![
		"0000000000000000000000000000000000000000000000000000000000000003".into(),
		"0000000000000000000000000000000000000000000000000000000000000004".into(),
	]);
	chain.push_back_n_at(2, vec![
		"0000000000000000000000000000000000000000000000000000000000000005".into(),
	]);
	// Total and per-queue lengths.
	assert_eq!(chain.len(), 6);
	assert_eq!(chain.len_of(0), 3);
	assert_eq!(chain.len_of(1), 2);
	assert_eq!(chain.len_of(2), 1);
	assert_eq!(chain.len_of(3), 0);
	assert_eq!(chain.is_empty_at(0), false);
	assert_eq!(chain.is_empty_at(1), false);
	assert_eq!(chain.is_empty_at(2), false);
	assert_eq!(chain.is_empty_at(3), true);
	// Front / back / pre-back accessors per queue.
	assert_eq!(chain.front_at(0), Some("0000000000000000000000000000000000000000000000000000000000000000".into()));
	assert_eq!(chain.front_at(1), Some("0000000000000000000000000000000000000000000000000000000000000003".into()));
	assert_eq!(chain.front_at(2), Some("0000000000000000000000000000000000000000000000000000000000000005".into()));
	assert_eq!(chain.front_at(3), None);
	assert_eq!(chain.back_at(0), Some("0000000000000000000000000000000000000000000000000000000000000002".into()));
	assert_eq!(chain.back_at(1), Some("0000000000000000000000000000000000000000000000000000000000000004".into()));
	assert_eq!(chain.back_at(2), Some("0000000000000000000000000000000000000000000000000000000000000005".into()));
	assert_eq!(chain.back_at(3), None);
	assert_eq!(chain.pre_back_at(0), Some("0000000000000000000000000000000000000000000000000000000000000001".into()));
	assert_eq!(chain.pre_back_at(1), Some("0000000000000000000000000000000000000000000000000000000000000003".into()));
	assert_eq!(chain.pre_back_at(2), None);
	assert_eq!(chain.pre_back_at(3), None);
	// Back of the whole chain is the back of the last non-empty queue.
	assert_eq!(chain.back(), Some("0000000000000000000000000000000000000000000000000000000000000005".into()));
	// Membership: per-queue and across the whole chain.
	assert_eq!(chain.is_contained_in(0, &"0000000000000000000000000000000000000000000000000000000000000002".into()), true);
	assert_eq!(chain.is_contained_in(1, &"0000000000000000000000000000000000000000000000000000000000000002".into()), false);
	assert_eq!(chain.is_contained_in(2, &"0000000000000000000000000000000000000000000000000000000000000002".into()), false);
	assert_eq!(chain.is_contained_in(3, &"0000000000000000000000000000000000000000000000000000000000000002".into()), false);
	assert_eq!(chain.contains_in(&"0000000000000000000000000000000000000000000000000000000000000002".into()), Some(0));
	assert_eq!(chain.contains_in(&"0000000000000000000000000000000000000000000000000000000000000005".into()), Some(2));
	assert_eq!(chain.contains_in(&"0000000000000000000000000000000000000000000000000000000000000009".into()), None);
	// back_skip_n_at falls back towards the front when skipping past the queue start.
	assert_eq!(chain.back_skip_n_at(0, 0), Some("0000000000000000000000000000000000000000000000000000000000000002".into()));
	assert_eq!(chain.back_skip_n_at(1, 0), Some("0000000000000000000000000000000000000000000000000000000000000004".into()));
	assert_eq!(chain.back_skip_n_at(2, 0), Some("0000000000000000000000000000000000000000000000000000000000000005".into()));
	assert_eq!(chain.back_skip_n_at(3, 0), None);
	assert_eq!(chain.back_skip_n_at(0, 1), Some("0000000000000000000000000000000000000000000000000000000000000001".into()));
	assert_eq!(chain.back_skip_n_at(1, 1), Some("0000000000000000000000000000000000000000000000000000000000000003".into()));
	assert_eq!(chain.back_skip_n_at(2, 1), Some("0000000000000000000000000000000000000000000000000000000000000005".into()));
	assert_eq!(chain.back_skip_n_at(3, 1), None);
	assert_eq!(chain.back_skip_n_at(0, 2), Some("0000000000000000000000000000000000000000000000000000000000000000".into()));
	assert_eq!(chain.back_skip_n_at(1, 2), Some("0000000000000000000000000000000000000000000000000000000000000003".into()));
	assert_eq!(chain.back_skip_n_at(2, 2), Some("0000000000000000000000000000000000000000000000000000000000000005".into()));
	assert_eq!(chain.back_skip_n_at(3, 2), None);
}
}

View File

@ -8,7 +8,7 @@ use p2p::OutboundSyncConnectionRef;
use message::common::InventoryType;
use message::types;
use synchronization::{Synchronization, SynchronizationRef, Config as SynchronizationConfig, Task as SynchronizationTask, TaskExecutor as SynchronizationTaskExecutor};
use synchronization_chain::{Chain, ChainRef, BlockState};
use synchronization_chain::{Chain, ChainRef};
use synchronization_executor::LocalSynchronizationTaskExecutor;
use best_block::BestBlock;
@ -74,19 +74,15 @@ impl LocalNode {
// (2) with 500 entries
// what is (1)?
// process unknown blocks
let unknown_blocks: Vec<_> = {
let chain = self.chain.read();
message.inventory.iter()
.filter(|item| item.inv_type == InventoryType::MessageBlock)
.filter(|item| chain.block_state(&item.hash) == BlockState::Unknown)
.map(|item| item.hash.clone())
.collect()
};
// process blocks first
let blocks_inventory: Vec<_> = message.inventory.iter()
.filter(|item| item.inv_type == InventoryType::MessageBlock)
.map(|item| item.hash.clone())
.collect();
// if there are unknown blocks => start synchronizing with peer
if !unknown_blocks.is_empty() {
self.sync.lock().on_unknown_blocks(peer_index, unknown_blocks);
if !blocks_inventory.is_empty() {
self.sync.lock().on_new_blocks_inventory(peer_index, blocks_inventory);
}
// TODO: process unknown transactions, etc...
@ -104,7 +100,8 @@ impl LocalNode {
trace!(target: "sync", "Got `getheaders` message from peer#{}", peer_index);
}
pub fn on_peer_transaction(&self, _peer_index: usize, _message: types::Tx) {
pub fn on_peer_transaction(&self, peer_index: usize, message: types::Tx) {
trace!(target: "sync", "Got `transaction` message from peer#{}. Transaction hash: {}", peer_index, message.transaction.hash());
}
pub fn on_peer_block(&self, peer_index: usize, message: types::Block) {

View File

@ -214,8 +214,8 @@ impl<T> Synchronization<T> where T: TaskExecutor + Send + 'static {
}
/// Try to queue synchronization of unknown blocks when new inventory is received.
pub fn on_unknown_blocks(&mut self, peer_index: usize, peer_hashes: Vec<H256>) {
self.process_unknown_blocks(peer_index, peer_hashes);
pub fn on_new_blocks_inventory(&mut self, peer_index: usize, peer_hashes: Vec<H256>) {
self.process_new_blocks_inventory(peer_index, peer_hashes);
self.execute_synchronization_tasks();
}
@ -241,10 +241,12 @@ impl<T> Synchronization<T> where T: TaskExecutor + Send + 'static {
chain.remove_blocks_with_state(BlockState::Requested);
chain.remove_blocks_with_state(BlockState::Scheduled);
chain.remove_blocks_with_state(BlockState::Verifying);
warn!(target: "sync", "Synchronization process restarting from block {:?}", chain.best_block());
}
/// Process new unknown blocks
fn process_unknown_blocks(&mut self, peer_index: usize, mut peer_hashes: Vec<H256>) {
/// Process new blocks inventory
fn process_new_blocks_inventory(&mut self, peer_index: usize, mut peer_hashes: Vec<H256>) {
// | requested | QUEUED |
// --- [1]
// --- [2] +
@ -375,9 +377,11 @@ impl<T> Synchronization<T> where T: TaskExecutor + Send + 'static {
/// Schedule new synchronization tasks, if any.
fn execute_synchronization_tasks(&mut self) {
let mut tasks: Vec<Task> = Vec::new();
let idle_peers = self.peers.idle_peers();
let idle_peers_len = idle_peers.len() as u64;
// prepar synchronization tasks
{
// prepare synchronization tasks
if idle_peers_len != 0 {
// display information if processed many blocks || enough time has passed since sync start
let mut chain = self.chain.write();
if let State::Synchronizing(timestamp, num_of_blocks) = self.state {
@ -399,16 +403,12 @@ impl<T> Synchronization<T> where T: TaskExecutor + Send + 'static {
let scheduled_hashes_len = chain.length_of_state(BlockState::Scheduled);
if scheduled_hashes_len < MAX_SCHEDULED_HASHES {
if self.state.is_synchronizing() {
if let Some(idle_peer) = self.peers.idle_peer() {
tasks.push(Task::RequestBestInventory(idle_peer));
self.peers.on_inventory_requested(idle_peer);
}
tasks.push(Task::RequestBestInventory(idle_peers[0]));
self.peers.on_inventory_requested(idle_peers[0]);
}
else {
if let Some(idle_peer) = self.peers.idle_peer() {
tasks.push(Task::RequestInventory(idle_peer));
self.peers.on_inventory_requested(idle_peer);
}
tasks.push(Task::RequestInventory(idle_peers[0]));
self.peers.on_inventory_requested(idle_peers[0]);
}
}
@ -416,20 +416,16 @@ impl<T> Synchronization<T> where T: TaskExecutor + Send + 'static {
let requested_hashes_len = chain.length_of_state(BlockState::Requested);
let verifying_hashes_len = chain.length_of_state(BlockState::Verifying);
if requested_hashes_len + verifying_hashes_len < MAX_REQUESTED_BLOCKS + MAX_VERIFYING_BLOCKS && scheduled_hashes_len != 0 {
let idle_peers = self.peers.idle_peers();
let idle_peers_len = idle_peers.len() as u64;
if idle_peers_len != 0 {
let chunk_size = min(MAX_BLOCKS_IN_REQUEST, max(scheduled_hashes_len / idle_peers_len, MIN_BLOCKS_IN_REQUEST));
for idle_peer in idle_peers {
let peer_chunk_size = min(chain.length_of_state(BlockState::Scheduled), chunk_size);
if peer_chunk_size == 0 {
break;
}
let requested_hashes = chain.request_blocks_hashes(peer_chunk_size);
self.peers.on_blocks_requested(idle_peer, &requested_hashes);
tasks.push(Task::RequestBlocks(idle_peer, requested_hashes));
let chunk_size = min(MAX_BLOCKS_IN_REQUEST, max(scheduled_hashes_len / idle_peers_len, MIN_BLOCKS_IN_REQUEST));
for idle_peer in idle_peers {
let peer_chunk_size = min(chain.length_of_state(BlockState::Scheduled), chunk_size);
if peer_chunk_size == 0 {
break;
}
let requested_hashes = chain.request_blocks_hashes(peer_chunk_size);
self.peers.on_blocks_requested(idle_peer, &requested_hashes);
tasks.push(Task::RequestBlocks(idle_peer, requested_hashes));
}
}
}
@ -462,15 +458,20 @@ impl<T> Synchronization<T> where T: TaskExecutor + Send + 'static {
/// Process successful block verification
fn on_block_verification_success(&mut self, block: Block) {
let hash = block.hash();
let mut chain = self.chain.write();
{
let hash = block.hash();
let mut chain = self.chain.write();
// remove from verifying queue
assert_eq!(chain.remove_block_with_state(&hash, BlockState::Verifying), HashPosition::Front);
// remove from verifying queue
assert_eq!(chain.remove_block_with_state(&hash, BlockState::Verifying), HashPosition::Front);
// insert to storage
chain.insert_best_block(block)
.expect("Error inserting to db.");
// insert to storage
chain.insert_best_block(block)
.expect("Error inserting to db.");
}
// continue with synchronization
self.execute_synchronization_tasks();
}
/// Process failed block verification
@ -479,6 +480,9 @@ impl<T> Synchronization<T> where T: TaskExecutor + Send + 'static {
// reset synchronization process
self.reset();
// start new tasks
self.execute_synchronization_tasks();
}
}
@ -535,7 +539,7 @@ mod tests {
let block1: Block = "010000006fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000982051fd1e4ba744bbbe680e1fee14677ba1a3c3540bf7b1cdb606e857233e0e61bc6649ffff001d01e362990101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d0104ffffffff0100f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac00000000".into();
let block2: Block = "010000004860eb18bf1b1620e37e9490fc8a427514416fd75159ab86688e9a8300000000d5fdcc541e25de1c7a5addedf24858b8bb665c9f36ef744ee42c316022c90f9bb0bc6649ffff001d08d2bd610101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d010bffffffff0100f2052a010000004341047211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073dee6c89064984f03385237d92167c13e236446b417ab79a0fcae412ae3316b77ac00000000".into();
sync.on_unknown_blocks(5, vec![block1.hash()]);
sync.on_new_blocks_inventory(5, vec![block1.hash()]);
let tasks = executor.lock().take_tasks();
assert_eq!(tasks.len(), 2);
assert_eq!(tasks[0], Task::RequestBestInventory(5));
@ -565,7 +569,8 @@ mod tests {
assert_eq!(sync.information().chain.scheduled, 0);
assert_eq!(sync.information().chain.requested, 0);
assert_eq!(sync.information().chain.stored, 2);
assert_eq!(sync.information().peers.idle, 1);
// we have just requested new `inventory` from the peer => peer is forgotten
assert_eq!(sync.information().peers.idle, 0);
assert_eq!(sync.information().peers.active, 0);
}
@ -576,7 +581,7 @@ mod tests {
let block2: Block = "010000004860eb18bf1b1620e37e9490fc8a427514416fd75159ab86688e9a8300000000d5fdcc541e25de1c7a5addedf24858b8bb665c9f36ef744ee42c316022c90f9bb0bc6649ffff001d08d2bd610101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d010bffffffff0100f2052a010000004341047211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073dee6c89064984f03385237d92167c13e236446b417ab79a0fcae412ae3316b77ac00000000".into();
sync.on_unknown_blocks(5, vec![block2.hash()]);
sync.on_new_blocks_inventory(5, vec![block2.hash()]);
sync.on_peer_block(5, block2);
// out-of-order block was presented by the peer
@ -585,7 +590,8 @@ mod tests {
assert_eq!(sync.information().chain.scheduled, 0);
assert_eq!(sync.information().chain.requested, 0);
assert_eq!(sync.information().chain.stored, 1);
assert_eq!(sync.information().peers.idle, 1);
// we have just requested new `inventory` from the peer => peer is forgotten
assert_eq!(sync.information().peers.idle, 0);
assert_eq!(sync.information().peers.active, 0);
// TODO: check that peer is penalized
}

View File

@ -1,3 +1,4 @@
use std::fmt;
use std::sync::Arc;
use parking_lot::RwLock;
use chain::{Block, RepresentH256};
@ -202,14 +203,14 @@ impl Chain {
/// Prepare best block locator hashes
pub fn best_block_locator_hashes(&self) -> Vec<H256> {
let mut result: Vec<H256> = Vec::with_capacity(4);
if let Some(best_block) = self.hash_chain.back_at(SCHEDULED_QUEUE) {
result.push(best_block);
if let Some(pre_best_block) = self.hash_chain.back_skip_n_at(SCHEDULED_QUEUE, 2) {
result.push(pre_best_block);
}
if let Some(best_block) = self.hash_chain.back_at(REQUESTED_QUEUE) {
result.push(best_block);
if let Some(pre_best_block) = self.hash_chain.back_skip_n_at(REQUESTED_QUEUE, 2) {
result.push(pre_best_block);
}
if let Some(best_block) = self.hash_chain.back_at(VERIFYING_QUEUE) {
result.push(best_block);
if let Some(pre_best_block) = self.hash_chain.back_skip_n_at(VERIFYING_QUEUE, 2) {
result.push(pre_best_block);
}
result.push(self.best_storage_block_hash.clone());
result
@ -324,6 +325,35 @@ impl Chain {
}
}
/// Debug representation: prints the worst/best block heights and hashes for
/// storage and for each in-memory queue (verifying, requested, scheduled).
impl fmt::Debug for Chain {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		try!(writeln!(f, "chain: ["));
		{
			// `num` tracks the height of the best block printed so far.
			let mut num = self.storage.best_block_number().unwrap() as usize;
			try!(writeln!(f, "\tworse(stored): {} {:?}", 0, self.storage.block_hash(0)));
			try!(writeln!(f, "\tbest(stored): {} {:?}", num, self.storage.block_hash(num as u32)));
			let queues = vec![
				("verifying", VERIFYING_QUEUE),
				("requested", REQUESTED_QUEUE),
				("scheduled", SCHEDULED_QUEUE),
			];
			for (state, queue) in queues {
				let queue_len = self.hash_chain.len_of(queue);
				if queue_len != 0 {
					// Queue elements occupy heights num+1 .. num+queue_len.
					try!(writeln!(f, "\tworse({}): {} {:?}", state, num + 1, self.hash_chain.front_at(queue)));
					// Advance to the height of this queue's back element
					// (was `num += 1 + queue_len`, which over-counted by one).
					num += queue_len;
					if let Some(pre_best) = self.hash_chain.pre_back_at(queue) {
						try!(writeln!(f, "\tpre-best({}): {} {:?}", state, num - 1, pre_best));
					}
					try!(writeln!(f, "\tbest({}): {} {:?}", state, num, self.hash_chain.back_at(queue)));
				}
			}
		}
		writeln!(f, "]")
	}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
@ -391,7 +421,7 @@ mod tests {
]);
chain.request_blocks_hashes(10);
assert_eq!(chain.best_block_locator_hashes()[0], "0000000000000000000000000000000000000000000000000000000000000016".into());
assert_eq!(chain.best_block_locator_hashes()[0], "0000000000000000000000000000000000000000000000000000000000000014".into());
assert_eq!(chain.block_locator_hashes(), vec![
"0000000000000000000000000000000000000000000000000000000000000016".into(),
"0000000000000000000000000000000000000000000000000000000000000015".into(),
@ -414,7 +444,7 @@ mod tests {
"0000000000000000000000000000000000000000000000000000000000000022".into(),
]);
assert_eq!(chain.best_block_locator_hashes()[0], "0000000000000000000000000000000000000000000000000000000000000022".into());
assert_eq!(chain.best_block_locator_hashes()[0], "0000000000000000000000000000000000000000000000000000000000000020".into());
assert_eq!(chain.block_locator_hashes(), vec![
"0000000000000000000000000000000000000000000000000000000000000022".into(),
"0000000000000000000000000000000000000000000000000000000000000021".into(),

View File

@ -41,6 +41,7 @@ impl Peers {
}
/// Get an arbitrary idle peer, if any (compiled for tests only).
#[cfg(test)]
pub fn idle_peer(&self) -> Option<usize> {
	self.idle_peers.iter().cloned().next()
}
@ -77,13 +78,22 @@ impl Peers {
/// Blocks have been requested from peer.
pub fn on_blocks_requested(&mut self, peer_index: usize, blocks_hashes: &Vec<H256>) {
self.blocks_requests.entry(peer_index).or_insert(HashSet::new()).extend(blocks_hashes.iter().cloned());
// inventory can only be requested from idle peers
assert!(!self.blocks_requests.contains_key(&peer_index));
self.idle_peers.remove(&peer_index);
self.blocks_requests.entry(peer_index).or_insert(HashSet::new()).extend(blocks_hashes.iter().cloned());
}
/// Inventory has been requested from peer.
pub fn on_inventory_requested(&mut self, _peer_index: usize) {
// TODO
pub fn on_inventory_requested(&mut self, peer_index: usize) {
// inventory can only be requested from idle peers
assert!(!self.blocks_requests.contains_key(&peer_index));
self.idle_peers.remove(&peer_index);
// peer is now out-of-synchronization process, because:
// 1) if it has new blocks, it will respond with `inventory` message && will be inserted back here
// 2) if it has no new blocks => either synchronization is completed, or it is behind us in sync
}
/// Reset peers state