Merge pull request #136 from ethcore/h256-logging

traces & warnings to use H256::to_reversed_str()
Svyatoslav Nikolsky 2016-11-16 18:16:11 +03:00 committed by GitHub
commit 024ed05d15
5 changed files with 46 additions and 14 deletions
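
For context, Bitcoin block and transaction hashes are conventionally displayed with their byte order reversed relative to the raw in-memory bytes, which is why the log statements below switch from printing the H256 value directly to printing hash.to_reversed_str(). The sketch below is illustrative only, assuming H256 is a newtype over [u8; 32]; the real type and helper live in the project's primitives crate and may differ in detail.

// Illustrative stand-in for the project's H256 type, not the actual crate code.
struct H256([u8; 32]);

impl H256 {
    // Hex-encode the hash with the byte order reversed, matching the
    // display convention of block explorers and bitcoind logs.
    fn to_reversed_str(&self) -> String {
        self.0.iter().rev().map(|byte| format!("{:02x}", byte)).collect()
    }
}

fn main() {
    let mut raw = [0u8; 32];
    raw[0] = 0x6f; // made-up hash with a single non-zero byte
    let hash = H256(raw);
    // Prints sixty-two zero hex digits followed by "6f", the explorer-style form.
    println!("block {}", hash.to_reversed_str());
}

Logging the reversed form lets a hash from the trace output be pasted straight into a block explorer when debugging reorganizations and sync issues.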

View File

@@ -372,7 +372,7 @@ impl Storage {
/// all transaction meta is removed
/// DOES NOT update best block
fn decanonize_block(&self, context: &mut UpdateContext, hash: &H256) -> Result<(), Error> {
trace!(target: "reorg", "Decanonizing block {}", hash);
trace!(target: "reorg", "Decanonizing block {}", hash.to_reversed_str());
// ensure that block is of the main chain
try!(self.block_number(hash).ok_or(Error::not_main(hash)));
@@ -640,19 +640,34 @@ impl Store for Storage {
Err(Error::Consistency(consistency_error)) => {
match consistency_error {
ConsistencyError::DoubleSpend(hash) => {
warn!(target: "reorg", "Failed to reorganize to {} due to double-spend at {}", &block_hash, &hash);
warn!(
target: "reorg",
"Failed to reorganize to {} due to double-spend at {}",
block_hash.to_reversed_str(),
hash.to_reversed_str()
);
// return without any commit
return Err(Error::reorganize(&hash));
},
ConsistencyError::UnknownSpending(hash) => {
warn!(target: "reorg", "Failed to reorganize to {} due to spending unknown transaction {}", &block_hash, &hash);
warn!(
target: "reorg",
"Failed to reorganize to {} due to spending unknown transaction {}",
block_hash.to_reversed_str(),
hash.to_reversed_str()
);
// return without any commit
return Err(Error::reorganize(&hash));
},
ConsistencyError::Unknown(hash) => {
// this is an orphan block being inserted or a disconnected chain head being updated; we allow that (for now)
// so it is a no-op
warn!(target: "reorg", "Disconnected chain head {} updated with {}", &hash, &block_hash);
warn!(
target: "reorg",
"Disconnected chain head {} updated with {}",
hash.to_reversed_str(),
block_hash.to_reversed_str()
);
},
_ => {
// we don't allow other errors on side chain/orphans

View File

@@ -132,11 +132,11 @@ impl<T, U, V> LocalNode<T, U, V> where T: SynchronizationTaskExecutor + PeersCon
}
pub fn on_peer_transaction(&self, peer_index: usize, message: types::Tx) {
trace!(target: "sync", "Got `transaction` message from peer#{}. Transaction hash: {}", peer_index, message.transaction.hash());
trace!(target: "sync", "Got `transaction` message from peer#{}. Transaction hash: {}", peer_index, message.transaction.hash().to_reversed_str());
}
pub fn on_peer_block(&self, peer_index: usize, message: types::Block) {
trace!(target: "sync", "Got `block` message from peer#{}. Block hash: {}", peer_index, message.block.hash());
trace!(target: "sync", "Got `block` message from peer#{}. Block hash: {}", peer_index, message.block.hash().to_reversed_str());
// try to process new block
self.client.lock().on_peer_block(peer_index, message.block);

View File

@@ -335,7 +335,13 @@ impl<T> Client for SynchronizationClient<T> where T: TaskExecutor {
if {
self.chain.read().block_state(&header0.previous_header_hash) == BlockState::Unknown
} {
warn!(target: "sync", "Previous header of the first header from peer#{} `headers` message is unknown. First: {:?}. Previous: {:?}", peer_index, header0.hash(), header0.previous_header_hash);
warn!(
target: "sync",
"Previous header of the first header from peer#{} `headers` message is unknown. First: {:?}. Previous: {:?}",
peer_index,
header0.hash().to_reversed_str(),
header0.previous_header_hash.to_reversed_str()
);
return;
}
@@ -463,7 +469,7 @@ impl<T> Client for SynchronizationClient<T> where T: TaskExecutor {
/// Process failed block verification
fn on_block_verification_error(&mut self, err: &str, hash: &H256) {
warn!(target: "sync", "Block {:?} verification failed with error {:?}", hash, err);
warn!(target: "sync", "Block {:?} verification failed with error {:?}", hash.to_reversed_str(), err);
{
let mut chain = self.chain.write();
@@ -599,7 +605,13 @@ impl<T> SynchronizationClient<T> where T: TaskExecutor {
let new_blocks_hashes = hashes.split_off(new_block_index);
let new_blocks_headers = headers.split_off(new_block_index);
let new_blocks_hashes_len = new_blocks_hashes.len();
trace!(target: "sync", "Sch. {} headers from peer#{}. First {:?}, last: {:?}", new_blocks_hashes_len, peer_index, new_blocks_hashes[0], new_blocks_hashes[new_blocks_hashes_len - 1]);
trace!(
target: "sync", "Sch. {} headers from peer#{}. First {:?}, last: {:?}",
new_blocks_hashes_len,
peer_index,
new_blocks_hashes[0].to_reversed_str(),
new_blocks_hashes[new_blocks_hashes_len - 1].to_reversed_str()
);
chain.schedule_blocks_headers(new_blocks_hashes, new_blocks_headers);
// remember peer as useful
self.peers.useful_peer(peer_index);
@@ -631,7 +643,12 @@ impl<T> SynchronizationClient<T> where T: TaskExecutor {
BlockState::Unknown => {
if self.state.is_synchronizing() {
// when synchronizing, we tend to receive all blocks in-order
trace!(target: "sync", "Ignoring block {} from peer#{}, because its parent is unknown and we are synchronizing", block_hash, peer_index);
trace!(
target: "sync",
"Ignoring block {} from peer#{}, because its parent is unknown and we are synchronizing",
block_hash.to_reversed_str(),
peer_index
);
// remove block from current queue
chain.forget(&block_hash);
// remove orphaned blocks
@@ -826,7 +843,7 @@ impl<T> SynchronizationClient<T> where T: TaskExecutor {
chain.information());
}
// finally - ask all known peers for their best blocks inventory, in case some peer
// has led us to the fork
{
let mut executor = self.executor.lock();
@@ -1034,7 +1051,7 @@ pub mod tests {
let client = SynchronizationClient::new(config, &handle, executor.clone(), chain.clone());
(event_loop, handle, executor, chain, client)
}
}
#[test]
fn synchronization_saturated_on_start() {

View File

@@ -368,7 +368,7 @@ impl Server for SynchronizationServer {
fn serve_getheaders(&self, peer_index: usize, message: types::GetHeaders, id: u32) {
if let Some(best_common_block) = self.locate_known_block_header(message.block_locator_hashes) {
trace!(target: "sync", "Best common block header with peer#{} is block#{}: {:?}", peer_index, best_common_block.number, best_common_block.hash);
trace!(target: "sync", "Best common block header with peer#{} is block#{}: {:?}", peer_index, best_common_block.number, best_common_block.hash.to_reversed_str());
let task = IndexedServerTask::new(ServerTask::ServeGetHeaders(best_common_block, message.hash_stop), ServerTaskIndex::Final(id));
self.queue.lock().add_task(peer_index, task);
}

View File

@@ -157,7 +157,7 @@ impl Queue {
items.push_front(hash, ScheduleItem::Continued(item.block(), num));
},
Err(e) => {
trace!(target: "verification", "Verification of block {} failed: {:?}", &hash, e);
trace!(target: "verification", "Verification of block {} failed: {:?}", hash.to_reversed_str(), e);
let mut invalid = self.invalid.write();
let mut processing = self.processing.write();