Improved logging to console (#440)

* Remove redundant timestamp.

* Measure block processing in the logs

* Use tabs to separate status.

* Show updates every 1000 blocks.

* Reword some warnings.

* Add some color to log level.

* Reduce code redundancy

* No need to specify the type for blocks_speed

* Utilize the return value of match.

* Count all peers.

* Add whitespaces

* Remove another redundant timestamp

* One item per line

* Use tabs not spaces

* Remove unused import

* Remove redundant time imports

* Paint it black.
This commit is contained in:
Afri 2017-08-17 22:45:36 +02:00 committed by Marek Kotewicz
parent a2ae59c0e3
commit b2faa05ee7
5 changed files with 34 additions and 23 deletions

View File

@@ -4,7 +4,7 @@ extern crate env_logger;
extern crate time;
use ansi_term::Colour as Color;
use log::LogRecord;
use log::{LogRecord, LogLevel};
use env_logger::LogBuilder;
fn strftime() -> String {
@@ -29,7 +29,18 @@ pub struct DateAndColorLogFormatter;
impl LogFormatter for DateAndColorLogFormatter {
fn format(&self, record: &LogRecord) -> String {
let timestamp = strftime();
format!("{} {} {} {}", Color::Black.bold().paint(timestamp), record.level(), record.target(), record.args())
let log_level = match record.level() {
LogLevel::Error => Color::Fixed(9).bold().paint(record.level().to_string()),
LogLevel::Warn => Color::Fixed(11).bold().paint(record.level().to_string()),
LogLevel::Info => Color::Fixed(10).paint(record.level().to_string()),
LogLevel::Debug => Color::Fixed(14).paint(record.level().to_string()),
LogLevel::Trace => Color::Fixed(12).paint(record.level().to_string()),
};
format!("{} {} {} {}"
, Color::Fixed(8).bold().paint(timestamp)
, log_level
, Color::Fixed(8).paint(record.target())
, record.args())
}
}

View File

@@ -4,7 +4,6 @@ use std::collections::hash_map::Entry;
use std::sync::Arc;
use futures::Future;
use parking_lot::Mutex;
use time;
use time::precise_time_s;
use chain::{IndexedBlockHeader, IndexedTransaction, Transaction, IndexedBlock};
use message::types;
@@ -826,17 +825,17 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
/// Print synchronization information
pub fn print_synchronization_information(&mut self) {
if let State::Synchronizing(timestamp, num_of_blocks) = self.state {
let new_timestamp = time::precise_time_s();
let new_timestamp = precise_time_s();
let timestamp_diff = new_timestamp - timestamp;
let new_num_of_blocks = self.chain.best_storage_block().number;
let blocks_diff = if new_num_of_blocks > num_of_blocks { new_num_of_blocks - num_of_blocks } else { 0 };
if timestamp_diff >= 60.0 || blocks_diff > 1000 {
self.state = State::Synchronizing(time::precise_time_s(), new_num_of_blocks);
use time;
info!(target: "sync", "{:?} Processed {} blocks in {:.2} seconds. Peers: {:?}. Chain: {:?}"
, time::strftime("%H:%M:%S", &time::now()).unwrap()
, blocks_diff, timestamp_diff
if timestamp_diff >= 60.0 || blocks_diff >= 1000 {
self.state = State::Synchronizing(precise_time_s(), new_num_of_blocks);
let blocks_speed = blocks_diff as f64 / timestamp_diff;
info!(target: "sync", "Processed {} blocks in {:.2} seconds ({:.2} blk/s).\tPeers: {:?}.\tChain: {:?}"
, blocks_diff
, timestamp_diff
, blocks_speed
, self.peers_tasks.information()
, self.chain.information());
}
@@ -987,7 +986,7 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
}
self.shared_state.update_synchronizing(true);
self.state = State::Synchronizing(time::precise_time_s(), self.chain.best_storage_block().number);
self.state = State::Synchronizing(precise_time_s(), self.chain.best_storage_block().number);
}
/// Switch to nearly saturated state
@@ -1027,9 +1026,7 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
self.chain.forget_all_blocks_with_state(BlockState::Requested);
self.chain.forget_all_blocks_with_state(BlockState::Scheduled);
use time;
info!(target: "sync", "{:?} @ Switched to saturated state. Chain information: {:?}",
time::strftime("%H:%M:%S", &time::now()).unwrap(),
info!(target: "sync", "Switched to saturated state.\tChain: {:?}",
self.chain.information());
}

View File

@@ -192,7 +192,7 @@ pub fn manage_synchronization_peers_blocks(config: &ManagePeersConfig, peers: Pe
}
// decrease score && move to the idle queue
warn!(target: "sync", "Failed to get requested block from peer#{} in {} seconds", worst_peer_index, time_diff);
warn!(target: "sync", "Failed to get requested block from peer#{} in {:.2} seconds.", worst_peer_index, time_diff);
let failed_blocks = peers_tasks.reset_blocks_tasks(worst_peer_index);
// mark blocks as failed
@@ -202,9 +202,9 @@
// if peer failed many times => forget it
if peers_tasks.on_peer_block_failure(worst_peer_index) {
warn!(target: "sync", "Too many failures for peer#{}. Excluding from synchronization", worst_peer_index);
warn!(target: "sync", "Too many failures for peer#{}. Excluding from synchronization.", worst_peer_index);
peers_tasks.unuseful_peer(worst_peer_index);
peers.misbehaving(worst_peer_index, &format!("Too many failures"));
peers.misbehaving(worst_peer_index, &format!("Too many failures."));
}
}
@@ -232,8 +232,8 @@ pub fn manage_synchronization_peers_headers(config: &ManagePeersConfig, peers: P
// if peer failed many times => forget it
if peers_tasks.on_peer_headers_failure(worst_peer_index) {
warn!(target: "sync", "Too many headers failures for peer#{}. Excluding from synchronization", worst_peer_index);
peers.misbehaving(worst_peer_index, &format!("Too many headers failures"));
warn!(target: "sync", "Too many header failures for peer#{}. Excluding from synchronization.", worst_peer_index);
peers.misbehaving(worst_peer_index, &format!("Too many header failures."));
}
}
}

View File

@@ -145,14 +145,14 @@ impl PeersContainer for PeersImpl {
fn misbehaving(&self, peer_index: PeerIndex, reason: &str) {
if let Some(peer) = self.peers.write().remove(&peer_index) {
warn!(target: "sync", "Disconnecting from peer#{} due to misbehaving: {}", peer_index, reason);
warn!(target: "sync", "Disconnecting from peer#{} due to misbehavior: {}", peer_index, reason);
peer.connection.close();
}
}
fn dos(&self, peer_index: PeerIndex, reason: &str) {
if let Some(peer) = self.peers.write().remove(&peer_index) {
warn!(target: "sync", "Disconnecting from peer#{} due to DOS: {}", peer_index, reason);
warn!(target: "sync", "Disconnecting from peer#{} due to DoS: {}", peer_index, reason);
peer.connection.close();
}
}

View File

@@ -16,6 +16,8 @@ const BLOCKS_TO_INSPECT: usize = 32;
/// Information on synchronization peers
pub struct Information {
/// total # of peers.
pub all: usize,
/// # of peers that are marked as useful for current synchronization session && have no pending requests.
pub idle: usize,
/// # of peers that are marked as non-useful for current synchronization session && have no pending requests.
@@ -93,6 +95,7 @@ impl PeersTasks {
pub fn information(&self) -> Information {
let active_for_headers: HashSet<_> = self.headers_requests.keys().cloned().collect();
Information {
all: self.all.len(),
idle: self.idle_for_blocks.difference(&active_for_headers).count(),
unuseful: self.unuseful.len(),
active: active_for_headers.union(&self.blocks_requests.keys().cloned().collect()).count(),
@@ -375,7 +378,7 @@ impl PeerStats {
impl fmt::Debug for Information {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[active:{}, idle:{}, bad:{}]", self.active, self.idle, self.unuseful)
write!(f, "{} (act: {}, idl: {}, bad: {})", self.all, self.active, self.idle, self.unuseful)
}
}