2018-10-25 14:56:21 -07:00
|
|
|
//! The `poh_service` module implements a service that records the passing of
|
|
|
|
//! "ticks", a measure of time in the PoH stream
|
2021-06-04 08:23:06 -07:00
|
|
|
use {
|
|
|
|
crate::poh_recorder::{PohRecorder, Record},
|
|
|
|
crossbeam_channel::Receiver,
|
|
|
|
log::*,
|
2021-07-14 05:16:29 -07:00
|
|
|
solana_entry::poh::Poh,
|
2021-06-04 08:23:06 -07:00
|
|
|
solana_measure::measure::Measure,
|
|
|
|
solana_sdk::poh_config::PohConfig,
|
|
|
|
std::{
|
|
|
|
sync::{
|
|
|
|
atomic::{AtomicBool, Ordering},
|
|
|
|
Arc, Mutex,
|
|
|
|
},
|
|
|
|
thread::{self, sleep, Builder, JoinHandle},
|
|
|
|
time::{Duration, Instant},
|
|
|
|
},
|
|
|
|
};
|
2019-04-29 15:26:52 -07:00
|
|
|
|
2018-10-25 14:56:21 -07:00
|
|
|
/// Service that owns the background "tick producer" thread driving the
/// Proof of History stream (see the loop variants spawned in [`PohService::new`]).
pub struct PohService {
    // Handle to the spawned "solana-poh-service-tick_producer" thread;
    // joined in `PohService::join`.
    tick_producer: JoinHandle<()>,
}
|
|
|
|
|
2019-05-18 14:01:36 -07:00
|
|
|
// Number of hashes to batch together.
// * If this number is too small, PoH hash rate will suffer.
// * The larger this number is from 1, the speed of recording transactions will suffer due to lock
// contention with the PoH hashing within `tick_producer()`.
//
// Can use test_poh_service to calibrate this
pub const DEFAULT_HASHES_PER_BATCH: u64 = 64;

// Default CPU core index the hashing thread is pinned to via
// `core_affinity::set_for_current` in `PohService::new`.
pub const DEFAULT_PINNED_CPU_CORE: usize = 0;

// Per-slot time budget (in ns) reserved for work outside of PoH hashing;
// spread across the slot's ticks in `PohService::target_ns_per_tick`.
const TARGET_SLOT_ADJUSTMENT_NS: u64 = 50_000_000;
|
|
|
|
|
2021-03-23 07:10:04 -07:00
|
|
|
/// Accumulated timing counters for the PoH tick-producer loop.
/// All counters accumulate between metric reports and are reset by
/// `PohTiming::report` once it emits a datapoint.
#[derive(Debug)]
struct PohTiming {
    // Ticks produced since the last report.
    num_ticks: u64,
    // Hashes performed since the last report (includes record calls).
    num_hashes: u64,
    // Time spent busy-waiting to catch up to the ideal tick time, in us.
    total_sleep_us: u64,
    // Time spent acquiring the PohRecorder/Poh mutexes, in ns.
    total_lock_time_ns: u64,
    // Time spent inside `Poh::hash`, in ns.
    total_hash_time_ns: u64,
    // Time spent inside `PohRecorder::tick`, in ns.
    total_tick_time_ns: u64,
    // When the last datapoint was emitted; reports fire ~once per second.
    last_metric: Instant,
    // Time spent inside the record loop of `record_or_hash`, in us.
    total_record_time_us: u64,
}
|
|
|
|
|
|
|
|
impl PohTiming {
|
|
|
|
fn new() -> Self {
|
|
|
|
Self {
|
|
|
|
num_ticks: 0,
|
|
|
|
num_hashes: 0,
|
|
|
|
total_sleep_us: 0,
|
|
|
|
total_lock_time_ns: 0,
|
|
|
|
total_hash_time_ns: 0,
|
|
|
|
total_tick_time_ns: 0,
|
|
|
|
last_metric: Instant::now(),
|
2021-03-24 12:48:32 -07:00
|
|
|
total_record_time_us: 0,
|
2021-03-23 07:10:04 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
fn report(&mut self, ticks_per_slot: u64) {
|
|
|
|
if self.last_metric.elapsed().as_millis() > 1000 {
|
|
|
|
let elapsed_us = self.last_metric.elapsed().as_micros() as u64;
|
|
|
|
let us_per_slot = (elapsed_us * ticks_per_slot) / self.num_ticks;
|
|
|
|
datapoint_info!(
|
|
|
|
"poh-service",
|
|
|
|
("ticks", self.num_ticks as i64, i64),
|
|
|
|
("hashes", self.num_hashes as i64, i64),
|
|
|
|
("elapsed_us", us_per_slot, i64),
|
|
|
|
("total_sleep_us", self.total_sleep_us, i64),
|
|
|
|
("total_tick_time_us", self.total_tick_time_ns / 1000, i64),
|
|
|
|
("total_lock_time_us", self.total_lock_time_ns / 1000, i64),
|
|
|
|
("total_hash_time_us", self.total_hash_time_ns / 1000, i64),
|
2021-03-24 12:48:32 -07:00
|
|
|
("total_record_time_us", self.total_record_time_us, i64),
|
2021-03-23 07:10:04 -07:00
|
|
|
);
|
|
|
|
self.total_sleep_us = 0;
|
|
|
|
self.num_ticks = 0;
|
|
|
|
self.num_hashes = 0;
|
|
|
|
self.total_tick_time_ns = 0;
|
|
|
|
self.total_lock_time_ns = 0;
|
|
|
|
self.total_hash_time_ns = 0;
|
|
|
|
self.last_metric = Instant::now();
|
2021-03-24 12:48:32 -07:00
|
|
|
self.total_record_time_us = 0;
|
2021-03-23 07:10:04 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-25 14:56:21 -07:00
|
|
|
impl PohService {
    /// Spawns the PoH tick-producer thread and returns the service handle.
    ///
    /// The loop the thread runs is selected from `poh_config`:
    /// * `hashes_per_tick == None`, `target_tick_count == None` →
    ///   `sleepy_tick_producer` (sleep-driven ticks until `poh_exit` is set).
    /// * `hashes_per_tick == None`, `target_tick_count == Some(..)` →
    ///   `short_lived_sleepy_tick_producer` (a fixed number of ticks).
    /// * `hashes_per_tick == Some(..)` → the hashing `tick_producer` loop,
    ///   with the thread pinned to `pinned_cpu_core` when core IDs are
    ///   available.
    ///
    /// Whichever loop runs, the thread stores `true` into `poh_exit` when it
    /// finishes, so other services observe the shutdown.
    pub fn new(
        poh_recorder: Arc<Mutex<PohRecorder>>,
        poh_config: &Arc<PohConfig>,
        poh_exit: &Arc<AtomicBool>,
        ticks_per_slot: u64,
        pinned_cpu_core: usize,
        hashes_per_batch: u64,
        record_receiver: Receiver<Record>,
    ) -> Self {
        let poh_exit_ = poh_exit.clone();
        let poh_config = poh_config.clone();
        let tick_producer = Builder::new()
            .name("solana-poh-service-tick_producer".to_string())
            .spawn(move || {
                // Ask the OS (via sys-tuner) for realtime scheduling; PoH is
                // the validator's clock and must not be starved.
                solana_sys_tuner::request_realtime_poh();
                if poh_config.hashes_per_tick.is_none() {
                    if poh_config.target_tick_count.is_none() {
                        Self::sleepy_tick_producer(
                            poh_recorder,
                            &poh_config,
                            &poh_exit_,
                            record_receiver,
                        );
                    } else {
                        Self::short_lived_sleepy_tick_producer(
                            poh_recorder,
                            &poh_config,
                            &poh_exit_,
                            record_receiver,
                        );
                    }
                } else {
                    // PoH service runs in a tight loop, generating hashes as fast as possible.
                    // Let's dedicate one of the CPU cores to this thread so that it can gain
                    // from cache performance.
                    if let Some(cores) = core_affinity::get_core_ids() {
                        core_affinity::set_for_current(cores[pinned_cpu_core]);
                    }
                    Self::tick_producer(
                        poh_recorder,
                        &poh_exit_,
                        ticks_per_slot,
                        hashes_per_batch,
                        record_receiver,
                        Self::target_ns_per_tick(
                            ticks_per_slot,
                            poh_config.target_tick_duration.as_nanos() as u64,
                        ),
                    );
                }
                // Signal shutdown to anyone watching the exit flag.
                poh_exit_.store(true, Ordering::Relaxed);
            })
            .unwrap();

        Self { tick_producer }
    }

    /// Returns the hashing-time budget per tick, in nanoseconds: the
    /// configured tick duration minus a per-tick share of
    /// `TARGET_SLOT_ADJUSTMENT_NS` (time reserved for non-hashing work).
    /// Saturates at zero; `ticks_per_slot == 0` means no adjustment.
    pub fn target_ns_per_tick(ticks_per_slot: u64, target_tick_duration_ns: u64) -> u64 {
        // Account for some extra time outside of PoH generation to account
        // for processing time outside PoH.
        let adjustment_per_tick = if ticks_per_slot > 0 {
            TARGET_SLOT_ADJUSTMENT_NS / ticks_per_slot
        } else {
            0
        };
        target_tick_duration_ns.saturating_sub(adjustment_per_tick)
    }

    /// Low-power tick loop used when `hashes_per_tick` is not configured:
    /// drain at most one pending record request, sleep one tick duration,
    /// tick, and repeat until `poh_exit` is set.
    fn sleepy_tick_producer(
        poh_recorder: Arc<Mutex<PohRecorder>>,
        poh_config: &PohConfig,
        poh_exit: &AtomicBool,
        record_receiver: Receiver<Record>,
    ) {
        while !poh_exit.load(Ordering::Relaxed) {
            // Zero timeout: service a record request if one is already queued,
            // otherwise fall through immediately to the sleep.
            Self::read_record_receiver_and_process(
                &poh_recorder,
                &record_receiver,
                Duration::from_millis(0),
            );
            sleep(poh_config.target_tick_duration);
            poh_recorder.lock().unwrap().tick();
        }
    }

    /// Waits up to `timeout` for one record request, applies it via
    /// `PohRecorder::record`, and sends the result back on the request's
    /// reply channel.
    ///
    /// # Panics
    /// Panics if the reply channel is disconnected — the requester must
    /// outlive its request.
    pub fn read_record_receiver_and_process(
        poh_recorder: &Arc<Mutex<PohRecorder>>,
        record_receiver: &Receiver<Record>,
        timeout: Duration,
    ) {
        let record = record_receiver.recv_timeout(timeout);
        if let Ok(record) = record {
            if record
                .sender
                .send(poh_recorder.lock().unwrap().record(
                    record.slot,
                    record.mixin,
                    record.transactions,
                ))
                .is_err()
            {
                panic!("Error returning mixin hash");
            }
        }
    }

    /// Like `sleepy_tick_producer`, but produces exactly
    /// `poh_config.target_tick_count` ticks and then returns, warning (once)
    /// if an exit signal arrives before the tick budget is exhausted.
    ///
    /// # Panics
    /// Panics if `poh_config.target_tick_count` is `None` — `new` only
    /// routes here when it is `Some`.
    fn short_lived_sleepy_tick_producer(
        poh_recorder: Arc<Mutex<PohRecorder>>,
        poh_config: &PohConfig,
        poh_exit: &AtomicBool,
        record_receiver: Receiver<Record>,
    ) {
        let mut warned = false;
        for _ in 0..poh_config.target_tick_count.unwrap() {
            Self::read_record_receiver_and_process(
                &poh_recorder,
                &record_receiver,
                Duration::from_millis(0),
            );
            sleep(poh_config.target_tick_duration);
            poh_recorder.lock().unwrap().tick();
            if poh_exit.load(Ordering::Relaxed) && !warned {
                warned = true;
                warn!("exit signal is ignored because PohService is scheduled to exit soon");
            }
        }
    }

    /// One iteration of the hashing loop: either services queued record
    /// requests (holding the `PohRecorder` lock) or hashes batches (holding
    /// only the `Poh` lock) until a tick is due or a record request arrives.
    ///
    /// A pending request that interrupts hashing is stashed in `next_record`
    /// so the next iteration starts in record mode. Lock/hash/record/sleep
    /// durations are accumulated into `timing`.
    // returns true if we need to tick
    fn record_or_hash(
        next_record: &mut Option<Record>,
        poh_recorder: &Arc<Mutex<PohRecorder>>,
        timing: &mut PohTiming,
        record_receiver: &Receiver<Record>,
        hashes_per_batch: u64,
        poh: &Arc<Mutex<Poh>>,
        target_ns_per_tick: u64,
    ) -> bool {
        match next_record.take() {
            Some(mut record) => {
                // received message to record
                // so, record for as long as we have queued up record requests
                let mut lock_time = Measure::start("lock");
                let mut poh_recorder_l = poh_recorder.lock().unwrap();
                lock_time.stop();
                timing.total_lock_time_ns += lock_time.as_ns();
                let mut record_time = Measure::start("record");
                loop {
                    let res = poh_recorder_l.record(
                        record.slot,
                        record.mixin,
                        std::mem::take(&mut record.transactions),
                    );
                    let _ = record.sender.send(res); // what do we do on failure here? Ignore for now.
                    timing.num_hashes += 1; // note: may have also ticked inside record

                    let new_record_result = record_receiver.try_recv();
                    match new_record_result {
                        Ok(new_record) => {
                            // we already have second request to record, so record again while we still have the mutex
                            record = new_record;
                        }
                        Err(_) => {
                            break;
                        }
                    }
                }
                record_time.stop();
                timing.total_record_time_us += record_time.as_us();
                // PohRecorder.record would have ticked if it needed to, so should_tick will be false
            }
            None => {
                // did not receive instructions to record, so hash until we notice we've been asked to record (or we need to tick) and then remember what to record
                let mut lock_time = Measure::start("lock");
                let mut poh_l = poh.lock().unwrap();
                lock_time.stop();
                timing.total_lock_time_ns += lock_time.as_ns();
                loop {
                    timing.num_hashes += hashes_per_batch;
                    let mut hash_time = Measure::start("hash");
                    let should_tick = poh_l.hash(hashes_per_batch);
                    let ideal_time = poh_l.target_poh_time(target_ns_per_tick);
                    hash_time.stop();
                    timing.total_hash_time_ns += hash_time.as_ns();
                    if should_tick {
                        // nothing else can be done. tick required.
                        return true;
                    }
                    // check to see if a record request has been sent
                    if let Ok(record) = record_receiver.try_recv() {
                        // remember the record we just received as the next record to occur
                        *next_record = Some(record);
                        break;
                    }
                    // check to see if we need to wait to catch up to ideal
                    let wait_start = Instant::now();
                    if ideal_time <= wait_start {
                        // no, keep hashing. We still hold the lock.
                        continue;
                    }

                    // busy wait, polling for new records and after dropping poh lock (reset can occur, for example)
                    drop(poh_l);
                    while ideal_time > Instant::now() {
                        // check to see if a record request has been sent
                        if let Ok(record) = record_receiver.try_recv() {
                            // remember the record we just received as the next record to occur
                            *next_record = Some(record);
                            break;
                        }
                    }
                    timing.total_sleep_us += wait_start.elapsed().as_micros() as u64;
                    break;
                }
            }
        };
        false // should_tick = false for all code that reaches here
    }

    /// High-rate hashing loop used when `hashes_per_tick` is configured.
    /// Repeatedly calls `record_or_hash`; when it reports a tick is due,
    /// takes the `PohRecorder` lock just long enough to tick, updates
    /// metrics, and checks `poh_exit`. The exit flag is only consulted at
    /// tick boundaries.
    fn tick_producer(
        poh_recorder: Arc<Mutex<PohRecorder>>,
        poh_exit: &AtomicBool,
        ticks_per_slot: u64,
        hashes_per_batch: u64,
        record_receiver: Receiver<Record>,
        target_ns_per_tick: u64,
    ) {
        // Share the inner Poh state so hashing can proceed without holding
        // the PohRecorder lock.
        let poh = poh_recorder.lock().unwrap().poh.clone();
        let mut timing = PohTiming::new();
        let mut next_record = None;
        loop {
            let should_tick = Self::record_or_hash(
                &mut next_record,
                &poh_recorder,
                &mut timing,
                &record_receiver,
                hashes_per_batch,
                &poh,
                target_ns_per_tick,
            );
            if should_tick {
                // Lock PohRecorder only for the final hash. record_or_hash will lock PohRecorder for record calls but not for hashing.
                {
                    let mut lock_time = Measure::start("lock");
                    let mut poh_recorder_l = poh_recorder.lock().unwrap();
                    lock_time.stop();
                    timing.total_lock_time_ns += lock_time.as_ns();
                    let mut tick_time = Measure::start("tick");
                    poh_recorder_l.tick();
                    tick_time.stop();
                    timing.total_tick_time_ns += tick_time.as_ns();
                }
                timing.num_ticks += 1;

                timing.report(ticks_per_slot);
                if poh_exit.load(Ordering::Relaxed) {
                    break;
                }
            }
        }
    }

    /// Blocks until the tick-producer thread finishes.
    pub fn join(self) -> thread::Result<()> {
        self.tick_producer.join()
    }
}
|
2018-12-05 12:49:41 -08:00
|
|
|
|
|
|
|
#[cfg(test)]
mod tests {
    use {
        super::*,
        rand::{thread_rng, Rng},
        solana_ledger::{
            blockstore::Blockstore,
            genesis_utils::{create_genesis_config, GenesisConfigInfo},
            get_tmp_ledger_path,
            leader_schedule_cache::LeaderScheduleCache,
        },
        solana_measure::measure::Measure,
        solana_perf::test_tx::test_tx,
        solana_runtime::bank::Bank,
        solana_sdk::{
            clock, hash::hash, pubkey::Pubkey, timing, transaction::VersionedTransaction,
        },
        std::time::Duration,
    };

    // End-to-end exercise of PohService: a producer thread pushes record
    // requests while the service hashes/ticks, and the test asserts that
    // full ticks, partial ticks, and entry records are all observed.
    // With RUN_TIME set it instead runs as a calibration benchmark for
    // HASHES_PER_BATCH (see DEFAULT_HASHES_PER_BATCH).
    #[test]
    #[ignore]
    fn test_poh_service() {
        solana_logger::setup();
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
        let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
        let prev_hash = bank.last_blockhash();
        let ledger_path = get_tmp_ledger_path!();
        {
            let blockstore = Blockstore::open(&ledger_path)
                .expect("Expected to be able to open database ledger");

            let default_target_tick_duration =
                timing::duration_as_us(&PohConfig::default().target_tick_duration);
            let target_tick_duration = Duration::from_micros(default_target_tick_duration);
            // hashes_per_tick is Some(..) so the service runs the hashing
            // tick_producer loop (not the sleepy variants).
            let poh_config = Arc::new(PohConfig {
                hashes_per_tick: Some(clock::DEFAULT_HASHES_PER_TICK),
                target_tick_duration,
                target_tick_count: None,
            });
            let exit = Arc::new(AtomicBool::new(false));

            let ticks_per_slot = bank.ticks_per_slot();
            let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
            let blockstore = Arc::new(blockstore);
            let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
                bank.tick_height(),
                prev_hash,
                bank.clone(),
                Some((4, 4)),
                ticks_per_slot,
                &Pubkey::default(),
                &blockstore,
                &leader_schedule_cache,
                &poh_config,
                exit.clone(),
            );
            let poh_recorder = Arc::new(Mutex::new(poh_recorder));
            let ticks_per_slot = bank.ticks_per_slot();
            let bank_slot = bank.slot();

            // specify RUN_TIME to run in a benchmark-like mode
            // to calibrate batch size
            let run_time = std::env::var("RUN_TIME")
                .map(|x| x.parse().unwrap())
                .unwrap_or(0);
            let is_test_run = run_time == 0;

            // Producer thread: repeatedly records a test transaction against
            // the recorder, with random pauses in test mode so that both
            // partial and full ticks occur.
            let entry_producer = {
                let poh_recorder = poh_recorder.clone();
                let exit = exit.clone();

                Builder::new()
                    .name("solana-poh-service-entry_producer".to_string())
                    .spawn(move || {
                        let now = Instant::now();
                        let mut total_us = 0;
                        let mut total_times = 0;
                        let h1 = hash(b"hello world!");
                        let tx = VersionedTransaction::from(test_tx());
                        loop {
                            // send some data
                            let mut time = Measure::start("record");
                            let _ = poh_recorder.lock().unwrap().record(
                                bank_slot,
                                h1,
                                vec![tx.clone()],
                            );
                            time.stop();
                            total_us += time.as_us();
                            total_times += 1;
                            if is_test_run && thread_rng().gen_ratio(1, 4) {
                                sleep(Duration::from_millis(200));
                            }

                            if exit.load(Ordering::Relaxed) {
                                info!(
                                    "spent:{}ms record: {}ms entries recorded: {}",
                                    now.elapsed().as_millis(),
                                    total_us / 1000,
                                    total_times,
                                );
                                break;
                            }
                        }
                    })
                    .unwrap()
            };

            let hashes_per_batch = std::env::var("HASHES_PER_BATCH")
                .map(|x| x.parse().unwrap())
                .unwrap_or(DEFAULT_HASHES_PER_BATCH);
            let poh_service = PohService::new(
                poh_recorder.clone(),
                &poh_config,
                &exit,
                0,
                DEFAULT_PINNED_CPU_CORE,
                hashes_per_batch,
                record_receiver,
            );
            poh_recorder.lock().unwrap().set_bank(&bank);

            // get some events
            let mut hashes = 0;
            let mut need_tick = true;
            let mut need_entry = true;
            let mut need_partial = true;
            let mut num_ticks = 0;

            let time = Instant::now();
            // In test mode, loop until we have seen a full tick, an entry,
            // and a partial tick; in benchmark mode, loop until RUN_TIME.
            while run_time != 0 || need_tick || need_entry || need_partial {
                let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap();

                if entry.is_tick() {
                    num_ticks += 1;
                    assert!(
                        entry.num_hashes <= poh_config.hashes_per_tick.unwrap(),
                        "{} <= {}",
                        entry.num_hashes,
                        poh_config.hashes_per_tick.unwrap()
                    );

                    if entry.num_hashes == poh_config.hashes_per_tick.unwrap() {
                        need_tick = false;
                    } else {
                        need_partial = false;
                    }

                    hashes += entry.num_hashes;

                    // Entries between ticks must sum to exactly one tick's hashes.
                    assert_eq!(hashes, poh_config.hashes_per_tick.unwrap());

                    hashes = 0;
                } else {
                    assert!(entry.num_hashes >= 1);
                    need_entry = false;
                    hashes += entry.num_hashes;
                }

                if run_time != 0 {
                    if time.elapsed().as_millis() > run_time {
                        break;
                    }
                } else {
                    assert!(
                        time.elapsed().as_secs() < 60,
                        "Test should not run for this long! {}s tick {} entry {} partial {}",
                        time.elapsed().as_secs(),
                        need_tick,
                        need_entry,
                        need_partial,
                    );
                }
            }
            info!(
                "target_tick_duration: {} ticks_per_slot: {}",
                poh_config.target_tick_duration.as_nanos(),
                ticks_per_slot
            );
            let elapsed = time.elapsed();
            info!(
                "{} ticks in {}ms {}us/tick",
                num_ticks,
                elapsed.as_millis(),
                elapsed.as_micros() / num_ticks
            );

            exit.store(true, Ordering::Relaxed);
            poh_service.join().unwrap();
            entry_producer.join().unwrap();
        }
        Blockstore::destroy(&ledger_path).unwrap();
    }
}
|