2018-12-07 14:09:29 -08:00
|
|
|
//! The `replay_stage` replays transactions broadcast by the leader.
|
2018-05-22 14:26:28 -07:00
|
|
|
|
2018-12-07 19:16:27 -08:00
|
|
|
use crate::bank::Bank;
|
|
|
|
use crate::cluster_info::ClusterInfo;
|
|
|
|
use crate::counter::Counter;
|
|
|
|
use crate::entry::{EntryReceiver, EntrySender};
|
2018-11-16 08:04:46 -08:00
|
|
|
use solana_sdk::hash::Hash;
|
2018-11-16 08:45:59 -08:00
|
|
|
|
2019-01-09 14:33:44 -08:00
|
|
|
use crate::entry::EntrySlice;
|
2019-01-15 12:07:58 -08:00
|
|
|
use crate::leader_scheduler::TICKS_PER_BLOCK;
|
2018-12-07 19:16:27 -08:00
|
|
|
use crate::packet::BlobError;
|
|
|
|
use crate::result::{Error, Result};
|
|
|
|
use crate::service::Service;
|
|
|
|
use crate::streamer::{responder, BlobSender};
|
2019-01-10 09:21:38 -08:00
|
|
|
use crate::vote_signer_proxy::VoteSignerProxy;
|
2018-08-06 11:35:45 -07:00
|
|
|
use log::Level;
|
2018-11-16 08:45:59 -08:00
|
|
|
use solana_metrics::{influxdb, submit};
|
2018-12-03 10:26:28 -08:00
|
|
|
use solana_sdk::signature::{Keypair, KeypairUtil};
|
2018-11-16 08:45:59 -08:00
|
|
|
use solana_sdk::timing::duration_as_ms;
|
2019-01-11 12:58:31 -08:00
|
|
|
use std::net::UdpSocket;
|
2018-09-25 15:41:29 -07:00
|
|
|
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
2018-07-05 12:01:40 -07:00
|
|
|
use std::sync::mpsc::channel;
|
2018-07-05 15:29:49 -07:00
|
|
|
use std::sync::mpsc::RecvTimeoutError;
|
2018-07-05 12:01:40 -07:00
|
|
|
use std::sync::{Arc, RwLock};
|
2018-07-03 21:14:08 -07:00
|
|
|
use std::thread::{self, Builder, JoinHandle};
|
2018-05-22 15:30:46 -07:00
|
|
|
use std::time::Duration;
|
2018-09-26 20:58:06 -07:00
|
|
|
use std::time::Instant;
|
2018-05-22 14:26:28 -07:00
|
|
|
|
2018-12-13 18:43:10 -08:00
|
|
|
/// Upper bound on the number of entries coalesced from the window receiver in a
/// single `ReplayStage::process_entries` iteration, limiting the work done
/// before verifying/voting on the batch.
pub const MAX_ENTRY_RECV_PER_ITER: usize = 512;
|
|
|
|
|
2018-10-10 16:49:41 -07:00
|
|
|
/// Value returned by the replay thread when it exits deliberately
/// (as opposed to the channel disconnecting).
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum ReplayStageReturnType {
    /// Replay stopped because this node became the scheduled leader.
    /// Fields: (tick_height, entry_height, last_entry_id) at the point of rotation.
    LeaderRotation(u64, u64, Hash),
}
|
|
|
|
|
2018-12-07 14:09:29 -08:00
|
|
|
// Implement a destructor for the ReplayStage thread to signal it exited
// even on panics
struct Finalizer {
    // Shared exit flag raised when this value is dropped.
    exit_sender: Arc<AtomicBool>,
}

impl Finalizer {
    /// Wraps the shared exit flag; the flag is set to `true` when the
    /// returned value goes out of scope (including during a panic unwind).
    fn new(exit_sender: Arc<AtomicBool>) -> Self {
        Finalizer { exit_sender }
    }
}

// Implement a destructor for Finalizer.
impl Drop for Finalizer {
    fn drop(&mut self) {
        // `store` only needs a shared reference to the atomic; the previous
        // `self.exit_sender.clone().store(...)` bumped and immediately dropped
        // the Arc refcount for no reason.
        self.exit_sender.store(true, Ordering::Relaxed);
    }
}
|
|
|
|
|
2018-12-07 14:09:29 -08:00
|
|
|
/// Handles for the two threads that make up the replay stage: a responder
/// that transmits vote blobs, and the replay loop itself.
pub struct ReplayStage {
    /// Responder thread draining the vote blob channel onto a UDP socket.
    t_responder: JoinHandle<()>,
    /// Replay-loop thread; yields `Some(LeaderRotation(..))` if it exited
    /// because this node became the leader, `None` otherwise.
    t_replay: JoinHandle<Option<ReplayStageReturnType>>,
}
|
|
|
|
|
2018-12-07 14:09:29 -08:00
|
|
|
impl ReplayStage {
    /// Process entry blobs, already in order.
    ///
    /// Drains up to `MAX_ENTRY_RECV_PER_ITER` entries from `window_receiver`,
    /// verifies the PoH chain against `last_entry_id`, feeds the entries to the
    /// bank in per-vote batches (one batch per `TICKS_PER_BLOCK` tick
    /// boundary), sends a validator vote at each boundary when a signer and
    /// blob sender are provided, and forwards the processed entries on
    /// `ledger_entry_sender`. `entry_height` and `last_entry_id` are updated
    /// in place. Stops early (truncating the forwarded entries) if this node
    /// becomes the scheduled leader mid-batch.
    #[allow(clippy::too_many_arguments)]
    fn process_entries(
        bank: &Arc<Bank>,
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        window_receiver: &EntryReceiver,
        keypair: &Arc<Keypair>,
        vote_signer: Option<&Arc<VoteSignerProxy>>,
        vote_blob_sender: Option<&BlobSender>,
        ledger_entry_sender: &EntrySender,
        entry_height: &mut u64,
        last_entry_id: &mut Hash,
    ) -> Result<()> {
        let timer = Duration::new(1, 0);
        //coalesce all the available entries into a single vote
        let mut entries = window_receiver.recv_timeout(timer)?;
        while let Ok(mut more) = window_receiver.try_recv() {
            entries.append(&mut more);
            // Bound the batch so one iteration can't grow without limit.
            if entries.len() >= MAX_ENTRY_RECV_PER_ITER {
                break;
            }
        }

        submit(
            influxdb::Point::new("replicate-stage")
                .add_field("count", influxdb::Value::Integer(entries.len() as i64))
                .to_owned(),
        );

        let mut res = Ok(());
        let mut num_entries_to_write = entries.len();
        let now = Instant::now();
        // Reject the whole batch if the PoH chain doesn't verify from the
        // previously recorded entry id.
        if !entries.as_slice().verify(last_entry_id) {
            inc_new_counter_info!("replicate_stage-verify-fail", entries.len());
            return Err(Error::BlobError(BlobError::VerificationFailed));
        }
        inc_new_counter_info!(
            "replicate_stage-verify-duration",
            duration_as_ms(&now.elapsed()) as usize
        );

        let (current_leader, _) = bank
            .get_current_leader()
            .expect("Scheduled leader should be calculated by this point");
        let my_id = keypair.pubkey();

        // Next vote tick is ceiling of (current tick/ticks per block)
        let mut num_ticks_to_next_vote = TICKS_PER_BLOCK - (bank.tick_height() % TICKS_PER_BLOCK);
        let mut start_entry_index = 0;
        for (i, entry) in entries.iter().enumerate() {
            inc_new_counter_info!("replicate-stage_bank-tick", bank.tick_height() as usize);
            if entry.is_tick() {
                num_ticks_to_next_vote -= 1;
            }
            inc_new_counter_info!(
                "replicate-stage_tick-to-vote",
                num_ticks_to_next_vote as usize
            );
            // If it's the last entry in the vector, i will be vec len - 1.
            // If we don't process the entry now, the for loop will exit and the entry
            // will be dropped.
            if 0 == num_ticks_to_next_vote || (i + 1) == entries.len() {
                // Apply the batch accumulated since the last vote boundary.
                res = bank.process_entries(&entries[start_entry_index..=i]);

                if res.is_err() {
                    // TODO: This will return early from the first entry that has an erroneous
                    // transaction, instead of processing the rest of the entries in the vector
                    // of received entries. This is in line with previous behavior when
                    // bank.process_entries() was used to process the entries, but doesn't solve the
                    // issue that the bank state was still changed, leading to inconsistencies with the
                    // leader as the leader currently should not be publishing erroneous transactions
                    inc_new_counter_info!(
                        "replicate-stage_failed_process_entries",
                        (i - start_entry_index)
                    );

                    break;
                }

                // Vote only at a tick boundary (not merely because the batch ended).
                if 0 == num_ticks_to_next_vote {
                    if let Some(signer) = vote_signer {
                        if let Some(sender) = vote_blob_sender {
                            // NOTE(review): panics if the vote cannot be sent
                            // (e.g. blob channel disconnected).
                            signer
                                .send_validator_vote(bank, &cluster_info, sender)
                                .unwrap();
                        }
                    }
                }
                let (scheduled_leader, _) = bank
                    .get_current_leader()
                    .expect("Scheduled leader should be calculated by this point");

                // TODO: Remove this soon once we boot the leader from ClusterInfo
                if scheduled_leader != current_leader {
                    cluster_info.write().unwrap().set_leader(scheduled_leader);
                }

                // Leader rotation: stop replaying, only forward entries up to here.
                if my_id == scheduled_leader {
                    num_entries_to_write = i + 1;
                    break;
                }
                start_entry_index = i + 1;
                num_ticks_to_next_vote = TICKS_PER_BLOCK;
            }
        }

        // If leader rotation happened, only write the entries up to leader rotation.
        entries.truncate(num_entries_to_write);
        *last_entry_id = entries
            .last()
            .expect("Entries cannot be empty at this point")
            .id;

        inc_new_counter_info!(
            "replicate-transactions",
            entries.iter().map(|x| x.transactions.len()).sum()
        );

        let entries_len = entries.len() as u64;
        // TODO: In line with previous behavior, this will write all the entries even if
        // an error occurred processing one of the entries (causing the rest of the entries to
        // not be processed).
        if entries_len != 0 {
            ledger_entry_sender.send(entries)?;
        }

        *entry_height += entries_len;
        // Surface any deferred bank error only after the entries/height bookkeeping.
        res?;
        inc_new_counter_info!(
            "replicate_stage-duration",
            duration_as_ms(&now.elapsed()) as usize
        );

        Ok(())
    }

    /// Spawns the replay stage: a responder thread that transmits vote blobs
    /// over a fresh UDP socket, and a replay thread that loops on
    /// `process_entries` until the window channel disconnects or this node
    /// becomes the scheduled leader.
    ///
    /// Returns the stage handle plus the receiver carrying processed entries
    /// destined for the ledger writer.
    #[allow(clippy::new_ret_no_self)]
    pub fn new(
        keypair: Arc<Keypair>,
        vote_signer: Option<Arc<VoteSignerProxy>>,
        bank: Arc<Bank>,
        cluster_info: Arc<RwLock<ClusterInfo>>,
        window_receiver: EntryReceiver,
        exit: Arc<AtomicBool>,
        entry_height: u64,
        last_entry_id: Hash,
    ) -> (Self, EntryReceiver) {
        let (vote_blob_sender, vote_blob_receiver) = channel();
        let (ledger_entry_sender, ledger_entry_receiver) = channel();
        let send = UdpSocket::bind("0.0.0.0:0").expect("bind");
        let t_responder = responder("replay_stage", Arc::new(send), vote_blob_receiver);

        // NOTE(review): `keypair` is already an `Arc`, so this produces
        // `Arc<Arc<Keypair>>`; it compiles via deref coercion but looks like a
        // leftover from an earlier signature — confirm before removing.
        let keypair = Arc::new(keypair);
        let t_replay = Builder::new()
            .name("solana-replay-stage".to_string())
            .spawn(move || {
                // Raises `exit` when this thread ends, even on panic.
                let _exit = Finalizer::new(exit);
                let mut entry_height_ = entry_height;
                let mut last_entry_id = last_entry_id;
                loop {
                    let (leader_id, _) = bank
                        .get_current_leader()
                        .expect("Scheduled leader should be calculated by this point");

                    if leader_id == keypair.pubkey() {
                        inc_new_counter_info!(
                            "replay_stage-new_leader",
                            bank.tick_height() as usize
                        );
                        return Some(ReplayStageReturnType::LeaderRotation(
                            bank.tick_height(),
                            entry_height_,
                            // We should never start the TPU / this stage on the exact entry that
                            // causes leader rotation (Fullnode should automatically transition on
                            // startup if it detects we are no longer a validator). Hence we can
                            // assume that some entry must have triggered leader rotation.
                            last_entry_id,
                        ));
                    }

                    match Self::process_entries(
                        &bank,
                        &cluster_info,
                        &window_receiver,
                        &keypair,
                        vote_signer.as_ref(),
                        Some(&vote_blob_sender),
                        &ledger_entry_sender,
                        &mut entry_height_,
                        &mut last_entry_id,
                    ) {
                        // Upstream hung up: clean shutdown.
                        Err(Error::RecvTimeoutError(RecvTimeoutError::Disconnected)) => break,
                        // No entries within the timeout: retry.
                        Err(Error::RecvTimeoutError(RecvTimeoutError::Timeout)) => (),
                        Err(e) => error!("{:?}", e),
                        Ok(()) => (),
                    }
                }

                None
            })
            .unwrap();

        (
            Self {
                t_responder,
                t_replay,
            },
            ledger_entry_receiver,
        )
    }
}
|
2018-07-03 21:14:08 -07:00
|
|
|
|
2018-12-07 14:09:29 -08:00
|
|
|
impl Service for ReplayStage {
    type JoinReturnType = Option<ReplayStageReturnType>;

    /// Joins both internal threads, returning the replay thread's result:
    /// `Some(LeaderRotation(..))` if replay exited due to leader rotation,
    /// `None` if the entry channel disconnected.
    fn join(self) -> thread::Result<Option<ReplayStageReturnType>> {
        self.t_responder.join()?;
        self.t_replay.join()
    }
}
|
2018-09-13 14:00:17 -07:00
|
|
|
|
2018-10-10 16:49:41 -07:00
|
|
|
#[cfg(test)]
|
|
|
|
mod test {
|
2018-12-07 19:16:27 -08:00
|
|
|
use crate::bank::Bank;
|
|
|
|
use crate::cluster_info::{ClusterInfo, Node};
|
2019-01-09 14:33:44 -08:00
|
|
|
use crate::db_ledger::create_tmp_sample_ledger;
|
2019-01-03 21:29:21 -08:00
|
|
|
use crate::db_ledger::{DbLedger, DEFAULT_SLOT_HEIGHT};
|
2019-01-09 14:33:44 -08:00
|
|
|
use crate::entry::create_ticks;
|
2018-12-07 19:16:27 -08:00
|
|
|
use crate::entry::Entry;
|
|
|
|
use crate::fullnode::Fullnode;
|
|
|
|
use crate::leader_scheduler::{
|
|
|
|
make_active_set_entries, LeaderScheduler, LeaderSchedulerConfig,
|
|
|
|
};
|
2019-01-05 12:57:52 -08:00
|
|
|
|
2018-12-07 19:16:27 -08:00
|
|
|
use crate::packet::BlobError;
|
2018-12-07 14:09:29 -08:00
|
|
|
use crate::replay_stage::{ReplayStage, ReplayStageReturnType};
|
2018-12-07 19:16:27 -08:00
|
|
|
use crate::result::Error;
|
|
|
|
use crate::service::Service;
|
2019-01-10 09:21:38 -08:00
|
|
|
use crate::vote_signer_proxy::VoteSignerProxy;
|
2018-11-16 08:04:46 -08:00
|
|
|
use solana_sdk::hash::Hash;
|
2018-12-03 10:26:28 -08:00
|
|
|
use solana_sdk::signature::{Keypair, KeypairUtil};
|
2019-01-11 12:58:31 -08:00
|
|
|
use solana_vote_signer::rpc::LocalVoteSigner;
|
2018-10-17 13:42:54 -07:00
|
|
|
use std::fs::remove_dir_all;
|
2018-10-10 16:49:41 -07:00
|
|
|
use std::sync::atomic::{AtomicBool, Ordering};
|
|
|
|
use std::sync::mpsc::channel;
|
|
|
|
use std::sync::{Arc, RwLock};
|
|
|
|
|
|
|
|
#[test]
pub fn test_replay_stage_leader_rotation_exit() {
    // Verifies that ReplayStage exits with LeaderRotation(tick_height,
    // entry_height, last_id) once enough ticks are replayed for this node to
    // become leader, and that only pre-rotation entries reach the ledger
    // writer channel.
    solana_logger::setup();

    // Set up dummy node to host a ReplayStage
    let my_keypair = Keypair::new();
    let my_id = my_keypair.pubkey();
    let my_node = Node::new_localhost_with_pubkey(my_id);
    let cluster_info_me = ClusterInfo::new(my_node.info.clone());

    // Create keypair for the old leader
    let old_leader_id = Keypair::new().pubkey();

    // Create a ledger
    let num_ending_ticks = 3;
    let (_, mint_keypair, my_ledger_path, genesis_entries) = create_tmp_sample_ledger(
        "test_replay_stage_leader_rotation_exit",
        10_000,
        num_ending_ticks,
        old_leader_id,
        500,
    );
    let mut last_id = genesis_entries
        .last()
        .expect("expected at least one genesis entry")
        .id;

    let my_keypair = Arc::new(my_keypair);
    // Write two entries to the ledger so that the validator is in the active set:
    // 1) Give the validator a nonzero number of tokens 2) A vote from the validator.
    // This will cause leader rotation after the bootstrap height
    let (active_set_entries, vote_account_id) =
        make_active_set_entries(&my_keypair, &mint_keypair, &last_id, &last_id, 0);
    last_id = active_set_entries.last().unwrap().id;
    let initial_tick_height = genesis_entries
        .iter()
        .fold(0, |tick_count, entry| tick_count + entry.is_tick() as u64);
    let active_set_entries_len = active_set_entries.len() as u64;
    let initial_non_tick_height = genesis_entries.len() as u64 - initial_tick_height;
    let initial_entry_len = genesis_entries.len() as u64 + active_set_entries_len;

    // Persist the active-set entries after genesis so the bank sees them on load.
    {
        let db_ledger = DbLedger::open(&my_ledger_path).unwrap();
        db_ledger
            .write_entries(
                DEFAULT_SLOT_HEIGHT,
                genesis_entries.len() as u64,
                &active_set_entries,
            )
            .unwrap();
    }

    // Set up the LeaderScheduler so that this node becomes the leader at
    // bootstrap_height = num_bootstrap_slots * leader_rotation_interval
    let leader_rotation_interval = 16;
    let num_bootstrap_slots = 2;
    let bootstrap_height = num_bootstrap_slots * leader_rotation_interval;
    let leader_scheduler_config = LeaderSchedulerConfig::new(
        Some(bootstrap_height),
        Some(leader_rotation_interval),
        Some(leader_rotation_interval * 2),
        Some(bootstrap_height),
    );

    let leader_scheduler =
        Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config)));

    // Set up the bank
    let (bank, _, last_entry_id) =
        Fullnode::new_bank_from_ledger(&my_ledger_path, leader_scheduler);

    // Set up the replay stage
    let (entry_sender, entry_receiver) = channel();
    let exit = Arc::new(AtomicBool::new(false));
    let (replay_stage, ledger_writer_recv) = ReplayStage::new(
        my_keypair,
        Some(Arc::new(vote_account_id)),
        Arc::new(bank),
        Arc::new(RwLock::new(cluster_info_me)),
        entry_receiver,
        exit.clone(),
        initial_entry_len,
        last_entry_id,
    );

    // Send enough ticks to trigger leader rotation
    let extra_entries = leader_rotation_interval;
    let total_entries_to_send = (bootstrap_height + extra_entries) as usize;
    let num_hashes = 1;
    let mut entries_to_send = vec![];
    while entries_to_send.len() < total_entries_to_send {
        let entry = Entry::new(&mut last_id, 0, num_hashes, vec![]);
        last_id = entry.id;
        entries_to_send.push(entry);
    }

    // Sanity check: genesis ticks alone must not already trigger rotation.
    assert!((num_ending_ticks as u64) < bootstrap_height);

    // Add on the only entries that weren't ticks to the bootstrap height to get the
    // total expected entry length
    let leader_rotation_index = (bootstrap_height - initial_tick_height) as usize;
    let expected_entry_height =
        bootstrap_height + initial_non_tick_height + active_set_entries_len - 1;
    let expected_last_id = entries_to_send[leader_rotation_index - 2].id;
    entry_sender.send(entries_to_send.clone()).unwrap();

    // Wait for replay_stage to exit and check return value is correct
    assert_eq!(
        Some(ReplayStageReturnType::LeaderRotation(
            bootstrap_height,
            expected_entry_height,
            expected_last_id,
        )),
        replay_stage.join().expect("replay stage join")
    );

    // Check that the entries on the ledger writer channel are correct
    let received_ticks = ledger_writer_recv
        .recv()
        .expect("Expected to receive an entry on the ledger writer receiver");

    assert_eq!(
        &received_ticks[..],
        &entries_to_send[..leader_rotation_index - 1]
    );

    // The Finalizer must have raised the exit flag when the replay thread ended.
    assert_eq!(exit.load(Ordering::Relaxed), true);

    let _ignored = remove_dir_all(&my_ledger_path);
}
|
2018-11-12 11:40:32 -08:00
|
|
|
|
|
|
|
#[test]
fn test_vote_error_replay_stage_correctness() {
    // Verifies that a failed vote send (no leader contact info in ClusterInfo)
    // does not prevent ReplayStage from forwarding entries to the ledger
    // writer channel, and that the stage shuts down cleanly when the entry
    // sender is dropped.

    // Set up dummy node to host a ReplayStage
    let my_keypair = Keypair::new();
    let my_id = my_keypair.pubkey();
    let my_node = Node::new_localhost_with_pubkey(my_id);

    // Create keypair for the leader
    let leader_id = Keypair::new().pubkey();
    let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::default()));

    let num_ending_ticks = 1;
    let (_genesis_block, _mint_keypair, my_ledger_path, genesis_entries) =
        create_tmp_sample_ledger(
            "test_vote_error_replay_stage_correctness",
            10_000,
            num_ending_ticks,
            leader_id,
            500,
        );

    let initial_entry_len = genesis_entries.len();

    // Set up the bank
    let (bank, _, last_entry_id) =
        Fullnode::new_bank_from_ledger(&my_ledger_path, leader_scheduler);

    // Set up the cluster info
    let cluster_info_me = Arc::new(RwLock::new(ClusterInfo::new(my_node.info.clone())));

    // Set up the replay stage
    let bank = Arc::new(bank);
    let (entry_sender, entry_receiver) = channel();
    let exit = Arc::new(AtomicBool::new(false));
    let my_keypair = Arc::new(my_keypair);
    let vote_signer = Arc::new(VoteSignerProxy::new(
        &my_keypair,
        Box::new(LocalVoteSigner::default()),
    ));
    let (replay_stage, ledger_writer_recv) = ReplayStage::new(
        my_keypair.clone(),
        Some(vote_signer.clone()),
        bank.clone(),
        cluster_info_me.clone(),
        entry_receiver,
        exit.clone(),
        initial_entry_len as u64,
        last_entry_id,
    );

    // Vote sender should error because no leader contact info is found in the
    // ClusterInfo
    let (mock_sender, _mock_receiver) = channel();
    let _vote_err = vote_signer.send_validator_vote(&bank, &cluster_info_me, &mock_sender);

    // Send ReplayStage an entry, should see it on the ledger writer receiver
    let next_tick = create_ticks(
        1,
        genesis_entries
            .last()
            .expect("Expected nonzero number of entries in genesis")
            .id,
    );
    entry_sender
        .send(next_tick.clone())
        .expect("Error sending entry to ReplayStage");
    let received_tick = ledger_writer_recv
        .recv()
        .expect("Expected to recieve an entry on the ledger writer receiver");

    assert_eq!(next_tick, received_tick);
    // Dropping the sender disconnects the window channel, ending the replay loop.
    drop(entry_sender);
    replay_stage
        .join()
        .expect("Expect successful ReplayStage exit");
    let _ignored = remove_dir_all(&my_ledger_path);
}
|
|
|
|
|
|
|
|
#[test]
fn test_vote_error_replay_stage_leader_rotation() {
    // Verifies that leader rotation still occurs (with the correct tick
    // height, entry height, and last entry id) even when every vote send
    // errors because ClusterInfo has no leader contact info.

    // Set up dummy node to host a ReplayStage
    let my_keypair = Keypair::new();
    let my_id = my_keypair.pubkey();
    let my_node = Node::new_localhost_with_pubkey(my_id);

    // Create keypair for the leader
    let leader_id = Keypair::new().pubkey();

    // Create the ledger
    let (_genesis_block, mint_keypair, my_ledger_path, genesis_entries) =
        create_tmp_sample_ledger(
            "test_vote_error_replay_stage_leader_rotation",
            10_000,
            1,
            leader_id,
            500,
        );

    let mut last_id = genesis_entries
        .last()
        .expect("expected at least one genesis entry")
        .id;

    let my_keypair = Arc::new(my_keypair);
    // Write two entries to the ledger so that the validator is in the active set:
    // 1) Give the validator a nonzero number of tokens 2) A vote from the validator.
    // This will cause leader rotation after the bootstrap height
    let (active_set_entries, vote_account_id) =
        make_active_set_entries(&my_keypair, &mint_keypair, &last_id, &last_id, 0);
    last_id = active_set_entries.last().unwrap().id;
    let initial_tick_height = genesis_entries
        .iter()
        .fold(0, |tick_count, entry| tick_count + entry.is_tick() as u64);
    let active_set_entries_len = active_set_entries.len() as u64;
    let initial_non_tick_height = genesis_entries.len() as u64 - initial_tick_height;
    let initial_entry_len = genesis_entries.len() as u64 + active_set_entries_len;

    // Persist the active-set entries after genesis so the bank sees them on load.
    {
        let db_ledger = DbLedger::open(&my_ledger_path).unwrap();
        db_ledger
            .write_entries(
                DEFAULT_SLOT_HEIGHT,
                genesis_entries.len() as u64,
                &active_set_entries,
            )
            .unwrap();
    }

    // Set up the LeaderScheduler so that this node becomes the leader at
    // bootstrap_height = num_bootstrap_slots * leader_rotation_interval
    let leader_rotation_interval = 10;
    let num_bootstrap_slots = 2;
    let bootstrap_height = num_bootstrap_slots * leader_rotation_interval;
    let leader_scheduler_config = LeaderSchedulerConfig::new(
        Some(bootstrap_height),
        Some(leader_rotation_interval),
        Some(leader_rotation_interval * 2),
        Some(bootstrap_height),
    );

    let leader_scheduler =
        Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config)));

    // Set up the bank
    let (bank, _, last_entry_id) =
        Fullnode::new_bank_from_ledger(&my_ledger_path, leader_scheduler);

    // Set up the cluster info
    let cluster_info_me = Arc::new(RwLock::new(ClusterInfo::new(my_node.info.clone())));

    // Set up the replay stage
    let signer_proxy = Arc::new(vote_account_id);
    let bank = Arc::new(bank);
    let (entry_sender, entry_receiver) = channel();
    let exit = Arc::new(AtomicBool::new(false));
    let (replay_stage, ledger_writer_recv) = ReplayStage::new(
        my_keypair.clone(),
        Some(signer_proxy.clone()),
        bank.clone(),
        cluster_info_me.clone(),
        entry_receiver,
        exit.clone(),
        initial_entry_len as u64,
        last_entry_id,
    );

    // Vote sender should error because no leader contact info is found in the
    // ClusterInfo
    let (mock_sender, _mock_receiver) = channel();
    let _vote_err = signer_proxy.send_validator_vote(&bank, &cluster_info_me, &mock_sender);

    // Send enough ticks to trigger leader rotation
    let total_entries_to_send = (bootstrap_height - initial_tick_height) as usize;
    let num_hashes = 1;

    // Add on the only entries that weren't ticks to the bootstrap height to get the
    // total expected entry length
    let expected_entry_height =
        bootstrap_height + initial_non_tick_height + active_set_entries_len - 1;
    let leader_rotation_index = (bootstrap_height - initial_tick_height - 2) as usize;
    let mut expected_last_id = Hash::default();
    // Send entries one at a time so each can be checked on the writer channel.
    for i in 0..total_entries_to_send - 1 {
        let entry = Entry::new(&mut last_id, 0, num_hashes, vec![]);
        last_id = entry.id;
        entry_sender
            .send(vec![entry.clone()])
            .expect("Expected to be able to send entry to ReplayStage");
        // Check that the entries on the ledger writer channel are correct
        let received_entry = ledger_writer_recv
            .recv()
            .expect("Expected to recieve an entry on the ledger writer receiver");
        assert_eq!(received_entry[0], entry);

        if i == leader_rotation_index {
            expected_last_id = entry.id;
        }
        debug!(
            "loop: i={}, leader_rotation_index={}, entry={:?}",
            i, leader_rotation_index, entry,
        );
    }

    // Wait for replay_stage to exit and check return value is correct
    assert_eq!(
        Some(ReplayStageReturnType::LeaderRotation(
            bootstrap_height,
            expected_entry_height,
            expected_last_id,
        )),
        replay_stage.join().expect("replay stage join")
    );
    // Guard against the loop never reaching leader_rotation_index.
    assert_ne!(expected_last_id, Hash::default());

    // The Finalizer must have raised the exit flag when the replay thread ended.
    assert_eq!(exit.load(Ordering::Relaxed), true);
    let _ignored = remove_dir_all(&my_ledger_path);
}
|
2018-11-16 15:48:10 -08:00
|
|
|
|
|
|
|
#[test]
|
2018-12-07 14:09:29 -08:00
|
|
|
fn test_replay_stage_poh_error_entry_receiver() {
|
|
|
|
// Set up dummy node to host a ReplayStage
|
2018-11-16 15:48:10 -08:00
|
|
|
let my_keypair = Keypair::new();
|
|
|
|
let my_id = my_keypair.pubkey();
|
|
|
|
let my_node = Node::new_localhost_with_pubkey(my_id);
|
|
|
|
// Set up the cluster info
|
2018-11-19 11:25:14 -08:00
|
|
|
let cluster_info_me = Arc::new(RwLock::new(ClusterInfo::new(my_node.info.clone())));
|
2018-11-16 15:48:10 -08:00
|
|
|
let (entry_sender, entry_receiver) = channel();
|
|
|
|
let (ledger_entry_sender, _ledger_entry_receiver) = channel();
|
|
|
|
let mut last_entry_id = Hash::default();
|
|
|
|
// Create keypair for the old leader
|
|
|
|
|
|
|
|
let mut entry_height = 0;
|
|
|
|
let mut last_id = Hash::default();
|
|
|
|
let mut entries = Vec::new();
|
|
|
|
for _ in 0..5 {
|
2018-12-10 20:03:04 -08:00
|
|
|
let entry = Entry::new(&mut last_id, 0, 1, vec![]); //just ticks
|
2018-11-16 15:48:10 -08:00
|
|
|
last_id = entry.id;
|
|
|
|
entries.push(entry);
|
|
|
|
}
|
|
|
|
entry_sender
|
|
|
|
.send(entries.clone())
|
|
|
|
.expect("Expected to err out");
|
|
|
|
|
2019-01-05 12:57:52 -08:00
|
|
|
let my_keypair = Arc::new(my_keypair);
|
2019-01-11 12:58:31 -08:00
|
|
|
let vote_signer = Arc::new(VoteSignerProxy::new(
|
|
|
|
&my_keypair,
|
|
|
|
Box::new(LocalVoteSigner::default()),
|
|
|
|
));
|
2018-12-07 14:09:29 -08:00
|
|
|
let res = ReplayStage::process_entries(
|
2018-11-16 15:48:10 -08:00
|
|
|
&Arc::new(Bank::default()),
|
|
|
|
&cluster_info_me,
|
|
|
|
&entry_receiver,
|
2019-01-05 12:57:52 -08:00
|
|
|
&my_keypair,
|
2019-01-23 18:05:06 -08:00
|
|
|
Some(&vote_signer),
|
2018-11-16 15:48:10 -08:00
|
|
|
None,
|
|
|
|
&ledger_entry_sender,
|
|
|
|
&mut entry_height,
|
|
|
|
&mut last_entry_id,
|
|
|
|
);
|
|
|
|
|
|
|
|
match res {
|
|
|
|
Ok(_) => (),
|
|
|
|
Err(e) => assert!(false, "Entries were not sent correctly {:?}", e),
|
|
|
|
}
|
|
|
|
|
|
|
|
entries.clear();
|
|
|
|
for _ in 0..5 {
|
2018-12-10 20:03:04 -08:00
|
|
|
let entry = Entry::new(&mut Hash::default(), 0, 0, vec![]); //just broken entries
|
2018-11-16 15:48:10 -08:00
|
|
|
entries.push(entry);
|
|
|
|
}
|
|
|
|
entry_sender
|
|
|
|
.send(entries.clone())
|
|
|
|
.expect("Expected to err out");
|
|
|
|
|
2018-12-07 14:09:29 -08:00
|
|
|
let res = ReplayStage::process_entries(
|
2018-11-16 15:48:10 -08:00
|
|
|
&Arc::new(Bank::default()),
|
|
|
|
&cluster_info_me,
|
|
|
|
&entry_receiver,
|
|
|
|
&Arc::new(Keypair::new()),
|
2019-01-23 18:05:06 -08:00
|
|
|
Some(&vote_signer),
|
2018-11-16 15:48:10 -08:00
|
|
|
None,
|
|
|
|
&ledger_entry_sender,
|
|
|
|
&mut entry_height,
|
|
|
|
&mut last_entry_id,
|
|
|
|
);
|
|
|
|
|
|
|
|
match res {
|
|
|
|
Ok(_) => assert!(false, "Should have failed because entries are broken"),
|
|
|
|
Err(Error::BlobError(BlobError::VerificationFailed)) => (),
|
|
|
|
Err(e) => assert!(
|
|
|
|
false,
|
|
|
|
"Should have failed because with blob error, instead, got {:?}",
|
|
|
|
e
|
|
|
|
),
|
|
|
|
}
|
|
|
|
}
|
2018-07-03 21:14:08 -07:00
|
|
|
}
|