2018-08-09 13:41:21 -07:00
|
|
|
//! The `broadcast_stage` broadcasts data from a leader node to validators
//!
|
|
|
|
use counter::Counter;
|
2018-09-12 13:59:19 -07:00
|
|
|
use crdt::{Crdt, CrdtError, NodeInfo, LEADER_ROTATION_INTERVAL};
|
2018-09-18 13:49:10 -07:00
|
|
|
use entry::Entry;
|
2018-08-09 11:54:23 -07:00
|
|
|
#[cfg(feature = "erasure")]
|
|
|
|
use erasure;
|
2018-09-18 13:49:10 -07:00
|
|
|
use ledger::Block;
|
2018-08-09 11:54:23 -07:00
|
|
|
use log::Level;
|
2018-09-18 21:45:49 -07:00
|
|
|
use packet::{BlobRecycler, SharedBlobs};
|
|
|
|
use rayon::prelude::*;
|
2018-08-09 11:54:23 -07:00
|
|
|
use result::{Error, Result};
|
2018-08-09 14:17:50 -07:00
|
|
|
use service::Service;
|
2018-08-09 11:54:23 -07:00
|
|
|
use std::net::UdpSocket;
|
2018-09-14 14:34:32 -07:00
|
|
|
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
2018-09-18 13:49:10 -07:00
|
|
|
use std::sync::mpsc::{Receiver, RecvTimeoutError};
|
2018-08-09 11:54:23 -07:00
|
|
|
use std::sync::{Arc, RwLock};
|
2018-08-09 14:17:50 -07:00
|
|
|
use std::thread::{self, Builder, JoinHandle};
|
2018-09-18 21:45:49 -07:00
|
|
|
use std::time::{Duration, Instant};
|
|
|
|
use timing::duration_as_ms;
|
2018-09-07 12:38:48 -07:00
|
|
|
use window::{self, SharedWindow, WindowIndex, WindowUtil, WINDOW_SIZE};
|
2018-08-09 11:54:23 -07:00
|
|
|
|
2018-09-14 00:17:40 -07:00
|
|
|
/// Reason the `BroadcastStage` thread exited; returned to the caller via
/// `Service::join`.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum BroadcastStageReturnType {
    // The stage observed (via the crdt leader schedule) that this node is no
    // longer the scheduled leader, so broadcasting must stop.
    LeaderRotation,
    // The entry receiver channel disconnected; there is nothing left to broadcast.
    ChannelDisconnected,
}
|
|
|
|
|
2018-08-09 11:54:23 -07:00
|
|
|
/// Receive one batch of entries (blocking up to one second), drain any further
/// batches already queued, convert them all to blobs, cache the blobs in the
/// shared `window`, and transmit them to the nodes in `broadcast_table`.
///
/// * `node_info` - this (leader) node's info; its `id` is used for logging and
///   passed to `window::index_blobs`.
/// * `broadcast_table` - peers to transmit to, forwarded to `Crdt::broadcast`.
/// * `window` - shared cache of blobs that have been broadcast; evicted slots
///   are returned to `recycler`.
/// * `recycler` - blob allocator; used both to create blobs from entries and
///   to recycle evicted window entries.
/// * `receiver` - channel producing `Vec<Entry>` batches to broadcast.
/// * `sock` - UDP socket to send from.
/// * `transmit_index` - data/coding window positions already transmitted;
///   advanced by `Crdt::broadcast` (and by `erasure::generate_coding` for the
///   coding half when the "erasure" feature is on).
/// * `receive_index` - next blob index to assign; advanced here by the number
///   of blobs cached per chunk.
///
/// Errors propagate from `recv_timeout` (timeout or disconnect), from erasure
/// coding, and from `Crdt::broadcast`; the caller decides which are fatal.
fn broadcast(
    node_info: &NodeInfo,
    broadcast_table: &[NodeInfo],
    window: &SharedWindow,
    recycler: &BlobRecycler,
    receiver: &Receiver<Vec<Entry>>,
    sock: &UdpSocket,
    transmit_index: &mut WindowIndex,
    receive_index: &mut u64,
) -> Result<()> {
    let id = node_info.id;
    // Block for at most one second waiting for the first batch; a timeout
    // surfaces as Error::RecvTimeoutError(Timeout) and is tolerated by run().
    let timer = Duration::new(1, 0);
    let entries = receiver.recv_timeout(timer)?;
    let mut num_entries = entries.len();
    let mut ventries = Vec::new();
    ventries.push(entries);
    // Opportunistically drain whatever else is already queued so one pass
    // broadcasts as much as possible.
    while let Ok(entries) = receiver.try_recv() {
        num_entries += entries.len();
        ventries.push(entries);
    }

    // Serialize every batch into blobs in parallel (rayon).
    let to_blobs_start = Instant::now();
    let dq: SharedBlobs = ventries
        .into_par_iter()
        .flat_map(|p| p.to_blobs(recycler))
        .collect();

    let to_blobs_elapsed = duration_as_ms(&to_blobs_start.elapsed());

    // flatten deque to vec
    let blobs_vec: Vec<_> = dq.into_iter().collect();

    let blobs_chunking = Instant::now();
    // We could receive more blobs than window slots so
    // break them up into window-sized chunks to process
    let blobs_chunked = blobs_vec.chunks(WINDOW_SIZE as usize).map(|x| x.to_vec());
    let chunking_elapsed = duration_as_ms(&blobs_chunking.elapsed());

    trace!("{}", window.read().unwrap().print(&id, *receive_index));

    let broadcast_start = Instant::now();
    for mut blobs in blobs_chunked {
        let blobs_len = blobs.len();
        trace!("{}: broadcast blobs.len: {}", id, blobs_len);

        // Index the blobs
        window::index_blobs(node_info, &blobs, receive_index)
            .expect("index blobs for initial window");

        // keep the cache of blobs that are broadcast
        inc_new_counter_info!("streamer-broadcast-sent", blobs.len());
        {
            let mut win = window.write().unwrap();
            // A chunk is at most WINDOW_SIZE blobs, so it must fit.
            assert!(blobs.len() <= win.len());
            // First pass: evict whatever currently occupies the target slots,
            // returning the old data/coding blobs to the recycler.
            for b in &blobs {
                let ix = b.read().unwrap().get_index().expect("blob index");
                let pos = (ix % WINDOW_SIZE) as usize;
                if let Some(x) = win[pos].data.take() {
                    trace!(
                        "{} popped {} at {}",
                        id,
                        x.read().unwrap().get_index().unwrap(),
                        pos
                    );
                    recycler.recycle(x, "broadcast-data");
                }
                if let Some(x) = win[pos].coding.take() {
                    trace!(
                        "{} popped {} at {}",
                        id,
                        x.read().unwrap().get_index().unwrap(),
                        pos
                    );
                    recycler.recycle(x, "broadcast-coding");
                }

                // Slot is now guaranteed empty.
                trace!("{} null {}", id, pos);
            }
            // Second pass: move the new blobs into their (now empty) slots.
            // NOTE: pop() consumes `blobs` back-to-front; order doesn't matter
            // since each blob's slot is derived from its own index.
            while let Some(b) = blobs.pop() {
                let ix = b.read().unwrap().get_index().expect("blob index");
                let pos = (ix % WINDOW_SIZE) as usize;
                trace!("{} caching {} at {}", id, ix, pos);
                assert!(win[pos].data.is_none());
                win[pos].data = Some(b);
            }
        }

        // Fill in the coding blob data from the window data blobs
        #[cfg(feature = "erasure")]
        {
            erasure::generate_coding(
                &id,
                &mut window.write().unwrap(),
                recycler,
                *receive_index,
                blobs_len,
                &mut transmit_index.coding,
            )?;
        }

        *receive_index += blobs_len as u64;

        // Send blobs out from the window
        Crdt::broadcast(
            &node_info,
            &broadcast_table,
            &window,
            &sock,
            transmit_index,
            *receive_index,
        )?;
    }
    let broadcast_elapsed = duration_as_ms(&broadcast_start.elapsed());

    info!(
        "broadcast: {} entries, blob time {} chunking time {} broadcast time {}",
        num_entries, to_blobs_elapsed, chunking_elapsed, broadcast_elapsed
    );

    Ok(())
}
|
|
|
|
|
2018-09-14 14:34:32 -07:00
|
|
|
// Implement a destructor for the BroadcastStage thread to signal it exited
// even on panics
struct Finalizer {
    // Shared flag observed by the rest of the Tpu; set to true on drop.
    exit_sender: Arc<AtomicBool>,
}

impl Finalizer {
    /// Wrap `exit_sender` so the flag is raised whenever the guard is dropped,
    /// including during an unwind.
    fn new(exit_sender: Arc<AtomicBool>) -> Self {
        Finalizer { exit_sender }
    }
}

// Implement a destructor for Finalizer.
impl Drop for Finalizer {
    fn drop(&mut self) {
        // Store directly through the shared Arc. (The previous code cloned the
        // Arc first, which was only a redundant refcount bump/drop.)
        self.exit_sender.store(true, Ordering::Relaxed);
    }
}
|
|
|
|
|
2018-08-09 14:17:50 -07:00
|
|
|
/// Stage that owns the broadcaster thread spawned by `BroadcastStage::new`.
pub struct BroadcastStage {
    // Handle to the "solana-broadcaster" thread; joined via the `Service`
    // impl to retrieve the stage's exit reason.
    thread_hdl: JoinHandle<BroadcastStageReturnType>,
}
|
|
|
|
|
|
|
|
impl BroadcastStage {
|
|
|
|
fn run(
|
2018-08-09 15:20:13 -07:00
|
|
|
sock: &UdpSocket,
|
|
|
|
crdt: &Arc<RwLock<Crdt>>,
|
|
|
|
window: &SharedWindow,
|
2018-08-09 14:17:50 -07:00
|
|
|
entry_height: u64,
|
2018-08-09 15:20:13 -07:00
|
|
|
recycler: &BlobRecycler,
|
2018-09-18 13:49:10 -07:00
|
|
|
receiver: &Receiver<Vec<Entry>>,
|
2018-09-14 00:17:40 -07:00
|
|
|
) -> BroadcastStageReturnType {
|
2018-08-09 14:17:50 -07:00
|
|
|
let mut transmit_index = WindowIndex {
|
|
|
|
data: entry_height,
|
|
|
|
coding: entry_height,
|
|
|
|
};
|
|
|
|
let mut receive_index = entry_height;
|
|
|
|
let me = crdt.read().unwrap().my_data().clone();
|
|
|
|
loop {
|
2018-09-12 13:59:19 -07:00
|
|
|
if transmit_index.data % (LEADER_ROTATION_INTERVAL as u64) == 0 {
|
|
|
|
let rcrdt = crdt.read().unwrap();
|
|
|
|
let my_id = rcrdt.my_data().id;
|
|
|
|
match rcrdt.get_scheduled_leader(transmit_index.data) {
|
|
|
|
Some(id) if id == my_id => (),
|
2018-09-13 19:02:14 -07:00
|
|
|
// If the leader stays in power for the next
|
2018-09-12 13:59:19 -07:00
|
|
|
// round as well, then we don't exit. Otherwise, exit.
|
|
|
|
_ => {
|
2018-09-14 00:17:40 -07:00
|
|
|
return BroadcastStageReturnType::LeaderRotation;
|
2018-09-12 13:59:19 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-09 14:17:50 -07:00
|
|
|
let broadcast_table = crdt.read().unwrap().compute_broadcast_table();
|
|
|
|
if let Err(e) = broadcast(
|
|
|
|
&me,
|
|
|
|
&broadcast_table,
|
|
|
|
&window,
|
|
|
|
&recycler,
|
|
|
|
&receiver,
|
|
|
|
&sock,
|
|
|
|
&mut transmit_index,
|
|
|
|
&mut receive_index,
|
|
|
|
) {
|
|
|
|
match e {
|
2018-09-14 00:17:40 -07:00
|
|
|
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => {
|
|
|
|
return BroadcastStageReturnType::ChannelDisconnected
|
|
|
|
}
|
2018-08-09 14:17:50 -07:00
|
|
|
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
|
|
|
|
Error::CrdtError(CrdtError::NoPeers) => (), // TODO: Why are the unit-tests throwing hundreds of these?
|
|
|
|
_ => {
|
|
|
|
inc_new_counter_info!("streamer-broadcaster-error", 1, 1);
|
|
|
|
error!("broadcaster error: {:?}", e);
|
2018-08-09 11:54:23 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-08-09 14:17:50 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Service to broadcast messages from the leader to layer 1 nodes.
|
|
|
|
/// See `crdt` for network layer definitions.
|
|
|
|
/// # Arguments
|
|
|
|
/// * `sock` - Socket to send from.
|
|
|
|
/// * `exit` - Boolean to signal system exit.
|
|
|
|
/// * `crdt` - CRDT structure
|
|
|
|
/// * `window` - Cache of blobs that we have broadcast
|
|
|
|
/// * `recycler` - Blob recycler.
|
|
|
|
/// * `receiver` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
|
2018-09-14 14:34:32 -07:00
|
|
|
/// * `exit_sender` - Set to true when this stage exits, allows rest of Tpu to exit cleanly. Otherwise,
|
|
|
|
/// when a Tpu stage closes, it only closes the stages that come after it. The stages
|
|
|
|
/// that come before could be blocked on a receive, and never notice that they need to
|
|
|
|
/// exit. Now, if any stage of the Tpu closes, it will lead to closing the WriteStage (b/c
|
|
|
|
/// WriteStage is the last stage in the pipeline), which will then close Broadcast stage,
|
|
|
|
/// which will then close FetchStage in the Tpu, and then the rest of the Tpu,
|
|
|
|
/// completing the cycle.
|
2018-08-09 14:17:50 -07:00
|
|
|
pub fn new(
|
|
|
|
sock: UdpSocket,
|
|
|
|
crdt: Arc<RwLock<Crdt>>,
|
|
|
|
window: SharedWindow,
|
|
|
|
entry_height: u64,
|
|
|
|
recycler: BlobRecycler,
|
2018-09-18 13:49:10 -07:00
|
|
|
receiver: Receiver<Vec<Entry>>,
|
2018-09-14 14:34:32 -07:00
|
|
|
exit_sender: Arc<AtomicBool>,
|
2018-08-09 14:17:50 -07:00
|
|
|
) -> Self {
|
|
|
|
let thread_hdl = Builder::new()
|
|
|
|
.name("solana-broadcaster".to_string())
|
2018-09-14 14:34:32 -07:00
|
|
|
.spawn(move || {
|
|
|
|
let _exit = Finalizer::new(exit_sender);
|
|
|
|
Self::run(&sock, &crdt, &window, entry_height, &recycler, &receiver)
|
|
|
|
})
|
2018-09-12 13:59:19 -07:00
|
|
|
.unwrap();
|
2018-08-09 14:17:50 -07:00
|
|
|
|
2018-09-14 14:34:32 -07:00
|
|
|
(BroadcastStage { thread_hdl })
|
2018-08-09 14:17:50 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Service for BroadcastStage {
    // Joining this stage yields the reason the broadcaster thread exited.
    type JoinReturnType = BroadcastStageReturnType;

    // Block until the broadcaster thread finishes, forwarding its return
    // value (or the panic payload via `thread::Result`).
    fn join(self) -> thread::Result<BroadcastStageReturnType> {
        self.thread_hdl.join()
    }
}
|
2018-09-13 18:47:39 -07:00
|
|
|
|
|
|
|
#[cfg(test)]
mod tests {
    use broadcast_stage::{BroadcastStage, BroadcastStageReturnType};
    use crdt::{Crdt, Node, LEADER_ROTATION_INTERVAL};
    use entry::Entry;
    use mint::Mint;
    use packet::BlobRecycler;
    use recorder::Recorder;
    use service::Service;
    use signature::{Keypair, KeypairUtil, Pubkey};
    use std::cmp;
    use std::sync::atomic::AtomicBool;
    use std::sync::mpsc::{channel, Sender};
    use std::sync::{Arc, RwLock};
    use window::{new_window_from_entries, SharedWindow};

    // Build a running BroadcastStage wired to a two-node crdt (leader +
    // buddy), returning everything a test needs to drive and inspect it:
    // (leader id, buddy id, the stage, its window, the entry sender, the
    // crdt, and the genesis entries).
    fn setup_dummy_broadcast_stage() -> (
        Pubkey,
        Pubkey,
        BroadcastStage,
        SharedWindow,
        Sender<Vec<Entry>>,
        Arc<RwLock<Crdt>>,
        Vec<Entry>,
    ) {
        // Setup dummy leader info
        let leader_keypair = Keypair::new();
        let id = leader_keypair.pubkey();
        let leader_info = Node::new_localhost_with_pubkey(leader_keypair.pubkey());

        // Give the leader somebody to broadcast to so he isn't lonely
        let buddy_keypair = Keypair::new();
        let buddy_id = buddy_keypair.pubkey();
        let broadcast_buddy = Node::new_localhost_with_pubkey(buddy_keypair.pubkey());

        // Fill the crdt with the buddy's info
        let mut crdt = Crdt::new(leader_info.info.clone()).expect("Crdt::new");
        crdt.insert(&broadcast_buddy.info);
        let crdt = Arc::new(RwLock::new(crdt));
        let blob_recycler = BlobRecycler::default();

        // Make dummy initial entries
        let mint = Mint::new(10000);
        let entries = mint.create_entries();
        let entry_height = entries.len() as u64;

        // Setup a window pre-populated with the genesis entries
        let window =
            new_window_from_entries(&entries, entry_height, &leader_info.info, &blob_recycler);

        let shared_window = Arc::new(RwLock::new(window));

        let (entry_sender, entry_receiver) = channel();
        let exit_sender = Arc::new(AtomicBool::new(false));
        // Start up the broadcast stage
        let broadcast_stage = BroadcastStage::new(
            leader_info.sockets.broadcast,
            crdt.clone(),
            shared_window.clone(),
            entry_height,
            blob_recycler.clone(),
            entry_receiver,
            exit_sender,
        );

        (
            id,
            buddy_id,
            broadcast_stage,
            shared_window,
            entry_sender,
            crdt,
            entries,
        )
    }

    // Highest blob index currently cached in the window's data slots; 0 if
    // the window is empty.
    fn find_highest_window_index(shared_window: &SharedWindow) -> u64 {
        let window = shared_window.read().unwrap();
        window.iter().fold(0, |m, w_slot| {
            if let Some(ref blob) = w_slot.data {
                cmp::max(m, blob.read().unwrap().get_index().unwrap())
            } else {
                m
            }
        })
    }

    #[test]
    fn test_broadcast_stage_leader_rotation_exit() {
        let (
            id,
            buddy_id,
            broadcast_stage,
            shared_window,
            entry_sender,
            crdt,
            entries,
        ) = setup_dummy_broadcast_stage();
        {
            let mut wcrdt = crdt.write().unwrap();
            // Set leader to myself
            wcrdt.set_leader(id);
            // Set the leader for the next rotation to also be myself
            wcrdt.set_scheduled_leader(LEADER_ROTATION_INTERVAL, id);
        }

        let genesis_len = entries.len() as u64;
        let last_entry_hash = entries.last().expect("Ledger should not be empty").id;

        // Input enough entries to make exactly LEADER_ROTATION_INTERVAL entries, which will
        // trigger a check for leader rotation. Because the next scheduled leader
        // is ourselves, we won't exit
        let mut recorder = Recorder::new(last_entry_hash);

        for _ in genesis_len..LEADER_ROTATION_INTERVAL {
            let new_entry = recorder.record(vec![]);
            entry_sender.send(new_entry).unwrap();
        }

        // Set the scheduled next leader in the crdt to the other buddy on the network
        crdt.write()
            .unwrap()
            .set_scheduled_leader(2 * LEADER_ROTATION_INTERVAL, buddy_id);

        // Input another LEADER_ROTATION_INTERVAL dummy entries, which will take us
        // past the point of the leader rotation. The broadcast stage will see that
        // it's no longer the leader after checking the crdt, and exit
        for _ in 0..LEADER_ROTATION_INTERVAL {
            let new_entry = recorder.record(vec![]);
            match entry_sender.send(new_entry) {
                // We disconnected, break out of loop and check the results
                Err(_) => break,
                _ => (),
            };
        }

        // Make sure the threads closed cleanly
        assert_eq!(
            broadcast_stage.join().unwrap(),
            BroadcastStageReturnType::LeaderRotation
        );

        // The stage should have cached exactly up to the rotation boundary
        // before exiting.
        let highest_index = find_highest_window_index(&shared_window);
        assert_eq!(highest_index, 2 * LEADER_ROTATION_INTERVAL - 1);
    }
}
|