// solana/src/tpu.rs
//! The `tpu` module implements the Transaction Processing Unit, a
//! 5-stage transaction processing pipeline in software.
2018-03-29 11:20:54 -07:00
2018-05-09 13:56:34 -07:00
use accounting_stage::AccountingStage;
2018-04-28 00:31:20 -07:00
use crdt::{Crdt, ReplicatedData};
use ecdsa;
use entry::Entry;
use ledger;
2018-03-26 21:07:11 -07:00
use packet;
use packet::SharedPackets;
2018-05-08 17:59:01 -07:00
use rand::{thread_rng, Rng};
use result::Result;
use serde_json;
2018-03-26 21:07:11 -07:00
use std::collections::VecDeque;
2018-05-11 11:38:52 -07:00
use std::io::Write;
2018-05-11 12:06:05 -07:00
use std::io::sink;
use std::net::UdpSocket;
2018-03-22 13:05:23 -07:00
use std::sync::atomic::{AtomicBool, Ordering};
2018-05-11 16:58:27 -07:00
use std::sync::mpsc::{channel, Receiver, Sender};
2018-04-18 12:02:54 -07:00
use std::sync::{Arc, Mutex, RwLock};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
2018-05-08 17:59:01 -07:00
use std::time::Instant;
use streamer;
use thin_client_service::ThinClientService;
use timing;
2018-02-28 09:07:54 -08:00
pub struct Tpu {
2018-05-09 15:14:40 -07:00
accounting_stage: AccountingStage,
2018-05-09 13:56:34 -07:00
thin_client_service: ThinClientService,
}
type SharedTpu = Arc<Tpu>;
2018-04-28 00:31:20 -07:00
impl Tpu {
/// Create a new Tpu that wraps the given Accountant.
2018-05-09 15:14:40 -07:00
pub fn new(accounting_stage: AccountingStage) -> Self {
let thin_client_service = ThinClientService::new(accounting_stage.accountant.clone());
Tpu {
2018-05-09 15:14:40 -07:00
accounting_stage,
2018-05-09 13:56:34 -07:00
thin_client_service,
}
}
2018-05-10 14:48:57 -07:00
fn write_entry<W: Write>(&self, writer: &Mutex<W>, entry: &Entry) {
trace!("write_entry entry");
2018-05-10 14:41:18 -07:00
self.accounting_stage
.accountant
.register_entry_id(&entry.id);
2018-04-28 00:31:20 -07:00
writeln!(
writer.lock().expect("'writer' lock in fn fn write_entry"),
2018-04-28 00:31:20 -07:00
"{}",
serde_json::to_string(&entry).expect("'entry' to_strong in fn write_entry")
).expect("writeln! in fn write_entry");
2018-05-10 14:41:18 -07:00
self.thin_client_service
2018-05-09 13:56:34 -07:00
.notify_entry_info_subscribers(&entry);
2018-04-28 00:31:20 -07:00
}
2018-05-10 14:48:57 -07:00
fn write_entries<W: Write>(&self, writer: &Mutex<W>) -> Result<Vec<Entry>> {
2018-04-28 00:31:20 -07:00
//TODO implement a serialize for channel that does this without allocations
2018-05-03 14:35:04 -07:00
let mut l = vec![];
2018-05-10 14:41:18 -07:00
let entry = self.accounting_stage
2018-04-28 00:31:20 -07:00
.output
.lock()
2018-05-10 18:21:10 -07:00
.expect("'ouput' lock in fn receive_all")
2018-04-28 00:31:20 -07:00
.recv_timeout(Duration::new(1, 0))?;
2018-05-10 14:48:57 -07:00
self.write_entry(writer, &entry);
2018-05-03 14:35:04 -07:00
l.push(entry);
2018-05-11 11:38:52 -07:00
while let Ok(entry) = self.accounting_stage
.output
.lock()
.expect("'output' lock in fn write_entries")
.try_recv()
{
2018-05-10 14:48:57 -07:00
self.write_entry(writer, &entry);
2018-05-03 14:35:04 -07:00
l.push(entry);
2018-04-28 00:31:20 -07:00
}
Ok(l)
}
2018-03-29 11:20:54 -07:00
/// Process any Entry items that have been published by the Historian.
2018-04-28 00:31:20 -07:00
/// continuosly broadcast blobs of entries out
fn run_sync<W: Write>(
2018-05-10 14:41:18 -07:00
&self,
2018-04-28 00:31:20 -07:00
broadcast: &streamer::BlobSender,
blob_recycler: &packet::BlobRecycler,
2018-05-09 14:27:33 -07:00
writer: &Mutex<W>,
2018-04-28 00:31:20 -07:00
) -> Result<()> {
let mut q = VecDeque::new();
2018-05-10 14:48:57 -07:00
let list = self.write_entries(writer)?;
trace!("New blobs? {}", list.len());
ledger::process_entry_list_into_blobs(&list, blob_recycler, &mut q);
2018-04-28 00:31:20 -07:00
if !q.is_empty() {
broadcast.send(q)?;
2018-03-20 22:15:44 -07:00
}
2018-04-28 00:31:20 -07:00
Ok(())
}
pub fn sync_service<W: Write + Send + 'static>(
obj: SharedTpu,
2018-04-28 00:31:20 -07:00
exit: Arc<AtomicBool>,
broadcast: streamer::BlobSender,
blob_recycler: packet::BlobRecycler,
2018-05-09 14:27:33 -07:00
writer: Mutex<W>,
2018-05-07 20:44:44 -07:00
) -> JoinHandle<()> {
spawn(move || loop {
2018-05-10 14:41:18 -07:00
let _ = obj.run_sync(&broadcast, &blob_recycler, &writer);
2018-05-07 20:44:44 -07:00
if exit.load(Ordering::Relaxed) {
info!("sync_service exiting");
break;
}
})
}
/// Process any Entry items that have been published by the Historian.
/// continuosly broadcast blobs of entries out
2018-05-10 14:41:18 -07:00
fn run_sync_no_broadcast(&self) -> Result<()> {
2018-05-10 14:48:57 -07:00
self.write_entries(&Arc::new(Mutex::new(sink())))?;
Ok(())
}
pub fn sync_no_broadcast_service(obj: SharedTpu, exit: Arc<AtomicBool>) -> JoinHandle<()> {
spawn(move || loop {
2018-05-10 14:41:18 -07:00
let _ = obj.run_sync_no_broadcast();
if exit.load(Ordering::Relaxed) {
info!("sync_no_broadcast_service exiting");
2018-04-28 00:31:20 -07:00
break;
}
})
2018-02-28 13:16:50 -08:00
}
fn verify_batch(
batch: Vec<SharedPackets>,
sendr: &Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
) -> Result<()> {
let r = ecdsa::ed25519_verify(&batch);
let res = batch.into_iter().zip(r).collect();
2018-05-11 11:38:52 -07:00
sendr
.lock()
.expect("lock in fn verify_batch in tpu")
.send(res)?;
// TODO: fix error handling here?
Ok(())
2018-04-11 08:02:33 -07:00
}
fn verifier(
recvr: &Arc<Mutex<streamer::PacketReceiver>>,
sendr: &Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
2018-04-11 08:02:33 -07:00
) -> Result<()> {
2018-05-11 11:38:52 -07:00
let (batch, len) =
streamer::recv_batch(&recvr.lock().expect("'recvr' lock in fn verifier"))?;
let now = Instant::now();
let batch_len = batch.len();
let rand_id = thread_rng().gen_range(0, 100);
info!(
"@{:?} verifier: verifying: {} id: {}",
timing::timestamp(),
batch.len(),
rand_id
);
2018-05-10 18:21:10 -07:00
Self::verify_batch(batch, sendr).expect("verify_batch in fn verifier");
let total_time_ms = timing::duration_as_ms(&now.elapsed());
let total_time_s = timing::duration_as_s(&now.elapsed());
info!(
"@{:?} verifier: done. batches: {} total verify time: {:?} id: {} verified: {} v/s {}",
timing::timestamp(),
batch_len,
total_time_ms,
rand_id,
len,
(len as f32 / total_time_s)
);
2018-03-26 21:07:11 -07:00
Ok(())
}
2018-04-18 12:29:33 -07:00
/// Process verified blobs, already in order
/// Respond with a signed hash of the state
fn replicate_state(
2018-05-09 14:27:33 -07:00
obj: &Tpu,
2018-04-19 15:43:19 -07:00
verified_receiver: &streamer::BlobReceiver,
2018-04-18 12:29:33 -07:00
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let blobs = verified_receiver.recv_timeout(timer)?;
trace!("replicating blobs {}", blobs.len());
let entries = ledger::reconstruct_entries_from_blobs(&blobs);
obj.accounting_stage
.accountant
.process_verified_entries(entries)?;
2018-04-19 15:43:19 -07:00
for blob in blobs {
blob_recycler.recycle(blob);
}
2018-04-18 12:29:33 -07:00
Ok(())
}
2018-02-28 09:07:54 -08:00
2018-05-11 16:58:27 -07:00
fn thin_client_service(
obj: SharedTpu,
exit: Arc<AtomicBool>,
verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
responder_sender: streamer::BlobSender,
packet_recycler: packet::PacketRecycler,
blob_recycler: packet::BlobRecycler,
) -> JoinHandle<()> {
spawn(move || loop {
let e = obj.thin_client_service.process_request_packets(
&obj.accounting_stage,
&verified_receiver,
&responder_sender,
&packet_recycler,
&blob_recycler,
);
if e.is_err() {
if exit.load(Ordering::Relaxed) {
break;
}
}
})
}
2018-05-11 18:59:40 -07:00
fn verifier_service(
exit: Arc<AtomicBool>,
packets_receiver: Arc<Mutex<streamer::PacketReceiver>>,
verified_sender: Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
) -> JoinHandle<()> {
spawn(move || loop {
let e = Self::verifier(&packets_receiver.clone(), &verified_sender.clone());
if e.is_err() && exit.load(Ordering::Relaxed) {
break;
}
})
}
fn verifier_services(
exit: Arc<AtomicBool>,
packets_receiver: streamer::PacketReceiver,
verified_sender: Sender<Vec<(SharedPackets, Vec<u8>)>>,
) -> Vec<JoinHandle<()>> {
let sender = Arc::new(Mutex::new(verified_sender));
let receiver = Arc::new(Mutex::new(packets_receiver));
(0..4)
.map(|_| Self::verifier_service(exit.clone(), receiver.clone(), sender.clone()))
.collect()
}
/// Create a UDP microservice that forwards messages the given Tpu.
2018-04-17 19:26:19 -07:00
/// This service is the network leader
2018-03-29 11:20:54 -07:00
/// Set `exit` to shutdown its threads.
2018-04-28 00:31:20 -07:00
pub fn serve<W: Write + Send + 'static>(
obj: &SharedTpu,
2018-04-28 00:31:20 -07:00
me: ReplicatedData,
2018-05-11 15:41:35 -07:00
requests_socket: UdpSocket,
_events_socket: UdpSocket,
2018-04-28 00:31:20 -07:00
gossip: UdpSocket,
2018-03-22 13:05:23 -07:00
exit: Arc<AtomicBool>,
2018-04-28 00:31:20 -07:00
writer: W,
2018-03-23 20:49:28 -07:00
) -> Result<Vec<JoinHandle<()>>> {
2018-04-28 00:31:20 -07:00
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
let t_gossip = Crdt::gossip(crdt.clone(), exit.clone());
let t_listen = Crdt::listen(crdt.clone(), gossip, exit.clone());
// make sure we are on the same interface
2018-05-11 15:41:35 -07:00
let mut local = requests_socket.local_addr()?;
local.set_port(0);
2018-04-28 00:31:20 -07:00
let respond_socket = UdpSocket::bind(local.clone())?;
2018-02-28 09:07:54 -08:00
let packet_recycler = packet::PacketRecycler::default();
let blob_recycler = packet::BlobRecycler::default();
let (packet_sender, packet_receiver) = channel();
2018-05-11 15:41:35 -07:00
let t_receiver = streamer::receiver(
requests_socket,
exit.clone(),
packet_recycler.clone(),
packet_sender,
)?;
2018-04-28 00:31:20 -07:00
let (responder_sender, responder_receiver) = channel();
let t_responder = streamer::responder(
respond_socket,
exit.clone(),
blob_recycler.clone(),
responder_receiver,
);
2018-03-26 21:07:11 -07:00
2018-05-11 18:59:40 -07:00
let (verified_sender, verified_receiver) = channel();
let verify_threads: Vec<_> =
Self::verifier_services(exit.clone(), packet_receiver, verified_sender);
2018-03-26 21:07:11 -07:00
2018-04-28 00:31:20 -07:00
let (broadcast_sender, broadcast_receiver) = channel();
let broadcast_socket = UdpSocket::bind(local)?;
let t_broadcast = streamer::broadcaster(
broadcast_socket,
exit.clone(),
crdt.clone(),
blob_recycler.clone(),
broadcast_receiver,
);
let t_sync = Self::sync_service(
obj.clone(),
exit.clone(),
broadcast_sender,
blob_recycler.clone(),
2018-05-09 14:27:33 -07:00
Mutex::new(writer),
2018-04-28 00:31:20 -07:00
);
2018-05-11 16:58:27 -07:00
let t_thin_client = Self::thin_client_service(
obj.clone(),
exit.clone(),
verified_receiver,
responder_sender,
packet_recycler.clone(),
blob_recycler.clone(),
);
let mut threads = vec![
2018-04-28 00:31:20 -07:00
t_receiver,
t_responder,
2018-05-11 16:58:27 -07:00
t_thin_client,
2018-04-28 00:31:20 -07:00
t_sync,
t_gossip,
t_listen,
t_broadcast,
];
threads.extend(verify_threads.into_iter());
Ok(threads)
2018-03-26 21:07:11 -07:00
}
2018-04-17 19:26:19 -07:00
2018-04-18 12:02:54 -07:00
/// This service receives messages from a leader in the network and processes the transactions
/// on the accountant state.
/// # Arguments
2018-04-18 20:12:30 -07:00
/// * `obj` - The accountant state.
2018-04-28 00:31:20 -07:00
/// * `me` - my configuration
/// * `leader` - leader configuration
2018-04-18 12:02:54 -07:00
/// * `exit` - The exit signal.
/// # Remarks
2018-04-18 20:12:30 -07:00
/// The pipeline is constructed as follows:
2018-04-18 12:02:54 -07:00
/// 1. receive blobs from the network, these are out of order
2018-04-18 12:05:12 -07:00
/// 2. verify blobs, PoH, signatures (TODO)
/// 3. reconstruct contiguous window
2018-04-18 12:02:54 -07:00
/// a. order the blobs
/// b. use erasure coding to reconstruct missing blobs
2018-04-18 12:29:33 -07:00
/// c. ask the network for missing blobs, if erasure coding is insufficient
2018-04-18 12:05:12 -07:00
/// d. make sure that the blobs PoH sequences connect (TODO)
2018-04-18 12:02:54 -07:00
/// 4. process the transaction state machine
/// 5. respond with the hash of the state back to the leader
2018-04-17 19:26:19 -07:00
pub fn replicate(
obj: &SharedTpu,
2018-04-28 00:31:20 -07:00
me: ReplicatedData,
gossip: UdpSocket,
2018-05-11 15:41:35 -07:00
requests_socket: UdpSocket,
2018-04-28 00:31:20 -07:00
replicate: UdpSocket,
leader: ReplicatedData,
2018-04-17 19:26:19 -07:00
exit: Arc<AtomicBool>,
) -> Result<Vec<JoinHandle<()>>> {
//replicate pipeline
2018-04-28 00:31:20 -07:00
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
2018-05-11 11:38:52 -07:00
crdt.write()
.expect("'crdt' write lock in pub fn replicate")
.set_leader(leader.id);
crdt.write()
.expect("'crdt' write lock before insert() in pub fn replicate")
.insert(leader);
2018-04-28 00:31:20 -07:00
let t_gossip = Crdt::gossip(crdt.clone(), exit.clone());
let t_listen = Crdt::listen(crdt.clone(), gossip, exit.clone());
2018-04-17 19:26:19 -07:00
// make sure we are on the same interface
2018-04-28 00:31:20 -07:00
let mut local = replicate.local_addr()?;
2018-04-17 19:26:19 -07:00
local.set_port(0);
let write = UdpSocket::bind(local)?;
let blob_recycler = packet::BlobRecycler::default();
let (blob_sender, blob_receiver) = channel();
let t_blob_receiver = streamer::blob_receiver(
exit.clone(),
blob_recycler.clone(),
2018-04-28 00:31:20 -07:00
replicate,
blob_sender.clone(),
)?;
2018-04-17 19:26:19 -07:00
let (window_sender, window_receiver) = channel();
2018-04-17 20:09:37 -07:00
let (retransmit_sender, retransmit_receiver) = channel();
2018-04-17 19:26:19 -07:00
2018-04-19 10:32:02 -07:00
let t_retransmit = streamer::retransmitter(
2018-04-17 20:09:37 -07:00
write,
exit.clone(),
2018-04-28 00:31:20 -07:00
crdt.clone(),
2018-04-17 20:09:37 -07:00
blob_recycler.clone(),
retransmit_receiver,
);
2018-04-28 00:31:20 -07:00
2018-04-18 12:02:54 -07:00
//TODO
//the packets coming out of blob_receiver need to be sent to the GPU and verified
2018-04-18 12:02:54 -07:00
//then sent to the window, which does the erasure coding reconstruction
let t_window = streamer::window(
exit.clone(),
crdt.clone(),
2018-04-18 12:02:54 -07:00
blob_recycler.clone(),
blob_receiver,
window_sender,
retransmit_sender,
);
2018-03-26 21:07:11 -07:00
let tpu = obj.clone();
let s_exit = exit.clone();
let t_replicator = spawn(move || loop {
let e = Self::replicate_state(&tpu, &window_receiver, &blob_recycler);
if e.is_err() && s_exit.load(Ordering::Relaxed) {
break;
}
});
//serve pipeline
// make sure we are on the same interface
2018-05-11 15:41:35 -07:00
let mut local = requests_socket.local_addr()?;
local.set_port(0);
let respond_socket = UdpSocket::bind(local.clone())?;
let packet_recycler = packet::PacketRecycler::default();
let blob_recycler = packet::BlobRecycler::default();
let (packet_sender, packet_receiver) = channel();
2018-05-11 15:41:35 -07:00
let t_packet_receiver = streamer::receiver(
requests_socket,
exit.clone(),
packet_recycler.clone(),
packet_sender,
)?;
let (responder_sender, responder_receiver) = channel();
let t_responder = streamer::responder(
respond_socket,
exit.clone(),
blob_recycler.clone(),
responder_receiver,
);
let (verified_sender, verified_receiver) = channel();
2018-05-11 18:59:40 -07:00
let verify_threads: Vec<_> =
Self::verifier_services(exit.clone(), packet_receiver, verified_sender);
let t_sync = Self::sync_no_broadcast_service(obj.clone(), exit.clone());
2018-05-11 16:58:27 -07:00
let t_thin_client = Self::thin_client_service(
obj.clone(),
exit.clone(),
verified_receiver,
responder_sender,
packet_recycler.clone(),
blob_recycler.clone(),
);
let mut threads = vec![
//replicate threads
2018-04-28 00:31:20 -07:00
t_blob_receiver,
t_retransmit,
t_window,
t_replicator,
2018-04-28 00:31:20 -07:00
t_gossip,
t_listen,
//serve threads
t_packet_receiver,
t_responder,
2018-05-11 16:58:27 -07:00
t_thin_client,
t_sync,
];
threads.extend(verify_threads.into_iter());
Ok(threads)
2018-03-26 21:07:11 -07:00
}
}
2018-04-11 17:30:53 -07:00
/// Build a localhost node configuration plus its bound sockets for tests.
///
/// Returns `(config, gossip, replicate, requests, events)` sockets, all
/// bound to ephemeral ports on 127.0.0.1.
#[cfg(test)]
pub fn test_node() -> (ReplicatedData, UdpSocket, UdpSocket, UdpSocket, UdpSocket) {
    use signature::{KeyPair, KeyPairUtil};
    let events_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
    let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
    let replicate = UdpSocket::bind("127.0.0.1:0").unwrap();
    let requests_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
    let pubkey = KeyPair::new().pubkey();
    let d = ReplicatedData::new(
        pubkey,
        gossip.local_addr().unwrap(),
        replicate.local_addr().unwrap(),
        requests_socket.local_addr().unwrap(),
    );
    (d, gossip, replicate, requests_socket, events_socket)
}
2018-03-26 21:07:11 -07:00
#[cfg(test)]
mod tests {
    use accountant::Accountant;
    use accounting_stage::AccountingStage;
    use bincode::serialize;
    use chrono::prelude::*;
    use crdt::Crdt;
    use entry;
    use event::Event;
    use hash::{hash, Hash};
    use logger;
    use mint::Mint;
    use packet::BlobRecycler;
    use signature::{KeyPair, KeyPairUtil};
    use std::collections::VecDeque;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::{Arc, RwLock};
    use std::time::Duration;
    use streamer;
    use tpu::{test_node, Tpu};
    use transaction::Transaction;

    /// Test that a message sent from the leader to target1 is replicated to target2
    #[test]
    #[ignore]
    fn test_replicate() {
        logger::setup();
        let (leader_data, leader_gossip, _, leader_serve, _) = test_node();
        let (target1_data, target1_gossip, target1_replicate, target1_serve, _) = test_node();
        let (target2_data, target2_gossip, target2_replicate, _, _) = test_node();
        let exit = Arc::new(AtomicBool::new(false));

        //start crdt_leader
        let mut crdt_l = Crdt::new(leader_data.clone());
        crdt_l.set_leader(leader_data.id);
        let cref_l = Arc::new(RwLock::new(crdt_l));
        let t_l_gossip = Crdt::gossip(cref_l.clone(), exit.clone());
        let t_l_listen = Crdt::listen(cref_l, leader_gossip, exit.clone());

        //start crdt2
        let mut crdt2 = Crdt::new(target2_data.clone());
        crdt2.insert(leader_data.clone());
        crdt2.set_leader(leader_data.id);
        let leader_id = leader_data.id;
        let cref2 = Arc::new(RwLock::new(crdt2));
        let t2_gossip = Crdt::gossip(cref2.clone(), exit.clone());
        let t2_listen = Crdt::listen(cref2, target2_gossip, exit.clone());

        // setup some blob services to send blobs into the socket
        // to simulate the source peer and get blobs out of the socket to
        // simulate target peer
        let recv_recycler = BlobRecycler::default();
        let resp_recycler = BlobRecycler::default();
        let (s_reader, r_reader) = channel();
        let t_receiver = streamer::blob_receiver(
            exit.clone(),
            recv_recycler.clone(),
            target2_replicate,
            s_reader,
        ).unwrap();

        // simulate leader sending messages
        let (s_responder, r_responder) = channel();
        let t_responder = streamer::responder(
            leader_serve,
            exit.clone(),
            resp_recycler.clone(),
            r_responder,
        );

        let starting_balance = 10_000;
        let alice = Mint::new(starting_balance);
        let accountant = Accountant::new(&alice);
        let accounting_stage = AccountingStage::new(accountant, &alice.last_id(), Some(30));
        let tpu = Arc::new(Tpu::new(accounting_stage));
        let replicate_addr = target1_data.replicate_addr;
        let threads = Tpu::replicate(
            &tpu,
            target1_data,
            target1_gossip,
            target1_serve,
            target1_replicate,
            leader_data,
            exit.clone(),
        ).unwrap();

        // Build `num_blobs` blobs, each carrying two entries: a timestamp
        // event and a transfer from alice to bob.
        let mut alice_ref_balance = starting_balance;
        let mut msgs = VecDeque::new();
        let mut cur_hash = Hash::default();
        let num_blobs = 10;
        let transfer_amount = 501;
        let bob_keypair = KeyPair::new();
        for i in 0..num_blobs {
            let b = resp_recycler.allocate();
            let b_ = b.clone();
            let mut w = b.write().unwrap();
            w.set_index(i).unwrap();
            w.set_id(leader_id).unwrap();

            let accountant = &tpu.accounting_stage.accountant;

            let tr0 = Event::new_timestamp(&bob_keypair, Utc::now());
            let entry0 = entry::create_entry(&cur_hash, i, vec![tr0]);
            accountant.register_entry_id(&cur_hash);
            cur_hash = hash(&cur_hash);

            let tr1 = Transaction::new(
                &alice.keypair(),
                bob_keypair.pubkey(),
                transfer_amount,
                cur_hash,
            );
            accountant.register_entry_id(&cur_hash);
            cur_hash = hash(&cur_hash);
            let entry1 =
                entry::create_entry(&cur_hash, i + num_blobs, vec![Event::Transaction(tr1)]);
            accountant.register_entry_id(&cur_hash);
            cur_hash = hash(&cur_hash);

            alice_ref_balance -= transfer_amount;

            let serialized_entry = serialize(&vec![entry0, entry1]).unwrap();

            w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry);
            w.set_size(serialized_entry.len());
            w.meta.set_addr(&replicate_addr);
            drop(w);
            msgs.push_back(b_);
        }

        // send the blobs into the socket
        s_responder.send(msgs).expect("send");

        // receive retransmitted messages
        let timer = Duration::new(1, 0);
        let mut msgs: Vec<_> = Vec::new();
        while let Ok(msg) = r_reader.recv_timeout(timer) {
            trace!("msg: {:?}", msg);
            msgs.push(msg);
        }

        // After replication, alice's balance should reflect every transfer
        // and bob should hold the rest.
        let accountant = &tpu.accounting_stage.accountant;
        let alice_balance = accountant.get_balance(&alice.keypair().pubkey()).unwrap();
        assert_eq!(alice_balance, alice_ref_balance);

        let bob_balance = accountant.get_balance(&bob_keypair.pubkey()).unwrap();
        assert_eq!(bob_balance, starting_balance - alice_ref_balance);

        exit.store(true, Ordering::Relaxed);
        for t in threads {
            t.join().expect("join");
        }
        t2_gossip.join().expect("join");
        t2_listen.join().expect("join");
        t_receiver.join().expect("join");
        t_responder.join().expect("join");
        t_l_gossip.join().expect("join");
        t_l_listen.join().expect("join");
    }
}