Merge branch 'master' of github.com:solana-labs/solana

Robert Kelly 2018-05-08 23:40:54 -04:00
commit bdbe90b891
22 changed files with 2304 additions and 1359 deletions

View File

@ -1,7 +1,11 @@
[package]
name = "solana"
description = "High Performance Blockchain"
version = "0.5.0"
documentation = "https://docs.rs/solana"
homepage = "http://solana.io/"
repository = "https://github.com/solana-labs/solana"
@ -68,3 +72,4 @@ libc = "^0.2.1"
getopts = "^0.2"
isatty = "0.1"
futures = "0.1"
rand = "0.4.2"

View File

@ -102,6 +102,12 @@ $ source $HOME/.cargo/env
$ rustup component add rustfmt-preview
```
If your rustc version is lower than 1.25.0, please update it:
```bash
$ rustup update
```
Download the source code:
```bash
@ -118,6 +124,18 @@ Run the test suite:
cargo test
```
Debugging
---
There are some useful debug messages in the code; you can enable them on a per-module and per-level
basis with the normal RUST\_LOG environment variable. Run the testnode with this syntax:
```bash
$ RUST_LOG=solana::streamer=debug,solana::accountant_skel=info cat genesis.log | ./target/release/solana-testnode > transactions0.log
```
to see the debug and info log output for the streamer and accountant\_skel modules respectively. Generally,
we use debug for infrequent debug messages, trace for potentially frequent messages, and
info for performance-related logging.
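For reference, here is a minimal sketch of how those levels map onto the `log` crate macros used throughout the codebase (assuming `log` and `env_logger` as dependencies, which the test code already uses):
```rust
#[macro_use]
extern crate log;
extern crate env_logger;

fn main() {
    // Honors RUST_LOG, e.g. RUST_LOG=debug ./my_binary
    env_logger::init().unwrap();
    info!("performance-related logging");
    debug!("infrequent debug message");
    trace!("potentially frequent message");
}
```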
Benchmarking
---
@ -133,6 +151,13 @@ Run the benchmarks:
$ cargo +nightly bench --features="unstable"
```
To run the benchmarks on Linux with GPU optimizations enabled:
```bash
$ wget https://solana-build-artifacts.s3.amazonaws.com/v0.5.0/libcuda_verify_ed25519.a
$ cargo +nightly bench --features="unstable,cuda"
```
Code coverage
---

View File

@ -15,6 +15,7 @@ use signature::{KeyPair, PublicKey, Signature};
use std::collections::hash_map::Entry::Occupied;
use std::collections::{HashMap, HashSet, VecDeque};
use std::result;
use std::sync::atomic::{AtomicIsize, Ordering};
use std::sync::RwLock;
use transaction::Transaction;
@ -30,18 +31,18 @@ pub enum AccountingError {
pub type Result<T> = result::Result<T, AccountingError>;
/// Commit funds to the 'to' party.
fn apply_payment(balances: &RwLock<HashMap<PublicKey, RwLock<i64>>>, payment: &Payment) {
fn apply_payment(balances: &RwLock<HashMap<PublicKey, AtomicIsize>>, payment: &Payment) {
if balances.read().unwrap().contains_key(&payment.to) {
let bals = balances.read().unwrap();
*bals[&payment.to].write().unwrap() += payment.tokens;
bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
} else {
let mut bals = balances.write().unwrap();
bals.insert(payment.to, RwLock::new(payment.tokens));
bals.insert(payment.to, AtomicIsize::new(payment.tokens as isize));
}
}
pub struct Accountant {
balances: RwLock<HashMap<PublicKey, RwLock<i64>>>,
balances: RwLock<HashMap<PublicKey, AtomicIsize>>,
pending: RwLock<HashMap<Signature, Plan>>,
last_ids: RwLock<VecDeque<(Hash, RwLock<HashSet<Signature>>)>>,
time_sources: RwLock<HashSet<PublicKey>>,
@ -73,6 +74,13 @@ impl Accountant {
acc
}
/// Return the last entry ID registered
pub fn last_id(&self) -> Hash {
let last_ids = self.last_ids.read().unwrap();
let last_item = last_ids.iter().last().expect("empty last_ids list");
last_item.0
}
fn reserve_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
if signatures.read().unwrap().contains(sig) {
return false;
@ -127,27 +135,37 @@ impl Accountant {
/// funds and isn't a duplicate.
pub fn process_verified_transaction_debits(&self, tr: &Transaction) -> Result<()> {
let bals = self.balances.read().unwrap();
// Hold a write lock before the condition check, so that a debit can't occur
// between checking the balance and the withdraw.
let option = bals.get(&tr.from);
if option.is_none() {
return Err(AccountingError::AccountNotFound);
}
let mut bal = option.unwrap().write().unwrap();
if !self.reserve_signature_with_last_id(&tr.sig, &tr.data.last_id) {
return Err(AccountingError::InvalidTransferSignature);
}
if *bal < tr.data.tokens {
self.forget_signature_with_last_id(&tr.sig, &tr.data.last_id);
return Err(AccountingError::InsufficientFunds);
loop {
let bal = option.unwrap();
let current = bal.load(Ordering::Relaxed) as i64;
if current < tr.data.tokens {
self.forget_signature_with_last_id(&tr.sig, &tr.data.last_id);
return Err(AccountingError::InsufficientFunds);
}
let result = bal.compare_exchange(
current as isize,
(current - tr.data.tokens) as isize,
Ordering::Relaxed,
Ordering::Relaxed,
);
match result {
Ok(_) => return Ok(()),
Err(_) => continue,
};
}
*bal -= tr.data.tokens;
Ok(())
}
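// A minimal standalone sketch (name hypothetical, not part of the API) of the
// lock-free debit pattern used above: load the balance, check it, and retry
// the compare-exchange until no concurrent debit has raced us.
fn try_debit(bal: &AtomicIsize, tokens: isize) -> bool {
loop {
let current = bal.load(Ordering::Relaxed);
if current < tokens {
return false; // insufficient funds
}
// Succeeds only if the balance is still `current`; otherwise retry.
let swapped = bal.compare_exchange(
current,
current - tokens,
Ordering::Relaxed,
Ordering::Relaxed,
);
if swapped.is_ok() {
return true;
}
}
}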
pub fn process_verified_transaction_credits(&self, tr: &Transaction) {
@ -200,13 +218,18 @@ impl Accountant {
(trs, rest)
}
pub fn process_verified_events(&self, events: Vec<Event>) -> Result<()> {
pub fn process_verified_events(&self, events: Vec<Event>) -> Vec<Result<Event>> {
let (trs, rest) = Self::partition_events(events);
self.process_verified_transactions(trs);
let mut results: Vec<_> = self.process_verified_transactions(trs)
.into_iter()
.map(|x| x.map(Event::Transaction))
.collect();
for event in rest {
self.process_verified_event(&event)?;
results.push(self.process_verified_event(event));
}
Ok(())
results
}
/// Process a Witness Signature that has already been verified.
@ -260,12 +283,13 @@ impl Accountant {
}
/// Process a Transaction or Witness that has already been verified.
pub fn process_verified_event(&self, event: &Event) -> Result<()> {
match *event {
pub fn process_verified_event(&self, event: Event) -> Result<Event> {
match event {
Event::Transaction(ref tr) => self.process_verified_transaction(tr),
Event::Signature { from, tx_sig, .. } => self.process_verified_sig(from, tx_sig),
Event::Timestamp { from, dt, .. } => self.process_verified_timestamp(from, dt),
}
}?;
Ok(event)
}
/// Create, sign, and process a Transaction from `keypair` to `to` of
@ -300,7 +324,7 @@ impl Accountant {
pub fn get_balance(&self, pubkey: &PublicKey) -> Option<i64> {
let bals = self.balances.read().unwrap();
bals.get(pubkey).map(|x| *x.read().unwrap())
bals.get(pubkey).map(|x| x.load(Ordering::Relaxed) as i64)
}
}
@ -316,6 +340,8 @@ mod tests {
let alice = Mint::new(10_000);
let bob_pubkey = KeyPair::new().pubkey();
let acc = Accountant::new(&alice);
assert_eq!(acc.last_id(), alice.last_id());
acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.last_id())
.unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);

View File

@ -1,810 +0,0 @@
//! The `accountant_skel` module is a microservice that exposes the high-level
//! Accountant API to the network. Its message encoding is currently
//! in flux. Clients should use AccountantStub to interact with it.
use accountant::Accountant;
use bincode::{deserialize, serialize};
use ecdsa;
use entry::Entry;
use event::Event;
use hash::Hash;
use historian::Historian;
use packet;
use packet::SharedPackets;
use rayon::prelude::*;
use recorder::Signal;
use result::Result;
use serde_json;
use signature::PublicKey;
use std::cmp::max;
use std::collections::VecDeque;
use std::io::Write;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::{Arc, Mutex, RwLock};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use streamer;
use transaction::Transaction;
use subscribers;
pub struct AccountantSkel<W: Write + Send + 'static> {
acc: Accountant,
last_id: Hash,
writer: W,
historian: Historian,
entry_info_subscribers: Vec<SocketAddr>,
}
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum Request {
Transaction(Transaction),
GetBalance { key: PublicKey },
GetLastId,
Subscribe { subscriptions: Vec<Subscription> },
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum Subscription {
EntryInfo,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct EntryInfo {
pub id: Hash,
pub num_hashes: u64,
pub num_events: u64,
}
impl Request {
/// Verify the request is valid.
pub fn verify(&self) -> bool {
match *self {
Request::Transaction(ref tr) => tr.verify_plan(),
_ => true,
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub enum Response {
Balance { key: PublicKey, val: Option<i64> },
EntryInfo(EntryInfo),
LastId { id: Hash },
}
impl<W: Write + Send + 'static> AccountantSkel<W> {
/// Create a new AccountantSkel that wraps the given Accountant.
pub fn new(acc: Accountant, last_id: Hash, writer: W, historian: Historian) -> Self {
AccountantSkel {
acc,
last_id,
writer,
historian,
entry_info_subscribers: vec![],
}
}
fn notify_entry_info_subscribers(&mut self, entry: &Entry) {
// TODO: No need to bind().
let socket = UdpSocket::bind("127.0.0.1:0").expect("bind");
for addr in &self.entry_info_subscribers {
let entry_info = EntryInfo {
id: entry.id,
num_hashes: entry.num_hashes,
num_events: entry.events.len() as u64,
};
let data = serialize(&Response::EntryInfo(entry_info)).expect("serialize EntryInfo");
let _res = socket.send_to(&data, addr);
}
}
/// Process any Entry items that have been published by the Historian.
pub fn sync(&mut self) -> Hash {
while let Ok(entry) = self.historian.receiver.try_recv() {
self.last_id = entry.id;
self.acc.register_entry_id(&self.last_id);
writeln!(self.writer, "{}", serde_json::to_string(&entry).unwrap()).unwrap();
self.notify_entry_info_subscribers(&entry);
}
self.last_id
}
/// Process Request items sent by clients.
pub fn process_request(
&mut self,
msg: Request,
rsp_addr: SocketAddr,
) -> Option<(Response, SocketAddr)> {
match msg {
Request::GetBalance { key } => {
let val = self.acc.get_balance(&key);
Some((Response::Balance { key, val }, rsp_addr))
}
Request::GetLastId => Some((Response::LastId { id: self.sync() }, rsp_addr)),
Request::Transaction(_) => unreachable!(),
Request::Subscribe { subscriptions } => {
for subscription in subscriptions {
match subscription {
Subscription::EntryInfo => self.entry_info_subscribers.push(rsp_addr),
}
}
None
}
}
}
fn recv_batch(recvr: &streamer::PacketReceiver) -> Result<Vec<SharedPackets>> {
let timer = Duration::new(1, 0);
let msgs = recvr.recv_timeout(timer)?;
trace!("got msgs");
let mut batch = vec![msgs];
while let Ok(more) = recvr.try_recv() {
trace!("got more msgs");
batch.push(more);
}
info!("batch len {}", batch.len());
Ok(batch)
}
fn verify_batch(batch: Vec<SharedPackets>) -> Vec<Vec<(SharedPackets, Vec<u8>)>> {
let chunk_size = max(1, (batch.len() + 3) / 4);
let batches: Vec<_> = batch.chunks(chunk_size).map(|x| x.to_vec()).collect();
batches
.into_par_iter()
.map(|batch| {
let r = ecdsa::ed25519_verify(&batch);
batch.into_iter().zip(r).collect()
})
.collect()
}
fn verifier(
recvr: &streamer::PacketReceiver,
sendr: &Sender<Vec<(SharedPackets, Vec<u8>)>>,
) -> Result<()> {
let batch = Self::recv_batch(recvr)?;
let verified_batches = Self::verify_batch(batch);
for xs in verified_batches {
sendr.send(xs)?;
}
Ok(())
}
pub fn deserialize_packets(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
}
/// Split Request list into verified transactions and the rest
fn partition_requests(
req_vers: Vec<(Request, SocketAddr, u8)>,
) -> (Vec<Transaction>, Vec<(Request, SocketAddr)>) {
let mut trs = vec![];
let mut reqs = vec![];
for (msg, rsp_addr, verify) in req_vers {
match msg {
Request::Transaction(tr) => {
if verify != 0 {
trs.push(tr);
}
}
_ => reqs.push((msg, rsp_addr)),
}
}
(trs, reqs)
}
fn process_packets(
&mut self,
req_vers: Vec<(Request, SocketAddr, u8)>,
) -> Result<Vec<(Response, SocketAddr)>> {
let (trs, reqs) = Self::partition_requests(req_vers);
// Process the transactions in parallel and then log the successful ones.
for result in self.acc.process_verified_transactions(trs) {
if let Ok(tr) = result {
self.historian
.sender
.send(Signal::Event(Event::Transaction(tr)))?;
}
}
// Let validators know they should not attempt to process additional
// transactions in parallel.
self.historian.sender.send(Signal::Tick)?;
// Process the remaining requests serially.
let rsps = reqs.into_iter()
.filter_map(|(req, rsp_addr)| self.process_request(req, rsp_addr))
.collect();
Ok(rsps)
}
fn serialize_response(
resp: Response,
rsp_addr: SocketAddr,
blob_recycler: &packet::BlobRecycler,
) -> Result<packet::SharedBlob> {
let blob = blob_recycler.allocate();
{
let mut b = blob.write().unwrap();
let v = serialize(&resp)?;
let len = v.len();
b.data[..len].copy_from_slice(&v);
b.meta.size = len;
b.meta.set_addr(&rsp_addr);
}
Ok(blob)
}
fn serialize_responses(
rsps: Vec<(Response, SocketAddr)>,
blob_recycler: &packet::BlobRecycler,
) -> Result<VecDeque<packet::SharedBlob>> {
let mut blobs = VecDeque::new();
for (resp, rsp_addr) in rsps {
blobs.push_back(Self::serialize_response(resp, rsp_addr, blob_recycler)?);
}
Ok(blobs)
}
fn process(
obj: &Arc<Mutex<AccountantSkel<W>>>,
verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
blob_sender: &streamer::BlobSender,
packet_recycler: &packet::PacketRecycler,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let mms = verified_receiver.recv_timeout(timer)?;
for (msgs, vers) in mms {
let reqs = Self::deserialize_packets(&msgs.read().unwrap());
let req_vers = reqs.into_iter()
.zip(vers)
.filter_map(|(req, ver)| req.map(|(msg, addr)| (msg, addr, ver)))
.filter(|x| x.0.verify())
.collect();
let rsps = obj.lock().unwrap().process_packets(req_vers)?;
let blobs = Self::serialize_responses(rsps, blob_recycler)?;
if !blobs.is_empty() {
//don't wake up the other side if there is nothing
blob_sender.send(blobs)?;
}
packet_recycler.recycle(msgs);
// Write new entries to the ledger and notify subscribers.
obj.lock().unwrap().sync();
}
Ok(())
}
/// Process verified blobs, already in order
/// Respond with a signed hash of the state
fn replicate_state(
obj: &Arc<Mutex<AccountantSkel<W>>>,
verified_receiver: &streamer::BlobReceiver,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let blobs = verified_receiver.recv_timeout(timer)?;
for msgs in &blobs {
let blob = msgs.read().unwrap();
let entries: Vec<Entry> = deserialize(&blob.data()[..blob.meta.size]).unwrap();
for entry in entries {
obj.lock().unwrap().acc.register_entry_id(&entry.id);
obj.lock()
.unwrap()
.acc
.process_verified_events(entry.events)?;
}
//TODO respond back to leader with hash of the state
}
for blob in blobs {
blob_recycler.recycle(blob);
}
Ok(())
}
/// Create a UDP microservice that forwards messages to the given AccountantSkel.
/// This service acts as the network leader.
/// Set `exit` to shut down its threads.
pub fn serve(
obj: &Arc<Mutex<AccountantSkel<W>>>,
addr: &str,
exit: Arc<AtomicBool>,
) -> Result<Vec<JoinHandle<()>>> {
let read = UdpSocket::bind(addr)?;
// make sure we are on the same interface
let mut local = read.local_addr()?;
local.set_port(0);
let write = UdpSocket::bind(local)?;
let packet_recycler = packet::PacketRecycler::default();
let blob_recycler = packet::BlobRecycler::default();
let (packet_sender, packet_receiver) = channel();
let t_receiver =
streamer::receiver(read, exit.clone(), packet_recycler.clone(), packet_sender)?;
let (blob_sender, blob_receiver) = channel();
let t_responder =
streamer::responder(write, exit.clone(), blob_recycler.clone(), blob_receiver);
let (verified_sender, verified_receiver) = channel();
let exit_ = exit.clone();
let t_verifier = spawn(move || loop {
let e = Self::verifier(&packet_receiver, &verified_sender);
if e.is_err() && exit_.load(Ordering::Relaxed) {
break;
}
});
let skel = obj.clone();
let t_server = spawn(move || loop {
let e = Self::process(
&skel,
&verified_receiver,
&blob_sender,
&packet_recycler,
&blob_recycler,
);
if e.is_err() {
// Assume this was a timeout, so sync any empty entries.
skel.lock().unwrap().sync();
if exit.load(Ordering::Relaxed) {
break;
}
}
});
Ok(vec![t_receiver, t_responder, t_server, t_verifier])
}
/// This service receives messages from a leader in the network and processes the transactions
/// on the accountant state.
/// # Arguments
/// * `obj` - The accountant state.
/// * `rsubs` - The subscribers.
/// * `exit` - The exit signal.
/// # Remarks
/// The pipeline is constructed as follows:
/// 1. receive blobs from the network; these are out of order
/// 2. verify blobs, PoH, signatures (TODO)
/// 3. reconstruct contiguous window
/// a. order the blobs
/// b. use erasure coding to reconstruct missing blobs
/// c. ask the network for missing blobs, if erasure coding is insufficient
/// d. make sure that the blobs PoH sequences connect (TODO)
/// 4. process the transaction state machine
/// 5. respond with the hash of the state back to the leader
pub fn replicate(
obj: &Arc<Mutex<AccountantSkel<W>>>,
rsubs: subscribers::Subscribers,
exit: Arc<AtomicBool>,
) -> Result<Vec<JoinHandle<()>>> {
let read = UdpSocket::bind(rsubs.me.addr)?;
// make sure we are on the same interface
let mut local = read.local_addr()?;
local.set_port(0);
let write = UdpSocket::bind(local)?;
let blob_recycler = packet::BlobRecycler::default();
let (blob_sender, blob_receiver) = channel();
let t_blob_receiver = streamer::blob_receiver(
exit.clone(),
blob_recycler.clone(),
read,
blob_sender.clone(),
)?;
let (window_sender, window_receiver) = channel();
let (retransmit_sender, retransmit_receiver) = channel();
let subs = Arc::new(RwLock::new(rsubs));
let t_retransmit = streamer::retransmitter(
write,
exit.clone(),
subs.clone(),
blob_recycler.clone(),
retransmit_receiver,
);
//TODO
//the packets coming out of blob_receiver need to be sent to the GPU and verified
//then sent to the window, which does the erasure coding reconstruction
let t_window = streamer::window(
exit.clone(),
subs,
blob_recycler.clone(),
blob_receiver,
window_sender,
retransmit_sender,
);
let skel = obj.clone();
let t_server = spawn(move || loop {
let e = Self::replicate_state(&skel, &window_receiver, &blob_recycler);
if e.is_err() && exit.load(Ordering::Relaxed) {
break;
}
});
Ok(vec![t_blob_receiver, t_retransmit, t_window, t_server])
}
}
#[cfg(test)]
pub fn to_packets(r: &packet::PacketRecycler, reqs: Vec<Request>) -> Vec<SharedPackets> {
let mut out = vec![];
for rrs in reqs.chunks(packet::NUM_PACKETS) {
let p = r.allocate();
p.write()
.unwrap()
.packets
.resize(rrs.len(), Default::default());
for (i, o) in rrs.iter().zip(p.write().unwrap().packets.iter_mut()) {
let v = serialize(&i).expect("serialize request");
let len = v.len();
o.data[..len].copy_from_slice(&v);
o.meta.size = len;
}
out.push(p);
}
return out;
}
#[cfg(test)]
mod tests {
use accountant_skel::{to_packets, Request};
use bincode::serialize;
use ecdsa;
use packet::{BlobRecycler, PacketRecycler, NUM_PACKETS};
use transaction::{memfind, test_tx};
use accountant::Accountant;
use accountant_skel::AccountantSkel;
use accountant_stub::AccountantStub;
use entry::Entry;
use futures::Future;
use historian::Historian;
use mint::Mint;
use plan::Plan;
use recorder::Signal;
use signature::{KeyPair, KeyPairUtil};
use std::io::sink;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread::sleep;
use std::time::Duration;
use transaction::Transaction;
use subscribers::{Node, Subscribers};
use streamer;
use std::sync::mpsc::channel;
use std::collections::VecDeque;
use hash::{hash, Hash};
use event::Event;
use entry;
use chrono::prelude::*;
#[test]
fn test_layout() {
let tr = test_tx();
let tx = serialize(&tr).unwrap();
let packet = serialize(&Request::Transaction(tr)).unwrap();
assert_matches!(memfind(&packet, &tx), Some(ecdsa::TX_OFFSET));
assert_matches!(memfind(&packet, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), None);
}
#[test]
fn test_to_packets() {
let tr = Request::Transaction(test_tx());
let re = PacketRecycler::default();
let rv = to_packets(&re, vec![tr.clone(); 1]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), 1);
let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS + 1]);
assert_eq!(rv.len(), 2);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
assert_eq!(rv[1].read().unwrap().packets.len(), 1);
}
#[test]
fn test_accounting_sequential_consistency() {
// In this attack, we'll demonstrate that a verifier can interpret the ledger
// differently if the server doesn't signal the ledger to add an Entry,
// or if the verifier tries to parallelize across multiple Entries.
let mint = Mint::new(2);
let acc = Accountant::new(&mint);
let rsp_addr: SocketAddr = "0.0.0.0:0".parse().expect("socket address");
let historian = Historian::new(&mint.last_id(), None);
let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), historian);
// Process a batch that includes a transaction that receives two tokens.
let alice = KeyPair::new();
let tr = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
let req_vers = vec![(Request::Transaction(tr), rsp_addr, 1_u8)];
assert!(skel.process_packets(req_vers).is_ok());
// Process a second batch that spends one of those tokens.
let tr = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
let req_vers = vec![(Request::Transaction(tr), rsp_addr, 1_u8)];
assert!(skel.process_packets(req_vers).is_ok());
// Collect the ledger and feed it to a new accountant.
skel.historian.sender.send(Signal::Tick).unwrap();
drop(skel.historian.sender);
let entries: Vec<Entry> = skel.historian.receiver.iter().collect();
// Assert the user holds one token, not two. If the server only output one
// entry, then the second transaction will be rejected, because it drives
// the account balance below zero before the credit is added.
let acc = Accountant::new(&mint);
for entry in entries {
acc.process_verified_events(entry.events).unwrap();
}
assert_eq!(acc.get_balance(&alice.pubkey()), Some(1));
}
#[test]
fn test_accountant_bad_sig() {
let serve_port = 9002;
let send_port = 9003;
let addr = format!("127.0.0.1:{}", serve_port);
let send_addr = format!("127.0.0.1:{}", send_port);
let alice = Mint::new(10_000);
let acc = Accountant::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let historian = Historian::new(&alice.last_id(), Some(30));
let acc = Arc::new(Mutex::new(AccountantSkel::new(
acc,
alice.last_id(),
sink(),
historian,
)));
let _threads = AccountantSkel::serve(&acc, &addr, exit.clone()).unwrap();
sleep(Duration::from_millis(300));
let socket = UdpSocket::bind(send_addr).unwrap();
socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap();
let mut acc = AccountantStub::new(&addr, socket);
let last_id = acc.get_last_id().wait().unwrap();
let tr = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);
let _sig = acc.transfer_signed(tr).unwrap();
let last_id = acc.get_last_id().wait().unwrap();
let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id);
tr2.data.tokens = 502;
tr2.data.plan = Plan::new_payment(502, bob_pubkey);
let _sig = acc.transfer_signed(tr2).unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).wait().unwrap(), 500);
exit.store(true, Ordering::Relaxed);
}
use std::sync::{Once, ONCE_INIT};
extern crate env_logger;
static INIT: Once = ONCE_INIT;
/// Setup function that is only run once, even if called multiple times.
fn setup() {
INIT.call_once(|| {
env_logger::init().unwrap();
});
}
#[test]
fn test_replicate() {
setup();
let leader_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
let leader_addr = leader_sock.local_addr().unwrap();
let me_addr = "127.0.0.1:9010".parse().unwrap();
let target_peer_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
let target_peer_addr = target_peer_sock.local_addr().unwrap();
let source_peer_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false));
let node_me = Node::new([0, 0, 0, 0, 0, 0, 0, 1], 10, me_addr);
let node_subs = vec![Node::new([0, 0, 0, 0, 0, 0, 0, 2], 8, target_peer_addr); 1];
let node_leader = Node::new([0, 0, 0, 0, 0, 0, 0, 3], 20, leader_addr);
let subs = Subscribers::new(node_me, node_leader, &node_subs);
// setup some blob services to send blobs into the socket
// to simulate the source peer and get blobs out of the socket to
// simulate target peer
let recv_recycler = BlobRecycler::default();
let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
let t_receiver = streamer::blob_receiver(
exit.clone(),
recv_recycler.clone(),
target_peer_sock,
s_reader,
).unwrap();
let (s_responder, r_responder) = channel();
let t_responder = streamer::responder(
source_peer_sock,
exit.clone(),
resp_recycler.clone(),
r_responder,
);
let starting_balance = 10_000;
let alice = Mint::new(starting_balance);
let acc = Accountant::new(&alice);
let historian = Historian::new(&alice.last_id(), Some(30));
let acc = Arc::new(Mutex::new(AccountantSkel::new(
acc,
alice.last_id(),
sink(),
historian,
)));
let _threads = AccountantSkel::replicate(&acc, subs, exit.clone()).unwrap();
let mut alice_ref_balance = starting_balance;
let mut msgs = VecDeque::new();
let mut cur_hash = Hash::default();
let num_blobs = 10;
let transfer_amount = 501;
let bob_keypair = KeyPair::new();
for i in 0..num_blobs {
let b = resp_recycler.allocate();
let b_ = b.clone();
let mut w = b.write().unwrap();
w.set_index(i).unwrap();
let tr0 = Event::new_timestamp(&bob_keypair, Utc::now());
let entry0 = entry::create_entry(&cur_hash, i, vec![tr0]);
acc.lock().unwrap().acc.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
let tr1 = Transaction::new(
&alice.keypair(),
bob_keypair.pubkey(),
transfer_amount,
cur_hash,
);
acc.lock().unwrap().acc.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
let entry1 =
entry::create_entry(&cur_hash, i + num_blobs, vec![Event::Transaction(tr1)]);
acc.lock().unwrap().acc.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
alice_ref_balance -= transfer_amount;
let serialized_entry = serialize(&vec![entry0, entry1]).unwrap();
w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry);
w.set_size(serialized_entry.len());
w.meta.set_addr(&me_addr);
drop(w);
msgs.push_back(b_);
}
// send the blobs into the socket
s_responder.send(msgs).expect("send");
// receive retransmitted messages
let timer = Duration::new(1, 0);
let mut msgs: Vec<_> = Vec::new();
while let Ok(msg) = r_reader.recv_timeout(timer) {
trace!("msg: {:?}", msg);
msgs.push(msg);
}
let alice_balance = acc.lock()
.unwrap()
.acc
.get_balance(&alice.keypair().pubkey())
.unwrap();
assert_eq!(alice_balance, alice_ref_balance);
let bob_balance = acc.lock()
.unwrap()
.acc
.get_balance(&bob_keypair.pubkey())
.unwrap();
assert_eq!(bob_balance, starting_balance - alice_ref_balance);
exit.store(true, Ordering::Relaxed);
t_receiver.join().expect("join");
t_responder.join().expect("join");
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use accountant::{Accountant, MAX_ENTRY_IDS};
use accountant_skel::*;
use bincode::serialize;
use hash::hash;
use mint::Mint;
use signature::{KeyPair, KeyPairUtil};
use std::collections::HashSet;
use std::io::sink;
use std::time::Instant;
use transaction::Transaction;
#[bench]
fn process_packets_bench(_bencher: &mut Bencher) {
let mint = Mint::new(100_000_000);
let acc = Accountant::new(&mint);
let rsp_addr: SocketAddr = "0.0.0.0:0".parse().expect("socket address");
// Create transactions between unrelated parties.
let txs = 100_000;
let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
let transactions: Vec<_> = (0..txs)
.into_par_iter()
.map(|i| {
// Seed the 'to' account and a cell for its signature.
let dummy_id = i % (MAX_ENTRY_IDS as i32);
let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
{
let mut last_ids = last_ids.lock().unwrap();
if !last_ids.contains(&last_id) {
last_ids.insert(last_id);
acc.register_entry_id(&last_id);
}
}
// Seed the 'from' account.
let rando0 = KeyPair::new();
let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
acc.process_verified_transaction(&tr).unwrap();
let rando1 = KeyPair::new();
let tr = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
acc.process_verified_transaction(&tr).unwrap();
// Finally, return a transaction that's unique
Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
})
.collect();
let req_vers = transactions
.into_iter()
.map(|tr| (Request::Transaction(tr), rsp_addr, 1_u8))
.collect();
let historian = Historian::new(&mint.last_id(), None);
let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), historian);
let now = Instant::now();
assert!(skel.process_packets(req_vers).is_ok());
let duration = now.elapsed();
let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
let tps = txs as f64 / sec;
// Ensure that all transactions were successfully logged.
drop(skel.historian.sender);
let entries: Vec<Entry> = skel.historian.receiver.iter().collect();
assert_eq!(entries.len(), 1);
assert_eq!(entries[0].events.len(), txs as usize);
println!("{} tps", tps);
}
}

View File

@ -1,201 +0,0 @@
//! The `accountant_stub` module is a client-side object that interfaces with a server-side Accountant
//! object via the network interface exposed by AccountantSkel. Client code should use
//! this object instead of writing messages to the network directly. The binary
encoding of its messages is unstable and may change in future releases.
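//!
//! A hypothetical usage sketch (addresses, keypairs, and amounts are made up;
//! a real caller must bind the socket to a reachable address first):
//!
//! ```ignore
//! let socket = UdpSocket::bind("127.0.0.1:0").unwrap();
//! let mut acc = AccountantStub::new("127.0.0.1:8000", socket);
//! let last_id = acc.get_last_id().wait().unwrap();
//! let sig = acc.transfer(500, &alice_keypair, bob_pubkey, &last_id).unwrap();
//! assert_eq!(acc.get_balance(&bob_pubkey).wait().unwrap(), 500);
//! ```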
use accountant_skel::{Request, Response, Subscription};
use bincode::{deserialize, serialize};
use futures::future::{ok, FutureResult};
use hash::Hash;
use signature::{KeyPair, PublicKey, Signature};
use std::collections::HashMap;
use std::io;
use std::net::UdpSocket;
use transaction::Transaction;
pub struct AccountantStub {
pub addr: String,
pub socket: UdpSocket,
last_id: Option<Hash>,
num_events: u64,
balances: HashMap<PublicKey, Option<i64>>,
}
impl AccountantStub {
/// Create a new AccountantStub that will interface with AccountantSkel
/// over `socket`. To receive responses, the caller must bind `socket`
/// to a public address before invoking AccountantStub methods.
pub fn new(addr: &str, socket: UdpSocket) -> Self {
let stub = AccountantStub {
addr: addr.to_string(),
socket,
last_id: None,
num_events: 0,
balances: HashMap::new(),
};
stub.init();
stub
}
pub fn init(&self) {
let subscriptions = vec![Subscription::EntryInfo];
let req = Request::Subscribe { subscriptions };
let data = serialize(&req).expect("serialize Subscribe");
let _res = self.socket.send_to(&data, &self.addr);
}
pub fn recv_response(&self) -> io::Result<Response> {
let mut buf = vec![0u8; 1024];
self.socket.recv_from(&mut buf)?;
let resp = deserialize(&buf).expect("deserialize balance");
Ok(resp)
}
pub fn process_response(&mut self, resp: Response) {
match resp {
Response::Balance { key, val } => {
self.balances.insert(key, val);
}
Response::LastId { id } => {
self.last_id = Some(id);
}
Response::EntryInfo(entry_info) => {
self.last_id = Some(entry_info.id);
self.num_events += entry_info.num_events;
}
}
}
/// Send a signed Transaction to the server for processing. This method
/// does not wait for a response.
pub fn transfer_signed(&self, tr: Transaction) -> io::Result<usize> {
let req = Request::Transaction(tr);
let data = serialize(&req).unwrap();
self.socket.send_to(&data, &self.addr)
}
/// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
pub fn transfer(
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
last_id: &Hash,
) -> io::Result<Signature> {
let tr = Transaction::new(keypair, to, n, *last_id);
let sig = tr.sig;
self.transfer_signed(tr).map(|_| sig)
}
/// Request the balance of the user holding `pubkey`. This method blocks
/// until the server sends a response. If the response packet is dropped
/// by the network, this method will hang indefinitely.
pub fn get_balance(&mut self, pubkey: &PublicKey) -> FutureResult<i64, i64> {
let req = Request::GetBalance { key: *pubkey };
let data = serialize(&req).expect("serialize GetBalance");
self.socket
.send_to(&data, &self.addr)
.expect("buffer error");
let mut done = false;
while !done {
let resp = self.recv_response().expect("recv response");
if let &Response::Balance { ref key, .. } = &resp {
done = key == pubkey;
}
self.process_response(resp);
}
ok(self.balances[pubkey].unwrap())
}
/// Request the last Entry ID from the server. This method blocks
/// until the server sends a response. At the time of this writing,
/// it also has the side-effect of causing the server to log any
/// entries that have been published by the Historian.
pub fn get_last_id(&mut self) -> FutureResult<Hash, ()> {
let req = Request::GetLastId;
let data = serialize(&req).expect("serialize GetId");
self.socket
.send_to(&data, &self.addr)
.expect("buffer error");
let mut done = false;
while !done {
let resp = self.recv_response().expect("recv response");
if let &Response::LastId { .. } = &resp {
done = true;
}
self.process_response(resp);
}
ok(self.last_id.unwrap_or(Hash::default()))
}
/// Return the number of transactions the server processed since creating
/// this stub instance.
pub fn transaction_count(&mut self) -> u64 {
// Wait for at least one EntryInfo.
let mut done = false;
while !done {
let resp = self.recv_response().expect("recv response");
if let &Response::EntryInfo(_) = &resp {
done = true;
}
self.process_response(resp);
}
// Then take the rest.
self.socket.set_nonblocking(true).expect("set nonblocking");
loop {
match self.recv_response() {
Err(_) => break,
Ok(resp) => self.process_response(resp),
}
}
self.socket.set_nonblocking(false).expect("set blocking");
self.num_events
}
}
#[cfg(test)]
mod tests {
use super::*;
use accountant::Accountant;
use accountant_skel::AccountantSkel;
use futures::Future;
use historian::Historian;
use mint::Mint;
use signature::{KeyPair, KeyPairUtil};
use std::io::sink;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread::sleep;
use std::time::Duration;
// TODO: Figure out why this test sometimes hangs on TravisCI.
#[test]
fn test_accountant_stub() {
let addr = "127.0.0.1:9000";
let send_addr = "127.0.0.1:9001";
let alice = Mint::new(10_000);
let acc = Accountant::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let historian = Historian::new(&alice.last_id(), Some(30));
let acc = Arc::new(Mutex::new(AccountantSkel::new(
acc,
alice.last_id(),
sink(),
historian,
)));
let _threads = AccountantSkel::serve(&acc, addr, exit.clone()).unwrap();
sleep(Duration::from_millis(300));
let socket = UdpSocket::bind(send_addr).unwrap();
socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap();
let mut acc = AccountantStub::new(addr, socket);
let last_id = acc.get_last_id().wait().unwrap();
let _sig = acc.transfer(500, &alice.keypair(), bob_pubkey, &last_id)
.unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).wait().unwrap(), 500);
exit.store(true, Ordering::Relaxed);
}
}

View File

@ -10,14 +10,16 @@ use futures::Future;
use getopts::Options;
use isatty::stdin_isatty;
use rayon::prelude::*;
use solana::accountant_stub::AccountantStub;
use solana::mint::MintDemo;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::thin_client::ThinClient;
use solana::transaction::Transaction;
use std::env;
use std::io::{stdin, Read};
use std::net::UdpSocket;
use std::net::{SocketAddr, UdpSocket};
use std::process::exit;
use std::thread::sleep;
use std::time::Duration;
use std::time::Instant;
use untrusted::Input;
@ -33,12 +35,12 @@ fn print_usage(program: &str, opts: Options) {
fn main() {
let mut threads = 4usize;
let mut addr: String = "127.0.0.1:8000".to_string();
let mut send_addr: String = "127.0.0.1:8001".to_string();
let mut client_addr: String = "127.0.0.1:8010".to_string();
let mut opts = Options::new();
opts.optopt("s", "", "server address", "host:port");
opts.optopt("c", "", "client address", "host:port");
opts.optopt("t", "", "number of threads", "4");
opts.optopt("t", "", "number of threads", &format!("{}", threads));
opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
@ -58,7 +60,7 @@ fn main() {
addr = matches.opt_str("s").unwrap();
}
if matches.opt_present("c") {
send_addr = matches.opt_str("c").unwrap();
client_addr = matches.opt_str("c").unwrap();
}
if matches.opt_present("t") {
threads = matches.opt_str("t").unwrap().parse().expect("integer");
@ -82,11 +84,14 @@ fn main() {
exit(1);
});
let socket = UdpSocket::bind(&send_addr).unwrap();
let mut acc = AccountantStub::new(&addr, socket);
println!("Binding to {}", client_addr);
let socket = UdpSocket::bind(&client_addr).unwrap();
socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap();
let mut acc = ThinClient::new(addr.parse().unwrap(), socket);
println!("Get last ID...");
let last_id = acc.get_last_id().wait().unwrap();
println!("Got last ID {:?}", last_id);
println!("Creating keypairs...");
let txs = demo.users.len() / 2;
@ -102,7 +107,7 @@ fn main() {
.into_par_iter()
.map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, last_id))
.collect();
let duration = now.elapsed();
let mut duration = now.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let bsps = txs as f64 / ns as f64;
let nsps = ns as f64 / txs as f64;
@ -113,31 +118,33 @@ fn main() {
);
let initial_tx_count = acc.transaction_count();
println!("initial count {}", initial_tx_count);
println!("Transfering {} transactions in {} batches", txs, threads);
let now = Instant::now();
let sz = transactions.len() / threads;
let chunks: Vec<_> = transactions.chunks(sz).collect();
chunks.into_par_iter().for_each(|trs| {
println!("Transferring 1 unit {} times...", trs.len());
let send_addr = "0.0.0.0:0";
let socket = UdpSocket::bind(send_addr).unwrap();
let acc = AccountantStub::new(&addr, socket);
println!("Transferring 1 unit {} times... to", trs.len());
let mut client_addr: SocketAddr = client_addr.parse().unwrap();
client_addr.set_port(0);
let socket = UdpSocket::bind(client_addr).unwrap();
let acc = ThinClient::new(addr.parse().unwrap(), socket);
for tr in trs {
acc.transfer_signed(tr.clone()).unwrap();
}
});
println!("Waiting for half the transactions to complete...",);
let mut tx_count = acc.transaction_count();
while tx_count < transactions.len() as u64 / 2 {
println!("Waiting for transactions to complete...",);
let mut tx_count;
for _ in 0..10 {
tx_count = acc.transaction_count();
duration = now.elapsed();
let txs = tx_count - initial_tx_count;
println!("Transactions processed {}", txs);
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let tps = (txs * 1_000_000_000) as f64 / ns as f64;
println!("{} tps", tps);
sleep(Duration::new(1, 0));
}
let txs = tx_count - initial_tx_count;
println!("Transactions processed {}", txs);
let duration = now.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let tps = (txs * 1_000_000_000) as f64 / ns as f64;
println!("Done. {} tps", tps);
}

View File

@ -8,26 +8,27 @@ use solana::ledger::Block;
use solana::recorder::Signal;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
use std::sync::mpsc::SendError;
use std::sync::mpsc::{sync_channel, SendError, SyncSender};
use std::thread::sleep;
use std::time::Duration;
fn create_ledger(hist: &Historian, seed: &Hash) -> Result<(), SendError<Signal>> {
fn create_ledger(input: &SyncSender<Signal>, seed: &Hash) -> Result<(), SendError<Signal>> {
sleep(Duration::from_millis(15));
let keypair = KeyPair::new();
let tr = Transaction::new(&keypair, keypair.pubkey(), 42, *seed);
let signal0 = Signal::Event(Event::Transaction(tr));
hist.sender.send(signal0)?;
input.send(signal0)?;
sleep(Duration::from_millis(10));
Ok(())
}
fn main() {
let (input, event_receiver) = sync_channel(10);
let seed = Hash::default();
let hist = Historian::new(&seed, Some(10));
create_ledger(&hist, &seed).expect("send error");
drop(hist.sender);
let entries: Vec<Entry> = hist.receiver.iter().collect();
let hist = Historian::new(event_receiver, &seed, Some(10));
create_ledger(&input, &seed).expect("send error");
drop(input);
let entries: Vec<Entry> = hist.output.lock().unwrap().iter().collect();
for entry in &entries {
println!("{:?}", entry);
}

View File

@ -7,15 +7,19 @@ extern crate solana;
use getopts::Options;
use isatty::stdin_isatty;
use solana::accountant::Accountant;
use solana::accountant_skel::AccountantSkel;
use solana::crdt::ReplicatedData;
use solana::entry::Entry;
use solana::event::Event;
use solana::historian::Historian;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::tpu::Tpu;
use std::env;
use std::io::{stdin, stdout, Read};
use std::net::UdpSocket;
use std::process::exit;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex};
use std::sync::mpsc::sync_channel;
use std::sync::Arc;
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <transaction.log> | {} [options]\n\n", program);
@ -48,7 +52,10 @@ fn main() {
if matches.opt_present("p") {
port = matches.opt_str("p").unwrap().parse().expect("port");
}
let addr = format!("0.0.0.0:{}", port);
let serve_addr = format!("0.0.0.0:{}", port);
let gossip_addr = format!("0.0.0.0:{}", port + 1);
let replicate_addr = format!("0.0.0.0:{}", port + 2);
let skinny_addr = format!("0.0.0.0:{}", port + 3);
if stdin_isatty() {
eprintln!("nothing found on stdin, expected a log file");
@ -70,6 +77,8 @@ fn main() {
})
});
eprintln!("done parsing...");
// The first item in the ledger is required to be an entry with zero num_hashes,
// which implies its id can be used as the ledger's seed.
let entry0 = entries.next().unwrap();
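// A sketch of the invariant stated above (a hypothetical check, not part of
// the original flow): the seed entry must carry zero num_hashes.
// assert_eq!(entry0.num_hashes, 0, "ledger must start with a zero-hash entry");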
@ -84,27 +93,55 @@ fn main() {
None
};
eprintln!("creating accountant...");
let acc = Accountant::new_from_deposit(&deposit.unwrap());
acc.register_entry_id(&entry0.id);
acc.register_entry_id(&entry1.id);
eprintln!("processing entries...");
let mut last_id = entry1.id;
for entry in entries {
last_id = entry.id;
acc.process_verified_events(entry.events).unwrap();
let results = acc.process_verified_events(entry.events);
for result in results {
if let Err(e) = result {
eprintln!("failed to process event {:?}", e);
exit(1);
}
}
acc.register_entry_id(&last_id);
}
let historian = Historian::new(&last_id, Some(1000));
eprintln!("creating networking stack...");
let (input, event_receiver) = sync_channel(10_000);
let historian = Historian::new(event_receiver, &last_id, Some(1000));
let exit = Arc::new(AtomicBool::new(false));
let skel = Arc::new(Mutex::new(AccountantSkel::new(
acc,
last_id,
let tpu = Arc::new(Tpu::new(acc, input, historian));
let serve_sock = UdpSocket::bind(&serve_addr).unwrap();
let gossip_sock = UdpSocket::bind(&gossip_addr).unwrap();
let replicate_sock = UdpSocket::bind(&replicate_addr).unwrap();
let skinny_sock = UdpSocket::bind(&skinny_addr).unwrap();
let pubkey = KeyPair::new().pubkey();
let d = ReplicatedData::new(
pubkey,
gossip_sock.local_addr().unwrap(),
replicate_sock.local_addr().unwrap(),
serve_sock.local_addr().unwrap(),
);
eprintln!("starting server...");
let threads = Tpu::serve(
&tpu,
d,
serve_sock,
skinny_sock,
gossip_sock,
exit.clone(),
stdout(),
historian,
)));
let threads = AccountantSkel::serve(&skel, &addr, exit.clone()).unwrap();
eprintln!("Ready. Listening on {}", addr);
).unwrap();
eprintln!("Ready. Listening on {}", serve_addr);
for t in threads {
t.join().expect("join");
}

View File

@ -1,14 +1,24 @@
//! The `crdt` module defines a data structure that is shared by all the nodes in the network over
//! a gossip control plane. The goal is to share small bits of of-chain information and detect and
//! a gossip control plane. The goal is to share small bits of off-chain information and detect and
//! repair partitions.
//!
//! This CRDT only supports a very limited set of types. A map of PublicKey -> Versioned Struct.
//! The last version is always picked during an update.
//!
//! The network is arranged in layers:
//!
//! * layer 0 - Leader.
//! * layer 1 - As many nodes as we can fit
//! * layer 2 - Everyone else; if layer 1 holds `2^10` nodes, each fanning out to `2^10` more, layer 2 can hold `2^10 * 2^10 = 2^20` nodes.
//!
//! Accountant needs to provide an interface for us to query the stake weight
use bincode::{deserialize, serialize};
use byteorder::{LittleEndian, ReadBytesExt};
use hash::Hash;
use result::Result;
use packet::SharedBlob;
use rayon::prelude::*;
use result::{Error, Result};
use ring::rand::{SecureRandom, SystemRandom};
use signature::{PublicKey, Signature};
use std::collections::HashMap;
@ -22,16 +32,16 @@ use std::time::Duration;
/// Structure to be replicated by the network
#[derive(Serialize, Deserialize, Clone)]
pub struct ReplicatedData {
id: PublicKey,
pub id: PublicKey,
sig: Signature,
/// should always be increasing
version: u64,
/// address to connect to for gossip
gossip_addr: SocketAddr,
pub gossip_addr: SocketAddr,
/// address to connect to for replication
replicate_addr: SocketAddr,
pub replicate_addr: SocketAddr,
/// address to connect to when this node is leader
lead_addr: SocketAddr,
pub serve_addr: SocketAddr,
/// current leader identity
current_leader_id: PublicKey,
/// last verified hash that was submitted to the leader
@ -41,15 +51,19 @@ pub struct ReplicatedData {
}
impl ReplicatedData {
pub fn new(id: PublicKey, gossip_addr: SocketAddr) -> ReplicatedData {
let daddr = "0.0.0.0:0".parse().unwrap();
pub fn new(
id: PublicKey,
gossip_addr: SocketAddr,
replicate_addr: SocketAddr,
serve_addr: SocketAddr,
) -> ReplicatedData {
ReplicatedData {
id,
sig: Signature::default(),
version: 0,
gossip_addr,
replicate_addr: daddr,
lead_addr: daddr,
replicate_addr,
serve_addr,
current_leader_id: PublicKey::default(),
last_verified_hash: Hash::default(),
last_verified_count: 0,
@ -77,8 +91,8 @@ pub struct Crdt {
local: HashMap<PublicKey, u64>,
/// The value of the remote update index that i have last seen
/// This Node will ask external nodes for updates since the value in this list
remote: HashMap<PublicKey, u64>,
update_index: u64,
pub remote: HashMap<PublicKey, u64>,
pub update_index: u64,
me: PublicKey,
timeout: Duration,
}
@ -109,23 +123,141 @@ impl Crdt {
g.table.insert(me.id, me);
g
}
pub fn import(&mut self, v: &ReplicatedData) {
// TODO check that last_verified types are always increasing
// TODO probably an error or attack
if self.me != v.id {
self.insert(v);
}
pub fn my_data(&self) -> &ReplicatedData {
&self.table[&self.me]
}
pub fn insert(&mut self, v: &ReplicatedData) {
pub fn leader_data(&self) -> &ReplicatedData {
&self.table[&self.table[&self.me].current_leader_id]
}
pub fn set_leader(&mut self, key: PublicKey) -> () {
let mut me = self.my_data().clone();
me.current_leader_id = key;
me.version += 1;
self.insert(me);
}
pub fn insert(&mut self, v: ReplicatedData) {
// TODO check that last_verified types are always increasing
if self.table.get(&v.id).is_none() || (v.version > self.table[&v.id].version) {
//somehow we signed a message for our own identity with a higher version than
// we have stored ourselves
trace!("me: {:?}", self.me[0]);
trace!("v.id: {:?}", v.id[0]);
trace!("insert! {}", v.version);
self.update_index += 1;
let _ = self.table.insert(v.id, v.clone());
let _ = self.table.insert(v.id.clone(), v.clone());
let _ = self.local.insert(v.id, self.update_index);
} else {
trace!("INSERT FAILED {}", v.version);
trace!(
"INSERT FAILED new.version: {} me.version: {}",
v.version,
self.table[&v.id].version
);
}
}
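// Miniature of the last-write-wins rule implemented above (helper name
// hypothetical): an incoming record replaces the stored one only if its
// version is strictly newer, or if we have no record at all.
fn is_newer(existing: Option<u64>, incoming: u64) -> bool {
existing.map_or(true, |stored| incoming > stored)
}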
/// broadcast messages from the leader to layer 1 nodes
/// # Remarks
/// We need to avoid having obj locked while doing any io, such as the `send_to`
pub fn broadcast(
obj: &Arc<RwLock<Self>>,
blobs: &Vec<SharedBlob>,
s: &UdpSocket,
transmit_index: &mut u64,
) -> Result<()> {
let (me, table): (ReplicatedData, Vec<ReplicatedData>) = {
// copy to avoid locking during IO
let robj = obj.read().unwrap();
let cloned_table: Vec<ReplicatedData> = robj.table.values().cloned().collect();
(robj.table[&robj.me].clone(), cloned_table)
};
let daddr = "0.0.0.0:0".parse().unwrap();
let items: Vec<(usize, &ReplicatedData)> = table
.iter()
.filter(|v| {
if me.id == v.id {
//filter myself
false
} else if v.replicate_addr == daddr {
//filter nodes that are not listening
false
} else {
true
}
})
.enumerate()
.collect();
let orders: Vec<_> = items.into_iter().cycle().zip(blobs.iter()).collect();
let errs: Vec<_> = orders
.into_par_iter()
.map(|((i, v), b)| {
// only leader should be broadcasting
assert!(me.current_leader_id != v.id);
let mut blob = b.write().unwrap();
blob.set_id(me.id).expect("set_id");
blob.set_index(*transmit_index + i as u64)
.expect("set_index");
//TODO profile this, may need multiple sockets for par_iter
s.send_to(&blob.data[..blob.meta.size], &v.replicate_addr)
})
.collect();
for e in errs {
trace!("retransmit result {:?}", e);
match e {
Err(e) => return Err(Error::IO(e)),
_ => (),
}
*transmit_index += 1;
}
Ok(())
}
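// The locking discipline above and in `retransmit` below, in miniature
// (names and types hypothetical): snapshot the shared state under a short
// read lock, drop the lock, then do the slow network IO on the private copy.
fn snapshot_then_send(
table: &Arc<RwLock<Vec<u8>>>,
s: &UdpSocket,
addr: &SocketAddr,
) -> std::io::Result<usize> {
let copy = table.read().unwrap().clone(); // lock held only for the clone
s.send_to(&copy, addr) // no lock held during IO
}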
/// retransmit messages from the leader to layer 1 nodes
/// # Remarks
/// We need to avoid having obj locked while doing any io, such as the `send_to`
pub fn retransmit(obj: &Arc<RwLock<Self>>, blob: &SharedBlob, s: &UdpSocket) -> Result<()> {
let (me, table): (ReplicatedData, Vec<ReplicatedData>) = {
// copy to avoid locking during IO
let s = obj.read().unwrap();
(s.table[&s.me].clone(), s.table.values().cloned().collect())
};
let rblob = blob.read().unwrap();
let daddr = "0.0.0.0:0".parse().unwrap();
let orders: Vec<_> = table
.iter()
.filter(|v| {
if me.id == v.id {
false
} else if me.current_leader_id == v.id {
trace!("skip retransmit to leader {:?}", v.id);
false
} else if v.replicate_addr == daddr {
trace!("skip nodes that are not listening {:?}", v.id);
false
} else {
true
}
})
.collect();
let errs: Vec<_> = orders
.par_iter()
.map(|v| {
trace!("retransmit blob to {}", v.replicate_addr);
//TODO profile this, may need multiple sockets for par_iter
s.send_to(&rblob.data[..rblob.meta.size], &v.replicate_addr)
})
.collect();
for e in errs {
trace!("retransmit result {:?}", e);
match e {
Err(e) => return Err(Error::IO(e)),
_ => (),
}
}
Ok(())
}
fn random() -> u64 {
let rnd = SystemRandom::new();
let mut buf = [0u8; 8];
@ -134,7 +266,7 @@ impl Crdt {
rdr.read_u64::<LittleEndian>().unwrap()
}
fn get_updates_since(&self, v: u64) -> (PublicKey, u64, Vec<ReplicatedData>) {
trace!("get updates since {}", v);
//trace!("get updates since {}", v);
let data = self.table
.values()
.filter(|x| self.local[&x.id] > v)
@ -147,17 +279,21 @@ impl Crdt {
/// Create a random gossip request
/// # Returns
/// (A,B,C)
/// * A - Remote gossip address
/// * B - My gossip address
/// * C - Remote update index to request updates since
fn gossip_request(&self) -> (SocketAddr, Protocol) {
let n = (Self::random() as usize) % self.table.len();
trace!("random {:?} {}", &self.me[0..1], n);
/// (A,B)
/// * A - Address to send to
/// * B - RequestUpdates protocol message
fn gossip_request(&self) -> Result<(SocketAddr, Protocol)> {
if self.table.len() <= 1 {
return Err(Error::GeneralError);
}
let mut n = (Self::random() as usize) % self.table.len();
while self.table.values().nth(n).unwrap().id == self.me {
n = (Self::random() as usize) % self.table.len();
}
let v = self.table.values().nth(n).unwrap().clone();
let remote_update_index = *self.remote.get(&v.id).unwrap_or(&0);
let req = Protocol::RequestUpdates(remote_update_index, self.table[&self.me].clone());
(v.gossip_addr, req)
Ok((v.gossip_addr, req))
}
/// At random pick a node and try to get updated changes from them
@ -167,7 +303,7 @@ impl Crdt {
// Lock the object only to do this operation and not for any longer
// especially not when doing the `sock.send_to`
let (remote_gossip_addr, req) = obj.read().unwrap().gossip_request();
let (remote_gossip_addr, req) = obj.read().unwrap().gossip_request()?;
let sock = UdpSocket::bind("0.0.0.0:0")?;
// TODO this will get chatty, so we need to first ask for number of updates since
// then only ask for specific data that we dont have
@ -186,7 +322,7 @@ impl Crdt {
// TODO we need to punish/spam resist here
// sig verify the whole update and slash anyone who sends a bad update
for v in data {
self.import(&v);
self.insert(v.clone());
}
*self.remote.entry(from).or_insert(update_index) = update_index;
}
@ -222,7 +358,7 @@ impl Crdt {
let rsp = serialize(&Protocol::ReceiveUpdates(from, ups, data))?;
trace!("send_to {}", addr);
//TODO verify reqdata belongs to sender
obj.write().unwrap().import(&reqdata);
obj.write().unwrap().insert(reqdata);
sock.send_to(&rsp, addr).unwrap();
trace!("send_to done!");
}
@ -251,6 +387,9 @@ impl Crdt {
#[cfg(test)]
mod test {
use crdt::{Crdt, ReplicatedData};
use logger;
use packet::Blob;
use rayon::iter::*;
use signature::KeyPair;
use signature::KeyPairUtil;
use std::net::UdpSocket;
@ -259,6 +398,28 @@ mod test {
use std::thread::{sleep, JoinHandle};
use std::time::Duration;
fn test_node() -> (Crdt, UdpSocket, UdpSocket, UdpSocket) {
let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
let replicate = UdpSocket::bind("0.0.0.0:0").unwrap();
let serve = UdpSocket::bind("0.0.0.0:0").unwrap();
let pubkey = KeyPair::new().pubkey();
let d = ReplicatedData::new(
pubkey,
gossip.local_addr().unwrap(),
replicate.local_addr().unwrap(),
serve.local_addr().unwrap(),
);
let crdt = Crdt::new(d);
trace!(
"id: {} gossip: {} replicate: {} serve: {}",
crdt.my_data().id[0],
gossip.local_addr().unwrap(),
replicate.local_addr().unwrap(),
serve.local_addr().unwrap(),
);
(crdt, gossip, replicate, serve)
}
/// Test that the network converges.
/// Run until every node in the network has a full ReplicatedData set.
/// Check that nodes stop sending updates after all the ReplicatedData has been shared.
@ -271,12 +432,9 @@ mod test {
let exit = Arc::new(AtomicBool::new(false));
let listen: Vec<_> = (0..num)
.map(|_| {
let listener = UdpSocket::bind("0.0.0.0:0").unwrap();
let pubkey = KeyPair::new().pubkey();
let d = ReplicatedData::new(pubkey, listener.local_addr().unwrap());
let crdt = Crdt::new(d);
let (crdt, gossip, _, _) = test_node();
let c = Arc::new(RwLock::new(crdt));
let l = Crdt::listen(c.clone(), listener, exit.clone());
let l = Crdt::listen(c.clone(), gossip, exit.clone());
(c, l)
})
.collect();
@ -332,7 +490,7 @@ mod test {
let yv = listen[y].0.read().unwrap();
let mut d = yv.table[&yv.me].clone();
d.version = 0;
xv.insert(&d);
xv.insert(d);
}
});
}
@ -349,7 +507,7 @@ mod test {
let yv = listen[y].0.read().unwrap();
let mut d = yv.table[&yv.me].clone();
d.version = 0;
xv.insert(&d);
xv.insert(d);
}
});
}
@ -357,16 +515,89 @@ mod test {
/// Test that insert drops messages that are older
#[test]
fn insert_test() {
let mut d = ReplicatedData::new(KeyPair::new().pubkey(), "127.0.0.1:1234".parse().unwrap());
let mut d = ReplicatedData::new(
KeyPair::new().pubkey(),
"127.0.0.1:1234".parse().unwrap(),
"127.0.0.1:1235".parse().unwrap(),
"127.0.0.1:1236".parse().unwrap(),
);
assert_eq!(d.version, 0);
let mut crdt = Crdt::new(d.clone());
assert_eq!(crdt.table[&d.id].version, 0);
d.version = 2;
crdt.insert(&d);
crdt.insert(d.clone());
assert_eq!(crdt.table[&d.id].version, 2);
d.version = 1;
crdt.insert(&d);
crdt.insert(d.clone());
assert_eq!(crdt.table[&d.id].version, 2);
}
#[test]
pub fn test_crdt_retransmit() {
logger::setup();
trace!("c1:");
let (mut c1, s1, r1, e1) = test_node();
trace!("c2:");
let (mut c2, s2, r2, _) = test_node();
trace!("c3:");
let (mut c3, s3, r3, _) = test_node();
let c1_id = c1.my_data().id;
c1.set_leader(c1_id);
c2.insert(c1.my_data().clone());
c3.insert(c1.my_data().clone());
c2.set_leader(c1.my_data().id);
c3.set_leader(c1.my_data().id);
let exit = Arc::new(AtomicBool::new(false));
// Create listen threads
let a1 = Arc::new(RwLock::new(c1));
let t1 = Crdt::listen(a1.clone(), s1, exit.clone());
let a2 = Arc::new(RwLock::new(c2));
let t2 = Crdt::listen(a2.clone(), s2, exit.clone());
let a3 = Arc::new(RwLock::new(c3));
let t3 = Crdt::listen(a3.clone(), s3, exit.clone());
// Create gossip threads
let t1_gossip = Crdt::gossip(a1.clone(), exit.clone());
let t2_gossip = Crdt::gossip(a2.clone(), exit.clone());
let t3_gossip = Crdt::gossip(a3.clone(), exit.clone());
//wait to converge
trace!("waitng to converge:");
let mut done = false;
for _ in 0..10 {
done = a1.read().unwrap().table.len() == 3 && a2.read().unwrap().table.len() == 3
&& a3.read().unwrap().table.len() == 3;
if done {
break;
}
sleep(Duration::new(1, 0));
}
assert!(done);
let mut b = Blob::default();
b.meta.size = 10;
Crdt::retransmit(&a1, &Arc::new(RwLock::new(b)), &e1).unwrap();
let res: Vec<_> = [r1, r2, r3]
.into_par_iter()
.map(|s| {
let mut b = Blob::default();
s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
let res = s.recv_from(&mut b.data);
res.is_err() //true if failed to receive the retransmit packet
})
.collect();
        //true means the recv failed: r1 was the sender, so it should fail to
        //receive the packet, while r2 and r3 should receive it
assert_eq!(res, [true, false, false]);
exit.store(true, Ordering::Relaxed);
let threads = vec![t1, t2, t3, t1_gossip, t2_gossip, t3_gossip];
for t in threads.into_iter() {
t.join().unwrap();
}
}
}

View File

@ -130,11 +130,11 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
#[cfg(test)]
mod tests {
use accountant_skel::Request;
use bincode::serialize;
use ecdsa;
use packet::{Packet, Packets, SharedPackets};
use std::sync::RwLock;
use tpu::Request;
use transaction::test_tx;
use transaction::Transaction;

View File

@ -153,7 +153,7 @@ pub fn decode_blocks(data: &mut [&mut [u8]], coding: &[&[u8]], erasures: &[i32])
// Generate coding blocks in window from consumed to consumed+NUM_DATA
pub fn generate_coding(
re: &BlobRecycler,
window: &mut Vec<Option<SharedBlob>>,
window: &mut Vec<SharedBlob>,
consumed: usize,
) -> Result<()> {
let mut data_blobs = Vec::new();
@ -179,7 +179,7 @@ pub fn generate_coding(
let coding_end = consumed + NUM_CODED;
for i in coding_start..coding_end {
let n = i % window.len();
window[n] = Some(re.allocate());
window[n] = re.allocate();
        coding_blobs.push(window[n].clone());
}
for b in &coding_blobs {
@ -272,7 +272,6 @@ pub fn recover(
mod test {
use erasure;
use packet::{BlobRecycler, SharedBlob, PACKET_DATA_SIZE};
extern crate env_logger;
#[test]
pub fn test_coding() {

View File

@ -4,25 +4,28 @@
use entry::Entry;
use hash::Hash;
use recorder::{ExitReason, Recorder, Signal};
use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TryRecvError};
use std::sync::{Arc, Mutex};
use std::thread::{spawn, JoinHandle};
use std::time::Instant;
pub struct Historian {
pub sender: SyncSender<Signal>,
pub receiver: Receiver<Entry>,
pub output: Arc<Mutex<Receiver<Entry>>>,
pub thread_hdl: JoinHandle<ExitReason>,
}
impl Historian {
pub fn new(start_hash: &Hash, ms_per_tick: Option<u64>) -> Self {
let (sender, event_receiver) = sync_channel(10_000);
let (entry_sender, receiver) = sync_channel(10_000);
pub fn new(
event_receiver: Receiver<Signal>,
start_hash: &Hash,
ms_per_tick: Option<u64>,
) -> Self {
let (entry_sender, output) = sync_channel(10_000);
let thread_hdl =
Historian::create_recorder(*start_hash, ms_per_tick, event_receiver, entry_sender);
let loutput = Arc::new(Mutex::new(output));
Historian {
sender,
receiver,
output: loutput,
thread_hdl,
}
}
@ -48,6 +51,10 @@ impl Historian {
}
})
}
pub fn receive(self: &Self) -> Result<Entry, TryRecvError> {
self.output.lock().unwrap().try_recv()
}
}
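// Usage sketch for the reworked constructor (the event channel is now created
// by the caller; names below are illustrative, not from this patch):
//
// let (input, event_receiver) = sync_channel(10_000);
// let hist = Historian::new(event_receiver, &start_hash, None);
// input.send(Signal::Tick).unwrap();
// let entry = hist.receive(); // non-blocking try_recv on the shared output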
#[cfg(test)]
@ -59,24 +66,25 @@ mod tests {
#[test]
fn test_historian() {
let (input, event_receiver) = sync_channel(10);
let zero = Hash::default();
let hist = Historian::new(&zero, None);
let hist = Historian::new(event_receiver, &zero, None);
hist.sender.send(Signal::Tick).unwrap();
input.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
hist.sender.send(Signal::Tick).unwrap();
input.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
hist.sender.send(Signal::Tick).unwrap();
input.send(Signal::Tick).unwrap();
let entry0 = hist.receiver.recv().unwrap();
let entry1 = hist.receiver.recv().unwrap();
let entry2 = hist.receiver.recv().unwrap();
let entry0 = hist.output.lock().unwrap().recv().unwrap();
let entry1 = hist.output.lock().unwrap().recv().unwrap();
let entry2 = hist.output.lock().unwrap().recv().unwrap();
assert_eq!(entry0.num_hashes, 0);
assert_eq!(entry1.num_hashes, 0);
assert_eq!(entry2.num_hashes, 0);
drop(hist.sender);
drop(input);
assert_eq!(
hist.thread_hdl.join().unwrap(),
ExitReason::RecvDisconnected
@ -87,10 +95,11 @@ mod tests {
#[test]
fn test_historian_closed_sender() {
let (input, event_receiver) = sync_channel(10);
let zero = Hash::default();
let hist = Historian::new(&zero, None);
drop(hist.receiver);
hist.sender.send(Signal::Tick).unwrap();
let hist = Historian::new(event_receiver, &zero, None);
drop(hist.output);
input.send(Signal::Tick).unwrap();
assert_eq!(
hist.thread_hdl.join().unwrap(),
ExitReason::SendDisconnected
@ -99,12 +108,13 @@ mod tests {
#[test]
fn test_ticking_historian() {
let (input, event_receiver) = sync_channel(10);
let zero = Hash::default();
let hist = Historian::new(&zero, Some(20));
let hist = Historian::new(event_receiver, &zero, Some(20));
sleep(Duration::from_millis(300));
hist.sender.send(Signal::Tick).unwrap();
drop(hist.sender);
let entries: Vec<Entry> = hist.receiver.iter().collect();
input.send(Signal::Tick).unwrap();
drop(input);
let entries: Vec<Entry> = hist.output.lock().unwrap().iter().collect();
assert!(entries.len() > 1);
// Ensure the ID is not the seed.

View File

@ -1,7 +1,5 @@
#![cfg_attr(feature = "unstable", feature(test))]
pub mod accountant;
pub mod accountant_skel;
pub mod accountant_stub;
pub mod crdt;
pub mod ecdsa;
pub mod entry;
@ -11,6 +9,7 @@ pub mod event;
pub mod hash;
pub mod historian;
pub mod ledger;
pub mod logger;
pub mod mint;
pub mod packet;
pub mod plan;
@ -18,7 +17,9 @@ pub mod recorder;
pub mod result;
pub mod signature;
pub mod streamer;
pub mod subscribers;
pub mod thin_client;
pub mod timing;
pub mod tpu;
pub mod transaction;
extern crate bincode;
extern crate byteorder;
@ -41,3 +42,5 @@ extern crate futures;
#[cfg(test)]
#[macro_use]
extern crate matches;
extern crate rand;

src/logger.rs Normal file
View File

@ -0,0 +1,11 @@
use std::sync::{Once, ONCE_INIT};
extern crate env_logger;
static INIT: Once = ONCE_INIT;
/// Setup function that is only run once, even if called multiple times.
pub fn setup() {
INIT.call_once(|| {
let _ = env_logger::init();
});
}
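// Usage sketch (an assumption about intended use, not part of the patch):
// call `logger::setup()` at the top of each test so env_logger is initialized
// exactly once, regardless of which test runs first.
//
// #[test]
// fn my_test() {
//     logger::setup();
//     trace!("visible with RUST_LOG=solana=trace");
// }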

View File

@ -1,12 +1,14 @@
//! The `packet` module defines data structures and methods to pull data from the network.
use bincode::{deserialize, serialize};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use result::{Error, Result};
use signature::PublicKey;
use std::collections::VecDeque;
use std::fmt;
use std::io;
use std::mem::size_of;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
use std::sync::{Arc, Mutex, RwLock};
use std::mem::size_of;
pub type SharedPackets = Arc<RwLock<Packets>>;
pub type SharedBlob = Arc<RwLock<Blob>>;
@ -14,7 +16,8 @@ pub type PacketRecycler = Recycler<Packets>;
pub type BlobRecycler = Recycler<Blob>;
pub const NUM_PACKETS: usize = 1024 * 8;
const BLOB_SIZE: usize = 64 * 1024;
pub const BLOB_SIZE: usize = 64 * 1024;
pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_ID_END;
pub const PACKET_DATA_SIZE: usize = 256;
pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;
@ -176,13 +179,14 @@ impl Packets {
socket.set_nonblocking(false)?;
for p in &mut self.packets {
p.meta.size = 0;
trace!("receiving");
match socket.recv_from(&mut p.data) {
Err(_) if i > 0 => {
trace!("got {:?} messages", i);
debug!("got {:?} messages", i);
break;
}
Err(e) => {
info!("recv_from err {:?}", e);
trace!("recv_from err {:?}", e);
return Err(Error::IO(e));
}
Ok((nrecv, from)) => {
@ -200,6 +204,7 @@ impl Packets {
pub fn recv_from(&mut self, socket: &UdpSocket) -> Result<()> {
let sz = self.run_read_from(socket)?;
self.packets.resize(sz, Packet::default());
debug!("recv_from: {}", sz);
Ok(())
}
pub fn send_to(&self, socket: &UdpSocket) -> Result<()> {
@ -211,28 +216,41 @@ impl Packets {
}
}
const BLOB_INDEX_SIZE: usize = size_of::<u64>();
const BLOB_INDEX_END: usize = size_of::<u64>();
const BLOB_ID_END: usize = BLOB_INDEX_END + size_of::<usize>() + size_of::<PublicKey>();
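// Wire layout implied by the constants above (a sketch, assuming a 64-bit
// usize and a 32-byte PublicKey; the extra usize is presumed to be bincode's
// length prefix for the serialized key):
//
//   bytes [0, BLOB_INDEX_END)            index, u64 little-endian
//   bytes [BLOB_INDEX_END, BLOB_ID_END)  sender id (serialized PublicKey)
//   bytes [BLOB_ID_END, BLOB_SIZE)       payload, via data()/data_mut()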
impl Blob {
pub fn get_index(&self) -> Result<u64> {
let mut rdr = io::Cursor::new(&self.data[0..BLOB_INDEX_SIZE]);
let mut rdr = io::Cursor::new(&self.data[0..BLOB_INDEX_END]);
let r = rdr.read_u64::<LittleEndian>()?;
Ok(r)
}
pub fn set_index(&mut self, ix: u64) -> Result<()> {
let mut wtr = vec![];
wtr.write_u64::<LittleEndian>(ix)?;
self.data[..BLOB_INDEX_SIZE].clone_from_slice(&wtr);
self.data[..BLOB_INDEX_END].clone_from_slice(&wtr);
Ok(())
}
pub fn get_id(&self) -> Result<PublicKey> {
let e = deserialize(&self.data[BLOB_INDEX_END..BLOB_ID_END])?;
Ok(e)
}
pub fn set_id(&mut self, id: PublicKey) -> Result<()> {
let wtr = serialize(&id)?;
self.data[BLOB_INDEX_END..BLOB_ID_END].clone_from_slice(&wtr);
Ok(())
}
pub fn data(&self) -> &[u8] {
&self.data[BLOB_INDEX_SIZE..]
&self.data[BLOB_ID_END..]
}
pub fn data_mut(&mut self) -> &mut [u8] {
&mut self.data[BLOB_INDEX_SIZE..]
&mut self.data[BLOB_ID_END..]
}
pub fn set_size(&mut self, size: usize) {
self.meta.size = size + BLOB_INDEX_SIZE;
self.meta.size = size + BLOB_ID_END;
}
pub fn recv_from(re: &BlobRecycler, socket: &UdpSocket) -> Result<VecDeque<SharedBlob>> {
let mut v = VecDeque::new();

View File

@ -8,6 +8,7 @@
use entry::{create_entry_mut, Entry};
use event::Event;
use hash::{hash, Hash};
use packet::BLOB_DATA_SIZE;
use std::mem;
use std::sync::mpsc::{Receiver, SyncSender, TryRecvError};
use std::time::{Duration, Instant};
@ -79,6 +80,13 @@ impl Recorder {
}
Signal::Event(event) => {
self.events.push(event);
                    // Record an entry early if we anticipate its serialized size will
                    // be larger than 64 KB. At the time of this writing, we assume each
                    // event will be well under 256 bytes.
if self.events.len() >= BLOB_DATA_SIZE / 256 {
self.record_entry()?;
}
}
},
Err(TryRecvError::Empty) => return Ok(()),
@ -87,3 +95,34 @@ impl Recorder {
}
}
}
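// Back-of-envelope for the cap above (assuming a 48-byte blob header on a
// 64-bit target): BLOB_DATA_SIZE = 64 * 1024 - 48 = 65_488 bytes, so the
// recorder cuts an entry after 65_488 / 256 = 255 events, keeping each
// serialized entry inside a single blob.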
#[cfg(test)]
mod tests {
use super::*;
use bincode::serialize;
use signature::{KeyPair, KeyPairUtil};
use std::sync::mpsc::sync_channel;
use transaction::Transaction;
#[test]
fn test_sub64k_entry_size() {
let (signal_sender, signal_receiver) = sync_channel(500);
let (entry_sender, entry_receiver) = sync_channel(10);
let zero = Hash::default();
let mut recorder = Recorder::new(signal_receiver, entry_sender, zero);
let alice_keypair = KeyPair::new();
let bob_pubkey = KeyPair::new().pubkey();
for _ in 0..256 {
let tx = Transaction::new(&alice_keypair, bob_pubkey, 1, zero);
let event = Event::Transaction(tx);
signal_sender.send(Signal::Event(event)).unwrap();
}
recorder.process_events(Instant::now(), None).unwrap();
drop(recorder.sender);
let entries: Vec<_> = entry_receiver.iter().collect();
assert_eq!(entries.len(), 1);
assert!(serialize(&entries[0]).unwrap().len() <= 65_536);
}
}

View File

@ -1,10 +1,10 @@
//! The `result` module exposes a Result type that propagates one of many different Error types.
use accountant;
use bincode;
use serde_json;
use std;
use std::any::Any;
use accountant;
#[derive(Debug)]
pub enum Error {
@ -18,6 +18,7 @@ pub enum Error {
AccountingError(accountant::AccountingError),
SendError,
Services,
GeneralError,
}
pub type Result<T> = std::result::Result<T, Error>;

View File

@ -1,4 +1,7 @@
//! The `streamer` module defines a set of services for efficiently pulling data from UDP sockets.
use crdt::Crdt;
#[cfg(feature = "erasure")]
use erasure;
use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedPackets, NUM_BLOBS};
use result::Result;
use std::collections::VecDeque;
@ -8,7 +11,6 @@ use std::sync::mpsc;
use std::sync::{Arc, RwLock};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use subscribers::Subscribers;
pub type PacketReceiver = mpsc::Receiver<SharedPackets>;
pub type PacketSender = mpsc::Sender<SharedPackets>;
@ -99,17 +101,14 @@ pub fn blob_receiver(
if exit.load(Ordering::Relaxed) {
break;
}
let ret = recv_blobs(&recycler, &sock, &s);
if ret.is_err() {
break;
}
let _ = recv_blobs(&recycler, &sock, &s);
});
Ok(t)
}
fn recv_window(
window: &mut Vec<Option<SharedBlob>>,
subs: &Arc<RwLock<Subscribers>>,
crdt: &Arc<RwLock<Crdt>>,
recycler: &BlobRecycler,
consumed: &mut usize,
r: &BlobReceiver,
@ -118,24 +117,25 @@ fn recv_window(
) -> Result<()> {
let timer = Duration::new(1, 0);
let mut dq = r.recv_timeout(timer)?;
let leader_id = crdt.read().unwrap().leader_data().id;
while let Ok(mut nq) = r.try_recv() {
dq.append(&mut nq)
}
{
//retransmit all leader blocks
let mut retransmitq = VecDeque::new();
let rsubs = subs.read().unwrap();
for b in &dq {
let p = b.read().unwrap();
            //TODO this check isn't safe against adversarial packets
//we need to maintain a sequence window
trace!(
"idx: {} addr: {:?} leader: {:?}",
"idx: {} addr: {:?} id: {:?} leader: {:?}",
p.get_index().unwrap(),
p.get_id().unwrap(),
p.meta.addr(),
rsubs.leader.addr
leader_id
);
if p.meta.addr() == rsubs.leader.addr {
if p.get_id().unwrap() == leader_id {
//TODO
                //need to copy the retransmitted blob
                //otherwise we get into races with which thread
@ -195,7 +195,7 @@ fn recv_window(
pub fn window(
exit: Arc<AtomicBool>,
subs: Arc<RwLock<Subscribers>>,
crdt: Arc<RwLock<Crdt>>,
recycler: BlobRecycler,
r: BlobReceiver,
s: BlobSender,
@ -210,7 +210,7 @@ pub fn window(
}
let _ = recv_window(
&mut window,
&subs,
&crdt,
&recycler,
&mut consumed,
&r,
@ -221,8 +221,57 @@ pub fn window(
})
}
fn broadcast(
crdt: &Arc<RwLock<Crdt>>,
recycler: &BlobRecycler,
r: &BlobReceiver,
sock: &UdpSocket,
transmit_index: &mut u64,
) -> Result<()> {
let timer = Duration::new(1, 0);
let mut dq = r.recv_timeout(timer)?;
while let Ok(mut nq) = r.try_recv() {
dq.append(&mut nq);
}
    let mut blobs: Vec<_> = dq.into_iter().collect();
    // Append erasure codes to the list of blobs, allowing receivers to reconstruct the stream.
    #[cfg(feature = "erasure")]
    erasure::generate_codes(&mut blobs);
    Crdt::broadcast(crdt, &blobs, sock, transmit_index)?;
while let Some(b) = blobs.pop() {
recycler.recycle(b);
}
Ok(())
}
/// Service to broadcast messages from the leader to layer 1 nodes.
/// See `crdt` for network layer definitions.
/// # Arguments
/// * `sock` - Socket to send from.
/// * `exit` - Boolean to signal system exit.
/// * `crdt` - CRDT structure
/// * `recycler` - Blob recycler.
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
pub fn broadcaster(
sock: UdpSocket,
exit: Arc<AtomicBool>,
crdt: Arc<RwLock<Crdt>>,
recycler: BlobRecycler,
r: BlobReceiver,
) -> JoinHandle<()> {
spawn(move || {
let mut transmit_index = 0;
loop {
if exit.load(Ordering::Relaxed) {
break;
}
let _ = broadcast(&crdt, &recycler, &r, &sock, &mut transmit_index);
}
})
}
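// Wiring sketch for the broadcaster service (illustrative names; assumes the
// leader's crdt is already populated and `broadcast_sock` is bound):
//
// let (blob_sender, blob_receiver) = channel();
// let t_broadcast = broadcaster(
//     broadcast_sock,
//     exit.clone(),
//     crdt.clone(),
//     BlobRecycler::default(),
//     blob_receiver,
// );
// blob_sender.send(blobs).unwrap(); // VecDeque<SharedBlob> to fan out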
fn retransmit(
subs: &Arc<RwLock<Subscribers>>,
crdt: &Arc<RwLock<Crdt>>,
recycler: &BlobRecycler,
r: &BlobReceiver,
sock: &UdpSocket,
@ -233,10 +282,8 @@ fn retransmit(
dq.append(&mut nq);
}
{
let wsubs = subs.read().unwrap();
for b in &dq {
let mut mb = b.write().unwrap();
wsubs.retransmit(&mut mb, sock)?;
Crdt::retransmit(&crdt, b, sock)?;
}
}
while let Some(b) = dq.pop_front() {
@ -246,26 +293,30 @@ fn retransmit(
}
/// Service to retransmit messages from the leader to layer 1 nodes.
/// See `subscribers` for network layer definitions.
/// See `crdt` for network layer definitions.
/// # Arguments
/// * `sock` - Socket to read from. Read timeout is set to 1.
/// * `exit` - Boolean to signal system exit.
/// * `subs` - Shared Subscriber structure. This structure needs to be updated and populated by
/// the accountant.
/// * `crdt` - This structure needs to be updated and populated by the accountant and via gossip.
/// * `recycler` - Blob recycler.
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
pub fn retransmitter(
sock: UdpSocket,
exit: Arc<AtomicBool>,
subs: Arc<RwLock<Subscribers>>,
crdt: Arc<RwLock<Crdt>>,
recycler: BlobRecycler,
r: BlobReceiver,
) -> JoinHandle<()> {
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;
spawn(move || {
trace!("retransmitter started");
loop {
if exit.load(Ordering::Relaxed) {
break;
}
// TODO: handle this error
let _ = retransmit(&crdt, &recycler, &r, &sock);
}
let _ = retransmit(&subs, &recycler, &r, &sock);
trace!("exiting retransmitter");
})
}
@ -356,7 +407,7 @@ mod bench {
        let time = elapsed.as_secs() * 1_000_000_000 + elapsed.subsec_nanos() as u64;
        let ftime = (time as f64) / 1_000_000_000f64;
let fcount = (end_val - start_val) as f64;
println!("performance: {:?}", fcount / ftime);
trace!("performance: {:?}", fcount / ftime);
exit.store(true, Ordering::Relaxed);
t_reader.join()?;
t_producer1.join()?;
@ -373,7 +424,11 @@ mod bench {
#[cfg(test)]
mod test {
use crdt::{Crdt, ReplicatedData};
use logger;
use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE};
use signature::KeyPair;
use signature::KeyPairUtil;
use std::collections::VecDeque;
use std::io;
use std::io::Write;
@ -381,17 +436,17 @@ mod test {
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::time::Duration;
use streamer::{blob_receiver, receiver, responder, retransmitter, window, BlobReceiver,
PacketReceiver};
use subscribers::{Node, Subscribers};
use streamer::{blob_receiver, receiver, responder, retransmitter, window};
use streamer::{BlobReceiver, PacketReceiver};
fn get_msgs(r: PacketReceiver, num: &mut usize) {
for _t in 0..5 {
let timer = Duration::new(1, 0);
match r.recv_timeout(timer) {
Ok(m) => *num += m.read().unwrap().packets.len(),
e => println!("error {:?}", e),
e => info!("error {:?}", e),
}
if *num == 10 {
break;
@ -445,7 +500,7 @@ mod test {
}
*num += m.len();
}
e => println!("error {:?}", e),
e => info!("error {:?}", e),
}
if *num == 10 {
break;
@ -455,15 +510,23 @@ mod test {
#[test]
pub fn window_send_test() {
let pubkey_me = KeyPair::new().pubkey();
let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
let addr = read.local_addr().unwrap();
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
let serve = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false));
let subs = Arc::new(RwLock::new(Subscribers::new(
Node::default(),
Node::new([0; 8], 0, send.local_addr().unwrap()),
&[],
)));
let rep_data = ReplicatedData::new(
pubkey_me,
read.local_addr().unwrap(),
send.local_addr().unwrap(),
serve.local_addr().unwrap(),
);
let mut crdt_me = Crdt::new(rep_data);
let me_id = crdt_me.my_data().id;
crdt_me.set_leader(me_id);
let subs = Arc::new(RwLock::new(crdt_me));
let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
let t_receiver =
@ -487,6 +550,7 @@ mod test {
let b_ = b.clone();
let mut w = b.write().unwrap();
w.set_index(i).unwrap();
w.set_id(me_id).unwrap();
assert_eq!(i, w.get_index().unwrap());
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(&addr);
@ -507,43 +571,102 @@ mod test {
t_window.join().expect("join");
}
fn test_node() -> (Arc<RwLock<Crdt>>, UdpSocket, UdpSocket, UdpSocket) {
let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
let replicate = UdpSocket::bind("127.0.0.1:0").unwrap();
let serve = UdpSocket::bind("127.0.0.1:0").unwrap();
let pubkey = KeyPair::new().pubkey();
let d = ReplicatedData::new(
pubkey,
gossip.local_addr().unwrap(),
replicate.local_addr().unwrap(),
serve.local_addr().unwrap(),
);
let crdt = Crdt::new(d);
trace!(
"id: {} gossip: {} replicate: {} serve: {}",
crdt.my_data().id[0],
gossip.local_addr().unwrap(),
replicate.local_addr().unwrap(),
serve.local_addr().unwrap(),
);
(Arc::new(RwLock::new(crdt)), gossip, replicate, serve)
}
#[test]
//retransmit from leader to replicate target
pub fn retransmit() {
let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
logger::setup();
trace!("retransmit test start");
let exit = Arc::new(AtomicBool::new(false));
let subs = Arc::new(RwLock::new(Subscribers::new(
Node::default(),
Node::default(),
&[Node::new([0; 8], 1, read.local_addr().unwrap())],
)));
let (crdt_leader, sock_gossip_leader, _, sock_leader) = test_node();
let (crdt_target, sock_gossip_target, sock_replicate_target, _) = test_node();
let leader_data = crdt_leader.read().unwrap().my_data().clone();
crdt_leader.write().unwrap().insert(leader_data.clone());
crdt_leader.write().unwrap().set_leader(leader_data.id);
let t_crdt_leader_g = Crdt::gossip(crdt_leader.clone(), exit.clone());
let t_crdt_leader_l = Crdt::listen(crdt_leader.clone(), sock_gossip_leader, exit.clone());
crdt_target.write().unwrap().insert(leader_data.clone());
crdt_target.write().unwrap().set_leader(leader_data.id);
let t_crdt_target_g = Crdt::gossip(crdt_target.clone(), exit.clone());
let t_crdt_target_l = Crdt::listen(crdt_target.clone(), sock_gossip_target, exit.clone());
//leader retransmitter
let (s_retransmit, r_retransmit) = channel();
let blob_recycler = BlobRecycler::default();
let saddr = send.local_addr().unwrap();
let saddr = sock_leader.local_addr().unwrap();
let t_retransmit = retransmitter(
send,
sock_leader,
exit.clone(),
subs,
crdt_leader.clone(),
blob_recycler.clone(),
r_retransmit,
);
//target receiver
let (s_blob_receiver, r_blob_receiver) = channel();
let t_receiver = blob_receiver(
exit.clone(),
blob_recycler.clone(),
sock_replicate_target,
s_blob_receiver,
).unwrap();
for _ in 0..10 {
let done = crdt_target.read().unwrap().update_index == 2
&& crdt_leader.read().unwrap().update_index == 2;
if done {
break;
}
let timer = Duration::new(1, 0);
sleep(timer);
}
//send the data through
let mut bq = VecDeque::new();
let b = blob_recycler.allocate();
b.write().unwrap().meta.size = 10;
bq.push_back(b);
s_retransmit.send(bq).unwrap();
let (s_blob_receiver, r_blob_receiver) = channel();
let t_receiver =
blob_receiver(exit.clone(), blob_recycler.clone(), read, s_blob_receiver).unwrap();
let mut oq = r_blob_receiver.recv().unwrap();
let timer = Duration::new(5, 0);
trace!("Waiting for timeout");
let mut oq = r_blob_receiver.recv_timeout(timer).unwrap();
assert_eq!(oq.len(), 1);
let o = oq.pop_front().unwrap();
let ro = o.read().unwrap();
assert_eq!(ro.meta.size, 10);
assert_eq!(ro.meta.addr(), saddr);
exit.store(true, Ordering::Relaxed);
t_receiver.join().expect("join");
t_retransmit.join().expect("join");
let threads = vec![
t_receiver,
t_retransmit,
t_crdt_target_g,
t_crdt_target_l,
t_crdt_leader_g,
t_crdt_leader_l,
];
for t in threads {
t.join().unwrap();
}
}
}

View File

@ -1,149 +0,0 @@
//! The `subscribers` module defines data structures to keep track of nodes on the network.
//! The network is arranged in layers:
//!
//! * layer 0 - Leader.
//! * layer 1 - As many nodes as we can fit to quickly get reliable `2/3+1` finality
//! * layer 2 - Everyone else; if layer 1 is `2^10`, layer 2 should be able to fit `2^20` nodes.
//!
//! It's up to the external state machine to keep this updated.
use packet::Blob;
use rayon::prelude::*;
use result::{Error, Result};
use std::net::{SocketAddr, UdpSocket};
use std::fmt;
#[derive(Clone, PartialEq)]
pub struct Node {
pub id: [u64; 8],
pub weight: u64,
pub addr: SocketAddr,
}
//sockaddr doesn't implement default
impl Default for Node {
fn default() -> Node {
Node {
id: [0; 8],
weight: 0,
addr: "0.0.0.0:0".parse().unwrap(),
}
}
}
impl Node {
pub fn new(id: [u64; 8], weight: u64, addr: SocketAddr) -> Node {
Node { id, weight, addr }
}
fn key(&self) -> i64 {
(self.weight as i64).checked_neg().unwrap()
}
}
impl fmt::Debug for Node {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Node {{ weight: {} addr: {} }}", self.weight, self.addr)
}
}
pub struct Subscribers {
data: Vec<Node>,
pub me: Node,
pub leader: Node,
}
impl Subscribers {
pub fn new(me: Node, leader: Node, network: &[Node]) -> Subscribers {
let mut h = Subscribers {
data: vec![],
me: me.clone(),
leader: leader.clone(),
};
h.insert(&[me, leader]);
h.insert(network);
h
}
/// retransmit messages from the leader to layer 1 nodes
pub fn retransmit(&self, blob: &mut Blob, s: &UdpSocket) -> Result<()> {
let errs: Vec<_> = self.data
.par_iter()
.map(|i| {
if self.me == *i {
return Ok(0);
}
if self.leader == *i {
return Ok(0);
}
trace!("retransmit blob to {}", i.addr);
s.send_to(&blob.data[..blob.meta.size], &i.addr)
})
.collect();
for e in errs {
trace!("retransmit result {:?}", e);
match e {
Err(e) => return Err(Error::IO(e)),
_ => (),
}
}
Ok(())
}
pub fn insert(&mut self, ns: &[Node]) {
self.data.extend_from_slice(ns);
self.data.sort_by_key(Node::key);
}
}
#[cfg(test)]
mod test {
use packet::Blob;
use rayon::prelude::*;
use std::net::UdpSocket;
use std::time::Duration;
use subscribers::{Node, Subscribers};
#[test]
pub fn subscriber() {
let mut me = Node::default();
me.weight = 10;
let mut leader = Node::default();
leader.weight = 11;
let mut s = Subscribers::new(me, leader, &[]);
assert_eq!(s.data.len(), 2);
assert_eq!(s.data[0].weight, 11);
assert_eq!(s.data[1].weight, 10);
let mut n = Node::default();
n.weight = 12;
s.insert(&[n]);
assert_eq!(s.data.len(), 3);
assert_eq!(s.data[0].weight, 12);
}
#[test]
pub fn retransmit() {
let s1 = UdpSocket::bind("127.0.0.1:0").expect("bind");
let s2 = UdpSocket::bind("127.0.0.1:0").expect("bind");
let s3 = UdpSocket::bind("127.0.0.1:0").expect("bind");
let n1 = Node::new([0; 8], 0, s1.local_addr().unwrap());
let n2 = Node::new([0; 8], 0, s2.local_addr().unwrap());
let mut s = Subscribers::new(n1.clone(), n2.clone(), &[]);
let n3 = Node::new([0; 8], 0, s3.local_addr().unwrap());
s.insert(&[n3]);
let mut b = Blob::default();
b.meta.size = 10;
let s4 = UdpSocket::bind("127.0.0.1:0").expect("bind");
s.retransmit(&mut b, &s4).unwrap();
let res: Vec<_> = [s1, s2, s3]
.into_par_iter()
.map(|s| {
let mut b = Blob::default();
s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
s.recv_from(&mut b.data).is_err()
})
.collect();
assert_eq!(res, [true, true, false]);
let mut n4 = Node::default();
n4.addr = "255.255.255.255:1".parse().unwrap();
s.insert(&[n4]);
assert!(s.retransmit(&mut b, &s4).is_err());
}
}

src/thin_client.rs Normal file
View File

@ -0,0 +1,356 @@
//! The `thin_client` module is a client-side object that interfaces with
//! a server-side TPU. Client code should use this object instead of writing
//! messages to the network directly. The binary encoding of its messages is
//! unstable and may change in future releases.
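//!
//! A typical flow looks like the sketch below (illustrative only; `leader_addr`
//! is assumed to be the server's serve-port address):
//!
//! ```ignore
//! let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
//! let mut client = ThinClient::new(leader_addr, socket);
//! let last_id = client.get_last_id().wait().unwrap();
//! let _sig = client.transfer(500, &alice_keypair, bob_pubkey, &last_id).unwrap();
//! let balance = client.get_balance(&bob_pubkey).unwrap();
//! ```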
use bincode::{deserialize, serialize};
use futures::future::{ok, FutureResult};
use hash::Hash;
use signature::{KeyPair, PublicKey, Signature};
use std::collections::HashMap;
use std::io;
use std::net::{SocketAddr, UdpSocket};
use tpu::{Request, Response, Subscription};
use transaction::Transaction;
pub struct ThinClient {
pub addr: SocketAddr,
pub socket: UdpSocket,
last_id: Option<Hash>,
num_events: u64,
balances: HashMap<PublicKey, Option<i64>>,
}
impl ThinClient {
/// Create a new ThinClient that will interface with Tpu
/// over `socket`. To receive responses, the caller must bind `socket`
/// to a public address before invoking ThinClient methods.
pub fn new(addr: SocketAddr, socket: UdpSocket) -> Self {
let client = ThinClient {
            addr,
socket,
last_id: None,
num_events: 0,
balances: HashMap::new(),
};
client.init();
client
}
pub fn init(&self) {
let subscriptions = vec![Subscription::EntryInfo];
let req = Request::Subscribe { subscriptions };
let data = serialize(&req).expect("serialize Subscribe");
trace!("subscribing to {}", self.addr);
let _res = self.socket.send_to(&data, &self.addr);
}
pub fn recv_response(&self) -> io::Result<Response> {
let mut buf = vec![0u8; 1024];
info!("start recv_from");
self.socket.recv_from(&mut buf)?;
info!("end recv_from");
let resp = deserialize(&buf).expect("deserialize balance");
Ok(resp)
}
pub fn process_response(&mut self, resp: Response) {
match resp {
Response::Balance { key, val } => {
info!("Response balance {:?} {:?}", key, val);
self.balances.insert(key, val);
}
Response::EntryInfo(entry_info) => {
trace!("Response entry_info {:?}", entry_info.id);
self.last_id = Some(entry_info.id);
self.num_events += entry_info.num_events;
}
}
}
/// Send a signed Transaction to the server for processing. This method
/// does not wait for a response.
pub fn transfer_signed(&self, tr: Transaction) -> io::Result<usize> {
let req = Request::Transaction(tr);
let data = serialize(&req).unwrap();
self.socket.send_to(&data, &self.addr)
}
/// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
pub fn transfer(
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
last_id: &Hash,
) -> io::Result<Signature> {
let tr = Transaction::new(keypair, to, n, *last_id);
let sig = tr.sig;
self.transfer_signed(tr).map(|_| sig)
}
/// Request the balance of the user holding `pubkey`. This method blocks
/// until the server sends a response. If the response packet is dropped
/// by the network, this method will hang indefinitely.
pub fn get_balance(&mut self, pubkey: &PublicKey) -> io::Result<i64> {
info!("get_balance");
let req = Request::GetBalance { key: *pubkey };
let data = serialize(&req).expect("serialize GetBalance");
self.socket
.send_to(&data, &self.addr)
.expect("buffer error");
let mut done = false;
while !done {
let resp = self.recv_response()?;
info!("recv_response {:?}", resp);
if let &Response::Balance { ref key, .. } = &resp {
done = key == pubkey;
}
self.process_response(resp);
}
self.balances[pubkey].ok_or(io::Error::new(io::ErrorKind::Other, "nokey"))
}
/// Request the last Entry ID from the server. This method blocks
/// until the server sends a response.
pub fn get_last_id(&mut self) -> FutureResult<Hash, ()> {
self.transaction_count();
ok(self.last_id.unwrap_or(Hash::default()))
}
/// Return the number of transactions the server processed since creating
/// this client instance.
pub fn transaction_count(&mut self) -> u64 {
// Wait for at least one EntryInfo.
let mut done = false;
while !done {
let resp = self.recv_response().expect("recv response");
if let &Response::EntryInfo(_) = &resp {
done = true;
}
self.process_response(resp);
}
// Then take the rest.
self.socket.set_nonblocking(true).expect("set nonblocking");
loop {
match self.recv_response() {
Err(_) => break,
Ok(resp) => self.process_response(resp),
}
}
self.socket.set_nonblocking(false).expect("set blocking");
self.num_events
}
}
#[cfg(test)]
mod tests {
use super::*;
use accountant::Accountant;
use crdt::{Crdt, ReplicatedData};
use futures::Future;
use historian::Historian;
use logger;
use mint::Mint;
use signature::{KeyPair, KeyPairUtil};
use std::io::sink;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::sync_channel;
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::time::Duration;
use std::time::Instant;
use tpu::Tpu;
// TODO: Figure out why this test sometimes hangs on TravisCI.
#[test]
fn test_thin_client() {
logger::setup();
let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
let serve = UdpSocket::bind("0.0.0.0:0").unwrap();
let skinny = UdpSocket::bind("0.0.0.0:0").unwrap();
let addr = serve.local_addr().unwrap();
let pubkey = KeyPair::new().pubkey();
let d = ReplicatedData::new(
pubkey,
gossip.local_addr().unwrap(),
"0.0.0.0:0".parse().unwrap(),
serve.local_addr().unwrap(),
);
let alice = Mint::new(10_000);
let acc = Accountant::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let (input, event_receiver) = sync_channel(10);
let historian = Historian::new(event_receiver, &alice.last_id(), Some(30));
let acc = Arc::new(Tpu::new(acc, input, historian));
let threads = Tpu::serve(&acc, d, serve, skinny, gossip, exit.clone(), sink()).unwrap();
sleep(Duration::from_millis(300));
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut acc = ThinClient::new(addr, socket);
let last_id = acc.get_last_id().wait().unwrap();
let _sig = acc.transfer(500, &alice.keypair(), bob_pubkey, &last_id)
.unwrap();
let mut balance;
let now = Instant::now();
loop {
balance = acc.get_balance(&bob_pubkey);
if balance.is_ok() {
break;
}
if now.elapsed().as_secs() > 0 {
break;
}
}
assert_eq!(balance.unwrap(), 500);
exit.store(true, Ordering::Relaxed);
for t in threads {
t.join().unwrap();
}
}
fn test_node() -> (ReplicatedData, UdpSocket, UdpSocket, UdpSocket, UdpSocket) {
let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
let serve = UdpSocket::bind("0.0.0.0:0").unwrap();
let skinny = UdpSocket::bind("0.0.0.0:0").unwrap();
let replicate = UdpSocket::bind("0.0.0.0:0").unwrap();
let pubkey = KeyPair::new().pubkey();
let leader = ReplicatedData::new(
pubkey,
gossip.local_addr().unwrap(),
replicate.local_addr().unwrap(),
serve.local_addr().unwrap(),
);
(leader, gossip, serve, replicate, skinny)
}
#[test]
fn test_multi_node() {
logger::setup();
info!("test_multi_node");
let leader = test_node();
let replicant = test_node();
let alice = Mint::new(10_000);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let leader_acc = {
let (input, event_receiver) = sync_channel(10);
let historian = Historian::new(event_receiver, &alice.last_id(), Some(30));
let acc = Accountant::new(&alice);
Arc::new(Tpu::new(acc, input, historian))
};
let replicant_acc = {
let (input, event_receiver) = sync_channel(10);
let historian = Historian::new(event_receiver, &alice.last_id(), Some(30));
let acc = Accountant::new(&alice);
Arc::new(Tpu::new(acc, input, historian))
};
let leader_threads = Tpu::serve(
&leader_acc,
leader.0.clone(),
leader.2,
leader.4,
leader.1,
exit.clone(),
sink(),
).unwrap();
let replicant_threads = Tpu::replicate(
&replicant_acc,
replicant.0.clone(),
replicant.1,
replicant.2,
replicant.3,
leader.0.clone(),
exit.clone(),
).unwrap();
        //let's spy on the network
let (mut spy, spy_gossip, _, _, _) = test_node();
let daddr = "0.0.0.0:0".parse().unwrap();
spy.replicate_addr = daddr;
spy.serve_addr = daddr;
let mut spy_crdt = Crdt::new(spy);
spy_crdt.insert(leader.0.clone());
spy_crdt.set_leader(leader.0.id);
let spy_ref = Arc::new(RwLock::new(spy_crdt));
let t_spy_listen = Crdt::listen(spy_ref.clone(), spy_gossip, exit.clone());
let t_spy_gossip = Crdt::gossip(spy_ref.clone(), exit.clone());
//wait for the network to converge
for _ in 0..20 {
let ix = spy_ref.read().unwrap().update_index;
info!("my update index is {}", ix);
let len = spy_ref.read().unwrap().remote.values().len();
let mut done = false;
info!("remote len {}", len);
if len > 1 && ix > 2 {
done = true;
                //check if everyone's remote index is greater than or equal to ours
let vs: Vec<u64> = spy_ref.read().unwrap().remote.values().cloned().collect();
for t in vs.into_iter() {
info!("remote update index is {} vs {}", t, ix);
if t < 3 {
done = false;
}
}
}
            if done {
info!("converged!");
break;
}
sleep(Duration::new(1, 0));
}
//verify leader can do transfer
let leader_balance = {
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
socket.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
let mut acc = ThinClient::new(leader.0.serve_addr, socket);
info!("getting leader last_id");
let last_id = acc.get_last_id().wait().unwrap();
info!("executing leader transer");
let _sig = acc.transfer(500, &alice.keypair(), bob_pubkey, &last_id)
.unwrap();
info!("getting leader balance");
acc.get_balance(&bob_pubkey).unwrap()
};
assert_eq!(leader_balance, 500);
//verify replicant has the same balance
let mut replicant_balance = 0;
for _ in 0..10 {
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
socket.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
let mut acc = ThinClient::new(replicant.0.serve_addr, socket);
info!("getting replicant balance");
if let Ok(bal) = acc.get_balance(&bob_pubkey) {
replicant_balance = bal;
}
info!("replicant balance {}", replicant_balance);
if replicant_balance == leader_balance {
break;
}
sleep(Duration::new(1, 0));
}
assert_eq!(replicant_balance, leader_balance);
exit.store(true, Ordering::Relaxed);
for t in leader_threads {
t.join().unwrap();
}
for t in replicant_threads {
t.join().unwrap();
}
for t in vec![t_spy_listen, t_spy_gossip] {
t.join().unwrap();
}
}
}

src/timing.rs Normal file
View File

@ -0,0 +1,15 @@
use std::time::Duration;
use std::time::{SystemTime, UNIX_EPOCH};
pub fn duration_as_ms(d: &Duration) -> u64 {
    (d.as_secs() * 1000) + (d.subsec_nanos() as u64 / 1_000_000)
}
pub fn duration_as_s(d: &Duration) -> f32 {
    d.as_secs() as f32 + (d.subsec_nanos() as f32 / 1_000_000_000.0)
}
pub fn timestamp() -> u64 {
    let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
    duration_as_ms(&now)
}
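// Quick sanity check of the conversions above (a sketch, not part of the
// patch):
//
// let d = Duration::new(2, 500_000_000);
// assert_eq!(duration_as_ms(&d), 2_500);
// assert!((duration_as_s(&d) - 2.5).abs() < 1e-6);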

src/tpu.rs Normal file

File diff suppressed because it is too large Load Diff