2018-03-30 13:10:27 -07:00
|
|
|
//! The `accountant_skel` module is a microservice that exposes the high-level
|
2018-03-29 11:20:54 -07:00
|
|
|
//! Accountant API to the network. Its message encoding is currently
|
2018-03-30 10:43:38 -07:00
|
|
|
//! in flux. Clients should use AccountantStub to interact with it.
|
2018-03-29 11:20:54 -07:00
|
|
|
|
2018-02-28 09:07:54 -08:00
|
|
|
use accountant::Accountant;
|
2018-03-28 13:40:58 -07:00
|
|
|
use bincode::{deserialize, serialize};
|
2018-04-05 21:39:07 -07:00
|
|
|
use ecdsa;
|
2018-03-26 21:03:26 -07:00
|
|
|
use entry::Entry;
|
2018-04-02 13:41:07 -07:00
|
|
|
use event::Event;
|
2018-03-26 21:03:26 -07:00
|
|
|
use hash::Hash;
|
2018-04-02 20:15:21 -07:00
|
|
|
use historian::Historian;
|
2018-03-26 21:07:11 -07:00
|
|
|
use packet;
|
|
|
|
use packet::SharedPackets;
|
2018-04-02 20:15:21 -07:00
|
|
|
use rayon::prelude::*;
|
|
|
|
use recorder::Signal;
|
2018-03-10 20:09:17 -08:00
|
|
|
use result::Result;
|
2018-03-26 21:03:26 -07:00
|
|
|
use serde_json;
|
|
|
|
use signature::PublicKey;
|
2018-03-26 21:07:11 -07:00
|
|
|
use std::cmp::max;
|
|
|
|
use std::collections::VecDeque;
|
2018-03-29 11:54:10 -07:00
|
|
|
use std::io::Write;
|
2018-03-29 12:09:21 -07:00
|
|
|
use std::net::{SocketAddr, UdpSocket};
|
2018-03-22 13:05:23 -07:00
|
|
|
use std::sync::atomic::{AtomicBool, Ordering};
|
2018-03-26 21:07:11 -07:00
|
|
|
use std::sync::mpsc::{channel, Receiver, SendError, Sender};
|
|
|
|
use std::sync::{Arc, Mutex};
|
2018-03-10 20:09:17 -08:00
|
|
|
use std::thread::{spawn, JoinHandle};
|
2018-03-26 21:03:26 -07:00
|
|
|
use std::time::Duration;
|
|
|
|
use streamer;
|
|
|
|
use transaction::Transaction;
|
2018-02-28 09:07:54 -08:00
|
|
|
|
2018-03-26 11:17:19 -07:00
|
|
|
/// Server-side state for the accountant microservice.
///
/// Wraps an `Accountant` together with the `Historian` that records events,
/// the id of the most recently registered entry, and a writer used to log
/// entries as JSON (see `sync`).
pub struct AccountantSkel<W: Write + Send + 'static> {
    // The underlying accountant that processes transactions and tracks balances.
    acc: Accountant,
    // Id of the last entry received from the historian; returned by `sync`
    // and by `Request::GetLastId`.
    last_id: Hash,
    // Sink for serialized entries; `sync` writes one JSON line per entry.
    writer: W,
    // Produces entries (via its receiver) and accepts signals (via its sender).
    historian: Historian,
}
|
|
|
|
|
2018-03-22 13:59:25 -07:00
|
|
|
// The Transaction variant dominates the enum's size; clippy's
// large_enum_variant lint is deliberately allowed here.
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
#[derive(Serialize, Deserialize, Debug)]
/// Messages clients may send to the accountant service.
pub enum Request {
    /// Submit a transaction for processing.
    Transaction(Transaction),
    /// Ask for the balance associated with a public key.
    GetBalance { key: PublicKey },
    /// Ask for the id of the most recent entry (see `AccountantSkel::sync`).
    GetLastId,
}
|
|
|
|
|
2018-03-29 12:18:08 -07:00
|
|
|
impl Request {
|
|
|
|
/// Verify the request is valid.
|
|
|
|
pub fn verify(&self) -> bool {
|
|
|
|
match *self {
|
2018-03-26 21:07:11 -07:00
|
|
|
Request::Transaction(ref tr) => tr.verify_plan(),
|
2018-03-29 12:18:08 -07:00
|
|
|
_ => true,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Parallel verfication of a batch of requests.
|
2018-03-26 21:07:11 -07:00
|
|
|
pub fn filter_valid_requests(reqs: Vec<(Request, SocketAddr)>) -> Vec<(Request, SocketAddr)> {
|
2018-03-29 12:18:08 -07:00
|
|
|
reqs.into_par_iter().filter({ |x| x.0.verify() }).collect()
|
2018-03-29 12:09:21 -07:00
|
|
|
}
|
|
|
|
|
2018-02-28 13:16:50 -08:00
|
|
|
#[derive(Serialize, Deserialize, Debug)]
/// Replies the accountant service sends back to clients.
pub enum Response {
    /// Balance for `key`; `None` when the accountant has no entry for it.
    Balance { key: PublicKey, val: Option<i64> },
    /// A batch of ledger entries.
    Entries { entries: Vec<Entry> },
    /// Id of the most recent entry, answering `Request::GetLastId`.
    LastId { id: Hash },
}
|
|
|
|
|
2018-03-26 11:17:19 -07:00
|
|
|
impl<W: Write + Send + 'static> AccountantSkel<W> {
    /// Create a new AccountantSkel that wraps the given Accountant.
    pub fn new(acc: Accountant, last_id: Hash, writer: W, historian: Historian) -> Self {
        AccountantSkel {
            acc,
            last_id,
            writer,
            historian,
        }
    }

    /// Process any Entry items that have been published by the Historian.
    ///
    /// Drains the historian's channel without blocking, registering each
    /// entry id with the accountant and logging the entry as one JSON line
    /// to `writer`. Returns the id of the last entry seen (unchanged if the
    /// channel was empty).
    pub fn sync(&mut self) -> Hash {
        while let Ok(entry) = self.historian.receiver.try_recv() {
            self.last_id = entry.id;
            self.acc.register_entry_id(&self.last_id);
            // One JSON object per line; unwrap is a hard failure on an
            // unwritable log sink.
            writeln!(self.writer, "{}", serde_json::to_string(&entry).unwrap()).unwrap();
        }
        self.last_id
    }

    /// Process Request items sent by clients.
    ///
    /// `verify` is the signature-verification result for this packet
    /// (0 = failed). Returns `Some(Response)` for query requests; processed
    /// transactions produce no response.
    pub fn log_verified_request(&mut self, msg: Request, verify: u8) -> Option<Response> {
        match msg {
            // Transactions that failed sigverify are dropped silently.
            Request::Transaction(_) if verify == 0 => {
                trace!("Transaction failed sigverify");
                None
            }
            Request::Transaction(tr) => {
                if let Err(err) = self.acc.process_verified_transaction(&tr) {
                    trace!("Transaction error: {:?}", err);
                } else if let Err(SendError(_)) = self.historian
                    .sender
                    .send(Signal::Event(Event::Transaction(tr.clone())))
                {
                    error!("Channel send error");
                }
                None
            }
            Request::GetBalance { key } => {
                let val = self.acc.get_balance(&key);
                Some(Response::Balance { key, val })
            }
            // Drain pending entries first so the freshest id is returned.
            Request::GetLastId => Some(Response::LastId { id: self.sync() }),
        }
    }

    /// Pull a batch of packet bundles off `recvr`, ed25519-verify them in
    /// parallel, and forward each (packets, verification-results) pair to
    /// `sendr`. Blocks up to one second waiting for the first bundle.
    fn verifier(
        recvr: &streamer::PacketReceiver,
        sendr: &Sender<(Vec<SharedPackets>, Vec<Vec<u8>>)>,
    ) -> Result<()> {
        let timer = Duration::new(1, 0);
        let msgs = recvr.recv_timeout(timer)?;
        trace!("got msgs");
        let mut v = Vec::new();
        v.push(msgs);
        // Opportunistically batch everything already queued.
        while let Ok(more) = recvr.try_recv() {
            trace!("got more msgs");
            v.push(more);
        }
        info!("batch {}", v.len());
        // Split the batch into at most 4 chunks (ceiling division) so
        // verification can run in parallel across them.
        let chunk = max(1, (v.len() + 3) / 4);
        let chunks: Vec<_> = v.chunks(chunk).collect();
        let rvs: Vec<_> = chunks
            .into_par_iter()
            .map(|x| ecdsa::ed25519_verify(&x.to_vec()))
            .collect();
        // Re-chunk the same way to pair each chunk with its results.
        for (v, r) in v.chunks(chunk).zip(rvs) {
            sendr.send((v.to_vec(), r))?;
        }
        Ok(())
    }

    /// Deserialize every packet in `p` into a `(Request, sender address)`
    /// pair, in parallel. Packets that fail to deserialize map to `None`.
    pub fn deserialize_packets(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
        p.packets
            .par_iter()
            .map(|x| {
                deserialize(&x.data[0..x.meta.size])
                    .map(|req| (req, x.meta.addr()))
                    .ok()
            })
            .collect()
    }

    /// Run each deserialized request through the skel (paired element-wise
    /// with its sigverify result in `vers`) and serialize any responses into
    /// blobs addressed back to the sender.
    fn process_packets(
        obj: &Arc<Mutex<AccountantSkel<W>>>,
        reqs: Vec<Option<(Request, SocketAddr)>>,
        vers: Vec<u8>,
        blob_recycler: &packet::BlobRecycler,
    ) -> Result<VecDeque<packet::SharedBlob>> {
        let mut rsps = VecDeque::new();
        for (data, v) in reqs.into_iter().zip(vers.into_iter()) {
            if let Some((req, rsp_addr)) = data {
                // Plan-level verification; sigverify is reflected in `v`.
                if !req.verify() {
                    continue;
                }
                if let Some(resp) = obj.lock().unwrap().log_verified_request(req, v) {
                    let blob = blob_recycler.allocate();
                    {
                        // Scope the write lock to the blob-filling code.
                        let mut b = blob.write().unwrap();
                        let v = serialize(&resp)?;
                        let len = v.len();
                        b.data[..len].copy_from_slice(&v);
                        b.meta.size = len;
                        b.meta.set_addr(&rsp_addr);
                    }
                    rsps.push_back(blob);
                }
            }
        }
        Ok(rsps)
    }

    /// Main request-processing step: receive one verified batch, answer the
    /// requests it contains, send any response blobs, and recycle the packet
    /// buffers. Blocks up to one second waiting for a batch.
    fn process(
        obj: &Arc<Mutex<AccountantSkel<W>>>,
        verified_receiver: &Receiver<(Vec<SharedPackets>, Vec<Vec<u8>>)>,
        blob_sender: &streamer::BlobSender,
        packet_recycler: &packet::PacketRecycler,
        blob_recycler: &packet::BlobRecycler,
    ) -> Result<()> {
        let timer = Duration::new(1, 0);
        let (mms, vvs) = verified_receiver.recv_timeout(timer)?;
        for (msgs, vers) in mms.into_iter().zip(vvs.into_iter()) {
            let reqs = Self::deserialize_packets(&msgs.read().unwrap());
            let rsps = Self::process_packets(obj, reqs, vers, blob_recycler)?;
            if !rsps.is_empty() {
                //don't wake up the other side if there is nothing
                blob_sender.send(rsps)?;
            }
            packet_recycler.recycle(msgs);
        }
        Ok(())
    }

    /// Create a UDP microservice that forwards messages to the given AccountantSkel.
    /// Set `exit` to shutdown its threads.
    ///
    /// Spawns four threads — receiver, responder, verifier, and server — and
    /// returns their join handles. Note: the verifier and server loops only
    /// check `exit` after an error (typically a one-second receive timeout).
    pub fn serve(
        obj: &Arc<Mutex<AccountantSkel<W>>>,
        addr: &str,
        exit: Arc<AtomicBool>,
    ) -> Result<Vec<JoinHandle<()>>> {
        let read = UdpSocket::bind(addr)?;
        // make sure we are on the same interface
        let mut local = read.local_addr()?;
        local.set_port(0);
        let write = UdpSocket::bind(local)?;

        let packet_recycler = packet::PacketRecycler::default();
        let blob_recycler = packet::BlobRecycler::default();
        let (packet_sender, packet_receiver) = channel();
        let t_receiver =
            streamer::receiver(read, exit.clone(), packet_recycler.clone(), packet_sender)?;
        let (blob_sender, blob_receiver) = channel();
        let t_responder =
            streamer::responder(write, exit.clone(), blob_recycler.clone(), blob_receiver);
        let (verified_sender, verified_receiver) = channel();

        // Verifier thread: raw packets in, (packets, sigverify results) out.
        let exit_ = exit.clone();
        let t_verifier = spawn(move || loop {
            let e = Self::verifier(&packet_receiver, &verified_sender);
            if e.is_err() && exit_.load(Ordering::Relaxed) {
                break;
            }
        });

        // Server thread: verified batches in, response blobs out.
        let skel = obj.clone();
        let t_server = spawn(move || loop {
            let e = AccountantSkel::process(
                &skel,
                &verified_receiver,
                &blob_sender,
                &packet_recycler,
                &blob_recycler,
            );
            if e.is_err() && exit.load(Ordering::Relaxed) {
                break;
            }
        });
        Ok(vec![t_receiver, t_responder, t_server, t_verifier])
    }
}
|
|
|
|
|
|
|
|
#[cfg(test)]
mod tests {
    use accountant_skel::Request;
    use bincode::serialize;
    use ecdsa;
    use transaction::{memfind, test_tx};

    /// The serialized transaction must sit at ecdsa::TX_OFFSET inside a
    /// serialized Request::Transaction, since sigverify reads it from there.
    #[test]
    fn test_layout() {
        let transaction = test_tx();
        let tx_bytes = serialize(&transaction).unwrap();
        let request_bytes = serialize(&Request::Transaction(transaction)).unwrap();
        assert_matches!(memfind(&request_bytes, &tx_bytes), Some(ecdsa::TX_OFFSET));
    }
}
|