From 55179101cd28ac289551fca05ff73cba14af2e28 Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Thu, 29 Mar 2018 12:20:54 -0600 Subject: [PATCH] Add more documentation --- src/accountant.rs | 19 ++++++++++++++++--- src/accountant_skel.rs | 21 ++++++++++----------- src/accountant_stub.rs | 21 ++++++++++++++++++--- src/entry.rs | 15 +++++++++++++++ src/event.rs | 7 ++++++- src/hash.rs | 2 ++ src/historian.rs | 4 ++-- src/ledger.rs | 15 ++------------- src/plan.rs | 8 ++++++++ src/recorder.rs | 2 +- src/result.rs | 2 ++ src/signature.rs | 2 +- src/streamer.rs | 8 +++++--- src/transaction.rs | 6 +++++- 14 files changed, 93 insertions(+), 39 deletions(-) diff --git a/src/accountant.rs b/src/accountant.rs index 9d6602e57d..622b96a1c1 100644 --- a/src/accountant.rs +++ b/src/accountant.rs @@ -1,6 +1,7 @@ -//! The `accountant` is a client of the `historian`. It uses the historian's -//! event log to record transactions. Its users can deposit funds and -//! transfer funds to other users. +//! The Accountant tracks client balances and the progress of pending +//! transactions. It offers a high-level public API that signs transactions +//! on behalf of the caller and a private low-level API for transactions that have +//! already been signed and verified. use chrono::prelude::*; use entry::Entry; @@ -44,6 +45,7 @@ pub struct Accountant { } impl Accountant { + /// Create an Accountant using an existing ledger. pub fn new_from_entries(entries: I, ms_per_tick: Option) -> Self where I: IntoIterator, @@ -79,6 +81,7 @@ impl Accountant { acc } + /// Create an Accountant with only a Mint. Typically used by unit tests. pub fn new(mint: &Mint, ms_per_tick: Option) -> Self { Self::new_from_entries(mint.create_entries(), ms_per_tick) } @@ -91,6 +94,7 @@ impl Accountant { } } + /// Verify and process the given Transaction. pub fn process_transaction(self: &mut Self, tr: Transaction) -> Result<()> { if !tr.verify() { return Err(AccountingError::InvalidTransfer); @@ -111,6 +115,7 @@ impl Accountant { Ok(()) } + /// Process a Transaction that has already been verified. fn process_verified_transaction( self: &mut Self, tr: &Transaction, @@ -138,6 +143,7 @@ impl Accountant { Ok(()) } + /// Process a Witness Signature that has already been verified. fn process_verified_sig(&mut self, from: PublicKey, tx_sig: Signature) -> Result<()> { if let Occupied(mut e) = self.pending.entry(tx_sig) { e.get_mut().apply_witness(&Witness::Signature(from)); @@ -150,6 +156,7 @@ impl Accountant { Ok(()) } + /// Process a Witness Timestamp that has already been verified. fn process_verified_timestamp(&mut self, from: PublicKey, dt: DateTime) -> Result<()> { // If this is the first timestamp we've seen, it probably came from the genesis block, // so we'll trust it. @@ -182,6 +189,7 @@ impl Accountant { Ok(()) } + /// Process a Transaction or Witness that has already been verified. fn process_verified_event(self: &mut Self, event: &Event, allow_deposits: bool) -> Result<()> { match *event { Event::Transaction(ref tr) => self.process_verified_transaction(tr, allow_deposits), @@ -190,6 +198,8 @@ impl Accountant { } } + /// Create, sign, and process a Transaction from `keypair` to `to` of + /// `n` tokens where `last_id` is the last Entry ID observed by the client. 
pub fn transfer( self: &mut Self, n: i64, @@ -202,6 +212,9 @@ impl Accountant { self.process_transaction(tr).map(|_| sig) } + /// Create, sign, and process a postdated Transaction from `keypair` + /// to `to` of `n` tokens on `dt` where `last_id` is the last Entry ID + /// observed by the client. pub fn transfer_on_date( self: &mut Self, n: i64, diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs index 2b82167afe..6502d6172c 100644 --- a/src/accountant_skel.rs +++ b/src/accountant_skel.rs @@ -1,3 +1,7 @@ +//! The AccountantSkel is a microservice that exposes the high-level +//! Accountant API to the network. Its message encoding is currently +//! in flux. Clients should use AccountantStub to interact with it. + use accountant::Accountant; use bincode::{deserialize, serialize}; use entry::Entry; @@ -7,7 +11,7 @@ use serde_json; use signature::PublicKey; use std::default::Default; use std::io::{ErrorKind, Write}; -use std::net::{TcpStream, UdpSocket}; +use std::net::UdpSocket; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::channel; use std::sync::{Arc, Mutex}; @@ -20,7 +24,6 @@ pub struct AccountantSkel { pub acc: Accountant, pub last_id: Hash, writer: W, - subscribers: Vec, } #[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))] @@ -39,31 +42,26 @@ pub enum Response { } impl AccountantSkel { + /// Create a new AccountantSkel that wraps the given Accountant. pub fn new(acc: Accountant, w: W) -> Self { let last_id = acc.first_id; AccountantSkel { acc, last_id, writer: w, - subscribers: vec![], } } + /// Process any Entry items that have been published by the Historian. pub fn sync(&mut self) -> Hash { while let Ok(entry) = self.acc.historian.receiver.try_recv() { self.last_id = entry.id; writeln!(self.writer, "{}", serde_json::to_string(&entry).unwrap()).unwrap(); - - let buf = serialize(&entry).expect("serialize"); - self.subscribers - .retain(|ref mut subscriber| match subscriber.write(&buf) { - Err(err) => err.kind() != ErrorKind::BrokenPipe, - _ => true, - }); } self.last_id } + /// Process Request items sent by clients. pub fn process_request(self: &mut Self, msg: Request) -> Option { match msg { Request::Transaction(tr) => { @@ -127,7 +125,8 @@ impl AccountantSkel { Ok(()) } - /// UDP Server that forwards messages to Accountant methods. + /// Create a UDP microservice that forwards messages to the given AccountantSkel. + /// Set `exit` to shut down its threads. pub fn serve( obj: Arc>>, addr: &str, diff --git a/src/accountant_stub.rs b/src/accountant_stub.rs index 451d8ee24b..1b97a550c1 100644 --- a/src/accountant_stub.rs +++ b/src/accountant_stub.rs @@ -1,6 +1,7 @@ -//! The `accountant` is a client of the `historian`. It uses the historian's -//! event log to record transactions. Its users can deposit funds and -//! transfer funds to other users. +//! An AccountantStub is a client-side object that interfaces with a server-side Accountant +//! object via the network interface exposed by AccountantSkel. Client code should use +//! this object instead of writing messages to the network directly. The binary +//! encoding of its messages is unstable and may change in future releases. use accountant_skel::{Request, Response}; use bincode::{deserialize, serialize}; @@ -16,6 +17,9 @@ pub struct AccountantStub { } impl AccountantStub { + /// Create a new AccountantStub that will interface with AccountantSkel + /// over `socket`. To receive responses, the caller must bind `socket` + /// to a public address before invoking AccountantStub methods. 
pub fn new(addr: &str, socket: UdpSocket) -> Self { AccountantStub { addr: addr.to_string(), @@ -23,12 +27,15 @@ impl AccountantStub { } } + /// Send a signed Transaction to the server for processing. This method + /// does not wait for a response. pub fn transfer_signed(&self, tr: Transaction) -> io::Result { let req = Request::Transaction(tr); let data = serialize(&req).unwrap(); self.socket.send_to(&data, &self.addr) } + /// Creates, signs, and processes a Transaction. Useful for writing unit-tests. pub fn transfer( &self, n: i64, @@ -41,6 +48,9 @@ impl AccountantStub { self.transfer_signed(tr).map(|_| sig) } + /// Request the balance of the user holding `pubkey`. This method blocks + /// until the server sends a response. If the response packet is dropped + /// by the network, this method will hang indefinitely. pub fn get_balance(&self, pubkey: &PublicKey) -> io::Result> { let req = Request::GetBalance { key: *pubkey }; let data = serialize(&req).expect("serialize GetBalance"); @@ -55,6 +65,7 @@ impl AccountantStub { Ok(None) } + /// Request the first or last Entry ID from the server. fn get_id(&self, is_last: bool) -> io::Result { let req = Request::GetId { is_last }; let data = serialize(&req).expect("serialize GetId"); @@ -68,6 +79,10 @@ impl AccountantStub { Ok(Default::default()) } + /// Request the last Entry ID from the server. This method blocks + /// until the server sends a response. At the time of this writing, + /// it also has the side-effect of causing the server to log any + /// entries that have been published by the Historian. pub fn get_last_id(&self) -> io::Result { self.get_id(true) } diff --git a/src/entry.rs b/src/entry.rs index 1cc2819e85..59b94ee40c 100644 --- a/src/entry.rs +++ b/src/entry.rs @@ -1,7 +1,22 @@ +//! An Entry is a fundamental building block of Proof of History. It contains a +//! unique ID that is the hash of the Entry before it plus the hash of the +//! transactions within it. Entries cannot be reordered, and each Entry's field `num_hashes` +//! represents an approximate amount of time since the last Entry was created. use event::Event; use hash::{extend_and_hash, hash, Hash}; use rayon::prelude::*; +/// Each Entry contains three pieces of data. The `num_hashes` field is the number +/// of hashes performed since the previous entry. The `id` field is the result +/// of hashing `id` from the previous entry `num_hashes` times. The `events` +/// field points to Events that took place shortly after `id` was generated. +/// +/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you +/// get a duration estimate since the last Entry. Since processing power increases +/// over time, one should expect the duration `num_hashes` represents to decrease proportionally. +/// Though processing power varies across nodes, the network gives priority to the +/// fastest processor. Duration should therefore be estimated by assuming that the hash +/// was generated by the fastest processor at the time the entry was recorded. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] pub struct Entry { pub num_hashes: u64, diff --git a/src/event.rs b/src/event.rs index a3933bc36a..c3620d0be4 100644 --- a/src/event.rs +++ b/src/event.rs @@ -1,4 +1,5 @@ -//! The `event` crate provides the data structures for log events. +//! An Event may be a Transaction or a Witness used to process a pending +//! Transaction. 
use bincode::serialize; use chrono::prelude::*; @@ -21,6 +22,7 @@ pub enum Event { } impl Event { + /// Create and sign a new Witness Timestamp. Used for unit-testing. pub fn new_timestamp(from: &KeyPair, dt: DateTime) -> Self { let sign_data = serialize(&dt).unwrap(); let sig = Signature::clone_from_slice(from.sign(&sign_data).as_ref()); @@ -32,6 +34,7 @@ impl Event { } // TODO: Rename this to transaction_signature(). + /// If the Event is a Transaction, return its Signature. pub fn get_signature(&self) -> Option { match *self { Event::Transaction(ref tr) => Some(tr.sig), @@ -39,6 +42,8 @@ impl Event { } } + /// Verify the Event's signature is valid and, if it is a Transaction, that its + /// spending plan is valid. pub fn verify(&self) -> bool { match *self { Event::Transaction(ref tr) => tr.verify(), diff --git a/src/hash.rs b/src/hash.rs index 181db33499..4b6dbff053 100644 --- a/src/hash.rs +++ b/src/hash.rs @@ -1,3 +1,5 @@ +//! A module for creating SHA-256 hashes. + use generic_array::GenericArray; use generic_array::typenum::U32; use sha2::{Digest, Sha256}; diff --git a/src/historian.rs b/src/historian.rs index 04a41703ef..9fadcb148e 100644 --- a/src/historian.rs +++ b/src/historian.rs @@ -1,5 +1,5 @@ -//! The `historian` crate provides a microservice for generating a Proof-of-History. -//! It manages a thread containing a Proof-of-History Logger. +//! The Historian provides a microservice for generating a Proof of History. +//! It manages a thread containing a Proof of History Recorder. use entry::Entry; use hash::Hash; diff --git a/src/ledger.rs b/src/ledger.rs index 9385ab555a..f35deac738 100644 --- a/src/ledger.rs +++ b/src/ledger.rs @@ -1,18 +1,7 @@ -//! The `ledger` crate provides the foundational data structures for Proof-of-History, -//! an ordered log of events in time. +//! The `ledger` module provides the functions for parallel verification of the +//! Proof of History ledger. use entry::{next_tick, Entry}; -/// Each entry contains three pieces of data. The `num_hashes` field is the number -/// of hashes performed since the previous entry. The `id` field is the result -/// of hashing `id` from the previous entry `num_hashes` times. The `event` -/// field points to an Event that took place shortly after `id` was generated. -/// -/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you -/// get a duration estimate since the last event. Since processing power increases -/// over time, one should expect the duration `num_hashes` represents to decrease proportionally. -/// Though processing power varies across nodes, the network gives priority to the -/// fastest processor. Duration should therefore be estimated by assuming that the hash -/// was generated by the fastest processor at the time the entry was recorded. use hash::Hash; use rayon::prelude::*; diff --git a/src/plan.rs b/src/plan.rs index 3ef51eed80..81061b4b5e 100644 --- a/src/plan.rs +++ b/src/plan.rs @@ -19,6 +19,7 @@ pub enum Condition { } impl Condition { + /// Return true if the given Witness satisfies this Condition. pub fn is_satisfied(&self, witness: &Witness) -> bool { match (self, witness) { (&Condition::Signature(ref pubkey), &Witness::Signature(ref from)) => pubkey == from, @@ -42,18 +43,23 @@ pub enum Plan { } impl Plan { + /// Create the simplest spending plan - one that pays `tokens` to the given PublicKey. 
pub fn new_payment(tokens: i64, to: PublicKey) -> Self { Plan::Pay(Payment { tokens, to }) } + /// Create a spending plan that pays `tokens` to `to` after being witnessed by `from`. pub fn new_authorized_payment(from: PublicKey, tokens: i64, to: PublicKey) -> Self { Plan::After(Condition::Signature(from), Payment { tokens, to }) } + /// Create a spending plan that pays `tokens` to `to` after the given DateTime. pub fn new_future_payment(dt: DateTime, tokens: i64, to: PublicKey) -> Self { Plan::After(Condition::Timestamp(dt), Payment { tokens, to }) } + /// Create a spending plan that pays `tokens` to `to` after the given DateTime + /// unless cancelled by `from`. pub fn new_cancelable_future_payment( dt: DateTime, from: PublicKey, @@ -66,6 +72,7 @@ impl Plan { ) } + /// Return true if the spending plan requires no additional Witnesses. pub fn is_complete(&self) -> bool { match *self { Plan::Pay(_) => true, @@ -73,6 +80,7 @@ impl Plan { } } + /// Return true if the plan spends exactly `spendable_tokens`. pub fn verify(&self, spendable_tokens: i64) -> bool { match *self { Plan::Pay(ref payment) | Plan::After(_, ref payment) => { diff --git a/src/recorder.rs b/src/recorder.rs index 14b6a26348..0bfd666742 100644 --- a/src/recorder.rs +++ b/src/recorder.rs @@ -1,4 +1,4 @@ -//! The `recorder` crate provides an object for generating a Proof-of-History. +//! The `recorder` module provides an object for generating a Proof of History. //! It records Event items on behalf of its users. It continuously generates //! new hashes, only stopping to check if it has been sent an Event item. It //! tags each Event with an Entry and sends it back. The Entry includes the diff --git a/src/result.rs b/src/result.rs index ea96453175..7bef766b59 100644 --- a/src/result.rs +++ b/src/result.rs @@ -1,3 +1,5 @@ +//! Exposes a Result type that propagates one of many different Error types. + use bincode; use serde_json; use std; diff --git a/src/signature.rs b/src/signature.rs index 9fd7b74e2e..2b5cbc3772 100644 --- a/src/signature.rs +++ b/src/signature.rs @@ -1,4 +1,4 @@ -//! The `signature` crate provides functionality for public and private keys +//! The `signature` module provides functionality for public and private keys use generic_array::GenericArray; use generic_array::typenum::{U32, U64}; diff --git a/src/streamer.rs b/src/streamer.rs index 3ad270ad8d..5a26840ffc 100644 --- a/src/streamer.rs +++ b/src/streamer.rs @@ -1,3 +1,5 @@ +//! A module for efficient batch processing of UDP packets. + use result::{Error, Result}; use std::fmt; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}; @@ -320,7 +322,7 @@ mod bench { }) } - fn sinc( + fn sink( recycler: PacketRecycler, exit: Arc, rvs: Arc>, @@ -354,7 +356,7 @@ mod bench { let t_producer3 = producer(&addr, recycler.clone(), exit.clone()); let rvs = Arc::new(Mutex::new(0)); - let t_sinc = sinc(recycler.clone(), exit.clone(), rvs.clone(), r_reader); + let t_sink = sink(recycler.clone(), exit.clone(), rvs.clone(), r_reader); let start = SystemTime::now(); let start_val = *rvs.lock().unwrap(); @@ -370,7 +372,7 @@ mod bench { t_producer1.join()?; t_producer2.join()?; t_producer3.join()?; - t_sinc.join()?; + t_sink.join()?; Ok(()) } #[bench] diff --git a/src/transaction.rs b/src/transaction.rs index 4aca2742a7..4754335573 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -1,4 +1,4 @@ -//! The `transaction` crate provides functionality for creating log transactions. +//! 
The `transaction` module provides functionality for creating log transactions. use bincode::serialize; use chrono::prelude::*; @@ -17,6 +17,7 @@ pub struct Transaction { } impl Transaction { + /// Create and sign a new Transaction. Used for unit-testing. pub fn new(from_keypair: &KeyPair, to: PublicKey, tokens: i64, last_id: Hash) -> Self { let from = from_keypair.pubkey(); let plan = Plan::Pay(Payment { tokens, to }); @@ -31,6 +32,7 @@ impl Transaction { tr } + /// Create and sign a postdated Transaction. Used for unit-testing. pub fn new_on_date( from_keypair: &KeyPair, to: PublicKey, @@ -58,11 +60,13 @@ impl Transaction { serialize(&(&self.from, &self.plan, &self.tokens, &self.last_id)).unwrap() } + /// Sign this transaction. pub fn sign(&mut self, keypair: &KeyPair) { let sign_data = self.get_sign_data(); self.sig = Signature::clone_from_slice(keypair.sign(&sign_data).as_ref()); } + /// Verify this transaction's signature and its spending plan. pub fn verify(&self) -> bool { self.sig.verify(&self.from, &self.get_sign_data()) && self.plan.verify(self.tokens) }
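
The AccountantStub documentation above describes a bind-then-request flow. The sketch below (not part of the patch) shows that flow as it might appear in a test inside this crate: the addresses and the 42-token amount are made up, the `alice`/`bob` keys are assumed to come from elsewhere (e.g. a Mint), and `KeyPair` is assumed to be exported by the `signature` module alongside `PublicKey`.

use std::io;
use std::net::UdpSocket;

use accountant_stub::AccountantStub;
use signature::{KeyPair, PublicKey};
use transaction::Transaction;

fn client_flow(alice: &KeyPair, bob: PublicKey) -> io::Result<()> {
    // Per the `new` docs, bind the socket to a public address first so the
    // server's responses have somewhere to land.
    let socket = UdpSocket::bind("0.0.0.0:8100")?;
    let acc = AccountantStub::new("127.0.0.1:8000", socket);

    // Blocks until the server replies with the last Entry ID.
    let last_id = acc.get_last_id()?;

    // Create and sign the Transaction locally, then send it without waiting
    // for a response.
    let tr = Transaction::new(alice, bob, 42, last_id);
    acc.transfer_signed(tr)?;

    // Also blocking; as the docs warn, this hangs if the response packet is dropped.
    let balance = acc.get_balance(&bob)?;
    println!("bob's balance: {:?}", balance);
    Ok(())
}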
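
The Plan constructors documented in plan.rs compose the same way. A minimal sketch under the same caveats: the keys are supplied by the caller, and the token amount and `Utc::now()` deadline are arbitrary; only the constructors and predicates shown in this patch are used.

use chrono::prelude::*;

use plan::Plan;
use signature::PublicKey;

fn plan_sketch(alice: PublicKey, bob: PublicKey) {
    // An unconditional payment needs no further Witnesses and spends exactly its tokens.
    let pay = Plan::new_payment(42, bob);
    assert!(pay.is_complete());
    assert!(pay.verify(42));

    // A payment gated on Alice's signature starts out incomplete.
    let authorized = Plan::new_authorized_payment(alice, 42, bob);
    assert!(!authorized.is_complete());

    // A postdated payment stays incomplete until a Timestamp Witness arrives.
    let future = Plan::new_future_payment(Utc::now(), 42, bob);
    assert!(!future.is_complete());
}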
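
Lastly, the duration estimate described in the new entry.rs docs reduces to a division. A self-contained illustration; the hashes-per-second rate used in the comment is a placeholder, not a measured figure.

/// Rough seconds elapsed since the previous Entry, per the entry.rs docs:
/// `num_hashes` divided by the hash rate of the fastest known processor.
fn approx_seconds_since_last_entry(num_hashes: u64, fastest_hashes_per_sec: u64) -> f64 {
    num_hashes as f64 / fastest_hashes_per_sec as f64
}

// e.g. an Entry recorded after 1_000_000 hashes on a node doing 2_000_000
// hashes per second represents roughly 0.5 seconds:
// approx_seconds_since_last_entry(1_000_000, 2_000_000) == 0.5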