Add more documentation

This commit is contained in:
Greg Fitzgerald 2018-03-29 12:20:54 -06:00
parent 132495b1fc
commit 55179101cd
14 changed files with 93 additions and 39 deletions

View File

@ -1,6 +1,7 @@
//! The `accountant` is a client of the `historian`. It uses the historian's
//! event log to record transactions. Its users can deposit funds and
//! transfer funds to other users.
//! The Accountant tracks client balances and the progress of pending
//! transactions. It offers a high-level public API that signs transactions
//! on behalf of the caller and a private low-level API for transactions that
//! have already been signed and verified.
use chrono::prelude::*;
use entry::Entry;
@ -44,6 +45,7 @@ pub struct Accountant {
}
impl Accountant {
/// Create an Accountant using an existing ledger.
pub fn new_from_entries<I>(entries: I, ms_per_tick: Option<u64>) -> Self
where
I: IntoIterator<Item = Entry>,
@ -79,6 +81,7 @@ impl Accountant {
acc
}
/// Create an Accountant with only a Mint. Typically used by unit tests.
pub fn new(mint: &Mint, ms_per_tick: Option<u64>) -> Self {
Self::new_from_entries(mint.create_entries(), ms_per_tick)
}
@ -91,6 +94,7 @@ impl Accountant {
}
}
/// Verify and process the given Transaction.
pub fn process_transaction(self: &mut Self, tr: Transaction) -> Result<()> {
if !tr.verify() {
return Err(AccountingError::InvalidTransfer);
@ -111,6 +115,7 @@ impl Accountant {
Ok(())
}
/// Process a Transaction that has already been verified.
fn process_verified_transaction(
self: &mut Self,
tr: &Transaction,
@ -138,6 +143,7 @@ impl Accountant {
Ok(())
}
/// Process a Witness Signature that has already been verified.
fn process_verified_sig(&mut self, from: PublicKey, tx_sig: Signature) -> Result<()> {
if let Occupied(mut e) = self.pending.entry(tx_sig) {
e.get_mut().apply_witness(&Witness::Signature(from));
@ -150,6 +156,7 @@ impl Accountant {
Ok(())
}
/// Process a Witness Timestamp that has already been verified.
fn process_verified_timestamp(&mut self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
// If this is the first timestamp we've seen, it probably came from the genesis block,
// so we'll trust it.
@ -182,6 +189,7 @@ impl Accountant {
Ok(())
}
/// Process a Transaction or Witness that has already been verified.
fn process_verified_event(self: &mut Self, event: &Event, allow_deposits: bool) -> Result<()> {
match *event {
Event::Transaction(ref tr) => self.process_verified_transaction(tr, allow_deposits),
@ -190,6 +198,8 @@ impl Accountant {
}
}
/// Create, sign, and process a Transaction from `keypair` to `to` of
/// `n` tokens where `last_id` is the last Entry ID observed by the client.
pub fn transfer(
self: &mut Self,
n: i64,
@ -202,6 +212,9 @@ impl Accountant {
self.process_transaction(tr).map(|_| sig)
}
/// Create, sign, and process a postdated Transaction from `keypair`
/// to `to` of `n` tokens on `dt` where `last_id` is the last Entry ID
/// observed by the client.
pub fn transfer_on_date(
self: &mut Self,
n: i64,

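The split between the signing API and the pre-verified API is easiest to see end to end. The following is a minimal sketch, not part of this commit: it assumes `Mint::new`, `Mint::keypair`, `Mint::last_id`, and an `Accountant::get_balance` accessor exist alongside the methods documented above, and that `transfer` takes (tokens, keypair, recipient, last Entry ID) as its doc comment suggests.

use accountant::Accountant;
use mint::Mint;
use signature::{KeyPair, KeyPairUtil};

// Hypothetical happy path; helper names not shown in this diff are assumptions.
let alice = Mint::new(10_000);                        // seed the ledger with 10,000 tokens
let mut acc = Accountant::new(&alice, None);          // no tick thread for this sketch
let bob = KeyPair::new().pubkey();
let last_id = alice.last_id();                        // last Entry ID observed by the client
let sig = acc.transfer(500, &alice.keypair(), bob, last_id).expect("transfer");
assert_eq!(acc.get_balance(&bob), Some(500));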
View File

@ -1,3 +1,7 @@
//! The AccountantSkel is a microservice that exposes the high-level
//! Accountant API to the network. Its message encoding is currently
//! in flux. Clients should use AccountantStub to interact with it.
use accountant::Accountant;
use bincode::{deserialize, serialize};
use entry::Entry;
@ -7,7 +11,7 @@ use serde_json;
use signature::PublicKey;
use std::default::Default;
use std::io::{ErrorKind, Write};
use std::net::{TcpStream, UdpSocket};
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
@ -20,7 +24,6 @@ pub struct AccountantSkel<W: Write + Send + 'static> {
pub acc: Accountant,
pub last_id: Hash,
writer: W,
subscribers: Vec<TcpStream>,
}
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
@ -39,31 +42,26 @@ pub enum Response {
}
impl<W: Write + Send + 'static> AccountantSkel<W> {
/// Create a new AccountantSkel that wraps the given Accountant.
pub fn new(acc: Accountant, w: W) -> Self {
let last_id = acc.first_id;
AccountantSkel {
acc,
last_id,
writer: w,
subscribers: vec![],
}
}
/// Process any Entry items that have been published by the Historian.
pub fn sync(&mut self) -> Hash {
while let Ok(entry) = self.acc.historian.receiver.try_recv() {
self.last_id = entry.id;
writeln!(self.writer, "{}", serde_json::to_string(&entry).unwrap()).unwrap();
let buf = serialize(&entry).expect("serialize");
self.subscribers
.retain(|ref mut subscriber| match subscriber.write(&buf) {
Err(err) => err.kind() != ErrorKind::BrokenPipe,
_ => true,
});
}
self.last_id
}
/// Process Request items sent by clients.
pub fn process_request(self: &mut Self, msg: Request) -> Option<Response> {
match msg {
Request::Transaction(tr) => {
@ -127,7 +125,8 @@ impl<W: Write + Send + 'static> AccountantSkel<W> {
Ok(())
}
/// UDP Server that forwards messages to Accountant methods.
/// Create a UDP microservice that forwards messages to the given AccountantSkel.
/// Set `exit` to shut down its threads.
pub fn serve(
obj: Arc<Mutex<AccountantSkel<W>>>,
addr: &str,

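With `subscribers` removed, `sync` now only drains the Historian channel and writes each Entry to `writer` as JSON. A minimal sketch of standing the service up follows; it assumes `serve` takes the shared skel, a bind address, and the `exit` flag (its full parameter list and return type are truncated above).

use accountant::Accountant;
use accountant_skel::AccountantSkel;
use std::io;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex};

let acc = Accountant::new(&alice, Some(1000));         // `alice` is the Mint from the earlier sketch
let skel = Arc::new(Mutex::new(AccountantSkel::new(acc, io::sink())));
let exit = Arc::new(AtomicBool::new(false));
// Hypothetical call shape; check the actual serve() signature before relying on it.
let _threads = AccountantSkel::serve(skel, "0.0.0.0:8000", exit.clone()).expect("serve");
// Set `exit` to true and join the returned handles to shut the service down.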
View File

@ -1,6 +1,7 @@
//! The `accountant` is a client of the `historian`. It uses the historian's
//! event log to record transactions. Its users can deposit funds and
//! transfer funds to other users.
//! An AccountantStub is a client-side object that interfaces with a server-side Accountant
//! object via the network interface exposed by AccountantSkel. Client code should use
//! this object instead of writing messages to the network directly. The binary
//! encoding of its messages is unstable and may change in future releases.
use accountant_skel::{Request, Response};
use bincode::{deserialize, serialize};
@ -16,6 +17,9 @@ pub struct AccountantStub {
}
impl AccountantStub {
/// Create a new AccountantStub that will interface with AccountantSkel
/// over `socket`. To receive responses, the caller must bind `socket`
/// to a public address before invoking AccountantStub methods.
pub fn new(addr: &str, socket: UdpSocket) -> Self {
AccountantStub {
addr: addr.to_string(),
@ -23,12 +27,15 @@ impl AccountantStub {
}
}
/// Send a signed Transaction to the server for processing. This method
/// does not wait for a response.
pub fn transfer_signed(&self, tr: Transaction) -> io::Result<usize> {
let req = Request::Transaction(tr);
let data = serialize(&req).unwrap();
self.socket.send_to(&data, &self.addr)
}
/// Create, sign, and process a Transaction. Useful for writing unit tests.
pub fn transfer(
&self,
n: i64,
@ -41,6 +48,9 @@ impl AccountantStub {
self.transfer_signed(tr).map(|_| sig)
}
/// Request the balance of the user holding `pubkey`. This method blocks
/// until the server sends a response. If the response packet is dropped
/// by the network, this method will hang indefinitely.
pub fn get_balance(&self, pubkey: &PublicKey) -> io::Result<Option<i64>> {
let req = Request::GetBalance { key: *pubkey };
let data = serialize(&req).expect("serialize GetBalance");
@ -55,6 +65,7 @@ impl AccountantStub {
Ok(None)
}
/// Request the first or last Entry ID from the server.
fn get_id(&self, is_last: bool) -> io::Result<Hash> {
let req = Request::GetId { is_last };
let data = serialize(&req).expect("serialize GetId");
@ -68,6 +79,10 @@ impl AccountantStub {
Ok(Default::default())
}
/// Request the last Entry ID from the server. This method blocks
/// until the server sends a response. At the time of this writing,
/// it also has the side-effect of causing the server to log any
/// entries that have been published by the Historian.
pub fn get_last_id(&self) -> io::Result<Hash> {
self.get_id(true)
}

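Putting the stub methods together, a minimal client round trip might look like the sketch below. It assumes a skel is already serving on the hard-coded address, that `transfer` takes the same (tokens, keypair, recipient, last Entry ID) arguments as `Accountant::transfer`, and that the `signature` module's key-pair helpers are available; exact by-value/by-reference types are assumptions.

use accountant_stub::AccountantStub;
use signature::{KeyPair, KeyPairUtil};
use std::net::UdpSocket;

let socket = UdpSocket::bind("0.0.0.0:0").expect("bind"); // any public address, so replies can come back
let acc = AccountantStub::new("127.0.0.1:8000", socket);  // hypothetical server address
let alice = KeyPair::new();
let bob = KeyPair::new().pubkey();
let last_id = acc.get_last_id().expect("get_last_id");
let sig = acc.transfer(500, &alice, bob, last_id).expect("transfer");
let balance = acc.get_balance(&bob).expect("get_balance"); // blocks until the server replies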
View File

@ -1,7 +1,22 @@
//! An Entry is a fundamental building block of Proof of History. It contains a
//! unique ID that is the hash of the Entry before it plus the hash of the
//! transactions within it. Entries cannot be reordered, and the `num_hashes` field
//! represents an approximate amount of time since the last Entry was created.
use event::Event;
use hash::{extend_and_hash, hash, Hash};
use rayon::prelude::*;
/// Each Entry contains three pieces of data. The `num_hashes` field is the number
/// of hashes performed since the previous entry. The `id` field is the result
/// of hashing `id` from the previous entry `num_hashes` times. The `events`
/// field points to Events that took place shortly after `id` was generated.
///
/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last Entry. Since processing power increases
/// over time, one should expect the duration `num_hashes` represents to decrease proportionally.
/// Though processing power varies across nodes, the network gives priority to the
/// fastest processor. Duration should therefore be estimated by assuming that the hash
/// was generated by the fastest processor at the time the entry was recorded.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Entry {
pub num_hashes: u64,

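The doc comment's guidance on estimating duration is just a division. A tiny worked example, with a hypothetical hash rate standing in for the network's fastest node:

use std::time::Duration;

let hashes_per_second: u64 = 2_000_000;            // hypothetical rate of the fastest hasher
let num_hashes: u64 = 500_000;                     // taken from some Entry
let millis = num_hashes * 1_000 / hashes_per_second;
let estimate = Duration::from_millis(millis);
assert_eq!(estimate, Duration::from_millis(250));  // ~250 ms since the previous Entry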
View File

@ -1,4 +1,5 @@
//! The `event` crate provides the data structures for log events.
//! An Event may be a Transaction or a Witness used to process a pending
//! Transaction.
use bincode::serialize;
use chrono::prelude::*;
@ -21,6 +22,7 @@ pub enum Event {
}
impl Event {
/// Create and sign a new Witness Timestamp. Used for unit-testing.
pub fn new_timestamp(from: &KeyPair, dt: DateTime<Utc>) -> Self {
let sign_data = serialize(&dt).unwrap();
let sig = Signature::clone_from_slice(from.sign(&sign_data).as_ref());
@ -32,6 +34,7 @@ impl Event {
}
// TODO: Rename this to transaction_signature().
/// If the Event is a Transaction, return its Signature.
pub fn get_signature(&self) -> Option<Signature> {
match *self {
Event::Transaction(ref tr) => Some(tr.sig),
@ -39,6 +42,8 @@ impl Event {
}
}
/// Verify that the Event's signatures are valid and, if it is a Transaction,
/// that its spending plan is valid.
pub fn verify(&self) -> bool {
match *self {
Event::Transaction(ref tr) => tr.verify(),

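A minimal sketch of the Witness path, assuming `KeyPair::new` comes from the `signature` module's key-pair helpers (not shown in this diff):

use chrono::prelude::*;
use event::Event;
use signature::{KeyPair, KeyPairUtil};

let keypair = KeyPair::new();                 // assumed constructor
let event = Event::new_timestamp(&keypair, Utc::now());
assert!(event.verify());                      // a Timestamp carries no plan, so only its signature is checked
assert_eq!(event.get_signature(), None);      // only Transactions return a transaction signature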
View File

@ -1,3 +1,5 @@
//! A module for creating SHA-256 hashes.
use generic_array::GenericArray;
use generic_array::typenum::U32;
use sha2::{Digest, Sha256};

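A one-line sketch of the module's use, assuming its `hash` function takes a byte slice (as the `hash` import in entry.rs above suggests):

use hash::hash;

let h = hash(b"hello world");            // 32-byte SHA-256 digest wrapped in a GenericArray
assert_eq!(h, hash(b"hello world"));     // hashing is deterministic
assert_ne!(h, hash(b"hello worlds"));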
View File

@ -1,5 +1,5 @@
//! The `historian` crate provides a microservice for generating a Proof-of-History.
//! It manages a thread containing a Proof-of-History Logger.
//! The Historian provides a microservice for generating a Proof of History.
//! It manages a thread containing a Proof of History Recorder.
use entry::Entry;
use hash::Hash;

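AccountantSkel::sync above drains `historian.receiver`; the other end of the pipeline is the Event sender. A minimal sketch, assuming a `Historian::new(&start_hash, ms_per_tick)` constructor and public `sender`/`receiver` channel handles (names are assumptions based on the usage elsewhere in this diff):

use chrono::prelude::*;
use event::Event;
use hash::hash;
use historian::Historian;
use signature::{KeyPair, KeyPairUtil};

let seed = hash(b"genesis");
let hist = Historian::new(&seed, None);                        // None: no automatic tick entries here
let event = Event::new_timestamp(&KeyPair::new(), Utc::now()); // any verified Event will do
hist.sender.send(event).expect("send");
let entry = hist.receiver.recv().expect("recv");               // an Entry whose `id` extends the PoH chain past `seed`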
View File

@ -1,18 +1,7 @@
//! The `ledger` crate provides the foundational data structures for Proof-of-History,
//! an ordered log of events in time.
//! The `ledger` module provides the functions for parallel verification of the
//! Proof of History ledger.
use entry::{next_tick, Entry};
/// Each entry contains three pieces of data. The `num_hashes` field is the number
/// of hashes performed since the previous entry. The `id` field is the result
/// of hashing `id` from the previous entry `num_hashes` times. The `event`
/// field points to an Event that took place shortly after `id` was generated.
///
/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last event. Since processing power increases
/// over time, one should expect the duration `num_hashes` represents to decrease proportionally.
/// Though processing power varies across nodes, the network gives priority to the
/// fastest processor. Duration should therefore be estimated by assuming that the hash
/// was generated by the fastest processor at the time the entry was recorded.
use hash::Hash;
use rayon::prelude::*;

View File

@ -19,6 +19,7 @@ pub enum Condition {
}
impl Condition {
/// Return true if the given Witness satisfies this Condition.
pub fn is_satisfied(&self, witness: &Witness) -> bool {
match (self, witness) {
(&Condition::Signature(ref pubkey), &Witness::Signature(ref from)) => pubkey == from,
@ -42,18 +43,23 @@ pub enum Plan {
}
impl Plan {
/// Create the simplest spending plan: one that pays `tokens` to the given PublicKey.
pub fn new_payment(tokens: i64, to: PublicKey) -> Self {
Plan::Pay(Payment { tokens, to })
}
/// Create a spending plan that pays `tokens` to `to` after being witnessed by `from`.
pub fn new_authorized_payment(from: PublicKey, tokens: i64, to: PublicKey) -> Self {
Plan::After(Condition::Signature(from), Payment { tokens, to })
}
/// Create a spending plan that pays `tokens` to `to` after the given DateTime.
pub fn new_future_payment(dt: DateTime<Utc>, tokens: i64, to: PublicKey) -> Self {
Plan::After(Condition::Timestamp(dt), Payment { tokens, to })
}
/// Create a spending plan that pays `tokens` to `to` after the given DateTime
/// unless cancelled by `from`.
pub fn new_cancelable_future_payment(
dt: DateTime<Utc>,
from: PublicKey,
@ -66,6 +72,7 @@ impl Plan {
)
}
/// Return true if the spending plan requires no additional Witnesses.
pub fn is_complete(&self) -> bool {
match *self {
Plan::Pay(_) => true,
@ -73,6 +80,7 @@ impl Plan {
}
}
/// Return true if the plan spends exactly `spendable_tokens`.
pub fn verify(&self, spendable_tokens: i64) -> bool {
match *self {
Plan::Pay(ref payment) | Plan::After(_, ref payment) => {

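A minimal sketch of the constructors and predicates above, assuming the `signature` module's key-pair helpers for generating placeholder public keys:

use chrono::prelude::*;
use plan::Plan;
use signature::{KeyPair, KeyPairUtil};

let alice = KeyPair::new().pubkey();    // assumed helpers; only used to get PublicKey values
let bob = KeyPair::new().pubkey();

let simple = Plan::new_payment(100, bob);
assert!(simple.is_complete());          // a bare payment needs no witnesses
assert!(simple.verify(100));            // spends exactly 100 tokens

let deferred = Plan::new_cancelable_future_payment(Utc::now(), alice, 100, bob);
assert!(!deferred.is_complete());       // still waiting on a Timestamp or a cancel Signature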
View File

@ -1,4 +1,4 @@
//! The `recorder` crate provides an object for generating a Proof-of-History.
//! The `recorder` module provides an object for generating a Proof of History.
//! It records Event items on behalf of its users. It continuously generates
//! new hashes, only stopping to check if it has been sent an Event item. It
//! tags each Event with an Entry and sends it back. The Entry includes the

View File

@ -1,3 +1,5 @@
//! Exposes a Result type that propagates one of many different Error types.
use bincode;
use serde_json;
use std;

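A minimal sketch of the intended usage, assuming the shared Error type has `From` conversions for the wrapped crates' errors (which is what lets `?` propagate them):

use bincode::deserialize;
use entry::Entry;
use result::Result;

// Hypothetical helper; relies on `?` converting bincode's error into this crate's Error.
fn parse_entry(data: &[u8]) -> Result<Entry> {
    let entry = deserialize(data)?;
    Ok(entry)
}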
View File

@ -1,4 +1,4 @@
//! The `signature` crate provides functionality for public and private keys
//! The `signature` module provides functionality for public and private keys
use generic_array::GenericArray;
use generic_array::typenum::{U32, U64};

View File

@ -1,3 +1,5 @@
//! A module for efficient batch processing of UDP packets.
use result::{Error, Result};
use std::fmt;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
@ -320,7 +322,7 @@ mod bench {
})
}
fn sinc(
fn sink(
recycler: PacketRecycler,
exit: Arc<AtomicBool>,
rvs: Arc<Mutex<usize>>,
@ -354,7 +356,7 @@ mod bench {
let t_producer3 = producer(&addr, recycler.clone(), exit.clone());
let rvs = Arc::new(Mutex::new(0));
let t_sinc = sinc(recycler.clone(), exit.clone(), rvs.clone(), r_reader);
let t_sink = sink(recycler.clone(), exit.clone(), rvs.clone(), r_reader);
let start = SystemTime::now();
let start_val = *rvs.lock().unwrap();
@ -370,7 +372,7 @@ mod bench {
t_producer1.join()?;
t_producer2.join()?;
t_producer3.join()?;
t_sinc.join()?;
t_sink.join()?;
Ok(())
}
#[bench]

View File

@ -1,4 +1,4 @@
//! The `transaction` crate provides functionality for creating log transactions.
//! The `transaction` module provides functionality for creating log transactions.
use bincode::serialize;
use chrono::prelude::*;
@ -17,6 +17,7 @@ pub struct Transaction {
}
impl Transaction {
/// Create and sign a new Transaction. Used for unit-testing.
pub fn new(from_keypair: &KeyPair, to: PublicKey, tokens: i64, last_id: Hash) -> Self {
let from = from_keypair.pubkey();
let plan = Plan::Pay(Payment { tokens, to });
@ -31,6 +32,7 @@ impl Transaction {
tr
}
/// Create and sign a postdated Transaction. Used for unit-testing.
pub fn new_on_date(
from_keypair: &KeyPair,
to: PublicKey,
@ -58,11 +60,13 @@ impl Transaction {
serialize(&(&self.from, &self.plan, &self.tokens, &self.last_id)).unwrap()
}
/// Sign this transaction.
pub fn sign(&mut self, keypair: &KeyPair) {
let sign_data = self.get_sign_data();
self.sig = Signature::clone_from_slice(keypair.sign(&sign_data).as_ref());
}
/// Verify this transaction's signature and its spending plan.
pub fn verify(&self) -> bool {
self.sig.verify(&self.from, &self.get_sign_data()) && self.plan.verify(self.tokens)
}
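
Finally, a minimal sketch of the Transaction life cycle documented above, with a placeholder `last_id` standing in for a real Entry ID and the `signature` module's key-pair helpers assumed:

use hash::hash;
use signature::{KeyPair, KeyPairUtil};
use transaction::Transaction;

let alice = KeyPair::new();              // assumed constructor from the signature module
let bob = KeyPair::new().pubkey();
let last_id = hash(b"placeholder");      // in practice, the last Entry ID seen by the client
let tr = Transaction::new(&alice, bob, 42, last_id);
assert!(tr.verify());                    // signature checks out and the plan spends exactly 42 tokens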