Merge pull request #67 from garious/cleanup-naming

Cleanup naming
This commit is contained in:
Greg Fitzgerald 2018-03-19 16:29:08 -06:00 committed by GitHub
commit 47325cbe01
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 105 additions and 96 deletions

View File

@ -37,9 +37,9 @@ Install the silk executables:
$ cargo install silk
```
The testnode server is initialized with a transaction log from stdin and
generates new log entries on stdout. To create the input log, we'll need
to create *the mint* and use it to generate a *genesis log*. It's done in
The testnode server is initialized with a ledger from stdin and
generates new ledger entries on stdout. To create the input ledger, we'll need
to create *the mint* and use it to generate a *genesis ledger*. It's done in
two steps because the mint.json file contains a private key that will be
used later in this demo.
@ -55,23 +55,23 @@ Now you can start the server:
```
Then, in a separate shell, let's execute some transactions. Note we pass in
the JSON configuration file here, not the genesis log.
the JSON configuration file here, not the genesis ledger.
```bash
$ cat mint.json | silk-client-demo
```
Now kill the server with Ctrl-C, and take a look at the transaction log. You should
Now kill the server with Ctrl-C, and take a look at the ledger. You should
see something similar to:
```json
{"num_hashes":27,"id":[0, "..."],"event":"Tick"}
{"num_hashes":3,"id":[67, "..."],"event":{"Transaction":{"asset":42}}}
{"num_hashes":3,"id":[67, "..."],"event":{"Transaction":{"tokens":42}}}
{"num_hashes":27,"id":[0, "..."],"event":"Tick"}
```
Now restart the server from where we left off. Pass it both the genesis log, and
the transaction log.
Now restart the server from where we left off. Pass it both the genesis ledger, and
the transaction ledger.
```bash
$ cat genesis.log transactions0.log | silk-testnode > transactions1.log

View File

@ -1,7 +1,7 @@
The Historian
===
Create a *Historian* and send it *events* to generate an *event log*, where each log *entry*
Create a *Historian* and send it *events* to generate an *event log*, where each *entry*
is tagged with the historian's latest *hash*. Then ensure the order of events was not tampered
with by verifying each entry's hash can be generated from the hash in the previous entry:
@ -11,17 +11,17 @@ with by verifying each entry's hash can be generated from the hash in the previo
extern crate silk;
use silk::historian::Historian;
use silk::log::{verify_slice, Entry, Hash};
use silk::ledger::{verify_slice, Entry, Hash};
use silk::event::{generate_keypair, get_pubkey, sign_claim_data, Event};
use std::thread::sleep;
use std::time::Duration;
use std::sync::mpsc::SendError;
fn create_log(hist: &Historian<Hash>) -> Result<(), SendError<Event<Hash>>> {
fn create_ledger(hist: &Historian<Hash>) -> Result<(), SendError<Event<Hash>>> {
sleep(Duration::from_millis(15));
let asset = Hash::default();
let tokens = 42;
let keypair = generate_keypair();
let event0 = Event::new_claim(get_pubkey(&keypair), asset, sign_claim_data(&asset, &keypair));
let event0 = Event::new_claim(get_pubkey(&keypair), tokens, sign_claim_data(&tokens, &keypair));
hist.sender.send(event0)?;
sleep(Duration::from_millis(10));
Ok(())
@ -30,7 +30,7 @@ fn create_log(hist: &Historian<Hash>) -> Result<(), SendError<Event<Hash>>> {
fn main() {
let seed = Hash::default();
let hist = Historian::new(&seed, Some(10));
create_log(&hist).expect("send error");
create_ledger(&hist).expect("send error");
drop(hist.sender);
let entries: Vec<Entry<Hash>> = hist.receiver.iter().collect();
for entry in &entries {
@ -42,11 +42,11 @@ fn main() {
}
```
Running the program should produce a log similar to:
Running the program should produce a ledger similar to:
```rust
Entry { num_hashes: 0, id: [0, ...], event: Tick }
Entry { num_hashes: 3, id: [67, ...], event: Transaction { asset: [37, ...] } }
Entry { num_hashes: 3, id: [67, ...], event: Transaction { tokens: 42 } }
Entry { num_hashes: 3, id: [123, ...], event: Tick }
```

View File

@ -1,17 +1,17 @@
msc {
client,historian,logger;
client,historian,recorder;
logger=>historian [ label = "e0 = Entry{id: h0, n: 0, event: Tick}" ] ;
logger=>logger [ label = "h1 = hash(h0)" ] ;
logger=>logger [ label = "h2 = hash(h1)" ] ;
recorder=>historian [ label = "e0 = Entry{id: h0, n: 0, event: Tick}" ] ;
recorder=>recorder [ label = "h1 = hash(h0)" ] ;
recorder=>recorder [ label = "h2 = hash(h1)" ] ;
client=>historian [ label = "Transaction(d0)" ] ;
historian=>logger [ label = "Transaction(d0)" ] ;
logger=>logger [ label = "h3 = hash(h2 + d0)" ] ;
logger=>historian [ label = "e1 = Entry{id: hash(h3), n: 3, event: Transaction(d0)}" ] ;
logger=>logger [ label = "h4 = hash(h3)" ] ;
logger=>logger [ label = "h5 = hash(h4)" ] ;
logger=>logger [ label = "h6 = hash(h5)" ] ;
logger=>historian [ label = "e2 = Entry{id: h6, n: 3, event: Tick}" ] ;
historian=>recorder [ label = "Transaction(d0)" ] ;
recorder=>recorder [ label = "h3 = hash(h2 + d0)" ] ;
recorder=>historian [ label = "e1 = Entry{id: hash(h3), n: 3, event: Transaction(d0)}" ] ;
recorder=>recorder [ label = "h4 = hash(h3)" ] ;
recorder=>recorder [ label = "h5 = hash(h4)" ] ;
recorder=>recorder [ label = "h6 = hash(h5)" ] ;
recorder=>historian [ label = "e2 = Entry{id: h6, n: 3, event: Tick}" ] ;
client=>historian [ label = "collect()" ] ;
historian=>client [ label = "entries = [e0, e1, e2]" ] ;
client=>client [ label = "verify_slice(entries, h0)" ] ;

View File

@ -10,7 +10,7 @@ use transaction::Transaction;
use signature::{KeyPair, PublicKey, Signature};
use mint::Mint;
use historian::{reserve_signature, Historian};
use logger::Signal;
use recorder::Signal;
use std::sync::mpsc::SendError;
use std::collections::{HashMap, HashSet};
use std::result;
@ -43,8 +43,8 @@ impl Accountant {
{
let mut entries = entries.into_iter();
// The first item in the log is required to be an entry with zero num_hashes,
// which implies its id can be used as the log's seed.
// The first item in the ledger is required to be an entry with zero num_hashes,
// which implies its id can be used as the ledger's seed.
let entry0 = entries.next().unwrap();
let start_hash = entry0.id;
@ -59,7 +59,7 @@ impl Accountant {
last_time: Utc.timestamp(0, 0),
};
// The second item in the log is a special transaction where the to and from
// The second item in the ledger is a special transaction where the to and from
// fields are the same. That entry should be treated as a deposit, not a
// transfer to oneself.
let entry1 = entries.next().unwrap();
@ -97,7 +97,7 @@ impl Accountant {
return Err(AccountingError::InvalidTransfer);
}
if self.get_balance(&tr.from).unwrap_or(0) < tr.asset {
if self.get_balance(&tr.from).unwrap_or(0) < tr.tokens {
return Err(AccountingError::InsufficientFunds);
}
@ -117,10 +117,10 @@ impl Accountant {
if let Plan::Action(Action::Pay(ref payment)) = *plan {
if self.balances.contains_key(&payment.to) {
if let Some(x) = self.balances.get_mut(&payment.to) {
*x += payment.asset;
*x += payment.tokens;
}
} else {
self.balances.insert(payment.to, payment.asset);
self.balances.insert(payment.to, payment.tokens);
}
}
}
@ -136,7 +136,7 @@ impl Accountant {
if !Self::is_deposit(allow_deposits, &tr.from, &tr.plan) {
if let Some(x) = self.balances.get_mut(&tr.from) {
*x -= tr.asset;
*x -= tr.tokens;
}
}
@ -240,7 +240,7 @@ impl Accountant {
mod tests {
use super::*;
use signature::KeyPairUtil;
use logger::ExitReason;
use recorder::ExitReason;
#[test]
fn test_accountant() {
@ -289,16 +289,16 @@ mod tests {
let bob_pubkey = KeyPair::new().pubkey();
let mut tr = Transaction::new(&alice.keypair(), bob_pubkey, 1, alice.seed());
if let Plan::Action(Action::Pay(ref mut payment)) = tr.plan {
payment.asset = 2; // <-- attack!
payment.tokens = 2; // <-- attack!
}
assert_eq!(
acc.process_transaction(tr.clone()),
Err(AccountingError::InvalidTransfer)
);
// Also, ensure all branches of the plan spend all assets
// Also, ensure all branches of the plan spend all tokens
if let Plan::Action(Action::Pay(ref mut payment)) = tr.plan {
payment.asset = 0; // <-- whoops!
payment.tokens = 0; // <-- whoops!
}
assert_eq!(
acc.process_transaction(tr.clone()),

View File

@ -3,8 +3,8 @@ extern crate silk;
use silk::historian::Historian;
use silk::hash::Hash;
use silk::entry::Entry;
use silk::log::verify_slice;
use silk::logger::Signal;
use silk::ledger::verify_slice;
use silk::recorder::Signal;
use silk::signature::{KeyPair, KeyPairUtil};
use silk::transaction::Transaction;
use silk::event::Event;
@ -12,7 +12,7 @@ use std::thread::sleep;
use std::time::Duration;
use std::sync::mpsc::SendError;
fn create_log(hist: &Historian, seed: &Hash) -> Result<(), SendError<Signal>> {
fn create_ledger(hist: &Historian, seed: &Hash) -> Result<(), SendError<Signal>> {
sleep(Duration::from_millis(15));
let keypair = KeyPair::new();
let tr = Transaction::new(&keypair, keypair.pubkey(), 42, *seed);
@ -25,7 +25,7 @@ fn create_log(hist: &Historian, seed: &Hash) -> Result<(), SendError<Signal>> {
fn main() {
let seed = Hash::default();
let hist = Historian::new(&seed, Some(10));
create_log(&hist, &seed).expect("send error");
create_ledger(&hist, &seed).expect("send error");
drop(hist.sender);
let entries: Vec<Entry> = hist.receiver.iter().collect();
for entry in &entries {

View File

@ -7,7 +7,7 @@ use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
use std::time::Instant;
use hash::{hash, Hash};
use entry::Entry;
use logger::{ExitReason, Logger, Signal};
use recorder::{ExitReason, Recorder, Signal};
use signature::Signature;
pub struct Historian {
@ -22,7 +22,7 @@ impl Historian {
let (sender, event_receiver) = sync_channel(1000);
let (entry_sender, receiver) = sync_channel(1000);
let thread_hdl =
Historian::create_logger(*start_hash, ms_per_tick, event_receiver, entry_sender);
Historian::create_recorder(*start_hash, ms_per_tick, event_receiver, entry_sender);
let signatures = HashSet::new();
Historian {
sender,
@ -34,22 +34,22 @@ impl Historian {
/// A background thread that will continue tagging received Event messages and
/// sending back Entry messages until either the receiver or sender channel is closed.
fn create_logger(
fn create_recorder(
start_hash: Hash,
ms_per_tick: Option<u64>,
receiver: Receiver<Signal>,
sender: SyncSender<Entry>,
) -> JoinHandle<ExitReason> {
spawn(move || {
let mut logger = Logger::new(receiver, sender, start_hash);
let mut recorder = Recorder::new(receiver, sender, start_hash);
let now = Instant::now();
loop {
if let Err(err) = logger.process_events(now, ms_per_tick) {
if let Err(err) = recorder.process_events(now, ms_per_tick) {
return err;
}
if ms_per_tick.is_some() {
logger.last_id = hash(&logger.last_id);
logger.num_hashes += 1;
recorder.last_id = hash(&recorder.last_id);
recorder.num_hashes += 1;
}
}
})
@ -67,7 +67,7 @@ pub fn reserve_signature(sigs: &mut HashSet<Signature>, sig: &Signature) -> bool
#[cfg(test)]
mod tests {
use super::*;
use log::*;
use ledger::*;
use std::thread::sleep;
use std::time::Duration;
@ -132,7 +132,7 @@ mod tests {
assert_eq!(entries.len(), 1);
// Ensure the ID is not the seed, which indicates another Tick
// was logged before the one we sent.
// was recorded before the one we sent.
assert_ne!(entries[0].id, zero);
}
}

View File

@ -1,7 +1,7 @@
//! The `log` crate provides the foundational data structures for Proof-of-History,
//! The `ledger` crate provides the foundational data structures for Proof-of-History,
//! an ordered log of events in time.
/// Each log entry contains three pieces of data. The 'num_hashes' field is the number
/// Each entry contains three pieces of data. The 'num_hashes' field is the number
/// of hashes performed since the previous entry. The 'id' field is the result
/// of hashing 'id' from the previous entry 'num_hashes' times. The 'event'
/// field points to an Event that took place shortly after 'id' was generated.
@ -11,7 +11,7 @@
/// over time, one should expect the duration 'num_hashes' represents to decrease proportionally.
/// Though processing power varies across nodes, the network gives priority to the
/// fastest processor. Duration should therefore be estimated by assuming that the hash
/// was generated by the fastest processor at the time the entry was logged.
/// was generated by the fastest processor at the time the entry was recorded.
use hash::Hash;
use entry::{next_tick, Entry};
@ -60,7 +60,7 @@ mod tests {
mod bench {
extern crate test;
use self::test::Bencher;
use log::*;
use ledger::*;
#[bench]
fn event_bench(bencher: &mut Bencher) {

View File

@ -5,9 +5,9 @@ pub mod plan;
pub mod transaction;
pub mod event;
pub mod entry;
pub mod log;
pub mod ledger;
pub mod mint;
pub mod logger;
pub mod recorder;
pub mod historian;
pub mod streamer;
pub mod accountant;
@ -18,7 +18,7 @@ extern crate bincode;
extern crate chrono;
extern crate generic_array;
#[macro_use]
extern crate log as logging;
extern crate log;
extern crate rayon;
extern crate ring;
extern crate serde;

View File

@ -57,7 +57,7 @@ impl Mint {
#[cfg(test)]
mod tests {
use super::*;
use log::verify_slice;
use ledger::verify_slice;
use plan::{Action, Plan};
#[test]

View File

@ -35,14 +35,14 @@ pub enum Action {
impl Action {
pub fn spendable(&self) -> i64 {
match *self {
Action::Pay(ref payment) => payment.asset,
Action::Pay(ref payment) => payment.tokens,
}
}
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Payment {
pub asset: i64,
pub tokens: i64,
pub to: PublicKey,
}
@ -54,42 +54,48 @@ pub enum Plan {
}
impl Plan {
pub fn new_payment(asset: i64, to: PublicKey) -> Self {
Plan::Action(Action::Pay(Payment { asset, to }))
pub fn new_payment(tokens: i64, to: PublicKey) -> Self {
Plan::Action(Action::Pay(Payment { tokens, to }))
}
pub fn new_authorized_payment(from: PublicKey, asset: i64, to: PublicKey) -> Self {
pub fn new_authorized_payment(from: PublicKey, tokens: i64, to: PublicKey) -> Self {
Plan::After(
Condition::Signature(from),
Action::Pay(Payment { asset, to }),
Action::Pay(Payment { tokens, to }),
)
}
pub fn new_future_payment(dt: DateTime<Utc>, asset: i64, to: PublicKey) -> Self {
Plan::After(Condition::Timestamp(dt), Action::Pay(Payment { asset, to }))
pub fn new_future_payment(dt: DateTime<Utc>, tokens: i64, to: PublicKey) -> Self {
Plan::After(
Condition::Timestamp(dt),
Action::Pay(Payment { tokens, to }),
)
}
pub fn new_cancelable_future_payment(
dt: DateTime<Utc>,
from: PublicKey,
asset: i64,
tokens: i64,
to: PublicKey,
) -> Self {
Plan::Race(
(Condition::Timestamp(dt), Action::Pay(Payment { asset, to })),
(
Condition::Timestamp(dt),
Action::Pay(Payment { tokens, to }),
),
(
Condition::Signature(from),
Action::Pay(Payment { asset, to: from }),
Action::Pay(Payment { tokens, to: from }),
),
)
}
pub fn verify(&self, spendable_assets: i64) -> bool {
pub fn verify(&self, spendable_tokens: i64) -> bool {
match *self {
Plan::Action(ref action) => action.spendable() == spendable_assets,
Plan::After(_, ref action) => action.spendable() == spendable_assets,
Plan::Action(ref action) => action.spendable() == spendable_tokens,
Plan::After(_, ref action) => action.spendable() == spendable_tokens,
Plan::Race(ref a, ref b) => {
a.1.spendable() == spendable_assets && b.1.spendable() == spendable_assets
a.1.spendable() == spendable_tokens && b.1.spendable() == spendable_tokens
}
}
}

View File

@ -1,5 +1,5 @@
//! The `logger` crate provides an object for generating a Proof-of-History.
//! It logs Event items on behalf of its users. It continuously generates
//! The `recorder` crate provides an object for generating a Proof-of-History.
//! It records Event items on behalf of its users. It continuously generates
//! new hashes, only stopping to check if it has been sent an Event item. It
//! tags each Event with an Entry and sends it back. The Entry includes the
//! Event, the latest hash, and the number of hashes since the last event.
@ -24,7 +24,7 @@ pub enum ExitReason {
SendDisconnected,
}
pub struct Logger {
pub struct Recorder {
pub sender: SyncSender<Entry>,
pub receiver: Receiver<Signal>,
pub last_id: Hash,
@ -33,9 +33,9 @@ pub struct Logger {
pub num_ticks: u64,
}
impl Logger {
impl Recorder {
pub fn new(receiver: Receiver<Signal>, sender: SyncSender<Entry>, start_hash: Hash) -> Self {
Logger {
Recorder {
receiver,
sender,
last_id: start_hash,
@ -45,7 +45,7 @@ impl Logger {
}
}
pub fn log_entry(&mut self) -> Result<Entry, ExitReason> {
pub fn record_entry(&mut self) -> Result<Entry, ExitReason> {
let events = mem::replace(&mut self.events, vec![]);
let entry = create_entry_mut(&mut self.last_id, &mut self.num_hashes, events);
println!("{}", serde_json::to_string(&entry).unwrap());
@ -60,7 +60,7 @@ impl Logger {
loop {
if let Some(ms) = ms_per_tick {
if epoch.elapsed() > Duration::from_millis((self.num_ticks + 1) * ms) {
self.log_entry()?;
self.record_entry()?;
self.num_ticks += 1;
}
}
@ -68,7 +68,7 @@ impl Logger {
match self.receiver.try_recv() {
Ok(signal) => match signal {
Signal::Tick => {
let entry = self.log_entry()?;
let entry = self.record_entry()?;
self.sender
.send(entry)
.or(Err(ExitReason::SendDisconnected))?;

View File

@ -10,19 +10,19 @@ use plan::{Action, Condition, Payment, Plan};
pub struct Transaction {
pub from: PublicKey,
pub plan: Plan,
pub asset: i64,
pub tokens: i64,
pub last_id: Hash,
pub sig: Signature,
}
impl Transaction {
pub fn new(from_keypair: &KeyPair, to: PublicKey, asset: i64, last_id: Hash) -> Self {
pub fn new(from_keypair: &KeyPair, to: PublicKey, tokens: i64, last_id: Hash) -> Self {
let from = from_keypair.pubkey();
let plan = Plan::Action(Action::Pay(Payment { asset, to }));
let plan = Plan::Action(Action::Pay(Payment { tokens, to }));
let mut tr = Transaction {
from,
plan,
asset,
tokens,
last_id,
sig: Signature::default(),
};
@ -34,21 +34,24 @@ impl Transaction {
from_keypair: &KeyPair,
to: PublicKey,
dt: DateTime<Utc>,
asset: i64,
tokens: i64,
last_id: Hash,
) -> Self {
let from = from_keypair.pubkey();
let plan = Plan::Race(
(Condition::Timestamp(dt), Action::Pay(Payment { asset, to })),
(
Condition::Timestamp(dt),
Action::Pay(Payment { tokens, to }),
),
(
Condition::Signature(from),
Action::Pay(Payment { asset, to: from }),
Action::Pay(Payment { tokens, to: from }),
),
);
let mut tr = Transaction {
from,
plan,
asset,
tokens,
last_id,
sig: Signature::default(),
};
@ -57,7 +60,7 @@ impl Transaction {
}
fn get_sign_data(&self) -> Vec<u8> {
serialize(&(&self.from, &self.plan, &self.asset, &self.last_id)).unwrap()
serialize(&(&self.from, &self.plan, &self.tokens, &self.last_id)).unwrap()
}
pub fn sign(&mut self, keypair: &KeyPair) {
@ -66,7 +69,7 @@ impl Transaction {
}
pub fn verify(&self) -> bool {
self.sig.verify(&self.from, &self.get_sign_data()) && self.plan.verify(self.asset)
self.sig.verify(&self.from, &self.get_sign_data()) && self.plan.verify(self.tokens)
}
}
@ -96,13 +99,13 @@ mod tests {
#[test]
fn test_serialize_claim() {
let plan = Plan::Action(Action::Pay(Payment {
asset: 0,
tokens: 0,
to: Default::default(),
}));
let claim0 = Transaction {
from: Default::default(),
plan,
asset: 0,
tokens: 0,
last_id: Default::default(),
sig: Default::default(),
};
@ -118,7 +121,7 @@ mod tests {
let pubkey = keypair.pubkey();
let mut tr = Transaction::new(&keypair, pubkey, 42, zero);
tr.sign(&keypair);
tr.asset = 1_000_000; // <-- attack!
tr.tokens = 1_000_000; // <-- attack!
assert!(!tr.verify());
}