Better names

Event -> Entry
EventData -> Event
This commit is contained in:
Greg Fitzgerald 2018-02-18 09:53:38 -07:00
parent 6ec0e5834c
commit 471f036444
2 changed files with 66 additions and 66 deletions

View File

@ -3,8 +3,8 @@
/// A Proof-of-History is an ordered log of events in time. Each entry contains three /// A Proof-of-History is an ordered log of events in time. Each entry contains three
/// pieces of data. The 'num_hashes' field is the number of hashes performed since the previous /// pieces of data. The 'num_hashes' field is the number of hashes performed since the previous
/// entry. The 'end_hash' field is the result of hashing 'end_hash' from the previous entry /// entry. The 'end_hash' field is the result of hashing 'end_hash' from the previous entry
/// 'num_hashes' times. The 'data' field is an optional foreign key (a hash) pointing to some /// 'num_hashes' times. The 'event' field points to an Event that took place shortly
/// arbitrary data that a client is looking to associate with the entry. /// after 'end_hash' was generated.
/// ///
/// If you divide 'num_hashes' by the amount of time it takes to generate a new hash, you /// If you divide 'num_hashes' by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last event. Since processing power increases /// get a duration estimate since the last event. Since processing power increases
@ -13,32 +13,32 @@
/// fastest processor. Duration should therefore be estimated by assuming that the hash /// fastest processor. Duration should therefore be estimated by assuming that the hash
/// was generated by the fastest processor at the time the entry was logged. /// was generated by the fastest processor at the time the entry was logged.
#[derive(Debug, PartialEq, Eq, Clone)] #[derive(Debug, PartialEq, Eq, Clone)]
pub struct Event { pub struct Entry {
pub num_hashes: u64, pub num_hashes: u64,
pub end_hash: u64, pub end_hash: u64,
pub data: EventData, pub event: Event,
} }
/// When 'data' is Tick, the event represents a simple clock tick, and exists for the /// When 'event' is Tick, the event represents a simple clock tick, and exists for the
/// sole purpose of improving the performance of event log verification. A tick can /// sole purpose of improving the performance of event log verification. A tick can
/// be generated in 'num_hashes' hashes and verified in 'num_hashes' hashes. By logging /// be generated in 'num_hashes' hashes and verified in 'num_hashes' hashes. By logging
/// a hash alongside the tick, each tick can be verified in parallel using the 'end_hash' /// a hash alongside the tick, each tick can be verified in parallel using the 'end_hash'
/// of the preceding tick to seed its hashing. /// of the preceding tick to seed its hashing.
#[derive(Debug, PartialEq, Eq, Clone)] #[derive(Debug, PartialEq, Eq, Clone)]
pub enum EventData { pub enum Event {
Tick, Tick,
UserDataKey(u64), UserDataKey(u64),
} }
impl Event { impl Entry {
/// Creates an Event from the number of hashes 'num_hashes' since the previous event /// Creates an Entry from the number of hashes 'num_hashes' since the previous event
/// and that resulting 'end_hash'. /// and that resulting 'end_hash'.
pub fn new_tick(num_hashes: u64, end_hash: u64) -> Self { pub fn new_tick(num_hashes: u64, end_hash: u64) -> Self {
let data = EventData::Tick; let event = Event::Tick;
Event { Entry {
num_hashes, num_hashes,
end_hash, end_hash,
data, event,
} }
} }
@ -48,8 +48,8 @@ impl Event {
} }
} }
/// Creates the next Tick Event 'num_hashes' after 'start_hash'. /// Creates the next Tick Entry 'num_hashes' after 'start_hash'.
pub fn next_tick(start_hash: u64, num_hashes: u64) -> Event { pub fn next_tick(start_hash: u64, num_hashes: u64) -> Entry {
use std::collections::hash_map::DefaultHasher; use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher}; use std::hash::{Hash, Hasher};
let mut end_hash = start_hash; let mut end_hash = start_hash;
@ -58,26 +58,26 @@ pub fn next_tick(start_hash: u64, num_hashes: u64) -> Event {
end_hash.hash(&mut hasher); end_hash.hash(&mut hasher);
end_hash = hasher.finish(); end_hash = hasher.finish();
} }
Event::new_tick(num_hashes, end_hash) Entry::new_tick(num_hashes, end_hash)
} }
/// Verifies the hashes and counts of a slice of events are all consistent. /// Verifies the hashes and counts of a slice of events are all consistent.
pub fn verify_slice(events: &[Event], start_hash: u64) -> bool { pub fn verify_slice(events: &[Entry], start_hash: u64) -> bool {
use rayon::prelude::*; use rayon::prelude::*;
let genesis = [Event::new_tick(0, start_hash)]; let genesis = [Entry::new_tick(0, start_hash)];
let event_pairs = genesis.par_iter().chain(events).zip(events); let event_pairs = genesis.par_iter().chain(events).zip(events);
event_pairs.all(|(x0, x1)| x1.verify(x0.end_hash)) event_pairs.all(|(x0, x1)| x1.verify(x0.end_hash))
} }
/// Verifies the hashes and events serially. Exists only for reference. /// Verifies the hashes and events serially. Exists only for reference.
pub fn verify_slice_seq(events: &[Event], start_hash: u64) -> bool { pub fn verify_slice_seq(events: &[Entry], start_hash: u64) -> bool {
let genesis = [Event::new_tick(0, start_hash)]; let genesis = [Entry::new_tick(0, start_hash)];
let mut event_pairs = genesis.iter().chain(events).zip(events); let mut event_pairs = genesis.iter().chain(events).zip(events);
event_pairs.all(|(x0, x1)| x1.verify(x0.end_hash)) event_pairs.all(|(x0, x1)| x1.verify(x0.end_hash))
} }
/// Create a vector of Ticks of length 'len' from 'start_hash' hash and 'num_hashes'. /// Create a vector of Ticks of length 'len' from 'start_hash' hash and 'num_hashes'.
pub fn create_ticks(start_hash: u64, num_hashes: u64, len: usize) -> Vec<Event> { pub fn create_ticks(start_hash: u64, num_hashes: u64, len: usize) -> Vec<Entry> {
use itertools::unfold; use itertools::unfold;
let mut events = unfold(start_hash, |state| { let mut events = unfold(start_hash, |state| {
let event = next_tick(*state, num_hashes); let event = next_tick(*state, num_hashes);
@ -93,8 +93,8 @@ mod tests {
#[test] #[test]
fn test_event_verify() { fn test_event_verify() {
assert!(Event::new_tick(0, 0).verify(0)); // base case assert!(Entry::new_tick(0, 0).verify(0)); // base case
assert!(!Event::new_tick(0, 0).verify(1)); // base case, bad assert!(!Entry::new_tick(0, 0).verify(1)); // base case, bad
assert!(next_tick(0, 1).verify(0)); // inductive step assert!(next_tick(0, 1).verify(0)); // inductive step
assert!(!next_tick(0, 1).verify(1)); // inductive step, bad assert!(!next_tick(0, 1).verify(1)); // inductive step, bad
} }
@ -104,10 +104,10 @@ mod tests {
assert_eq!(next_tick(0, 1).num_hashes, 1) assert_eq!(next_tick(0, 1).num_hashes, 1)
} }
fn verify_slice_generic(verify_slice: fn(&[Event], u64) -> bool) { fn verify_slice_generic(verify_slice: fn(&[Entry], u64) -> bool) {
assert!(verify_slice(&vec![], 0)); // base case assert!(verify_slice(&vec![], 0)); // base case
assert!(verify_slice(&vec![Event::new_tick(0, 0)], 0)); // singleton case 1 assert!(verify_slice(&vec![Entry::new_tick(0, 0)], 0)); // singleton case 1
assert!(!verify_slice(&vec![Event::new_tick(0, 0)], 1)); // singleton case 2, bad assert!(!verify_slice(&vec![Entry::new_tick(0, 0)], 1)); // singleton case 2, bad
assert!(verify_slice(&create_ticks(0, 0, 2), 0)); // inductive step assert!(verify_slice(&create_ticks(0, 0, 2), 0)); // inductive step
let mut bad_ticks = create_ticks(0, 0, 2); let mut bad_ticks = create_ticks(0, 0, 2);

View File

@ -1,44 +1,44 @@
//! The `historian` crate provides a microservice for generating a Proof-of-History. //! The `historian` crate provides a microservice for generating a Proof-of-History.
//! It logs EventData items on behalf of its users. It continuously generates //! It logs Event items on behalf of its users. It continuously generates
//! new hashes, only stopping to check if it has been sent an EventData item. It //! new hashes, only stopping to check if it has been sent an Event item. It
//! tags each EventData with an Event and sends it back. The Event includes the //! tags each Event with an Entry and sends it back. The Entry includes the
//! EventData, the latest hash, and the number of hashes since the last event. //! Event, the latest hash, and the number of hashes since the last event.
//! The resulting Event stream represents ordered events in time. //! The resulting stream of entries represents ordered events in time.
use std::thread::JoinHandle; use std::thread::JoinHandle;
use std::sync::mpsc::{Receiver, Sender}; use std::sync::mpsc::{Receiver, Sender};
use event::{Event, EventData}; use event::{Entry, Event};
pub struct Historian { pub struct Historian {
pub sender: Sender<EventData>, pub sender: Sender<Event>,
pub receiver: Receiver<Event>, pub receiver: Receiver<Entry>,
pub thread_hdl: JoinHandle<(Event, EventThreadExitReason)>, pub thread_hdl: JoinHandle<(Entry, ExitReason)>,
} }
#[derive(Debug, PartialEq, Eq)] #[derive(Debug, PartialEq, Eq)]
pub enum EventThreadExitReason { pub enum ExitReason {
RecvDisconnected, RecvDisconnected,
SendDisconnected, SendDisconnected,
} }
fn drain_queue( fn drain_queue(
receiver: &Receiver<EventData>, receiver: &Receiver<Event>,
sender: &Sender<Event>, sender: &Sender<Entry>,
num_hashes: u64, num_hashes: u64,
end_hash: u64, end_hash: u64,
) -> Result<u64, (Event, EventThreadExitReason)> { ) -> Result<u64, (Entry, ExitReason)> {
use std::sync::mpsc::TryRecvError; use std::sync::mpsc::TryRecvError;
let mut num_hashes = num_hashes; let mut num_hashes = num_hashes;
loop { loop {
match receiver.try_recv() { match receiver.try_recv() {
Ok(data) => { Ok(event) => {
let e = Event { let entry = Entry {
end_hash, end_hash,
num_hashes, num_hashes,
data, event,
}; };
if let Err(_) = sender.send(e.clone()) { if let Err(_) = sender.send(entry.clone()) {
return Err((e, EventThreadExitReason::SendDisconnected)); return Err((entry, ExitReason::SendDisconnected));
} }
num_hashes = 0; num_hashes = 0;
} }
@ -46,24 +46,24 @@ fn drain_queue(
return Ok(num_hashes); return Ok(num_hashes);
} }
Err(TryRecvError::Disconnected) => { Err(TryRecvError::Disconnected) => {
let e = Event { let entry = Entry {
end_hash, end_hash,
num_hashes, num_hashes,
data: EventData::Tick, event: Event::Tick,
}; };
return Err((e, EventThreadExitReason::RecvDisconnected)); return Err((entry, ExitReason::RecvDisconnected));
} }
} }
} }
} }
/// A background thread that will continue tagging received EventData messages and /// A background thread that will continue tagging received Event messages and
/// sending back Event messages until either the receiver or sender channel is closed. /// sending back Entry messages until either the receiver or sender channel is closed.
pub fn event_stream( pub fn event_stream(
start_hash: u64, start_hash: u64,
receiver: Receiver<EventData>, receiver: Receiver<Event>,
sender: Sender<Event>, sender: Sender<Entry>,
) -> JoinHandle<(Event, EventThreadExitReason)> { ) -> JoinHandle<(Entry, ExitReason)> {
use std::collections::hash_map::DefaultHasher; use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher}; use std::hash::{Hash, Hasher};
use std::thread; use std::thread;
@ -74,7 +74,7 @@ pub fn event_stream(
loop { loop {
match drain_queue(&receiver, &sender, num_hashes, end_hash) { match drain_queue(&receiver, &sender, num_hashes, end_hash) {
Ok(n) => num_hashes = n, Ok(n) => num_hashes = n,
Err(e) => return e, Err(err) => return err,
} }
end_hash.hash(&mut hasher); end_hash.hash(&mut hasher);
end_hash = hasher.finish(); end_hash = hasher.finish();
@ -86,9 +86,9 @@ pub fn event_stream(
impl Historian { impl Historian {
pub fn new(start_hash: u64) -> Self { pub fn new(start_hash: u64) -> Self {
use std::sync::mpsc::channel; use std::sync::mpsc::channel;
let (sender, event_data_receiver) = channel(); let (sender, event_receiver) = channel();
let (event_sender, receiver) = channel(); let (entry_sender, receiver) = channel();
let thread_hdl = event_stream(start_hash, event_data_receiver, event_sender); let thread_hdl = event_stream(start_hash, event_receiver, entry_sender);
Historian { Historian {
sender, sender,
receiver, receiver,
@ -106,33 +106,33 @@ mod tests {
fn test_historian() { fn test_historian() {
let hist = Historian::new(0); let hist = Historian::new(0);
let data = EventData::Tick; let event = Event::Tick;
hist.sender.send(data.clone()).unwrap(); hist.sender.send(event.clone()).unwrap();
let e0 = hist.receiver.recv().unwrap(); let entry0 = hist.receiver.recv().unwrap();
assert_eq!(e0.data, data); assert_eq!(entry0.event, event);
let data = EventData::UserDataKey(0xdeadbeef); let event = Event::UserDataKey(0xdeadbeef);
hist.sender.send(data.clone()).unwrap(); hist.sender.send(event.clone()).unwrap();
let e1 = hist.receiver.recv().unwrap(); let entry1 = hist.receiver.recv().unwrap();
assert_eq!(e1.data, data); assert_eq!(entry1.event, event);
drop(hist.sender); drop(hist.sender);
assert_eq!( assert_eq!(
hist.thread_hdl.join().unwrap().1, hist.thread_hdl.join().unwrap().1,
EventThreadExitReason::RecvDisconnected ExitReason::RecvDisconnected
); );
verify_slice(&[e0, e1], 0); verify_slice(&[entry0, entry1], 0);
} }
#[test] #[test]
fn test_historian_closed_sender() { fn test_historian_closed_sender() {
let hist = Historian::new(0); let hist = Historian::new(0);
drop(hist.receiver); drop(hist.receiver);
hist.sender.send(EventData::Tick).unwrap(); hist.sender.send(Event::Tick).unwrap();
assert_eq!( assert_eq!(
hist.thread_hdl.join().unwrap().1, hist.thread_hdl.join().unwrap().1,
EventThreadExitReason::SendDisconnected ExitReason::SendDisconnected
); );
} }
} }