More descriptive field names

This commit is contained in:
Greg Fitzgerald 2018-02-15 10:48:30 -07:00
parent a74540470a
commit d21ad9357c
1 changed file with 38 additions and 34 deletions

View File

@ -1,55 +1,59 @@
//! The `event` crate provides the foundational data structures for Proof-of-History

/// A Proof-of-History is an ordered log of events in time. Each entry contains three
/// pieces of data. The 'num_hashes' field is the number of hashes performed since the previous
/// entry. The 'end_hash' field is the result of hashing 'end_hash' from the previous entry
/// 'num_hashes' times. The 'data' field is an optional foreign key (a hash) pointing to some
/// arbitrary data that a client is looking to associate with the entry.
///
/// If you divide 'num_hashes' by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last event. Since processing power increases
/// over time, one should expect the duration 'num_hashes' represents to decrease proportionally.
/// Though processing power varies across nodes, the network gives priority to the
/// fastest processor. Duration should therefore be estimated by assuming that the hash
/// was generated by the fastest processor at the time the entry was logged.
///
/// When 'data' is None, the event represents a simple "tick", and exists for the
/// sole purpose of improving the performance of event log verification. A tick can
/// be generated in 'num_hashes' hashes and verified in 'num_hashes' hashes. By logging a
/// hash alongside the tick, each tick can be verified in parallel using the 'end_hash'
/// of the preceding tick to seed its hashing.
#[derive(Debug, Clone, PartialEq)]
pub struct Event {
    // Result of hashing the previous entry's end_hash 'num_hashes' times.
    pub end_hash: u64,
    // Number of hashes performed since the previous entry.
    pub num_hashes: u64,
    // Optional foreign key to arbitrary client data; None marks a plain "tick".
    pub data: Option<u64>,
}

impl Event {
    /// Creates an Event from the number of hashes 'num_hashes' since the previous event
    /// and that resulting 'end_hash'. The 'data' field is left empty (a "tick").
    pub fn new(end_hash: u64, num_hashes: u64) -> Self {
        let data = None;
        Event {
            end_hash,
            num_hashes,
            data,
        }
    }

    /// Creates an Event by hashing 'start_hash' 'num_hashes' times.
    ///
    /// ```
    /// use loomination::event::Event;
    /// assert_eq!(Event::run(0, 1).num_hashes, 1)
    /// ```
    pub fn run(start_hash: u64, num_hashes: u64) -> Self {
        use std::collections::hash_map::DefaultHasher;
        use std::hash::{Hash, Hasher};
        let mut hash = start_hash;
        // NOTE(review): the hasher is reused across iterations, so each step folds the
        // running hash into the hasher's accumulated internal state rather than hashing
        // from a fresh hasher. 'verify' replays this exact procedure, so verification
        // remains self-consistent — but confirm this accumulation is intentional.
        let mut hasher = DefaultHasher::new();
        for _ in 0..num_hashes {
            hash.hash(&mut hasher);
            hash = hasher.finish();
        }
        Self::new(hash, num_hashes)
    }

    /// Verifies self.end_hash is the result of hashing a 'start_hash' 'self.num_hashes' times.
    ///
    /// ```
    /// use loomination::event::Event;
    /// assert!(Event::run(0, 1).verify(0)); // inductive case
    /// assert!(!Event::run(0, 1).verify(1)); // inductive case, bad
    /// ```
    pub fn verify(&self, start_hash: u64) -> bool {
        self.end_hash == Self::run(start_hash, self.num_hashes).end_hash
    }
}
@ -73,26 +77,26 @@ impl Event {
/// assert!(verify_slice(&vec![Event::run(0, 0), Event::run(0, 0)], 0)); // lazy inductive case /// assert!(verify_slice(&vec![Event::run(0, 0), Event::run(0, 0)], 0)); // lazy inductive case
/// assert!(!verify_slice(&vec![Event::run(0, 0), Event::run(1, 0)], 0)); // lazy inductive case, bad /// assert!(!verify_slice(&vec![Event::run(0, 0), Event::run(1, 0)], 0)); // lazy inductive case, bad
/// ``` /// ```
pub fn verify_slice(events: &[Event], seed: u64) -> bool { pub fn verify_slice(events: &[Event], start_hash: u64) -> bool {
use rayon::prelude::*; use rayon::prelude::*;
let genesis = [Event::run(seed, 0)]; let genesis = [Event::run(start_hash, 0)];
let event_pairs = genesis.par_iter().chain(events).zip(events); let event_pairs = genesis.par_iter().chain(events).zip(events);
event_pairs.all(|(x, x1)| x1.verify(x.hash)) event_pairs.all(|(x0, x1)| x1.verify(x0.end_hash))
} }
/// Verifies the hashes and events serially. Exists only for reference. /// Verifies the hashes and events serially. Exists only for reference.
pub fn verify_slice_seq(events: &[Event], seed: u64) -> bool { pub fn verify_slice_seq(events: &[Event], start_hash: u64) -> bool {
let genesis = [Event::run(seed, 0)]; let genesis = [Event::run(start_hash, 0)];
let event_pairs = genesis.iter().chain(events).zip(events); let event_pairs = genesis.iter().chain(events).zip(events);
event_pairs.into_iter().all(|(x, x1)| x1.verify(x.hash)) event_pairs.into_iter().all(|(x, x1)| x1.verify(x.end_hash))
} }
/// Create a vector of Ticks of length 'len' from 'seed' hash and 'hashes_since_prev'. /// Create a vector of Ticks of length 'len' from 'start_hash' hash and 'hashes_since_prev'.
pub fn create_events(seed: u64, hashes_since_prev: u64, len: usize) -> Vec<Event> { pub fn create_events(start_hash: u64, hashes_since_prev: u64, len: usize) -> Vec<Event> {
use itertools::unfold; use itertools::unfold;
let mut events = unfold(seed, |state| { let mut events = unfold(start_hash, |state| {
let event = Event::run(*state, hashes_since_prev); let event = Event::run(*state, hashes_since_prev);
*state = event.hash; *state = event.end_hash;
return Some(event); return Some(event);
}); });
events.by_ref().take(len).collect() events.by_ref().take(len).collect()
@ -106,10 +110,10 @@ mod bench {
#[bench] #[bench]
fn event_bench(bencher: &mut Bencher) { fn event_bench(bencher: &mut Bencher) {
let seed = 0; let start_hash = 0;
let events = event::create_events(seed, 100_000, 4); let events = event::create_events(start_hash, 100_000, 4);
bencher.iter(|| { bencher.iter(|| {
assert!(event::verify_slice(&events, seed)); assert!(event::verify_slice(&events, start_hash));
}); });
} }
} }