//! The `entry` module is a fundamental building block of Proof of History. An Entry
//! contains a unique ID that is the hash of the Entry before it, plus the hash of the
//! transactions within it. Entries cannot be reordered, and the `num_hashes` field
//! represents an approximate amount of time since the last Entry was created.

use hash::{extend_and_hash, hash, Hash};
use rayon::prelude::*;
use transaction::Transaction;

/// Each Entry contains three pieces of data. The `num_hashes` field is the number
/// of hashes performed since the previous entry. The `id` field is the result
/// of hashing `id` from the previous entry `num_hashes` times. The `transactions`
/// field points to Transactions that took place shortly before `id` was generated.
///
/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last Entry. Since processing power increases
/// over time, one should expect the duration `num_hashes` represents to decrease proportionally.
/// An upper bound on Duration can be estimated by assuming each hash was generated by the
/// world's fastest processor at the time the entry was recorded. Or said another way, it
/// is physically not possible for a shorter duration to have occurred if one assumes the
/// hash was computed by the world's fastest processor at that time. The hash chain is both
/// a Verifiable Delay Function (VDF) and a Proof of Work (not to be confused with Proof of
/// Work consensus!)
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Entry {
    /// The number of hashes since the previous Entry ID.
    pub num_hashes: u64,

    /// The SHA-256 hash `num_hashes` after the previous Entry ID.
    pub id: Hash,

    /// An unordered list of transactions that were observed before the Entry ID was
    /// generated. They may have been observed before a previous Entry ID but were
    /// pushed back into this list to ensure deterministic interpretation of the ledger.
    pub transactions: Vec<Transaction>,
}

impl Entry {
    /// Creates the next Entry `num_hashes` after `start_hash`.
    pub fn new(start_hash: &Hash, cur_hashes: u64, transactions: Vec<Transaction>) -> Self {
        // A transaction Entry consumes one extra hash to fold the transaction data
        // into the hash chain; a Tick does not.
        let num_hashes = cur_hashes + if transactions.is_empty() { 0 } else { 1 };
        let id = next_hash(start_hash, 0, &transactions);
        Entry {
            num_hashes,
            id,
            transactions,
        }
    }

    /// Creates the next Entry `num_hashes` after `start_hash`, then resets `cur_hashes`
    /// and advances `start_hash` so the caller can keep recording.
    pub fn new_mut(
        start_hash: &mut Hash,
        cur_hashes: &mut u64,
        transactions: Vec<Transaction>,
    ) -> Self {
        let entry = Self::new(start_hash, *cur_hashes, transactions);
        *start_hash = entry.id;
        *cur_hashes = 0;
        entry
    }

    /// Creates a Tick Entry from the number of hashes `num_hashes` since the previous
    /// transaction and that resulting `id`.
    pub fn new_tick(num_hashes: u64, id: &Hash) -> Self {
        Entry {
            num_hashes,
            id: *id,
            transactions: vec![],
        }
    }

    /// Verifies that self.id is the result of hashing `start_hash` `self.num_hashes`
    /// times. If the Entry is not a Tick, the transaction data is hashed in as well,
    /// and each transaction's spending plan is verified.
    pub fn verify(&self, start_hash: &Hash) -> bool {
        self.transactions.par_iter().all(|tx| tx.verify_plan())
            && self.id == next_hash(start_hash, self.num_hashes, &self.transactions)
    }
}
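// A minimal sketch of the duration estimate described in the docs above: dividing
// `num_hashes` by an assumed hash rate approximates the time spent producing an
// Entry. The helper and its `hashes_per_second` parameter are hypothetical; a real
// caller would supply a benchmarked figure, and a faster assumed rate can only
// shrink the estimate.
#[allow(dead_code)]
fn estimated_duration_secs(entry: &Entry, hashes_per_second: f64) -> f64 {
    entry.num_hashes as f64 / hashes_per_second
}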
fn add_transaction_data(hash_data: &mut Vec<u8>, tx: &Transaction) {
    // Prepend a marker byte before each transaction's signature.
    hash_data.push(0u8);
    hash_data.extend_from_slice(&tx.sig);
}

/// Creates the hash `num_hashes` after `start_hash`. If the transactions contain
/// signatures, the final hash will be a hash of both the previous ID and the
/// signature data.
pub fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
    let mut id = *start_hash;
    for _ in 1..num_hashes {
        id = hash(&id);
    }

    // Hash all the transaction data
    let mut hash_data = vec![];
    for tx in transactions {
        add_transaction_data(&mut hash_data, tx);
    }

    // The loop above performs `num_hashes - 1` hashes; the final hash either folds
    // in the transaction data or, for a Tick, hashes the id once more.
    if !hash_data.is_empty() {
        extend_and_hash(&id, &hash_data)
    } else if num_hashes != 0 {
        hash(&id)
    } else {
        id
    }
}

/// Creates the next Tick or Transaction Entry `num_hashes` after `start_hash`.
pub fn next_entry(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
    Entry {
        num_hashes,
        id: next_hash(start_hash, num_hashes, &transactions),
        transactions,
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use chrono::prelude::*;
    use entry::Entry;
    use hash::hash;
    use signature::{KeyPair, KeyPairUtil};
    use transaction::Transaction;

    #[test]
    fn test_entry_verify() {
        let zero = Hash::default();
        let one = hash(&zero);
        assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case
        assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad
        assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step
        assert!(!next_entry(&zero, 1, vec![]).verify(&one)); // inductive step, bad
    }

    #[test]
    fn test_transaction_reorder_attack() {
        let zero = Hash::default();

        // First, verify entries
        let keypair = KeyPair::new();
        let tx0 = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
        let tx1 = Transaction::new(&keypair, keypair.pubkey(), 1, zero);
        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
        assert!(e0.verify(&zero));

        // Next, swap two transactions and ensure verification fails.
        e0.transactions[0] = tx1; // <-- attack
        e0.transactions[1] = tx0;
        assert!(!e0.verify(&zero));
    }

    #[test]
    fn test_witness_reorder_attack() {
        let zero = Hash::default();

        // First, verify entries
        let keypair = KeyPair::new();
        let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
        let tx1 = Transaction::new_signature(&keypair, Default::default(), zero);
        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
        assert!(e0.verify(&zero));

        // Next, swap two witness transactions and ensure verification fails.
        e0.transactions[0] = tx1; // <-- attack
        e0.transactions[1] = tx0;
        assert!(!e0.verify(&zero));
    }

    #[test]
    fn test_next_entry() {
        let zero = Hash::default();
        let tick = next_entry(&zero, 1, vec![]);
        assert_eq!(tick.num_hashes, 1);
        assert_ne!(tick.id, zero);
    }
}
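// A minimal sketch exercising the hash-chaining property described in the module
// docs: each Entry's `id` is derived from the id of the Entry before it, so a
// sequence of entries can only verify in the order it was recorded. The module
// and test names below are placeholders chosen for this sketch.
#[cfg(test)]
mod chain_tests {
    use super::*;
    use hash::Hash;
    use signature::{KeyPair, KeyPairUtil};
    use transaction::Transaction;

    #[test]
    fn test_entries_verify_in_order() {
        let zero = Hash::default();
        let keypair = KeyPair::new();
        let tx = Transaction::new(&keypair, keypair.pubkey(), 1, zero);

        // Record a tick, a transaction entry, and another tick, threading the
        // recorder state through `new_mut`.
        let mut id = zero;
        let mut num_hashes = 0;
        let entries = vec![
            Entry::new_mut(&mut id, &mut num_hashes, vec![]),
            Entry::new_mut(&mut id, &mut num_hashes, vec![tx]),
            Entry::new_mut(&mut id, &mut num_hashes, vec![]),
        ];

        // Each entry must verify against the id of the entry before it.
        let mut prev_id = zero;
        for entry in &entries {
            assert!(entry.verify(&prev_id));
            prev_id = entry.id;
        }
    }
}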