//! The `entry` module is a fundamental building block of Proof of History. It contains a
//! unique ID that is the hash of the Entry before it, plus the hash of the
//! transactions within it. Entries cannot be reordered, and each Entry's `num_hashes`
//! field represents an approximate amount of time since the previous Entry was created.
use crate::packet::{SharedBlob, BLOB_DATA_SIZE};
use crate::poh::Poh;
use crate::result::Result;
use bincode::{deserialize, serialize_into, serialized_size};
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::transaction::Transaction;
use std::io::Cursor;
use std::mem::size_of;
use std::net::SocketAddr;
use std::sync::mpsc::{Receiver, Sender};

pub type EntrySender = Sender<Vec<Entry>>;
pub type EntryReceiver = Receiver<Vec<Entry>>;

/// Each Entry contains three pieces of data. The `num_hashes` field is the number
/// of hashes performed since the previous entry. The `id` field is the result
/// of hashing `id` from the previous entry `num_hashes` times. The `transactions`
/// field points to Transactions that took place shortly before `id` was generated.
///
/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last Entry. Since processing power increases
/// over time, one should expect the duration `num_hashes` represents to decrease proportionally.
/// An upper bound on Duration can be estimated by assuming each hash was generated by the
/// world's fastest processor at the time the entry was recorded. Or said another way, it
/// is physically not possible for a shorter duration to have occurred if one assumes the
/// hash was computed by the world's fastest processor at that time. The hash chain is both
/// a Verifiable Delay Function (VDF) and a Proof of Work (not to be confused with Proof of
/// Work consensus!)
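///
/// For example, a minimal sketch of that duration bound, assuming a
/// hypothetical peak rate of 2 million hashes per second (a placeholder
/// figure, not a measurement):
///
/// ```ignore
/// let tick = Entry::new(&Hash::default(), 0, 1_000_000, vec![]);
/// let assumed_peak_hashes_per_sec = 2_000_000f64; // hypothetical figure
/// // No processor hashing this fast could have produced the entry in less time.
/// let min_elapsed_secs = tick.num_hashes as f64 / assumed_peak_hashes_per_sec;
/// ```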
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Entry {
    /// The previous Entry ID.
    pub prev_id: Hash,

    /// tick height of the ledger, not including any tick implied by this Entry
    pub tick_height: u64,

    /// The number of hashes since the previous Entry ID.
    pub num_hashes: u64,

    /// The SHA-256 hash `num_hashes` after the previous Entry ID.
    pub id: Hash,

    /// An unordered list of transactions that were observed before the Entry ID was
    /// generated. They may have been observed before a previous Entry ID but were
    /// pushed back into this list to ensure deterministic interpretation of the ledger.
    pub transactions: Vec<Transaction>,
}

impl Entry {
    /// Creates the next Entry `num_hashes` after `start_hash`.
    pub fn new(
        prev_id: &Hash,
        tick_height: u64,
        num_hashes: u64,
        transactions: Vec<Transaction>,
    ) -> Self {
        let entry = {
            if num_hashes == 0 && transactions.is_empty() {
                Entry {
                    prev_id: *prev_id,
                    tick_height,
                    num_hashes: 0,
                    id: *prev_id,
                    transactions,
                }
            } else if num_hashes == 0 {
                // If you passed in transactions, but passed in num_hashes == 0, then
                // next_hash will generate the next hash and set num_hashes == 1
                let id = next_hash(prev_id, 1, &transactions);
                Entry {
                    prev_id: *prev_id,
                    tick_height,
                    num_hashes: 1,
                    id,
                    transactions,
                }
            } else {
                // Otherwise, the next Entry `num_hashes` after `start_hash`.
                // If you wanted a tick, for instance, pass in num_hashes = 1
                // and transactions = empty.
                let id = next_hash(prev_id, num_hashes, &transactions);
                Entry {
                    prev_id: *prev_id,
                    tick_height,
                    num_hashes,
                    id,
                    transactions,
                }
            }
        };

        let size = serialized_size(&entry).unwrap();
        if size > BLOB_DATA_SIZE as u64 {
            panic!(
                "Serialized entry size too large: {} ({} transactions):",
                size,
                entry.transactions.len()
            );
        }
        entry
    }

    pub fn to_blob(
        &self,
        idx: Option<u64>,
        id: Option<Pubkey>,
        addr: Option<&SocketAddr>,
    ) -> SharedBlob {
        let blob = SharedBlob::default();
        {
            let mut blob_w = blob.write().unwrap();
            let pos = {
                let mut out = Cursor::new(blob_w.data_mut());
                serialize_into(&mut out, &self).expect("failed to serialize output");
                out.position() as usize
            };
            blob_w.set_size(pos);

            if let Some(idx) = idx {
                blob_w.set_index(idx).expect("set_index()");
            }
            if let Some(id) = id {
                blob_w.set_id(&id).expect("set_id()");
            }
            if let Some(addr) = addr {
                blob_w.meta.set_addr(addr);
            }
            blob_w.set_flags(0).unwrap();
        }
        blob
    }

    /// Estimates the serialized size of an Entry without creating one.
    pub fn serialized_size(transactions: &[Transaction]) -> u64 {
        let txs_size = serialized_size(transactions).unwrap();
        // tick_height + num_hashes + id + prev_id + txs
        (2 * size_of::<u64>() + 2 * size_of::<Hash>()) as u64 + txs_size
    }

    pub fn num_will_fit(transactions: &[Transaction]) -> usize {
        if transactions.is_empty() {
            return 0;
        }
        let mut num = transactions.len();
        let mut upper = transactions.len();
        let mut lower = 1; // if one won't fit, we have a lot of TODOs
        let mut next = transactions.len(); // optimistic
        loop {
            debug!(
                "num {}, upper {} lower {} next {} transactions.len() {}",
                num,
                upper,
                lower,
                next,
                transactions.len()
            );
            if Self::serialized_size(&transactions[..num]) <= BLOB_DATA_SIZE as u64 {
                next = (upper + num) / 2;
                lower = num;
                debug!("num {} fits, maybe too well? trying {}", num, next);
            } else {
                next = (lower + num) / 2;
                upper = num;
                debug!("num {} doesn't fit! trying {}", num, next);
            }
            // same as last time
            if next == num {
                debug!("converged on num {}", num);
                break;
            }
            num = next;
        }
        num
    }

    /// Creates the next Entry `num_hashes` after `start_hash`, then updates
    /// `start_hash` and resets `num_hashes` for the caller.
    pub fn new_mut(
        start_hash: &mut Hash,
        num_hashes: &mut u64,
        transactions: Vec<Transaction>,
    ) -> Self {
        let entry = Self::new(start_hash, 0, *num_hashes, transactions);
        *start_hash = entry.id;
        *num_hashes = 0;
        assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
        entry
    }

    /// Creates an Entry from the number of hashes `num_hashes`
    /// since the previous transaction and that resulting `id`.
    #[cfg(test)]
    pub fn new_tick(prev_id: &Hash, tick_height: u64, num_hashes: u64, id: &Hash) -> Self {
        Entry {
            prev_id: *prev_id,
            tick_height,
            num_hashes,
            id: *id,
            transactions: vec![],
        }
    }

    pub fn verify_self(&self) -> bool {
        self.id == next_hash(&self.prev_id, self.num_hashes, &self.transactions)
    }

    /// Verifies self.id is the result of hashing a `start_hash` `self.num_hashes` times.
    /// If the entry contains transactions, their hash is folded in as well.
    pub fn verify(&self, start_hash: &Hash) -> bool {
        let ref_hash = next_hash(start_hash, self.num_hashes, &self.transactions);
        if self.id != ref_hash {
            warn!(
                "next_hash is invalid expected: {:?} actual: {:?}",
                self.id, ref_hash
            );
            return false;
        }
        true
    }

    pub fn is_tick(&self) -> bool {
        self.transactions.is_empty()
    }
}
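
// A sketch of how a caller might use `num_will_fit()`: split a batch of
// transactions into entries that each serialize to at most BLOB_DATA_SIZE
// bytes, chaining ids with `Entry::new_mut()`. This helper is illustrative
// only and not part of the module's API.
#[cfg(test)]
#[allow(dead_code)]
fn split_into_blob_sized_entries(
    start_hash: &mut Hash,
    mut transactions: Vec<Transaction>,
) -> Vec<Entry> {
    let mut entries = vec![];
    while !transactions.is_empty() {
        // num_will_fit() binary-searches for the largest prefix that fits in a blob.
        let num = Entry::num_will_fit(&transactions);
        let rest = transactions.split_off(num);
        let mut num_hashes = 1;
        entries.push(Entry::new_mut(start_hash, &mut num_hashes, transactions));
        transactions = rest;
    }
    entries
}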
/// Creates the hash `num_hashes` after `start_hash`. If the transaction contains
/// a signature, the final hash will be a hash of both the previous ID and
/// the signature. If num_hashes is zero and there's no transaction data,
/// start_hash is returned.
fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
    if num_hashes == 0 && transactions.is_empty() {
        return *start_hash;
    }

    let mut poh = Poh::new(*start_hash, 0);
    for _ in 1..num_hashes {
        poh.hash();
    }

    if transactions.is_empty() {
        poh.tick().id
    } else {
        poh.record(Transaction::hash(transactions)).id
    }
}

pub fn reconstruct_entries_from_blobs(blobs: Vec<SharedBlob>) -> Result<(Vec<Entry>, u64)> {
    let mut entries: Vec<Entry> = Vec::with_capacity(blobs.len());
    let mut num_ticks = 0;

    for blob in blobs {
        let entry: Entry = {
            let msg = blob.read().unwrap();
            let msg_size = msg.size()?;
            deserialize(&msg.data()[..msg_size]).expect("Error reconstructing entry")
        };
        if entry.is_tick() {
            num_ticks += 1
        }
        entries.push(entry)
    }
    Ok((entries, num_ticks))
}

#[cfg(test)]
/// Creates the next Tick or Transaction Entry `num_hashes` after `start_hash`.
pub fn next_entry(prev_id: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
    assert!(num_hashes > 0 || transactions.is_empty());
    Entry {
        prev_id: *prev_id,
        tick_height: 0,
        num_hashes,
        id: next_hash(prev_id, num_hashes, &transactions),
        transactions,
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::entry::Entry;
    use chrono::prelude::*;
    use solana_sdk::budget_transaction::BudgetTransaction;
    use solana_sdk::hash::hash;
    use solana_sdk::signature::{Keypair, KeypairUtil};
    use solana_sdk::system_transaction::SystemTransaction;
    use solana_sdk::transaction::Transaction;

    #[test]
    fn test_entry_verify() {
        let zero = Hash::default();
        let one = hash(&zero.as_ref());
        assert!(Entry::new_tick(&zero, 0, 0, &zero).verify(&zero)); // base case, never used
        assert!(!Entry::new_tick(&zero, 1, 0, &zero).verify(&one)); // base case, bad
        assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step
        assert!(next_entry(&zero, 1, vec![]).verify_self()); // also inductive step
        assert!(!next_entry(&zero, 1, vec![]).verify(&one)); // inductive step, bad
    }

    #[test]
    fn test_transaction_reorder_attack() {
        let zero = Hash::default();

        // First, verify entries
        let keypair = Keypair::new();
        let tx0 = Transaction::system_new(&keypair, keypair.pubkey(), 0, zero);
        let tx1 = Transaction::system_new(&keypair, keypair.pubkey(), 1, zero);
        let mut e0 = Entry::new(&zero, 0, 0, vec![tx0.clone(), tx1.clone()]);
        assert!(e0.verify(&zero));

        // Next, swap two transactions and ensure verification fails.
        e0.transactions[0] = tx1; // <-- attack
        e0.transactions[1] = tx0;
        assert!(!e0.verify(&zero));
    }
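
    // A sketch of the blob round trip: an Entry written with to_blob() should
    // come back unchanged from reconstruct_entries_from_blobs(). This test is
    // an illustrative addition, not part of the original suite.
    #[test]
    fn test_blob_round_trip_sketch() {
        let tick = Entry::new(&Hash::default(), 0, 1, vec![]);
        let blob = tick.to_blob(None, None, None);
        let (entries, num_ticks) = reconstruct_entries_from_blobs(vec![blob]).unwrap();
        assert_eq!(entries, vec![tick]);
        assert_eq!(num_ticks, 1); // an Entry with no transactions is a tick
    }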
    #[test]
    fn test_witness_reorder_attack() {
        let zero = Hash::default();

        // First, verify entries
        let keypair = Keypair::new();
        let tx0 = Transaction::budget_new_timestamp(
            &keypair,
            keypair.pubkey(),
            keypair.pubkey(),
            Utc::now(),
            zero,
        );
        let tx1 =
            Transaction::budget_new_signature(&keypair, keypair.pubkey(), keypair.pubkey(), zero);
        let mut e0 = Entry::new(&zero, 0, 0, vec![tx0.clone(), tx1.clone()]);
        assert!(e0.verify(&zero));

        // Next, swap two witness transactions and ensure verification fails.
        e0.transactions[0] = tx1; // <-- attack
        e0.transactions[1] = tx0;
        assert!(!e0.verify(&zero));
    }

    #[test]
    fn test_next_entry() {
        let zero = Hash::default();
        let tick = next_entry(&zero, 1, vec![]);
        assert_eq!(tick.num_hashes, 1);
        assert_ne!(tick.id, zero);

        let tick = next_entry(&zero, 0, vec![]);
        assert_eq!(tick.num_hashes, 0);
        assert_eq!(tick.id, zero);

        let keypair = Keypair::new();
        let tx0 = Transaction::budget_new_timestamp(
            &keypair,
            keypair.pubkey(),
            keypair.pubkey(),
            Utc::now(),
            zero,
        );
        let entry0 = next_entry(&zero, 1, vec![tx0.clone()]);
        assert_eq!(entry0.num_hashes, 1);
        assert_eq!(entry0.id, next_hash(&zero, 1, &vec![tx0]));
    }

    #[test]
    #[should_panic]
    fn test_next_entry_panic() {
        let zero = Hash::default();
        let keypair = Keypair::new();
        let tx = Transaction::system_new(&keypair, keypair.pubkey(), 0, zero);
        next_entry(&zero, 0, vec![tx]);
    }

    #[test]
    fn test_serialized_size() {
        let zero = Hash::default();
        let keypair = Keypair::new();
        let tx = Transaction::system_new(&keypair, keypair.pubkey(), 0, zero);
        let entry = next_entry(&zero, 1, vec![tx.clone()]);
        assert_eq!(
            Entry::serialized_size(&[tx]),
            serialized_size(&entry).unwrap()
        );
    }
}