solana/src/entry.rs

//! The `entry` module is a fundamental building block of Proof of History. It contains a
//! unique ID that is the hash of the Entry before it, plus the hash of the
//! transactions within it. Entries cannot be reordered, and each Entry's `num_hashes` field
//! represents an approximate amount of time since the previous Entry was created.
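//!
//! For example (illustrative numbers only, not a measured rate): if the recording node
//! computes roughly 2 million SHA-256 hashes per second, an Entry with
//! `num_hashes = 20_000` represents on the order of 10 milliseconds since the previous
//! Entry.
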
use crate::packet::{SharedBlob, BLOB_DATA_SIZE};
use crate::poh::Poh;
use crate::result::Result;
use bincode::{deserialize, serialize_into, serialized_size};
use solana_sdk::hash::Hash;
use solana_sdk::transaction::Transaction;
use std::io::Cursor;
use std::mem::size_of;
use std::sync::mpsc::{Receiver, Sender};

pub type EntrySender = Sender<Vec<Entry>>;
pub type EntryReceiver = Receiver<Vec<Entry>>;

/// Each Entry contains three pieces of data. The `num_hashes` field is the number
/// of hashes performed since the previous entry. The `id` field is the result
/// of hashing `id` from the previous entry `num_hashes` times. The `transactions`
/// field points to Transactions that took place shortly before `id` was generated.
///
/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last Entry. Since processing power increases
/// over time, one should expect the duration `num_hashes` represents to decrease proportionally.
/// An upper bound on the duration can be estimated by assuming each hash was generated by the
/// world's fastest processor at the time the entry was recorded. Said another way, it
/// is physically not possible for a shorter duration to have occurred if one assumes the
/// hash was computed by the world's fastest processor at that time. The hash chain is both
/// a Verifiable Delay Function (VDF) and a Proof of Work (not to be confused with Proof of
/// Work consensus!)
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Entry {
    /// tick height of the ledger, not including any tick implied by this Entry
    pub tick_height: u64,

    /// The number of hashes since the previous Entry ID.
    pub num_hashes: u64,

    /// The SHA-256 hash `num_hashes` after the previous Entry ID.
    pub id: Hash,

    /// An unordered list of transactions that were observed before the Entry ID was
    /// generated. They may have been observed before a previous Entry ID but were
    /// pushed back into this list to ensure deterministic interpretation of the ledger.
    pub transactions: Vec<Transaction>,
}

impl Entry {
    /// Creates the next Entry `num_hashes` after `prev_id`.
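    ///
    /// A minimal usage sketch (illustrative only): create a single tick Entry and verify
    /// it against the previous id.
    /// ```ignore
    /// let prev_id = Hash::default();
    /// let tick = Entry::new(&prev_id, 0, 1, vec![]);
    /// assert_eq!(tick.num_hashes, 1);
    /// assert!(tick.verify(&prev_id));
    /// ```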
    pub fn new(
        prev_id: &Hash,
        tick_height: u64,
        num_hashes: u64,
        transactions: Vec<Transaction>,
    ) -> Self {
        let entry = {
            if num_hashes == 0 && transactions.is_empty() {
                Entry {
                    tick_height,
                    num_hashes: 0,
                    id: *prev_id,
                    transactions,
                }
            } else if num_hashes == 0 {
                // If you passed in transactions, but passed in num_hashes == 0, then
                // next_hash will generate the next hash and set num_hashes == 1
                let id = next_hash(prev_id, 1, &transactions);
                Entry {
                    tick_height,
                    num_hashes: 1,
                    id,
                    transactions,
                }
            } else {
                // Otherwise, the next Entry `num_hashes` after `prev_id`.
                // If you wanted a tick for instance, then pass in num_hashes = 1
                // and transactions = empty
                let id = next_hash(prev_id, num_hashes, &transactions);
                Entry {
                    tick_height,
                    num_hashes,
                    id,
                    transactions,
                }
            }
        };

        let size = serialized_size(&entry).unwrap();
        if size > BLOB_DATA_SIZE as u64 {
            panic!(
                "Serialized entry size too large: {} ({} transactions):",
                size,
                entry.transactions.len()
            );
        }
        entry
    }
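
    /// Serializes this Entry into a new blob, setting the blob's size to the number of
    /// bytes written.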
    pub fn to_blob(&self) -> SharedBlob {
        let blob = SharedBlob::default();
        {
            let mut blob_w = blob.write().unwrap();
            let pos = {
                let mut out = Cursor::new(blob_w.data_mut());
                serialize_into(&mut out, &self).expect("failed to serialize output");
                out.position() as usize
            };
            blob_w.set_size(pos);
        }
        blob
    }

    /// Estimates the serialized size of an Entry built from `transactions`, without
    /// actually creating the Entry.
    pub fn serialized_size(transactions: &[Transaction]) -> u64 {
        let txs_size = serialized_size(transactions).unwrap();
        // tick_height + num_hashes + id + txs
        (2 * size_of::<u64>() + size_of::<Hash>()) as u64 + txs_size
    }
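
    /// Returns how many of the leading `transactions` can be packed into a single Entry
    /// that still fits in one blob (`BLOB_DATA_SIZE`), found by binary search.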
    pub fn num_will_fit(transactions: &[Transaction]) -> usize {
        if transactions.is_empty() {
            return 0;
        }
        let mut num = transactions.len();
        let mut upper = transactions.len();
        let mut lower = 1; // if one won't fit, we have a lot of TODOs
        let mut next = transactions.len(); // optimistic
        loop {
            debug!(
                "num {}, upper {} lower {} next {} transactions.len() {}",
                num,
                upper,
                lower,
                next,
                transactions.len()
            );
            if Self::serialized_size(&transactions[..num]) <= BLOB_DATA_SIZE as u64 {
                next = (upper + num) / 2;
                lower = num;
                debug!("num {} fits, maybe too well? trying {}", num, next);
            } else {
                next = (lower + num) / 2;
                upper = num;
                debug!("num {} doesn't fit! trying {}", num, next);
            }
            // same as last time
            if next == num {
                debug!("converged on num {}", num);
                break;
            }
            num = next;
        }
        num
    }

    /// Creates the next Entry `num_hashes` after `start_hash`, then advances `start_hash`
    /// to the new Entry's id and resets `num_hashes` to zero.
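    ///
    /// A minimal sketch (illustrative only): with no hashes and no transactions, the new
    /// Entry's id equals the incoming `start_hash`.
    /// ```ignore
    /// let mut id = Hash::default();
    /// let mut num_hashes = 0;
    /// let e0 = Entry::new_mut(&mut id, &mut num_hashes, vec![]);
    /// assert_eq!(id, e0.id);
    /// ```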
    pub fn new_mut(
        start_hash: &mut Hash,
        num_hashes: &mut u64,
        transactions: Vec<Transaction>,
    ) -> Self {
        let entry = Self::new(start_hash, 0, *num_hashes, transactions);
        *start_hash = entry.id;
        *num_hashes = 0;
        assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
        entry
    }

    /// Creates a tick Entry from the number of hashes `num_hashes` since the previous
    /// Entry and the resulting `id`.
    #[cfg(test)]
    pub fn new_tick(tick_height: u64, num_hashes: u64, id: &Hash) -> Self {
        Entry {
            tick_height,
            num_hashes,
            id: *id,
            transactions: vec![],
        }
    }

    /// Verifies that self.id is the result of hashing `start_hash` `self.num_hashes` times.
    /// If the Entry is not a Tick, the hash of its transactions is folded in as well.
    pub fn verify(&self, start_hash: &Hash) -> bool {
        let ref_hash = next_hash(start_hash, self.num_hashes, &self.transactions);
        if self.id != ref_hash {
            warn!(
                "next_hash is invalid expected: {:?} actual: {:?}",
                self.id, ref_hash
            );
            return false;
        }
        true
    }

    pub fn is_tick(&self) -> bool {
        self.transactions.is_empty()
    }
}

/// Creates the hash `num_hashes` after `start_hash`. If there are any transactions, the
/// final hash also incorporates the hash of those transactions. If `num_hashes` is zero
/// and there is no transaction data, `start_hash` is returned unchanged.
fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
    if num_hashes == 0 && transactions.is_empty() {
        return *start_hash;
    }

    let mut poh = Poh::new(*start_hash, 0);
    for _ in 1..num_hashes {
        poh.hash();
    }

    if transactions.is_empty() {
        poh.tick().id
    } else {
        poh.record(Transaction::hash(transactions)).id
    }
}
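
/// Deserializes each blob back into an Entry, returning the entries together with the
/// number of tick entries (entries with no transactions) among them.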
pub fn reconstruct_entries_from_blobs(blobs: Vec<SharedBlob>) -> Result<(Vec<Entry>, u64)> {
    let mut entries: Vec<Entry> = Vec::with_capacity(blobs.len());
    let mut num_ticks = 0;
    for blob in blobs {
        let entry: Entry = {
            let msg = blob.read().unwrap();
            let msg_size = msg.size()?;
            deserialize(&msg.data()[..msg_size]).expect("Error reconstructing entry")
        };
        if entry.is_tick() {
            num_ticks += 1
        }
        entries.push(entry)
    }
    Ok((entries, num_ticks))
}

#[cfg(test)]
/// Creates the next Tick or Transaction Entry `num_hashes` after `prev_id`.
pub fn next_entry(prev_id: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
    assert!(num_hashes > 0 || transactions.is_empty());
    Entry {
        tick_height: 0,
        num_hashes,
        id: next_hash(prev_id, num_hashes, &transactions),
        transactions,
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::entry::Entry;
    use chrono::prelude::*;
    use solana_sdk::budget_transaction::BudgetTransaction;
    use solana_sdk::hash::hash;
    use solana_sdk::signature::{Keypair, KeypairUtil};
    use solana_sdk::system_transaction::SystemTransaction;
    use solana_sdk::transaction::Transaction;

    #[test]
    fn test_entry_verify() {
        let zero = Hash::default();
        let one = hash(&zero.as_ref());
        assert!(Entry::new_tick(0, 0, &zero).verify(&zero)); // base case, never used
        assert!(!Entry::new_tick(1, 0, &zero).verify(&one)); // base case, bad
        assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step
        assert!(!next_entry(&zero, 1, vec![]).verify(&one)); // inductive step, bad
    }

    #[test]
    fn test_transaction_reorder_attack() {
        let zero = Hash::default();

        // First, verify entries
        let keypair = Keypair::new();
        let tx0 = Transaction::system_new(&keypair, keypair.pubkey(), 0, zero);
        let tx1 = Transaction::system_new(&keypair, keypair.pubkey(), 1, zero);
        let mut e0 = Entry::new(&zero, 0, 0, vec![tx0.clone(), tx1.clone()]);
        assert!(e0.verify(&zero));

        // Next, swap two transactions and ensure verification fails.
        e0.transactions[0] = tx1; // <-- attack
        e0.transactions[1] = tx0;
        assert!(!e0.verify(&zero));
    }

    #[test]
    fn test_witness_reorder_attack() {
        let zero = Hash::default();

        // First, verify entries
        let keypair = Keypair::new();
        let tx0 = Transaction::budget_new_timestamp(
            &keypair,
            keypair.pubkey(),
            keypair.pubkey(),
            Utc::now(),
            zero,
        );
        let tx1 =
            Transaction::budget_new_signature(&keypair, keypair.pubkey(), keypair.pubkey(), zero);
        let mut e0 = Entry::new(&zero, 0, 0, vec![tx0.clone(), tx1.clone()]);
        assert!(e0.verify(&zero));

        // Next, swap two witness transactions and ensure verification fails.
        e0.transactions[0] = tx1; // <-- attack
        e0.transactions[1] = tx0;
        assert!(!e0.verify(&zero));
    }

    #[test]
    fn test_next_entry() {
        let zero = Hash::default();

        let tick = next_entry(&zero, 1, vec![]);
        assert_eq!(tick.num_hashes, 1);
        assert_ne!(tick.id, zero);

        let tick = next_entry(&zero, 0, vec![]);
        assert_eq!(tick.num_hashes, 0);
        assert_eq!(tick.id, zero);

        let keypair = Keypair::new();
        let tx0 = Transaction::budget_new_timestamp(
            &keypair,
            keypair.pubkey(),
            keypair.pubkey(),
            Utc::now(),
            zero,
        );
        let entry0 = next_entry(&zero, 1, vec![tx0.clone()]);
        assert_eq!(entry0.num_hashes, 1);
        assert_eq!(entry0.id, next_hash(&zero, 1, &vec![tx0]));
    }

    #[test]
    #[should_panic]
    fn test_next_entry_panic() {
        let zero = Hash::default();
        let keypair = Keypair::new();
        let tx = Transaction::system_new(&keypair, keypair.pubkey(), 0, zero);
        next_entry(&zero, 0, vec![tx]);
    }
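
    // An illustrative round-trip sketch (hypothetical test, not from the original suite):
    // serialize a tick Entry into a blob with `to_blob` and rebuild it with
    // `reconstruct_entries_from_blobs`, which should also count it as one tick.
    #[test]
    fn test_entry_blob_round_trip() {
        let zero = Hash::default();
        let tick = next_entry(&zero, 1, vec![]);
        let blob = tick.to_blob();
        let (entries, num_ticks) = reconstruct_entries_from_blobs(vec![blob]).unwrap();
        assert_eq!(entries, vec![tick]);
        assert_eq!(num_ticks, 1);
    }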

    #[test]
    fn test_serialized_size() {
        let zero = Hash::default();
        let keypair = Keypair::new();
        let tx = Transaction::system_new(&keypair, keypair.pubkey(), 0, zero);
        let entry = next_entry(&zero, 1, vec![tx.clone()]);
        assert_eq!(
            Entry::serialized_size(&[tx]),
            serialized_size(&entry).unwrap()
        );
    }
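
    // An illustrative sketch for `num_will_fit` (hypothetical test): an empty slice fits
    // zero transactions, and a single small transaction easily fits in one blob.
    #[test]
    fn test_num_will_fit_sketch() {
        let zero = Hash::default();
        let keypair = Keypair::new();
        let tx = Transaction::system_new(&keypair, keypair.pubkey(), 0, zero);
        assert_eq!(Entry::num_will_fit(&[]), 0);
        assert_eq!(Entry::num_will_fit(&[tx]), 1);
    }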
}