//! The `entry` module is a fundamental building block of Proof of History. It contains a
//! unique ID that is the hash of the Entry before it, plus the hash of the
//! transactions within it. Entries cannot be reordered, and the `num_hashes` field
//! represents an approximate amount of time since the previous Entry was created.
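//!
//! A minimal sketch of how entries chain (doctest-ignored; `prev_id` stands in
//! for any prior PoH hash and `txs` for a batch of observed transactions):
//!
//! ```ignore
//! let e0 = next_entry(&prev_id, 1, txs); // e0.id folds prev_id and the txs together
//! let e1 = next_entry(&e0.id, 1, vec![]); // a tick chained after e0
//! assert!(e0.verify(&prev_id) && e1.verify(&e0.id));
//! ```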

use bincode::{serialize_into, serialized_size};
use hash::Hash;
use packet::{SharedBlob, BLOB_DATA_SIZE};
use poh::Poh;
use solana_sdk::pubkey::Pubkey;
use std::io::Cursor;
use std::mem::size_of;
use std::net::SocketAddr;
use std::sync::mpsc::{Receiver, Sender};
use transaction::Transaction;

pub type EntrySender = Sender<Vec<Entry>>;
pub type EntryReceiver = Receiver<Vec<Entry>>;

/// Each Entry contains three pieces of data. The `num_hashes` field is the number
/// of hashes performed since the previous entry. The `id` field is the result
/// of hashing `id` from the previous entry `num_hashes` times. The `transactions`
/// field points to Transactions that took place shortly before `id` was generated.
///
/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last Entry. Since processing power increases
/// over time, one should expect the duration `num_hashes` represents to decrease proportionally.
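/// For example, assuming a purely hypothetical rate of 2,000,000 hashes per
/// second, an entry with `num_hashes = 500_000` represents roughly
/// 500,000 / 2,000,000 = 0.25 seconds since the previous Entry.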
/// An upper bound on Duration can be estimated by assuming each hash was generated by the
/// world's fastest processor at the time the entry was recorded. Or said another way, it
/// is physically not possible for a shorter duration to have occurred if one assumes the
/// hash was computed by the world's fastest processor at that time. The hash chain is both
/// a Verifiable Delay Function (VDF) and a Proof of Work (not to be confused with Proof of
/// Work consensus!)
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Entry {
    /// The previous Entry ID.
    pub prev_id: Hash,

    /// The number of hashes since the previous Entry ID.
    pub num_hashes: u64,

    /// The SHA-256 hash `num_hashes` after the previous Entry ID.
    pub id: Hash,

    /// An unordered list of transactions that were observed before the Entry ID was
    /// generated. They may have been observed before a previous Entry ID but were
    /// pushed back into this list to ensure deterministic interpretation of the ledger.
    pub transactions: Vec<Transaction>,
}

impl Entry {
    /// Creates the next Entry `num_hashes` after `prev_id`.
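    ///
    /// A minimal sketch of typical use (doctest-ignored; `prev_id` is any prior
    /// PoH hash and `txs` any batch of observed transactions):
    ///
    /// ```ignore
    /// let entry = Entry::new(&prev_id, 12, txs);
    /// assert!(entry.verify(&prev_id)); // `id` chains from `prev_id`
    /// ```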
    pub fn new(prev_id: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Self {
        let entry = {
            if num_hashes == 0 && transactions.is_empty() {
                Entry {
                    prev_id: *prev_id,
                    num_hashes: 0,
                    id: *prev_id,
                    transactions,
                }
            } else if num_hashes == 0 {
                // If you passed in transactions, but passed in num_hashes == 0, then
                // next_hash will generate the next hash and set num_hashes == 1
                let id = next_hash(prev_id, 1, &transactions);
                Entry {
                    prev_id: *prev_id,
                    num_hashes: 1,
                    id,
                    transactions,
                }
            } else {
                // Otherwise, the next Entry `num_hashes` after `prev_id`.
                // If you wanted a tick for instance, then pass in num_hashes = 1
                // and transactions = empty
                let id = next_hash(prev_id, num_hashes, &transactions);
                Entry {
                    prev_id: *prev_id,
                    num_hashes,
                    id,
                    transactions,
                }
            }
        };

        let size = serialized_size(&entry).unwrap();
        if size > BLOB_DATA_SIZE as u64 {
            panic!(
                "Serialized entry size too large: {} ({} transactions)",
                size,
                entry.transactions.len()
            );
        }
        entry
    }
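
    /// Serializes this Entry into a default-sized SharedBlob, optionally stamping
    /// the blob's index, an id (e.g. the leader's pubkey), and a destination
    /// address into the blob's metadata.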
    pub fn to_blob(
        &self,
        idx: Option<u64>,
        id: Option<Pubkey>,
        addr: Option<&SocketAddr>,
    ) -> SharedBlob {
        let blob = SharedBlob::default();
        {
            let mut blob_w = blob.write().unwrap();
            let pos = {
                let mut out = Cursor::new(blob_w.data_mut());
                serialize_into(&mut out, &self).expect("failed to serialize output");
                out.position() as usize
            };
            blob_w.set_size(pos);

            if let Some(idx) = idx {
                blob_w.set_index(idx).expect("set_index()");
            }
            if let Some(id) = id {
                blob_w.set_id(&id).expect("set_id()");
            }
            if let Some(addr) = addr {
                blob_w.meta.set_addr(addr);
            }
            blob_w.set_flags(0).unwrap();
        }
        blob
    }

    /// Estimates the serialized size of an Entry without constructing one.
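    ///
    /// A worked example of the layout (assuming bincode's default 8-byte length
    /// prefix for the transaction vector): with no transactions this comes to
    /// 8 (num_hashes) + 32 (id) + 32 (prev_id) + 8 (empty vec) = 80 bytes.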
    pub fn serialized_size(transactions: &[Transaction]) -> u64 {
        let txs_size = serialized_size(transactions).unwrap();

        // num_hashes + id + prev_id + txs
        (size_of::<u64>() + 2 * size_of::<Hash>()) as u64 + txs_size
    }
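
    /// Estimates, by binary search over `Entry::serialized_size`, how many of the
    /// given transactions fit in a single blob of `BLOB_DATA_SIZE` bytes.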
    pub fn num_will_fit(transactions: &[Transaction]) -> usize {
        if transactions.is_empty() {
            return 0;
        }
        let mut num = transactions.len();
        let mut upper = transactions.len();
        let mut lower = 1; // if one won't fit, we have a lot of TODOs
        let mut next = transactions.len(); // optimistic
        loop {
            debug!(
                "num {}, upper {} lower {} next {} transactions.len() {}",
                num,
                upper,
                lower,
                next,
                transactions.len()
            );
            if Self::serialized_size(&transactions[..num]) <= BLOB_DATA_SIZE as u64 {
                next = (upper + num) / 2;
                lower = num;
                debug!("num {} fits, maybe too well? trying {}", num, next);
            } else {
                next = (lower + num) / 2;
                upper = num;
                debug!("num {} doesn't fit! trying {}", num, next);
            }
            // same as last time
            if next == num {
                debug!("converged on num {}", num);
                break;
            }
            num = next;
        }
        num
    }

    /// Creates the next Entry `num_hashes` after `start_hash`, advancing
    /// `start_hash` to the new Entry's id and resetting `num_hashes` to zero.
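    ///
    /// A minimal sketch of intended use (doctest-ignored; `last_id`, `num_hashes`,
    /// and `txs` are assumed PoH state and a transaction batch):
    ///
    /// ```ignore
    /// let entry = Entry::new_mut(&mut last_id, &mut num_hashes, txs);
    /// assert_eq!(last_id, entry.id); // state is threaded for the next call
    /// ```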
    pub fn new_mut(
        start_hash: &mut Hash,
        num_hashes: &mut u64,
        transactions: Vec<Transaction>,
    ) -> Self {
        let entry = Self::new(start_hash, *num_hashes, transactions);
        *start_hash = entry.id;
        *num_hashes = 0;
        assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
        entry
    }

    /// Creates an Entry from the number of hashes `num_hashes` since the previous
    /// transaction and that resulting `id`.
    pub fn new_tick(prev_id: &Hash, num_hashes: u64, id: &Hash) -> Self {
        Entry {
            prev_id: *prev_id,
            num_hashes,
            id: *id,
            transactions: vec![],
        }
    }
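
    /// Verifies that `id` is the hash `num_hashes` after this Entry's own
    /// `prev_id`, folding in its transactions; equivalent to calling `verify`
    /// with `prev_id`.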
    pub fn verify_self(&self) -> bool {
        self.id == next_hash(&self.prev_id, self.num_hashes, &self.transactions)
    }

    /// Verifies self.id is the result of hashing a `start_hash` `self.num_hashes` times.
    /// If the entry is not a Tick, the hash of its transactions is folded in as well.
    pub fn verify(&self, start_hash: &Hash) -> bool {
        let ref_hash = next_hash(start_hash, self.num_hashes, &self.transactions);
        if self.id != ref_hash {
            warn!(
                "next_hash is invalid expected: {:?} actual: {:?}",
                self.id, ref_hash
            );
            return false;
        }
        true
    }
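
    /// An Entry is a "tick" if it contains no transactions.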
    pub fn is_tick(&self) -> bool {
        self.transactions.is_empty()
    }
}

/// Creates the hash `num_hashes` after `start_hash`. If there are transactions,
/// the final hash mixes in their combined hash as well. If `num_hashes` is zero
/// and there's no transaction data, `start_hash` is returned.
fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
    if num_hashes == 0 && transactions.is_empty() {
        return *start_hash;
    }

    let mut poh = Poh::new(*start_hash, 0);

    for _ in 1..num_hashes {
        poh.hash();
    }

    if transactions.is_empty() {
        poh.tick().id
    } else {
        poh.record(Transaction::hash(transactions)).id
    }
}

/// Creates the next Tick or Transaction Entry `num_hashes` after `prev_id`.
pub fn next_entry(prev_id: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
    assert!(num_hashes > 0 || transactions.is_empty());
    Entry {
        prev_id: *prev_id,
        num_hashes,
        id: next_hash(prev_id, num_hashes, &transactions),
        transactions,
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use budget_transaction::BudgetTransaction;
    use chrono::prelude::*;
    use entry::Entry;
    use hash::hash;
    use signature::{Keypair, KeypairUtil};
    use system_transaction::SystemTransaction;
    use transaction::Transaction;

    #[test]
    fn test_entry_verify() {
        let zero = Hash::default();
        let one = hash(&zero.as_ref());
        assert!(Entry::new_tick(&zero, 0, &zero).verify(&zero)); // base case, never used
        assert!(!Entry::new_tick(&zero, 0, &zero).verify(&one)); // base case, bad
        assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step
        assert!(next_entry(&zero, 1, vec![]).verify_self()); // also inductive step
        assert!(!next_entry(&zero, 1, vec![]).verify(&one)); // inductive step, bad
    }

    #[test]
    fn test_transaction_reorder_attack() {
        let zero = Hash::default();

        // First, verify entries
        let keypair = Keypair::new();
        let tx0 = Transaction::system_new(&keypair, keypair.pubkey(), 0, zero);
        let tx1 = Transaction::system_new(&keypair, keypair.pubkey(), 1, zero);
        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
        assert!(e0.verify(&zero));

        // Next, swap two transactions and ensure verification fails.
        e0.transactions[0] = tx1; // <-- attack
        e0.transactions[1] = tx0;
        assert!(!e0.verify(&zero));
    }

    #[test]
    fn test_witness_reorder_attack() {
        let zero = Hash::default();

        // First, verify entries
        let keypair = Keypair::new();
        let tx0 = Transaction::budget_new_timestamp(
            &keypair,
            keypair.pubkey(),
            keypair.pubkey(),
            Utc::now(),
            zero,
        );
        let tx1 =
            Transaction::budget_new_signature(&keypair, keypair.pubkey(), keypair.pubkey(), zero);
        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
        assert!(e0.verify(&zero));

        // Next, swap two witness transactions and ensure verification fails.
        e0.transactions[0] = tx1; // <-- attack
        e0.transactions[1] = tx0;
        assert!(!e0.verify(&zero));
    }

    #[test]
    fn test_next_entry() {
        let zero = Hash::default();
        let tick = next_entry(&zero, 1, vec![]);
        assert_eq!(tick.num_hashes, 1);
        assert_ne!(tick.id, zero);

        let tick = next_entry(&zero, 0, vec![]);
        assert_eq!(tick.num_hashes, 0);
        assert_eq!(tick.id, zero);

        let keypair = Keypair::new();
        let tx0 = Transaction::budget_new_timestamp(
            &keypair,
            keypair.pubkey(),
            keypair.pubkey(),
            Utc::now(),
            zero,
        );
        let entry0 = next_entry(&zero, 1, vec![tx0.clone()]);
        assert_eq!(entry0.num_hashes, 1);
        assert_eq!(entry0.id, next_hash(&zero, 1, &vec![tx0]));
    }

    #[test]
    #[should_panic]
    fn test_next_entry_panic() {
        let zero = Hash::default();
        let keypair = Keypair::new();
        let tx = Transaction::system_new(&keypair, keypair.pubkey(), 0, zero);
        next_entry(&zero, 0, vec![tx]);
    }

    #[test]
    fn test_serialized_size() {
        let zero = Hash::default();
        let keypair = Keypair::new();
        let tx = Transaction::system_new(&keypair, keypair.pubkey(), 0, zero);
        let entry = next_entry(&zero, 1, vec![tx.clone()]);
        assert_eq!(
            Entry::serialized_size(&[tx]),
            serialized_size(&entry).unwrap()
        );
    }
}