commit ac40c1818f
parent eb63dbcd2a

src/entry.rs (16 changes)
@@ -43,7 +43,8 @@ pub struct Entry {
     /// purposes of duplicate rejection
     pub has_more: bool,
 
-    erasure_pad: [u8; 3],
+    /// Erasure requires that Entry be a multiple of 4 bytes in size
+    pad: [u8; 3],
 }
 
 impl Entry {
@@ -61,11 +62,22 @@ impl Entry {
             id,
             transactions,
             has_more,
+            pad: [0, 0, 0],
         };
         assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
         entry
     }
 
+    pub fn will_fit(transactions: Vec<Transaction>) -> bool {
+        serialized_size(&Entry {
+            num_hashes: 0,
+            id: Hash::default(),
+            transactions,
+            has_more: false,
+            pad: [0, 0, 0],
+        }).unwrap() <= BLOB_DATA_SIZE as u64
+    }
+
     /// Creates the next Tick Entry `num_hashes` after `start_hash`.
     pub fn new_mut(
         start_hash: &mut Hash,
@@ -88,6 +100,7 @@ impl Entry {
             id: *id,
             transactions: vec![],
             has_more: false,
+            pad: [0, 0, 0],
         }
     }
 
@@ -137,6 +150,7 @@ pub fn next_entry(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transact
         id: next_hash(start_hash, num_hashes, &transactions),
         transactions,
         has_more: false,
+        pad: [0, 0, 0],
     }
 }
 
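
As an aside on the new `pad` field: its doc comment says erasure coding requires an Entry to be a multiple of 4 bytes when serialized. Below is a minimal test-style sketch of that property against this commit's struct layout; the module name, test name, and the byte arithmetic in the comment (in particular that `Hash` encodes as 32 bytes) are illustrative assumptions, not part of this change.

#[cfg(test)]
mod pad_alignment_sketch {
    use super::*;
    use bincode::serialized_size;

    #[test]
    fn empty_entry_is_a_multiple_of_four_bytes() {
        // Built inside a child module of entry.rs so the private `pad`
        // field is in scope. With bincode's default encoding this is
        // roughly 8 (num_hashes) + 32 (id) + 8 (empty Vec length)
        // + 1 (has_more) + 3 (pad) = 52 bytes.
        let entry = Entry {
            num_hashes: 0,
            id: Hash::default(),
            transactions: vec![],
            has_more: false,
            pad: [0, 0, 0],
        };
        assert_eq!(serialized_size(&entry).unwrap() % 4, 0);
    }
}
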
src/ledger.rs

@@ -1,10 +1,10 @@
 //! The `ledger` module provides functions for parallel verification of the
 //! Proof of History ledger.
 
-use bincode::{self, deserialize, serialize_into, serialized_size};
+use bincode::{self, deserialize, serialize_into};
 use entry::Entry;
 use hash::Hash;
-use packet::{self, SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
+use packet::{self, SharedBlob, BLOB_SIZE};
 use rayon::prelude::*;
 use std::collections::VecDeque;
 use std::io::Cursor;
@@ -78,13 +78,7 @@ pub fn next_entries_mut(
     let mut chunk_len = transactions.len();
 
     // check for fit, make sure they can be serialized
-    while serialized_size(&Entry {
-        num_hashes: 0,
-        id: Hash::default(),
-        transactions: transactions[0..chunk_len].to_vec(),
-        has_more: false,
-    }).unwrap() > BLOB_DATA_SIZE as u64
-    {
+    while !Entry::will_fit(transactions[0..chunk_len].to_vec()) {
         chunk_len /= 2;
     }
 
@@ -104,12 +98,6 @@ pub fn next_entries_mut(
             chunk.to_vec(),
             num_chunks > 0,
         ));
-        println!(
-            "transactions.len() = {}, chunk_len {}, num_chunks {}",
-            transactions.len(),
-            chunk_len,
-            num_chunks,
-        );
     }
     entries
 }
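
Taken together, the two hunks above mean next_entries_mut no longer reasons about BLOB_DATA_SIZE itself: the size check now lives behind Entry::will_fit, and the debug println! is dropped. A simplified restatement of the chunk-sizing logic as it reads after this change (assuming `transactions: Vec<Transaction>` as in the surrounding function):

// Halve the candidate prefix until an Entry holding it would serialize
// into a single blob; Entry::will_fit hides the BLOB_DATA_SIZE comparison.
let mut chunk_len = transactions.len();
while !Entry::will_fit(transactions[0..chunk_len].to_vec()) {
    chunk_len /= 2;
}
// Each chunk_len-sized slice then becomes one Entry; the `num_chunks > 0`
// argument in the diff above sets has_more on entries that have further
// chunks following them.
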
@@ -131,7 +119,7 @@ mod tests {
     use super::*;
     use entry::{next_entry, Entry};
     use hash::hash;
-    use packet::BlobRecycler;
+    use packet::{BlobRecycler, BLOB_DATA_SIZE};
     use signature::{KeyPair, KeyPairUtil};
     use std::net::{IpAddr, Ipv4Addr, SocketAddr};
     use transaction::Transaction;