2018-06-06 10:24:24 -07:00
|
|
|
//! The `entry_writer` module helps implement the TPU's write stage. It
//! writes entries to the given writer, which is typically a file or
//! stdout, registering each fully-written entry's id with the bank, and
//! can read serialized entries back in.
|
2018-05-11 21:36:16 -07:00
|
|
|
|
2018-05-14 14:33:11 -07:00
|
|
|
use bank::Bank;
|
2018-05-11 21:36:16 -07:00
|
|
|
use entry::Entry;
|
|
|
|
use serde_json;
|
2018-07-01 23:09:41 -07:00
|
|
|
use std::io::{self, BufRead, Error, ErrorKind, Write};
|
2018-05-11 21:36:16 -07:00
|
|
|
|
2018-07-01 13:25:21 -07:00
|
|
|
/// Writes serialized entries to an underlying writer while keeping the
/// given `Bank` informed of each completed entry id.
pub struct EntryWriter<'a, W> {
    // Bank that receives the id of every fully-written (non-partial) entry.
    bank: &'a Bank,
    // Destination for JSON-serialized entries — typically a file or stdout.
    writer: W,
}
|
|
|
|
|
2018-07-01 13:25:21 -07:00
|
|
|
impl<'a, W: Write> EntryWriter<'a, W> {
|
2018-05-14 14:33:11 -07:00
|
|
|
/// Create a new Tpu that wraps the given Bank.
|
2018-07-01 13:31:13 -07:00
|
|
|
pub fn new(bank: &'a Bank, writer: W) -> Self {
|
2018-07-01 13:25:21 -07:00
|
|
|
EntryWriter { bank, writer }
|
2018-05-11 21:36:16 -07:00
|
|
|
}
|
|
|
|
|
2018-07-01 13:31:13 -07:00
|
|
|
fn write_entry(writer: &mut W, entry: &Entry) -> io::Result<()> {
|
2018-07-02 00:04:19 -07:00
|
|
|
let serialized = serde_json::to_string(entry).unwrap();
|
2018-07-01 13:31:13 -07:00
|
|
|
writeln!(writer, "{}", serialized)
|
2018-07-01 08:34:56 -07:00
|
|
|
}
|
|
|
|
|
2018-07-02 00:04:19 -07:00
|
|
|
pub fn write_entries<I>(writer: &mut W, entries: I) -> io::Result<()>
|
|
|
|
where
|
|
|
|
I: IntoIterator<Item = Entry>,
|
|
|
|
{
|
2018-07-01 08:34:56 -07:00
|
|
|
for entry in entries {
|
2018-07-02 00:04:19 -07:00
|
|
|
Self::write_entry(writer, &entry)?;
|
2018-07-01 08:34:56 -07:00
|
|
|
}
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2018-07-01 13:31:13 -07:00
|
|
|
fn write_and_register_entry(&mut self, entry: &Entry) -> io::Result<()> {
|
2018-07-01 08:10:41 -07:00
|
|
|
trace!("write_and_register_entry entry");
|
2018-06-28 15:18:10 -07:00
|
|
|
if !entry.has_more {
|
|
|
|
self.bank.register_entry_id(&entry.id);
|
|
|
|
}
|
2018-07-01 13:31:13 -07:00
|
|
|
Self::write_entry(&mut self.writer, entry)
|
2018-05-11 21:36:16 -07:00
|
|
|
}
|
|
|
|
|
2018-07-01 13:31:13 -07:00
|
|
|
pub fn write_and_register_entries(&mut self, entries: &[Entry]) -> io::Result<()> {
|
2018-07-01 08:34:56 -07:00
|
|
|
for entry in entries {
|
2018-07-01 13:25:21 -07:00
|
|
|
self.write_and_register_entry(&entry)?;
|
2018-07-01 08:34:56 -07:00
|
|
|
}
|
|
|
|
Ok(())
|
|
|
|
}
|
2018-05-11 21:36:16 -07:00
|
|
|
}
|
2018-06-28 15:18:10 -07:00
|
|
|
|
2018-07-01 23:09:41 -07:00
|
|
|
pub fn read_entry(s: String) -> io::Result<Entry> {
|
|
|
|
serde_json::from_str(&s).map_err(|e| Error::new(ErrorKind::Other, e.to_string()))
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO: How to implement this without attaching the input's lifetime to the output?
|
|
|
|
pub fn read_entries<'a, R: BufRead>(
|
|
|
|
reader: &'a mut R,
|
|
|
|
) -> impl Iterator<Item = io::Result<Entry>> + 'a {
|
|
|
|
reader.lines().map(|s| read_entry(s?))
|
|
|
|
}
|
|
|
|
|
2018-06-28 15:18:10 -07:00
|
|
|
#[cfg(test)]
mod tests {
    use super::*;
    use ledger;
    use mint::Mint;
    use packet::BLOB_DATA_SIZE;
    use signature::{KeyPair, KeyPairUtil};
    use transaction::Transaction;

    #[test]
    fn test_dont_register_partial_entries() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);

        // Discard serialized output; only the bank-registration side effect matters.
        let writer = io::sink();
        let mut entry_writer = EntryWriter::new(&bank, writer);
        let keypair = KeyPair::new();
        let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, mint.last_id());

        // NOTE: if Entry grows to larger than a transaction, the code below falls over
        let threshold = (BLOB_DATA_SIZE / 256) - 1; // 256 is transaction size

        // Verify large entries are split up and the first sets has_more.
        let txs = vec![tx.clone(); threshold * 2];
        let entries = ledger::next_entries(&mint.last_id(), 0, txs);
        assert_eq!(entries.len(), 2);
        assert!(entries[0].has_more);
        assert!(!entries[1].has_more);

        // Verify that write_and_register_entry doesn't register the first entries after a split.
        assert_eq!(bank.last_id(), mint.last_id());
        entry_writer.write_and_register_entry(&entries[0]).unwrap();
        // Partial entry written: the bank's last_id must be unchanged.
        assert_eq!(bank.last_id(), mint.last_id());

        // Verify that write_and_register_entry registers the final entry after a split.
        entry_writer.write_and_register_entry(&entries[1]).unwrap();
        assert_eq!(bank.last_id(), entries[1].id);
    }
}
|