Remove ledger.rs
Split it into entry.rs, which takes the entry-constructing functions and the EntrySlice trait, and db_ledger.rs, which takes the ledger helper test functions.
parent ebd676faaa · commit 491bca5e4b
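The net effect on call sites is that the old `solana::ledger` import surface splits in two. A minimal before/after sketch, using only paths that appear in the hunks below:

// Before the split, callers pulled everything from one module:
// use solana::ledger::{get_tmp_ledger_path, make_tiny_test_entries, EntrySlice};

// After the split, entry construction and the EntrySlice trait come from
// entry.rs, while the tmp-ledger test helpers come from db_ledger.rs:
use solana::db_ledger::get_tmp_ledger_path;
use solana::entry::{make_tiny_test_entries, EntrySlice};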
@@ -5,10 +5,8 @@ extern crate test;
 
 use rand::seq::SliceRandom;
 use rand::{thread_rng, Rng};
-use solana::db_ledger::DbLedger;
-use solana::ledger::{
-    get_tmp_ledger_path, make_large_test_entries, make_tiny_test_entries, EntrySlice,
-};
+use solana::db_ledger::{get_tmp_ledger_path, DbLedger};
+use solana::entry::{make_large_test_entries, make_tiny_test_entries, EntrySlice};
 use solana::packet::{Blob, BLOB_HEADER_SIZE};
 use test::Bencher;
 
@@ -2,8 +2,7 @@
 
 extern crate test;
 
-use solana::entry::reconstruct_entries_from_blobs;
-use solana::ledger::{next_entries, EntrySlice};
+use solana::entry::{next_entries, reconstruct_entries_from_blobs, EntrySlice};
 use solana_sdk::hash::{hash, Hash};
 use solana_sdk::signature::{Keypair, KeypairUtil};
 use solana_sdk::system_transaction::SystemTransaction;
@@ -7,9 +7,9 @@ use crate::accounts::{Accounts, ErrorCounters, InstructionAccounts, InstructionL
 use crate::checkpoint::Checkpoint;
 use crate::counter::Counter;
 use crate::entry::Entry;
+use crate::entry::EntrySlice;
 use crate::jsonrpc_macros::pubsub::Sink;
 use crate::leader_scheduler::LeaderScheduler;
-use crate::ledger::EntrySlice;
 use crate::mint::Mint;
 use crate::poh_recorder::PohRecorder;
 use crate::rpc::RpcSignatureStatus;
@@ -958,10 +958,8 @@ impl Bank {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::entry::next_entry;
-    use crate::entry::Entry;
+    use crate::entry::{next_entries, next_entry, Entry};
     use crate::jsonrpc_macros::pubsub::{Subscriber, SubscriptionId};
-    use crate::ledger;
     use crate::signature::GenKeys;
     use crate::status_deque;
     use crate::status_deque::StatusDequeError;
@@ -1261,7 +1259,7 @@ mod tests {
             1,
             last_id,
         )];
-        let mut e = ledger::next_entries(&hash, 0, txs);
+        let mut e = next_entries(&hash, 0, txs);
         entries.append(&mut e);
         hash = entries.last().unwrap().id;
         let tick = Entry::new(&hash, 0, num_hashes, vec![]);
@@ -268,7 +268,7 @@ mod tests {
     use super::*;
     use crate::bank::Bank;
     use crate::banking_stage::BankingStageReturnType;
-    use crate::ledger::EntrySlice;
+    use crate::entry::EntrySlice;
     use crate::mint::Mint;
     use crate::packet::to_packets;
     use solana_sdk::signature::{Keypair, KeypairUtil};
@@ -5,10 +5,10 @@ use crate::cluster_info::{ClusterInfo, ClusterInfoError, NodeInfo, DATA_PLANE_FA
 use crate::counter::Counter;
 use crate::db_ledger::DbLedger;
 use crate::entry::Entry;
+use crate::entry::EntrySlice;
 #[cfg(feature = "erasure")]
 use crate::erasure;
 use crate::leader_scheduler::LeaderScheduler;
-use crate::ledger::EntrySlice;
 use crate::packet::{index_blobs, SharedBlob};
 use crate::result::{Error, Result};
 use crate::service::Service;
@@ -360,9 +360,9 @@ impl Service for BroadcastService {
 mod test {
     use super::*;
     use crate::cluster_info::{ClusterInfo, Node};
+    use crate::db_ledger::get_tmp_ledger_path;
     use crate::db_ledger::DbLedger;
-    use crate::ledger::create_ticks;
-    use crate::ledger::get_tmp_ledger_path;
+    use crate::entry::create_ticks;
     use crate::service::Service;
     use crate::window::new_window;
     use solana_sdk::hash::Hash;
@@ -85,9 +85,9 @@ pub fn chacha_cbc_encrypt_ledger(
 #[cfg(test)]
 mod tests {
     use crate::chacha::chacha_cbc_encrypt_ledger;
+    use crate::db_ledger::get_tmp_ledger_path;
     use crate::db_ledger::{DbLedger, DEFAULT_SLOT_HEIGHT};
     use crate::entry::Entry;
-    use crate::ledger::get_tmp_ledger_path;
     use ring::signature::Ed25519KeyPair;
     use solana_sdk::budget_transaction::BudgetTransaction;
     use solana_sdk::hash::{hash, Hash, Hasher};
@@ -108,8 +108,9 @@ pub fn chacha_cbc_encrypt_file_many_keys(
 mod tests {
     use crate::chacha::chacha_cbc_encrypt_ledger;
     use crate::chacha_cuda::chacha_cbc_encrypt_file_many_keys;
+    use crate::db_ledger::get_tmp_ledger_path;
     use crate::db_ledger::{DbLedger, DEFAULT_SLOT_HEIGHT};
-    use crate::ledger::{get_tmp_ledger_path, make_tiny_test_entries};
+    use crate::entry::make_tiny_test_entries;
     use crate::replicator::sample_file;
     use solana_sdk::hash::Hash;
     use std::fs::{remove_dir_all, remove_file};
@@ -1312,8 +1312,8 @@ fn report_time_spent(label: &str, time: &Duration, extra: &str) {
 mod tests {
     use super::*;
     use crate::crds_value::CrdsValueLabel;
+    use crate::db_ledger::get_tmp_ledger_path;
     use crate::db_ledger::DbLedger;
-    use crate::ledger::get_tmp_ledger_path;
     use crate::packet::BLOB_HEADER_SIZE;
     use crate::result::Error;
     use solana_sdk::signature::{Keypair, KeypairUtil};
@@ -2,7 +2,9 @@
 //! Proof of History ledger as well as iterative read, append write, and random
 //! access read to a persistent file-based ledger.
 
+use crate::entry::create_ticks;
 use crate::entry::Entry;
+use crate::mint::Mint;
 use crate::packet::{Blob, SharedBlob, BLOB_HEADER_SIZE};
 use crate::result::{Error, Result};
 use bincode::{deserialize, serialize};
@@ -10,10 +12,11 @@ use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
 use rocksdb::{ColumnFamily, ColumnFamilyDescriptor, DBRawIterator, Options, WriteBatch, DB};
 use serde::de::DeserializeOwned;
 use serde::Serialize;
+use solana_sdk::pubkey::Pubkey;
 use solana_sdk::signature::{Keypair, KeypairUtil};
 use std::borrow::Borrow;
 use std::cmp;
-use std::fs::create_dir_all;
+use std::fs::{create_dir_all, remove_dir_all};
 use std::io;
 use std::path::Path;
 use std::sync::Arc;
@@ -840,10 +843,85 @@ where
     Ok(())
 }
 
+pub fn get_tmp_ledger_path(name: &str) -> String {
+    use std::env;
+    let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
+    let keypair = Keypair::new();
+
+    let path = format!("{}/tmp/ledger-{}-{}", out_dir, name, keypair.pubkey());
+
+    // whack any possible collision
+    let _ignored = remove_dir_all(&path);
+
+    path
+}
+
+pub fn create_tmp_ledger_with_mint(name: &str, mint: &Mint) -> String {
+    let path = get_tmp_ledger_path(name);
+    DbLedger::destroy(&path).expect("Expected successful database destruction");
+    let db_ledger = DbLedger::open(&path).unwrap();
+    db_ledger
+        .write_entries(DEFAULT_SLOT_HEIGHT, 0, &mint.create_entries())
+        .unwrap();
+
+    path
+}
+
+pub fn create_tmp_genesis(
+    name: &str,
+    num: u64,
+    bootstrap_leader_id: Pubkey,
+    bootstrap_leader_tokens: u64,
+) -> (Mint, String) {
+    let mint = Mint::new_with_leader(num, bootstrap_leader_id, bootstrap_leader_tokens);
+    let path = create_tmp_ledger_with_mint(name, &mint);
+
+    (mint, path)
+}
+
+pub fn create_tmp_sample_ledger(
+    name: &str,
+    num_tokens: u64,
+    num_ending_ticks: usize,
+    bootstrap_leader_id: Pubkey,
+    bootstrap_leader_tokens: u64,
+) -> (Mint, String, Vec<Entry>) {
+    let mint = Mint::new_with_leader(num_tokens, bootstrap_leader_id, bootstrap_leader_tokens);
+    let path = get_tmp_ledger_path(name);
+
+    // Create the entries
+    let mut genesis = mint.create_entries();
+    let ticks = create_ticks(num_ending_ticks, mint.last_id());
+    genesis.extend(ticks);
+
+    DbLedger::destroy(&path).expect("Expected successful database destruction");
+    let db_ledger = DbLedger::open(&path).unwrap();
+    db_ledger
+        .write_entries(DEFAULT_SLOT_HEIGHT, 0, &genesis)
+        .unwrap();
+
+    (mint, path, genesis)
+}
+
+pub fn tmp_copy_ledger(from: &str, name: &str) -> String {
+    let tostr = get_tmp_ledger_path(name);
+
+    let db_ledger = DbLedger::open(from).unwrap();
+    let ledger_entries = db_ledger.read_ledger().unwrap();
+
+    DbLedger::destroy(&tostr).expect("Expected successful database destruction");
+    let db_ledger = DbLedger::open(&tostr).unwrap();
+    db_ledger
+        .write_entries(DEFAULT_SLOT_HEIGHT, 0, ledger_entries)
+        .unwrap();
+
+    tostr
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::ledger::{get_tmp_ledger_path, make_tiny_test_entries, EntrySlice};
+    use crate::entry::{make_tiny_test_entries, EntrySlice};
     use crate::packet::index_blobs;
 
     #[test]
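The helpers added above layer on one another: create_tmp_genesis builds a Mint and persists its entries through create_tmp_ledger_with_mint, and tmp_copy_ledger round-trips an existing ledger through read_ledger/write_entries. A hedged, test-only usage sketch; the ledger names and token amounts here are arbitrary:

use solana::db_ledger::{create_tmp_genesis, tmp_copy_ledger, DbLedger};
use solana_sdk::signature::{Keypair, KeypairUtil};

fn main() {
    // Build a throwaway genesis ledger: 10_000 tokens, 500 to the bootstrap leader.
    let leader = Keypair::new();
    let (_mint, path) = create_tmp_genesis("demo", 10_000, leader.pubkey(), 500);

    // Duplicate it under a fresh OUT_DIR/tmp path; both copies hold the same entries.
    let copy = tmp_copy_ledger(&path, "demo_copy");

    // The helpers never clean up after themselves, so destroy both when done.
    DbLedger::destroy(&path).unwrap();
    DbLedger::destroy(&copy).unwrap();
}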
@@ -295,13 +295,14 @@ fn try_erasure(db_ledger: &Arc<DbLedger>, consume_queue: &mut Vec<Entry>) -> Res
 #[cfg(test)]
 mod test {
     use super::*;
+    use crate::db_ledger::get_tmp_ledger_path;
     #[cfg(all(feature = "erasure", test))]
     use crate::entry::reconstruct_entries_from_blobs;
+    use crate::entry::{make_tiny_test_entries, EntrySlice};
     #[cfg(all(feature = "erasure", test))]
     use crate::erasure::test::{generate_db_ledger_from_window, setup_window_ledger};
     #[cfg(all(feature = "erasure", test))]
     use crate::erasure::{NUM_CODING, NUM_DATA};
-    use crate::ledger::{get_tmp_ledger_path, make_tiny_test_entries, EntrySlice};
     use crate::packet::{index_blobs, Blob, Packet, Packets, SharedBlob, PACKET_DATA_SIZE};
     use crate::streamer::{receiver, responder, PacketReceiver};
     use solana_sdk::signature::{Keypair, KeypairUtil};
src/entry.rs (348 changed lines)
@@ -6,8 +6,15 @@ use crate::packet::{Blob, SharedBlob, BLOB_DATA_SIZE};
 use crate::poh::Poh;
 use crate::result::Result;
 use bincode::{deserialize, serialize_into, serialized_size};
-use solana_sdk::hash::Hash;
+use chrono::prelude::Utc;
+use rayon::prelude::*;
+use solana_sdk::budget_transaction::BudgetTransaction;
+use solana_sdk::hash::{hash, Hash};
+use solana_sdk::pubkey::Pubkey;
+use solana_sdk::signature::{Keypair, KeypairUtil};
 use solana_sdk::transaction::Transaction;
+use solana_sdk::vote_program::Vote;
+use solana_sdk::vote_transaction::VoteTransaction;
 use std::borrow::Borrow;
 use std::io::Cursor;
 use std::mem::size_of;
@@ -250,6 +257,210 @@ where
     Ok((entries, num_ticks))
 }
 
+// an EntrySlice is a slice of Entries
+pub trait EntrySlice {
+    /// Verifies the hashes and counts of a slice of transactions are all consistent.
+    fn verify(&self, start_hash: &Hash) -> bool;
+    fn to_shared_blobs(&self) -> Vec<SharedBlob>;
+    fn to_blobs(&self) -> Vec<Blob>;
+    fn votes(&self) -> Vec<(Pubkey, Vote, Hash)>;
+}
+
+impl EntrySlice for [Entry] {
+    fn verify(&self, start_hash: &Hash) -> bool {
+        let genesis = [Entry {
+            tick_height: 0,
+            num_hashes: 0,
+            id: *start_hash,
+            transactions: vec![],
+        }];
+        let entry_pairs = genesis.par_iter().chain(self).zip(self);
+        entry_pairs.all(|(x0, x1)| {
+            let r = x1.verify(&x0.id);
+            if !r {
+                warn!(
+                    "entry invalid!: x0: {:?}, x1: {:?} num txs: {}",
+                    x0.id,
+                    x1.id,
+                    x1.transactions.len()
+                );
+            }
+            r
+        })
+    }
+
+    fn to_blobs(&self) -> Vec<Blob> {
+        self.iter().map(|entry| entry.to_blob()).collect()
+    }
+
+    fn to_shared_blobs(&self) -> Vec<SharedBlob> {
+        self.iter().map(|entry| entry.to_shared_blob()).collect()
+    }
+
+    fn votes(&self) -> Vec<(Pubkey, Vote, Hash)> {
+        self.iter()
+            .flat_map(|entry| {
+                entry
+                    .transactions
+                    .iter()
+                    .flat_map(VoteTransaction::get_votes)
+            })
+            .collect()
+    }
+}
+
+/// Creates the next entries for given transactions, outputs
+/// updates start_hash to id of last Entry, sets num_hashes to 0
+pub fn next_entries_mut(
+    start_hash: &mut Hash,
+    num_hashes: &mut u64,
+    transactions: Vec<Transaction>,
+) -> Vec<Entry> {
+    // TODO: ?? find a number that works better than |?
+    //                                               V
+    if transactions.is_empty() || transactions.len() == 1 {
+        vec![Entry::new_mut(start_hash, num_hashes, transactions)]
+    } else {
+        let mut chunk_start = 0;
+        let mut entries = Vec::new();
+
+        while chunk_start < transactions.len() {
+            let mut chunk_end = transactions.len();
+            let mut upper = chunk_end;
+            let mut lower = chunk_start;
+            let mut next = chunk_end; // be optimistic that all will fit
+
+            // binary search for how many transactions will fit in an Entry (i.e. a BLOB)
+            loop {
+                debug!(
+                    "chunk_end {}, upper {} lower {} next {} transactions.len() {}",
+                    chunk_end,
+                    upper,
+                    lower,
+                    next,
+                    transactions.len()
+                );
+                if Entry::serialized_size(&transactions[chunk_start..chunk_end])
+                    <= BLOB_DATA_SIZE as u64
+                {
+                    next = (upper + chunk_end) / 2;
+                    lower = chunk_end;
+                    debug!(
+                        "chunk_end {} fits, maybe too well? trying {}",
+                        chunk_end, next
+                    );
+                } else {
+                    next = (lower + chunk_end) / 2;
+                    upper = chunk_end;
+                    debug!("chunk_end {} doesn't fit! trying {}", chunk_end, next);
+                }
+                // same as last time
+                if next == chunk_end {
+                    debug!("converged on chunk_end {}", chunk_end);
+                    break;
+                }
+                chunk_end = next;
+            }
+            entries.push(Entry::new_mut(
+                start_hash,
+                num_hashes,
+                transactions[chunk_start..chunk_end].to_vec(),
+            ));
+            chunk_start = chunk_end;
+        }
+
+        entries
+    }
+}
+
+/// Creates the next Entries for given transactions
+pub fn next_entries(
+    start_hash: &Hash,
+    num_hashes: u64,
+    transactions: Vec<Transaction>,
+) -> Vec<Entry> {
+    let mut id = *start_hash;
+    let mut num_hashes = num_hashes;
+    next_entries_mut(&mut id, &mut num_hashes, transactions)
+}
+
+pub fn create_ticks(num_ticks: usize, mut hash: Hash) -> Vec<Entry> {
+    let mut ticks = Vec::with_capacity(num_ticks as usize);
+    for _ in 0..num_ticks as u64 {
+        let new_tick = Entry::new(&hash, 0, 1, vec![]);
+        hash = new_tick.id;
+        ticks.push(new_tick);
+    }
+
+    ticks
+}
+
+pub fn make_tiny_test_entries(num: usize) -> Vec<Entry> {
+    let zero = Hash::default();
+    let one = hash(&zero.as_ref());
+    let keypair = Keypair::new();
+
+    let mut id = one;
+    let mut num_hashes = 0;
+    (0..num)
+        .map(|_| {
+            Entry::new_mut(
+                &mut id,
+                &mut num_hashes,
+                vec![Transaction::budget_new_timestamp(
+                    &keypair,
+                    keypair.pubkey(),
+                    keypair.pubkey(),
+                    Utc::now(),
+                    one,
+                )],
+            )
+        })
+        .collect()
+}
+
+pub fn make_large_test_entries(num_entries: usize) -> Vec<Entry> {
+    let zero = Hash::default();
+    let one = hash(&zero.as_ref());
+    let keypair = Keypair::new();
+
+    let tx = Transaction::budget_new_timestamp(
+        &keypair,
+        keypair.pubkey(),
+        keypair.pubkey(),
+        Utc::now(),
+        one,
+    );
+
+    let serialized_size = serialized_size(&vec![&tx]).unwrap();
+    let num_txs = BLOB_DATA_SIZE / serialized_size as usize;
+    let txs = vec![tx; num_txs];
+    let entry = next_entries(&one, 1, txs)[0].clone();
+    vec![entry; num_entries]
+}
+
+#[cfg(test)]
+pub fn make_consecutive_blobs(
+    id: &Pubkey,
+    num_blobs_to_make: u64,
+    start_height: u64,
+    start_hash: Hash,
+    addr: &std::net::SocketAddr,
+) -> Vec<SharedBlob> {
+    let entries = create_ticks(num_blobs_to_make as usize, start_hash);
+
+    let blobs = entries.to_shared_blobs();
+    let mut index = start_height;
+    for blob in &blobs {
+        let mut blob = blob.write().unwrap();
+        blob.set_index(index).unwrap();
+        blob.set_id(id).unwrap();
+        blob.meta.set_addr(addr);
+        index += 1;
+    }
+    blobs
+}
+
 #[cfg(test)]
 /// Creates the next Tick or Transaction Entry `num_hashes` after `start_hash`.
 pub fn next_entry(prev_id: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
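EntrySlice::verify checks the PoH chain by prepending a synthetic genesis entry whose id is start_hash, then zipping the slice against itself shifted by one, so entry i is verified against the id of entry i-1 (in parallel, via rayon's par_iter). A standalone model of just that pairing logic, with hypothetical stand-in types instead of real entries:

// Stand-in for an Entry: `id` models the PoH hash, `prev` models what the
// entry's own verify() would check against its predecessor's id.
struct E {
    id: u64,
    prev: u64,
}

// Mirrors genesis.par_iter().chain(self).zip(self) from the diff, with plain
// iterators: pairs are (genesis, e0), (e0, e1), (e1, e2), ...
fn verify_chain(start: u64, entries: &[E]) -> bool {
    let genesis = [E { id: start, prev: start }];
    genesis
        .iter()
        .chain(entries)
        .zip(entries)
        .all(|(x0, x1)| x1.prev == x0.id)
}

fn main() {
    let good = [E { id: 1, prev: 0 }, E { id: 2, prev: 1 }];
    assert!(verify_chain(0, &good));

    let bad = [E { id: 1, prev: 0 }, E { id: 2, prev: 9 }];
    assert!(!verify_chain(0, &bad)); // second entry doesn't extend the first
}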
@@ -266,12 +477,13 @@ pub fn next_entry(prev_id: &Hash, num_hashes: u64, transactions: Vec<Transaction
 mod tests {
     use super::*;
     use crate::entry::Entry;
-    use chrono::prelude::*;
+    use crate::packet::{to_blobs, BLOB_DATA_SIZE, PACKET_DATA_SIZE};
     use solana_sdk::budget_transaction::BudgetTransaction;
     use solana_sdk::hash::hash;
-    use solana_sdk::signature::{Keypair, KeypairUtil};
+    use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
     use solana_sdk::system_transaction::SystemTransaction;
     use solana_sdk::transaction::Transaction;
+    use std::net::{IpAddr, Ipv4Addr, SocketAddr};
 
     #[test]
     fn test_entry_verify() {
@@ -368,4 +580,134 @@ mod tests {
             serialized_size(&entry).unwrap()
         );
     }
+
+    #[test]
+    fn test_verify_slice() {
+        solana_logger::setup();
+        let zero = Hash::default();
+        let one = hash(&zero.as_ref());
+        assert!(vec![][..].verify(&zero)); // base case
+        assert!(vec![Entry::new_tick(0, 0, &zero)][..].verify(&zero)); // singleton case 1
+        assert!(!vec![Entry::new_tick(0, 0, &zero)][..].verify(&one)); // singleton case 2, bad
+        assert!(vec![next_entry(&zero, 0, vec![]); 2][..].verify(&zero)); // inductive step
+
+        let mut bad_ticks = vec![next_entry(&zero, 0, vec![]); 2];
+        bad_ticks[1].id = one;
+        assert!(!bad_ticks.verify(&zero)); // inductive step, bad
+    }
+
+    fn make_test_entries() -> Vec<Entry> {
+        let zero = Hash::default();
+        let one = hash(&zero.as_ref());
+        let keypair = Keypair::new();
+        let vote_account = Keypair::new();
+        let tx = Transaction::vote_new(&vote_account.pubkey(), Vote { tick_height: 1 }, one, 1);
+        let msg = tx.get_sign_data();
+        let sig = Signature::new(&vote_account.sign(&msg).as_ref());
+        let tx0 = Transaction {
+            signatures: vec![sig],
+            account_keys: tx.account_keys,
+            last_id: tx.last_id,
+            fee: tx.fee,
+            program_ids: tx.program_ids,
+            instructions: tx.instructions,
+        };
+        let tx1 = Transaction::budget_new_timestamp(
+            &keypair,
+            keypair.pubkey(),
+            keypair.pubkey(),
+            Utc::now(),
+            one,
+        );
+        //
+        // TODO: this magic number and the mix of transaction types
+        //       is designed to fill up a Blob more or less exactly,
+        //       to get near enough the the threshold that
+        //       deserialization falls over if it uses the wrong size()
+        //       parameter to index into blob.data()
+        //
+        // magic numbers -----------------+
+        //                                |
+        //                                V
+        let mut transactions = vec![tx0; 362];
+        transactions.extend(vec![tx1; 100]);
+        next_entries(&zero, 0, transactions)
+    }
+
+    #[test]
+    fn test_entries_to_shared_blobs() {
+        solana_logger::setup();
+        let entries = make_test_entries();
+
+        let blob_q = entries.to_blobs();
+
+        assert_eq!(reconstruct_entries_from_blobs(blob_q).unwrap().0, entries);
+    }
+
+    #[test]
+    fn test_bad_blobs_attack() {
+        solana_logger::setup();
+        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
+        let blobs_q = to_blobs(vec![(0, addr)]).unwrap(); // <-- attack!
+        assert!(reconstruct_entries_from_blobs(blobs_q).is_err());
+    }
+
+    #[test]
+    fn test_next_entries() {
+        solana_logger::setup();
+        let id = Hash::default();
+        let next_id = hash(&id.as_ref());
+        let keypair = Keypair::new();
+        let vote_account = Keypair::new();
+        let tx = Transaction::vote_new(&vote_account.pubkey(), Vote { tick_height: 1 }, next_id, 2);
+        let msg = tx.get_sign_data();
+        let sig = Signature::new(&vote_account.sign(&msg).as_ref());
+        let tx_small = Transaction {
+            signatures: vec![sig],
+            account_keys: tx.account_keys,
+            last_id: tx.last_id,
+            fee: tx.fee,
+            program_ids: tx.program_ids,
+            instructions: tx.instructions,
+        };
+        let tx_large = Transaction::budget_new(&keypair, keypair.pubkey(), 1, next_id);
+
+        let tx_small_size = serialized_size(&tx_small).unwrap() as usize;
+        let tx_large_size = serialized_size(&tx_large).unwrap() as usize;
+        let entry_size = serialized_size(&Entry {
+            tick_height: 0,
+            num_hashes: 0,
+            id: Hash::default(),
+            transactions: vec![],
+        })
+        .unwrap() as usize;
+        assert!(tx_small_size < tx_large_size);
+        assert!(tx_large_size < PACKET_DATA_SIZE);
+
+        let threshold = (BLOB_DATA_SIZE - entry_size) / tx_small_size;
+
+        // verify no split
+        let transactions = vec![tx_small.clone(); threshold];
+        let entries0 = next_entries(&id, 0, transactions.clone());
+        assert_eq!(entries0.len(), 1);
+        assert!(entries0.verify(&id));
+
+        // verify the split with uniform transactions
+        let transactions = vec![tx_small.clone(); threshold * 2];
+        let entries0 = next_entries(&id, 0, transactions.clone());
+        assert_eq!(entries0.len(), 2);
+        assert!(entries0.verify(&id));
+
+        // verify the split with small transactions followed by large
+        // transactions
+        let mut transactions = vec![tx_small.clone(); BLOB_DATA_SIZE / tx_small_size];
+        let large_transactions = vec![tx_large.clone(); BLOB_DATA_SIZE / tx_large_size];
+
+        transactions.extend(large_transactions);
+
+        let entries0 = next_entries(&id, 0, transactions.clone());
+        assert!(entries0.len() >= 2);
+        assert!(entries0.verify(&id));
+    }
 }
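test_next_entries predicts the no-split boundary arithmetically: one entry fits in a blob while the empty-entry overhead plus per-transaction cost stays under BLOB_DATA_SIZE, so threshold = (BLOB_DATA_SIZE - entry_size) / tx_small_size transactions fit in one entry and threshold * 2 must split into exactly two. A toy check of that arithmetic with invented sizes (the real values come from serialized_size and the constants in packet.rs, and the real splitter binary-searches rather than ceiling-divides):

fn main() {
    // Invented sizes, for illustration only.
    let blob_data_size = 65_536; // stand-in for BLOB_DATA_SIZE
    let entry_overhead = 48;     // stand-in for an empty Entry's serialized size
    let tx_small_size = 256;     // stand-in for one small transaction

    // Largest transaction count that still serializes into a single blob.
    let threshold = (blob_data_size - entry_overhead) / tx_small_size;
    assert_eq!(threshold, 255);

    // Ceiling division models the minimum entry count for uniform transactions.
    let needed_entries = |n: usize| (n + threshold - 1) / threshold;
    assert_eq!(needed_entries(threshold), 1);     // no split
    assert_eq!(needed_entries(threshold * 2), 2); // exactly two entries
}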
@@ -548,8 +548,9 @@ fn categorize_blob(
 #[cfg(test)]
 pub mod test {
     use super::*;
+    use crate::db_ledger::get_tmp_ledger_path;
     use crate::db_ledger::{DbLedger, DEFAULT_SLOT_HEIGHT};
-    use crate::ledger::{get_tmp_ledger_path, make_tiny_test_entries, EntrySlice};
+    use crate::entry::{make_tiny_test_entries, EntrySlice};
     use crate::packet::{index_blobs, SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
     use crate::window::WindowSlot;
@@ -654,13 +654,11 @@ mod tests {
     use crate::cluster_info::Node;
     use crate::create_vote_account::*;
     use crate::db_ledger::*;
+    use crate::entry::make_consecutive_blobs;
     use crate::fullnode::{Fullnode, FullnodeReturnType, NodeRole, TvuReturnType};
     use crate::leader_scheduler::{
         make_active_set_entries, LeaderScheduler, LeaderSchedulerConfig,
     };
-    use crate::ledger::{
-        create_tmp_genesis, create_tmp_sample_ledger, make_consecutive_blobs, tmp_copy_ledger,
-    };
     use crate::rpc_request::{RpcClient, RpcRequest};
     use crate::service::Service;
     use crate::streamer::responder;
@@ -3,8 +3,7 @@
 
 use crate::bank::Bank;
 
-use crate::entry::Entry;
-use crate::ledger::create_ticks;
+use crate::entry::{create_ticks, Entry};
 use crate::rpc_request::{RpcClient, RpcRequest};
 use bincode::serialize;
 use byteorder::{LittleEndian, ReadBytesExt};
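create_ticks, now exported from entry.rs, chains empty tick entries: each tick performs one hash off the previous entry's id. A small hedged usage sketch:

use solana::entry::create_ticks;
use solana_sdk::hash::Hash;

fn main() {
    // Five empty ticks, each with num_hashes = 1, chained off a zero hash.
    let ticks = create_ticks(5, Hash::default());
    assert_eq!(ticks.len(), 5);
    // Each tick's id seeds the next one's PoH, so consecutive ids differ.
    assert_ne!(ticks[0].id, ticks[1].id);
}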
src/ledger.rs (442 changed lines)
@@ -1,442 +0,0 @@
-//! The `ledger` module provides functions for parallel verification of the
-//! Proof of History ledger as well as iterative read, append write, and random
-//! access read to a persistent file-based ledger.
-
-use crate::db_ledger::{DbLedger, DEFAULT_SLOT_HEIGHT};
-use crate::entry::Entry;
-use crate::mint::Mint;
-use crate::packet::{Blob, SharedBlob, BLOB_DATA_SIZE};
-use bincode::{self, serialized_size};
-use chrono::prelude::Utc;
-use rayon::prelude::*;
-use solana_sdk::budget_transaction::BudgetTransaction;
-use solana_sdk::hash::{hash, Hash};
-use solana_sdk::pubkey::Pubkey;
-use solana_sdk::signature::{Keypair, KeypairUtil};
-use solana_sdk::transaction::Transaction;
-use solana_sdk::vote_program::Vote;
-use solana_sdk::vote_transaction::VoteTransaction;
-use std::fs::remove_dir_all;
-
-// an EntrySlice is a slice of Entries
-pub trait EntrySlice {
-    /// Verifies the hashes and counts of a slice of transactions are all consistent.
-    fn verify(&self, start_hash: &Hash) -> bool;
-    fn to_shared_blobs(&self) -> Vec<SharedBlob>;
-    fn to_blobs(&self) -> Vec<Blob>;
-    fn votes(&self) -> Vec<(Pubkey, Vote, Hash)>;
-}
-
-impl EntrySlice for [Entry] {
-    fn verify(&self, start_hash: &Hash) -> bool {
-        let genesis = [Entry {
-            tick_height: 0,
-            num_hashes: 0,
-            id: *start_hash,
-            transactions: vec![],
-        }];
-        let entry_pairs = genesis.par_iter().chain(self).zip(self);
-        entry_pairs.all(|(x0, x1)| {
-            let r = x1.verify(&x0.id);
-            if !r {
-                warn!(
-                    "entry invalid!: x0: {:?}, x1: {:?} num txs: {}",
-                    x0.id,
-                    x1.id,
-                    x1.transactions.len()
-                );
-            }
-            r
-        })
-    }
-
-    fn to_blobs(&self) -> Vec<Blob> {
-        self.iter().map(|entry| entry.to_blob()).collect()
-    }
-
-    fn to_shared_blobs(&self) -> Vec<SharedBlob> {
-        self.iter().map(|entry| entry.to_shared_blob()).collect()
-    }
-
-    fn votes(&self) -> Vec<(Pubkey, Vote, Hash)> {
-        self.iter()
-            .flat_map(|entry| {
-                entry
-                    .transactions
-                    .iter()
-                    .flat_map(VoteTransaction::get_votes)
-            })
-            .collect()
-    }
-}
-
-/// Creates the next entries for given transactions, outputs
-/// updates start_hash to id of last Entry, sets num_hashes to 0
-pub fn next_entries_mut(
-    start_hash: &mut Hash,
-    num_hashes: &mut u64,
-    transactions: Vec<Transaction>,
-) -> Vec<Entry> {
-    // TODO: ?? find a number that works better than |?
-    //                                               V
-    if transactions.is_empty() || transactions.len() == 1 {
-        vec![Entry::new_mut(start_hash, num_hashes, transactions)]
-    } else {
-        let mut chunk_start = 0;
-        let mut entries = Vec::new();
-
-        while chunk_start < transactions.len() {
-            let mut chunk_end = transactions.len();
-            let mut upper = chunk_end;
-            let mut lower = chunk_start;
-            let mut next = chunk_end; // be optimistic that all will fit
-
-            // binary search for how many transactions will fit in an Entry (i.e. a BLOB)
-            loop {
-                debug!(
-                    "chunk_end {}, upper {} lower {} next {} transactions.len() {}",
-                    chunk_end,
-                    upper,
-                    lower,
-                    next,
-                    transactions.len()
-                );
-                if Entry::serialized_size(&transactions[chunk_start..chunk_end])
-                    <= BLOB_DATA_SIZE as u64
-                {
-                    next = (upper + chunk_end) / 2;
-                    lower = chunk_end;
-                    debug!(
-                        "chunk_end {} fits, maybe too well? trying {}",
-                        chunk_end, next
-                    );
-                } else {
-                    next = (lower + chunk_end) / 2;
-                    upper = chunk_end;
-                    debug!("chunk_end {} doesn't fit! trying {}", chunk_end, next);
-                }
-                // same as last time
-                if next == chunk_end {
-                    debug!("converged on chunk_end {}", chunk_end);
-                    break;
-                }
-                chunk_end = next;
-            }
-            entries.push(Entry::new_mut(
-                start_hash,
-                num_hashes,
-                transactions[chunk_start..chunk_end].to_vec(),
-            ));
-            chunk_start = chunk_end;
-        }
-
-        entries
-    }
-}
-
-/// Creates the next Entries for given transactions
-pub fn next_entries(
-    start_hash: &Hash,
-    num_hashes: u64,
-    transactions: Vec<Transaction>,
-) -> Vec<Entry> {
-    let mut id = *start_hash;
-    let mut num_hashes = num_hashes;
-    next_entries_mut(&mut id, &mut num_hashes, transactions)
-}
-
-pub fn get_tmp_ledger_path(name: &str) -> String {
-    use std::env;
-    let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
-    let keypair = Keypair::new();
-
-    let path = format!("{}/tmp/ledger-{}-{}", out_dir, name, keypair.pubkey());
-
-    // whack any possible collision
-    let _ignored = remove_dir_all(&path);
-
-    path
-}
-
-pub fn create_tmp_ledger_with_mint(name: &str, mint: &Mint) -> String {
-    let path = get_tmp_ledger_path(name);
-    DbLedger::destroy(&path).expect("Expected successful database destruction");
-    let db_ledger = DbLedger::open(&path).unwrap();
-    db_ledger
-        .write_entries(DEFAULT_SLOT_HEIGHT, 0, &mint.create_entries())
-        .unwrap();
-
-    path
-}
-
-pub fn create_tmp_genesis(
-    name: &str,
-    num: u64,
-    bootstrap_leader_id: Pubkey,
-    bootstrap_leader_tokens: u64,
-) -> (Mint, String) {
-    let mint = Mint::new_with_leader(num, bootstrap_leader_id, bootstrap_leader_tokens);
-    let path = create_tmp_ledger_with_mint(name, &mint);
-
-    (mint, path)
-}
-
-pub fn create_ticks(num_ticks: usize, mut hash: Hash) -> Vec<Entry> {
-    let mut ticks = Vec::with_capacity(num_ticks as usize);
-    for _ in 0..num_ticks as u64 {
-        let new_tick = Entry::new(&hash, 0, 1, vec![]);
-        hash = new_tick.id;
-        ticks.push(new_tick);
-    }
-
-    ticks
-}
-
-pub fn create_tmp_sample_ledger(
-    name: &str,
-    num_tokens: u64,
-    num_ending_ticks: usize,
-    bootstrap_leader_id: Pubkey,
-    bootstrap_leader_tokens: u64,
-) -> (Mint, String, Vec<Entry>) {
-    let mint = Mint::new_with_leader(num_tokens, bootstrap_leader_id, bootstrap_leader_tokens);
-    let path = get_tmp_ledger_path(name);
-
-    // Create the entries
-    let mut genesis = mint.create_entries();
-    let ticks = create_ticks(num_ending_ticks, mint.last_id());
-    genesis.extend(ticks);
-
-    DbLedger::destroy(&path).expect("Expected successful database destruction");
-    let db_ledger = DbLedger::open(&path).unwrap();
-    db_ledger
-        .write_entries(DEFAULT_SLOT_HEIGHT, 0, &genesis)
-        .unwrap();
-
-    (mint, path, genesis)
-}
-
-pub fn tmp_copy_ledger(from: &str, name: &str) -> String {
-    let tostr = get_tmp_ledger_path(name);
-
-    let db_ledger = DbLedger::open(from).unwrap();
-    let ledger_entries = db_ledger.read_ledger().unwrap();
-
-    DbLedger::destroy(&tostr).expect("Expected successful database destruction");
-    let db_ledger = DbLedger::open(&tostr).unwrap();
-    db_ledger
-        .write_entries(DEFAULT_SLOT_HEIGHT, 0, ledger_entries)
-        .unwrap();
-
-    tostr
-}
-
-pub fn make_tiny_test_entries(num: usize) -> Vec<Entry> {
-    let zero = Hash::default();
-    let one = hash(&zero.as_ref());
-    let keypair = Keypair::new();
-
-    let mut id = one;
-    let mut num_hashes = 0;
-    (0..num)
-        .map(|_| {
-            Entry::new_mut(
-                &mut id,
-                &mut num_hashes,
-                vec![Transaction::budget_new_timestamp(
-                    &keypair,
-                    keypair.pubkey(),
-                    keypair.pubkey(),
-                    Utc::now(),
-                    one,
-                )],
-            )
-        })
-        .collect()
-}
-
-pub fn make_large_test_entries(num_entries: usize) -> Vec<Entry> {
-    let zero = Hash::default();
-    let one = hash(&zero.as_ref());
-    let keypair = Keypair::new();
-
-    let tx = Transaction::budget_new_timestamp(
-        &keypair,
-        keypair.pubkey(),
-        keypair.pubkey(),
-        Utc::now(),
-        one,
-    );
-
-    let serialized_size = serialized_size(&vec![&tx]).unwrap();
-    let num_txs = BLOB_DATA_SIZE / serialized_size as usize;
-    let txs = vec![tx; num_txs];
-    let entry = next_entries(&one, 1, txs)[0].clone();
-    vec![entry; num_entries]
-}
-
-#[cfg(test)]
-pub fn make_consecutive_blobs(
-    id: &Pubkey,
-    num_blobs_to_make: u64,
-    start_height: u64,
-    start_hash: Hash,
-    addr: &std::net::SocketAddr,
-) -> Vec<SharedBlob> {
-    let entries = create_ticks(num_blobs_to_make as usize, start_hash);
-
-    let blobs = entries.to_shared_blobs();
-    let mut index = start_height;
-    for blob in &blobs {
-        let mut blob = blob.write().unwrap();
-        blob.set_index(index).unwrap();
-        blob.set_id(id).unwrap();
-        blob.meta.set_addr(addr);
-        index += 1;
-    }
-    blobs
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::entry::{next_entry, reconstruct_entries_from_blobs, Entry};
-    use crate::packet::{to_blobs, BLOB_DATA_SIZE, PACKET_DATA_SIZE};
-    use bincode::serialized_size;
-    use solana_sdk::hash::hash;
-    use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
-    use solana_sdk::transaction::Transaction;
-    use solana_sdk::vote_program::Vote;
-    use std;
-    use std::net::{IpAddr, Ipv4Addr, SocketAddr};
-
-    #[test]
-    fn test_verify_slice() {
-        solana_logger::setup();
-        let zero = Hash::default();
-        let one = hash(&zero.as_ref());
-        assert!(vec![][..].verify(&zero)); // base case
-        assert!(vec![Entry::new_tick(0, 0, &zero)][..].verify(&zero)); // singleton case 1
-        assert!(!vec![Entry::new_tick(0, 0, &zero)][..].verify(&one)); // singleton case 2, bad
-        assert!(vec![next_entry(&zero, 0, vec![]); 2][..].verify(&zero)); // inductive step
-
-        let mut bad_ticks = vec![next_entry(&zero, 0, vec![]); 2];
-        bad_ticks[1].id = one;
-        assert!(!bad_ticks.verify(&zero)); // inductive step, bad
-    }
-
-    fn make_test_entries() -> Vec<Entry> {
-        let zero = Hash::default();
-        let one = hash(&zero.as_ref());
-        let keypair = Keypair::new();
-        let vote_account = Keypair::new();
-        let tx = Transaction::vote_new(&vote_account.pubkey(), Vote { tick_height: 1 }, one, 1);
-        let msg = tx.get_sign_data();
-        let sig = Signature::new(&vote_account.sign(&msg).as_ref());
-        let tx0 = Transaction {
-            signatures: vec![sig],
-            account_keys: tx.account_keys,
-            last_id: tx.last_id,
-            fee: tx.fee,
-            program_ids: tx.program_ids,
-            instructions: tx.instructions,
-        };
-        let tx1 = Transaction::budget_new_timestamp(
-            &keypair,
-            keypair.pubkey(),
-            keypair.pubkey(),
-            Utc::now(),
-            one,
-        );
-        //
-        // TODO: this magic number and the mix of transaction types
-        //       is designed to fill up a Blob more or less exactly,
-        //       to get near enough the the threshold that
-        //       deserialization falls over if it uses the wrong size()
-        //       parameter to index into blob.data()
-        //
-        // magic numbers -----------------+
-        //                                |
-        //                                V
-        let mut transactions = vec![tx0; 362];
-        transactions.extend(vec![tx1; 100]);
-        next_entries(&zero, 0, transactions)
-    }
-
-    #[test]
-    fn test_entries_to_shared_blobs() {
-        solana_logger::setup();
-        let entries = make_test_entries();
-
-        let blob_q = entries.to_blobs();
-
-        assert_eq!(reconstruct_entries_from_blobs(blob_q).unwrap().0, entries);
-    }
-
-    #[test]
-    fn test_bad_blobs_attack() {
-        solana_logger::setup();
-        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
-        let blobs_q = to_blobs(vec![(0, addr)]).unwrap(); // <-- attack!
-        assert!(reconstruct_entries_from_blobs(blobs_q).is_err());
-    }
-
-    #[test]
-    fn test_next_entries() {
-        solana_logger::setup();
-        let id = Hash::default();
-        let next_id = hash(&id.as_ref());
-        let keypair = Keypair::new();
-        let vote_account = Keypair::new();
-        let tx = Transaction::vote_new(&vote_account.pubkey(), Vote { tick_height: 1 }, next_id, 2);
-        let msg = tx.get_sign_data();
-        let sig = Signature::new(&vote_account.sign(&msg).as_ref());
-        let tx_small = Transaction {
-            signatures: vec![sig],
-            account_keys: tx.account_keys,
-            last_id: tx.last_id,
-            fee: tx.fee,
-            program_ids: tx.program_ids,
-            instructions: tx.instructions,
-        };
-        let tx_large = Transaction::budget_new(&keypair, keypair.pubkey(), 1, next_id);
-
-        let tx_small_size = serialized_size(&tx_small).unwrap() as usize;
-        let tx_large_size = serialized_size(&tx_large).unwrap() as usize;
-        let entry_size = serialized_size(&Entry {
-            tick_height: 0,
-            num_hashes: 0,
-            id: Hash::default(),
-            transactions: vec![],
-        })
-        .unwrap() as usize;
-        assert!(tx_small_size < tx_large_size);
-        assert!(tx_large_size < PACKET_DATA_SIZE);
-
-        let threshold = (BLOB_DATA_SIZE - entry_size) / tx_small_size;
-
-        // verify no split
-        let transactions = vec![tx_small.clone(); threshold];
-        let entries0 = next_entries(&id, 0, transactions.clone());
-        assert_eq!(entries0.len(), 1);
-        assert!(entries0.verify(&id));
-
-        // verify the split with uniform transactions
-        let transactions = vec![tx_small.clone(); threshold * 2];
-        let entries0 = next_entries(&id, 0, transactions.clone());
-        assert_eq!(entries0.len(), 2);
-        assert!(entries0.verify(&id));
-
-        // verify the split with small transactions followed by large
-        // transactions
-        let mut transactions = vec![tx_small.clone(); BLOB_DATA_SIZE / tx_small_size];
-        let large_transactions = vec![tx_large.clone(); BLOB_DATA_SIZE / tx_large_size];
-
-        transactions.extend(large_transactions);
-
-        let entries0 = next_entries(&id, 0, transactions.clone());
-        assert!(entries0.len() >= 2);
-        assert!(entries0.verify(&id));
-    }
-}
@@ -42,7 +42,6 @@ pub mod fetch_stage;
 pub mod fullnode;
 pub mod gossip_service;
 pub mod leader_scheduler;
-pub mod ledger;
 pub mod mint;
 pub mod packet;
 pub mod poh;
@@ -103,7 +103,7 @@ impl Mint {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::ledger::EntrySlice;
+    use crate::entry::EntrySlice;
     use bincode::deserialize;
     use solana_sdk::system_instruction::SystemInstruction;
     use solana_sdk::system_program;
@@ -6,7 +6,7 @@ use crate::counter::Counter;
 use crate::entry::{EntryReceiver, EntrySender};
 use solana_sdk::hash::Hash;
 
-use crate::ledger::EntrySlice;
+use crate::entry::EntrySlice;
 use crate::packet::BlobError;
 use crate::result::{Error, Result};
 use crate::rpc_request::RpcClient;
@@ -290,13 +290,14 @@ impl Service for ReplayStage {
 mod test {
     use crate::bank::Bank;
     use crate::cluster_info::{ClusterInfo, Node};
+    use crate::db_ledger::create_tmp_sample_ledger;
     use crate::db_ledger::{DbLedger, DEFAULT_SLOT_HEIGHT};
+    use crate::entry::create_ticks;
     use crate::entry::Entry;
     use crate::fullnode::Fullnode;
     use crate::leader_scheduler::{
         make_active_set_entries, LeaderScheduler, LeaderSchedulerConfig,
     };
-    use crate::ledger::{create_ticks, create_tmp_sample_ledger};
 
     use crate::create_vote_account::*;
     use crate::packet::BlobError;
@@ -405,10 +405,10 @@ mod tests {
     use super::*;
     use crate::bank::Bank;
     use crate::cluster_info::{Node, NodeInfo};
+    use crate::db_ledger::create_tmp_ledger_with_mint;
     use crate::fullnode::Fullnode;
     use crate::jsonrpc_core::Response;
     use crate::leader_scheduler::LeaderScheduler;
-    use crate::ledger::create_tmp_ledger_with_mint;
     use crate::mint::Mint;
     use crate::rpc_request::get_rpc_request_str;
     use bincode::serialize;
@@ -336,9 +336,9 @@ impl Service for StorageStage {
 
 #[cfg(test)]
 mod tests {
+    use crate::db_ledger::create_tmp_sample_ledger;
     use crate::db_ledger::{DbLedger, DEFAULT_SLOT_HEIGHT};
-    use crate::entry::Entry;
-    use crate::ledger::{create_tmp_sample_ledger, make_tiny_test_entries};
+    use crate::entry::{make_tiny_test_entries, Entry};
 
     use crate::service::Service;
     use crate::storage_stage::StorageState;
@@ -427,9 +427,9 @@ mod tests {
     use super::*;
     use crate::bank::Bank;
    use crate::cluster_info::Node;
+    use crate::db_ledger::create_tmp_ledger_with_mint;
     use crate::fullnode::Fullnode;
     use crate::leader_scheduler::LeaderScheduler;
-    use crate::ledger::create_tmp_ledger_with_mint;
 
     use crate::mint::Mint;
     use bincode::deserialize;
@@ -173,11 +173,11 @@ impl Service for Tvu {
 pub mod tests {
     use crate::bank::Bank;
     use crate::cluster_info::{ClusterInfo, Node};
+    use crate::db_ledger::get_tmp_ledger_path;
     use crate::db_ledger::DbLedger;
     use crate::entry::Entry;
     use crate::gossip_service::GossipService;
     use crate::leader_scheduler::LeaderScheduler;
-    use crate::ledger::get_tmp_ledger_path;
 
     use crate::mint::Mint;
     use crate::packet::SharedBlob;
@@ -210,10 +210,10 @@ pub fn window_service(
 #[cfg(test)]
 mod test {
     use crate::cluster_info::{ClusterInfo, Node};
+    use crate::db_ledger::get_tmp_ledger_path;
     use crate::db_ledger::DbLedger;
-    use crate::entry::Entry;
-    use crate::ledger::{get_tmp_ledger_path, make_consecutive_blobs};
+    use crate::entry::{make_consecutive_blobs, Entry};
     use crate::leader_scheduler::LeaderScheduler;
 
     use crate::packet::{SharedBlob, PACKET_DATA_SIZE};
     use crate::streamer::{blob_receiver, responder};
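make_consecutive_blobs (test-only, now in entry.rs) stamps tick blobs with consecutive window indices starting at start_height. A hedged sketch of a consumer test; it assumes Blob exposes an index() getter matching the set_index() call shown in the diff:

#[cfg(test)]
mod window_smoke {
    use crate::entry::make_consecutive_blobs;
    use solana_sdk::hash::Hash;
    use solana_sdk::pubkey::Pubkey;
    use std::net::{IpAddr, Ipv4Addr, SocketAddr};

    #[test]
    fn blobs_are_consecutively_indexed() {
        let id = Pubkey::default();
        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 9000);
        // Ten tick blobs whose indices should run 5..15.
        let blobs = make_consecutive_blobs(&id, 10, 5, Hash::default(), &addr);
        for (i, blob) in blobs.iter().enumerate() {
            // index() is assumed here; the diff itself only shows set_index().
            assert_eq!(blob.read().unwrap().index().unwrap(), 5 + i as u64);
        }
    }
}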
@@ -12,12 +12,12 @@ use solana::blob_fetch_stage::BlobFetchStage;
 use solana::cluster_info::{ClusterInfo, Node, NodeInfo};
 use solana::contact_info::ContactInfo;
 use solana::create_vote_account::*;
+use solana::db_ledger::{create_tmp_genesis, create_tmp_sample_ledger, tmp_copy_ledger};
 use solana::db_ledger::{DbLedger, DEFAULT_SLOT_HEIGHT};
 use solana::entry::{reconstruct_entries_from_blobs, Entry};
 use solana::fullnode::{Fullnode, FullnodeReturnType};
 use solana::gossip_service::GossipService;
 use solana::leader_scheduler::{make_active_set_entries, LeaderScheduler, LeaderSchedulerConfig};
-use solana::ledger::{create_tmp_genesis, create_tmp_sample_ledger, tmp_copy_ledger};
 
 use solana::mint::Mint;
 use solana::packet::SharedBlob;
@@ -9,10 +9,10 @@ use solana::client::mk_client;
 use solana::cluster_info::{ClusterInfo, Node, NodeInfo};
 use solana::create_vote_account::*;
 use solana::db_ledger::DbLedger;
+use solana::db_ledger::{create_tmp_genesis, get_tmp_ledger_path, tmp_copy_ledger};
 use solana::entry::Entry;
 use solana::fullnode::Fullnode;
 use solana::leader_scheduler::LeaderScheduler;
-use solana::ledger::{create_tmp_genesis, get_tmp_ledger_path, tmp_copy_ledger};
 use solana::replicator::Replicator;
 use solana::rpc_request::{RpcClient, RpcRequest};
 use solana::streamer::blob_receiver;