test randomize with error (#5916)

* test randomize with error

* update magic numbers

* fixup

* fixup

* fixup

* no more blobs

* fixup
Rob Walker 2019-09-17 15:11:29 -07:00 committed by GitHub
parent 180f415736
commit a2595b44c6
11 changed files with 181 additions and 883 deletions
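
Most of the diff below is one mechanical substitution: the blob-sized test-entry helpers make_tiny_test_entries and make_large_test_entries are replaced by the existing create_ticks, and the blob-packing machinery they relied on (num_will_fit, split_serializable_chunks, next_entries, the to_blobs family) is deleted. A minimal sketch of the substitution, assuming the solana_core APIs exactly as they appear in this diff:

use solana_core::entry::create_ticks;
use solana_sdk::hash::Hash;

// before: let entries = make_tiny_test_entries(num_entries);
// after: tick entries seeded from a default hash, no blob sizing involved
let num_entries: u64 = 32 * 1024;
let entries = create_ticks(num_entries, Hash::default());
assert_eq!(entries.len(), num_entries as usize);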


@@ -104,9 +104,6 @@ name = "banking_stage"
[[bench]]
name = "blocktree"
[[bench]]
name = "ledger"
[[bench]]
name = "gen_keys"


@@ -7,8 +7,11 @@ extern crate test;
extern crate solana_core;
use rand::Rng;
use solana_core::blocktree::{entries_to_test_shreds, get_tmp_ledger_path, Blocktree};
use solana_core::entry::{make_large_test_entries, make_tiny_test_entries, Entry};
use solana_core::{
blocktree::{entries_to_test_shreds, get_tmp_ledger_path, Blocktree},
entry::{create_ticks, Entry},
};
use solana_sdk::hash::Hash;
use std::path::Path;
use test::Bencher;
@@ -32,8 +35,7 @@ fn setup_read_bench(
slot: u64,
) {
// Make some big and small entries
let mut entries = make_large_test_entries(num_large_shreds as usize);
entries.extend(make_tiny_test_entries(num_small_shreds as usize));
let entries = create_ticks(num_large_shreds * 4 + num_small_shreds * 2, Hash::default());
// Convert the entries to shreds, write the shreds to the ledger
let shreds = entries_to_test_shreds(entries, slot, slot.saturating_sub(1), true);
@@ -48,7 +50,7 @@ fn setup_read_bench(
fn bench_write_small(bench: &mut Bencher) {
let ledger_path = get_tmp_ledger_path!();
let num_entries = 32 * 1024;
let entries = make_tiny_test_entries(num_entries);
let entries = create_ticks(num_entries, Hash::default());
bench_write_shreds(bench, entries, &ledger_path);
}
@@ -58,7 +60,7 @@ fn bench_write_small(bench: &mut Bencher) {
fn bench_write_big(bench: &mut Bencher) {
let ledger_path = get_tmp_ledger_path!();
let num_entries = 32 * 1024;
let entries = make_large_test_entries(num_entries);
let entries = create_ticks(num_entries, Hash::default());
bench_write_shreds(bench, entries, &ledger_path);
}
@@ -127,7 +129,7 @@ fn bench_insert_data_shred_small(bench: &mut Bencher) {
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let num_entries = 32 * 1024;
let entries = make_tiny_test_entries(num_entries);
let entries = create_ticks(num_entries, Hash::default());
bench.iter(move || {
let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true);
blocktree.insert_shreds(shreds, None).unwrap();
@@ -142,7 +144,7 @@ fn bench_insert_data_shred_big(bench: &mut Bencher) {
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let num_entries = 32 * 1024;
let entries = make_large_test_entries(num_entries);
let entries = create_ticks(num_entries, Hash::default());
bench.iter(move || {
let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true);
blocktree.insert_shreds(shreds, None).unwrap();


@@ -1,24 +0,0 @@
#![feature(test)]
extern crate test;
use solana_core::entry::{next_entries, reconstruct_entries_from_blobs, EntrySlice};
use solana_sdk::hash::{hash, Hash};
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction;
use test::Bencher;
#[bench]
fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
let zero = Hash::default();
let one = hash(&zero.as_ref());
let keypair = Keypair::new();
let tx0 = system_transaction::transfer(&keypair, &keypair.pubkey(), 1, one);
let transactions = vec![tx0; 10];
let entries = next_entries(&zero, 1, transactions);
bencher.iter(|| {
let blobs = entries.to_blobs();
assert_eq!(reconstruct_entries_from_blobs(blobs).unwrap().0, entries);
});
}


@@ -961,7 +961,7 @@ mod tests {
use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use crate::packet::to_packets;
use crate::poh_recorder::WorkingBank;
use crate::{entry, get_tmp_ledger_path, packet, tmp_ledger_name};
use crate::{get_tmp_ledger_path, tmp_ledger_name};
use crossbeam_channel::unbounded;
use itertools::Itertools;
use solana_sdk::instruction::InstructionError;
@@ -1769,27 +1769,11 @@ mod tests {
..
} = create_genesis_block(10_000);
let bank = Arc::new(Bank::new(&genesis_block));
let mut transactions = vec![];
loop {
let pubkey = Pubkey::new_rand();
// Make enough transactions to span multiple entries
transactions.push(system_transaction::transfer(
&mint_keypair,
&pubkey,
1,
genesis_block.hash(),
));
let pubkey = Pubkey::new_rand();
if entry::num_will_fit(
&transactions[0..],
packet::BLOB_DATA_SIZE as u64,
&Entry::serialized_to_blob_size,
) < transactions.len()
{
break;
}
}
let transactions =
vec![system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_block.hash(),); 3];
let ledger_path = get_tmp_ledger_path!();
{


@@ -1691,7 +1691,7 @@ pub fn entries_to_test_shreds(
#[cfg(test)]
pub mod tests {
use super::*;
use crate::entry::{create_ticks, make_tiny_test_entries, Entry};
use crate::entry::{create_ticks, Entry};
use crate::shred::CodingShred;
use itertools::Itertools;
use rand::seq::SliceRandom;
@@ -1890,12 +1890,14 @@ pub mod tests {
assert_eq!(bytes4, bytes);
let mut buf = vec![0; bytes * 2];
let (last_index, bytes6) = ledger.get_data_shreds(slot, 9, 10, &mut buf).unwrap();
assert_eq!(last_index, 9);
let (last_index, bytes6) = ledger
.get_data_shreds(slot, num_shreds - 1, num_shreds, &mut buf)
.unwrap();
assert_eq!(last_index, num_shreds - 1);
{
let shred_data = &buf[..bytes6];
assert_eq!(shred_data, &shred_bufs[9][..bytes6]);
assert_eq!(shred_data, &shred_bufs[(num_shreds - 1) as usize][..bytes6]);
}
// Read out of range
@@ -2042,7 +2044,7 @@ pub mod tests {
let blocktree_path = get_tmp_ledger_path("test_get_slot_entries1");
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let entries = make_tiny_test_entries(8);
let entries = create_ticks(8, Hash::default());
let shreds = entries_to_test_shreds(entries[0..4].to_vec(), 1, 0, false);
blocktree
.insert_shreds(shreds, None)
@@ -2077,7 +2079,7 @@ pub mod tests {
let num_slots = 5 as u64;
let mut index = 0;
for slot in 0..num_slots {
let entries = make_tiny_test_entries(slot as usize + 1);
let entries = create_ticks(slot + 1, Hash::default());
let last_entry = entries.last().unwrap().clone();
let mut shreds =
entries_to_test_shreds(entries, slot, slot.saturating_sub(1), false);
@@ -2109,13 +2111,13 @@ pub mod tests {
let num_slots = 5 as u64;
let shreds_per_slot = 5 as u64;
let entry_serialized_size =
bincode::serialized_size(&make_tiny_test_entries(1)).unwrap();
bincode::serialized_size(&create_ticks(1, Hash::default())).unwrap();
let entries_per_slot =
(shreds_per_slot * PACKET_DATA_SIZE as u64) / entry_serialized_size;
// Write entries
for slot in 0..num_slots {
let entries = make_tiny_test_entries(entries_per_slot as usize);
let entries = create_ticks(entries_per_slot, Hash::default());
let shreds =
entries_to_test_shreds(entries.clone(), slot, slot.saturating_sub(1), false);
assert!(shreds.len() as u64 >= shreds_per_slot);
@@ -2888,7 +2890,7 @@ pub mod tests {
let gap: u64 = 10;
assert!(gap > 3);
let num_entries = 10;
let entries = make_tiny_test_entries(num_entries);
let entries = create_ticks(num_entries, Hash::default());
let mut shreds = entries_to_test_shreds(entries, slot, 0, true);
let num_shreds = shreds.len();
for (i, b) in shreds.iter_mut().enumerate() {
@@ -2978,7 +2980,7 @@ pub mod tests {
assert_eq!(blocktree.find_missing_data_indexes(slot, 4, 3, 1), empty);
assert_eq!(blocktree.find_missing_data_indexes(slot, 1, 2, 0), empty);
let entries = make_tiny_test_entries(20);
let entries = create_ticks(20, Hash::default());
let mut shreds = entries_to_test_shreds(entries, slot, 0, true);
shreds.drain(2..);
@@ -3019,7 +3021,7 @@ pub mod tests {
// Write entries
let num_entries = 10;
let entries = make_tiny_test_entries(num_entries);
let entries = create_ticks(num_entries, Hash::default());
let shreds = entries_to_test_shreds(entries, slot, 0, true);
let num_shreds = shreds.len();
@@ -3041,7 +3043,7 @@ pub mod tests {
#[test]
pub fn test_should_insert_data_shred() {
let (mut shreds, _) = make_slot_entries(0, 0, 100);
let (mut shreds, _) = make_slot_entries(0, 0, 200);
let blocktree_path = get_tmp_ledger_path!();
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
@@ -3057,12 +3059,15 @@ pub mod tests {
let slot_meta = blocktree.meta(0).unwrap().unwrap();
let index = index_cf.get(0).unwrap().unwrap();
assert_eq!(slot_meta.consumed, 5);
assert!(!Blocktree::should_insert_data_shred(
&shreds[1],
&slot_meta,
index.data(),
&last_root
));
assert_eq!(
Blocktree::should_insert_data_shred(
&shreds[1],
&slot_meta,
index.data(),
&last_root
),
false
);
// Trying to insert the same shred again should fail
// skip over shred 5 so the `slot_meta.consumed` doesn't increment
@@ -3071,12 +3076,15 @@ pub mod tests {
.unwrap();
let slot_meta = blocktree.meta(0).unwrap().unwrap();
let index = index_cf.get(0).unwrap().unwrap();
assert!(!Blocktree::should_insert_data_shred(
&shreds[6],
&slot_meta,
index.data(),
&last_root
));
assert_eq!(
Blocktree::should_insert_data_shred(
&shreds[6],
&slot_meta,
index.data(),
&last_root
),
false
);
// Trying to insert another "is_last" shred with index < the received index should fail
// skip over shred 7
@@ -3094,12 +3102,10 @@ pub mod tests {
panic!("Shred in unexpected format")
}
};
assert!(!Blocktree::should_insert_data_shred(
&shred7,
&slot_meta,
index.data(),
&last_root
));
assert_eq!(
Blocktree::should_insert_data_shred(&shred7, &slot_meta, index.data(), &last_root),
false
);
// Insert all pending shreds
let mut shred8 = shreds[8].clone();
@@ -3113,12 +3119,10 @@ pub mod tests {
} else {
panic!("Shred in unexpected format")
}
assert!(!Blocktree::should_insert_data_shred(
&shred7,
&slot_meta,
index.data(),
&last_root
));
assert_eq!(
Blocktree::should_insert_data_shred(&shred7, &slot_meta, index.data(), &last_root),
false
);
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
@@ -3469,7 +3473,7 @@ pub mod tests {
parent_slot: u64,
num_entries: u64,
) -> (Vec<Shred>, Vec<Entry>) {
let entries = make_tiny_test_entries(num_entries as usize);
let entries = create_ticks(num_entries, Hash::default());
let shreds = entries_to_test_shreds(entries.clone(), slot, parent_slot, true);
(shreds, entries)
}


@@ -1230,6 +1230,57 @@ pub mod tests {
assert_eq!(bank.last_blockhash(), blockhash);
}
#[test]
fn test_process_entry_tx_random_execution_with_error() {
let GenesisBlockInfo {
genesis_block,
mint_keypair,
..
} = create_genesis_block(1_000_000_000);
let bank = Bank::new(&genesis_block);
const NUM_TRANSFERS_PER_ENTRY: usize = 8;
const NUM_TRANSFERS: usize = NUM_TRANSFERS_PER_ENTRY * 32;
// large enough to scramble locks and results
let keypairs: Vec<_> = (0..NUM_TRANSFERS * 2).map(|_| Keypair::new()).collect();
// give everybody one lamport
for keypair in &keypairs {
bank.transfer(1, &mint_keypair, &keypair.pubkey())
.expect("funding failed");
}
let mut hash = bank.last_blockhash();
let entries: Vec<_> = (0..NUM_TRANSFERS)
.step_by(NUM_TRANSFERS_PER_ENTRY)
.map(|i| {
let mut transactions = (0..NUM_TRANSFERS_PER_ENTRY)
.map(|j| {
system_transaction::transfer(
&keypairs[i + j],
&keypairs[i + j + NUM_TRANSFERS].pubkey(),
1,
bank.last_blockhash(),
)
})
.collect::<Vec<_>>();
transactions.push(system_transaction::create_account(
&mint_keypair,
&solana_sdk::sysvar::clock::id(), // puts a TX error in results
bank.last_blockhash(),
1,
0,
&Pubkey::new_rand(),
));
next_entry_mut(&mut hash, 0, transactions)
})
.collect();
assert_eq!(process_entries(&bank, &entries, true), Ok(()));
}
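
The new test injects a failure by creating an account at the clock sysvar's address, which is already occupied, so exactly one transaction per entry lands as an Err while the surrounding transfers succeed; process_entries must then carry that mixed result set through randomized execution. A hypothetical minimal repro of just the failing transaction, reusing the calls above and assuming Bank::process_transaction of this era:

let tx = system_transaction::create_account(
    &mint_keypair,
    &solana_sdk::sysvar::clock::id(), // address already holds the sysvar account
    bank.last_blockhash(),
    1,
    0,
    &Pubkey::new_rand(),
);
// only this transaction's result is an Err; the bank keeps processing
assert!(bank.process_transaction(&tx).is_err());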
#[test]
fn test_process_entry_tx_random_execution_no_error() {
// entropy multiplier should be big enough to provide sufficient entropy
@@ -1551,7 +1602,9 @@ pub mod tests {
} = create_genesis_block(1_000_000_000);
let mut bank = Bank::new(&genesis_block);
const NUM_TRANSFERS: usize = 128;
const NUM_TRANSFERS_PER_ENTRY: usize = 8;
const NUM_TRANSFERS: usize = NUM_TRANSFERS_PER_ENTRY * 32;
let keypairs: Vec<_> = (0..NUM_TRANSFERS * 2).map(|_| Keypair::new()).collect();
// give everybody one lamport
@@ -1565,33 +1618,51 @@ pub mod tests {
let mut root: Option<Arc<Bank>> = None;
loop {
let entries: Vec<_> = (0..NUM_TRANSFERS)
.step_by(NUM_TRANSFERS_PER_ENTRY)
.map(|i| {
next_entry_mut(
&mut hash,
0,
vec![system_transaction::transfer(
&keypairs[i],
&keypairs[i + NUM_TRANSFERS].pubkey(),
1,
next_entry_mut(&mut hash, 0, {
let mut transactions = (i..i + NUM_TRANSFERS_PER_ENTRY)
.map(|i| {
system_transaction::transfer(
&keypairs[i],
&keypairs[i + NUM_TRANSFERS].pubkey(),
1,
bank.last_blockhash(),
)
})
.collect::<Vec<_>>();
transactions.push(system_transaction::create_account(
&mint_keypair,
&solana_sdk::sysvar::clock::id(), // puts a TX error in results
bank.last_blockhash(),
)],
)
100,
100,
&Pubkey::new_rand(),
));
transactions
})
})
.collect();
info!("paying iteration {}", i);
process_entries(&bank, &entries, true).expect("paying failed");
let entries: Vec<_> = (0..NUM_TRANSFERS)
.step_by(NUM_TRANSFERS_PER_ENTRY)
.map(|i| {
next_entry_mut(
&mut hash,
0,
vec![system_transaction::transfer(
&keypairs[i + NUM_TRANSFERS],
&keypairs[i].pubkey(),
1,
bank.last_blockhash(),
)],
(i..i + NUM_TRANSFERS_PER_ENTRY)
.map(|i| {
system_transaction::transfer(
&keypairs[i + NUM_TRANSFERS],
&keypairs[i].pubkey(),
1,
bank.last_blockhash(),
)
})
.collect::<Vec<_>>(),
)
})
.collect();
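
Condensed, the paying half of the loop now builds each entry from NUM_TRANSFERS_PER_ENTRY forward transfers plus one deliberately failing create_account; the shape of the new construction, with names and values taken from this hunk:

let entries: Vec<_> = (0..NUM_TRANSFERS)
    .step_by(NUM_TRANSFERS_PER_ENTRY)
    .map(|i| {
        next_entry_mut(&mut hash, 0, {
            let mut transactions = (i..i + NUM_TRANSFERS_PER_ENTRY)
                .map(|i| {
                    system_transaction::transfer(
                        &keypairs[i],
                        &keypairs[i + NUM_TRANSFERS].pubkey(),
                        1,
                        bank.last_blockhash(),
                    )
                })
                .collect::<Vec<_>>();
            transactions.push(system_transaction::create_account(
                &mint_keypair,
                &solana_sdk::sysvar::clock::id(), // puts a TX error in results
                bank.last_blockhash(),
                100,
                100,
                &Pubkey::new_rand(),
            ));
            transactions
        })
    })
    .collect();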


@@ -118,7 +118,7 @@ mod tests {
use crate::blocktree::Blocktree;
use crate::chacha::chacha_cbc_encrypt_ledger;
use crate::chacha_cuda::chacha_cbc_encrypt_file_many_keys;
use crate::entry::make_tiny_test_entries;
use crate::entry::create_ticks;
use crate::replicator::sample_file;
use solana_sdk::clock::DEFAULT_SLOTS_PER_SEGMENT;
use solana_sdk::hash::Hash;
@@ -132,7 +132,7 @@ mod tests {
solana_logger::setup();
let slots_per_segment = 32;
let entries = make_tiny_test_entries(slots_per_segment);
let entries = create_ticks(slots_per_segment, Hash::default());
let ledger_dir = "test_encrypt_file_many_keys_single";
let ledger_path = get_tmp_ledger_path(ledger_dir);
let ticks_per_slot = 16;
@@ -190,7 +190,7 @@ mod tests {
fn test_encrypt_file_many_keys_multiple_keys() {
solana_logger::setup();
let entries = make_tiny_test_entries(32);
let entries = create_ticks(32, Hash::default());
let ledger_dir = "test_encrypt_file_many_keys_multiple";
let ledger_path = get_tmp_ledger_path(ledger_dir);
let ticks_per_slot = 16;


@@ -2,18 +2,15 @@
//! unique ID that is the hash of the Entry before it, plus the hash of the
//! transactions within it. Entries cannot be reordered, and its field `num_hashes`
//! represents an approximate amount of time since the last Entry was created.
use crate::packet::{Blob, SharedBlob, BLOB_DATA_SIZE};
use crate::packet::{Blob, SharedBlob};
use crate::poh::Poh;
use crate::result::Result;
use bincode::{deserialize, serialized_size};
use chrono::prelude::Utc;
use rayon::prelude::*;
use rayon::ThreadPool;
use solana_budget_api::budget_instruction;
use solana_merkle_tree::MerkleTree;
use solana_metrics::inc_new_counter_warn;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::timing;
use solana_sdk::transaction::Transaction;
use std::borrow::Borrow;
@@ -72,8 +69,6 @@ pub struct Entry {
impl Entry {
/// Creates the next Entry `num_hashes` after `start_hash`.
pub fn new(prev_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Self {
assert!(Self::serialized_to_blob_size(&transactions) <= BLOB_DATA_SIZE as u64);
if num_hashes == 0 && transactions.is_empty() {
Entry {
num_hashes: 0,
@@ -135,8 +130,6 @@ impl Entry {
num_hashes: &mut u64,
transactions: Vec<Transaction>,
) -> Self {
assert!(Self::serialized_to_blob_size(&transactions) <= BLOB_DATA_SIZE as u64);
let entry = Self::new(start_hash, *num_hashes, transactions);
*start_hash = entry.hash;
*num_hashes = 0;
@@ -230,10 +223,6 @@ pub trait EntrySlice {
/// Verifies the hashes and counts of a slice of transactions are all consistent.
fn verify_cpu(&self, start_hash: &Hash) -> bool;
fn verify(&self, start_hash: &Hash) -> bool;
fn to_shared_blobs(&self) -> Vec<SharedBlob>;
fn to_blobs(&self) -> Vec<Blob>;
fn to_single_entry_blobs(&self) -> Vec<Blob>;
fn to_single_entry_shared_blobs(&self) -> Vec<SharedBlob>;
}
impl EntrySlice for [Entry] {
@@ -369,33 +358,6 @@ impl EntrySlice for [Entry] {
);
res
}
fn to_blobs(&self) -> Vec<Blob> {
split_serializable_chunks(
&self,
BLOB_DATA_SIZE as u64,
&|s| bincode::serialized_size(&s).unwrap(),
&mut |entries: &[Entry]| Blob::from_serializable(entries),
)
}
fn to_shared_blobs(&self) -> Vec<SharedBlob> {
self.to_blobs()
.into_iter()
.map(|b| Arc::new(RwLock::new(b)))
.collect()
}
fn to_single_entry_shared_blobs(&self) -> Vec<SharedBlob> {
self.to_single_entry_blobs()
.into_iter()
.map(|b| Arc::new(RwLock::new(b)))
.collect()
}
fn to_single_entry_blobs(&self) -> Vec<Blob> {
self.iter().map(Entry::to_blob).collect()
}
}
pub fn next_entry_mut(start: &mut Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
@@ -404,87 +366,6 @@ pub fn next_entry_mut(start: &mut Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
entry
}
pub fn num_will_fit<T, F>(serializables: &[T], max_size: u64, serialized_size: &F) -> usize
where
F: Fn(&[T]) -> u64,
{
if serializables.is_empty() {
return 0;
}
let mut num = serializables.len();
let mut upper = serializables.len();
let mut lower = 1; // if one won't fit, we have a lot of TODOs
loop {
let next;
if serialized_size(&serializables[..num]) <= max_size {
next = (upper + num) / 2;
lower = num;
} else {
if num == 1 {
// if not even one will fit, bail
num = 0;
break;
}
next = (lower + num) / 2;
upper = num;
}
// same as last time
if next == num {
break;
}
num = next;
}
num
}
pub fn split_serializable_chunks<T, R, F1, F2>(
serializables: &[T],
max_size: u64,
serialized_size: &F1,
converter: &mut F2,
) -> Vec<R>
where
F1: Fn(&[T]) -> u64,
F2: FnMut(&[T]) -> R,
{
let mut result = vec![];
let mut chunk_start = 0;
while chunk_start < serializables.len() {
let chunk_end =
chunk_start + num_will_fit(&serializables[chunk_start..], max_size, serialized_size);
result.push(converter(&serializables[chunk_start..chunk_end]));
chunk_start = chunk_end;
}
result
}
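
Both deleted helpers existed only to pack serialized entries under BLOB_DATA_SIZE: num_will_fit binary-searches for the largest prefix whose serialized size still fits, and split_serializable_chunks applies it repeatedly to carve a slice into blob-sized chunks. Illustrative only, since the helper is gone after this commit, here is the search against the cumulative sizer reused by test_num_will_fit further down:

let items: Vec<u8> = vec![1; 10];
// the size of an n-item prefix is 0 + 1 + ... + (n - 1)
let sum = |i: &[u8]| (0..i.len()).sum::<usize>() as u64;
// a 4-item prefix costs 6 <= 8; a 5-item prefix costs 10 > 8
assert_eq!(num_will_fit(&items, 8, &sum), 4);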
/// Creates the next entries for given transactions, outputs
/// updates start_hash to hash of last Entry, sets num_hashes to 0
fn next_entries_mut(
start_hash: &mut Hash,
num_hashes: &mut u64,
transactions: Vec<Transaction>,
) -> Vec<Entry> {
split_serializable_chunks(
&transactions[..],
BLOB_DATA_SIZE as u64,
&Entry::serialized_to_blob_size,
&mut |txs: &[Transaction]| Entry::new_mut(start_hash, num_hashes, txs.to_vec()),
)
}
/// Creates the next Entries for given transactions
pub fn next_entries(
start_hash: &Hash,
num_hashes: u64,
transactions: Vec<Transaction>,
) -> Vec<Entry> {
let mut hash = *start_hash;
let mut num_hashes = num_hashes;
next_entries_mut(&mut hash, &mut num_hashes, transactions)
}
pub fn create_ticks(num_ticks: u64, mut hash: Hash) -> Vec<Entry> {
let mut ticks = Vec::with_capacity(num_ticks as usize);
for _ in 0..num_ticks {
@@ -495,65 +376,6 @@ pub fn create_ticks(num_ticks: u64, mut hash: Hash) -> Vec<Entry> {
ticks
}
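
create_ticks itself is untouched: it hashes forward from the seed and emits one tick Entry per step, so the result verifies as a chain. A quick property check, assuming the EntrySlice::verify declared earlier in this file:

use solana_core::entry::{create_ticks, EntrySlice};
use solana_sdk::hash::Hash;

let start = Hash::default();
let ticks = create_ticks(8, start);
assert_eq!(ticks.len(), 8);
assert!(ticks.verify(&start)); // each tick chains off the previous hash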
pub fn make_tiny_test_entries_from_hash(start: &Hash, num: usize) -> Vec<Entry> {
let keypair = Keypair::new();
let pubkey = keypair.pubkey();
let mut hash = *start;
let mut num_hashes = 0;
(0..num)
.map(|_| {
let ix = budget_instruction::apply_timestamp(&pubkey, &pubkey, &pubkey, Utc::now());
let tx = Transaction::new_signed_instructions(&[&keypair], vec![ix], *start);
Entry::new_mut(&mut hash, &mut num_hashes, vec![tx])
})
.collect()
}
pub fn make_tiny_test_entries(num: usize) -> Vec<Entry> {
let zero = Hash::default();
let one = solana_sdk::hash::hash(&zero.as_ref());
make_tiny_test_entries_from_hash(&one, num)
}
pub fn make_large_test_entries(num_entries: usize) -> Vec<Entry> {
let zero = Hash::default();
let one = solana_sdk::hash::hash(&zero.as_ref());
let keypair = Keypair::new();
let pubkey = keypair.pubkey();
let ix = budget_instruction::apply_timestamp(&pubkey, &pubkey, &pubkey, Utc::now());
let tx = Transaction::new_signed_instructions(&[&keypair], vec![ix], one);
let serialized_size = serialized_size(&tx).unwrap();
let num_txs = BLOB_DATA_SIZE / serialized_size as usize;
let txs = vec![tx; num_txs];
let entry = next_entries(&one, 1, txs)[0].clone();
vec![entry; num_entries]
}
#[cfg(test)]
pub fn make_consecutive_blobs(
id: &solana_sdk::pubkey::Pubkey,
num_blobs_to_make: u64,
start_height: u64,
start_hash: Hash,
addr: &std::net::SocketAddr,
) -> Vec<SharedBlob> {
let entries = create_ticks(num_blobs_to_make, start_hash);
let blobs = entries.to_single_entry_shared_blobs();
let mut index = start_height;
for blob in &blobs {
let mut blob = blob.write().unwrap();
blob.set_index(index);
blob.set_id(id);
blob.meta.set_addr(addr);
index += 1;
}
blobs
}
#[cfg(test)]
/// Creates the next Tick or Transaction Entry `num_hashes` after `start_hash`.
pub fn next_entry(prev_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
@@ -569,13 +391,13 @@ pub fn next_entry(prev_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
mod tests {
use super::*;
use crate::entry::Entry;
use crate::packet::{to_blobs, BLOB_DATA_SIZE, PACKET_DATA_SIZE};
use solana_sdk::hash::hash;
use solana_sdk::instruction::Instruction;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use chrono::prelude::Utc;
use solana_budget_api::budget_instruction;
use solana_sdk::{
hash::hash,
signature::{Keypair, KeypairUtil},
system_transaction,
};
fn create_sample_payment(keypair: &Keypair, hash: Hash) -> Transaction {
let pubkey = keypair.pubkey();
@@ -740,169 +562,4 @@ mod tests {
assert!(!bad_ticks.verify(&one)); // inductive step, bad
}
fn blob_sized_entries(num_entries: usize) -> Vec<Entry> {
// rough guess
let mut magic_len = BLOB_DATA_SIZE
- serialized_size(&vec![Entry {
num_hashes: 0,
hash: Hash::default(),
transactions: vec![],
}])
.unwrap() as usize;
loop {
let entries = vec![Entry {
num_hashes: 0,
hash: Hash::default(),
transactions: vec![Transaction::new_unsigned_instructions(vec![
Instruction::new(Pubkey::default(), &vec![0u8; magic_len as usize], vec![]),
])],
}];
let size = serialized_size(&entries).unwrap() as usize;
if size < BLOB_DATA_SIZE {
magic_len += BLOB_DATA_SIZE - size;
} else if size > BLOB_DATA_SIZE {
magic_len -= size - BLOB_DATA_SIZE;
} else {
break;
}
}
vec![
Entry {
num_hashes: 0,
hash: Hash::default(),
transactions: vec![Transaction::new_unsigned_instructions(vec![
Instruction::new(Pubkey::default(), &vec![0u8; magic_len], vec![]),
])],
};
num_entries
]
}
#[test]
fn test_entries_to_blobs() {
solana_logger::setup();
let entries = blob_sized_entries(10);
let blobs = entries.to_blobs();
for blob in &blobs {
assert_eq!(blob.size(), BLOB_DATA_SIZE);
}
assert_eq!(reconstruct_entries_from_blobs(blobs).unwrap().0, entries);
}
#[test]
fn test_multiple_entries_to_blobs() {
solana_logger::setup();
let num_blobs = 10;
let serialized_size =
bincode::serialized_size(&make_tiny_test_entries_from_hash(&Hash::default(), 1))
.unwrap();
let num_entries = (num_blobs * BLOB_DATA_SIZE as u64) / serialized_size;
let entries = make_tiny_test_entries_from_hash(&Hash::default(), num_entries as usize);
let blob_q = entries.to_blobs();
assert_eq!(blob_q.len() as u64, num_blobs);
assert_eq!(reconstruct_entries_from_blobs(blob_q).unwrap().0, entries);
}
#[test]
fn test_bad_blobs_attack() {
solana_logger::setup();
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
let blobs_q = to_blobs(vec![(0, addr)]).unwrap(); // <-- attack!
assert!(reconstruct_entries_from_blobs(blobs_q).is_err());
}
#[test]
fn test_next_entries() {
solana_logger::setup();
let hash = Hash::default();
let next_hash = solana_sdk::hash::hash(&hash.as_ref());
let keypair = Keypair::new();
let tx_small = create_sample_timestamp(&keypair, next_hash);
let tx_large = create_sample_payment(&keypair, next_hash);
let tx_small_size = serialized_size(&tx_small).unwrap() as usize;
let tx_large_size = serialized_size(&tx_large).unwrap() as usize;
let entry_size = serialized_size(&Entry {
num_hashes: 0,
hash: Hash::default(),
transactions: vec![],
})
.unwrap() as usize;
assert!(tx_small_size < tx_large_size);
assert!(tx_large_size < PACKET_DATA_SIZE);
let threshold = (BLOB_DATA_SIZE - entry_size) / tx_small_size;
// verify no split
let transactions = vec![tx_small.clone(); threshold];
let entries0 = next_entries(&hash, 0, transactions.clone());
assert_eq!(entries0.len(), 1);
assert!(entries0.verify(&hash));
// verify the split with uniform transactions
let transactions = vec![tx_small.clone(); threshold * 2];
let entries0 = next_entries(&hash, 0, transactions.clone());
assert_eq!(entries0.len(), 2);
assert!(entries0.verify(&hash));
// verify the split with small transactions followed by large
// transactions
let mut transactions = vec![tx_small.clone(); BLOB_DATA_SIZE / tx_small_size];
let large_transactions = vec![tx_large.clone(); BLOB_DATA_SIZE / tx_large_size];
transactions.extend(large_transactions);
let entries0 = next_entries(&hash, 0, transactions.clone());
assert!(entries0.len() >= 2);
assert!(entries0.verify(&hash));
}
#[test]
fn test_num_will_fit_empty() {
let serializables: Vec<u32> = vec![];
let result = num_will_fit(&serializables[..], 8, &|_| 4);
assert_eq!(result, 0);
}
#[test]
fn test_num_will_fit() {
let serializables_vec: Vec<u8> = (0..10).map(|_| 1).collect();
let serializables = &serializables_vec[..];
let sum = |i: &[u8]| (0..i.len()).into_iter().sum::<usize>() as u64;
// sum[0] is = 0, but sum[0..1] > 0, so result contains 1 item
let result = num_will_fit(serializables, 0, &sum);
assert_eq!(result, 1);
// sum[0..3] is <= 8, but sum[0..4] > 8, so result contains 3 items
let result = num_will_fit(serializables, 8, &sum);
assert_eq!(result, 4);
// sum[0..1] is = 1, but sum[0..2] > 0, so result contains 2 items
let result = num_will_fit(serializables, 1, &sum);
assert_eq!(result, 2);
// sum[0..9] = 45, so contains all items
let result = num_will_fit(serializables, 45, &sum);
assert_eq!(result, 10);
// sum[0..8] <= 44, but sum[0..9] = 45, so contains all but last item
let result = num_will_fit(serializables, 44, &sum);
assert_eq!(result, 9);
// sum[0..9] <= 46, but contains all items
let result = num_will_fit(serializables, 46, &sum);
assert_eq!(result, 10);
// too small to fit a single u64
let result = num_will_fit(&[0u64], (std::mem::size_of::<u64>() - 1) as u64, &|i| {
(std::mem::size_of::<u64>() * i.len()) as u64
});
assert_eq!(result, 0);
}
}


@@ -464,7 +464,7 @@ mod test {
let num_slots = 2;
// Create some blobs
let (mut shreds, _) = make_many_slot_entries(0, num_slots as u64, 50 as u64);
let (mut shreds, _) = make_many_slot_entries(0, num_slots as u64, 150 as u64);
let num_shreds = shreds.len() as u64;
let num_shreds_per_slot = num_shreds / num_slots;


@@ -276,31 +276,32 @@ impl Service for WindowService {
#[cfg(test)]
mod test {
use super::*;
use crate::bank_forks::BankForks;
use crate::blocktree::tests::make_many_slot_entries;
use crate::blocktree::{get_tmp_ledger_path, Blocktree};
use crate::cluster_info::{ClusterInfo, Node};
use crate::contact_info::ContactInfo;
use crate::entry::{make_consecutive_blobs, make_tiny_test_entries, Entry};
use crate::genesis_utils::create_genesis_block_with_leader;
use crate::packet::{Packet, Packets};
use crate::recycler::Recycler;
use crate::repair_service::RepairSlotRange;
use crate::service::Service;
use crate::shred::Shredder;
use crate::streamer::{receiver, responder};
use rand::seq::SliceRandom;
use rand::thread_rng;
use crate::{
blocktree::tests::make_many_slot_entries,
blocktree::{get_tmp_ledger_path, Blocktree},
cluster_info::ClusterInfo,
contact_info::ContactInfo,
entry::{create_ticks, Entry},
genesis_utils::create_genesis_block_with_leader,
packet::{Packet, Packets},
repair_service::RepairSlotRange,
service::Service,
shred::Shredder,
};
use rand::{seq::SliceRandom, thread_rng};
use solana_runtime::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::fs::remove_dir_all;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::time::Duration;
use solana_sdk::{
hash::Hash,
signature::{Keypair, KeypairUtil},
};
use std::{
net::UdpSocket,
sync::atomic::{AtomicBool, Ordering},
sync::mpsc::{channel, Receiver},
sync::{Arc, RwLock},
thread::sleep,
time::Duration,
};
fn local_entries_to_shred(
entries: Vec<Entry>,
@@ -317,11 +318,11 @@ mod test {
}
#[test]
fn test_process_blob() {
fn test_process_shred() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap());
let num_entries = 10;
let original_entries = make_tiny_test_entries(num_entries);
let original_entries = create_ticks(num_entries, Hash::default());
let mut shreds =
local_entries_to_shred(original_entries.clone(), 0, 0, &Arc::new(Keypair::new()));
shreds.reverse();
@@ -430,182 +431,6 @@ mod test {
*/
}
#[test]
#[ignore]
pub fn window_send_test() {
solana_logger::setup();
// setup a leader whose id is used to generate blobs and a validator
// node whose window service will retransmit leader blobs.
let leader_node = Node::new_localhost();
let validator_node = Node::new_localhost();
let exit = Arc::new(AtomicBool::new(false));
let cluster_info_me = ClusterInfo::new_with_invalid_keypair(validator_node.info.clone());
let me_id = leader_node.info.id;
let subs = Arc::new(RwLock::new(cluster_info_me));
let (s_reader, r_reader) = channel();
let t_receiver = receiver(
Arc::new(leader_node.sockets.gossip),
&exit,
s_reader,
Recycler::default(),
"window_send_test",
);
let (s_retransmit, r_retransmit) = channel();
let blocktree_path = get_tmp_ledger_path!();
let (blocktree, _, completed_slots_receiver) = Blocktree::open_with_signal(&blocktree_path)
.expect("Expected to be able to open database ledger");
let blocktree = Arc::new(blocktree);
let bank = Bank::new(&create_genesis_block_with_leader(100, &me_id, 10).genesis_block);
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let repair_strategy = RepairStrategy::RepairAll {
bank_forks: bank_forks.clone(),
completed_slots_receiver,
epoch_schedule: bank_forks
.read()
.unwrap()
.working_bank()
.epoch_schedule()
.clone(),
};
let t_window = WindowService::new(
blocktree,
subs,
r_reader,
s_retransmit,
Arc::new(leader_node.sockets.repair),
&exit,
repair_strategy,
&Arc::new(LeaderScheduleCache::default()),
|_, _, _, _, _| true,
);
let t_responder = {
let (s_responder, r_responder) = channel();
let blob_sockets: Vec<Arc<UdpSocket>> =
leader_node.sockets.tvu.into_iter().map(Arc::new).collect();
let t_responder = responder("window_send_test", blob_sockets[0].clone(), r_responder);
let num_blobs_to_make = 10;
let gossip_address = &leader_node.info.gossip;
let msgs = make_consecutive_blobs(
&me_id,
num_blobs_to_make,
0,
Hash::default(),
&gossip_address,
)
.into_iter()
.rev()
.collect();
s_responder.send(msgs).expect("send");
t_responder
};
let max_attempts = 10;
let mut num_attempts = 0;
let mut q = Vec::new();
loop {
assert!(num_attempts != max_attempts);
while let Ok(mut nq) = r_retransmit.recv_timeout(Duration::from_millis(500)) {
q.append(&mut nq.packets);
}
if q.len() == 10 {
break;
}
num_attempts += 1;
}
exit.store(true, Ordering::Relaxed);
t_receiver.join().expect("join");
t_responder.join().expect("join");
t_window.join().expect("join");
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
let _ignored = remove_dir_all(&blocktree_path);
}
#[test]
#[ignore]
pub fn window_send_leader_test2() {
solana_logger::setup();
// setup a leader whose id is used to generate blobs and a validator
// node whose window service will retransmit leader blobs.
let leader_node = Node::new_localhost();
let validator_node = Node::new_localhost();
let exit = Arc::new(AtomicBool::new(false));
let cluster_info_me = ClusterInfo::new_with_invalid_keypair(validator_node.info.clone());
let me_id = leader_node.info.id;
let subs = Arc::new(RwLock::new(cluster_info_me));
let (s_reader, r_reader) = channel();
let t_receiver = receiver(
Arc::new(leader_node.sockets.gossip),
&exit,
s_reader,
Recycler::default(),
"window_send_leader_test2",
);
let (s_retransmit, r_retransmit) = channel();
let blocktree_path = get_tmp_ledger_path!();
let (blocktree, _, completed_slots_receiver) = Blocktree::open_with_signal(&blocktree_path)
.expect("Expected to be able to open database ledger");
let blocktree = Arc::new(blocktree);
let bank = Bank::new(&create_genesis_block_with_leader(100, &me_id, 10).genesis_block);
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let epoch_schedule = *bank_forks.read().unwrap().working_bank().epoch_schedule();
let repair_strategy = RepairStrategy::RepairAll {
bank_forks,
completed_slots_receiver,
epoch_schedule,
};
let t_window = WindowService::new(
blocktree,
subs.clone(),
r_reader,
s_retransmit,
Arc::new(leader_node.sockets.repair),
&exit,
repair_strategy,
&Arc::new(LeaderScheduleCache::default()),
|_, _, _, _, _| true,
);
let t_responder = {
let (s_responder, r_responder) = channel();
let blob_sockets: Vec<Arc<UdpSocket>> =
leader_node.sockets.tvu.into_iter().map(Arc::new).collect();
let t_responder = responder("window_send_test", blob_sockets[0].clone(), r_responder);
let mut msgs = Vec::new();
let blobs =
make_consecutive_blobs(&me_id, 14u64, 0, Hash::default(), &leader_node.info.gossip);
for v in 0..10 {
let i = 9 - v;
msgs.push(blobs[i].clone());
}
s_responder.send(msgs).expect("send");
let mut msgs1 = Vec::new();
for v in 1..5 {
let i = 9 + v;
msgs1.push(blobs[i].clone());
}
s_responder.send(msgs1).expect("send");
t_responder
};
let mut q = Vec::new();
while let Ok(mut nq) = r_retransmit.recv_timeout(Duration::from_millis(5000)) {
q.append(&mut nq.packets);
}
assert!(q.len() > 10);
exit.store(true, Ordering::Relaxed);
t_receiver.join().expect("join");
t_responder.join().expect("join");
t_window.join().expect("join");
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
let _ignored = remove_dir_all(&blocktree_path);
}
fn make_test_window(
packet_receiver: Receiver<Packets>,
exit: Arc<AtomicBool>,


@@ -1,218 +0,0 @@
#[macro_use]
extern crate solana_core;
use log::*;
use solana_core::banking_stage::create_test_recorder;
use solana_core::blocktree::{create_new_tmp_ledger, Blocktree};
use solana_core::cluster_info::{ClusterInfo, Node};
use solana_core::confidence::ForkConfidenceCache;
use solana_core::entry::next_entry_mut;
use solana_core::entry::EntrySlice;
use solana_core::genesis_utils::{create_genesis_block_with_leader, GenesisBlockInfo};
use solana_core::gossip_service::GossipService;
use solana_core::packet::index_blobs;
use solana_core::rpc_subscriptions::RpcSubscriptions;
use solana_core::service::Service;
use solana_core::storage_stage::StorageState;
use solana_core::streamer;
use solana_core::tvu::{Sockets, Tvu};
use solana_core::validator;
use solana_runtime::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH;
use solana_sdk::signature::Signable;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction;
use std::fs::remove_dir_all;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::time::Duration;
fn new_gossip(
cluster_info: Arc<RwLock<ClusterInfo>>,
gossip: UdpSocket,
exit: &Arc<AtomicBool>,
) -> GossipService {
GossipService::new(&cluster_info, None, None, gossip, exit)
}
/// Test that message sent from leader to target1 and replayed to target2
#[test]
#[ignore]
fn test_replay() {
solana_logger::setup();
let leader_keypair = Keypair::new();
let leader = Node::new_localhost_with_pubkey(&leader_keypair.pubkey());
let target1_keypair = Keypair::new();
let target1 = Node::new_localhost_with_pubkey(&target1_keypair.pubkey());
let target2 = Node::new_localhost();
let exit = Arc::new(AtomicBool::new(false));
// start cluster_info_l
let cluster_info_l = ClusterInfo::new_with_invalid_keypair(leader.info.clone());
let cref_l = Arc::new(RwLock::new(cluster_info_l));
let dr_l = new_gossip(cref_l, leader.sockets.gossip, &exit);
// start cluster_info2
let mut cluster_info2 = ClusterInfo::new_with_invalid_keypair(target2.info.clone());
cluster_info2.insert_info(leader.info.clone());
let cref2 = Arc::new(RwLock::new(cluster_info2));
let dr_2 = new_gossip(cref2, target2.sockets.gossip, &exit);
// setup some blob services to send blobs into the socket
// to simulate the source peer and get blobs out of the socket to
// simulate target peer
let (s_reader, r_reader) = channel();
let blob_sockets: Vec<Arc<UdpSocket>> = target2.sockets.tvu.into_iter().map(Arc::new).collect();
let t_receiver = streamer::blob_receiver(blob_sockets[0].clone(), &exit, s_reader);
// simulate leader sending messages
let (s_responder, r_responder) = channel();
let t_responder = streamer::responder(
"test_replay",
Arc::new(leader.sockets.retransmit),
r_responder,
);
let mint_balance = 10_000;
let leader_balance = 100;
let GenesisBlockInfo {
mut genesis_block,
mint_keypair,
..
} = create_genesis_block_with_leader(mint_balance, &leader.info.id, leader_balance);
genesis_block.ticks_per_slot = 160;
genesis_block.slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH as u64;
let (blocktree_path, blockhash) = create_new_tmp_ledger!(&genesis_block);
let tvu_addr = target1.info.tvu;
let (
_genesis_blockhash,
bank_forks,
_bank_forks_info,
blocktree,
ledger_signal_receiver,
completed_slots_receiver,
leader_schedule_cache,
_,
) = validator::new_banks_from_blocktree(None, &blocktree_path, None, None, true, None);
let working_bank = bank_forks.working_bank();
assert_eq!(
working_bank.get_balance(&mint_keypair.pubkey()),
mint_balance
);
let leader_schedule_cache = Arc::new(leader_schedule_cache);
// start cluster_info1
let bank_forks = Arc::new(RwLock::new(bank_forks));
let mut cluster_info1 = ClusterInfo::new_with_invalid_keypair(target1.info.clone());
cluster_info1.insert_info(leader.info.clone());
let cref1 = Arc::new(RwLock::new(cluster_info1));
let dr_1 = new_gossip(cref1.clone(), target1.sockets.gossip, &exit);
let voting_keypair = Keypair::new();
let storage_keypair = Arc::new(Keypair::new());
let blocktree = Arc::new(blocktree);
{
let (poh_service_exit, poh_recorder, poh_service, _entry_receiver) =
create_test_recorder(&working_bank, &blocktree);
let fork_confidence_cache = Arc::new(RwLock::new(ForkConfidenceCache::default()));
let tvu = Tvu::new(
&voting_keypair.pubkey(),
Some(&Arc::new(voting_keypair)),
&storage_keypair,
&bank_forks,
&cref1,
{
Sockets {
repair: target1.sockets.repair,
retransmit: target1.sockets.retransmit,
fetch: target1.sockets.tvu,
forwards: target1.sockets.tvu_forwards,
}
},
blocktree,
&StorageState::default(),
None,
None,
ledger_signal_receiver,
&Arc::new(RpcSubscriptions::default()),
&poh_recorder,
&leader_schedule_cache,
&exit,
completed_slots_receiver,
fork_confidence_cache,
);
let mut mint_ref_balance = mint_balance;
let mut msgs = Vec::new();
let mut blob_idx = 0;
let num_transfers = 10;
let mut transfer_amount = 501;
let bob_keypair = Keypair::new();
let mut cur_hash = blockhash;
for i in 0..num_transfers {
let entry0 = next_entry_mut(&mut cur_hash, i, vec![]);
let entry_tick0 = next_entry_mut(&mut cur_hash, i + 1, vec![]);
let tx0 = system_transaction::create_user_account(
&mint_keypair,
&bob_keypair.pubkey(),
transfer_amount,
blockhash,
);
let entry_tick1 = next_entry_mut(&mut cur_hash, i + 1, vec![]);
let entry1 = next_entry_mut(&mut cur_hash, i + num_transfers, vec![tx0]);
let entry_tick2 = next_entry_mut(&mut cur_hash, i + 1, vec![]);
mint_ref_balance -= transfer_amount;
transfer_amount -= 1; // Sneaky: change transfer_amount slightly to avoid DuplicateSignature errors
let entries = vec![entry0, entry_tick0, entry_tick1, entry1, entry_tick2];
let blobs = entries.to_shared_blobs();
index_blobs(&blobs, &leader.info.id, blob_idx, 1, 0);
blob_idx += blobs.len() as u64;
blobs.iter().for_each(|b| {
let mut b_w = b.write().unwrap();
b_w.set_id(&leader_keypair.pubkey());
b_w.meta.set_addr(&tvu_addr);
b_w.sign(&leader_keypair);
});
msgs.extend(blobs.into_iter());
}
// send the blobs into the socket
s_responder.send(msgs).expect("send");
drop(s_responder);
// receive retransmitted messages
let timer = Duration::new(1, 0);
while let Ok(_msg) = r_reader.recv_timeout(timer) {
info!("got msg");
}
let working_bank = bank_forks.read().unwrap().working_bank();
let final_mint_balance = working_bank.get_balance(&mint_keypair.pubkey());
assert_eq!(final_mint_balance, mint_ref_balance);
let bob_balance = working_bank.get_balance(&bob_keypair.pubkey());
assert_eq!(bob_balance, mint_balance - mint_ref_balance);
exit.store(true, Ordering::Relaxed);
poh_service_exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
tvu.join().unwrap();
dr_l.join().unwrap();
dr_2.join().unwrap();
dr_1.join().unwrap();
t_receiver.join().unwrap();
t_responder.join().unwrap();
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
let _ignored = remove_dir_all(&blocktree_path);
}