Verify number of hashes for each block of entries (#6262)

* Verify number of hashes for each block of entries

* Fix blocktree processor tick check

* Rebase once more
Justin Starry 2019-10-31 16:38:50 -04:00 committed by GitHub
parent 111942a47d
commit e8e5ddc55d
19 changed files with 537 additions and 139 deletions
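
In short: with this change a validator rejects a block unless every tick carries exactly `poh_config.hashes_per_tick` hashes, the slot contains exactly `ticks_per_slot` ticks, and the block ends on a tick entry. The per-tick rule amounts to the following check (an illustrative sketch only; the real implementation is `EntrySlice::verify_tick_hash_count` in the entry.rs hunk below, which works on `Entry` values rather than the `(num_hashes, is_tick)` pairs used here):

    // Illustrative model: entries as (num_hashes, is_tick) pairs.
    fn ticks_have_expected_hashes(entries: &[(u64, bool)], hashes_per_tick: u64) -> bool {
        if hashes_per_tick == 0 {
            return true; // hashing disabled
        }
        let mut tick_hash_count = 0;
        for &(num_hashes, is_tick) in entries {
            tick_hash_count += num_hashes;
            if is_tick {
                // A tick must land exactly on the configured hash count.
                if tick_hash_count != hashes_per_tick {
                    return false;
                }
                tick_hash_count = 0;
            }
        }
        // A batch may end mid-tick, but must not overshoot a full tick.
        tick_hash_count < hashes_per_tick
    }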


@@ -33,7 +33,11 @@ fn setup_read_bench(
     slot: u64,
 ) {
     // Make some big and small entries
-    let entries = create_ticks(num_large_shreds * 4 + num_small_shreds * 2, Hash::default());
+    let entries = create_ticks(
+        num_large_shreds * 4 + num_small_shreds * 2,
+        0,
+        Hash::default(),
+    );
 
     // Convert the entries to shreds, write the shreds to the ledger
     let shreds = entries_to_test_shreds(entries, slot, slot.saturating_sub(1), true);
@@ -48,7 +52,7 @@ fn setup_read_bench(
 fn bench_write_small(bench: &mut Bencher) {
     let ledger_path = get_tmp_ledger_path!();
     let num_entries = 32 * 1024;
-    let entries = create_ticks(num_entries, Hash::default());
+    let entries = create_ticks(num_entries, 0, Hash::default());
     bench_write_shreds(bench, entries, &ledger_path);
 }
@@ -58,7 +62,7 @@ fn bench_write_small(bench: &mut Bencher) {
 fn bench_write_big(bench: &mut Bencher) {
     let ledger_path = get_tmp_ledger_path!();
     let num_entries = 32 * 1024;
-    let entries = create_ticks(num_entries, Hash::default());
+    let entries = create_ticks(num_entries, 0, Hash::default());
     bench_write_shreds(bench, entries, &ledger_path);
 }
@@ -127,7 +131,7 @@ fn bench_insert_data_shred_small(bench: &mut Bencher) {
     let blocktree =
         Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
     let num_entries = 32 * 1024;
-    let entries = create_ticks(num_entries, Hash::default());
+    let entries = create_ticks(num_entries, 0, Hash::default());
     bench.iter(move || {
         let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true);
         blocktree.insert_shreds(shreds, None).unwrap();
@@ -142,7 +146,7 @@ fn bench_insert_data_shred_big(bench: &mut Bencher) {
     let blocktree =
         Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
     let num_entries = 32 * 1024;
-    let entries = create_ticks(num_entries, Hash::default());
+    let entries = create_ticks(num_entries, 0, Hash::default());
     bench.iter(move || {
         let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true);
         blocktree.insert_shreds(shreds, None).unwrap();


@@ -33,7 +33,7 @@ fn bench_shredder_ticks(bencher: &mut Bencher) {
     let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
     // ~1Mb
     let num_ticks = max_ticks_per_n_shreds(1) * num_shreds as u64;
-    let entries = create_ticks(num_ticks, Hash::default());
+    let entries = create_ticks(num_ticks, 0, Hash::default());
     bencher.iter(|| {
         let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone()).unwrap();
         shredder.entries_to_shreds(&entries, true, 0);
@@ -62,7 +62,7 @@ fn bench_deshredder(bencher: &mut Bencher) {
     // ~10Mb
     let num_shreds = ((10000 * 1000) + (shred_size - 1)) / shred_size;
     let num_ticks = max_ticks_per_n_shreds(1) * num_shreds as u64;
-    let entries = create_ticks(num_ticks, Hash::default());
+    let entries = create_ticks(num_ticks, 0, Hash::default());
     let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp).unwrap();
     let data_shreds = shredder.entries_to_shreds(&entries, true, 0).0;
     bencher.iter(|| {


@@ -134,7 +134,7 @@ mod test {
         let (slot_full_sender, slot_full_receiver) = channel();
 
         // Create entries - 4 ticks + 1 populated entry + 1 tick
-        let mut entries = create_ticks(4, Hash::default());
+        let mut entries = create_ticks(4, 0, Hash::default());
 
         let keypair = Keypair::new();
         let mut blockhash = entries[3].hash;
@@ -142,7 +142,7 @@ mod test {
         let entry = Entry::new(&mut blockhash, 1, vec![tx]);
         blockhash = entry.hash;
         entries.push(entry);
-        let final_tick = create_ticks(1, blockhash);
+        let final_tick = create_ticks(1, 0, blockhash);
         entries.extend_from_slice(&final_tick);
 
         let expected_entries = entries.clone();


@@ -281,7 +281,7 @@ mod test {
             max_tick_height = bank.max_tick_height();
             ticks_per_slot = bank.ticks_per_slot();
             slot = bank.slot();
-            let ticks = create_ticks(max_tick_height - start_tick_height, Hash::default());
+            let ticks = create_ticks(max_tick_height - start_tick_height, 0, Hash::default());
             for (i, tick) in ticks.into_iter().enumerate() {
                 entry_sender
                     .send((bank.clone(), (tick, i as u64 + 1)))


@@ -334,7 +334,7 @@ mod test {
         setup(num_shreds_per_slot);
 
         // Insert 1 less than the number of ticks needed to finish the slot
-        let ticks = create_ticks(genesis_block.ticks_per_slot - 1, genesis_block.hash());
+        let ticks = create_ticks(genesis_block.ticks_per_slot - 1, 0, genesis_block.hash());
         let receive_results = ReceiveResults {
             entries: ticks.clone(),
             time_elapsed: Duration::new(3, 0),
@@ -372,7 +372,7 @@ mod test {
         // Interrupting the slot should cause the unfinished_slot and stats to reset
         let num_shreds = 1;
         assert!(num_shreds < num_shreds_per_slot);
-        let ticks = create_ticks(max_ticks_per_n_shreds(num_shreds), genesis_block.hash());
+        let ticks = create_ticks(max_ticks_per_n_shreds(num_shreds), 0, genesis_block.hash());
         let receive_results = ReceiveResults {
             entries: ticks.clone(),
             time_elapsed: Duration::new(2, 0),
@@ -401,7 +401,7 @@ mod test {
         setup(num_shreds_per_slot);
 
         // Insert complete slot of ticks needed to finish the slot
-        let ticks = create_ticks(genesis_block.ticks_per_slot, genesis_block.hash());
+        let ticks = create_ticks(genesis_block.ticks_per_slot, 0, genesis_block.hash());
         let receive_results = ReceiveResults {
             entries: ticks.clone(),
             time_elapsed: Duration::new(3, 0),


@@ -131,7 +131,7 @@ mod tests {
         }
 
         let slots_per_segment = 32;
-        let entries = create_ticks(slots_per_segment, Hash::default());
+        let entries = create_ticks(slots_per_segment, 0, Hash::default());
         let ledger_dir = "test_encrypt_file_many_keys_single";
         let ledger_path = get_tmp_ledger_path(ledger_dir);
         let ticks_per_slot = 16;
@@ -196,7 +196,7 @@ mod tests {
         let ledger_dir = "test_encrypt_file_many_keys_multiple";
         let ledger_path = get_tmp_ledger_path(ledger_dir);
         let ticks_per_slot = 90;
-        let entries = create_ticks(2 * ticks_per_slot, Hash::default());
+        let entries = create_ticks(2 * ticks_per_slot, 0, Hash::default());
         let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
         blocktree
             .write_entries(


@@ -160,14 +160,6 @@ impl fmt::Debug for Blob {
     }
 }
 
-#[derive(Debug)]
-pub enum BlobError {
-    /// the Blob's meta and data are not self-consistent
-    BadState,
-    /// Blob verification failed
-    VerificationFailed,
-}
-
 impl Packets {
     pub fn recv_from(&mut self, socket: &UdpSocket) -> Result<usize> {
         let mut i = 0;


@@ -5,12 +5,12 @@ use crate::confidence::{
     AggregateConfidenceService, ConfidenceAggregationData, ForkConfidenceCache,
 };
 use crate::consensus::{StakeLockout, Tower};
-use crate::packet::BlobError;
 use crate::poh_recorder::PohRecorder;
 use crate::result::{Error, Result};
 use crate::rpc_subscriptions::RpcSubscriptions;
 use crate::service::Service;
 use solana_ledger::bank_forks::BankForks;
+use solana_ledger::block_error::BlockError;
 use solana_ledger::blocktree::{Blocktree, BlocktreeError};
 use solana_ledger::blocktree_processor;
 use solana_ledger::entry::{Entry, EntrySlice};
@@ -113,6 +113,7 @@ struct ForkProgress {
     last_entry: Hash,
     num_shreds: usize,
     num_entries: usize,
+    tick_hash_count: u64,
     started_ms: u64,
     is_dead: bool,
     stats: ReplaySlotStats,
@@ -124,6 +125,7 @@ impl ForkProgress {
             last_entry,
             num_shreds: 0,
             num_entries: 0,
+            tick_hash_count: 0,
             started_ms: timing::timestamp(),
             is_dead: false,
             stats: ReplaySlotStats::new(slot),
@@ -399,7 +401,7 @@ impl ReplayStage {
                 let tx_error = Err(e.clone());
                 !Bank::can_commit(&tx_error)
             }
-            Err(Error::BlobError(BlobError::VerificationFailed)) => true,
+            Err(Error::BlockError(_)) => true,
            Err(Error::BlocktreeError(BlocktreeError::InvalidShredData(_))) => true,
             _ => false,
         }
@@ -759,25 +761,46 @@
         result
     }
 
+    fn verify_ticks(
+        bank: &Arc<Bank>,
+        entries: &[Entry],
+        tick_hash_count: &mut u64,
+    ) -> std::result::Result<(), BlockError> {
+        if entries.is_empty() {
+            return Ok(());
+        }
+
+        let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0);
+        if !entries.verify_tick_hash_count(tick_hash_count, hashes_per_tick) {
+            return Err(BlockError::InvalidTickHashCount);
+        }
+
+        let next_bank_tick_height = bank.tick_height() + entries.tick_count();
+        let max_bank_tick_height = bank.max_tick_height();
+        if next_bank_tick_height > max_bank_tick_height {
+            return Err(BlockError::InvalidTickCount);
+        }
+
+        let has_trailing_entry = !entries.last().unwrap().is_tick();
+        if next_bank_tick_height == max_bank_tick_height && has_trailing_entry {
+            return Err(BlockError::TrailingEntry);
+        }
+
+        Ok(())
+    }
+
     fn verify_and_process_entries(
         bank: &Arc<Bank>,
         entries: &[Entry],
         shred_index: usize,
         bank_progress: &mut ForkProgress,
     ) -> Result<()> {
-        datapoint_debug!("verify-batch-size", ("size", entries.len() as i64, i64));
-        let mut verify_total = Measure::start("verify_and_process_entries");
         let last_entry = &bank_progress.last_entry;
-        let mut entry_state = entries.start_verify(last_entry);
-
-        let mut replay_elapsed = Measure::start("replay_elapsed");
-        let res = blocktree_processor::process_entries(bank, entries, true);
-        replay_elapsed.stop();
-        bank_progress.stats.replay_elapsed += replay_elapsed.as_us();
-
-        if !entry_state.finish_verify(entries) {
-            info!(
-                "entry verification failed, slot: {}, entry len: {}, tick_height: {}, last entry: {}, last_blockhash: {}, shred_index: {}",
+        let tick_hash_count = &mut bank_progress.tick_hash_count;
+        let handle_block_error = move |block_error: BlockError| -> Result<()> {
+            warn!(
+                "{:#?}, slot: {}, entry len: {}, tick_height: {}, last entry: {}, last_blockhash: {}, shred_index: {}",
+                block_error,
                 bank.slot(),
                 entries.len(),
                 bank.tick_height(),
@@ -791,8 +814,27 @@
                 ("slot", bank.slot(), i64),
                 ("last_entry", last_entry.to_string(), String),
             );
-            return Err(Error::BlobError(BlobError::VerificationFailed));
+            Err(Error::BlockError(block_error))
+        };
+
+        if let Err(block_error) = Self::verify_ticks(bank, entries, tick_hash_count) {
+            return handle_block_error(block_error);
         }
 
+        datapoint_info!("verify-batch-size", ("size", entries.len() as i64, i64));
+        let mut verify_total = Measure::start("verify_and_process_entries");
+        let mut entry_state = entries.start_verify(last_entry);
+
+        let mut replay_elapsed = Measure::start("replay_elapsed");
+        let res = blocktree_processor::process_entries(bank, entries, true);
+        replay_elapsed.stop();
+        bank_progress.stats.replay_elapsed += replay_elapsed.as_us();
+
+        if !entry_state.finish_verify(entries) {
+            return handle_block_error(BlockError::InvalidEntryHash);
+        }
+
         verify_total.stop();
         bank_progress.stats.entry_verification_elapsed =
             verify_total.as_us() - replay_elapsed.as_us();
@@ -951,17 +993,20 @@
         let missing_keypair = Keypair::new();
         let missing_keypair2 = Keypair::new();
 
-        let res = check_dead_fork(|_keypair, blockhash, slot| {
+        let res = check_dead_fork(|_keypair, bank| {
+            let blockhash = bank.last_blockhash();
+            let slot = bank.slot();
+            let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0);
             let entry = entry::next_entry(
-                blockhash,
-                1,
+                &blockhash,
+                hashes_per_tick.saturating_sub(1),
                 vec![
-                    system_transaction::transfer(&keypair1, &keypair2.pubkey(), 2, *blockhash), // should be fine,
+                    system_transaction::transfer(&keypair1, &keypair2.pubkey(), 2, blockhash), // should be fine,
                     system_transaction::transfer(
                         &missing_keypair,
                         &missing_keypair2.pubkey(),
                         2,
-                        *blockhash,
+                        blockhash,
                     ), // should cause AccountNotFound error
                 ],
             );
@@ -977,29 +1022,105 @@
     #[test]
     fn test_dead_fork_entry_verification_failure() {
         let keypair2 = Keypair::new();
-        let res = check_dead_fork(|genesis_keypair, blockhash, slot| {
+        let res = check_dead_fork(|genesis_keypair, bank| {
+            let blockhash = bank.last_blockhash();
+            let slot = bank.slot();
             let bad_hash = hash(&[2; 30]);
+            let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0);
             let entry = entry::next_entry(
                 // Use wrong blockhash so that the entry causes an entry verification failure
                 &bad_hash,
-                1,
+                hashes_per_tick.saturating_sub(1),
                 vec![system_transaction::transfer(
                     &genesis_keypair,
                     &keypair2.pubkey(),
                     2,
-                    *blockhash,
+                    blockhash,
                 )],
             );
             entries_to_test_shreds(vec![entry], slot, slot.saturating_sub(1), false)
         });
 
-        assert_matches!(res, Err(Error::BlobError(BlobError::VerificationFailed)));
+        if let Err(Error::BlockError(block_error)) = res {
+            assert_eq!(block_error, BlockError::InvalidEntryHash);
+        } else {
+            assert!(false);
+        }
+    }
+
+    #[test]
+    fn test_dead_fork_invalid_tick_hash_count() {
+        let res = check_dead_fork(|_keypair, bank| {
+            let blockhash = bank.last_blockhash();
+            let slot = bank.slot();
+            let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0);
+            assert!(hashes_per_tick > 0);
+
+            let too_few_hashes_tick = Entry::new(&blockhash, hashes_per_tick - 1, vec![]);
+            entries_to_test_shreds(
+                vec![too_few_hashes_tick],
+                slot,
+                slot.saturating_sub(1),
+                false,
+            )
+        });
+
+        if let Err(Error::BlockError(block_error)) = res {
+            assert_eq!(block_error, BlockError::InvalidTickHashCount);
+        } else {
+            assert!(false);
+        }
+    }
+
+    #[test]
+    fn test_dead_fork_invalid_slot_tick_count() {
+        let res = check_dead_fork(|_keypair, bank| {
+            let blockhash = bank.last_blockhash();
+            let slot = bank.slot();
+            let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0);
+            entries_to_test_shreds(
+                entry::create_ticks(bank.ticks_per_slot() + 1, hashes_per_tick, blockhash),
+                slot,
+                slot.saturating_sub(1),
+                false,
+            )
+        });
+
+        if let Err(Error::BlockError(block_error)) = res {
+            assert_eq!(block_error, BlockError::InvalidTickCount);
+        } else {
+            assert!(false);
+        }
+    }
+
+    #[test]
+    fn test_dead_fork_trailing_entry() {
+        let keypair = Keypair::new();
+        let res = check_dead_fork(|genesis_keypair, bank| {
+            let blockhash = bank.last_blockhash();
+            let slot = bank.slot();
+            let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0);
+            let mut entries =
+                entry::create_ticks(bank.ticks_per_slot(), hashes_per_tick, blockhash.clone());
+
+            let last_entry_hash = entries.last().unwrap().hash;
+            let tx =
+                system_transaction::transfer(&genesis_keypair, &keypair.pubkey(), 2, blockhash);
+            let trailing_entry = entry::next_entry(&last_entry_hash, 1, vec![tx]);
+            entries.push(trailing_entry);
+
+            entries_to_test_shreds(entries, slot, slot.saturating_sub(1), false)
+        });
+
+        if let Err(Error::BlockError(block_error)) = res {
+            assert_eq!(block_error, BlockError::TrailingEntry);
+        } else {
+            assert!(false);
+        }
     }
 
     #[test]
     fn test_dead_fork_entry_deserialize_failure() {
         // Insert entry that causes deserialization failure
-        let res = check_dead_fork(|_, _, _| {
+        let res = check_dead_fork(|_, _| {
             let payload_len = SIZE_OF_DATA_SHRED_PAYLOAD;
             let gibberish = [0xa5u8; PACKET_DATA_SIZE];
             let mut data_header = DataShredHeader::default();
@@ -1027,7 +1148,7 @@
     // marked as dead. Returns the error for caller to verify.
     fn check_dead_fork<F>(shred_to_insert: F) -> Result<()>
     where
-        F: Fn(&Keypair, &Hash, u64) -> Vec<Shred>,
+        F: Fn(&Keypair, Arc<Bank>) -> Vec<Shred>,
     {
         let ledger_path = get_tmp_ledger_path!();
         let res = {
@@ -1035,15 +1156,16 @@
                 Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
             );
             let GenesisBlockInfo {
-                genesis_block,
+                mut genesis_block,
                 mint_keypair,
                 ..
             } = create_genesis_block(1000);
+            genesis_block.poh_config.hashes_per_tick = Some(2);
             let bank0 = Arc::new(Bank::new(&genesis_block));
             let mut progress = HashMap::new();
             let last_blockhash = bank0.last_blockhash();
             progress.insert(bank0.slot(), ForkProgress::new(0, last_blockhash));
-            let shreds = shred_to_insert(&mint_keypair, &last_blockhash, bank0.slot());
+            let shreds = shred_to_insert(&mint_keypair, bank0.clone());
             blocktree.insert_shreds(shreds, None).unwrap();
             let (res, _tx_count) =
                 ReplayStage::replay_blocktree_into_bank(&bank0, &blocktree, &mut progress);
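
For a concrete feel for `verify_ticks` above, the new ledger helpers can be exercised directly. A minimal sketch (assuming the `solana_ledger`/`solana_sdk` crate paths used in this diff):

    use solana_ledger::entry::{create_ticks, EntrySlice};
    use solana_sdk::hash::Hash;

    fn demo_tick_hash_count() {
        let hashes_per_tick = 2;

        // A well-formed run of ticks: each performs exactly hashes_per_tick hashes.
        let ticks = create_ticks(4, hashes_per_tick, Hash::default());
        let mut tick_hash_count = 0;
        assert!(ticks.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick));

        // A tick with too few hashes trips the check that verify_ticks maps to
        // BlockError::InvalidTickHashCount.
        let short = create_ticks(1, hashes_per_tick - 1, Hash::default());
        let mut tick_hash_count = 0;
        assert!(!short.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick));
    }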


@@ -1,8 +1,8 @@
 //! The `result` module exposes a Result type that propagates one of many different Error types.
 
 use crate::cluster_info;
-use crate::packet;
 use crate::poh_recorder;
+use solana_ledger::block_error;
 use solana_ledger::blocktree;
 use solana_ledger::snapshot_utils;
 use solana_sdk::transaction;
@@ -23,10 +23,10 @@
     Serialize(std::boxed::Box<bincode::ErrorKind>),
     TransactionError(transaction::TransactionError),
     ClusterInfoError(cluster_info::ClusterInfoError),
-    BlobError(packet::BlobError),
     ErasureError(reed_solomon_erasure::Error),
     SendError,
     PohRecorderError(poh_recorder::PohRecorderError),
+    BlockError(block_error::BlockError),
     BlocktreeError(blocktree::BlocktreeError),
     FsExtra(fs_extra::error::Error),
     ToBlobError,


@@ -326,7 +326,7 @@ mod test {
         let blocktree_path = get_tmp_ledger_path!();
         let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap());
         let num_entries = 10;
-        let original_entries = create_ticks(num_entries, Hash::default());
+        let original_entries = create_ticks(num_entries, 0, Hash::default());
         let mut shreds = local_entries_to_shred(&original_entries, 0, 0, &Arc::new(Keypair::new()));
         shreds.reverse();
         blocktree


@@ -117,6 +117,7 @@ mod tests {
                 &next_bank,
                 &entry::create_ticks(
                     DEFAULT_TICKS_PER_SLOT * next_bank.slots_per_segment() + 1,
+                    0,
                     bank.last_blockhash(),
                 ),
                 true,
@@ -207,7 +208,7 @@ mod tests {
             let bank = Arc::new(Bank::new_from_parent(&last_bank, &keypair.pubkey(), i));
             blocktree_processor::process_entries(
                 &bank,
-                &entry::create_ticks(64, bank.last_blockhash()),
+                &entry::create_ticks(64, 0, bank.last_blockhash()),
                 true,
             )
             .expect("failed process entries");

ledger/src/block_error.rs (new file, 15 lines)

@@ -0,0 +1,15 @@
+#[derive(Debug, PartialEq)]
+pub enum BlockError {
+    /// Block entries hashes must all be valid
+    InvalidEntryHash,
+
+    /// Blocks can not have extra ticks or missing ticks
+    InvalidTickCount,
+
+    /// All ticks must contain the same number of hashes within a block
+    InvalidTickHashCount,
+
+    /// Blocks must end in a tick entry, trailing transaction entries are not allowed to guarantee
+    /// that each block has the same number of hashes
+    TrailingEntry,
+}
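
Callers that only need to report which rule a block broke can match exhaustively on the new enum; a minimal sketch:

    use solana_ledger::block_error::BlockError;

    // Map each block-validity failure to a human-readable reason.
    fn reason(err: &BlockError) -> &'static str {
        match err {
            BlockError::InvalidEntryHash => "an entry hash failed verification",
            BlockError::InvalidTickCount => "slot had too many or too few ticks",
            BlockError::InvalidTickHashCount => "a tick had the wrong number of hashes",
            BlockError::TrailingEntry => "slot did not end with a tick entry",
        }
    }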


@@ -888,18 +888,12 @@ impl Blocktree {
         keypair: &Arc<Keypair>,
         entries: Vec<Entry>,
     ) -> Result<usize> {
-        assert!(num_ticks_in_start_slot < ticks_per_slot);
-        let mut remaining_ticks_in_slot = ticks_per_slot - num_ticks_in_start_slot;
+        let mut parent_slot = parent.map_or(start_slot.saturating_sub(1), |v| v);
+        let num_slots = (start_slot - parent_slot).max(1); // Note: slot 0 has parent slot 0
+        assert!(num_ticks_in_start_slot < num_slots * ticks_per_slot);
+        let mut remaining_ticks_in_slot = num_slots * ticks_per_slot - num_ticks_in_start_slot;
 
         let mut current_slot = start_slot;
-        let mut parent_slot = parent.map_or(
-            if current_slot == 0 {
-                current_slot
-            } else {
-                current_slot - 1
-            },
-            |v| v,
-        );
         let mut shredder = Shredder::new(current_slot, parent_slot, 0.0, keypair.clone())
             .expect("Failed to create entry shredder");
         let mut all_shreds = vec![];
@@ -1686,14 +1680,14 @@ fn slot_has_updates(slot_meta: &SlotMeta, slot_meta_backup: &Option<SlotMeta>) -
 //
 // Returns the blockhash that can be used to append entries with.
 pub fn create_new_ledger(ledger_path: &Path, genesis_block: &GenesisBlock) -> Result<Hash> {
-    let ticks_per_slot = genesis_block.ticks_per_slot;
     Blocktree::destroy(ledger_path)?;
     genesis_block.write(&ledger_path)?;
 
     // Fill slot 0 with ticks that link back to the genesis_block to bootstrap the ledger.
     let blocktree = Blocktree::open(ledger_path)?;
-
-    let entries = create_ticks(ticks_per_slot, genesis_block.hash());
+    let ticks_per_slot = genesis_block.ticks_per_slot;
+    let hashes_per_tick = genesis_block.poh_config.hashes_per_tick.unwrap_or(0);
+    let entries = create_ticks(ticks_per_slot, hashes_per_tick, genesis_block.hash());
     let last_hash = entries.last().unwrap().hash;
 
     let shredder = Shredder::new(0, 0, 0.0, Arc::new(Keypair::new()))
@@ -1787,16 +1781,18 @@ pub fn entries_to_test_shreds(
     shredder.entries_to_shreds(&entries, is_full_slot, 0).0
 }
 
+// used for tests only
 pub fn make_slot_entries(
     slot: u64,
     parent_slot: u64,
     num_entries: u64,
 ) -> (Vec<Shred>, Vec<Entry>) {
-    let entries = create_ticks(num_entries, Hash::default());
+    let entries = create_ticks(num_entries, 0, Hash::default());
     let shreds = entries_to_test_shreds(entries.clone(), slot, parent_slot, true);
     (shreds, entries)
 }
 
+// used for tests only
 pub fn make_many_slot_entries(
     start_slot: u64,
     num_slots: u64,
@@ -1816,6 +1812,7 @@ pub fn make_many_slot_entries(
 }
 
 // Create shreds for slots that have a parent-child relationship defined by the input `chain`
+// used for tests only
 pub fn make_chaining_slot_entries(
     chain: &[u64],
     entries_per_slot: u64,
@@ -1857,7 +1854,7 @@ pub mod tests {
         let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);
         let ledger = Blocktree::open(&ledger_path).unwrap();
 
-        let ticks = create_ticks(genesis_block.ticks_per_slot, genesis_block.hash());
+        let ticks = create_ticks(genesis_block.ticks_per_slot, 0, genesis_block.hash());
         let entries = ledger.get_slot_entries(0, 0, None).unwrap();
 
         assert_eq!(ticks, entries);
@@ -1911,7 +1908,7 @@ pub mod tests {
         let mut shreds_per_slot = vec![];
 
         for i in 0..num_slots {
-            let mut new_ticks = create_ticks(ticks_per_slot, Hash::default());
+            let mut new_ticks = create_ticks(ticks_per_slot, 0, Hash::default());
             let num_shreds = ledger
                 .write_entries(
                     i,
@@ -2241,7 +2238,7 @@ pub mod tests {
         let blocktree_path = get_tmp_ledger_path("test_get_slot_entries1");
         {
             let blocktree = Blocktree::open(&blocktree_path).unwrap();
-            let entries = create_ticks(8, Hash::default());
+            let entries = create_ticks(8, 0, Hash::default());
             let shreds = entries_to_test_shreds(entries[0..4].to_vec(), 1, 0, false);
             blocktree
                 .insert_shreds(shreds, None)
@@ -2276,7 +2273,7 @@ pub mod tests {
             let num_slots = 5 as u64;
             let mut index = 0;
             for slot in 0..num_slots {
-                let entries = create_ticks(slot + 1, Hash::default());
+                let entries = create_ticks(slot + 1, 0, Hash::default());
                 let last_entry = entries.last().unwrap().clone();
                 let mut shreds =
                     entries_to_test_shreds(entries, slot, slot.saturating_sub(1), false);
@@ -2308,13 +2305,13 @@ pub mod tests {
             let num_slots = 5 as u64;
             let shreds_per_slot = 5 as u64;
             let entry_serialized_size =
-                bincode::serialized_size(&create_ticks(1, Hash::default())).unwrap();
+                bincode::serialized_size(&create_ticks(1, 0, Hash::default())).unwrap();
             let entries_per_slot =
                 (shreds_per_slot * PACKET_DATA_SIZE as u64) / entry_serialized_size;
 
             // Write entries
             for slot in 0..num_slots {
-                let entries = create_ticks(entries_per_slot, Hash::default());
+                let entries = create_ticks(entries_per_slot, 0, Hash::default());
                 let shreds =
                     entries_to_test_shreds(entries.clone(), slot, slot.saturating_sub(1), false);
                 assert!(shreds.len() as u64 >= shreds_per_slot);
@@ -3097,7 +3094,7 @@ pub mod tests {
         assert!(gap > 3);
         // Create enough entries to ensure there are at least two shreds created
         let num_entries = max_ticks_per_n_shreds(1) + 1;
-        let entries = create_ticks(num_entries, Hash::default());
+        let entries = create_ticks(num_entries, 0, Hash::default());
         let mut shreds = entries_to_test_shreds(entries, slot, 0, true);
         let num_shreds = shreds.len();
         assert!(num_shreds > 1);
@@ -3189,7 +3186,7 @@ pub mod tests {
         assert_eq!(blocktree.find_missing_data_indexes(slot, 4, 3, 1), empty);
         assert_eq!(blocktree.find_missing_data_indexes(slot, 1, 2, 0), empty);
 
-        let entries = create_ticks(100, Hash::default());
+        let entries = create_ticks(100, 0, Hash::default());
         let mut shreds = entries_to_test_shreds(entries, slot, 0, true);
         assert!(shreds.len() > 2);
         shreds.drain(2..);
@@ -3231,7 +3228,7 @@ pub mod tests {
         // Write entries
         let num_entries = 10;
-        let entries = create_ticks(num_entries, Hash::default());
+        let entries = create_ticks(num_entries, 0, Hash::default());
         let shreds = entries_to_test_shreds(entries, slot, 0, true);
         let num_shreds = shreds.len();
@@ -3746,7 +3743,7 @@ pub mod tests {
         {
             let blocktree = Blocktree::open(&blocktree_path).unwrap();
             let num_ticks = 8;
-            let entries = create_ticks(num_ticks, Hash::default());
+            let entries = create_ticks(num_ticks, 0, Hash::default());
             let slot = 1;
             let shreds = entries_to_test_shreds(entries, slot, 0, false);
             let next_shred_index = shreds.len();


@@ -1,4 +1,5 @@
 use crate::bank_forks::BankForks;
+use crate::block_error::BlockError;
 use crate::blocktree::Blocktree;
 use crate::blocktree_meta::SlotMeta;
 use crate::entry::{create_ticks, Entry, EntrySlice};
@@ -173,9 +174,18 @@
     pub bank_slot: u64,
 }
 
-#[derive(Debug)]
+#[derive(Debug, PartialEq)]
 pub enum BlocktreeProcessorError {
-    LedgerVerificationFailed,
+    FailedToLoadEntries,
+    FailedToLoadMeta,
+    InvalidBlock(BlockError),
+    InvalidTransaction,
+}
+
+impl From<BlockError> for BlocktreeProcessorError {
+    fn from(block_error: BlockError) -> Self {
+        BlocktreeProcessorError::InvalidBlock(block_error)
+    }
 }
 
 /// Callback for accessing bank state while processing the blocktree
@@ -277,7 +287,7 @@
     Ok((bank_forks, bank_forks_info, leader_schedule_cache))
 }
 
-fn verify_and_process_entries(
+fn verify_and_process_slot_entries(
     bank: &Arc<Bank>,
     entries: &[Entry],
     last_entry_hash: Hash,
@@ -285,9 +295,34 @@
 ) -> result::Result<Hash, BlocktreeProcessorError> {
     assert!(!entries.is_empty());
 
-    if opts.verify_ledger && !entries.verify(&last_entry_hash) {
-        warn!("Ledger proof of history failed at slot: {}", bank.slot());
-        return Err(BlocktreeProcessorError::LedgerVerificationFailed);
+    if opts.verify_ledger {
+        let next_bank_tick_height = bank.tick_height() + entries.tick_count();
+        let max_bank_tick_height = bank.max_tick_height();
+        if next_bank_tick_height != max_bank_tick_height {
+            warn!(
+                "Invalid number of entry ticks found in slot: {}",
+                bank.slot()
+            );
+            return Err(BlockError::InvalidTickCount.into());
+        } else if !entries.last().unwrap().is_tick() {
+            warn!("Slot: {} did not end with a tick entry", bank.slot());
+            return Err(BlockError::TrailingEntry.into());
+        }
+
+        if let Some(hashes_per_tick) = bank.hashes_per_tick() {
+            if !entries.verify_tick_hash_count(&mut 0, *hashes_per_tick) {
+                warn!(
+                    "Tick with invalid number of hashes found in slot: {}",
+                    bank.slot()
+                );
+                return Err(BlockError::InvalidTickHashCount.into());
+            }
+        }
+
+        if !entries.verify(&last_entry_hash) {
+            warn!("Ledger proof of history failed at slot: {}", bank.slot());
+            return Err(BlockError::InvalidEntryHash.into());
+        }
     }
 
     process_entries_with_callback(bank, &entries, true, opts.entry_callback.as_ref()).map_err(
@@ -297,7 +332,7 @@
                 bank.slot(),
                 err
             );
-            BlocktreeProcessorError::LedgerVerificationFailed
+            BlocktreeProcessorError::InvalidTransaction
         },
     )?;
@@ -315,15 +350,10 @@
     // Fetch all entries for this slot
     let entries = blocktree.get_slot_entries(0, 0, None).map_err(|err| {
         warn!("Failed to load entries for slot 0, err: {:?}", err);
-        BlocktreeProcessorError::LedgerVerificationFailed
+        BlocktreeProcessorError::FailedToLoadEntries
     })?;
 
-    if entries.is_empty() {
-        warn!("entry0 not present");
-        return Err(BlocktreeProcessorError::LedgerVerificationFailed);
-    }
-
-    verify_and_process_entries(bank0, &entries, bank0.last_blockhash(), opts)?;
+    verify_and_process_slot_entries(bank0, &entries, bank0.last_blockhash(), opts)?;
 
     bank0.freeze();
@@ -355,7 +385,7 @@
             .meta(*next_slot)
             .map_err(|err| {
                 warn!("Failed to load meta for slot {}: {:?}", next_slot, err);
-                BlocktreeProcessorError::LedgerVerificationFailed
+                BlocktreeProcessorError::FailedToLoadMeta
             })?
             .unwrap();
@@ -419,10 +449,10 @@
         // Fetch all entries for this slot
         let entries = blocktree.get_slot_entries(slot, 0, None).map_err(|err| {
             warn!("Failed to load entries for slot {}: {:?}", slot, err);
-            BlocktreeProcessorError::LedgerVerificationFailed
+            BlocktreeProcessorError::FailedToLoadEntries
        })?;
 
-        verify_and_process_entries(&bank, &entries, last_entry_hash, opts)?;
+        verify_and_process_slot_entries(&bank, &entries, last_entry_hash, opts)?;
 
         bank.freeze(); // all banks handled by this routine are created from complete slots
@@ -463,7 +493,8 @@
     parent_slot: u64,
     last_entry_hash: Hash,
 ) -> Hash {
-    let entries = create_ticks(ticks_per_slot, last_entry_hash);
+    let num_slots = (slot - parent_slot).max(1); // Note: slot 0 has parent slot 0
+    let entries = create_ticks(num_slots * ticks_per_slot, 0, last_entry_hash);
     let last_entry_hash = entries.last().unwrap().hash;
 
     blocktree
@@ -486,7 +517,7 @@
 pub mod tests {
     use super::*;
     use crate::blocktree::create_new_tmp_ledger;
-    use crate::entry::{create_ticks, next_entry, next_entry_mut, Entry};
+    use crate::entry::{create_ticks, next_entry, next_entry_mut};
     use crate::genesis_utils::{
         create_genesis_block, create_genesis_block_with_leader, GenesisBlockInfo,
     };
@@ -503,6 +534,140 @@
     };
     use std::sync::RwLock;
 
+    #[test]
+    fn test_process_blocktree_with_missing_hashes() {
+        solana_logger::setup();
+
+        let hashes_per_tick = 2;
+        let GenesisBlockInfo {
+            mut genesis_block, ..
+        } = create_genesis_block(10_000);
+        genesis_block.poh_config.hashes_per_tick = Some(hashes_per_tick);
+        let ticks_per_slot = genesis_block.ticks_per_slot;
+
+        let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_block);
+        let blocktree =
+            Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger");
+
+        let parent_slot = 0;
+        let slot = 1;
+        let entries = create_ticks(ticks_per_slot, hashes_per_tick - 1, blockhash);
+        blocktree
+            .write_entries(
+                slot,
+                0,
+                0,
+                ticks_per_slot,
+                Some(parent_slot),
+                true,
+                &Arc::new(Keypair::new()),
+                entries,
+            )
+            .expect("Expected to write shredded entries to blocktree");
+
+        let opts = ProcessOptions {
+            verify_ledger: true,
+            ..ProcessOptions::default()
+        };
+        assert_eq!(
+            process_blocktree(&genesis_block, &blocktree, None, opts).err(),
+            Some(BlocktreeProcessorError::InvalidBlock(
+                BlockError::InvalidTickHashCount
+            )),
+        );
+    }
+
+    #[test]
+    fn test_process_blocktree_with_invalid_slot_tick_count() {
+        solana_logger::setup();
+
+        let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(10_000);
+        let ticks_per_slot = genesis_block.ticks_per_slot;
+
+        // Create a new ledger with slot 0 full of ticks
+        let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_block);
+        let blocktree = Blocktree::open(&ledger_path).unwrap();
+
+        // Write slot 1 with one tick missing
+        let parent_slot = 0;
+        let slot = 1;
+        let entries = create_ticks(ticks_per_slot - 1, 0, blockhash);
+        blocktree
+            .write_entries(
+                slot,
+                0,
+                0,
+                ticks_per_slot,
+                Some(parent_slot),
+                true,
+                &Arc::new(Keypair::new()),
+                entries,
+            )
+            .expect("Expected to write shredded entries to blocktree");
+
+        let opts = ProcessOptions {
+            verify_ledger: true,
+            ..ProcessOptions::default()
+        };
+        assert_eq!(
+            process_blocktree(&genesis_block, &blocktree, None, opts).err(),
+            Some(BlocktreeProcessorError::InvalidBlock(
+                BlockError::InvalidTickCount
+            )),
+        );
+    }
+
+    #[test]
+    fn test_process_blocktree_with_slot_with_trailing_entry() {
+        solana_logger::setup();
+
+        let GenesisBlockInfo {
+            mint_keypair,
+            genesis_block,
+            ..
+        } = create_genesis_block(10_000);
+        let ticks_per_slot = genesis_block.ticks_per_slot;
+
+        let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_block);
+        let blocktree = Blocktree::open(&ledger_path).unwrap();
+
+        let mut entries = create_ticks(ticks_per_slot, 0, blockhash);
+        let trailing_entry = {
+            let keypair = Keypair::new();
+            let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 1, blockhash);
+            next_entry(&blockhash, 1, vec![tx])
+        };
+        entries.push(trailing_entry);
+
+        // Tricks blocktree into writing the trailing entry by lying that there is one more tick
+        // per slot.
+        let parent_slot = 0;
+        let slot = 1;
+        blocktree
+            .write_entries(
+                slot,
+                0,
+                0,
+                ticks_per_slot + 1,
+                Some(parent_slot),
+                true,
+                &Arc::new(Keypair::new()),
+                entries,
+            )
+            .expect("Expected to write shredded entries to blocktree");
+
+        let opts = ProcessOptions {
+            verify_ledger: true,
+            ..ProcessOptions::default()
+        };
+        assert_eq!(
+            process_blocktree(&genesis_block, &blocktree, None, opts).err(),
+            Some(BlocktreeProcessorError::InvalidBlock(
+                BlockError::TrailingEntry
+            )),
+        );
+    }
+
     #[test]
     fn test_process_blocktree_with_incomplete_slot() {
         solana_logger::setup();
@@ -534,7 +699,7 @@
         {
             let parent_slot = 0;
             let slot = 1;
-            let mut entries = create_ticks(ticks_per_slot, blockhash);
+            let mut entries = create_ticks(ticks_per_slot, 0, blockhash);
             blockhash = entries.last().unwrap().hash;
 
             // throw away last one
@@ -841,7 +1006,7 @@
         } = create_genesis_block(2);
         let bank = Arc::new(Bank::new(&genesis_block));
         let keypair = Keypair::new();
-        let slot_entries = create_ticks(genesis_block.ticks_per_slot, genesis_block.hash());
+        let slot_entries = create_ticks(genesis_block.ticks_per_slot, 1, genesis_block.hash());
         let tx = system_transaction::transfer(
             &mint_keypair,
             &keypair.pubkey(),
@@ -865,11 +1030,13 @@
         solana_logger::setup();
         let leader_pubkey = Pubkey::new_rand();
         let mint = 100;
+        let hashes_per_tick = 10;
         let GenesisBlockInfo {
-            genesis_block,
+            mut genesis_block,
            mint_keypair,
             ..
         } = create_genesis_block_with_leader(mint, &leader_pubkey, 50);
+        genesis_block.poh_config.hashes_per_tick = Some(hashes_per_tick);
         let (ledger_path, mut last_entry_hash) = create_new_tmp_ledger!(&genesis_block);
         debug!("ledger_path: {:?}", ledger_path);
@@ -880,8 +1047,7 @@
             // Transfer one token from the mint to a random account
             let keypair = Keypair::new();
             let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 1, blockhash);
-            let entry = Entry::new(&last_entry_hash, 1, vec![tx]);
-            last_entry_hash = entry.hash;
+            let entry = next_entry_mut(&mut last_entry_hash, 1, vec![tx]);
             entries.push(entry);
 
             // Add a second Transaction that will produce a
@@ -889,14 +1055,22 @@
             let keypair2 = Keypair::new();
             let tx =
                 system_transaction::transfer(&mint_keypair, &keypair2.pubkey(), 101, blockhash);
-            let entry = Entry::new(&last_entry_hash, 1, vec![tx]);
-            last_entry_hash = entry.hash;
+            let entry = next_entry_mut(&mut last_entry_hash, 1, vec![tx]);
             entries.push(entry);
         }
 
+        let remaining_hashes = hashes_per_tick - entries.len() as u64;
+        let tick_entry = next_entry_mut(&mut last_entry_hash, remaining_hashes, vec![]);
+        entries.push(tick_entry);
+
         // Fill up the rest of slot 1 with ticks
-        entries.extend(create_ticks(genesis_block.ticks_per_slot, last_entry_hash));
+        entries.extend(create_ticks(
+            genesis_block.ticks_per_slot - 1,
+            genesis_block.poh_config.hashes_per_tick.unwrap(),
+            last_entry_hash,
+        ));
         let last_blockhash = entries.last().unwrap().hash;
 
         let blocktree =
             Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger");
         blocktree
@@ -1004,7 +1178,11 @@
         let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]);
         let mut entries = vec![entry_1, entry_2];
-        entries.extend(create_ticks(genesis_block.ticks_per_slot, last_entry_hash));
+        entries.extend(create_ticks(
+            genesis_block.ticks_per_slot,
+            0,
+            last_entry_hash,
+        ));
         blocktree
             .write_entries(
                 1,
@@ -1683,7 +1861,8 @@
         let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
         bank1.squash();
         let slot1_entries = blocktree.get_slot_entries(1, 0, None).unwrap();
-        verify_and_process_entries(&bank1, &slot1_entries, bank0.last_blockhash(), &opts).unwrap();
+        verify_and_process_slot_entries(&bank1, &slot1_entries, bank0.last_blockhash(), &opts)
+            .unwrap();
 
         // Test process_blocktree_from_root() from slot 1 onwards
         let (bank_forks, bank_forks_info, _) =
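
The `impl From<BlockError> for BlocktreeProcessorError` above is what lets the new checks bail out with `.into()`; a minimal sketch of that pattern:

    use solana_ledger::block_error::BlockError;
    use solana_ledger::blocktree_processor::BlocktreeProcessorError;

    // BlockError converts into the processor error via the From impl,
    // wrapping the specific failure in BlocktreeProcessorError::InvalidBlock.
    fn reject_extra_ticks() -> Result<(), BlocktreeProcessorError> {
        Err(BlockError::InvalidTickCount.into())
    }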


@@ -61,32 +61,18 @@ pub struct Entry {
 
 impl Entry {
     /// Creates the next Entry `num_hashes` after `start_hash`.
-    pub fn new(prev_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Self {
-        if num_hashes == 0 && transactions.is_empty() {
-            Entry {
-                num_hashes: 0,
-                hash: *prev_hash,
-                transactions,
-            }
-        } else if num_hashes == 0 {
-            // If you passed in transactions, but passed in num_hashes == 0, then
-            // next_hash will generate the next hash and set num_hashes == 1
-            let hash = next_hash(prev_hash, 1, &transactions);
-            Entry {
-                num_hashes: 1,
-                hash,
-                transactions,
-            }
-        } else {
-            // Otherwise, the next Entry `num_hashes` after `start_hash`.
-            // If you wanted a tick for instance, then pass in num_hashes = 1
-            // and transactions = empty
-            let hash = next_hash(prev_hash, num_hashes, &transactions);
-            Entry {
-                num_hashes,
-                hash,
-                transactions,
-            }
-        }
+    pub fn new(prev_hash: &Hash, mut num_hashes: u64, transactions: Vec<Transaction>) -> Self {
+        // If you passed in transactions, but passed in num_hashes == 0, then
+        // next_hash will generate the next hash and set num_hashes == 1
+        if num_hashes == 0 && !transactions.is_empty() {
+            num_hashes = 1;
+        }
+
+        let hash = next_hash(prev_hash, num_hashes, &transactions);
+        Entry {
+            num_hashes,
+            hash,
+            transactions,
+        }
     }
@@ -219,6 +205,12 @@ pub trait EntrySlice {
     fn verify_cpu(&self, start_hash: &Hash) -> EntryVerifyState;
     fn start_verify(&self, start_hash: &Hash) -> EntryVerifyState;
     fn verify(&self, start_hash: &Hash) -> bool;
+    /// Checks that each entry tick has the correct number of hashes. Entry slices do not
+    /// necessarily end in a tick, so `tick_hash_count` is used to carry over the hash count
+    /// for the next entry slice.
+    fn verify_tick_hash_count(&self, tick_hash_count: &mut u64, hashes_per_tick: u64) -> bool;
+    /// Counts tick entries
+    fn tick_count(&self) -> u64;
 }
 
 impl EntrySlice for [Entry] {
@@ -338,6 +330,34 @@ impl EntrySlice for [Entry] {
             hashes: Some(hashes),
         }
     }
+
+    fn verify_tick_hash_count(&self, tick_hash_count: &mut u64, hashes_per_tick: u64) -> bool {
+        // When hashes_per_tick is 0, hashing is disabled.
+        if hashes_per_tick == 0 {
+            return true;
+        }
+
+        for entry in self {
+            *tick_hash_count += entry.num_hashes;
+            if entry.is_tick() {
+                if *tick_hash_count != hashes_per_tick {
+                    warn!(
+                        "invalid tick hash count!: entry: {:#?}, tick_hash_count: {}, hashes_per_tick: {}",
+                        entry,
+                        tick_hash_count,
+                        hashes_per_tick
+                    );
+                    return false;
+                }
+                *tick_hash_count = 0;
+            }
+        }
+
+        *tick_hash_count < hashes_per_tick
+    }
+
+    fn tick_count(&self) -> u64 {
+        self.iter().filter(|e| e.is_tick()).count() as u64
+    }
 }
 
 pub fn next_entry_mut(start: &mut Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
@@ -346,10 +366,10 @@
     entry
 }
 
-pub fn create_ticks(num_ticks: u64, mut hash: Hash) -> Vec<Entry> {
+pub fn create_ticks(num_ticks: u64, hashes_per_tick: u64, mut hash: Hash) -> Vec<Entry> {
     let mut ticks = Vec::with_capacity(num_ticks as usize);
     for _ in 0..num_ticks {
-        let new_tick = next_entry_mut(&mut hash, 1, vec![]);
+        let new_tick = next_entry_mut(&mut hash, hashes_per_tick, vec![]);
         ticks.push(new_tick);
     }
 
@@ -373,9 +393,11 @@ mod tests {
     use chrono::prelude::Utc;
     use solana_budget_api::budget_instruction;
     use solana_sdk::{
-        hash::hash,
+        hash::{hash, Hash},
+        message::Message,
         signature::{Keypair, KeypairUtil},
         system_transaction,
+        transaction::Transaction,
     };
 
     fn create_sample_payment(keypair: &Keypair, hash: Hash) -> Transaction {
@@ -528,4 +550,58 @@ mod tests {
         bad_ticks[1].hash = one;
         assert!(!bad_ticks.verify(&one)); // inductive step, bad
     }
+
+    #[test]
+    fn test_verify_tick_hash_count() {
+        let hashes_per_tick = 10;
+
+        let keypairs: Vec<&Keypair> = Vec::new();
+        let tx: Transaction =
+            Transaction::new(&keypairs, Message::new(Vec::new()), Hash::default());
+        let tx_entry = Entry::new(&Hash::default(), 1, vec![tx]);
+        let full_tick_entry = Entry::new_tick(hashes_per_tick, &Hash::default());
+        let partial_tick_entry = Entry::new_tick(hashes_per_tick - 1, &Hash::default());
+        let no_hash_tick_entry = Entry::new_tick(0, &Hash::default());
+        let single_hash_tick_entry = Entry::new_tick(1, &Hash::default());
+
+        let no_ticks = vec![];
+        let mut tick_hash_count = 0;
+        assert!(no_ticks.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick));
+        assert_eq!(tick_hash_count, 0);
+
+        // validation is disabled when hashes_per_tick == 0
+        let no_hash_tick = vec![no_hash_tick_entry.clone()];
+        assert!(no_hash_tick.verify_tick_hash_count(&mut tick_hash_count, 0));
+        assert_eq!(tick_hash_count, 0);
+
+        // validation is disabled when hashes_per_tick == 0
+        let tx_and_no_hash_tick = vec![tx_entry.clone(), no_hash_tick_entry];
+        assert!(tx_and_no_hash_tick.verify_tick_hash_count(&mut tick_hash_count, 0));
+        assert_eq!(tick_hash_count, 0);
+
+        let single_tick = vec![full_tick_entry.clone()];
+        assert!(single_tick.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick));
+        assert_eq!(tick_hash_count, 0);
+        assert!(!single_tick.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick - 1));
+        assert_eq!(tick_hash_count, hashes_per_tick);
+        tick_hash_count = 0;
+
+        let ticks_and_txs = vec![tx_entry.clone(), partial_tick_entry.clone()];
+        assert!(ticks_and_txs.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick));
+        assert_eq!(tick_hash_count, 0);
+
+        let partial_tick = vec![partial_tick_entry.clone()];
+        assert!(!partial_tick.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick));
+        assert_eq!(tick_hash_count, hashes_per_tick - 1);
+        tick_hash_count = 0;
+
+        let tx_entries: Vec<Entry> = (0..hashes_per_tick - 1).map(|_| tx_entry.clone()).collect();
+        let tx_entries_and_tick = [tx_entries, vec![single_hash_tick_entry]].concat();
+        assert!(tx_entries_and_tick.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick));
+        assert_eq!(tick_hash_count, 0);
+
+        let too_many_tx_entries: Vec<Entry> =
+            (0..hashes_per_tick).map(|_| tx_entry.clone()).collect();
+        assert!(!too_many_tx_entries.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick));
+        assert_eq!(tick_hash_count, hashes_per_tick);
+    }
 }
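
Existing `create_ticks` callers migrate by passing `hashes_per_tick` explicitly: `0` disables PoH hashing for the generated ticks (which is what most tests in this diff pass), while genesis-derived code passes `poh_config.hashes_per_tick`. A sketch of both call styles:

    use solana_ledger::entry::create_ticks;
    use solana_sdk::hash::Hash;

    fn demo_create_ticks() {
        // Hashing disabled (hashes_per_tick == 0), the common test configuration:
        let plain_ticks = create_ticks(64, 0, Hash::default());

        // Each tick performs a fixed number of PoH hashes, as create_new_ledger
        // now does when genesis_block.poh_config.hashes_per_tick is Some(2):
        let hashed_ticks = create_ticks(64, 2, Hash::default());

        assert_eq!(plain_ticks.len(), hashed_ticks.len());
    }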


@@ -1,4 +1,5 @@
 pub mod bank_forks;
+pub mod block_error;
 #[macro_use]
 pub mod blocktree;
 mod blocktree_db;


@@ -715,7 +715,7 @@ impl Shredder {
 }
 
 pub fn max_ticks_per_n_shreds(num_shreds: u64) -> u64 {
-    let ticks = create_ticks(1, Hash::default());
+    let ticks = create_ticks(1, 0, Hash::default());
     max_entries_per_n_shred(&ticks[0], num_shreds)
 }


@@ -19,7 +19,7 @@ fn test_multiple_threads_insert_shred() {
     // with parent = slot 0
     let threads: Vec<_> = (0..num_threads)
         .map(|i| {
-            let entries = entry::create_ticks(1, Hash::default());
+            let entries = entry::create_ticks(1, 0, Hash::default());
             let shreds = blocktree::entries_to_test_shreds(entries, i + 1, 0, false);
             let blocktree_ = blocktree.clone();
             Builder::new()


@@ -197,6 +197,9 @@ pub struct Bank {
     // Bank max_tick_height
     max_tick_height: u64,
 
+    /// The number of hashes in each tick. None value means hashing is disabled.
+    hashes_per_tick: Option<u64>,
+
     /// The number of ticks in each slot.
     ticks_per_slot: u64,
@@ -319,6 +322,7 @@ impl Bank {
             blockhash_queue: RwLock::new(parent.blockhash_queue.read().unwrap().clone()),
 
             // TODO: clean this up, soo much special-case copying...
+            hashes_per_tick: parent.hashes_per_tick,
             ticks_per_slot: parent.ticks_per_slot,
             slots_per_segment: parent.slots_per_segment,
             slots_per_year: parent.slots_per_year,
@@ -662,6 +666,7 @@ impl Bank {
             .unwrap()
             .genesis_hash(&genesis_block.hash(), &self.fee_calculator);
 
+        self.hashes_per_tick = genesis_block.poh_config.hashes_per_tick;
         self.ticks_per_slot = genesis_block.ticks_per_slot;
         self.slots_per_segment = genesis_block.slots_per_segment;
         self.max_tick_height = (self.slot + 1) * self.ticks_per_slot;
@@ -1421,6 +1426,11 @@ impl Bank {
         )
     }
 
+    /// Return the number of hashes per tick
+    pub fn hashes_per_tick(&self) -> &Option<u64> {
+        &self.hashes_per_tick
+    }
+
     /// Return the number of ticks per slot
     pub fn ticks_per_slot(&self) -> u64 {
         self.ticks_per_slot
@@ -1564,6 +1574,7 @@ impl Bank {
         assert_eq!(self.slot, dbank.slot);
         assert_eq!(self.collector_id, dbank.collector_id);
         assert_eq!(self.epoch_schedule, dbank.epoch_schedule);
+        assert_eq!(self.hashes_per_tick, dbank.hashes_per_tick);
         assert_eq!(self.ticks_per_slot, dbank.ticks_per_slot);
         assert_eq!(self.parent_hash, dbank.parent_hash);
         assert_eq!(
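
Note that `hashes_per_tick()` returns `&Option<u64>`; the replay code above flattens it with `unwrap_or(0)` so that `None` (hashing disabled) behaves like zero hashes per tick. A sketch of that caller pattern:

    use solana_runtime::bank::Bank;

    // Treat a None hashes_per_tick (hashing disabled) as 0, matching the
    // convention used by EntrySlice::verify_tick_hash_count.
    fn effective_hashes_per_tick(bank: &Bank) -> u64 {
        bank.hashes_per_tick().unwrap_or(0)
    }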