2020-01-13 13:13:52 -08:00
//! The `blockstore` module provides functions for parallel verification of the
2018-11-15 15:53:31 -08:00
//! Proof of History ledger as well as iterative read, append write, and random
//! access read to a persistent file-based ledger.
2019-11-14 15:34:39 -08:00
use crate ::{
2021-03-31 20:04:00 -07:00
ancestor_iterator ::AncestorIterator ,
2020-01-13 13:13:52 -08:00
blockstore_db ::{
2020-07-06 12:43:45 -07:00
columns as cf , AccessType , BlockstoreRecoveryMode , Column , Database , IteratorDirection ,
IteratorMode , LedgerColumn , Result , WriteBatch ,
2019-11-14 15:34:39 -08:00
} ,
2020-01-13 13:13:52 -08:00
blockstore_meta ::* ,
2019-11-14 15:34:39 -08:00
entry ::{ create_ticks , Entry } ,
erasure ::ErasureConfig ,
leader_schedule_cache ::LeaderScheduleCache ,
2020-03-05 10:58:00 -08:00
next_slots_iterator ::NextSlotsIterator ,
2021-05-03 06:20:47 -07:00
shred ::{ Result as ShredResult , Shred , Shredder , MAX_DATA_SHREDS_PER_FEC_BLOCK } ,
2019-11-14 15:34:39 -08:00
} ;
2021-03-31 20:04:00 -07:00
pub use crate ::{ blockstore_db ::BlockstoreError , blockstore_meta ::SlotMeta } ;
2019-04-02 14:58:07 -07:00
use bincode ::deserialize ;
2019-10-18 09:28:51 -07:00
use log ::* ;
2019-11-14 15:34:39 -08:00
use rayon ::{
iter ::{ IntoParallelRefIterator , ParallelIterator } ,
ThreadPool ,
} ;
2020-04-24 15:04:23 -07:00
use rocksdb ::DBRawIterator ;
2019-10-26 16:15:59 -07:00
use solana_measure ::measure ::Measure ;
2019-10-04 16:25:22 -07:00
use solana_metrics ::{ datapoint_debug , datapoint_error } ;
2019-10-21 16:15:10 -07:00
use solana_rayon_threadlimit ::get_thread_count ;
2021-02-09 14:49:00 -08:00
use solana_runtime ::hardened_unpack ::{ unpack_genesis_archive , MAX_GENESIS_ARCHIVE_UNPACKED_SIZE } ;
2019-11-14 15:34:39 -08:00
use solana_sdk ::{
2019-11-26 16:21:02 -08:00
clock ::{ Slot , UnixTimestamp , DEFAULT_TICKS_PER_SECOND , MS_PER_TICK } ,
2021-05-24 07:45:36 -07:00
genesis_config ::{ GenesisConfig , DEFAULT_GENESIS_ARCHIVE , DEFAULT_GENESIS_FILE } ,
2019-11-14 15:34:39 -08:00
hash ::Hash ,
2019-12-14 11:23:02 -08:00
pubkey ::Pubkey ,
2021-02-13 22:32:43 -08:00
sanitize ::Sanitize ,
2020-02-20 13:28:55 -08:00
signature ::{ Keypair , Signature , Signer } ,
2020-04-24 15:04:23 -07:00
timing ::timestamp ,
2019-11-18 08:12:42 -08:00
transaction ::Transaction ,
2019-11-14 15:34:39 -08:00
} ;
2021-03-05 08:05:35 -08:00
use solana_storage_proto ::{ StoredExtendedRewards , StoredTransactionStatusMeta } ;
2020-03-26 13:29:30 -07:00
use solana_transaction_status ::{
2020-09-23 22:10:29 -07:00
ConfirmedBlock , ConfirmedTransaction , ConfirmedTransactionStatusWithSignature , Rewards ,
TransactionStatusMeta , TransactionWithStatusMeta ,
2020-03-26 13:29:30 -07:00
} ;
2019-11-14 15:34:39 -08:00
use std ::{
2021-03-22 16:18:22 -07:00
borrow ::Cow ,
2019-11-14 15:34:39 -08:00
cell ::RefCell ,
cmp ,
2021-06-30 09:20:07 -07:00
collections ::{ BTreeMap , HashMap , HashSet } ,
2021-03-05 08:05:35 -08:00
convert ::TryInto ,
2019-11-14 15:34:39 -08:00
fs ,
2021-02-18 23:42:09 -08:00
io ::{ Error as IoError , ErrorKind } ,
2019-11-14 15:34:39 -08:00
path ::{ Path , PathBuf } ,
rc ::Rc ,
sync ::{
2021-05-24 12:24:47 -07:00
atomic ::{ AtomicBool , Ordering } ,
2019-11-14 15:34:39 -08:00
mpsc ::{ sync_channel , Receiver , SyncSender , TrySendError } ,
2021-05-28 00:42:56 -07:00
Arc , Mutex , RwLock , RwLockWriteGuard ,
2019-11-14 15:34:39 -08:00
} ,
2021-06-30 09:20:07 -07:00
time ::Instant ,
2019-11-14 15:34:39 -08:00
} ;
2020-07-03 17:44:32 -07:00
use thiserror ::Error ;
2020-06-23 12:05:00 -07:00
use trees ::{ Tree , TreeWalk } ;
2018-11-15 15:53:31 -08:00
2020-06-02 18:49:31 -07:00
pub mod blockstore_purge ;
2020-01-13 13:13:52 -08:00
pub const BLOCKSTORE_DIRECTORY : & str = " rocksdb " ;
2018-12-20 11:16:07 -08:00
2019-10-21 16:15:10 -07:00
thread_local! ( static PAR_THREAD_POOL : RefCell < ThreadPool > = RefCell ::new ( rayon ::ThreadPoolBuilder ::new ( )
. num_threads ( get_thread_count ( ) )
2020-01-20 20:08:19 -08:00
. thread_name ( | ix | format! ( " blockstore_ {} " , ix ) )
2019-10-21 16:15:10 -07:00
. build ( )
. unwrap ( ) ) ) ;
2020-06-02 18:49:31 -07:00
thread_local! ( static PAR_THREAD_POOL_ALL_CPUS : RefCell < ThreadPool > = RefCell ::new ( rayon ::ThreadPoolBuilder ::new ( )
. num_threads ( num_cpus ::get ( ) )
. thread_name ( | ix | format! ( " blockstore_ {} " , ix ) )
. build ( )
. unwrap ( ) ) ) ;
2019-05-09 14:10:04 -07:00
pub const MAX_COMPLETED_SLOTS_IN_CHANNEL : usize = 100_000 ;
2019-11-26 16:21:02 -08:00
pub const MAX_TURBINE_PROPAGATION_IN_MS : u64 = 100 ;
pub const MAX_TURBINE_DELAY_IN_TICKS : u64 = MAX_TURBINE_PROPAGATION_IN_MS / MS_PER_TICK ;
2019-05-09 14:10:04 -07:00
2019-12-30 07:42:09 -08:00
// An upper bound on maximum number of data shreds we can handle in a slot
// 32K shreds would allow ~320K peak TPS
// (32K shreds per slot * 4 TX per shred * 2.5 slots per sec)
pub const MAX_DATA_SHREDS_PER_SLOT : usize = 32_768 ;
2021-06-02 17:20:00 -07:00
pub type CompletedSlotsSender = SyncSender < Vec < Slot > > ;
pub type CompletedSlotsReceiver = Receiver < Vec < Slot > > ;
2020-06-02 18:49:31 -07:00
type CompletedRanges = Vec < ( u32 , u32 ) > ;
/// Strategy used when purging slots from the blockstore.
#[derive(Clone, Copy)]
pub enum PurgeType {
    Exact,
    PrimaryIndex,
    CompactionFilter,
}
2019-05-09 14:10:04 -07:00
2020-07-03 17:44:32 -07:00
#[ derive(Error, Debug) ]
pub enum InsertDataShredError {
Exists ,
InvalidShred ,
BlockstoreError ( #[ from ] BlockstoreError ) ,
}
impl std ::fmt ::Display for InsertDataShredError {
fn fmt ( & self , f : & mut std ::fmt ::Formatter < '_ > ) -> std ::fmt ::Result {
write! ( f , " insert data shred error " )
}
}
2020-09-01 22:06:06 -07:00
/// Identifies a range of data shreds `[start_index, end_index]` within `slot`
/// that forms a completed data set.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct CompletedDataSetInfo {
    pub slot: Slot,
    pub start_index: u32,
    pub end_index: u32,
}
pub struct BlockstoreSignals {
pub blockstore : Blockstore ,
pub ledger_signal_receiver : Receiver < bool > ,
2021-06-02 17:20:00 -07:00
pub completed_slots_receiver : CompletedSlotsReceiver ,
2020-09-01 22:06:06 -07:00
}
2018-11-15 15:53:31 -08:00
// ledger window
2020-01-13 13:13:52 -08:00
pub struct Blockstore {
2021-02-26 13:44:38 -08:00
ledger_path : PathBuf ,
2019-05-03 14:46:02 -07:00
db : Arc < Database > ,
2019-04-02 14:58:07 -07:00
meta_cf : LedgerColumn < cf ::SlotMeta > ,
2019-06-20 15:50:41 -07:00
dead_slots_cf : LedgerColumn < cf ::DeadSlots > ,
2020-01-13 17:21:39 -08:00
duplicate_slots_cf : LedgerColumn < cf ::DuplicateSlots > ,
2019-04-11 14:14:57 -07:00
erasure_meta_cf : LedgerColumn < cf ::ErasureMeta > ,
2019-04-06 19:41:22 -07:00
orphans_cf : LedgerColumn < cf ::Orphans > ,
2019-07-10 11:08:17 -07:00
index_cf : LedgerColumn < cf ::Index > ,
2020-04-24 15:04:23 -07:00
data_shred_cf : LedgerColumn < cf ::ShredData > ,
code_shred_cf : LedgerColumn < cf ::ShredCode > ,
2019-11-17 08:26:01 -08:00
transaction_status_cf : LedgerColumn < cf ::TransactionStatus > ,
2020-04-08 12:50:39 -07:00
address_signatures_cf : LedgerColumn < cf ::AddressSignatures > ,
2020-04-04 20:24:06 -07:00
transaction_status_index_cf : LedgerColumn < cf ::TransactionStatusIndex > ,
active_transaction_status_index : RwLock < u64 > ,
2020-02-04 18:50:24 -08:00
rewards_cf : LedgerColumn < cf ::Rewards > ,
2020-09-09 08:33:14 -07:00
blocktime_cf : LedgerColumn < cf ::Blocktime > ,
2020-09-22 12:26:32 -07:00
perf_samples_cf : LedgerColumn < cf ::PerfSamples > ,
2021-05-26 21:16:16 -07:00
block_height_cf : LedgerColumn < cf ::BlockHeight > ,
2021-07-01 09:32:41 -07:00
program_costs_cf : LedgerColumn < cf ::ProgramCosts > ,
2019-12-05 11:25:13 -08:00
last_root : Arc < RwLock < Slot > > ,
2019-10-26 04:09:58 -07:00
insert_shreds_lock : Arc < Mutex < ( ) > > ,
2019-09-03 21:32:51 -07:00
pub new_shreds_signals : Vec < SyncSender < bool > > ,
2021-06-02 17:20:00 -07:00
pub completed_slots_senders : Vec < CompletedSlotsSender > ,
2021-05-28 00:42:56 -07:00
pub lowest_cleanup_slot : Arc < RwLock < Slot > > ,
2020-03-23 08:42:32 -07:00
no_compaction : bool ,
2021-06-30 09:20:07 -07:00
slots_stats : Arc < Mutex < SlotsStats > > ,
}
/// Per-slot insertion statistics keyed by slot, plus the time of the last
/// cleanup pass over the map.
struct SlotsStats {
    last_cleanup_ts: Instant,
    stats: BTreeMap<Slot, SlotStats>,
}

impl Default for SlotsStats {
    fn default() -> Self {
        // Manual impl: `Instant` does not implement `Default`.
        SlotsStats {
            last_cleanup_ts: Instant::now(),
            stats: BTreeMap::new(),
        }
    }
}
/// Counters accumulated for a single slot's shred insertions.
#[derive(Default)]
struct SlotStats {
    num_repaired: usize,
    num_recovered: usize,
}
2019-10-30 16:48:59 -07:00
pub struct IndexMetaWorkingSetEntry {
index : Index ,
2019-10-31 14:03:41 -07:00
// true only if at least one shred for this Index was inserted since the time this
// struct was created
did_insert_occur : bool ,
}
/// Working-set entry holding the in-progress `SlotMeta`, the previously stored
/// one (if any), and a dirty flag used when committing.
pub struct SlotMetaWorkingSetEntry {
    new_slot_meta: Rc<RefCell<SlotMeta>>,
    old_slot_meta: Option<SlotMeta>,
    // True only if at least one shred for this SlotMeta was inserted since the time this
    // struct was created.
    did_insert_occur: bool,
}
2021-06-30 09:20:07 -07:00
/// How a shred arrived at this node.
#[derive(PartialEq, Debug, Clone)]
enum ShredSource {
    Turbine,
    Repaired,
    Recovered,
}
2020-03-26 12:51:41 -07:00
/// Counters and timings gathered during one shred-insertion batch.
#[derive(Default)]
pub struct BlockstoreInsertionMetrics {
    pub num_shreds: usize,
    // Elapsed-time fields, in microseconds.
    pub insert_lock_elapsed: u64,
    pub insert_shreds_elapsed: u64,
    pub shred_recovery_elapsed: u64,
    pub chaining_elapsed: u64,
    pub commit_working_sets_elapsed: u64,
    pub write_batch_elapsed: u64,
    pub total_elapsed: u64,
    // Outcome counters.
    pub num_inserted: u64,
    pub num_repair: u64,
    pub num_recovered: usize,
    pub num_recovered_inserted: usize,
    pub num_recovered_failed_sig: usize,
    pub num_recovered_failed_invalid: usize,
    pub num_recovered_exists: usize,
    pub index_meta_time: u64,
}
2019-10-31 14:03:41 -07:00
impl SlotMetaWorkingSetEntry {
    /// Builds an entry from the new and (optional) previously-stored `SlotMeta`;
    /// `did_insert_occur` starts out false.
    fn new(new_slot_meta: Rc<RefCell<SlotMeta>>, old_slot_meta: Option<SlotMeta>) -> Self {
        Self {
            new_slot_meta,
            old_slot_meta,
            did_insert_occur: false,
        }
    }
}
2020-01-13 13:13:52 -08:00
impl BlockstoreInsertionMetrics {
2019-10-26 16:15:59 -07:00
pub fn report_metrics ( & self , metric_name : & 'static str ) {
2020-03-26 12:51:41 -07:00
datapoint_info! (
2019-10-26 16:15:59 -07:00
metric_name ,
( " num_shreds " , self . num_shreds as i64 , i64 ) ,
( " total_elapsed " , self . total_elapsed as i64 , i64 ) ,
( " insert_lock_elapsed " , self . insert_lock_elapsed as i64 , i64 ) ,
(
" insert_shreds_elapsed " ,
self . insert_shreds_elapsed as i64 ,
i64
) ,
(
" shred_recovery_elapsed " ,
self . shred_recovery_elapsed as i64 ,
i64
) ,
( " chaining_elapsed " , self . chaining_elapsed as i64 , i64 ) ,
(
" commit_working_sets_elapsed " ,
self . commit_working_sets_elapsed as i64 ,
i64
) ,
( " write_batch_elapsed " , self . write_batch_elapsed as i64 , i64 ) ,
( " num_inserted " , self . num_inserted as i64 , i64 ) ,
2020-09-29 14:13:21 -07:00
( " num_repair " , self . num_repair as i64 , i64 ) ,
2019-10-26 16:15:59 -07:00
( " num_recovered " , self . num_recovered as i64 , i64 ) ,
2020-07-03 17:44:32 -07:00
(
" num_recovered_inserted " ,
self . num_recovered_inserted as i64 ,
i64
) ,
(
" num_recovered_failed_sig " ,
self . num_recovered_failed_sig as i64 ,
i64
) ,
(
" num_recovered_failed_invalid " ,
self . num_recovered_failed_invalid as i64 ,
i64
) ,
(
" num_recovered_exists " ,
self . num_recovered_exists as i64 ,
i64
) ,
2019-10-26 16:15:59 -07:00
) ;
}
}
2020-01-13 13:13:52 -08:00
impl Blockstore {
2019-12-12 15:54:50 -08:00
pub fn db ( self ) -> Arc < Database > {
self . db
}
2021-02-26 13:44:38 -08:00
pub fn ledger_path ( & self ) -> & Path {
& self . ledger_path
}
2019-09-03 21:32:51 -07:00
/// Opens a Ledger in directory, provides "infinite" window of shreds
2020-01-13 13:13:52 -08:00
pub fn open ( ledger_path : & Path ) -> Result < Blockstore > {
2020-12-16 17:56:38 -08:00
Self ::do_open ( ledger_path , AccessType ::PrimaryOnly , None , true )
2020-06-02 21:32:44 -07:00
}
pub fn open_with_access_type (
ledger_path : & Path ,
access_type : AccessType ,
2020-07-06 12:43:45 -07:00
recovery_mode : Option < BlockstoreRecoveryMode > ,
2020-12-16 17:56:38 -08:00
enforce_ulimit_nofile : bool ,
2020-06-02 21:32:44 -07:00
) -> Result < Blockstore > {
2020-12-16 17:56:38 -08:00
Self ::do_open (
ledger_path ,
access_type ,
recovery_mode ,
enforce_ulimit_nofile ,
)
2020-06-02 21:32:44 -07:00
}
2020-07-06 12:43:45 -07:00
fn do_open (
ledger_path : & Path ,
access_type : AccessType ,
recovery_mode : Option < BlockstoreRecoveryMode > ,
2020-12-16 17:56:38 -08:00
enforce_ulimit_nofile : bool ,
2020-07-06 12:43:45 -07:00
) -> Result < Blockstore > {
2019-04-02 14:58:07 -07:00
fs ::create_dir_all ( & ledger_path ) ? ;
2020-01-13 13:13:52 -08:00
let blockstore_path = ledger_path . join ( BLOCKSTORE_DIRECTORY ) ;
2019-04-02 14:58:07 -07:00
2020-12-16 17:56:38 -08:00
adjust_ulimit_nofile ( enforce_ulimit_nofile ) ? ;
2019-11-05 11:18:49 -08:00
2019-04-02 14:58:07 -07:00
// Open the database
2019-11-25 21:48:49 -08:00
let mut measure = Measure ::start ( " open " ) ;
2020-04-03 12:51:44 -07:00
info! ( " Opening database at {:?} " , blockstore_path ) ;
2020-07-06 12:43:45 -07:00
let db = Database ::open ( & blockstore_path , access_type , recovery_mode ) ? ;
2019-04-02 14:58:07 -07:00
// Create the metadata column family
2019-04-26 08:52:10 -07:00
let meta_cf = db . column ( ) ;
2019-04-02 14:58:07 -07:00
2019-06-20 15:50:41 -07:00
// Create the dead slots column family
let dead_slots_cf = db . column ( ) ;
2020-01-13 17:21:39 -08:00
let duplicate_slots_cf = db . column ( ) ;
2019-04-26 08:52:10 -07:00
let erasure_meta_cf = db . column ( ) ;
2019-04-02 14:58:07 -07:00
2019-04-06 19:41:22 -07:00
// Create the orphans column family. An "orphan" is defined as
// the head of a detached chain of slots, i.e. a slot with no
// known parent
2019-04-26 08:52:10 -07:00
let orphans_cf = db . column ( ) ;
2019-07-10 11:08:17 -07:00
let index_cf = db . column ( ) ;
2019-04-02 14:58:07 -07:00
2020-04-24 15:04:23 -07:00
let data_shred_cf = db . column ( ) ;
let code_shred_cf = db . column ( ) ;
2019-11-17 08:26:01 -08:00
let transaction_status_cf = db . column ( ) ;
2020-04-08 12:50:39 -07:00
let address_signatures_cf = db . column ( ) ;
2020-04-04 20:24:06 -07:00
let transaction_status_index_cf = db . column ( ) ;
2020-02-04 18:50:24 -08:00
let rewards_cf = db . column ( ) ;
2020-09-09 08:33:14 -07:00
let blocktime_cf = db . column ( ) ;
2020-09-22 12:26:32 -07:00
let perf_samples_cf = db . column ( ) ;
2021-05-26 21:16:16 -07:00
let block_height_cf = db . column ( ) ;
2021-07-01 09:32:41 -07:00
let program_costs_cf = db . column ( ) ;
2019-08-12 10:03:57 -07:00
2019-05-03 14:46:02 -07:00
let db = Arc ::new ( db ) ;
2019-04-26 08:52:10 -07:00
2019-08-27 15:09:41 -07:00
// Get max root or 0 if it doesn't exist
let max_root = db
. iter ::< cf ::Root > ( IteratorMode ::End ) ?
. next ( )
. map ( | ( slot , _ ) | slot )
. unwrap_or ( 0 ) ;
let last_root = Arc ::new ( RwLock ::new ( max_root ) ) ;
2020-04-04 20:24:06 -07:00
// Get active transaction-status index or 0
let active_transaction_status_index = db
. iter ::< cf ::TransactionStatusIndex > ( IteratorMode ::Start ) ?
2020-04-08 12:50:39 -07:00
. next ( ) ;
let initialize_transaction_status_index = active_transaction_status_index . is_none ( ) ;
let active_transaction_status_index = active_transaction_status_index
2020-04-04 20:24:06 -07:00
. and_then ( | ( _ , data ) | {
let index0 : TransactionStatusIndexMeta = deserialize ( & data ) . unwrap ( ) ;
if index0 . frozen {
Some ( 1 )
} else {
None
}
} )
. unwrap_or ( 0 ) ;
2019-11-25 21:20:30 -08:00
measure . stop ( ) ;
2020-01-13 13:13:52 -08:00
info! ( " {:?} {} " , blockstore_path , measure ) ;
2020-01-28 13:45:41 -08:00
let blockstore = Blockstore {
2021-02-26 13:44:38 -08:00
ledger_path : ledger_path . to_path_buf ( ) ,
2019-04-02 14:58:07 -07:00
db ,
meta_cf ,
2019-06-20 15:50:41 -07:00
dead_slots_cf ,
2020-01-13 17:21:39 -08:00
duplicate_slots_cf ,
2019-04-11 14:14:57 -07:00
erasure_meta_cf ,
2019-04-06 19:41:22 -07:00
orphans_cf ,
2019-07-10 11:08:17 -07:00
index_cf ,
2020-04-24 15:04:23 -07:00
data_shred_cf ,
code_shred_cf ,
2019-11-17 08:26:01 -08:00
transaction_status_cf ,
2020-04-08 12:50:39 -07:00
address_signatures_cf ,
2020-04-04 20:24:06 -07:00
transaction_status_index_cf ,
active_transaction_status_index : RwLock ::new ( active_transaction_status_index ) ,
2020-02-04 18:50:24 -08:00
rewards_cf ,
2020-09-09 08:33:14 -07:00
blocktime_cf ,
2020-09-22 12:26:32 -07:00
perf_samples_cf ,
2021-05-26 21:16:16 -07:00
block_height_cf ,
2021-07-01 09:32:41 -07:00
program_costs_cf ,
2019-09-03 21:32:51 -07:00
new_shreds_signals : vec ! [ ] ,
2019-05-09 14:10:04 -07:00
completed_slots_senders : vec ! [ ] ,
2019-10-26 04:09:58 -07:00
insert_shreds_lock : Arc ::new ( Mutex ::new ( ( ) ) ) ,
2019-08-27 15:09:41 -07:00
last_root ,
2020-01-28 13:45:41 -08:00
lowest_cleanup_slot : Arc ::new ( RwLock ::new ( 0 ) ) ,
2020-03-23 08:42:32 -07:00
no_compaction : false ,
2021-06-30 09:20:07 -07:00
slots_stats : Arc ::new ( Mutex ::new ( SlotsStats ::default ( ) ) ) ,
2020-01-28 13:45:41 -08:00
} ;
2020-04-08 12:50:39 -07:00
if initialize_transaction_status_index {
blockstore . initialize_transaction_status_index ( ) ? ;
}
2020-01-28 13:45:41 -08:00
Ok ( blockstore )
2019-04-02 14:58:07 -07:00
}
2019-05-09 14:10:04 -07:00
pub fn open_with_signal (
2019-07-30 15:53:41 -07:00
ledger_path : & Path ,
2020-07-06 12:43:45 -07:00
recovery_mode : Option < BlockstoreRecoveryMode > ,
2020-12-16 17:56:38 -08:00
enforce_ulimit_nofile : bool ,
2020-09-01 22:06:06 -07:00
) -> Result < BlockstoreSignals > {
2020-12-16 17:56:38 -08:00
let mut blockstore = Self ::open_with_access_type (
ledger_path ,
AccessType ::PrimaryOnly ,
recovery_mode ,
enforce_ulimit_nofile ,
) ? ;
2020-09-01 22:06:06 -07:00
let ( ledger_signal_sender , ledger_signal_receiver ) = sync_channel ( 1 ) ;
2021-06-02 17:20:00 -07:00
let ( completed_slots_sender , completed_slots_receiver ) =
2021-03-12 05:44:06 -08:00
sync_channel ( MAX_COMPLETED_SLOTS_IN_CHANNEL ) ;
2020-09-01 22:06:06 -07:00
blockstore . new_shreds_signals = vec! [ ledger_signal_sender ] ;
2021-06-02 17:20:00 -07:00
blockstore . completed_slots_senders = vec! [ completed_slots_sender ] ;
2019-02-04 15:33:43 -08:00
2020-09-01 22:06:06 -07:00
Ok ( BlockstoreSignals {
blockstore ,
ledger_signal_receiver ,
2021-06-02 17:20:00 -07:00
completed_slots_receiver ,
2020-09-01 22:06:06 -07:00
} )
2019-02-04 15:33:43 -08:00
}
2020-08-19 22:04:38 -07:00
pub fn add_tree (
& self ,
forks : Tree < Slot > ,
is_orphan : bool ,
is_slot_complete : bool ,
num_ticks : u64 ,
starting_hash : Hash ,
) {
2020-06-23 12:05:00 -07:00
let mut walk = TreeWalk ::from ( forks ) ;
2020-08-19 22:04:38 -07:00
let mut blockhashes = HashMap ::new ( ) ;
2020-06-23 12:05:00 -07:00
while let Some ( visit ) = walk . get ( ) {
2021-06-17 15:45:09 -07:00
let slot = * visit . node ( ) . data ( ) ;
2020-07-06 22:49:40 -07:00
if self . meta ( slot ) . unwrap ( ) . is_some ( ) & & self . orphan ( slot ) . unwrap ( ) . is_none ( ) {
2020-08-19 22:04:38 -07:00
// If slot exists in blockstore and is not an orphan, then skip it
2020-06-23 12:05:00 -07:00
walk . forward ( ) ;
continue ;
}
2021-06-17 15:45:09 -07:00
let parent = walk . get_parent ( ) . map ( | n | * n . data ( ) ) ;
2020-06-23 12:05:00 -07:00
if parent . is_some ( ) | | ! is_orphan {
2020-08-19 22:04:38 -07:00
let parent_hash = parent
// parent won't exist for first node in a tree where
// `is_orphan == true`
. and_then ( | parent | blockhashes . get ( & parent ) )
. unwrap_or ( & starting_hash ) ;
let mut entries = create_ticks (
num_ticks * ( std ::cmp ::max ( 1 , slot - parent . unwrap_or ( slot ) ) ) ,
0 ,
* parent_hash ,
) ;
blockhashes . insert ( slot , entries . last ( ) . unwrap ( ) . hash ) ;
if ! is_slot_complete {
entries . pop ( ) . unwrap ( ) ;
}
2020-07-02 14:33:04 -07:00
let shreds = entries_to_test_shreds (
entries . clone ( ) ,
slot ,
parent . unwrap_or ( slot ) ,
is_slot_complete ,
0 ,
) ;
2020-06-23 12:05:00 -07:00
self . insert_shreds ( shreds , None , false ) . unwrap ( ) ;
}
walk . forward ( ) ;
}
}
2020-03-23 08:42:32 -07:00
pub fn set_no_compaction ( & mut self , no_compaction : bool ) {
self . no_compaction = no_compaction ;
}
2019-07-30 15:53:41 -07:00
pub fn destroy ( ledger_path : & Path ) -> Result < ( ) > {
// Database::destroy() fails if the path doesn't exist
2019-04-02 14:58:07 -07:00
fs ::create_dir_all ( ledger_path ) ? ;
2020-01-13 13:13:52 -08:00
let blockstore_path = ledger_path . join ( BLOCKSTORE_DIRECTORY ) ;
Database ::destroy ( & blockstore_path )
2019-04-02 14:58:07 -07:00
}
2019-11-02 00:38:30 -07:00
pub fn meta ( & self , slot : Slot ) -> Result < Option < SlotMeta > > {
2019-05-03 14:46:02 -07:00
self . meta_cf . get ( slot )
2019-04-26 08:52:10 -07:00
}
2019-11-02 00:38:30 -07:00
pub fn is_full ( & self , slot : Slot ) -> bool {
2020-12-13 17:26:34 -08:00
if let Ok ( Some ( meta ) ) = self . meta_cf . get ( slot ) {
return meta . is_full ( ) ;
2019-05-29 15:01:20 -07:00
}
false
}
2019-11-02 00:38:30 -07:00
pub fn erasure_meta ( & self , slot : Slot , set_index : u64 ) -> Result < Option < ErasureMeta > > {
2019-05-03 14:46:02 -07:00
self . erasure_meta_cf . get ( ( slot , set_index ) )
2019-01-08 11:41:55 -08:00
}
2019-11-02 00:38:30 -07:00
pub fn orphan ( & self , slot : Slot ) -> Result < Option < bool > > {
2019-05-03 14:46:02 -07:00
self . orphans_cf . get ( slot )
2019-03-29 16:07:24 -07:00
}
2020-08-19 22:04:38 -07:00
// Get max root or 0 if it doesn't exist
pub fn max_root ( & self ) -> Slot {
self . db
. iter ::< cf ::Root > ( IteratorMode ::End )
. expect ( " Couldn't get rooted iterator for max_root() " )
. next ( )
. map ( | ( slot , _ ) | slot )
. unwrap_or ( 0 )
}
2020-12-13 17:26:34 -08:00
pub fn slot_meta_iterator (
& self ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
2020-12-13 17:26:34 -08:00
) -> Result < impl Iterator < Item = ( Slot , SlotMeta ) > + '_ > {
2019-08-27 15:09:41 -07:00
let meta_iter = self
. db
. iter ::< cf ::SlotMeta > ( IteratorMode ::From ( slot , IteratorDirection ::Forward ) ) ? ;
2019-05-15 18:28:23 -07:00
Ok ( meta_iter . map ( | ( slot , slot_meta_bytes ) | {
(
slot ,
2021-02-03 06:42:34 -08:00
deserialize ( & slot_meta_bytes ) . unwrap_or_else ( | e | {
panic! ( " Could not deserialize SlotMeta for slot {} : {:?} " , slot , e )
} ) ,
2019-05-15 18:28:23 -07:00
)
} ) )
2019-02-07 15:10:54 -08:00
}
2020-03-05 10:58:00 -08:00
#[ allow(dead_code) ]
2020-12-13 17:26:34 -08:00
pub fn live_slots_iterator ( & self , root : Slot ) -> impl Iterator < Item = ( Slot , SlotMeta ) > + '_ {
2020-03-05 10:58:00 -08:00
let root_forks = NextSlotsIterator ::new ( root , self ) ;
let orphans_iter = self . orphans_iterator ( root + 1 ) . unwrap ( ) ;
root_forks . chain ( orphans_iter . flat_map ( move | orphan | NextSlotsIterator ::new ( orphan , self ) ) )
}
2020-12-13 17:26:34 -08:00
pub fn slot_data_iterator (
& self ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
2020-03-19 23:35:01 -07:00
index : u64 ,
2020-12-13 17:26:34 -08:00
) -> Result < impl Iterator < Item = ( ( u64 , u64 ) , Box < [ u8 ] > ) > + '_ > {
2020-04-24 15:04:23 -07:00
let slot_iterator = self . db . iter ::< cf ::ShredData > ( IteratorMode ::From (
( slot , index ) ,
IteratorDirection ::Forward ,
) ) ? ;
Ok ( slot_iterator . take_while ( move | ( ( shred_slot , _ ) , _ ) | * shred_slot = = slot ) )
2019-05-13 22:04:54 -07:00
}
2020-12-13 17:26:34 -08:00
pub fn slot_coding_iterator (
& self ,
2019-12-09 00:13:36 -08:00
slot : Slot ,
2020-03-19 23:35:01 -07:00
index : u64 ,
2020-12-13 17:26:34 -08:00
) -> Result < impl Iterator < Item = ( ( u64 , u64 ) , Box < [ u8 ] > ) > + '_ > {
2020-04-24 15:04:23 -07:00
let slot_iterator = self . db . iter ::< cf ::ShredCode > ( IteratorMode ::From (
( slot , index ) ,
IteratorDirection ::Forward ,
) ) ? ;
Ok ( slot_iterator . take_while ( move | ( ( shred_slot , _ ) , _ ) | * shred_slot = = slot ) )
2019-12-09 00:13:36 -08:00
}
2020-12-13 17:26:34 -08:00
pub fn rooted_slot_iterator ( & self , slot : Slot ) -> Result < impl Iterator < Item = u64 > + '_ > {
2020-01-07 19:51:28 -08:00
let slot_iterator = self
. db
. iter ::< cf ::Root > ( IteratorMode ::From ( slot , IteratorDirection ::Forward ) ) ? ;
Ok ( slot_iterator . map ( move | ( rooted_slot , _ ) | rooted_slot ) )
}
2020-05-13 10:09:38 -07:00
fn get_recovery_data_shreds (
index : & mut Index ,
set_index : u64 ,
slot : Slot ,
erasure_meta : & ErasureMeta ,
available_shreds : & mut Vec < Shred > ,
prev_inserted_datas : & mut HashMap < ( u64 , u64 ) , Shred > ,
data_cf : & LedgerColumn < cf ::ShredData > ,
) {
( set_index .. set_index + erasure_meta . config . num_data ( ) as u64 ) . for_each ( | i | {
if index . data ( ) . is_present ( i ) {
if let Some ( shred ) = prev_inserted_datas . remove ( & ( slot , i ) ) . or_else ( | | {
let some_data = data_cf
. get_bytes ( ( slot , i ) )
. expect ( " Database failure, could not fetch data shred " ) ;
if let Some ( data ) = some_data {
Shred ::new_from_serialized_shred ( data ) . ok ( )
} else {
warn! ( " Data shred deleted while reading for recovery " ) ;
None
}
} ) {
available_shreds . push ( shred ) ;
}
}
} ) ;
}
fn get_recovery_coding_shreds (
index : & mut Index ,
slot : Slot ,
erasure_meta : & ErasureMeta ,
available_shreds : & mut Vec < Shred > ,
prev_inserted_codes : & mut HashMap < ( u64 , u64 ) , Shred > ,
code_cf : & LedgerColumn < cf ::ShredCode > ,
) {
2021-04-23 05:00:37 -07:00
( erasure_meta . set_index .. erasure_meta . set_index + erasure_meta . config . num_coding ( ) as u64 )
2020-05-13 10:09:38 -07:00
. for_each ( | i | {
if let Some ( shred ) = prev_inserted_codes
. remove ( & ( slot , i ) )
. map ( | s | {
// Remove from the index so it doesn't get committed. We know
// this is safe to do because everything in
// `prev_inserted_codes` does not yet exist in blockstore
// (guaranteed by `check_cache_coding_shred`)
index . coding_mut ( ) . set_present ( i , false ) ;
s
} )
. or_else ( | | {
if index . coding ( ) . is_present ( i ) {
let some_code = code_cf
. get_bytes ( ( slot , i ) )
. expect ( " Database failure, could not fetch code shred " ) ;
if let Some ( code ) = some_code {
Shred ::new_from_serialized_shred ( code ) . ok ( )
} else {
warn! ( " Code shred deleted while reading for recovery " ) ;
None
}
} else {
None
}
} )
{
available_shreds . push ( shred ) ;
}
} ) ;
}
fn recover_shreds (
index : & mut Index ,
set_index : u64 ,
erasure_meta : & ErasureMeta ,
prev_inserted_datas : & mut HashMap < ( u64 , u64 ) , Shred > ,
prev_inserted_codes : & mut HashMap < ( u64 , u64 ) , Shred > ,
recovered_data_shreds : & mut Vec < Shred > ,
data_cf : & LedgerColumn < cf ::ShredData > ,
code_cf : & LedgerColumn < cf ::ShredCode > ,
) {
// Find shreds for this erasure set and try recovery
let slot = index . slot ;
let mut available_shreds = vec! [ ] ;
Self ::get_recovery_data_shreds (
index ,
set_index ,
slot ,
erasure_meta ,
& mut available_shreds ,
prev_inserted_datas ,
data_cf ,
) ;
Self ::get_recovery_coding_shreds (
index ,
slot ,
erasure_meta ,
& mut available_shreds ,
prev_inserted_codes ,
code_cf ,
) ;
if let Ok ( mut result ) = Shredder ::try_recovery (
available_shreds ,
erasure_meta . config . num_data ( ) ,
erasure_meta . config . num_coding ( ) ,
set_index as usize ,
slot ,
) {
Self ::submit_metrics (
slot ,
set_index ,
erasure_meta ,
true ,
" complete " . into ( ) ,
result . len ( ) ,
) ;
recovered_data_shreds . append ( & mut result ) ;
} else {
Self ::submit_metrics ( slot , set_index , erasure_meta , true , " incomplete " . into ( ) , 0 ) ;
}
}
fn submit_metrics (
slot : Slot ,
set_index : u64 ,
erasure_meta : & ErasureMeta ,
attempted : bool ,
status : String ,
recovered : usize ,
) {
datapoint_debug! (
" blockstore-erasure " ,
( " slot " , slot as i64 , i64 ) ,
( " start_index " , set_index as i64 , i64 ) ,
(
" end_index " ,
( erasure_meta . set_index + erasure_meta . config . num_data ( ) as u64 ) as i64 ,
i64
) ,
( " recovery_attempted " , attempted , bool ) ,
( " recovery_status " , status , String ) ,
( " recovered " , recovered as i64 , i64 ) ,
) ;
}
2019-08-26 18:27:45 -07:00
fn try_shred_recovery (
2020-04-24 15:04:23 -07:00
db : & Database ,
2019-08-26 18:27:45 -07:00
erasure_metas : & HashMap < ( u64 , u64 ) , ErasureMeta > ,
2019-12-09 00:13:36 -08:00
index_working_set : & mut HashMap < u64 , IndexMetaWorkingSetEntry > ,
2019-09-18 16:24:30 -07:00
prev_inserted_datas : & mut HashMap < ( u64 , u64 ) , Shred > ,
prev_inserted_codes : & mut HashMap < ( u64 , u64 ) , Shred > ,
) -> Vec < Shred > {
2020-04-24 15:04:23 -07:00
let data_cf = db . column ::< cf ::ShredData > ( ) ;
let code_cf = db . column ::< cf ::ShredCode > ( ) ;
2019-08-26 18:27:45 -07:00
let mut recovered_data_shreds = vec! [ ] ;
// Recovery rules:
// 1. Only try recovery around indexes for which new data or coding shreds are received
// 2. For new data shreds, check if an erasure set exists. If not, don't try recovery
// 3. Before trying recovery, check if enough number of shreds have been received
// 3a. Enough number of shreds = (#data + #coding shreds) > erasure.num_data
for ( & ( slot , set_index ) , erasure_meta ) in erasure_metas . iter ( ) {
2019-12-09 00:13:36 -08:00
let index_meta_entry = index_working_set . get_mut ( & slot ) . expect ( " Index " ) ;
let index = & mut index_meta_entry . index ;
2021-06-18 06:34:46 -07:00
match erasure_meta . status ( index ) {
2019-08-27 11:22:06 -07:00
ErasureMetaStatus ::CanRecover = > {
2020-05-13 10:09:38 -07:00
Self ::recover_shreds (
index ,
set_index ,
erasure_meta ,
prev_inserted_datas ,
prev_inserted_codes ,
& mut recovered_data_shreds ,
& data_cf ,
& code_cf ,
) ;
2019-08-26 18:27:45 -07:00
}
2019-08-27 11:22:06 -07:00
ErasureMetaStatus ::DataFull = > {
2019-11-11 13:12:55 -08:00
( set_index .. set_index + erasure_meta . config . num_coding ( ) as u64 ) . for_each (
| i | {
2019-12-09 00:13:36 -08:00
// Remove saved coding shreds. We don't need these for future recovery.
if prev_inserted_codes . remove ( & ( slot , i ) ) . is_some ( ) {
// Remove from the index so it doesn't get committed. We know
// this is safe to do because everything in
2020-01-13 13:13:52 -08:00
// `prev_inserted_codes` does not yet exist in blockstore
2019-12-09 00:13:36 -08:00
// (guaranteed by `check_cache_coding_shred`)
index . coding_mut ( ) . set_present ( i , false ) ;
}
2019-11-11 13:12:55 -08:00
} ,
) ;
2020-05-13 10:09:38 -07:00
Self ::submit_metrics (
slot ,
set_index ,
erasure_meta ,
false ,
" complete " . into ( ) ,
0 ,
) ;
2019-08-27 11:22:06 -07:00
}
ErasureMetaStatus ::StillNeed ( needed ) = > {
2020-05-13 10:09:38 -07:00
Self ::submit_metrics (
slot ,
set_index ,
erasure_meta ,
false ,
format! ( " still need: {} " , needed ) ,
0 ,
) ;
2019-08-27 11:22:06 -07:00
}
} ;
2019-08-26 18:27:45 -07:00
}
recovered_data_shreds
}
2020-01-14 15:37:53 -08:00
pub fn insert_shreds_handle_duplicate < F > (
2019-09-05 18:20:30 -07:00
& self ,
2019-09-18 16:24:30 -07:00
shreds : Vec < Shred > ,
2021-06-30 09:20:07 -07:00
is_repaired : Vec < bool > ,
2019-09-05 18:20:30 -07:00
leader_schedule : Option < & Arc < LeaderScheduleCache > > ,
2019-11-14 00:32:07 -08:00
is_trusted : bool ,
2020-01-14 15:37:53 -08:00
handle_duplicate : & F ,
2020-03-26 12:51:41 -07:00
metrics : & mut BlockstoreInsertionMetrics ,
2020-09-29 14:13:21 -07:00
) -> Result < ( Vec < CompletedDataSetInfo > , Vec < usize > ) >
2020-01-14 15:37:53 -08:00
where
2020-06-08 17:38:14 -07:00
F : Fn ( Shred ) ,
2020-01-14 15:37:53 -08:00
{
2021-06-30 09:20:07 -07:00
assert_eq! ( shreds . len ( ) , is_repaired . len ( ) ) ;
2019-10-26 16:15:59 -07:00
let mut total_start = Measure ::start ( " Total elapsed " ) ;
2020-01-13 13:13:52 -08:00
let mut start = Measure ::start ( " Blockstore lock " ) ;
2019-10-26 04:09:58 -07:00
let _lock = self . insert_shreds_lock . lock ( ) . unwrap ( ) ;
2019-10-26 16:15:59 -07:00
start . stop ( ) ;
let insert_lock_elapsed = start . as_us ( ) ;
2019-08-20 17:16:06 -07:00
let db = & * self . db ;
2019-10-24 10:30:53 -07:00
let mut write_batch = db . batch ( ) ? ;
2019-08-20 17:16:06 -07:00
2019-08-26 18:27:45 -07:00
let mut just_inserted_coding_shreds = HashMap ::new ( ) ;
2019-09-04 17:14:42 -07:00
let mut just_inserted_data_shreds = HashMap ::new ( ) ;
2019-08-26 18:27:45 -07:00
let mut erasure_metas = HashMap ::new ( ) ;
2019-08-20 17:16:06 -07:00
let mut slot_meta_working_set = HashMap ::new ( ) ;
let mut index_working_set = HashMap ::new ( ) ;
2019-10-26 16:15:59 -07:00
let num_shreds = shreds . len ( ) ;
let mut start = Measure ::start ( " Shred insertion " ) ;
let mut num_inserted = 0 ;
2019-10-30 16:48:59 -07:00
let mut index_meta_time = 0 ;
2020-09-01 22:06:06 -07:00
let mut newly_completed_data_sets : Vec < CompletedDataSetInfo > = vec! [ ] ;
2020-09-29 14:13:21 -07:00
let mut inserted_indices = Vec ::new ( ) ;
2021-06-30 09:20:07 -07:00
shreds
. into_iter ( )
. zip ( is_repaired . into_iter ( ) )
. enumerate ( )
. for_each ( | ( i , ( shred , is_repaired ) ) | {
if shred . is_data ( ) {
let shred_slot = shred . slot ( ) ;
let shred_source = if is_repaired {
ShredSource ::Repaired
} else {
ShredSource ::Turbine
} ;
if let Ok ( completed_data_sets ) = self . check_insert_data_shred (
shred ,
& mut erasure_metas ,
& mut index_working_set ,
& mut slot_meta_working_set ,
& mut write_batch ,
& mut just_inserted_data_shreds ,
& mut index_meta_time ,
is_trusted ,
handle_duplicate ,
leader_schedule ,
shred_source ,
) {
newly_completed_data_sets . extend ( completed_data_sets . into_iter ( ) . map (
| ( start_index , end_index ) | CompletedDataSetInfo {
slot : shred_slot ,
start_index ,
end_index ,
} ,
) ) ;
inserted_indices . push ( i ) ;
num_inserted + = 1 ;
}
} else if shred . is_code ( ) {
self . check_cache_coding_shred (
shred ,
& mut erasure_metas ,
& mut index_working_set ,
& mut just_inserted_coding_shreds ,
& mut index_meta_time ,
handle_duplicate ,
is_trusted ,
is_repaired ,
) ;
} else {
panic! ( " There should be no other case " ) ;
2019-10-26 16:15:59 -07:00
}
2021-06-30 09:20:07 -07:00
} ) ;
2019-10-26 16:15:59 -07:00
start . stop ( ) ;
2019-08-20 17:16:06 -07:00
2019-10-31 14:03:41 -07:00
let insert_shreds_elapsed = start . as_us ( ) ;
2019-10-26 16:15:59 -07:00
let mut start = Measure ::start ( " Shred recovery " ) ;
let mut num_recovered = 0 ;
2020-07-03 17:44:32 -07:00
let mut num_recovered_inserted = 0 ;
let mut num_recovered_failed_sig = 0 ;
let mut num_recovered_failed_invalid = 0 ;
let mut num_recovered_exists = 0 ;
2019-09-05 18:20:30 -07:00
if let Some ( leader_schedule_cache ) = leader_schedule {
2020-04-24 15:04:23 -07:00
let recovered_data = Self ::try_shred_recovery (
2021-06-18 06:34:46 -07:00
db ,
2019-09-05 18:20:30 -07:00
& erasure_metas ,
2019-12-09 00:13:36 -08:00
& mut index_working_set ,
2019-09-05 18:20:30 -07:00
& mut just_inserted_data_shreds ,
& mut just_inserted_coding_shreds ,
2019-08-26 18:27:45 -07:00
) ;
2019-09-05 18:20:30 -07:00
2019-10-26 16:15:59 -07:00
num_recovered = recovered_data . len ( ) ;
2019-09-05 18:20:30 -07:00
recovered_data . into_iter ( ) . for_each ( | shred | {
if let Some ( leader ) = leader_schedule_cache . slot_leader_at ( shred . slot ( ) , None ) {
2020-09-01 22:06:06 -07:00
let shred_slot = shred . slot ( ) ;
2019-09-05 18:20:30 -07:00
if shred . verify ( & leader ) {
2020-07-03 17:44:32 -07:00
match self . check_insert_data_shred (
2019-09-05 18:20:30 -07:00
shred ,
2019-12-12 17:50:28 -08:00
& mut erasure_metas ,
2019-09-05 18:20:30 -07:00
& mut index_working_set ,
& mut slot_meta_working_set ,
2020-04-24 15:04:23 -07:00
& mut write_batch ,
2019-11-12 10:29:58 -08:00
& mut just_inserted_data_shreds ,
2019-10-30 16:48:59 -07:00
& mut index_meta_time ,
2019-11-14 00:32:07 -08:00
is_trusted ,
2020-01-14 15:37:53 -08:00
& handle_duplicate ,
2020-05-29 04:35:20 -07:00
leader_schedule ,
2021-06-30 09:20:07 -07:00
ShredSource ::Recovered ,
2020-07-03 17:44:32 -07:00
) {
Err ( InsertDataShredError ::Exists ) = > {
num_recovered_exists + = 1 ;
}
Err ( InsertDataShredError ::InvalidShred ) = > {
num_recovered_failed_invalid + = 1 ;
}
Err ( InsertDataShredError ::BlockstoreError ( _ ) ) = > { }
2020-09-01 22:06:06 -07:00
Ok ( completed_data_sets ) = > {
newly_completed_data_sets . extend (
completed_data_sets . into_iter ( ) . map (
| ( start_index , end_index ) | CompletedDataSetInfo {
slot : shred_slot ,
start_index ,
end_index ,
} ,
) ,
) ;
2020-07-03 17:44:32 -07:00
num_recovered_inserted + = 1 ;
}
}
} else {
num_recovered_failed_sig + = 1 ;
2019-09-05 18:20:30 -07:00
}
}
} ) ;
}
2019-10-26 16:15:59 -07:00
start . stop ( ) ;
let shred_recovery_elapsed = start . as_us ( ) ;
2019-08-20 17:16:06 -07:00
2019-11-11 13:12:55 -08:00
just_inserted_coding_shreds
. into_iter ( )
. for_each ( | ( ( _ , _ ) , shred ) | {
2020-04-24 15:04:23 -07:00
self . check_insert_coding_shred (
shred ,
& mut index_working_set ,
& mut write_batch ,
& mut index_meta_time ,
) ;
2019-11-14 00:32:07 -08:00
num_inserted + = 1 ;
2019-11-11 13:12:55 -08:00
} ) ;
2019-10-26 16:15:59 -07:00
let mut start = Measure ::start ( " Shred recovery " ) ;
2019-10-31 14:03:41 -07:00
// Handle chaining for the members of the slot_meta_working_set that were inserted into,
// drop the others
handle_chaining ( & self . db , & mut write_batch , & mut slot_meta_working_set ) ? ;
2019-10-26 16:15:59 -07:00
start . stop ( ) ;
let chaining_elapsed = start . as_us ( ) ;
2019-08-20 17:16:06 -07:00
2019-10-31 14:03:41 -07:00
let mut start = Measure ::start ( " Commit Working Sets " ) ;
2019-09-04 17:14:42 -07:00
let ( should_signal , newly_completed_slots ) = commit_slot_meta_working_set (
2019-08-20 17:16:06 -07:00
& slot_meta_working_set ,
& self . completed_slots_senders ,
& mut write_batch ,
) ? ;
2019-08-27 11:22:06 -07:00
for ( ( slot , set_index ) , erasure_meta ) in erasure_metas {
write_batch . put ::< cf ::ErasureMeta > ( ( slot , set_index ) , & erasure_meta ) ? ;
}
2019-10-30 16:48:59 -07:00
for ( & slot , index_working_set_entry ) in index_working_set . iter ( ) {
if index_working_set_entry . did_insert_occur {
write_batch . put ::< cf ::Index > ( slot , & index_working_set_entry . index ) ? ;
}
2019-08-20 17:16:06 -07:00
}
2019-10-26 16:15:59 -07:00
start . stop ( ) ;
let commit_working_sets_elapsed = start . as_us ( ) ;
2019-08-20 17:16:06 -07:00
2019-10-26 16:15:59 -07:00
let mut start = Measure ::start ( " Write Batch " ) ;
2019-10-24 10:30:53 -07:00
self . db . write ( write_batch ) ? ;
2019-10-26 16:15:59 -07:00
start . stop ( ) ;
let write_batch_elapsed = start . as_us ( ) ;
2019-08-20 17:16:06 -07:00
send_signals (
2019-09-03 21:32:51 -07:00
& self . new_shreds_signals ,
2019-08-20 17:16:06 -07:00
& self . completed_slots_senders ,
should_signal ,
newly_completed_slots ,
2020-12-13 17:26:34 -08:00
) ;
2019-08-20 17:16:06 -07:00
2019-10-26 16:15:59 -07:00
total_start . stop ( ) ;
2020-03-26 12:51:41 -07:00
metrics . num_shreds + = num_shreds ;
metrics . total_elapsed + = total_start . as_us ( ) ;
metrics . insert_lock_elapsed + = insert_lock_elapsed ;
metrics . insert_shreds_elapsed + = insert_shreds_elapsed ;
metrics . shred_recovery_elapsed + = shred_recovery_elapsed ;
metrics . chaining_elapsed + = chaining_elapsed ;
metrics . commit_working_sets_elapsed + = commit_working_sets_elapsed ;
metrics . write_batch_elapsed + = write_batch_elapsed ;
metrics . num_inserted + = num_inserted ;
metrics . num_recovered + = num_recovered ;
2020-07-03 17:44:32 -07:00
metrics . num_recovered_inserted + = num_recovered_inserted ;
metrics . num_recovered_failed_sig + = num_recovered_failed_sig ;
metrics . num_recovered_failed_invalid = num_recovered_failed_invalid ;
metrics . num_recovered_exists = num_recovered_exists ;
2020-03-26 12:51:41 -07:00
metrics . index_meta_time + = index_meta_time ;
2020-09-29 14:13:21 -07:00
Ok ( ( newly_completed_data_sets , inserted_indices ) )
2019-08-20 17:16:06 -07:00
}
2020-05-05 14:07:21 -07:00
pub fn clear_unconfirmed_slot ( & self , slot : Slot ) {
let _lock = self . insert_shreds_lock . lock ( ) . unwrap ( ) ;
if let Some ( mut slot_meta ) = self
. meta ( slot )
. expect ( " Couldn't fetch from SlotMeta column family " )
{
// Clear all slot related information
2020-06-02 18:49:31 -07:00
self . run_purge ( slot , slot , PurgeType ::PrimaryIndex )
2020-05-05 14:07:21 -07:00
. expect ( " Purge database operations failed " ) ;
// Reinsert parts of `slot_meta` that are important to retain, like the `next_slots`
// field.
slot_meta . clear_unconfirmed_slot ( ) ;
self . meta_cf
. put ( slot , & slot_meta )
. expect ( " Couldn't insert into SlotMeta column family " ) ;
} else {
error! (
" clear_unconfirmed_slot() called on slot {} with no SlotMeta " ,
slot
) ;
}
}
2020-01-14 15:37:53 -08:00
pub fn insert_shreds (
& self ,
shreds : Vec < Shred > ,
leader_schedule : Option < & Arc < LeaderScheduleCache > > ,
is_trusted : bool ,
2020-09-29 14:13:21 -07:00
) -> Result < ( Vec < CompletedDataSetInfo > , Vec < usize > ) > {
2021-06-30 09:20:07 -07:00
let shreds_len = shreds . len ( ) ;
2020-03-26 12:51:41 -07:00
self . insert_shreds_handle_duplicate (
shreds ,
2021-06-30 09:20:07 -07:00
vec! [ false ; shreds_len ] ,
2020-03-26 12:51:41 -07:00
leader_schedule ,
is_trusted ,
& | _ | { } ,
& mut BlockstoreInsertionMetrics ::default ( ) ,
)
2020-01-14 15:37:53 -08:00
}
2019-09-04 17:14:42 -07:00
fn check_insert_coding_shred (
2019-08-26 18:27:45 -07:00
& self ,
2019-09-18 16:24:30 -07:00
shred : Shred ,
2019-10-30 16:48:59 -07:00
index_working_set : & mut HashMap < u64 , IndexMetaWorkingSetEntry > ,
2020-04-24 15:04:23 -07:00
write_batch : & mut WriteBatch ,
2019-11-11 13:12:55 -08:00
index_meta_time : & mut u64 ,
) -> bool {
let slot = shred . slot ( ) ;
let index_meta_working_set_entry =
get_index_meta_entry ( & self . db , slot , index_working_set , index_meta_time ) ;
let index_meta = & mut index_meta_working_set_entry . index ;
// This gives the index of first coding shred in this FEC block
// So, all coding shreds in a given FEC block will have the same set index
2020-04-24 15:04:23 -07:00
self . insert_coding_shred ( index_meta , & shred , write_batch )
2019-11-11 13:12:55 -08:00
. map ( | _ | {
index_meta_working_set_entry . did_insert_occur = true ;
} )
. is_ok ( )
}
2020-12-09 23:14:31 -08:00
/// True when two coding shreds disagree on the erasure-set geometry
/// (data/coding shred counts) declared in their coding headers.
fn erasure_mismatch(shred1: &Shred, shred2: &Shred) -> bool {
    let (a, b) = (&shred1.coding_header, &shred2.coding_header);
    a.num_coding_shreds != b.num_coding_shreds || a.num_data_shreds != b.num_data_shreds
}
// Validates an incoming coding shred and, if acceptable, stages it in
// `just_received_coding_shreds` so a later pass can commit it (or use it for
// erasure recovery). Returns true iff the shred was staged.
//
// - `erasure_metas`: working set of (slot, fec_set_index) -> ErasureMeta,
//   lazily backfilled from the database.
// - `index_working_set`: per-slot shred-presence metadata for this batch.
// - `handle_duplicate`: invoked when the shred is already marked present.
// - `is_trusted`: when true, duplicate/sanity checks are skipped.
// - `is_repaired`: whether the shred arrived via repair (counted in stats).
fn check_cache_coding_shred<F>(
    &self,
    shred: Shred,
    erasure_metas: &mut HashMap<(u64, u64), ErasureMeta>,
    index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
    just_received_coding_shreds: &mut HashMap<(u64, u64), Shred>,
    index_meta_time: &mut u64,
    handle_duplicate: &F,
    is_trusted: bool,
    is_repaired: bool,
) -> bool
where
    F: Fn(Shred),
{
    let slot = shred.slot();
    let shred_index = u64::from(shred.index());

    let index_meta_working_set_entry =
        get_index_meta_entry(&self.db, slot, index_working_set, index_meta_time);

    let index_meta = &mut index_meta_working_set_entry.index;

    // This gives the index of first coding shred in this FEC block
    // So, all coding shreds in a given FEC block will have the same set index

    if !is_trusted {
        // Already have this coding shred: hand it to the duplicate callback.
        if index_meta.coding().is_present(shred_index) {
            handle_duplicate(shred);
            return false;
        }

        if !Blockstore::should_insert_coding_shred(&shred, &self.last_root) {
            return false;
        }
    }

    let set_index = u64::from(shred.common_header.fec_set_index);
    let erasure_config = ErasureConfig::new(
        shred.coding_header.num_data_shreds as usize,
        shred.coding_header.num_coding_shreds as usize,
    );

    // Load the erasure meta for this shred's set from the DB, or create a
    // fresh one from this shred's declared config.
    let erasure_meta = erasure_metas.entry((slot, set_index)).or_insert_with(|| {
        self.erasure_meta_cf
            .get((slot, set_index))
            .expect("Expect database get to succeed")
            .unwrap_or_else(|| ErasureMeta::new(set_index, erasure_config))
    });

    // A config mismatch means the leader produced two conflicting versions of
    // this erasure set; store a duplicate proof if we can find the shred that
    // established the original config.
    if erasure_config != erasure_meta.config {
        let conflicting_shred = self.find_conflicting_coding_shred(
            &shred,
            slot,
            erasure_meta,
            just_received_coding_shreds,
        );
        if let Some(conflicting_shred) = conflicting_shred {
            if self
                .store_duplicate_if_not_existing(slot, conflicting_shred, shred.payload.clone())
                .is_err()
            {
                warn!("bad duplicate store..");
            }
        } else {
            datapoint_info!("bad-conflict-shred", ("slot", slot, i64));
        }

        // ToDo: This is a potential slashing condition
        warn!("Received multiple erasure configs for the same erasure set!!!");
        warn!(
            "Slot: {}, shred index: {}, set_index: {}, is_duplicate: {}, stored config: {:#?}, new config: {:#?}",
            slot, shred.index(), set_index, self.has_duplicate_shreds_in_slot(slot), erasure_meta.config, erasure_config
        );

        return false;
    }

    if is_repaired {
        let mut slots_stats = self.slots_stats.lock().unwrap();
        let mut e = slots_stats.stats.entry(slot).or_default();
        e.num_repaired += 1;
    }

    // Should be safe to modify index_meta here. Two cases
    // 1) Recovery happens: Then all inserted erasure metas are removed
    // from just_received_coding_shreds, and nothing will be committed by
    // `check_insert_coding_shred`, so the coding index meta will not be
    // committed
    index_meta.coding_mut().set_present(shred_index, true);

    just_received_coding_shreds
        .entry((slot, shred_index))
        .or_insert_with(|| shred);

    true
}
// Looks for the coding shred that established the original erasure config for
// `shred`'s erasure set — checking the database first, then the current
// batch. Returns that shred's payload only if its coding header actually
// disagrees with `shred`'s; None if nothing is found, or if the first shred
// found happens to agree.
fn find_conflicting_coding_shred(
    &self,
    shred: &Shred,
    slot: Slot,
    erasure_meta: &ErasureMeta,
    just_received_coding_shreds: &mut HashMap<(u64, u64), Shred>,
) -> Option<Vec<u8>> {
    // Search for the shred which set the initial erasure config, either inserted,
    // or in the current batch in just_received_coding_shreds.
    let coding_indices = erasure_meta.set_index
        ..erasure_meta.set_index + erasure_meta.config.num_coding() as u64;
    let mut conflicting_shred = None;
    for coding_index in coding_indices {
        let maybe_shred = self.get_coding_shred(slot, coding_index);
        if let Ok(Some(shred_data)) = maybe_shred {
            let potential_shred = Shred::new_from_serialized_shred(shred_data).unwrap();
            if Self::erasure_mismatch(&potential_shred, shred) {
                conflicting_shred = Some(potential_shred.payload);
            }
            // The first shred found in the set fixed the config; stop here.
            break;
        } else if let Some(potential_shred) =
            just_received_coding_shreds.get(&(slot, coding_index))
        {
            if Self::erasure_mismatch(potential_shred, shred) {
                conflicting_shred = Some(potential_shred.payload.clone());
            }
            break;
        }
    }
    conflicting_shred
}
2020-01-14 15:37:53 -08:00
// Validates a data shred and, on success, stages it into `write_batch` and
// the working sets. Returns the (start_index, end_index) data-set ranges the
// shred completed, or an `InsertDataShredError` describing the rejection.
//
// - `is_trusted`: skip duplicate/sanity checks (trusted sources).
// - `handle_duplicate`: invoked when the shred already exists.
// - `shred_source`: origin (turbine/repair/recovered), used for stats.
#[allow(clippy::too_many_arguments)]
fn check_insert_data_shred<F>(
    &self,
    shred: Shred,
    erasure_metas: &mut HashMap<(u64, u64), ErasureMeta>,
    index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
    slot_meta_working_set: &mut HashMap<u64, SlotMetaWorkingSetEntry>,
    write_batch: &mut WriteBatch,
    just_inserted_data_shreds: &mut HashMap<(u64, u64), Shred>,
    index_meta_time: &mut u64,
    is_trusted: bool,
    handle_duplicate: &F,
    leader_schedule: Option<&Arc<LeaderScheduleCache>>,
    shred_source: ShredSource,
) -> std::result::Result<Vec<(u32, u32)>, InsertDataShredError>
where
    F: Fn(Shred),
{
    let slot = shred.slot();
    let shred_index = u64::from(shred.index());

    let index_meta_working_set_entry =
        get_index_meta_entry(&self.db, slot, index_working_set, index_meta_time);

    let index_meta = &mut index_meta_working_set_entry.index;

    let slot_meta_entry =
        get_slot_meta_entry(&self.db, slot_meta_working_set, slot, shred.parent());

    let slot_meta = &mut slot_meta_entry.new_slot_meta.borrow_mut();

    if !is_trusted {
        if Self::is_data_shred_present(&shred, slot_meta, index_meta.data()) {
            handle_duplicate(shred);
            return Err(InsertDataShredError::Exists);
        }
        if shred.last_in_slot() && shred_index < slot_meta.received && !slot_meta.is_full() {
            // We got a last shred < slot_meta.received, which signals there's an alternative,
            // shorter version of the slot. Because also `!slot_meta.is_full()`, then this
            // means, for the current version of the slot, we might never get all the
            // shreds < the current last index, never replay this slot, and make no
            // progress (for instance if a leader sends an additional detached "last index"
            // shred with a very high index, but none of the intermediate shreds). Ideally, we would
            // just purge all shreds > the new last index slot, but because replay may have already
            // replayed entries past the newly detected "last" shred, then mark the slot as dead
            // and wait for replay to dump and repair the correct version.
            warn!("Received *last* shred index {} less than previous shred index {}, and slot {} is not full, marking slot dead", shred_index, slot_meta.received, slot);
            write_batch.put::<cf::DeadSlots>(slot, &true).unwrap();
        }

        if !self.should_insert_data_shred(
            &shred,
            slot_meta,
            just_inserted_data_shreds,
            &self.last_root,
            leader_schedule,
            shred_source.clone(),
        ) {
            return Err(InsertDataShredError::InvalidShred);
        }
    }

    let set_index = u64::from(shred.common_header.fec_set_index);
    let newly_completed_data_sets = self.insert_data_shred(
        slot_meta,
        index_meta.data_mut(),
        &shred,
        write_batch,
        shred_source,
    )?;
    just_inserted_data_shreds.insert((slot, shred_index), shred);
    index_meta_working_set_entry.did_insert_occur = true;
    slot_meta_entry.did_insert_occur = true;

    // Backfill the erasure meta for this shred's set from the database (if it
    // exists there) so coding-shred handling in this batch can see it.
    if let std::collections::hash_map::Entry::Vacant(_) = erasure_metas.entry((slot, set_index))
    {
        if let Some(meta) = self
            .erasure_meta_cf
            .get((slot, set_index))
            .expect("Expect database get to succeed")
        {
            erasure_metas.insert((slot, set_index), meta);
        }
    }
    Ok(newly_completed_data_sets)
}
2020-12-09 23:14:31 -08:00
fn should_insert_coding_shred ( shred : & Shred , last_root : & RwLock < u64 > ) -> bool {
2019-09-04 17:14:42 -07:00
let shred_index = shred . index ( ) ;
2021-05-03 06:20:47 -07:00
let fec_set_index = shred . common_header . fec_set_index ;
let num_coding_shreds = shred . coding_header . num_coding_shreds as u32 ;
shred . is_code ( )
& & shred_index > = fec_set_index
& & shred_index - fec_set_index < num_coding_shreds
& & num_coding_shreds ! = 0
& & num_coding_shreds < = 8 * MAX_DATA_SHREDS_PER_FEC_BLOCK
& & num_coding_shreds - 1 < = u32 ::MAX - fec_set_index
& & shred . slot ( ) > * last_root . read ( ) . unwrap ( )
2019-09-04 17:14:42 -07:00
}
2020-04-24 15:04:23 -07:00
fn insert_coding_shred (
& self ,
index_meta : & mut Index ,
shred : & Shred ,
write_batch : & mut WriteBatch ,
) -> Result < ( ) > {
2019-09-04 17:14:42 -07:00
let slot = shred . slot ( ) ;
let shred_index = u64 ::from ( shred . index ( ) ) ;
// Assert guaranteed by integrity checks on the shred that happen before
// `insert_coding_shred` is called
2021-05-03 06:20:47 -07:00
assert! ( shred . is_code ( ) & & shred_index > = shred . common_header . fec_set_index as u64 ) ;
2019-09-04 17:14:42 -07:00
// Commit step: commit all changes to the mutable structures at once, or none at all.
// We don't want only a subset of these changes going through.
2020-04-24 15:04:23 -07:00
write_batch . put_bytes ::< cf ::ShredCode > ( ( slot , shred_index ) , & shred . payload ) ? ;
2019-09-04 17:14:42 -07:00
index_meta . coding_mut ( ) . set_present ( shred_index , true ) ;
2019-09-17 18:22:46 -07:00
Ok ( ( ) )
2019-09-04 17:14:42 -07:00
}
2020-01-14 15:37:53 -08:00
/// True when the blockstore already holds this data shred: either its index
/// lies below the contiguous `consumed` watermark, or the shred index is
/// explicitly marked present.
fn is_data_shred_present(shred: &Shred, slot_meta: &SlotMeta, data_index: &ShredIndex) -> bool {
    let shred_index = u64::from(shred.index());
    if shred_index < slot_meta.consumed {
        true
    } else {
        data_index.is_present(shred_index)
    }
}
2021-03-22 16:18:22 -07:00
fn get_data_shred_from_just_inserted_or_db < ' a > (
& ' a self ,
just_inserted_data_shreds : & ' a HashMap < ( u64 , u64 ) , Shred > ,
slot : Slot ,
index : u64 ,
) -> Cow < ' a , Vec < u8 > > {
if let Some ( shred ) = just_inserted_data_shreds . get ( & ( slot , index ) ) {
Cow ::Borrowed ( & shred . payload )
} else {
// If it doesn't exist in the just inserted set, it must exist in
// the backing store
Cow ::Owned ( self . get_data_shred ( slot , index ) . unwrap ( ) . unwrap ( ) )
}
}
2019-09-04 17:14:42 -07:00
// Sanity-checks a data shred before insertion. Returns false (emitting a
// `blockstore_error` datapoint) when the shred must be rejected. For the two
// conflict cases (index past the slot's recorded last_index, or a "last"
// shred below `received`), a duplicate-slot proof is stored as well.
fn should_insert_data_shred(
    &self,
    shred: &Shred,
    slot_meta: &SlotMeta,
    just_inserted_data_shreds: &HashMap<(u64, u64), Shred>,
    last_root: &RwLock<u64>,
    leader_schedule: Option<&Arc<LeaderScheduleCache>>,
    shred_source: ShredSource,
) -> bool {
    use crate::shred::SHRED_PAYLOAD_SIZE;
    let shred_index = u64::from(shred.index());
    let slot = shred.slot();
    let last_in_slot = if shred.last_in_slot() {
        debug!("got last in slot");
        true
    } else {
        false
    };

    // A shred declaring zero data bytes carries nothing valid.
    if shred.data_header.size == 0 {
        let leader_pubkey = leader_schedule
            .and_then(|leader_schedule| leader_schedule.slot_leader_at(slot, None));
        datapoint_error!(
            "blockstore_error",
            (
                "error",
                format!(
                    "Leader {:?}, slot {}: received index {} is empty",
                    leader_pubkey, slot, shred_index,
                ),
                String
            )
        );
        return false;
    }
    // Oversized payloads are malformed.
    if shred.payload.len() > SHRED_PAYLOAD_SIZE {
        let leader_pubkey = leader_schedule
            .and_then(|leader_schedule| leader_schedule.slot_leader_at(slot, None));
        datapoint_error!(
            "blockstore_error",
            (
                "error",
                format!(
                    "Leader {:?}, slot {}: received index {} shred.payload.len() > SHRED_PAYLOAD_SIZE",
                    leader_pubkey, slot, shred_index,
                ),
                String
            )
        );
        return false;
    }

    // Check that we do not receive shred_index >= than the last_index
    // for the slot
    let last_index = slot_meta.last_index;
    if shred_index >= last_index {
        let leader_pubkey = leader_schedule
            .and_then(|leader_schedule| leader_schedule.slot_leader_at(slot, None));

        // Pull the previously seen "last" shred as the other half of the
        // duplicate-slot proof.
        let ending_shred: Cow<Vec<u8>> = self.get_data_shred_from_just_inserted_or_db(
            just_inserted_data_shreds,
            slot,
            last_index,
        );

        if self
            .store_duplicate_if_not_existing(
                slot,
                ending_shred.into_owned(),
                shred.payload.clone(),
            )
            .is_err()
        {
            warn!("store duplicate error");
        }

        datapoint_error!(
            "blockstore_error",
            (
                "error",
                format!(
                    "Leader {:?}, slot {}: received index {} >= slot.last_index {}, shred_source: {:?}",
                    leader_pubkey, slot, shred_index, last_index, shred_source
                ),
                String
            )
        );
        return false;
    }
    // Check that we do not receive a shred with "last_index" true, but shred_index
    // less than our current received
    if last_in_slot && shred_index < slot_meta.received {
        let leader_pubkey = leader_schedule
            .and_then(|leader_schedule| leader_schedule.slot_leader_at(slot, None));

        // Highest shred we've received so far is the conflicting evidence.
        let ending_shred: Cow<Vec<u8>> = self.get_data_shred_from_just_inserted_or_db(
            just_inserted_data_shreds,
            slot,
            slot_meta.received - 1,
        );

        if self
            .store_duplicate_if_not_existing(
                slot,
                ending_shred.into_owned(),
                shred.payload.clone(),
            )
            .is_err()
        {
            warn!("store duplicate error");
        }

        datapoint_error!(
            "blockstore_error",
            (
                "error",
                format!(
                    "Leader {:?}, slot {}: received shred_index {} < slot.received {}, shred_source: {:?}",
                    leader_pubkey, slot, shred_index, slot_meta.received, shred_source
                ),
                String
            )
        );
        return false;
    }

    let last_root = *last_root.read().unwrap();
    // Finally, the shred's slot must chain consistently w.r.t. its parent and
    // the last root.
    verify_shred_slots(slot, slot_meta.parent_slot, last_root)
}
2019-08-20 17:16:06 -07:00
// Stages `shred`'s payload into `write_batch` and updates `slot_meta` /
// `data_index` bookkeeping, returning the (start, end) ranges of data sets
// this shred completed. Assumes the shred already passed
// `should_insert_data_shred`. Also maintains per-slot repair/recovery stats
// and emits a datapoint when the slot becomes full.
fn insert_data_shred(
    &self,
    slot_meta: &mut SlotMeta,
    data_index: &mut ShredIndex,
    shred: &Shred,
    write_batch: &mut WriteBatch,
    shred_source: ShredSource,
) -> Result<Vec<(u32, u32)>> {
    let slot = shred.slot();
    let index = u64::from(shred.index());

    let last_in_slot = if shred.last_in_slot() {
        debug!("got last in slot");
        true
    } else {
        false
    };

    let last_in_data = if shred.data_complete() {
        debug!("got last in data");
        true
    } else {
        false
    };

    // Parent for slot meta should have been set by this point
    assert!(!is_orphan(slot_meta));

    // If this shred extends the contiguous prefix, advance `consumed` past
    // any already-present shreds that follow it.
    let new_consumed = if slot_meta.consumed == index {
        let mut current_index = index + 1;

        while data_index.is_present(current_index) {
            current_index += 1;
        }
        current_index
    } else {
        slot_meta.consumed
    };

    // Commit step: commit all changes to the mutable structures at once, or none at all.
    // We don't want only a subset of these changes going through.
    write_batch.put_bytes::<cf::ShredData>(
        (slot, index),
        // Payload will be padded out to SHRED_PAYLOAD_SIZE
        // But only need to store the bytes within data_header.size
        &shred.payload[..shred.data_header.size as usize],
    )?;
    data_index.set_present(index, true);
    let newly_completed_data_sets = update_slot_meta(
        last_in_slot,
        last_in_data,
        slot_meta,
        index as u32,
        new_consumed,
        shred.reference_tick(),
        data_index,
    );
    // Count repaired/recovered shreds per slot; reported when the slot fills.
    if shred_source == ShredSource::Repaired || shred_source == ShredSource::Recovered {
        let mut slots_stats = self.slots_stats.lock().unwrap();
        let mut e = slots_stats.stats.entry(slot_meta.slot).or_default();
        if shred_source == ShredSource::Repaired {
            e.num_repaired += 1;
        }
        if shred_source == ShredSource::Recovered {
            e.num_recovered += 1;
        }
    }
    if slot_meta.is_full() {
        // Slot just completed: pop its stats entry, and at most every 30s
        // prune stats for slots at or below the last root.
        let (num_repaired, num_recovered) = {
            let mut slots_stats = self.slots_stats.lock().unwrap();
            if let Some(e) = slots_stats.stats.remove(&slot_meta.slot) {
                if slots_stats.last_cleanup_ts.elapsed().as_secs() > 30 {
                    let root = self.last_root();
                    slots_stats.stats = slots_stats.stats.split_off(&root);
                    slots_stats.last_cleanup_ts = Instant::now();
                }
                (e.num_repaired, e.num_recovered)
            } else {
                (0, 0)
            }
        };
        datapoint_info!(
            "shred_insert_is_full",
            (
                "total_time_ms",
                solana_sdk::timing::timestamp() - slot_meta.first_shred_timestamp,
                i64
            ),
            ("slot", slot_meta.slot, i64),
            ("last_index", slot_meta.last_index, i64),
            ("num_repaired", num_repaired, i64),
            ("num_recovered", num_recovered, i64),
        );
    }
    trace!("inserted shred into slot {:?} and index {:?}", slot, index);
    Ok(newly_completed_data_sets)
}
2019-11-02 00:38:30 -07:00
pub fn get_data_shred ( & self , slot : Slot , index : u64 ) -> Result < Option < Vec < u8 > > > {
2021-04-27 15:40:41 -07:00
use crate ::shred ::SHRED_PAYLOAD_SIZE ;
self . data_shred_cf . get_bytes ( ( slot , index ) ) . map ( | data | {
data . map ( | mut d | {
// Only data_header.size bytes stored in the blockstore so
// pad the payload out to SHRED_PAYLOAD_SIZE so that the
// erasure recovery works properly.
d . resize ( cmp ::max ( d . len ( ) , SHRED_PAYLOAD_SIZE ) , 0 ) ;
d
} )
} )
2019-08-20 17:16:06 -07:00
}
2020-03-19 23:35:01 -07:00
pub fn get_data_shreds_for_slot (
& self ,
slot : Slot ,
start_index : u64 ,
) -> ShredResult < Vec < Shred > > {
2020-04-24 15:04:23 -07:00
self . slot_data_iterator ( slot , start_index )
. expect ( " blockstore couldn't fetch iterator " )
. map ( | data | Shred ::new_from_serialized_shred ( data . 1. to_vec ( ) ) )
. collect ( )
2020-03-19 23:35:01 -07:00
}
2019-08-28 22:34:47 -07:00
pub fn get_data_shreds (
& self ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
2019-08-28 22:34:47 -07:00
from_index : u64 ,
2019-09-03 21:32:51 -07:00
to_index : u64 ,
2019-08-28 22:34:47 -07:00
buffer : & mut [ u8 ] ,
) -> Result < ( u64 , usize ) > {
2021-06-04 15:40:27 -07:00
let _lock = self . check_lowest_cleanup_slot ( slot ) ? ;
2019-08-28 22:34:47 -07:00
let meta_cf = self . db . column ::< cf ::SlotMeta > ( ) ;
let mut buffer_offset = 0 ;
let mut last_index = 0 ;
if let Some ( meta ) = meta_cf . get ( slot ) ? {
if ! meta . is_full ( ) {
warn! ( " The slot is not yet full. Will not return any shreds " ) ;
return Ok ( ( last_index , buffer_offset ) ) ;
}
2019-09-03 21:32:51 -07:00
let to_index = cmp ::min ( to_index , meta . consumed ) ;
for index in from_index .. to_index {
2019-08-28 22:34:47 -07:00
if let Some ( shred_data ) = self . get_data_shred ( slot , index ) ? {
let shred_len = shred_data . len ( ) ;
if buffer . len ( ) . saturating_sub ( buffer_offset ) > = shred_len {
buffer [ buffer_offset .. buffer_offset + shred_len ]
. copy_from_slice ( & shred_data [ .. shred_len ] ) ;
buffer_offset + = shred_len ;
last_index = index ;
// All shreds are of the same length.
2020-06-17 20:54:52 -07:00
// Let's check if we have scope to accommodate another shred
2019-08-28 22:34:47 -07:00
// If not, let's break right away, as it'll save on 1 DB read
if buffer . len ( ) . saturating_sub ( buffer_offset ) < shred_len {
break ;
}
} else {
break ;
}
}
}
}
Ok ( ( last_index , buffer_offset ) )
}
2019-11-02 00:38:30 -07:00
/// Reads the raw payload of a single coding shred at `(slot, index)`;
/// `Ok(None)` if no shred is stored at that position.
pub fn get_coding_shred(&self, slot: Slot, index: u64) -> Result<Option<Vec<u8>>> {
    self.code_shred_cf.get_bytes((slot, index))
}
2020-03-19 23:35:01 -07:00
pub fn get_coding_shreds_for_slot (
& self ,
slot : Slot ,
start_index : u64 ,
) -> ShredResult < Vec < Shred > > {
2020-04-24 15:04:23 -07:00
self . slot_coding_iterator ( slot , start_index )
. expect ( " blockstore couldn't fetch iterator " )
. map ( | code | Shred ::new_from_serialized_shred ( code . 1. to_vec ( ) ) )
. collect ( )
2020-03-19 23:35:01 -07:00
}
2019-11-18 18:05:02 -08:00
// Only used by tests
#[ allow(clippy::too_many_arguments) ]
2021-04-21 05:47:50 -07:00
pub ( crate ) fn write_entries (
2019-08-20 17:16:06 -07:00
& self ,
2019-11-02 00:38:30 -07:00
start_slot : Slot ,
2019-08-20 17:16:06 -07:00
num_ticks_in_start_slot : u64 ,
2019-10-08 00:42:51 -07:00
start_index : u32 ,
2019-08-20 17:16:06 -07:00
ticks_per_slot : u64 ,
parent : Option < u64 > ,
is_full_slot : bool ,
2019-08-28 22:34:47 -07:00
keypair : & Arc < Keypair > ,
2019-10-08 00:42:51 -07:00
entries : Vec < Entry > ,
2019-11-18 18:05:02 -08:00
version : u16 ,
2021-04-21 05:47:50 -07:00
) -> Result < usize /* num of data shreds */ > {
2019-10-31 13:38:50 -07:00
let mut parent_slot = parent . map_or ( start_slot . saturating_sub ( 1 ) , | v | v ) ;
let num_slots = ( start_slot - parent_slot ) . max ( 1 ) ; // Note: slot 0 has parent slot 0
assert! ( num_ticks_in_start_slot < num_slots * ticks_per_slot ) ;
let mut remaining_ticks_in_slot = num_slots * ticks_per_slot - num_ticks_in_start_slot ;
2019-08-20 17:16:06 -07:00
let mut current_slot = start_slot ;
2021-06-21 13:12:38 -07:00
let mut shredder = Shredder ::new ( current_slot , parent_slot , 0 , version ) . unwrap ( ) ;
2019-08-20 17:16:06 -07:00
let mut all_shreds = vec! [ ] ;
2019-10-08 00:42:51 -07:00
let mut slot_entries = vec! [ ] ;
2019-08-20 17:16:06 -07:00
// Find all the entries for start_slot
2019-10-08 00:42:51 -07:00
for entry in entries . into_iter ( ) {
2019-08-20 17:16:06 -07:00
if remaining_ticks_in_slot = = 0 {
current_slot + = 1 ;
parent_slot = current_slot - 1 ;
remaining_ticks_in_slot = ticks_per_slot ;
2019-10-08 00:42:51 -07:00
let mut current_entries = vec! [ ] ;
std ::mem ::swap ( & mut slot_entries , & mut current_entries ) ;
let start_index = {
if all_shreds . is_empty ( ) {
start_index
} else {
0
}
} ;
let ( mut data_shreds , mut coding_shreds , _ ) =
2021-06-21 13:12:38 -07:00
shredder . entries_to_shreds ( keypair , & current_entries , true , start_index ) ;
2019-10-08 00:42:51 -07:00
all_shreds . append ( & mut data_shreds ) ;
all_shreds . append ( & mut coding_shreds ) ;
2019-11-06 13:27:58 -08:00
shredder = Shredder ::new (
current_slot ,
parent_slot ,
( ticks_per_slot - remaining_ticks_in_slot ) as u8 ,
2019-11-18 18:05:02 -08:00
version ,
2019-11-06 13:27:58 -08:00
)
2021-04-21 05:47:50 -07:00
. unwrap ( ) ;
2019-08-20 17:16:06 -07:00
}
2019-10-08 00:42:51 -07:00
if entry . is_tick ( ) {
2019-08-20 17:16:06 -07:00
remaining_ticks_in_slot - = 1 ;
}
2019-10-08 00:42:51 -07:00
slot_entries . push ( entry ) ;
2019-08-20 17:16:06 -07:00
}
2019-10-08 00:42:51 -07:00
if ! slot_entries . is_empty ( ) {
let ( mut data_shreds , mut coding_shreds , _ ) =
2021-06-21 13:12:38 -07:00
shredder . entries_to_shreds ( keypair , & slot_entries , is_full_slot , 0 ) ;
2019-10-08 00:42:51 -07:00
all_shreds . append ( & mut data_shreds ) ;
all_shreds . append ( & mut coding_shreds ) ;
2019-08-20 17:16:06 -07:00
}
2021-04-21 05:47:50 -07:00
let num_data = all_shreds . iter ( ) . filter ( | shred | shred . is_data ( ) ) . count ( ) ;
2019-11-14 00:32:07 -08:00
self . insert_shreds ( all_shreds , None , false ) ? ;
2021-04-21 05:47:50 -07:00
Ok ( num_data )
2019-08-20 17:16:06 -07:00
}
2019-11-02 00:38:30 -07:00
/// Returns the stored shred `Index` meta for `slot`, if present.
pub fn get_index(&self, slot: Slot) -> Result<Option<Index>> {
    self.index_cf.get(slot)
}
2019-07-17 14:42:29 -07:00
/// Manually update the meta for a slot.
/// Can interfere with automatic meta update and potentially break chaining.
/// Dangerous. Use with care.
2019-11-02 00:38:30 -07:00
pub fn put_meta_bytes ( & self , slot : Slot , bytes : & [ u8 ] ) -> Result < ( ) > {
2019-07-17 14:42:29 -07:00
self . meta_cf . put_bytes ( slot , bytes )
}
2020-04-24 15:04:23 -07:00
// Given a start and end entry index, find all the missing
// indexes in the ledger in the range [start_index, end_index)
// for the slot with the specified slot
fn find_missing_indexes < C > (
db_iterator : & mut DBRawIterator ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
2020-04-24 15:04:23 -07:00
first_timestamp : u64 ,
2019-01-08 15:53:44 -08:00
start_index : u64 ,
end_index : u64 ,
max_missing : usize ,
2020-04-24 15:04:23 -07:00
) -> Vec < u64 >
where
C : Column < Index = ( u64 , u64 ) > ,
{
if start_index > = end_index | | max_missing = = 0 {
return vec! [ ] ;
}
let mut missing_indexes = vec! [ ] ;
let ticks_since_first_insert =
DEFAULT_TICKS_PER_SECOND * ( timestamp ( ) - first_timestamp ) / 1000 ;
// Seek to the first shred with index >= start_index
db_iterator . seek ( & C ::key ( ( slot , start_index ) ) ) ;
// The index of the first missing shred in the slot
let mut prev_index = start_index ;
' outer : loop {
if ! db_iterator . valid ( ) {
for i in prev_index .. end_index {
missing_indexes . push ( i ) ;
if missing_indexes . len ( ) = = max_missing {
break ;
2019-01-08 15:53:44 -08:00
}
2020-04-24 15:04:23 -07:00
}
break ;
}
2021-06-18 06:34:46 -07:00
let ( current_slot , index ) = C ::index ( db_iterator . key ( ) . expect ( " Expect a valid key " ) ) ;
2020-04-24 15:04:23 -07:00
let current_index = {
if current_slot > slot {
end_index
2019-02-07 15:10:54 -08:00
} else {
2020-04-24 15:04:23 -07:00
index
2019-01-08 15:53:44 -08:00
}
2020-04-24 15:04:23 -07:00
} ;
let upper_index = cmp ::min ( current_index , end_index ) ;
// the tick that will be used to figure out the timeout for this hole
let reference_tick = u64 ::from ( Shred ::reference_tick_from_data (
2021-06-18 06:34:46 -07:00
db_iterator . value ( ) . expect ( " couldn't read value " ) ,
2020-04-24 15:04:23 -07:00
) ) ;
if ticks_since_first_insert < reference_tick + MAX_TURBINE_DELAY_IN_TICKS {
// The higher index holes have not timed out yet
break 'outer ;
}
for i in prev_index .. upper_index {
missing_indexes . push ( i ) ;
if missing_indexes . len ( ) = = max_missing {
break 'outer ;
}
}
if current_slot > slot {
break ;
}
if current_index > = end_index {
break ;
}
prev_index = current_index + 1 ;
db_iterator . next ( ) ;
2019-01-08 15:53:44 -08:00
}
2020-04-24 15:04:23 -07:00
missing_indexes
2019-01-08 15:53:44 -08:00
}
2020-04-24 15:04:23 -07:00
pub fn find_missing_data_indexes (
2019-01-08 15:53:44 -08:00
& self ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
2020-04-24 15:04:23 -07:00
first_timestamp : u64 ,
2019-01-08 15:53:44 -08:00
start_index : u64 ,
end_index : u64 ,
max_missing : usize ,
) -> Vec < u64 > {
2020-04-24 15:04:23 -07:00
if let Ok ( mut db_iterator ) = self
. db
. raw_iterator_cf ( self . db . cf_handle ::< cf ::ShredData > ( ) )
{
Self ::find_missing_indexes ::< cf ::ShredData > (
& mut db_iterator ,
slot ,
first_timestamp ,
start_index ,
end_index ,
max_missing ,
)
} else {
vec! [ ]
2019-04-02 14:58:07 -07:00
}
2019-01-08 15:53:44 -08:00
}
2020-09-09 08:33:14 -07:00
pub fn get_block_time ( & self , slot : Slot ) -> Result < Option < UnixTimestamp > > {
2020-04-15 17:09:14 -07:00
datapoint_info! (
" blockstore-rpc-api " ,
( " method " , " get_block_time " . to_string ( ) , String )
) ;
2021-06-04 15:40:27 -07:00
let _lock = self . check_lowest_cleanup_slot ( slot ) ? ;
2020-09-09 08:33:14 -07:00
self . blocktime_cf . get ( slot )
}
2020-10-26 12:23:45 -07:00
/// Stores the block time for `slot`.
pub fn cache_block_time(&self, slot: Slot, timestamp: UnixTimestamp) -> Result<()> {
    self.blocktime_cf.put(slot, &timestamp)
}
/// Returns the cached block height for `slot`, if one has been stored.
pub fn get_block_height(&self, slot: Slot) -> Result<Option<u64>> {
    datapoint_info!(
        "blockstore-rpc-api",
        ("method", "get_block_height".to_string(), String)
    );
    // Hold the cleanup guard so LedgerCleanupService can't purge the slot mid-read
    let _lock = self.check_lowest_cleanup_slot(slot)?;
    self.block_height_cf.get(slot)
}
/// Stores the block height for `slot`.
pub fn cache_block_height(&self, slot: Slot, block_height: u64) -> Result<()> {
    self.block_height_cf.put(slot, &block_height)
}
2020-04-22 13:33:06 -07:00
/// Returns the first rooted slot still present in the ledger, or the
/// default slot (0) when no root is found.
pub fn get_first_available_block(&self) -> Result<Slot> {
    let first_root = self
        .rooted_slot_iterator(self.lowest_slot())?
        .next()
        .unwrap_or_default();
    Ok(first_root)
}
2021-03-26 15:47:35 -07:00
pub fn get_rooted_block (
2021-02-17 17:04:52 -08:00
& self ,
slot : Slot ,
require_previous_blockhash : bool ,
) -> Result < ConfirmedBlock > {
2020-04-15 17:09:14 -07:00
datapoint_info! (
" blockstore-rpc-api " ,
2021-03-26 15:47:35 -07:00
( " method " , " get_rooted_block " . to_string ( ) , String )
2020-04-15 17:09:14 -07:00
) ;
2021-06-04 15:40:27 -07:00
let _lock = self . check_lowest_cleanup_slot ( slot ) ? ;
2019-11-14 15:34:39 -08:00
if self . is_root ( slot ) {
2021-03-26 15:47:35 -07:00
return self . get_complete_block ( slot , require_previous_blockhash ) ;
}
Err ( BlockstoreError ::SlotNotRooted )
}
2019-11-17 19:17:15 -08:00
2021-03-26 15:47:35 -07:00
pub fn get_complete_block (
& self ,
slot : Slot ,
require_previous_blockhash : bool ,
) -> Result < ConfirmedBlock > {
let slot_meta_cf = self . db . column ::< cf ::SlotMeta > ( ) ;
let slot_meta = match slot_meta_cf . get ( slot ) ? {
Some ( slot_meta ) = > slot_meta ,
None = > {
info! ( " SlotMeta not found for slot {} " , slot ) ;
return Err ( BlockstoreError ::SlotUnavailable ) ;
}
} ;
if slot_meta . is_full ( ) {
2020-04-09 13:09:59 -07:00
let slot_entries = self . get_slot_entries ( slot , 0 ) ? ;
2019-12-11 14:06:54 -08:00
if ! slot_entries . is_empty ( ) {
let slot_transaction_iterator = slot_entries
. iter ( )
. cloned ( )
2021-02-13 22:32:43 -08:00
. flat_map ( | entry | entry . transactions )
. map ( | transaction | {
if let Err ( err ) = transaction . sanitize ( ) {
warn! (
2021-03-26 15:47:35 -07:00
" Blockstore::get_block sanitize failed: {:?}, \
2021-02-13 22:32:43 -08:00
slot : { :? } , \
{ :? } " ,
err , slot , transaction ,
) ;
}
transaction
} ) ;
2020-04-22 13:33:06 -07:00
let parent_slot_entries = self
. get_slot_entries ( slot_meta . parent_slot , 0 )
. unwrap_or_default ( ) ;
2021-02-17 17:04:52 -08:00
if parent_slot_entries . is_empty ( ) & & require_previous_blockhash {
return Err ( BlockstoreError ::ParentEntriesUnavailable ) ;
}
2019-12-11 14:06:54 -08:00
let previous_blockhash = if ! parent_slot_entries . is_empty ( ) {
get_last_hash ( parent_slot_entries . iter ( ) ) . unwrap ( )
} else {
Hash ::default ( )
} ;
2020-01-12 21:34:30 -08:00
let blockhash = get_last_hash ( slot_entries . iter ( ) )
. unwrap_or_else ( | | panic! ( " Rooted slot {:?} must have blockhash " , slot ) ) ;
2020-10-15 17:04:10 -07:00
let rewards = self
. rewards_cf
. get_protobuf_or_bincode ::< StoredExtendedRewards > ( slot ) ?
. unwrap_or_default ( )
. into ( ) ;
2021-05-26 21:16:16 -07:00
// The Blocktime and BlockHeight column families are updated asynchronously; they
// may not be written by the time the complete slot entries are available. In this
// case, these fields will be `None`.
2020-09-09 08:33:14 -07:00
let block_time = self . blocktime_cf . get ( slot ) ? ;
2021-05-26 21:16:16 -07:00
let block_height = self . block_height_cf . get ( slot ) ? ;
2020-02-04 18:50:24 -08:00
2020-03-26 13:29:30 -07:00
let block = ConfirmedBlock {
2020-01-12 21:34:30 -08:00
previous_blockhash : previous_blockhash . to_string ( ) ,
blockhash : blockhash . to_string ( ) ,
2019-12-11 14:06:54 -08:00
parent_slot : slot_meta . parent_slot ,
2020-09-23 22:10:29 -07:00
transactions : self
. map_transactions_to_statuses ( slot , slot_transaction_iterator ) ,
2020-02-04 18:50:24 -08:00
rewards ,
2020-09-09 08:33:14 -07:00
block_time ,
2021-05-26 21:16:16 -07:00
block_height ,
2019-12-11 14:06:54 -08:00
} ;
return Ok ( block ) ;
}
2019-11-14 15:34:39 -08:00
}
2021-03-26 15:47:35 -07:00
Err ( BlockstoreError ::SlotUnavailable )
2019-11-14 15:34:39 -08:00
}
2021-05-04 00:51:42 -07:00
pub fn map_transactions_to_statuses < ' a > (
2019-11-17 19:17:15 -08:00
& self ,
2019-11-18 08:12:42 -08:00
slot : Slot ,
2019-11-17 19:17:15 -08:00
iterator : impl Iterator < Item = Transaction > + ' a ,
2020-03-26 13:29:30 -07:00
) -> Vec < TransactionWithStatusMeta > {
2019-11-18 08:12:42 -08:00
iterator
. map ( | transaction | {
let signature = transaction . signatures [ 0 ] ;
2020-03-26 13:29:30 -07:00
TransactionWithStatusMeta {
2020-09-23 22:10:29 -07:00
transaction ,
2020-01-14 23:25:45 -08:00
meta : self
2020-04-04 20:24:06 -07:00
. read_transaction_status ( ( signature , slot ) )
2021-03-05 08:05:35 -08:00
. ok ( )
. flatten ( ) ,
2020-01-14 23:25:45 -08:00
}
2019-11-18 08:12:42 -08:00
} )
. collect ( )
2019-11-17 19:17:15 -08:00
}
2020-08-10 09:27:38 -07:00
/// Initializes the TransactionStatusIndex column family with two records, `0` and `1`,
/// which are used as the primary index for entries in the TransactionStatus and
/// AddressSignatures columns. At any given time, one primary index is active (ie. new records
/// are stored under this index), the other is frozen.
2020-04-04 20:24:06 -07:00
fn initialize_transaction_status_index ( & self ) -> Result < ( ) > {
self . transaction_status_index_cf
. put ( 0 , & TransactionStatusIndexMeta ::default ( ) ) ? ;
self . transaction_status_index_cf
. put ( 1 , & TransactionStatusIndexMeta ::default ( ) ) ? ;
// This dummy status improves compaction performance
2021-03-05 08:05:35 -08:00
let default_status = TransactionStatusMeta ::default ( ) . into ( ) ;
self . transaction_status_cf
. put_protobuf ( cf ::TransactionStatus ::as_index ( 2 ) , & default_status ) ? ;
2020-04-08 12:50:39 -07:00
self . address_signatures_cf . put (
cf ::AddressSignatures ::as_index ( 2 ) ,
& AddressSignatureMeta ::default ( ) ,
2020-04-04 20:24:06 -07:00
)
}
2020-08-10 09:27:38 -07:00
/// Toggles the active primary index between `0` and `1`, and clears the stored max-slot of the
/// frozen index in preparation for pruning.
2020-04-04 20:24:06 -07:00
fn toggle_transaction_status_index (
& self ,
batch : & mut WriteBatch ,
w_active_transaction_status_index : & mut u64 ,
to_slot : Slot ,
) -> Result < Option < u64 > > {
let index0 = self . transaction_status_index_cf . get ( 0 ) ? ;
if index0 . is_none ( ) {
return Ok ( None ) ;
}
let mut index0 = index0 . unwrap ( ) ;
let mut index1 = self . transaction_status_index_cf . get ( 1 ) ? . unwrap ( ) ;
if ! index0 . frozen & & ! index1 . frozen {
index0 . frozen = true ;
* w_active_transaction_status_index = 1 ;
batch . put ::< cf ::TransactionStatusIndex > ( 0 , & index0 ) ? ;
Ok ( None )
} else {
2021-05-28 00:42:56 -07:00
let purge_target_primary_index = if index0 . frozen & & to_slot > index0 . max_slot {
info! (
" Pruning expired primary index 0 up to slot {} (max requested: {}) " ,
index0 . max_slot , to_slot
) ;
2020-04-04 20:24:06 -07:00
Some ( 0 )
} else if index1 . frozen & & to_slot > index1 . max_slot {
2021-05-28 00:42:56 -07:00
info! (
" Pruning expired primary index 1 up to slot {} (max requested: {}) " ,
index1 . max_slot , to_slot
) ;
2020-04-04 20:24:06 -07:00
Some ( 1 )
} else {
None
} ;
2021-05-28 00:42:56 -07:00
if let Some ( purge_target_primary_index ) = purge_target_primary_index {
* w_active_transaction_status_index = purge_target_primary_index ;
2020-04-04 20:24:06 -07:00
if index0 . frozen {
index0 . max_slot = 0
} ;
index0 . frozen = ! index0 . frozen ;
batch . put ::< cf ::TransactionStatusIndex > ( 0 , & index0 ) ? ;
if index1 . frozen {
index1 . max_slot = 0
} ;
index1 . frozen = ! index1 . frozen ;
batch . put ::< cf ::TransactionStatusIndex > ( 1 , & index1 ) ? ;
}
2021-05-28 00:42:56 -07:00
Ok ( purge_target_primary_index )
2020-04-04 20:24:06 -07:00
}
}
2021-05-28 00:42:56 -07:00
fn get_primary_index_to_write (
2020-04-04 20:24:06 -07:00
& self ,
2020-04-08 12:50:39 -07:00
slot : Slot ,
2021-05-28 00:42:56 -07:00
// take WriteGuard to require critical section semantics at call site
w_active_transaction_status_index : & RwLockWriteGuard < Slot > ,
2020-04-08 12:50:39 -07:00
) -> Result < u64 > {
2021-05-28 00:42:56 -07:00
let i = * * w_active_transaction_status_index ;
2020-04-04 20:24:06 -07:00
let mut index_meta = self . transaction_status_index_cf . get ( i ) ? . unwrap ( ) ;
if slot > index_meta . max_slot {
assert! ( ! index_meta . frozen ) ;
index_meta . max_slot = slot ;
self . transaction_status_index_cf . put ( i , & index_meta ) ? ;
}
2020-04-08 12:50:39 -07:00
Ok ( i )
2020-04-04 20:24:06 -07:00
}
2020-03-23 12:49:21 -07:00
pub fn read_transaction_status (
& self ,
2020-04-04 20:24:06 -07:00
index : ( Signature , Slot ) ,
2020-03-26 13:29:30 -07:00
) -> Result < Option < TransactionStatusMeta > > {
2020-04-04 20:24:06 -07:00
let ( signature , slot ) = index ;
2021-03-05 08:05:35 -08:00
let result = self
. transaction_status_cf
. get_protobuf_or_bincode ::< StoredTransactionStatusMeta > ( ( 0 , signature , slot ) ) ? ;
2020-04-04 20:24:06 -07:00
if result . is_none ( ) {
2021-03-05 08:05:35 -08:00
Ok ( self
. transaction_status_cf
. get_protobuf_or_bincode ::< StoredTransactionStatusMeta > ( ( 1 , signature , slot ) ) ?
. and_then ( | meta | meta . try_into ( ) . ok ( ) ) )
2020-04-04 20:24:06 -07:00
} else {
2021-03-05 08:05:35 -08:00
Ok ( result . and_then ( | meta | meta . try_into ( ) . ok ( ) ) )
2020-04-04 20:24:06 -07:00
}
2020-03-23 12:49:21 -07:00
}
2019-11-20 15:43:10 -08:00
pub fn write_transaction_status (
& self ,
2020-04-08 12:50:39 -07:00
slot : Slot ,
signature : Signature ,
writable_keys : Vec < & Pubkey > ,
readonly_keys : Vec < & Pubkey > ,
2021-03-05 08:05:35 -08:00
status : TransactionStatusMeta ,
2019-11-20 15:43:10 -08:00
) -> Result < ( ) > {
2021-03-05 08:05:35 -08:00
let status = status . into ( ) ;
2020-04-08 12:50:39 -07:00
// This write lock prevents interleaving issues with the transaction_status_index_cf by gating
// writes to that column
2021-05-28 00:42:56 -07:00
let w_active_transaction_status_index =
2020-04-04 20:24:06 -07:00
self . active_transaction_status_index . write ( ) . unwrap ( ) ;
2021-05-28 00:42:56 -07:00
let primary_index =
self . get_primary_index_to_write ( slot , & w_active_transaction_status_index ) ? ;
2020-04-08 12:50:39 -07:00
self . transaction_status_cf
2021-03-05 08:05:35 -08:00
. put_protobuf ( ( primary_index , signature , slot ) , & status ) ? ;
2020-04-08 12:50:39 -07:00
for address in writable_keys {
self . address_signatures_cf . put (
2020-04-09 20:21:31 -07:00
( primary_index , * address , slot , signature ) ,
& AddressSignatureMeta { writeable : true } ,
2020-04-08 12:50:39 -07:00
) ? ;
}
for address in readonly_keys {
self . address_signatures_cf . put (
2020-04-09 20:21:31 -07:00
( primary_index , * address , slot , signature ) ,
& AddressSignatureMeta { writeable : false } ,
2020-04-08 12:50:39 -07:00
) ? ;
}
Ok ( ( ) )
2019-11-20 15:43:10 -08:00
}
2021-06-04 15:40:27 -07:00
fn check_lowest_cleanup_slot ( & self , slot : Slot ) -> Result < std ::sync ::RwLockReadGuard < Slot > > {
// lowest_cleanup_slot is the last slot that was not cleaned up by LedgerCleanupService
let lowest_cleanup_slot = self . lowest_cleanup_slot . read ( ) . unwrap ( ) ;
if * lowest_cleanup_slot > 0 & & * lowest_cleanup_slot > = slot {
return Err ( BlockstoreError ::SlotCleanedUp ) ;
}
// Make caller hold this lock properly; otherwise LedgerCleanupService can purge/compact
// needed slots here at any given moment
Ok ( lowest_cleanup_slot )
}
2021-05-28 00:42:56 -07:00
/// Returns the cleanup read guard together with the first slot that is
/// guaranteed still available (`lowest_cleanup_slot + 1`).
fn ensure_lowest_cleanup_slot(&self) -> (std::sync::RwLockReadGuard<Slot>, Slot) {
    // Ensures consistent result by using lowest_cleanup_slot as the lower bound
    // for reading columns that do not employ strong read consistency with slot-based
    // delete_range
    let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap();
    let lowest_available_slot = (*lowest_cleanup_slot)
        .checked_add(1)
        .expect("overflow from trusted value");
    // Make caller hold this lock properly; otherwise LedgerCleanupService can purge/compact
    // needed slots here at any given moment.
    // Blockstore callers, like rpc, can process concurrent read queries
    (lowest_cleanup_slot, lowest_available_slot)
}
2021-03-26 15:47:35 -07:00
// Returns a transaction status, as well as a loop counter for unit testing
2020-04-06 03:04:54 -07:00
fn get_transaction_status_with_counter (
& self ,
signature : Signature ,
2021-03-31 20:04:00 -07:00
confirmed_unrooted_slots : & [ Slot ] ,
2020-04-06 03:04:54 -07:00
) -> Result < ( Option < ( Slot , TransactionStatusMeta ) > , u64 ) > {
let mut counter = 0 ;
2021-05-28 00:42:56 -07:00
let ( lock , lowest_available_slot ) = self . ensure_lowest_cleanup_slot ( ) ;
2020-04-06 03:04:54 -07:00
for transaction_status_cf_primary_index in 0 ..= 1 {
let index_iterator = self . transaction_status_cf . iter ( IteratorMode ::From (
2021-05-28 00:42:56 -07:00
(
transaction_status_cf_primary_index ,
signature ,
lowest_available_slot ,
) ,
2020-04-06 03:04:54 -07:00
IteratorDirection ::Forward ,
) ) ? ;
2021-03-05 08:05:35 -08:00
for ( ( i , sig , slot ) , _data ) in index_iterator {
2020-04-06 03:04:54 -07:00
counter + = 1 ;
2020-08-06 15:21:46 -07:00
if i ! = transaction_status_cf_primary_index | | sig ! = signature {
2020-04-06 03:04:54 -07:00
break ;
}
2021-03-31 20:04:00 -07:00
if ! self . is_root ( slot ) & & ! confirmed_unrooted_slots . contains ( & slot ) {
2021-03-26 15:47:35 -07:00
continue ;
2020-04-06 03:04:54 -07:00
}
2021-03-26 15:47:35 -07:00
let status = self
. transaction_status_cf
. get_protobuf_or_bincode ::< StoredTransactionStatusMeta > ( ( i , sig , slot ) ) ?
. and_then ( | status | status . try_into ( ) . ok ( ) )
. map ( | status | ( slot , status ) ) ;
return Ok ( ( status , counter ) ) ;
2020-04-06 03:04:54 -07:00
}
}
2021-05-28 00:42:56 -07:00
drop ( lock ) ;
2020-04-06 03:04:54 -07:00
Ok ( ( None , counter ) )
}
2021-03-31 20:04:00 -07:00
/// Returns a transaction status, considering rooted slots only.
pub fn get_rooted_transaction_status(
    &self,
    signature: Signature,
) -> Result<Option<(Slot, TransactionStatusMeta)>> {
    datapoint_info!(
        "blockstore-rpc-api",
        (
            "method",
            "get_rooted_transaction_status".to_string(),
            String
        )
    );
    // No additional unrooted slots are allowed
    self.get_transaction_status(signature, &[])
}
2021-03-26 15:47:35 -07:00
/// Returns a transaction status
2020-04-06 03:04:54 -07:00
pub fn get_transaction_status (
& self ,
signature : Signature ,
2021-03-31 20:04:00 -07:00
confirmed_unrooted_slots : & [ Slot ] ,
2020-04-06 03:04:54 -07:00
) -> Result < Option < ( Slot , TransactionStatusMeta ) > > {
2020-04-15 17:09:14 -07:00
datapoint_info! (
" blockstore-rpc-api " ,
( " method " , " get_transaction_status " . to_string ( ) , String )
) ;
2021-03-31 20:04:00 -07:00
self . get_transaction_status_with_counter ( signature , confirmed_unrooted_slots )
2020-04-06 03:04:54 -07:00
. map ( | ( status , _ ) | status )
}
2020-04-08 23:57:30 -07:00
/// Returns a complete transaction if it was processed in a root
2021-03-31 20:04:00 -07:00
pub fn get_rooted_transaction (
2020-04-08 23:57:30 -07:00
& self ,
signature : Signature ,
) -> Result < Option < ConfirmedTransaction > > {
2020-04-15 17:09:14 -07:00
datapoint_info! (
" blockstore-rpc-api " ,
2021-03-31 20:04:00 -07:00
( " method " , " get_rooted_transaction " . to_string ( ) , String )
2020-04-15 17:09:14 -07:00
) ;
2021-03-31 20:04:00 -07:00
self . get_transaction_with_status ( signature , & [ ] )
2021-03-26 15:47:35 -07:00
}
/// Returns a complete transaction
pub fn get_complete_transaction (
& self ,
signature : Signature ,
2021-03-31 20:04:00 -07:00
highest_confirmed_slot : Slot ,
2021-03-26 15:47:35 -07:00
) -> Result < Option < ConfirmedTransaction > > {
datapoint_info! (
" blockstore-rpc-api " ,
( " method " , " get_complete_transaction " . to_string ( ) , String )
) ;
2021-04-04 21:14:02 -07:00
let last_root = self . last_root ( ) ;
2021-03-31 20:04:00 -07:00
let confirmed_unrooted_slots : Vec < _ > =
AncestorIterator ::new_inclusive ( highest_confirmed_slot , self )
2021-04-04 21:14:02 -07:00
. take_while ( | & slot | slot > last_root )
2021-03-31 20:04:00 -07:00
. collect ( ) ;
self . get_transaction_with_status ( signature , & confirmed_unrooted_slots )
2021-03-26 15:47:35 -07:00
}
fn get_transaction_with_status (
& self ,
signature : Signature ,
2021-03-31 20:04:00 -07:00
confirmed_unrooted_slots : & [ Slot ] ,
2021-03-26 15:47:35 -07:00
) -> Result < Option < ConfirmedTransaction > > {
2021-03-31 20:04:00 -07:00
if let Some ( ( slot , status ) ) =
self . get_transaction_status ( signature , confirmed_unrooted_slots ) ?
{
2020-08-06 15:21:46 -07:00
let transaction = self
. find_transaction_in_slot ( slot , signature ) ?
. ok_or ( BlockstoreError ::TransactionStatusSlotMismatch ) ? ; // Should not happen
2021-01-20 22:10:35 -08:00
let block_time = self . get_block_time ( slot ) ? ;
2020-04-08 23:57:30 -07:00
Ok ( Some ( ConfirmedTransaction {
slot ,
transaction : TransactionWithStatusMeta {
2020-09-23 22:10:29 -07:00
transaction ,
meta : Some ( status ) ,
2020-04-08 23:57:30 -07:00
} ,
2021-01-20 22:10:35 -08:00
block_time ,
2020-04-08 23:57:30 -07:00
} ) )
} else {
Ok ( None )
}
}
fn find_transaction_in_slot (
& self ,
slot : Slot ,
signature : Signature ,
) -> Result < Option < Transaction > > {
2020-04-09 13:09:59 -07:00
let slot_entries = self . get_slot_entries ( slot , 0 ) ? ;
2020-04-08 23:57:30 -07:00
Ok ( slot_entries
. iter ( )
. cloned ( )
. flat_map ( | entry | entry . transactions )
2021-02-13 22:32:43 -08:00
. map ( | transaction | {
if let Err ( err ) = transaction . sanitize ( ) {
warn! (
" Blockstore::find_transaction_in_slot sanitize failed: {:?}, \
slot : { :? } , \
{ :? } " ,
err , slot , transaction ,
) ;
}
transaction
} )
2020-04-08 23:57:30 -07:00
. find ( | transaction | transaction . signatures [ 0 ] = = signature ) )
}
2021-03-26 15:47:35 -07:00
// Returns all rooted signatures for an address, ordered by slot that the transaction was
// processed in. Within each slot the transactions will be ordered by signature, and NOT by
2020-08-05 11:21:22 -07:00
// the order in which the transactions exist in the block
2021-03-31 21:35:57 -07:00
//
// DEPRECATED
2020-04-09 20:21:31 -07:00
fn find_address_signatures (
& self ,
pubkey : Pubkey ,
start_slot : Slot ,
end_slot : Slot ,
) -> Result < Vec < ( Slot , Signature ) > > {
2021-05-28 00:42:56 -07:00
let ( lock , lowest_available_slot ) = self . ensure_lowest_cleanup_slot ( ) ;
2020-04-09 20:21:31 -07:00
let mut signatures : Vec < ( Slot , Signature ) > = vec! [ ] ;
for transaction_status_cf_primary_index in 0 ..= 1 {
let index_iterator = self . address_signatures_cf . iter ( IteratorMode ::From (
(
transaction_status_cf_primary_index ,
pubkey ,
2021-05-28 00:42:56 -07:00
start_slot . max ( lowest_available_slot ) ,
2020-04-09 20:21:31 -07:00
Signature ::default ( ) ,
) ,
IteratorDirection ::Forward ,
) ) ? ;
for ( ( i , address , slot , signature ) , _ ) in index_iterator {
if i ! = transaction_status_cf_primary_index | | slot > end_slot | | address ! = pubkey
{
break ;
}
if self . is_root ( slot ) {
signatures . push ( ( slot , signature ) ) ;
}
}
}
2021-05-28 00:42:56 -07:00
drop ( lock ) ;
2020-08-10 09:27:38 -07:00
signatures . sort_by ( | a , b | a . 0. partial_cmp ( & b . 0 ) . unwrap ( ) . then ( a . 1. cmp ( & b . 1 ) ) ) ;
2020-04-09 20:21:31 -07:00
Ok ( signatures )
}
2021-03-31 21:35:57 -07:00
// Returns all signatures for an address in a particular slot, regardless of whether that slot
// has been rooted. The transactions will be ordered by signature, and NOT by the order in
// which the transactions exist in the block
fn find_address_signatures_for_slot (
& self ,
pubkey : Pubkey ,
slot : Slot ,
) -> Result < Vec < ( Slot , Signature ) > > {
2021-05-28 00:42:56 -07:00
let ( lock , lowest_available_slot ) = self . ensure_lowest_cleanup_slot ( ) ;
2021-03-31 21:35:57 -07:00
let mut signatures : Vec < ( Slot , Signature ) > = vec! [ ] ;
for transaction_status_cf_primary_index in 0 ..= 1 {
let index_iterator = self . address_signatures_cf . iter ( IteratorMode ::From (
(
transaction_status_cf_primary_index ,
pubkey ,
2021-05-28 00:42:56 -07:00
slot . max ( lowest_available_slot ) ,
2021-03-31 21:35:57 -07:00
Signature ::default ( ) ,
) ,
IteratorDirection ::Forward ,
) ) ? ;
for ( ( i , address , transaction_slot , signature ) , _ ) in index_iterator {
if i ! = transaction_status_cf_primary_index
| | transaction_slot > slot
| | address ! = pubkey
{
break ;
}
signatures . push ( ( slot , signature ) ) ;
}
}
2021-05-28 00:42:56 -07:00
drop ( lock ) ;
2021-03-31 21:35:57 -07:00
signatures . sort_by ( | a , b | a . 0. partial_cmp ( & b . 0 ) . unwrap ( ) . then ( a . 1. cmp ( & b . 1 ) ) ) ;
Ok ( signatures )
}
// DEPRECATED
2020-04-09 20:21:31 -07:00
pub fn get_confirmed_signatures_for_address (
& self ,
pubkey : Pubkey ,
start_slot : Slot ,
end_slot : Slot ,
) -> Result < Vec < Signature > > {
2020-04-15 17:09:14 -07:00
datapoint_info! (
" blockstore-rpc-api " ,
(
" method " ,
" get_confirmed_signatures_for_address " . to_string ( ) ,
String
)
) ;
2020-04-09 20:21:31 -07:00
self . find_address_signatures ( pubkey , start_slot , end_slot )
. map ( | signatures | signatures . iter ( ) . map ( | ( _ , signature ) | * signature ) . collect ( ) )
}
2020-07-27 11:42:49 -07:00
/// Returns up to `limit` confirmed transaction statuses that reference `address`,
/// newest first, optionally windowed by the `before` and `until` signatures.
/// Slots above `last_root()` count as confirmed only if they are ancestors of
/// `highest_slot` (the `confirmed_unrooted_slots` set below).
pub fn get_confirmed_signatures_for_address2(
    &self,
    address: Pubkey,
    highest_slot: Slot, // highest_confirmed_root or highest_confirmed_slot
    before: Option<Signature>,
    until: Option<Signature>,
    limit: usize,
) -> Result<Vec<ConfirmedTransactionStatusWithSignature>> {
    // Record RPC-api usage metrics.
    datapoint_info!(
        "blockstore-rpc-api",
        (
            "method",
            "get_confirmed_signatures_for_address2".to_string(),
            String
        )
    );
    // Unrooted ancestors of `highest_slot` are still eligible results.
    let last_root = self.last_root();
    let confirmed_unrooted_slots: Vec<_> = AncestorIterator::new_inclusive(highest_slot, self)
        .take_while(|&slot| slot > last_root)
        .collect();

    // Figure the `slot` to start listing signatures at, based on the ledger location of the
    // `before` signature if present. Also generate a HashSet of signatures that should
    // be excluded from the results.
    let mut get_before_slot_timer = Measure::start("get_before_slot_timer");
    let (slot, mut before_excluded_signatures) = match before {
        None => (highest_slot, None),
        Some(before) => {
            let transaction_status =
                self.get_transaction_status(before, &confirmed_unrooted_slots)?;
            match transaction_status {
                // Unknown `before` signature: nothing to page from.
                None => return Ok(vec![]),
                Some((slot, _)) => {
                    let block = self.get_complete_block(slot, false).map_err(|err| {
                        BlockstoreError::Io(IoError::new(
                            ErrorKind::Other,
                            format!("Unable to get block: {}", err),
                        ))
                    })?;

                    // Load all signatures for the block (first signature of each transaction).
                    let mut slot_signatures: Vec<_> = block
                        .transactions
                        .into_iter()
                        .filter_map(|transaction_with_meta| {
                            transaction_with_meta
                                .transaction
                                .signatures
                                .into_iter()
                                .next()
                        })
                        .collect();

                    // Sort signatures as a way to ensure a stable ordering within a slot, as
                    // the AddressSignatures column is ordered by signatures within a slot,
                    // not by block ordering
                    slot_signatures.sort();
                    slot_signatures.reverse();

                    // Exclude `before` itself and everything that sorts at or before it.
                    if let Some(pos) = slot_signatures.iter().position(|&x| x == before) {
                        slot_signatures.truncate(pos + 1);
                    }

                    (
                        slot,
                        Some(slot_signatures.into_iter().collect::<HashSet<_>>()),
                    )
                }
            }
        }
    };
    get_before_slot_timer.stop();

    // Generate a HashSet of signatures that should be excluded from the results based on
    // `until` signature
    let mut get_until_slot_timer = Measure::start("get_until_slot_timer");
    let (lowest_slot, until_excluded_signatures) = match until {
        None => (0, HashSet::new()),
        Some(until) => {
            let transaction_status =
                self.get_transaction_status(until, &confirmed_unrooted_slots)?;
            match transaction_status {
                // Unknown `until` signature: no lower bound applies.
                None => (0, HashSet::new()),
                Some((slot, _)) => {
                    let block = self.get_complete_block(slot, false).map_err(|err| {
                        BlockstoreError::Io(IoError::new(
                            ErrorKind::Other,
                            format!("Unable to get block: {}", err),
                        ))
                    })?;

                    // Load all signatures for the block
                    let mut slot_signatures: Vec<_> = block
                        .transactions
                        .into_iter()
                        .filter_map(|transaction_with_meta| {
                            transaction_with_meta
                                .transaction
                                .signatures
                                .into_iter()
                                .next()
                        })
                        .collect();

                    // Sort signatures as a way to ensure a stable ordering within a slot, as
                    // the AddressSignatures column is ordered by signatures within a slot,
                    // not by block ordering
                    slot_signatures.sort();
                    slot_signatures.reverse();

                    // Exclude `until` and everything after it in reverse order.
                    if let Some(pos) = slot_signatures.iter().position(|&x| x == until) {
                        slot_signatures = slot_signatures.split_off(pos);
                    }
                    (slot, slot_signatures.into_iter().collect::<HashSet<_>>())
                }
            }
        }
    };
    get_until_slot_timer.stop();

    // Fetch the list of signatures that affect the given address
    let first_available_block = self.get_first_available_block()?;
    let mut address_signatures = vec![];

    // Get signatures in `slot` (the slot containing `before`, or `highest_slot`).
    let mut get_initial_slot_timer = Measure::start("get_initial_slot_timer");
    let mut signatures = self.find_address_signatures_for_slot(address, slot)?;
    signatures.reverse();
    if let Some(excluded_signatures) = before_excluded_signatures.take() {
        address_signatures.extend(
            signatures
                .into_iter()
                .filter(|(_, signature)| !excluded_signatures.contains(signature)),
        )
    } else {
        address_signatures.append(&mut signatures);
    }
    get_initial_slot_timer.stop();

    // Check the active_transaction_status_index to see if it contains slot. If so, start with
    // that index, as it will contain higher slots
    let starting_primary_index = *self.active_transaction_status_index.read().unwrap();
    let next_primary_index = if starting_primary_index == 0 { 1 } else { 0 };
    let next_max_slot = self
        .transaction_status_index_cf
        .get(next_primary_index)?
        .unwrap()
        .max_slot;
    let mut starting_primary_index_iter_timer = Measure::start("starting_primary_index_iter");
    if slot > next_max_slot {
        let mut starting_iterator = self.address_signatures_cf.iter(IteratorMode::From(
            (starting_primary_index, address, slot, Signature::default()),
            IteratorDirection::Reverse,
        ))?;

        // Iterate through starting_iterator until limit is reached
        while address_signatures.len() < limit {
            if let Some(((i, key_address, slot, signature), _)) = starting_iterator.next() {
                // Stop at the primary-index boundary or below the `until` floor.
                if slot == next_max_slot || slot < lowest_slot {
                    break;
                }
                if i == starting_primary_index
                    && key_address == address
                    && slot >= first_available_block
                {
                    // Only include confirmed slots: rooted, or unrooted ancestors of
                    // `highest_slot`.
                    if self.is_root(slot) || confirmed_unrooted_slots.contains(&slot) {
                        address_signatures.push((slot, signature));
                    }
                    continue;
                }
            }
            break;
        }

        // Handle slots that cross primary indexes
        if next_max_slot >= lowest_slot {
            let mut signatures =
                self.find_address_signatures_for_slot(address, next_max_slot)?;
            signatures.reverse();
            address_signatures.append(&mut signatures);
        }
    }
    starting_primary_index_iter_timer.stop();

    // Iterate through next_iterator until limit is reached
    let mut next_primary_index_iter_timer = Measure::start("next_primary_index_iter_timer");
    let mut next_iterator = self.address_signatures_cf.iter(IteratorMode::From(
        (next_primary_index, address, slot, Signature::default()),
        IteratorDirection::Reverse,
    ))?;
    while address_signatures.len() < limit {
        if let Some(((i, key_address, slot, signature), _)) = next_iterator.next() {
            // Skip next_max_slot, which is already included
            if slot == next_max_slot {
                continue;
            }
            if slot < lowest_slot {
                break;
            }
            if i == next_primary_index
                && key_address == address
                && slot >= first_available_block
            {
                // Only include confirmed slots, as above.
                if self.is_root(slot) || confirmed_unrooted_slots.contains(&slot) {
                    address_signatures.push((slot, signature));
                }
                continue;
            }
        }
        break;
    }
    next_primary_index_iter_timer.stop();

    // Apply the `until` exclusion set, then enforce the limit.
    let mut address_signatures: Vec<(Slot, Signature)> = address_signatures
        .into_iter()
        .filter(|(_, signature)| !until_excluded_signatures.contains(signature))
        .collect();
    address_signatures.truncate(limit);

    // Fill in the status information for each found transaction
    let mut get_status_info_timer = Measure::start("get_status_info_timer");
    let mut infos = vec![];
    for (slot, signature) in address_signatures.into_iter() {
        let transaction_status =
            self.get_transaction_status(signature, &confirmed_unrooted_slots)?;
        let err = transaction_status.and_then(|(_slot, status)| status.status.err());
        let block_time = self.get_block_time(slot)?;
        infos.push(ConfirmedTransactionStatusWithSignature {
            signature,
            slot,
            err,
            memo: None,
            block_time,
        });
    }
    get_status_info_timer.stop();

    // Emit per-phase timing metrics.
    datapoint_info!(
        "blockstore-get-conf-sigs-for-addr-2",
        (
            "get_before_slot_us",
            get_before_slot_timer.as_us() as i64,
            i64
        ),
        (
            "get_initial_slot_us",
            get_initial_slot_timer.as_us() as i64,
            i64
        ),
        (
            "starting_primary_index_iter_us",
            starting_primary_index_iter_timer.as_us() as i64,
            i64
        ),
        (
            "next_primary_index_iter_us",
            next_primary_index_iter_timer.as_us() as i64,
            i64
        ),
        (
            "get_status_info_us",
            get_status_info_timer.as_us() as i64,
            i64
        ),
        (
            "get_until_slot_us",
            get_until_slot_timer.as_us() as i64,
            i64
        )
    );

    Ok(infos)
}
2020-03-26 13:29:30 -07:00
/// Reads the rewards recorded for slot `index`, if any, converting from the
/// stored (protobuf or legacy bincode) representation.
pub fn read_rewards(&self, index: Slot) -> Result<Option<Rewards>> {
    let stored = self.rewards_cf.get_protobuf_or_bincode::<Rewards>(index)?;
    Ok(stored.map(Into::into))
}
2020-03-26 13:29:30 -07:00
/// Persists `rewards` for slot `index`, converting to the protobuf storage
/// representation before writing.
pub fn write_rewards(&self, index: Slot, rewards: Rewards) -> Result<()> {
    let stored_rewards = rewards.into();
    self.rewards_cf.put_protobuf(index, &stored_rewards)
}
2020-09-22 12:26:32 -07:00
/// Returns up to `num` of the most recent performance samples, newest first
/// (iteration starts from the end of the PerfSamples column).
pub fn get_recent_perf_samples(&self, num: usize) -> Result<Vec<(Slot, PerfSample)>> {
    let iterator = self.db.iter::<cf::PerfSamples>(IteratorMode::End)?;
    let samples = iterator
        .take(num)
        // Stored samples were serialized by this process; a decode failure is a bug.
        .map(|(slot, data)| (slot, deserialize(&data).unwrap()))
        .collect();
    Ok(samples)
}
/// Records `perf_sample` under slot `index` in the PerfSamples column.
pub fn write_perf_sample(&self, index: Slot, perf_sample: &PerfSample) -> Result<()> {
    let column = &self.perf_samples_cf;
    column.put(index, perf_sample)
}
2021-07-01 09:32:41 -07:00
/// Returns every recorded (program id, cost) pair from the ProgramCosts column.
pub fn read_program_costs(&self) -> Result<Vec<(Pubkey, u64)>> {
    let mut costs = vec![];
    for (pubkey, data) in self.db.iter::<cf::ProgramCosts>(IteratorMode::End)? {
        // Entries were written by this process; a decode failure is a bug.
        let program_cost: ProgramCost = deserialize(&data).unwrap();
        costs.push((pubkey, program_cost.cost));
    }
    Ok(costs)
}
/// Stores `value` as the cost for program `key`.
pub fn write_program_cost(&self, key: &Pubkey, value: &u64) -> Result<()> {
    let record = ProgramCost { cost: *value };
    self.program_costs_cf.put(*key, &record)
}
/// Removes the recorded cost for program `key`, if present.
pub fn delete_program_cost(&self, key: &Pubkey) -> Result<()> {
    let column = &self.program_costs_cf;
    column.delete(*key)
}
2019-09-03 21:32:51 -07:00
/// Returns the entry vector for the slot starting with `shred_start_index`
2020-04-09 13:09:59 -07:00
pub fn get_slot_entries ( & self , slot : Slot , shred_start_index : u64 ) -> Result < Vec < Entry > > {
2020-04-09 20:10:51 -07:00
self . get_slot_entries_with_shred_info ( slot , shred_start_index , false )
2019-02-26 21:57:45 -08:00
. map ( | x | x . 0 )
2019-02-07 15:10:54 -08:00
}
2019-11-08 17:21:54 -08:00
/// Returns the entry vector for the slot starting with `shred_start_index`, the number of
/// shreds that comprise the entry vector, and whether the slot is full (consumed all shreds).
///
/// Returns `Err(BlockstoreError::DeadSlot)` if the slot has been marked dead and
/// `allow_dead_slots` is false; returns `(vec![], 0, false)` if no completed data
/// ranges exist at or after `start_index`.
pub fn get_slot_entries_with_shred_info(
    &self,
    slot: Slot,
    start_index: u64,
    allow_dead_slots: bool,
) -> Result<(Vec<Entry>, u64, bool)> {
    let (completed_ranges, slot_meta) = self.get_completed_ranges(slot, start_index)?;

    // Check if the slot is dead *after* fetching completed ranges to avoid a race
    // where a slot is marked dead by another thread before the completed range query finishes.
    // This should be sufficient because full slots will never be marked dead from another thread,
    // this can only happen during entry processing during replay stage.
    if self.is_dead(slot) && !allow_dead_slots {
        return Err(BlockstoreError::DeadSlot);
    } else if completed_ranges.is_empty() {
        return Ok((vec![], 0, false));
    }

    // `completed_ranges` is non-empty, so a SlotMeta was found above.
    let slot_meta = slot_meta.unwrap();
    // Total shreds consumed: from `start_index` through the end of the last range.
    let num_shreds = completed_ranges
        .last()
        .map(|(_, end_index)| u64::from(*end_index) - start_index + 1)
        .unwrap_or(0);

    // Deshred each completed range in parallel; `collect` on Result short-circuits
    // on the first failing range.
    let entries: Result<Vec<Vec<Entry>>> = PAR_THREAD_POOL.with(|thread_pool| {
        thread_pool.borrow().install(|| {
            completed_ranges
                .par_iter()
                .map(|(start_index, end_index)| {
                    self.get_entries_in_data_block(
                        slot,
                        *start_index,
                        *end_index,
                        Some(&slot_meta),
                    )
                })
                .collect()
        })
    });

    let entries: Vec<Entry> = entries?.into_iter().flatten().collect();
    Ok((entries, num_shreds, slot_meta.is_full()))
}
2020-06-02 18:49:31 -07:00
fn get_completed_ranges (
& self ,
slot : Slot ,
start_index : u64 ,
) -> Result < ( CompletedRanges , Option < SlotMeta > ) > {
2021-06-04 15:40:27 -07:00
let _lock = self . check_lowest_cleanup_slot ( slot ) ? ;
2020-06-02 18:49:31 -07:00
let slot_meta_cf = self . db . column ::< cf ::SlotMeta > ( ) ;
let slot_meta = slot_meta_cf . get ( slot ) ? ;
if slot_meta . is_none ( ) {
return Ok ( ( vec! [ ] , slot_meta ) ) ;
}
let slot_meta = slot_meta . unwrap ( ) ;
// Find all the ranges for the completed data blocks
let completed_ranges = Self ::get_completed_data_ranges (
start_index as u32 ,
& slot_meta . completed_data_indexes [ .. ] ,
slot_meta . consumed as u32 ,
) ;
Ok ( ( completed_ranges , Some ( slot_meta ) ) )
}
2019-10-21 16:15:10 -07:00
// Get the range of indexes [start_index, end_index] of every completed data block
fn get_completed_data_ranges (
mut start_index : u32 ,
completed_data_end_indexes : & [ u32 ] ,
consumed : u32 ,
2020-06-02 18:49:31 -07:00
) -> CompletedRanges {
2019-10-21 16:15:10 -07:00
let mut completed_data_ranges = vec! [ ] ;
let floor = completed_data_end_indexes
. iter ( )
. position ( | i | * i > = start_index )
. unwrap_or_else ( | | completed_data_end_indexes . len ( ) ) ;
2019-08-20 17:16:06 -07:00
2019-10-21 16:15:10 -07:00
for i in & completed_data_end_indexes [ floor as usize .. ] {
// `consumed` is the next missing shred index, but shred `i` existing in
// completed_data_end_indexes implies it's not missing
assert! ( * i ! = consumed ) ;
if * i < consumed {
completed_data_ranges . push ( ( start_index , * i ) ) ;
start_index = * i + 1 ;
}
2019-10-16 14:32:18 -07:00
}
2019-08-20 17:16:06 -07:00
2019-10-21 16:15:10 -07:00
completed_data_ranges
2019-10-16 14:32:18 -07:00
}
2019-08-20 17:16:06 -07:00
2020-09-01 22:06:06 -07:00
/// Fetches data shreds `[start_index, end_index]` for `slot`, reassembles their
/// payload, and deserializes it into entries.
///
/// When `slot_meta` is provided, the range came from `completed_data_indexes`,
/// so a missing shred violates an invariant and panics; otherwise a missing
/// shred is surfaced as `InvalidShredData`.
pub fn get_entries_in_data_block(
    &self,
    slot: Slot,
    start_index: u32,
    end_index: u32,
    slot_meta: Option<&SlotMeta>,
) -> Result<Vec<Entry>> {
    let data_shred_cf = self.db.column::<cf::ShredData>();

    // Short circuit on first error
    let data_shreds: Result<Vec<Shred>> = (start_index..=end_index)
        .map(|i| {
            data_shred_cf
                .get_bytes((slot, u64::from(i)))
                .and_then(|serialized_shred| {
                    if serialized_shred.is_none() {
                        if let Some(slot_meta) = slot_meta {
                            // Caller supplied a SlotMeta, so the range was derived from
                            // completed_data_indexes: every shred in it must be present.
                            panic!(
                                "Shred with
                                slot: {},
                                index: {},
                                consumed: {},
                                completed_indexes: {:?}
                                must exist if shred index was included in a range: {} {}",
                                slot,
                                i,
                                slot_meta.consumed,
                                slot_meta.completed_data_indexes,
                                start_index,
                                end_index
                            );
                        } else {
                            return Err(BlockstoreError::InvalidShredData(Box::new(
                                bincode::ErrorKind::Custom(format!(
                                    "Missing shred for slot {}, index {}",
                                    slot, i
                                )),
                            )));
                        }
                    }

                    // Reconstruct the Shred from its stored payload bytes.
                    Shred::new_from_serialized_shred(serialized_shred.unwrap()).map_err(|err| {
                        BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(
                            format!(
                                "Could not reconstruct shred from shred payload: {:?}",
                                err
                            ),
                        )))
                    })
                })
        })
        .collect();
    let data_shreds = data_shreds?;

    // A completed range must end on a data-complete (or last-in-slot) shred.
    let last_shred = data_shreds.last().unwrap();
    assert!(last_shred.data_complete() || last_shred.last_in_slot());

    // Strip shred headers and concatenate the data payloads back into one block.
    let deshred_payload = Shredder::deshred(&data_shreds).map_err(|e| {
        BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(format!(
            "Could not reconstruct data block from constituent shreds, error: {:?}",
            e
        ))))
    })?;

    debug!("{:?} shreds in last FEC set", data_shreds.len(),);
    bincode::deserialize::<Vec<Entry>>(&deshred_payload).map_err(|e| {
        BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(format!(
            "could not reconstruct entries: {:?}",
            e
        ))))
    })
}
2020-06-02 18:49:31 -07:00
/// Best-effort variant of slot-entry retrieval: returns whatever entries can be
/// reconstructed for `slot` starting at `start_index`, silently dropping ranges
/// that fail to deshred or deserialize.
fn get_any_valid_slot_entries(&self, slot: Slot, start_index: u64) -> Vec<Entry> {
    let (completed_ranges, slot_meta) = self
        .get_completed_ranges(slot, start_index)
        .unwrap_or_default();
    if completed_ranges.is_empty() {
        return vec![];
    }
    // Non-empty ranges imply a SlotMeta was found.
    let slot_meta = slot_meta.unwrap();

    let entries: Vec<Vec<Entry>> = PAR_THREAD_POOL_ALL_CPUS.with(|thread_pool| {
        thread_pool.borrow().install(|| {
            completed_ranges
                .par_iter()
                .map(|(range_start, range_end)| {
                    // Errors within one range yield an empty Vec rather than aborting.
                    self.get_entries_in_data_block(
                        slot,
                        *range_start,
                        *range_end,
                        Some(&slot_meta),
                    )
                    .unwrap_or_default()
                })
                .collect()
        })
    });
    entries.into_iter().flatten().collect()
}
2019-03-05 14:18:29 -08:00
// Returns slots connecting to any element of the list `slots`.
/// Maps each queried slot that has a SlotMeta to the list of its next slots;
/// slots without a meta are omitted. Any database error aborts the whole lookup.
pub fn get_slots_since(&self, slots: &[u64]) -> Result<HashMap<u64, Vec<u64>>> {
    let mut result = HashMap::new();
    for slot in slots {
        // Return error if there was a database error during lookup of any of the
        // slot indexes
        if let Some(meta) = self.meta(*slot)? {
            result.insert(*slot, meta.next_slots.to_vec());
        }
    }
    Ok(result)
}
2019-01-08 15:53:44 -08:00
2019-11-02 00:38:30 -07:00
pub fn is_root ( & self , slot : Slot ) -> bool {
2020-08-01 08:44:32 -07:00
matches! ( self . db . get ::< cf ::Root > ( slot ) , Ok ( Some ( true ) ) )
2019-04-15 13:12:28 -07:00
}
2020-12-16 12:40:36 -08:00
/// Returns true if a slot is between the rooted slot bounds of the ledger, but has not itself
/// been rooted. This is either because the slot was skipped, or due to a gap in ledger data,
/// as when booting from a newer snapshot.
pub fn is_skipped(&self, slot: Slot) -> bool {
    // Lowest known root, or 0 when the rooted iterator yields nothing.
    let lowest_root = self
        .rooted_slot_iterator(0)
        .ok()
        .and_then(|mut iter| iter.next())
        .unwrap_or_default();
    if self.db.get::<cf::Root>(slot).ok().flatten().is_some() {
        // Slot has a Root entry, so it was not skipped.
        false
    } else {
        slot < self.max_root() && slot > lowest_root
    }
}
2019-05-29 09:43:22 -07:00
pub fn set_roots ( & self , rooted_slots : & [ u64 ] ) -> Result < ( ) > {
2019-10-24 10:30:53 -07:00
let mut write_batch = self . db . batch ( ) ? ;
for slot in rooted_slots {
write_batch . put ::< cf ::Root > ( * slot , & true ) ? ;
2019-05-20 19:04:18 -07:00
}
2019-08-27 15:09:41 -07:00
2019-10-24 10:30:53 -07:00
self . db . write ( write_batch ) ? ;
2019-08-27 15:09:41 -07:00
let mut last_root = self . last_root . write ( ) . unwrap ( ) ;
if * last_root = = std ::u64 ::MAX {
* last_root = 0 ;
}
* last_root = cmp ::max ( * rooted_slots . iter ( ) . max ( ) . unwrap ( ) , * last_root ) ;
2019-05-20 19:04:18 -07:00
Ok ( ( ) )
2019-05-03 14:46:02 -07:00
}
2019-11-02 00:38:30 -07:00
pub fn is_dead ( & self , slot : Slot ) -> bool {
2020-08-01 08:44:32 -07:00
matches! (
self . db
. get ::< cf ::DeadSlots > ( slot )
. expect ( " fetch from DeadSlots column family failed " ) ,
Some ( true )
)
2019-06-20 15:50:41 -07:00
}
2019-11-02 00:38:30 -07:00
/// Marks `slot` as dead in the DeadSlots column.
pub fn set_dead_slot(&self, slot: Slot) -> Result<()> {
    let dead = true;
    self.dead_slots_cf.put(slot, &dead)
}
2020-12-09 23:14:31 -08:00
/// Records a duplicate-slot proof for `slot` unless one already exists; a proof
/// only needs to be stored once per slot.
pub fn store_duplicate_if_not_existing(
    &self,
    slot: Slot,
    shred1: Vec<u8>,
    shred2: Vec<u8>,
) -> Result<()> {
    if self.has_duplicate_shreds_in_slot(slot) {
        return Ok(());
    }
    self.store_duplicate_slot(slot, shred1, shred2)
}
2020-01-13 17:21:39 -08:00
/// Stores a proof (two conflicting shred payloads) that `slot` is duplicate.
pub fn store_duplicate_slot(&self, slot: Slot, shred1: Vec<u8>, shred2: Vec<u8>) -> Result<()> {
    let proof = DuplicateSlotProof::new(shred1, shred2);
    self.duplicate_slots_cf.put(slot, &proof)
}
/// Returns the stored duplicate-slot proof for `slot`, if any. Panics on a
/// database read failure.
pub fn get_duplicate_slot(&self, slot: u64) -> Option<DuplicateSlotProof> {
    let fetched = self.duplicate_slots_cf.get(slot);
    fetched.expect("fetch from DuplicateSlots column family failed")
}
2020-06-17 20:54:52 -07:00
// `new_shred` is assumed to have slot and index equal to the given slot and index.
2020-01-16 15:27:54 -08:00
// Returns the existing shred if `new_shred` is not equal to the existing shred at the
// given slot and index as this implies the leader generated two different shreds with
2020-01-13 17:21:39 -08:00
// the same slot and index
2020-12-10 18:20:08 -08:00
pub fn is_shred_duplicate (
& self ,
slot : u64 ,
index : u32 ,
2021-04-27 15:40:41 -07:00
new_shred_raw : & [ u8 ] ,
2020-12-10 18:20:08 -08:00
is_data : bool ,
) -> Option < Vec < u8 > > {
let res = if is_data {
self . get_data_shred ( slot , index as u64 )
. expect ( " fetch from DuplicateSlots column family failed " )
} else {
self . get_coding_shred ( slot , index as u64 )
. expect ( " fetch from DuplicateSlots column family failed " )
} ;
2020-01-13 17:21:39 -08:00
2021-04-27 15:40:41 -07:00
let mut payload = new_shred_raw . to_vec ( ) ;
payload . resize (
std ::cmp ::max ( new_shred_raw . len ( ) , crate ::shred ::SHRED_PAYLOAD_SIZE ) ,
0 ,
) ;
let new_shred = Shred ::new_from_serialized_shred ( payload ) . unwrap ( ) ;
2020-01-16 15:27:54 -08:00
res . map ( | existing_shred | {
2021-04-27 15:40:41 -07:00
if existing_shred ! = new_shred . payload {
2020-01-16 15:27:54 -08:00
Some ( existing_shred )
} else {
None
}
} )
. unwrap_or ( None )
2020-01-13 17:21:39 -08:00
}
/// Returns true iff a duplicate-slot proof has been recorded for `slot`.
/// Panics on a database read failure.
pub fn has_duplicate_shreds_in_slot(&self, slot: Slot) -> bool {
    let proof = self
        .duplicate_slots_cf
        .get(slot)
        .expect("fetch from DuplicateSlots column family failed");
    matches!(proof, Some(_))
}
2020-12-13 17:26:34 -08:00
pub fn orphans_iterator ( & self , slot : Slot ) -> Result < impl Iterator < Item = u64 > + '_ > {
2020-03-04 18:10:30 -08:00
let orphans_iter = self
2019-10-18 08:18:36 -07:00
. db
2020-03-04 18:10:30 -08:00
. iter ::< cf ::Orphans > ( IteratorMode ::From ( slot , IteratorDirection ::Forward ) ) ? ;
Ok ( orphans_iter . map ( | ( slot , _ ) | slot ) )
2019-04-06 19:41:22 -07:00
}
2020-12-13 17:26:34 -08:00
pub fn dead_slots_iterator ( & self , slot : Slot ) -> Result < impl Iterator < Item = Slot > + '_ > {
2020-05-05 14:07:21 -07:00
let dead_slots_iterator = self
. db
. iter ::< cf ::DeadSlots > ( IteratorMode ::From ( slot , IteratorDirection ::Forward ) ) ? ;
Ok ( dead_slots_iterator . map ( | ( slot , _ ) | slot ) )
}
2021-04-02 21:48:44 -07:00
/// Iterates over slots with recorded duplicate proofs at or above `slot`,
/// in ascending order.
pub fn duplicate_slots_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = Slot> + '_> {
    let iter = self
        .db
        .iter::<cf::DuplicateSlots>(IteratorMode::From(slot, IteratorDirection::Forward))?;
    Ok(iter.map(|(duplicate_slot, _)| duplicate_slot))
}
2019-12-05 11:25:13 -08:00
/// Returns the cached highest root.
pub fn last_root(&self) -> Slot {
    let guard = self.last_root.read().unwrap();
    *guard
}
2019-12-05 11:25:13 -08:00
2020-01-13 13:13:52 -08:00
// find the first available slot in blockstore that has some data in it
2019-12-05 11:25:13 -08:00
pub fn lowest_slot ( & self ) -> Slot {
for ( slot , meta ) in self
. slot_meta_iterator ( 0 )
. expect ( " unable to iterate over meta " )
{
if slot > 0 & & meta . received > 0 {
return slot ;
}
}
2020-01-13 13:13:52 -08:00
// This means blockstore is empty, should never get here aside from right at boot.
2019-12-05 11:25:13 -08:00
self . last_root ( )
}
2019-12-06 19:32:45 -08:00
2021-05-24 12:24:47 -07:00
/// Returns the cached lowest slot still protected from cleanup.
pub fn lowest_cleanup_slot(&self) -> Slot {
    let guard = self.lowest_cleanup_slot.read().unwrap();
    *guard
}
2019-12-12 11:55:30 -08:00
/// Reports the on-disk size of the underlying database.
pub fn storage_size(&self) -> Result<u64> {
    let db = &self.db;
    db.storage_size()
}
2020-06-02 21:32:44 -07:00
/// Returns whether this process opened the database with primary (read-write) access.
pub fn is_primary_access(&self) -> bool {
    let db = &self.db;
    db.is_primary_access()
}
2021-05-24 12:24:47 -07:00
/// Walks the ancestor chain of `last_root()` down to `lowest_cleanup_slot()` and
/// roots any unrooted ancestors found along the way (root gaps can appear e.g.
/// when booting from a newer snapshot). Returns early with Ok(()) whenever
/// `exit` is signaled; partial progress is acceptable.
pub fn scan_and_fix_roots(&self, exit: &Arc<AtomicBool>) -> Result<()> {
    let ancestor_iterator = AncestorIterator::new(self.last_root(), self)
        .take_while(|&slot| slot >= self.lowest_cleanup_slot());

    let mut find_missing_roots = Measure::start("find_missing_roots");
    let mut roots_to_fix = vec![];
    for slot in ancestor_iterator.filter(|slot| !self.is_root(*slot)) {
        // Poll for shutdown between each ancestor lookup.
        if exit.load(Ordering::Relaxed) {
            return Ok(());
        }
        roots_to_fix.push(slot);
    }
    find_missing_roots.stop();
    let mut fix_roots = Measure::start("fix_roots");
    if !roots_to_fix.is_empty() {
        info!("{} slots to be rooted", roots_to_fix.len());
        // Root in batches of 100 so shutdown is honored between write batches.
        for chunk in roots_to_fix.chunks(100) {
            if exit.load(Ordering::Relaxed) {
                return Ok(());
            }
            trace!("{:?}", chunk);
            self.set_roots(chunk)?;
        }
    } else {
        debug!(
            "No missing roots found in range {} to {}",
            self.lowest_cleanup_slot(),
            self.last_root()
        );
    }
    fix_roots.stop();
    // Emit timing/volume metrics for both phases.
    datapoint_info!(
        "blockstore-scan_and_fix_roots",
        (
            "find_missing_roots_us",
            find_missing_roots.as_us() as i64,
            i64
        ),
        ("num_roots_to_fix", roots_to_fix.len() as i64, i64),
        ("fix_roots_us", fix_roots.as_us() as i64, i64),
    );
    Ok(())
}
2019-04-26 08:52:10 -07:00
}
2019-02-07 15:10:54 -08:00
2020-09-01 22:06:06 -07:00
// Update the `completed_data_indexes` with a new shred `new_shred_index`. If a
// data set is complete, return the range of shred indexes [start_index, end_index]
// for that completed data set.
//
// `completed_data_indexes` stays sorted; inserting a new data-complete index can
// complete up to two ranges (the one ending at `new_shred_index` and the one
// starting just after it), which is why up to two candidate ranges are checked.
fn update_completed_data_indexes(
    is_last_in_data: bool,
    new_shred_index: u32,
    received_data_shreds: &ShredIndex,
    // Sorted array of shred indexes marked data complete
    completed_data_indexes: &mut Vec<u32>,
) -> Vec<(u32, u32)> {
    let mut first_greater_pos = None;
    let mut prev_completed_shred_index = None;
    // Find the first item in `completed_data_indexes > new_shred_index`
    for (i, completed_data_index) in completed_data_indexes.iter().enumerate() {
        // `completed_data_indexes` should be sorted from smallest to largest
        assert!(
            prev_completed_shred_index.is_none()
                || *completed_data_index > prev_completed_shred_index.unwrap()
        );
        if *completed_data_index > new_shred_index {
            first_greater_pos = Some(i);
            break;
        }
        prev_completed_shred_index = Some(*completed_data_index);
    }
    // Consecutive entries i, k, j in this vector represent potential ranges [i, k),
    // [k, j) that could be completed data ranges
    let mut check_ranges: Vec<u32> = vec![prev_completed_shred_index
        .map(|completed_data_shred_index| completed_data_shred_index + 1)
        .unwrap_or(0)];
    let mut first_greater_data_complete_index =
        first_greater_pos.map(|i| completed_data_indexes[i]);
    // `new_shred_index` is data complete, so need to insert here into the
    // `completed_data_indexes`
    if is_last_in_data {
        if first_greater_pos.is_some() {
            // If there exists a data complete shred greater than `new_shred_index`,
            // and the new shred is marked data complete, then the range
            // [new_shred_index + 1, completed_data_indexes[pos]] may be complete,
            // so add that range to check
            check_ranges.push(new_shred_index + 1);
        }
        completed_data_indexes.insert(
            first_greater_pos.unwrap_or_else(|| {
                // If `first_greater_pos` is none, then there was no greater
                // data complete index so mark this new shred's index as the latest data
                // complete index
                first_greater_data_complete_index = Some(new_shred_index);
                completed_data_indexes.len()
            }),
            new_shred_index,
        );
    }
    if first_greater_data_complete_index.is_none() {
        // That means new_shred_index > all known completed data indexes and
        // new shred not data complete, which means the data set of that new
        // shred is not data complete
        return vec![];
    }
    check_ranges.push(first_greater_data_complete_index.unwrap() + 1);
    let mut completed_data_ranges = vec![];
    for range in check_ranges.windows(2) {
        let mut is_complete = true;
        for shred_index in range[0]..range[1] {
            // If we're missing any shreds, the data set cannot be confirmed
            // to be completed, so check the next range
            if !received_data_shreds.is_present(shred_index as u64) {
                is_complete = false;
                break;
            }
        }
        if is_complete {
            // Ranges are returned inclusive: [start, end].
            completed_data_ranges.push((range[0], range[1] - 1));
        }
    }
    completed_data_ranges
}
2019-08-20 17:16:06 -07:00
/// Updates `slot_meta` to account for a newly inserted data shred at `index`
/// (received height, first-shred timestamp, consumed count, last index), then
/// returns the index ranges of any data sets this insertion completed.
fn update_slot_meta(
    is_last_in_slot: bool,
    is_last_in_data: bool,
    slot_meta: &mut SlotMeta,
    index: u32,
    new_consumed: u64,
    reference_tick: u8,
    received_data_shreds: &ShredIndex,
) -> Vec<(u32, u32)> {
    // Capture pre-update state: is this the very first shred seen for the slot?
    let maybe_first_insert = slot_meta.received == 0;
    // Index is zero-indexed, while the "received" height starts from 1,
    // so received = index + 1 for the same shred.
    slot_meta.received = cmp::max((u64::from(index) + 1) as u64, slot_meta.received);
    if maybe_first_insert && slot_meta.received > 0 {
        // predict the timestamp of what would have been the first shred in this slot
        let slot_time_elapsed = u64::from(reference_tick) * 1000 / DEFAULT_TICKS_PER_SECOND;
        slot_meta.first_shred_timestamp = timestamp() - slot_time_elapsed;
    }
    slot_meta.consumed = new_consumed;
    slot_meta.last_index = {
        // If the last index in the slot hasn't been set before, then
        // set it to this shred index
        // (u64::MAX is the "unset" sentinel for last_index)
        if slot_meta.last_index == std::u64::MAX {
            if is_last_in_slot {
                u64::from(index)
            } else {
                std::u64::MAX
            }
        } else {
            slot_meta.last_index
        }
    };

    // Record data-completeness and report any ranges completed by this shred.
    update_completed_data_indexes(
        is_last_in_slot || is_last_in_data,
        index,
        received_data_shreds,
        &mut slot_meta.completed_data_indexes,
    )
}
2019-09-04 17:14:42 -07:00
fn get_index_meta_entry < ' a > (
2019-08-20 17:16:06 -07:00
db : & Database ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
2019-10-30 16:48:59 -07:00
index_working_set : & ' a mut HashMap < u64 , IndexMetaWorkingSetEntry > ,
index_meta_time : & mut u64 ,
) -> & ' a mut IndexMetaWorkingSetEntry {
2019-09-04 17:14:42 -07:00
let index_cf = db . column ::< cf ::Index > ( ) ;
2019-10-30 16:48:59 -07:00
let mut total_start = Measure ::start ( " Total elapsed " ) ;
let res = index_working_set . entry ( slot ) . or_insert_with ( | | {
let newly_inserted_meta = index_cf
. get ( slot )
. unwrap ( )
. unwrap_or_else ( | | Index ::new ( slot ) ) ;
IndexMetaWorkingSetEntry {
index : newly_inserted_meta ,
did_insert_occur : false ,
}
} ) ;
total_start . stop ( ) ;
* index_meta_time + = total_start . as_us ( ) ;
res
2019-04-26 08:52:10 -07:00
}
2019-02-07 15:10:54 -08:00
2019-09-04 17:14:42 -07:00
fn get_slot_meta_entry < ' a > (
db : & Database ,
slot_meta_working_set : & ' a mut HashMap < u64 , SlotMetaWorkingSetEntry > ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
parent_slot : Slot ,
2019-10-31 14:03:41 -07:00
) -> & ' a mut SlotMetaWorkingSetEntry {
2019-09-04 17:14:42 -07:00
let meta_cf = db . column ::< cf ::SlotMeta > ( ) ;
2019-08-27 15:09:41 -07:00
2019-11-14 11:49:31 -08:00
// Check if we've already inserted the slot metadata for this shred's slot
2019-10-31 14:03:41 -07:00
slot_meta_working_set . entry ( slot ) . or_insert_with ( | | {
// Store a 2-tuple of the metadata (working copy, backup copy)
if let Some ( mut meta ) = meta_cf . get ( slot ) . expect ( " Expect database get to succeed " ) {
let backup = Some ( meta . clone ( ) ) ;
// If parent_slot == std::u64::MAX, then this is one of the orphans inserted
// during the chaining process, see the function find_slot_meta_in_cached_state()
// for details. Slots that are orphans are missing a parent_slot, so we should
// fill in the parent now that we know it.
if is_orphan ( & meta ) {
meta . parent_slot = parent_slot ;
2019-09-04 17:14:42 -07:00
}
2019-10-31 14:03:41 -07:00
SlotMetaWorkingSetEntry ::new ( Rc ::new ( RefCell ::new ( meta ) ) , backup )
} else {
SlotMetaWorkingSetEntry ::new (
Rc ::new ( RefCell ::new ( SlotMeta ::new ( slot , parent_slot ) ) ) ,
None ,
)
}
} )
2019-04-26 08:52:10 -07:00
}
2019-11-17 19:17:15 -08:00
fn get_last_hash < ' a > ( iterator : impl Iterator < Item = & ' a Entry > + ' a ) -> Option < Hash > {
iterator . last ( ) . map ( | entry | entry . hash )
}
2019-11-02 00:38:30 -07:00
fn is_valid_write_to_slot_0 ( slot_to_write : u64 , parent_slot : Slot , last_root : u64 ) -> bool {
2019-08-27 15:09:41 -07:00
slot_to_write = = 0 & & last_root = = 0 & & parent_slot = = 0
}
2019-07-10 11:08:17 -07:00
fn send_signals (
2019-09-03 21:32:51 -07:00
new_shreds_signals : & [ SyncSender < bool > ] ,
2019-07-10 11:08:17 -07:00
completed_slots_senders : & [ SyncSender < Vec < u64 > > ] ,
should_signal : bool ,
newly_completed_slots : Vec < u64 > ,
2020-12-13 17:26:34 -08:00
) {
2019-07-10 11:08:17 -07:00
if should_signal {
2019-09-03 21:32:51 -07:00
for signal in new_shreds_signals {
2019-07-10 11:08:17 -07:00
let _ = signal . try_send ( true ) ;
}
}
if ! completed_slots_senders . is_empty ( ) & & ! newly_completed_slots . is_empty ( ) {
let mut slots : Vec < _ > = ( 0 .. completed_slots_senders . len ( ) - 1 )
. map ( | _ | newly_completed_slots . clone ( ) )
. collect ( ) ;
slots . push ( newly_completed_slots ) ;
for ( signal , slots ) in completed_slots_senders . iter ( ) . zip ( slots . into_iter ( ) ) {
let res = signal . try_send ( slots ) ;
if let Err ( TrySendError ::Full ( _ ) ) = res {
2019-09-07 12:48:45 -07:00
datapoint_error! (
2020-01-13 13:13:52 -08:00
" blockstore_error " ,
2019-09-07 12:48:45 -07:00
(
" error " ,
" Unable to send newly completed slot because channel is full " . to_string ( ) ,
String
) ,
2019-07-10 11:08:17 -07:00
) ;
}
}
}
}
2019-09-04 17:14:42 -07:00
fn commit_slot_meta_working_set (
slot_meta_working_set : & HashMap < u64 , SlotMetaWorkingSetEntry > ,
2019-07-10 11:08:17 -07:00
completed_slots_senders : & [ SyncSender < Vec < u64 > > ] ,
write_batch : & mut WriteBatch ,
) -> Result < ( bool , Vec < u64 > ) > {
let mut should_signal = false ;
let mut newly_completed_slots = vec! [ ] ;
// Check if any metadata was changed, if so, insert the new version of the
// metadata into the write batch
2019-10-31 14:03:41 -07:00
for ( slot , slot_meta_entry ) in slot_meta_working_set . iter ( ) {
// Any slot that wasn't written to should have been filtered out by now.
assert! ( slot_meta_entry . did_insert_occur ) ;
let meta : & SlotMeta = & RefCell ::borrow ( & * slot_meta_entry . new_slot_meta ) ;
let meta_backup = & slot_meta_entry . old_slot_meta ;
2019-07-10 11:08:17 -07:00
if ! completed_slots_senders . is_empty ( ) & & is_newly_completed_slot ( meta , meta_backup ) {
newly_completed_slots . push ( * slot ) ;
}
// Check if the working copy of the metadata has changed
if Some ( meta ) ! = meta_backup . as_ref ( ) {
2021-06-18 06:34:46 -07:00
should_signal = should_signal | | slot_has_updates ( meta , meta_backup ) ;
write_batch . put ::< cf ::SlotMeta > ( * slot , meta ) ? ;
2019-07-10 11:08:17 -07:00
}
}
Ok ( ( should_signal , newly_completed_slots ) )
}
2019-04-26 08:52:10 -07:00
// 1) Find the slot metadata in the cache of dirty slot metadata we've previously touched,
// else:
// 2) Search the database for that slot metadata. If still no luck, then:
// 3) Create a dummy orphan slot in the database
fn find_slot_meta_else_create < ' a > (
db : & Database ,
2019-09-04 17:14:42 -07:00
working_set : & ' a HashMap < u64 , SlotMetaWorkingSetEntry > ,
2019-04-26 08:52:10 -07:00
chained_slots : & ' a mut HashMap < u64 , Rc < RefCell < SlotMeta > > > ,
slot_index : u64 ,
) -> Result < Rc < RefCell < SlotMeta > > > {
2020-12-13 17:26:34 -08:00
let result = find_slot_meta_in_cached_state ( working_set , chained_slots , slot_index ) ;
2019-04-26 08:52:10 -07:00
if let Some ( slot ) = result {
Ok ( slot )
} else {
find_slot_meta_in_db_else_create ( db , slot_index , chained_slots )
2019-02-07 15:10:54 -08:00
}
2019-04-26 08:52:10 -07:00
}
2019-02-07 15:10:54 -08:00
2019-04-26 08:52:10 -07:00
// Search the database for that slot metadata. If still no luck, then
// create a dummy orphan slot in the database
2020-12-13 17:26:34 -08:00
fn find_slot_meta_in_db_else_create (
2019-04-26 08:52:10 -07:00
db : & Database ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
2020-12-13 17:26:34 -08:00
insert_map : & mut HashMap < u64 , Rc < RefCell < SlotMeta > > > ,
2019-04-26 08:52:10 -07:00
) -> Result < Rc < RefCell < SlotMeta > > > {
2019-05-03 14:46:02 -07:00
if let Some ( slot_meta ) = db . column ::< cf ::SlotMeta > ( ) . get ( slot ) ? {
2019-04-26 08:52:10 -07:00
insert_map . insert ( slot , Rc ::new ( RefCell ::new ( slot_meta ) ) ) ;
Ok ( insert_map . get ( & slot ) . unwrap ( ) . clone ( ) )
} else {
// If this slot doesn't exist, make a orphan slot. This way we
2019-09-03 21:32:51 -07:00
// remember which slots chained to this one when we eventually get a real shred
2019-04-26 08:52:10 -07:00
// for this slot
2020-05-05 14:07:21 -07:00
insert_map . insert ( slot , Rc ::new ( RefCell ::new ( SlotMeta ::new_orphan ( slot ) ) ) ) ;
2019-04-26 08:52:10 -07:00
Ok ( insert_map . get ( & slot ) . unwrap ( ) . clone ( ) )
2019-04-25 00:04:49 -07:00
}
2019-04-26 08:52:10 -07:00
}
2019-04-25 00:04:49 -07:00
2019-04-26 08:52:10 -07:00
// Find the slot metadata in the cache of dirty slot metadata we've previously touched
fn find_slot_meta_in_cached_state < ' a > (
2019-09-04 17:14:42 -07:00
working_set : & ' a HashMap < u64 , SlotMetaWorkingSetEntry > ,
2019-04-26 08:52:10 -07:00
chained_slots : & ' a HashMap < u64 , Rc < RefCell < SlotMeta > > > ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
2020-12-13 17:26:34 -08:00
) -> Option < Rc < RefCell < SlotMeta > > > {
2019-10-31 14:03:41 -07:00
if let Some ( entry ) = working_set . get ( & slot ) {
2020-12-13 17:26:34 -08:00
Some ( entry . new_slot_meta . clone ( ) )
2019-04-26 08:52:10 -07:00
} else {
2021-04-08 11:40:37 -07:00
chained_slots . get ( & slot ) . cloned ( )
2019-03-29 16:07:24 -07:00
}
2019-04-26 08:52:10 -07:00
}
2019-03-29 16:07:24 -07:00
2019-04-26 08:52:10 -07:00
// Chaining based on latest discussion here: https://github.com/solana-labs/solana/pull/2253
fn handle_chaining (
db : & Database ,
write_batch : & mut WriteBatch ,
2019-10-31 14:03:41 -07:00
working_set : & mut HashMap < u64 , SlotMetaWorkingSetEntry > ,
2019-04-26 08:52:10 -07:00
) -> Result < ( ) > {
2019-10-31 14:03:41 -07:00
// Handle chaining for all the SlotMetas that were inserted into
working_set . retain ( | _ , entry | entry . did_insert_occur ) ;
2019-04-26 08:52:10 -07:00
let mut new_chained_slots = HashMap ::new ( ) ;
2019-10-31 14:03:41 -07:00
let working_set_slots : Vec < _ > = working_set . keys ( ) . collect ( ) ;
2019-04-26 08:52:10 -07:00
for slot in working_set_slots {
2019-10-31 14:03:41 -07:00
handle_chaining_for_slot ( db , write_batch , working_set , & mut new_chained_slots , * slot ) ? ;
2019-02-07 15:10:54 -08:00
}
2019-04-26 08:52:10 -07:00
// Write all the newly changed slots in new_chained_slots to the write_batch
for ( slot , meta ) in new_chained_slots . iter ( ) {
let meta : & SlotMeta = & RefCell ::borrow ( & * meta ) ;
write_batch . put ::< cf ::SlotMeta > ( * slot , meta ) ? ;
2019-02-07 15:10:54 -08:00
}
2019-04-26 08:52:10 -07:00
Ok ( ( ) )
}
2019-02-07 15:10:54 -08:00
2019-04-26 08:52:10 -07:00
/// Runs the chaining logic for one freshly inserted slot: links the slot to
/// its parent (creating an orphan parent entry when the parent is unknown),
/// clears this slot's own orphan marker once a parent is found, and, when the
/// slot newly completed while connected, propagates `is_connected` to its
/// full descendants.
fn handle_chaining_for_slot(
    db: &Database,
    write_batch: &mut WriteBatch,
    working_set: &HashMap<u64, SlotMetaWorkingSetEntry>,
    new_chained_slots: &mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
    slot: Slot,
) -> Result<()> {
    let slot_meta_entry = working_set
        .get(&slot)
        .expect("Slot must exist in the working_set hashmap");

    let meta = &slot_meta_entry.new_slot_meta;
    let meta_backup = &slot_meta_entry.old_slot_meta;
    // Scope the mutable borrow of `meta` so the immutable borrows below
    // (is_newly_completed_slot / traverse_children_mut) don't panic at runtime.
    {
        let mut meta_mut = meta.borrow_mut();
        // The backup is the on-disk state before this batch; an orphan backup
        // means this slot previously had no known parent.
        let was_orphan_slot = meta_backup.is_some() && is_orphan(meta_backup.as_ref().unwrap());

        // If:
        // 1) This is a new slot
        // 2) slot != 0
        // then try to chain this slot to a previous slot
        if slot != 0 {
            let prev_slot = meta_mut.parent_slot;

            // Check if the slot represented by meta_mut is either a new slot or a orphan.
            // In both cases we need to run the chaining logic b/c the parent on the slot was
            // previously unknown.
            if meta_backup.is_none() || was_orphan_slot {
                let prev_slot_meta =
                    find_slot_meta_else_create(db, working_set, new_chained_slots, prev_slot)?;

                // This is a newly inserted slot/orphan so run the chaining logic to link it to a
                // newly discovered parent
                chain_new_slot_to_prev_slot(&mut prev_slot_meta.borrow_mut(), slot, &mut meta_mut);

                // If the parent of `slot` is a newly inserted orphan, insert it into the orphans
                // column family
                if is_orphan(&RefCell::borrow(&*prev_slot_meta)) {
                    write_batch.put::<cf::Orphans>(prev_slot, &true)?;
                }
            }
        }

        // At this point this slot has received a parent, so it's no longer an orphan
        if was_orphan_slot {
            write_batch.delete::<cf::Orphans>(slot)?;
        }
    }

    // If this is a newly inserted slot, then we know the children of this slot were not previously
    // connected to the trunk of the ledger. Thus if slot.is_connected is now true, we need to
    // update all child slots with `is_connected` = true because these children are also now newly
    // connected to trunk of the ledger
    let should_propagate_is_connected =
        is_newly_completed_slot(&RefCell::borrow(&*meta), meta_backup)
            && RefCell::borrow(&*meta).is_connected;

    if should_propagate_is_connected {
        // slot_function returns a boolean indicating whether to explore the children
        // of the input slot
        let slot_function = |slot: &mut SlotMeta| {
            slot.is_connected = true;

            // We don't want to set the is_connected flag on the children of non-full
            // slots
            slot.is_full()
        };

        traverse_children_mut(
            db,
            slot,
            meta,
            working_set,
            new_chained_slots,
            slot_function,
        )?;
    }

    Ok(())
}
fn traverse_children_mut < F > (
db : & Database ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
2019-11-19 20:15:37 -08:00
slot_meta : & Rc < RefCell < SlotMeta > > ,
2019-09-04 17:14:42 -07:00
working_set : & HashMap < u64 , SlotMetaWorkingSetEntry > ,
2019-04-26 08:52:10 -07:00
new_chained_slots : & mut HashMap < u64 , Rc < RefCell < SlotMeta > > > ,
slot_function : F ,
) -> Result < ( ) >
where
F : Fn ( & mut SlotMeta ) -> bool ,
{
2019-11-19 20:15:37 -08:00
let mut next_slots : Vec < ( u64 , Rc < RefCell < SlotMeta > > ) > = vec! [ ( slot , slot_meta . clone ( ) ) ] ;
2019-04-26 08:52:10 -07:00
while ! next_slots . is_empty ( ) {
let ( _ , current_slot ) = next_slots . pop ( ) . unwrap ( ) ;
// Check whether we should explore the children of this slot
if slot_function ( & mut current_slot . borrow_mut ( ) ) {
let current_slot = & RefCell ::borrow ( & * current_slot ) ;
for next_slot_index in current_slot . next_slots . iter ( ) {
let next_slot = find_slot_meta_else_create (
db ,
working_set ,
new_chained_slots ,
* next_slot_index ,
) ? ;
next_slots . push ( ( * next_slot_index , next_slot ) ) ;
2019-04-11 14:14:57 -07:00
}
}
2019-04-26 08:52:10 -07:00
}
2019-04-11 14:14:57 -07:00
2019-04-26 08:52:10 -07:00
Ok ( ( ) )
}
2019-04-11 14:14:57 -07:00
2019-04-26 08:52:10 -07:00
/// A slot is an orphan when its parent is unknown, i.e. it is the head of a
/// detached chain of slots.
fn is_orphan(slot_meta: &SlotMeta) -> bool {
    !slot_meta.is_parent_set()
}
2019-04-18 21:56:43 -07:00
2019-04-26 08:52:10 -07:00
// 1) Chain current_slot to the previous slot defined by prev_slot_meta
// 2) Determine whether to set the is_connected flag
fn chain_new_slot_to_prev_slot (
prev_slot_meta : & mut SlotMeta ,
2019-11-02 00:38:30 -07:00
current_slot : Slot ,
2019-04-26 08:52:10 -07:00
current_slot_meta : & mut SlotMeta ,
) {
prev_slot_meta . next_slots . push ( current_slot ) ;
current_slot_meta . is_connected = prev_slot_meta . is_connected & & prev_slot_meta . is_full ( ) ;
}
2019-04-11 14:14:57 -07:00
2019-04-26 08:52:10 -07:00
fn is_newly_completed_slot ( slot_meta : & SlotMeta , backup_slot_meta : & Option < SlotMeta > ) -> bool {
slot_meta . is_full ( )
& & ( backup_slot_meta . is_none ( )
| | slot_meta . consumed ! = backup_slot_meta . as_ref ( ) . unwrap ( ) . consumed )
}
2019-04-11 14:14:57 -07:00
2019-04-26 08:52:10 -07:00
fn slot_has_updates ( slot_meta : & SlotMeta , slot_meta_backup : & Option < SlotMeta > ) -> bool {
// We should signal that there are updates if we extended the chain of consecutive blocks starting
// from block 0, which is true iff:
// 1) The block with index prev_block_index is itself part of the trunk of consecutive blocks
// starting from block 0,
slot_meta . is_connected & &
// AND either:
// 1) The slot didn't exist in the database before, and now we have a consecutive
// block for that slot
( ( slot_meta_backup . is_none ( ) & & slot_meta . consumed ! = 0 ) | |
// OR
// 2) The slot did exist, but now we have a new consecutive block for that slot
( slot_meta_backup . is_some ( ) & & slot_meta_backup . as_ref ( ) . unwrap ( ) . consumed ! = slot_meta . consumed ) )
2018-12-11 09:14:23 -08:00
}
2019-02-26 16:35:00 -08:00
// Creates a new ledger with slot 0 full of ticks (and only ticks).
//
// Returns the blockhash that can be used to append entries with.
//
// Side effects on disk: destroys any existing blockstore at `ledger_path`,
// writes the genesis config, populates slot 0, roots it, then tars the
// genesis file + rocksdb directory into DEFAULT_GENESIS_ARCHIVE and verifies
// the archive unpacks within `max_genesis_archive_unpacked_size`.
pub fn create_new_ledger(
    ledger_path: &Path,
    genesis_config: &GenesisConfig,
    max_genesis_archive_unpacked_size: u64,
    access_type: AccessType,
) -> Result<Hash> {
    Blockstore::destroy(ledger_path)?;
    genesis_config.write(ledger_path)?;

    // Fill slot 0 with ticks that link back to the genesis_config to bootstrap the ledger.
    let blockstore = Blockstore::open_with_access_type(ledger_path, access_type, None, false)?;
    let ticks_per_slot = genesis_config.ticks_per_slot;
    let hashes_per_tick = genesis_config.poh_config.hashes_per_tick.unwrap_or(0);
    let entries = create_ticks(ticks_per_slot, hashes_per_tick, genesis_config.hash());
    let last_hash = entries.last().unwrap().hash;
    // Shred version is derived from the last tick's hash so the cluster
    // agrees on it.
    let version = solana_sdk::shred_version::version_from_hash(&last_hash);

    let shredder = Shredder::new(0, 0, 0, version).unwrap();
    let shreds = shredder
        .entries_to_shreds(&Keypair::new(), &entries, true, 0)
        .0;
    assert!(shreds.last().unwrap().last_in_slot());

    blockstore.insert_shreds(shreds, None, false)?;
    blockstore.set_roots(&[0])?;
    // Explicitly close the blockstore before we create the archived genesis file
    drop(blockstore);

    // Archive the genesis file and the rocksdb directory (tar flags: bzip2,
    // create, file, dereference hardlinks, sparse).
    let archive_path = ledger_path.join(DEFAULT_GENESIS_ARCHIVE);
    let args = vec![
        "jcfhS",
        archive_path.to_str().unwrap(),
        "-C",
        ledger_path.to_str().unwrap(),
        DEFAULT_GENESIS_FILE,
        "rocksdb",
    ];
    let output = std::process::Command::new("tar")
        .args(&args)
        .output()
        .unwrap();
    if !output.status.success() {
        use std::str::from_utf8;
        error!("tar stdout: {}", from_utf8(&output.stdout).unwrap_or("?"));
        error!("tar stderr: {}", from_utf8(&output.stderr).unwrap_or("?"));

        return Err(BlockstoreError::Io(IoError::new(
            ErrorKind::Other,
            format!(
                "Error trying to generate snapshot archive: {}",
                output.status
            ),
        )));
    }

    // ensure the genesis archive can be unpacked and it is under
    // max_genesis_archive_unpacked_size, immediately after creating it above.
    {
        let temp_dir = tempfile::tempdir_in(ledger_path).unwrap();
        // unpack into a temp dir, while completely discarding the unpacked files
        let unpack_check = unpack_genesis_archive(
            &archive_path,
            &temp_dir.into_path(),
            max_genesis_archive_unpacked_size,
        );
        if let Err(unpack_err) = unpack_check {
            // stash problematic original archived genesis related files to
            // examine them later and to prevent validator and ledger-tool from
            // naively consuming them
            let mut error_messages = String::new();
            fs::rename(
                &ledger_path.join(DEFAULT_GENESIS_ARCHIVE),
                ledger_path.join(format!("{}.failed", DEFAULT_GENESIS_ARCHIVE)),
            )
            .unwrap_or_else(|e| {
                error_messages += &format!(
                    "/failed to stash problematic {}: {}",
                    DEFAULT_GENESIS_ARCHIVE, e
                )
            });
            fs::rename(
                &ledger_path.join(DEFAULT_GENESIS_FILE),
                ledger_path.join(format!("{}.failed", DEFAULT_GENESIS_FILE)),
            )
            .unwrap_or_else(|e| {
                error_messages += &format!(
                    "/failed to stash problematic {}: {}",
                    DEFAULT_GENESIS_FILE, e
                )
            });
            fs::rename(
                &ledger_path.join("rocksdb"),
                ledger_path.join("rocksdb.failed"),
            )
            .unwrap_or_else(|e| {
                error_messages += &format!("/failed to stash problematic rocksdb: {}", e)
            });

            return Err(BlockstoreError::Io(IoError::new(
                ErrorKind::Other,
                format!(
                    "Error checking to unpack genesis archive: {}{}",
                    unpack_err, error_messages
                ),
            )));
        }
    }

    Ok(last_hash)
}
2019-02-26 17:11:26 -08:00
// Produces a ledger name unique to the macro's call site, derived from the
// invoking file and line number.
#[macro_export]
macro_rules! tmp_ledger_name {
    () => {
        &format!("{}-{}", file!(), line!())
    };
}
// Returns a fresh temporary ledger path named after the call site (any
// pre-existing directory at that path is removed by
// `get_ledger_path_from_name`).
#[macro_export]
macro_rules! get_tmp_ledger_path {
    () => {
        $crate::blockstore::get_ledger_path_from_name($crate::tmp_ledger_name!())
    };
}
2019-11-13 07:14:09 -08:00
pub fn get_ledger_path_from_name ( name : & str ) -> PathBuf {
2019-01-09 14:33:44 -08:00
use std ::env ;
2019-07-17 14:27:58 -07:00
let out_dir = env ::var ( " FARF_DIR " ) . unwrap_or_else ( | _ | " farf " . to_string ( ) ) ;
2019-01-09 14:33:44 -08:00
let keypair = Keypair ::new ( ) ;
2019-07-30 15:53:41 -07:00
let path = [
out_dir ,
" ledger " . to_string ( ) ,
format! ( " {} - {} " , name , keypair . pubkey ( ) ) ,
]
. iter ( )
. collect ( ) ;
2019-01-09 14:33:44 -08:00
// whack any possible collision
2019-01-24 12:04:04 -08:00
let _ignored = fs ::remove_dir_all ( & path ) ;
2019-01-09 14:33:44 -08:00
path
}
2019-02-26 19:19:34 -08:00
// Creates a temporary ledger (named after the call site) populated from
// `$genesis_config`, opened with primary access; expands to a
// `(PathBuf, Hash)` of the ledger path and its last blockhash.
#[macro_export]
macro_rules! create_new_tmp_ledger {
    ($genesis_config:expr) => {
        $crate::blockstore::create_new_ledger_from_name(
            $crate::tmp_ledger_name!(),
            $genesis_config,
            $crate::blockstore_db::AccessType::PrimaryOnly,
        )
    };
}
2019-12-05 11:25:13 -08:00
pub fn verify_shred_slots ( slot : Slot , parent_slot : Slot , last_root : Slot ) -> bool {
2019-09-16 13:13:53 -07:00
if ! is_valid_write_to_slot_0 ( slot , parent_slot , last_root ) {
// Check that the parent_slot < slot
if parent_slot > = slot {
return false ;
}
2019-11-14 11:49:31 -08:00
// Ignore shreds that chain to slots before the last root
2019-09-16 13:13:53 -07:00
if parent_slot < last_root {
return false ;
}
// Above two checks guarantee that by this point, slot > last_root
}
true
}
2019-02-26 16:35:00 -08:00
// Same as `create_new_ledger()` but use a temporary ledger name based on the provided `name`
//
// Note: like `create_new_ledger` the returned ledger will have slot 0 full of ticks (and only
// ticks)
2020-06-02 21:32:44 -07:00
pub fn create_new_ledger_from_name (
name : & str ,
genesis_config : & GenesisConfig ,
access_type : AccessType ,
) -> ( PathBuf , Hash ) {
2019-11-13 07:14:09 -08:00
let ledger_path = get_ledger_path_from_name ( name ) ;
2020-04-29 18:53:34 -07:00
let blockhash = create_new_ledger (
& ledger_path ,
genesis_config ,
MAX_GENESIS_ARCHIVE_UNPACKED_SIZE ,
2020-06-02 21:32:44 -07:00
access_type ,
2020-04-29 18:53:34 -07:00
)
. unwrap ( ) ;
2019-03-02 10:25:16 -08:00
( ledger_path , blockhash )
2019-02-21 22:36:01 -08:00
}
2019-09-03 21:32:51 -07:00
pub fn entries_to_test_shreds (
entries : Vec < Entry > ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
parent_slot : Slot ,
2019-09-03 21:32:51 -07:00
is_full_slot : bool ,
2019-11-18 18:05:02 -08:00
version : u16 ,
2019-09-18 16:24:30 -07:00
) -> Vec < Shred > {
2021-06-21 13:12:38 -07:00
Shredder ::new ( slot , parent_slot , 0 , version )
2021-04-21 05:47:50 -07:00
. unwrap ( )
2021-06-21 13:12:38 -07:00
. entries_to_shreds ( & Keypair ::new ( ) , & entries , is_full_slot , 0 )
2021-04-21 05:47:50 -07:00
. 0
2019-09-03 21:32:51 -07:00
}
2019-10-31 13:38:50 -07:00
// used for tests only
2019-10-18 09:28:51 -07:00
pub fn make_slot_entries (
2019-11-02 00:38:30 -07:00
slot : Slot ,
parent_slot : Slot ,
2019-10-18 09:28:51 -07:00
num_entries : u64 ,
) -> ( Vec < Shred > , Vec < Entry > ) {
2019-10-31 13:38:50 -07:00
let entries = create_ticks ( num_entries , 0 , Hash ::default ( ) ) ;
2019-11-18 18:05:02 -08:00
let shreds = entries_to_test_shreds ( entries . clone ( ) , slot , parent_slot , true , 0 ) ;
2019-10-18 09:28:51 -07:00
( shreds , entries )
}
2019-10-31 13:38:50 -07:00
// used for tests only
2019-10-18 09:28:51 -07:00
pub fn make_many_slot_entries (
2019-11-02 00:38:30 -07:00
start_slot : Slot ,
2019-10-18 09:28:51 -07:00
num_slots : u64 ,
entries_per_slot : u64 ,
) -> ( Vec < Shred > , Vec < Entry > ) {
let mut shreds = vec! [ ] ;
let mut entries = vec! [ ] ;
for slot in start_slot .. start_slot + num_slots {
let parent_slot = if slot = = 0 { 0 } else { slot - 1 } ;
let ( slot_shreds , slot_entries ) = make_slot_entries ( slot , parent_slot , entries_per_slot ) ;
shreds . extend ( slot_shreds ) ;
entries . extend ( slot_entries ) ;
}
( shreds , entries )
}
// Create shreds for slots that have a parent-child relationship defined by the input `chain`
2019-10-31 13:38:50 -07:00
// used for tests only
2019-10-18 09:28:51 -07:00
pub fn make_chaining_slot_entries (
chain : & [ u64 ] ,
entries_per_slot : u64 ,
) -> Vec < ( Vec < Shred > , Vec < Entry > ) > {
let mut slots_shreds_and_entries = vec! [ ] ;
for ( i , slot ) in chain . iter ( ) . enumerate ( ) {
let parent_slot = {
if * slot = = 0 | | i = = 0 {
0
} else {
chain [ i - 1 ]
}
} ;
let result = make_slot_entries ( * slot , parent_slot , entries_per_slot ) ;
slots_shreds_and_entries . push ( result ) ;
}
slots_shreds_and_entries
}
2019-11-05 11:18:49 -08:00
// No-op on non-Unix targets: there is no RLIMIT_NOFILE to adjust, so the
// `_enforce_ulimit_nofile` flag is ignored and success is always returned.
#[cfg(not(unix))]
fn adjust_ulimit_nofile(_enforce_ulimit_nofile: bool) -> Result<()> {
    Ok(())
}
2019-11-05 11:18:49 -08:00
// Attempts to raise this process's RLIMIT_NOFILE soft limit to 500k. If the
// limit cannot be raised and `enforce_ulimit_nofile` is true, returns
// `BlockstoreError::UnableToSetOpenFileDescriptorLimit`; otherwise the
// failure is only logged and `Ok(())` is returned.
#[cfg(unix)]
fn adjust_ulimit_nofile(enforce_ulimit_nofile: bool) -> Result<()> {
    // Rocks DB likes to have many open files. The default open file descriptor limit is
    // usually not enough
    let desired_nofile = 500000;

    // Reads the current RLIMIT_NOFILE; on getrlimit failure only warns and
    // returns the zero-initialized struct.
    fn get_nofile() -> libc::rlimit {
        let mut nofile = libc::rlimit {
            rlim_cur: 0,
            rlim_max: 0,
        };
        if unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut nofile) } != 0 {
            warn!("getrlimit(RLIMIT_NOFILE) failed");
        }
        nofile
    }

    let mut nofile = get_nofile();
    if nofile.rlim_cur < desired_nofile {
        nofile.rlim_cur = desired_nofile;
        if unsafe { libc::setrlimit(libc::RLIMIT_NOFILE, &nofile) } != 0 {
            error!(
                "Unable to increase the maximum open file descriptor limit to {}",
                desired_nofile
            );
            if cfg!(target_os = "macos") {
                error!(
                    "On mac OS you may need to run |sudo launchctl limit maxfiles {} {}| first",
                    desired_nofile, desired_nofile,
                );
            }
            if enforce_ulimit_nofile {
                return Err(BlockstoreError::UnableToSetOpenFileDescriptorLimit);
            }
        }
        // Re-read so the logged value reflects the (possibly updated) limit.
        nofile = get_nofile();
    }
    info!("Maximum open file descriptors: {}", nofile.rlim_cur);
    Ok(())
}
2018-11-15 15:53:31 -08:00
#[ cfg(test) ]
2019-02-18 18:41:31 -08:00
pub mod tests {
2018-11-15 15:53:31 -08:00
use super ::* ;
2019-11-14 15:34:39 -08:00
use crate ::{
2019-11-17 19:17:15 -08:00
entry ::{ next_entry , next_entry_mut } ,
2019-11-14 15:34:39 -08:00
genesis_utils ::{ create_genesis_config , GenesisConfigInfo } ,
2019-12-09 00:13:36 -08:00
leader_schedule ::{ FixedSchedule , LeaderSchedule } ,
2019-11-14 15:34:39 -08:00
shred ::{ max_ticks_per_n_shreds , DataShredHeader } ,
} ;
2019-12-11 14:06:54 -08:00
use assert_matches ::assert_matches ;
use bincode ::serialize ;
2019-09-03 21:32:51 -07:00
use itertools ::Itertools ;
2019-11-14 15:34:39 -08:00
use rand ::{ seq ::SliceRandom , thread_rng } ;
2021-03-05 08:05:35 -08:00
use solana_account_decoder ::parse_token ::UiTokenAmount ;
2020-10-15 17:04:10 -07:00
use solana_runtime ::bank ::{ Bank , RewardType } ;
2019-11-14 15:34:39 -08:00
use solana_sdk ::{
2019-12-11 14:06:54 -08:00
hash ::{ self , hash , Hash } ,
2019-11-17 19:17:15 -08:00
instruction ::CompiledInstruction ,
packet ::PACKET_DATA_SIZE ,
pubkey ::Pubkey ,
signature ::Signature ,
transaction ::TransactionError ,
2019-11-14 15:34:39 -08:00
} ;
2020-10-15 17:04:10 -07:00
use solana_storage_proto ::convert ::generated ;
2021-03-05 08:05:35 -08:00
use solana_transaction_status ::{ InnerInstructions , Reward , Rewards , TransactionTokenBalance } ;
2021-05-26 16:12:57 -07:00
use std ::{ sync ::mpsc ::channel , thread ::Builder , time ::Duration } ;
2019-11-14 15:34:39 -08:00
    // used for tests only
    // Builds `num_entries` iterations of (transaction entry, tick entry):
    // each iteration appends one entry carrying a dummy single-instruction
    // transaction, followed by one tick whose hash is derived from the loop
    // counter so successive ticks differ.
    pub(crate) fn make_slot_entries_with_transactions(num_entries: u64) -> Vec<Entry> {
        let mut entries: Vec<Entry> = Vec::new();
        for x in 0..num_entries {
            let transaction = Transaction::new_with_compiled_instructions(
                &[&Keypair::new()],
                &[solana_sdk::pubkey::new_rand()],
                Hash::default(),
                vec![solana_sdk::pubkey::new_rand()],
                vec![CompiledInstruction::new(1, &(), vec![0])],
            );
            entries.push(next_entry_mut(&mut Hash::default(), 0, vec![transaction]));
            let mut tick = create_ticks(1, 0, hash(&serialize(&x).unwrap()));
            entries.append(&mut tick);
        }
        entries
    }
2018-11-15 15:53:31 -08:00
2019-10-08 00:42:51 -07:00
    #[test]
    fn test_create_new_ledger() {
        let mint_total = 1_000_000_000_000;
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(mint_total);
        let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
        let ledger = Blockstore::open(&ledger_path).unwrap();

        // Slot 0 of a freshly created ledger must contain exactly the
        // bootstrap ticks derived from the genesis config.
        let ticks = create_ticks(genesis_config.ticks_per_slot, 0, genesis_config.hash());
        let entries = ledger.get_slot_entries(0, 0).unwrap();

        assert_eq!(ticks, entries);

        // Destroying database without closing it first is undefined behavior
        drop(ledger);
        Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
    }
#[test]
fn test_insert_get_bytes() {
    // Create enough entries to ensure there are at least two shreds created
    let num_entries = max_ticks_per_n_shreds(1, None) + 1;
    assert!(num_entries > 1);
    let (mut shreds, _) = make_slot_entries(0, 0, num_entries);

    let ledger_path = get_tmp_ledger_path!();
    let ledger = Blockstore::open(&ledger_path).unwrap();

    // Insert only the final shred, then verify a byte-exact round trip
    // through the data-shred column family.
    let last_shred = shreds.pop().unwrap();
    assert!(last_shred.index() > 0);
    ledger
        .insert_shreds(vec![last_shred.clone()], None, false)
        .unwrap();

    let raw_bytes = ledger
        .data_shred_cf
        .get_bytes((0, last_shred.index() as u64))
        .unwrap()
        .unwrap();
    let round_tripped = Shred::new_from_serialized_shred(raw_bytes).unwrap();
    assert_eq!(last_shred, round_tripped);

    // Destroying database without closing it first is undefined behavior
    drop(ledger);
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
2019-02-18 19:49:43 -08:00
// Writes `num_slots` chained slots of ticks via `write_entries` and checks
// that each slot's SlotMeta (consumed/received/last_index/parent/next links)
// and stored entries match what was written.
#[test]
fn test_write_entries() {
    solana_logger::setup();
    let ledger_path = get_tmp_ledger_path!();
    {
        let ticks_per_slot = 10;
        let num_slots = 10;
        let ledger = Blockstore::open(&ledger_path).unwrap();
        let mut ticks = vec![];
        //let mut shreds_per_slot = 0 as u64;
        // Shred counts can vary per slot, so record the count returned for each.
        let mut shreds_per_slot = vec![];

        for i in 0..num_slots {
            let mut new_ticks = create_ticks(ticks_per_slot, 0, Hash::default());
            // Parent of slot i is i - 1 (slot 0 parents itself via saturating_sub).
            let num_shreds = ledger
                .write_entries(
                    i,
                    0,
                    0,
                    ticks_per_slot,
                    Some(i.saturating_sub(1)),
                    true,
                    &Arc::new(Keypair::new()),
                    new_ticks.clone(),
                    0,
                )
                .unwrap() as u64;
            shreds_per_slot.push(num_shreds);
            ticks.append(&mut new_ticks);
        }

        for i in 0..num_slots {
            let meta = ledger.meta(i).unwrap().unwrap();
            let num_shreds = shreds_per_slot[i as usize];
            // Full slot was written, so everything is consumed and received.
            assert_eq!(meta.consumed, num_shreds);
            assert_eq!(meta.received, num_shreds);
            assert_eq!(meta.last_index, num_shreds - 1);
            // Only the final slot has no child; all others chain to i + 1.
            if i == num_slots - 1 {
                assert!(meta.next_slots.is_empty());
            } else {
                assert_eq!(meta.next_slots, vec![i + 1]);
            }
            if i == 0 {
                assert_eq!(meta.parent_slot, 0);
            } else {
                assert_eq!(meta.parent_slot, i - 1);
            }

            // Stored entries for slot i must equal the i-th batch of ticks written.
            assert_eq!(
                &ticks[(i * ticks_per_slot) as usize..((i + 1) * ticks_per_slot) as usize],
                &ledger.get_slot_entries(i, 0).unwrap()[..]
            );
        }

        // NOTE(review): historical scenario kept for reference; it predates the
        // current `write_entries` signature and would need rework to re-enable.
        /*
        // Simulate writing to the end of a slot with existing ticks
        ledger
            .write_entries(
                num_slots,
                ticks_per_slot - 1,
                ticks_per_slot - 2,
                ticks_per_slot,
                &ticks[0..2],
            )
            .unwrap();

        let meta = ledger.meta(num_slots).unwrap().unwrap();
        assert_eq!(meta.consumed, 0);
        // received shred was ticks_per_slot - 2, so received should be ticks_per_slot - 2 + 1
        assert_eq!(meta.received, ticks_per_slot - 1);
        // last shred index ticks_per_slot - 2 because that's the shred that made tick_height == ticks_per_slot
        // for the slot
        assert_eq!(meta.last_index, ticks_per_slot - 2);
        assert_eq!(meta.parent_slot, num_slots - 1);
        assert_eq!(meta.next_slots, vec![num_slots + 1]);
        assert_eq!(
            &ticks[0..1],
            &ledger
                .get_slot_entries(num_slots, ticks_per_slot - 2)
                .unwrap()[..]
        );

        // We wrote two entries, the second should spill into slot num_slots + 1
        let meta = ledger.meta(num_slots + 1).unwrap().unwrap();
        assert_eq!(meta.consumed, 1);
        assert_eq!(meta.received, 1);
        assert_eq!(meta.last_index, std::u64::MAX);
        assert_eq!(meta.parent_slot, num_slots);
        assert!(meta.next_slots.is_empty());
        assert_eq!(
            &ticks[1..2],
            &ledger.get_slot_entries(num_slots + 1, 0).unwrap()[..]
        );
        */
    }
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
2020-04-24 15:04:23 -07:00
#[test]
fn test_put_get_simple() {
    // Round-trip one value through each of the three column families:
    // slot meta, coding shreds, and data shreds.
    let ledger_path = get_tmp_ledger_path!();
    let ledger = Blockstore::open(&ledger_path).unwrap();

    // Test meta column family
    let meta = SlotMeta::new(0, 1);
    ledger.meta_cf.put(0, &meta).unwrap();
    let fetched_meta = ledger
        .meta_cf
        .get(0)
        .unwrap()
        .expect("Expected meta object to exist");
    assert_eq!(fetched_meta, meta);

    // Test erasure column family
    let erasure_key = (0, 0);
    let erasure_bytes = vec![1u8; 16];
    ledger
        .code_shred_cf
        .put_bytes(erasure_key, &erasure_bytes)
        .unwrap();
    let fetched_erasure = ledger
        .code_shred_cf
        .get_bytes(erasure_key)
        .unwrap()
        .expect("Expected erasure object to exist");
    assert_eq!(fetched_erasure, erasure_bytes);

    // Test data column family
    let data_key = (0, 0);
    let data_bytes = vec![2u8; 16];
    ledger.data_shred_cf.put_bytes(data_key, &data_bytes).unwrap();
    let fetched_data = ledger
        .data_shred_cf
        .get_bytes(data_key)
        .unwrap()
        .expect("Expected data object to exist");
    assert_eq!(fetched_data, data_bytes);

    // Destroying database without closing it first is undefined behavior
    drop(ledger);
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
2018-11-15 15:53:31 -08:00
// Exercises `get_data_shreds` buffer semantics: shreds are copied into the
// caller's buffer back-to-back, only whole shreds that fit are returned, and
// the returned tuple is (last shred index copied, total bytes written).
#[test]
fn test_read_shred_bytes() {
    let slot = 0;
    let (shreds, _) = make_slot_entries(slot, 0, 100);
    let num_shreds = shreds.len() as u64;
    // Keep the raw payloads so reads can be compared byte-for-byte.
    let shred_bufs: Vec<_> = shreds.iter().map(|shred| shred.payload.clone()).collect();

    let ledger_path = get_tmp_ledger_path!();
    let ledger = Blockstore::open(&ledger_path).unwrap();
    ledger.insert_shreds(shreds, None, false).unwrap();

    // Read just shred 0; `bytes` is its size in the buffer.
    let mut buf = [0; 4096];
    let (_, bytes) = ledger.get_data_shreds(slot, 0, 1, &mut buf).unwrap();
    assert_eq!(buf[..bytes], shred_bufs[0][..bytes]);

    // Read shreds 0 and 1; they are packed consecutively into `buf`.
    let (last_index, bytes2) = ledger.get_data_shreds(slot, 0, 2, &mut buf).unwrap();
    assert_eq!(last_index, 1);
    assert!(bytes2 > bytes);
    {
        let shred_data_1 = &buf[..bytes];
        assert_eq!(shred_data_1, &shred_bufs[0][..bytes]);

        let shred_data_2 = &buf[bytes..bytes2];
        assert_eq!(shred_data_2, &shred_bufs[1][..bytes2 - bytes]);
    }

    // buf size part-way into shred[1], should just return shred[0]
    let mut buf = vec![0; bytes + 1];
    let (last_index, bytes3) = ledger.get_data_shreds(slot, 0, 2, &mut buf).unwrap();
    assert_eq!(last_index, 0);
    assert_eq!(bytes3, bytes);

    // One byte short of two shreds: still only shred[0] is returned.
    let mut buf = vec![0; bytes2 - 1];
    let (last_index, bytes4) = ledger.get_data_shreds(slot, 0, 2, &mut buf).unwrap();
    assert_eq!(last_index, 0);
    assert_eq!(bytes4, bytes);

    // Read the final shred of the slot.
    let mut buf = vec![0; bytes * 2];
    let (last_index, bytes6) = ledger
        .get_data_shreds(slot, num_shreds - 1, num_shreds, &mut buf)
        .unwrap();
    assert_eq!(last_index, num_shreds - 1);
    {
        let shred_data = &buf[..bytes6];
        assert_eq!(shred_data, &shred_bufs[(num_shreds - 1) as usize][..bytes6]);
    }

    // Read out of range
    let (last_index, bytes6) = ledger
        .get_data_shreds(slot, num_shreds, num_shreds + 2, &mut buf)
        .unwrap();
    // No shreds exist past the end: nothing copied, index reported as 0.
    assert_eq!(last_index, 0);
    assert_eq!(bytes6, 0);

    // Destroying database without closing it first is undefined behavior
    drop(ledger);
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
2020-04-08 18:47:16 -07:00
#[test]
fn test_shred_cleanup_check() {
    // Reads from a slot at or below `lowest_cleanup_slot` must fail after a purge.
    let slot = 1;
    let (shreds, _) = make_slot_entries(slot, 0, 100);

    let ledger_path = get_tmp_ledger_path!();
    let ledger = Blockstore::open(&ledger_path).unwrap();
    ledger.insert_shreds(shreds, None, false).unwrap();

    // Before the purge the shreds are readable.
    let mut shred_buf = [0; 4096];
    assert!(ledger.get_data_shreds(slot, 0, 1, &mut shred_buf).is_ok());

    // Purge everything up to and including `slot`, then mark the cleanup floor.
    let max_purge_slot = 1;
    ledger
        .run_purge(0, max_purge_slot, PurgeType::PrimaryIndex)
        .unwrap();
    *ledger.lowest_cleanup_slot.write().unwrap() = max_purge_slot;

    let mut shred_buf = [0; 4096];
    assert!(ledger.get_data_shreds(slot, 0, 1, &mut shred_buf).is_err());
}
2018-11-15 15:53:31 -08:00
// Inserts a slot's shreds in two steps (last shred first, then the rest) and
// checks SlotMeta progression: partial insert yields no entries, full insert
// yields all entries with consumed == received == shred count.
#[test]
fn test_insert_data_shreds_basic() {
    // Create enough entries to ensure there are at least two shreds created
    let num_entries = max_ticks_per_n_shreds(1, None) + 1;
    assert!(num_entries > 1);

    let (mut shreds, entries) = make_slot_entries(0, 0, num_entries);
    let num_shreds = shreds.len() as u64;

    let ledger_path = get_tmp_ledger_path!();
    let ledger = Blockstore::open(&ledger_path).unwrap();

    // Insert last shred, we're missing the other shreds, so no consecutive
    // shreds starting from slot 0, index 0 should exist.
    assert!(shreds.len() > 1);
    let last_shred = shreds.pop().unwrap();
    ledger.insert_shreds(vec![last_shred], None, false).unwrap();
    assert!(ledger.get_slot_entries(0, 0).unwrap().is_empty());

    let meta = ledger
        .meta(0)
        .unwrap()
        .expect("Expected new metadata object to be created");
    // `received` equals num_shreds even with one shred inserted — consistent
    // with it tracking highest-seen index + 1 rather than a count.
    assert!(meta.consumed == 0 && meta.received == num_shreds);

    // Insert the other shreds, check for consecutive returned entries
    ledger.insert_shreds(shreds, None, false).unwrap();
    let result = ledger.get_slot_entries(0, 0).unwrap();

    assert_eq!(result, entries);

    let meta = ledger
        .meta(0)
        .unwrap()
        .expect("Expected new metadata object to exist");
    assert_eq!(meta.consumed, num_shreds);
    assert_eq!(meta.received, num_shreds);
    assert_eq!(meta.parent_slot, 0);
    assert_eq!(meta.last_index, num_shreds - 1);
    assert!(meta.next_slots.is_empty());
    assert!(meta.is_connected);

    // Destroying database without closing it first is undefined behavior
    drop(ledger);
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
// Inserts a slot's shreds one at a time in reverse index order; entries only
// become readable once the final (index 0) shred arrives.
#[test]
fn test_insert_data_shreds_reverse() {
    let num_shreds = 10;
    let num_entries = max_ticks_per_n_shreds(num_shreds, None);
    let (mut shreds, entries) = make_slot_entries(0, 0, num_entries);
    // Shadow with the actual shred count produced for these entries.
    let num_shreds = shreds.len() as u64;

    let ledger_path = get_tmp_ledger_path!();
    let ledger = Blockstore::open(&ledger_path).unwrap();

    // Insert shreds in reverse, check for consecutive returned shreds
    for i in (0..num_shreds).rev() {
        let shred = shreds.pop().unwrap();
        ledger.insert_shreds(vec![shred], None, false).unwrap();
        let result = ledger.get_slot_entries(0, 0).unwrap();

        let meta = ledger
            .meta(0)
            .unwrap()
            .expect("Expected metadata object to exist");
        // The highest-index shred went in first, so last_index is known throughout.
        assert_eq!(meta.last_index, num_shreds - 1);
        if i != 0 {
            // Gap at the front remains: nothing consumable yet.
            assert_eq!(result.len(), 0);
            assert!(meta.consumed == 0 && meta.received == num_shreds as u64);
        } else {
            // Index 0 arrived last, completing the slot.
            assert_eq!(meta.parent_slot, 0);
            assert_eq!(result, entries);
            assert!(meta.consumed == num_shreds as u64 && meta.received == num_shreds as u64);
        }
    }

    // Destroying database without closing it first is undefined behavior
    drop(ledger);
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
2018-11-22 01:35:19 -08:00
2018-12-12 15:58:29 -08:00
#[test]
fn test_insert_slots() {
    // Run the shared scenario through both insertion paths.
    for (name, bulk) in vec![
        ("test_insert_data_shreds_slots_single", false),
        ("test_insert_data_shreds_slots_bulk", true),
    ] {
        test_insert_data_shreds_slots(name, bulk);
    }
}
2019-09-03 21:32:51 -07:00
/*
#[ test ]
pub fn test_iteration_order ( ) {
let slot = 0 ;
2020-01-13 13:13:52 -08:00
let blockstore_path = get_tmp_ledger_path! ( ) ;
2019-09-03 21:32:51 -07:00
{
2020-01-13 13:13:52 -08:00
let blockstore = Blockstore ::open ( & blockstore_path ) . unwrap ( ) ;
2018-11-22 01:35:19 -08:00
2019-09-03 21:32:51 -07:00
// Write entries
let num_entries = 8 ;
let entries = make_tiny_test_entries ( num_entries ) ;
let mut shreds = entries . to_single_entry_shreds ( ) ;
2018-11-22 01:35:19 -08:00
2019-09-03 21:32:51 -07:00
for ( i , b ) in shreds . iter_mut ( ) . enumerate ( ) {
b . set_index ( 1 < < ( i * 8 ) ) ;
b . set_slot ( 0 ) ;
}
2018-11-22 01:35:19 -08:00
2020-01-13 13:13:52 -08:00
blockstore
2019-09-03 21:32:51 -07:00
. write_shreds ( & shreds )
. expect ( " Expected successful write of shreds " ) ;
2020-01-13 13:13:52 -08:00
let mut db_iterator = blockstore
2019-09-03 21:32:51 -07:00
. db
. cursor ::< cf ::Data > ( )
. expect ( " Expected to be able to open database iterator " ) ;
db_iterator . seek ( ( slot , 1 ) ) ;
// Iterate through ledger
for i in 0 .. num_entries {
assert! ( db_iterator . valid ( ) ) ;
let ( _ , current_index ) = db_iterator . key ( ) . expect ( " Expected a valid key " ) ;
assert_eq! ( current_index , ( 1 as u64 ) < < ( i * 8 ) ) ;
db_iterator . next ( ) ;
}
2018-11-22 01:35:19 -08:00
}
2020-01-13 13:13:52 -08:00
Blockstore ::destroy ( & blockstore_path ) . expect ( " Expected successful database destruction " ) ;
2018-11-22 01:35:19 -08:00
}
2019-09-03 21:32:51 -07:00
* /
2018-12-11 09:14:23 -08:00
2019-02-04 15:33:43 -08:00
#[test]
pub fn test_get_slot_entries1() {
    // Entries written across two shred batches are still readable by range.
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        let entries = create_ticks(8, 0, Hash::default());

        // First half of the entries, shreds starting at index 0.
        let head_shreds = entries_to_test_shreds(entries[0..4].to_vec(), 1, 0, false, 0);
        blockstore
            .insert_shreds(head_shreds, None, false)
            .expect("Expected successful write of shreds");

        // Second half, re-indexed to start at shred index 8.
        let mut tail_shreds = entries_to_test_shreds(entries[4..].to_vec(), 1, 0, false, 0);
        for (offset, shred) in tail_shreds.iter_mut().enumerate() {
            shred.set_index(8 + offset as u32);
        }
        blockstore
            .insert_shreds(tail_shreds, None, false)
            .expect("Expected successful write of shreds");

        assert_eq!(
            blockstore.get_slot_entries(1, 0).unwrap()[2..4],
            entries[2..4],
        );
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2019-08-21 20:07:51 -07:00
// This test seems to be unnecessary with introduction of data shreds. There are no
// guarantees that a particular shred index contains a complete entry
#[test]
#[ignore]
pub fn test_get_slot_entries2() {
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();

        // Write entries
        let num_slots = 5_u64;
        // Global shred index carried across slots so each slot's shreds get
        // strictly increasing indices.
        let mut index = 0;
        for slot in 0..num_slots {
            // Slot `slot` holds slot + 1 ticks; remember the last one for the check.
            let entries = create_ticks(slot + 1, 0, Hash::default());
            let last_entry = entries.last().unwrap().clone();
            let mut shreds =
                entries_to_test_shreds(entries, slot, slot.saturating_sub(1), false, 0);
            for b in shreds.iter_mut() {
                b.set_index(index);
                b.set_slot(slot as u64);
                index += 1;
            }
            blockstore
                .insert_shreds(shreds, None, false)
                .expect("Expected successful write of shreds");
            // Reading from the last shred index should yield only the last entry
            // (assumes one entry per shred — the reason this test is #[ignore]d).
            assert_eq!(
                blockstore
                    .get_slot_entries(slot, u64::from(index - 1))
                    .unwrap(),
                vec![last_entry],
            );
        }
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2019-03-17 18:48:23 -07:00
#[test]
pub fn test_get_slot_entries3() {
    // Test inserting/fetching shreds which contain multiple entries per shred
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        let num_slots = 5_u64;
        let shreds_per_slot = 5_u64;
        // Size one serialized tick, then pick an entry count large enough to
        // span at least `shreds_per_slot` shreds worth of packet payload.
        let tick_size =
            bincode::serialized_size(&create_ticks(1, 0, Hash::default())).unwrap();
        let entries_per_slot = (shreds_per_slot * PACKET_DATA_SIZE as u64) / tick_size;

        // Write entries
        for slot in 0..num_slots {
            let entries = create_ticks(entries_per_slot, 0, Hash::default());
            let shreds =
                entries_to_test_shreds(entries.clone(), slot, slot.saturating_sub(1), false, 0);
            assert!(shreds.len() as u64 >= shreds_per_slot);
            blockstore
                .insert_shreds(shreds, None, false)
                .expect("Expected successful write of shreds");
            // Everything written to the slot must read back verbatim.
            assert_eq!(blockstore.get_slot_entries(slot, 0).unwrap(), entries);
        }
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2018-12-19 16:11:47 -08:00
// Splits each slot's shreds into odd and even halves; inserting only the odd
// half leaves the slot unconsumable (gap at index 0), and adding the even half
// completes it. Repeated with growing slot sizes to cover both parities of
// shred count.
#[test]
pub fn test_insert_data_shreds_consecutive() {
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        // Create enough entries to ensure there are at least two shreds created
        let min_entries = max_ticks_per_n_shreds(1, None) + 1;
        for i in 0..4 {
            let slot = i;
            let parent_slot = if i == 0 { 0 } else { i - 1 };
            // Write entries
            let num_entries = min_entries * (i + 1);
            let (shreds, original_entries) = make_slot_entries(slot, parent_slot, num_entries);

            let num_shreds = shreds.len() as u64;
            assert!(num_shreds > 1);
            let mut even_shreds = vec![];
            let mut odd_shreds = vec![];

            for (i, shred) in shreds.into_iter().enumerate() {
                if i % 2 == 0 {
                    even_shreds.push(shred);
                } else {
                    odd_shreds.push(shred);
                }
            }

            // Only odd indices present: index 0 is missing, so nothing consumable.
            blockstore.insert_shreds(odd_shreds, None, false).unwrap();

            assert_eq!(blockstore.get_slot_entries(slot, 0).unwrap(), vec![]);

            let meta = blockstore.meta(slot).unwrap().unwrap();
            // With an even shred count the highest index (num_shreds - 1) is odd
            // and was inserted, so `received` (highest index + 1) is num_shreds;
            // with an odd count the last shred is even and still missing.
            if num_shreds % 2 == 0 {
                assert_eq!(meta.received, num_shreds);
            } else {
                trace!("got here");
                assert_eq!(meta.received, num_shreds - 1);
            }
            assert_eq!(meta.consumed, 0);
            // last_index is only known once the slot's final shred has arrived.
            if num_shreds % 2 == 0 {
                assert_eq!(meta.last_index, num_shreds - 1);
            } else {
                assert_eq!(meta.last_index, std::u64::MAX);
            }

            // Fill in the even indices; the slot becomes complete.
            blockstore.insert_shreds(even_shreds, None, false).unwrap();

            assert_eq!(
                blockstore.get_slot_entries(slot, 0).unwrap(),
                original_entries,
            );

            let meta = blockstore.meta(slot).unwrap().unwrap();
            assert_eq!(meta.received, num_shreds);
            assert_eq!(meta.consumed, num_shreds);
            assert_eq!(meta.parent_slot, parent_slot);
            assert_eq!(meta.last_index, num_shreds - 1);
        }
    }

    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2020-09-01 22:06:06 -07:00
// Verifies that `insert_shreds` reports a CompletedDataSetInfo exactly once:
// not while the data set is still missing its first shred, once when the gap
// is filled, and never again on duplicate insertion.
#[test]
fn test_data_set_completed_on_insert() {
    let ledger_path = get_tmp_ledger_path!();
    let BlockstoreSignals { blockstore, .. } =
        Blockstore::open_with_signal(&ledger_path, None, true).unwrap();

    // Create enough entries to fill 2 shreds, only the later one is data complete
    let slot = 0;
    let num_entries = max_ticks_per_n_shreds(1, None) + 1;
    let entries = create_ticks(num_entries, slot, Hash::default());
    let shreds = entries_to_test_shreds(entries, slot, 0, true, 0);
    let num_shreds = shreds.len();
    assert!(num_shreds > 1);
    // Missing shred 0: no data set can complete, so the returned list
    // (element .0 of insert_shreds' result) is empty.
    assert!(blockstore
        .insert_shreds(shreds[1..].to_vec(), None, false)
        .unwrap()
        .0
        .is_empty());
    // Supplying shred 0 completes the set spanning all shreds of the slot.
    assert_eq!(
        blockstore
            .insert_shreds(vec![shreds[0].clone()], None, false)
            .unwrap()
            .0,
        vec![CompletedDataSetInfo {
            slot,
            start_index: 0,
            end_index: num_shreds as u32 - 1
        }]
    );
    // Inserting shreds again doesn't trigger notification
    assert!(blockstore
        .insert_shreds(shreds, None, false)
        .unwrap()
        .0
        .is_empty());
}
2019-02-07 15:10:54 -08:00
// Exercises the ledger-update signal channel: a signal fires only when an
// insertion extends the consecutive run of shreds from (slot 0, index 0), and
// one batch insertion produces at most one signal.
#[test]
pub fn test_new_shreds_signal() {
    // Initialize ledger
    let ledger_path = get_tmp_ledger_path!();
    let BlockstoreSignals {
        blockstore: ledger,
        ledger_signal_receiver: recvr,
        ..
    } = Blockstore::open_with_signal(&ledger_path, None, true).unwrap();
    let ledger = Arc::new(ledger);

    let entries_per_slot = 50;
    // Create entries for slot 0
    let (mut shreds, _) = make_slot_entries(0, 0, entries_per_slot);
    let shreds_per_slot = shreds.len() as u64;

    // Insert second shred, but we're missing the first shred, so no consecutive
    // shreds starting from slot 0, index 0 should exist.
    ledger
        .insert_shreds(vec![shreds.remove(1)], None, false)
        .unwrap();
    let timer = Duration::new(1, 0);
    assert!(recvr.recv_timeout(timer).is_err());
    // Insert first shred, now we've made a consecutive block
    ledger
        .insert_shreds(vec![shreds.remove(0)], None, false)
        .unwrap();
    // Wait to get notified of update, should only be one update
    assert!(recvr.recv_timeout(timer).is_ok());
    assert!(recvr.try_recv().is_err());
    // Insert the rest of the ticks
    ledger.insert_shreds(shreds, None, false).unwrap();
    // Wait to get notified of update, should only be one update
    assert!(recvr.recv_timeout(timer).is_ok());
    assert!(recvr.try_recv().is_err());

    // Create some other slots, and send batches of ticks for each slot such that each slot
    // is missing the tick at shred index == slot index - 1. Thus, no consecutive blocks
    // will be formed
    let num_slots = shreds_per_slot;
    let mut shreds = vec![];
    let mut missing_shreds = vec![];
    for slot in 1..num_slots + 1 {
        let (mut slot_shreds, _) = make_slot_entries(slot, slot - 1, entries_per_slot);
        let missing_shred = slot_shreds.remove(slot as usize - 1);
        shreds.extend(slot_shreds);
        missing_shreds.push(missing_shred);
    }

    // Should be no updates, since no new chains from block 0 were formed
    ledger.insert_shreds(shreds, None, false).unwrap();
    assert!(recvr.recv_timeout(timer).is_err());

    // Insert a shred for each slot that doesn't make a consecutive block, we
    // should get no updates
    // (index 2 * num_slots is far past each slot's existing gap)
    let shreds: Vec<_> = (1..num_slots + 1)
        .flat_map(|slot| {
            let (mut shred, _) = make_slot_entries(slot, slot - 1, 1);
            shred[0].set_index(2 * num_slots as u32);
            shred
        })
        .collect();
    ledger.insert_shreds(shreds, None, false).unwrap();
    assert!(recvr.recv_timeout(timer).is_err());

    // For slots 1..num_slots/2, fill in the holes in one batch insertion,
    // so we should only get one signal
    let missing_shreds2 = missing_shreds
        .drain((num_slots / 2) as usize..)
        .collect_vec();
    ledger.insert_shreds(missing_shreds, None, false).unwrap();
    assert!(recvr.recv_timeout(timer).is_ok());
    assert!(recvr.try_recv().is_err());

    // Fill in the holes for each of the remaining slots, we should get a single update
    // for each
    ledger.insert_shreds(missing_shreds2, None, false).unwrap();

    // Destroying database without closing it first is undefined behavior
    drop(ledger);
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
2019-05-09 14:10:04 -07:00
#[test]
pub fn test_completed_shreds_signal() {
    // A slot is announced on the completed-slots channel only once every one
    // of its shreds has been inserted.
    let ledger_path = get_tmp_ledger_path!();
    let BlockstoreSignals {
        blockstore: ledger,
        completed_slots_receiver: completed_rx,
        ..
    } = Blockstore::open_with_signal(&ledger_path, None, true).unwrap();
    let ledger = Arc::new(ledger);

    // Create shreds for slot 0 and set aside the first one.
    let entries_per_slot = 10;
    let (mut shreds, _) = make_slot_entries(0, 0, entries_per_slot);
    let first_shred = shreds.remove(0);

    // Everything except shred 0: the slot must not be reported complete.
    ledger.insert_shreds(shreds, None, false).unwrap();
    assert!(completed_rx.try_recv().is_err());

    // Filling the gap at index 0 completes slot 0.
    ledger.insert_shreds(vec![first_shred], None, false).unwrap();
    assert_eq!(completed_rx.try_recv().unwrap(), vec![0]);
}
#[test]
pub fn test_completed_shreds_signal_orphans() {
    // Completion signals must fire for a slot even when that slot is an
    // orphan, i.e. its ancestors have not been inserted yet.
    // Initialize ledger
    let ledger_path = get_tmp_ledger_path!();
    {
        let BlockstoreSignals {
            blockstore: ledger,
            completed_slots_receiver: recvr,
            ..
        } = Blockstore::open_with_signal(&ledger_path, None, true).unwrap();

        let entries_per_slot = 10;
        let slots = vec![2, 5, 10];
        let mut all_shreds = make_chaining_slot_entries(&slots[..], entries_per_slot);

        // Get the shreds for slot 10, chaining to slot 5
        let (mut orphan_child, _) = all_shreds.remove(2);

        // Get the shreds for slot 5 chaining to slot 2
        let (mut orphan_shreds, _) = all_shreds.remove(1);

        // Insert all but the first shred in the slot, should not be considered complete
        let orphan_child0 = orphan_child.remove(0);
        ledger.insert_shreds(orphan_child, None, false).unwrap();
        assert!(recvr.try_recv().is_err());

        // Insert first shred, slot should now be considered complete
        ledger
            .insert_shreds(vec![orphan_child0], None, false)
            .unwrap();
        assert_eq!(recvr.try_recv().unwrap(), vec![slots[2]]);

        // Insert the shreds for the orphan_slot
        let orphan_shred0 = orphan_shreds.remove(0);
        ledger.insert_shreds(orphan_shreds, None, false).unwrap();
        assert!(recvr.try_recv().is_err());

        // Insert first shred, slot should now be considered complete
        ledger
            .insert_shreds(vec![orphan_shred0], None, false)
            .unwrap();
        assert_eq!(recvr.try_recv().unwrap(), vec![slots[1]]);
    }
    // The blockstore was dropped at the end of the scope above, so the
    // temporary ledger directory can be destroyed, matching the cleanup
    // convention of the other tests in this module.
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_completed_shreds_signal_many() {
    // Inserting many complete slots in one shuffled batch must produce a
    // single signal listing all completed slots, including the disconnected
    // one (slot 4 chains to slot 1, which is never inserted).
    // Initialize ledger
    let ledger_path = get_tmp_ledger_path!();
    {
        let BlockstoreSignals {
            blockstore: ledger,
            completed_slots_receiver: recvr,
            ..
        } = Blockstore::open_with_signal(&ledger_path, None, true).unwrap();

        let entries_per_slot = 10;
        let mut slots = vec![2, 5, 10];
        let mut all_shreds = make_chaining_slot_entries(&slots[..], entries_per_slot);
        let disconnected_slot = 4;

        let (shreds0, _) = all_shreds.remove(0);
        let (shreds1, _) = all_shreds.remove(0);
        let (shreds2, _) = all_shreds.remove(0);
        let (shreds3, _) = make_slot_entries(disconnected_slot, 1, entries_per_slot);

        let mut all_shreds: Vec<_> = vec![shreds0, shreds1, shreds2, shreds3]
            .into_iter()
            .flatten()
            .collect();

        // Shuffle so completion does not depend on insertion order.
        all_shreds.shuffle(&mut thread_rng());
        ledger.insert_shreds(all_shreds, None, false).unwrap();
        let mut result = recvr.try_recv().unwrap();
        result.sort_unstable();
        slots.push(disconnected_slot);
        slots.sort_unstable();
        assert_eq!(result, slots);
    }
    // The blockstore was dropped at the end of the scope above, so the
    // temporary ledger directory can be destroyed, matching the cleanup
    // convention of the other tests in this module.
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
2019-02-07 15:10:54 -08:00
#[test]
pub fn test_handle_chaining_basic() {
    // Inserts slots 1, 2, 0 (in that order) and verifies that each SlotMeta's
    // parent/child links are wired up correctly, and that `is_connected` only
    // becomes true once the chain back to slot 0 is complete.
    let blockstore_path = get_tmp_ledger_path!();
    {
        let entries_per_slot = 5;
        let num_slots = 3;
        let blockstore = Blockstore::open(&blockstore_path).unwrap();

        // Construct the shreds; `shreds` is laid out as [slot0 | slot1 | slot2].
        let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
        let shreds_per_slot = shreds.len() / num_slots as usize;

        // 1) Write to the first slot
        let shreds1 = shreds
            .drain(shreds_per_slot..2 * shreds_per_slot)
            .collect_vec();
        blockstore.insert_shreds(shreds1, None, false).unwrap();
        let s1 = blockstore.meta(1).unwrap().unwrap();
        assert!(s1.next_slots.is_empty());
        // Slot 1 is not trunk because slot 0 hasn't been inserted yet
        assert!(!s1.is_connected);
        assert_eq!(s1.parent_slot, 0);
        assert_eq!(s1.last_index, shreds_per_slot as u64 - 1);

        // 2) Write to the second slot
        // NOTE: after the drain above, `shreds` is [slot0 | slot2], so the
        // same index range now yields slot 2's shreds.
        let shreds2 = shreds
            .drain(shreds_per_slot..2 * shreds_per_slot)
            .collect_vec();
        blockstore.insert_shreds(shreds2, None, false).unwrap();
        let s2 = blockstore.meta(2).unwrap().unwrap();
        assert!(s2.next_slots.is_empty());
        // Slot 2 is not trunk because slot 0 hasn't been inserted yet
        assert!(!s2.is_connected);
        assert_eq!(s2.parent_slot, 1);
        assert_eq!(s2.last_index, shreds_per_slot as u64 - 1);

        // Check the first slot again, it should chain to the second slot,
        // but still isn't part of the trunk
        let s1 = blockstore.meta(1).unwrap().unwrap();
        assert_eq!(s1.next_slots, vec![2]);
        assert!(!s1.is_connected);
        assert_eq!(s1.parent_slot, 0);
        assert_eq!(s1.last_index, shreds_per_slot as u64 - 1);

        // 3) Write to the zeroth slot, check that every slot
        // is now part of the trunk
        blockstore.insert_shreds(shreds, None, false).unwrap();
        for i in 0..3 {
            let s = blockstore.meta(i).unwrap().unwrap();
            // The last slot will not chain to any other slots
            if i != 2 {
                assert_eq!(s.next_slots, vec![i + 1]);
            }
            // Slot 0 is its own parent by convention; everything else chains
            // to the previous slot.
            if i == 0 {
                assert_eq!(s.parent_slot, 0);
            } else {
                assert_eq!(s.parent_slot, i - 1);
            }
            assert_eq!(s.last_index, shreds_per_slot as u64 - 1);
            assert!(s.is_connected);
        }
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_handle_chaining_missing_slots() {
    // Inserts the odd-numbered slots first, checks that the even-numbered
    // gaps show up as orphans (unknown parent), then fills in the gaps and
    // verifies the whole chain connects.
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        let num_slots = 30;
        let entries_per_slot = 5;

        // Build shreds for every slot, split into two batches: odd slots go
        // in first, even slots are held back to create the gaps.
        let mut odd_slot_shreds = vec![];
        let mut even_slot_shreds = vec![];
        let mut shreds_per_slot = 2;
        for slot in 0..num_slots {
            // Slot 0 is its own parent; everyone else chains to slot - 1.
            let parent_slot = slot.saturating_sub(1);
            let (slot_shreds, _) = make_slot_entries(slot, parent_slot, entries_per_slot);
            shreds_per_slot = slot_shreds.len();

            if slot % 2 == 1 {
                odd_slot_shreds.extend(slot_shreds);
            } else {
                even_slot_shreds.extend(slot_shreds);
            }
        }

        // Write the shreds for every other (odd) slot
        blockstore
            .insert_shreds(odd_slot_shreds, None, false)
            .unwrap();

        // Check metadata
        for i in 0..num_slots {
            // If "i" is the index of a slot we just inserted, then next_slots should be empty
            // for slot "i" because no slots chain to that slot, because slot i + 1 is missing.
            // However, if it's a slot we haven't inserted, aka one of the gaps, then one of the
            // slots we just inserted will chain to that gap, so next_slots for that orphan slot
            // won't be empty, but the parent slot is unknown so should equal std::u64::MAX.
            let meta = blockstore.meta(i as u64).unwrap().unwrap();
            if i % 2 == 0 {
                assert_eq!(meta.next_slots, vec![i as u64 + 1]);
                assert_eq!(meta.parent_slot, std::u64::MAX);
            } else {
                assert!(meta.next_slots.is_empty());
                assert_eq!(meta.parent_slot, i - 1);
            }
            if i == 0 {
                assert!(meta.is_connected);
            } else {
                assert!(!meta.is_connected);
            }
        }

        // Write the shreds for the other half of the slots that we didn't insert earlier
        blockstore
            .insert_shreds(even_slot_shreds, None, false)
            .unwrap();

        for i in 0..num_slots {
            // Check that all the slots chain correctly once the missing slots
            // have been filled
            let meta = blockstore.meta(i as u64).unwrap().unwrap();
            if i != num_slots - 1 {
                assert_eq!(meta.next_slots, vec![i as u64 + 1]);
            } else {
                assert!(meta.next_slots.is_empty());
            }
            if i == 0 {
                assert_eq!(meta.parent_slot, 0);
            } else {
                assert_eq!(meta.parent_slot, i - 1);
            }
            assert_eq!(meta.last_index, shreds_per_slot as u64 - 1);
            assert!(meta.is_connected);
        }
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
#[allow(clippy::cognitive_complexity)]
pub fn test_forward_chaining_is_connected() {
    // Inserts 15 slots but withholds the first shred of every 3rd slot, then
    // feeds those missing shreds back one at a time, checking after each that
    // `is_connected` has propagated forward through every now-complete slot.
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        let num_slots = 15;
        // Create enough entries to ensure there are at least two shreds created
        let entries_per_slot = max_ticks_per_n_shreds(1, None) + 1;
        assert!(entries_per_slot > 1);

        let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
        let shreds_per_slot = shreds.len() / num_slots as usize;
        assert!(shreds_per_slot > 1);

        // Write the shreds such that every 3rd slot has a gap in the beginning
        let mut missing_shreds = vec![];
        for slot in 0..num_slots {
            let mut shreds_for_slot = shreds.drain(..shreds_per_slot).collect_vec();
            if slot % 3 == 0 {
                // Hold back the first shred so this slot stays incomplete.
                let shred0 = shreds_for_slot.remove(0);
                missing_shreds.push(shred0);
            }
            blockstore
                .insert_shreds(shreds_for_slot, None, false)
                .unwrap();
        }

        // Check metadata: chaining links are present immediately, but nothing
        // past slot 0 is connected yet because slot 0 itself is incomplete.
        for i in 0..num_slots {
            let s = blockstore.meta(i as u64).unwrap().unwrap();
            // The last slot will not chain to any other slots
            if i as u64 != num_slots - 1 {
                assert_eq!(s.next_slots, vec![i as u64 + 1]);
            } else {
                assert!(s.next_slots.is_empty());
            }
            if i == 0 {
                assert_eq!(s.parent_slot, 0);
            } else {
                assert_eq!(s.parent_slot, i - 1);
            }
            assert_eq!(s.last_index, shreds_per_slot as u64 - 1);

            // Other than slot 0, no slots should be part of the trunk
            if i != 0 {
                assert!(!s.is_connected);
            } else {
                assert!(s.is_connected);
            }
        }

        // Iteratively finish every 3rd slot, and check that all slots up to and including
        // slot_index + 3 become part of the trunk
        for slot_index in 0..num_slots {
            if slot_index % 3 == 0 {
                let shred = missing_shreds.remove(0);
                blockstore.insert_shreds(vec![shred], None, false).unwrap();
                // Re-verify every slot after each fill-in: completing
                // `slot_index` also connects the two already-complete slots
                // that follow it, up to the next gap.
                for i in 0..num_slots {
                    let s = blockstore.meta(i as u64).unwrap().unwrap();
                    if i != num_slots - 1 {
                        assert_eq!(s.next_slots, vec![i as u64 + 1]);
                    } else {
                        assert!(s.next_slots.is_empty());
                    }
                    if i <= slot_index as u64 + 3 {
                        assert!(s.is_connected);
                    } else {
                        assert!(!s.is_connected);
                    }
                    if i == 0 {
                        assert_eq!(s.parent_slot, 0);
                    } else {
                        assert_eq!(s.parent_slot, i - 1);
                    }
                    assert_eq!(s.last_index, shreds_per_slot as u64 - 1);
                }
            }
        }
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2019-09-03 21:32:51 -07:00
/*
#[ test ]
pub fn test_chaining_tree ( ) {
2020-01-13 13:13:52 -08:00
let blockstore_path = get_tmp_ledger_path! ( ) ;
2019-09-03 21:32:51 -07:00
{
2020-01-13 13:13:52 -08:00
let blockstore = Blockstore ::open ( & blockstore_path ) . unwrap ( ) ;
2019-09-03 21:32:51 -07:00
let num_tree_levels = 6 ;
assert! ( num_tree_levels > 1 ) ;
let branching_factor : u64 = 4 ;
// Number of slots that will be in the tree
let num_slots = ( branching_factor . pow ( num_tree_levels ) - 1 ) / ( branching_factor - 1 ) ;
let erasure_config = ErasureConfig ::default ( ) ;
let entries_per_slot = erasure_config . num_data ( ) as u64 ;
assert! ( entries_per_slot > 1 ) ;
let ( mut shreds , _ ) = make_many_slot_entries ( 0 , num_slots , entries_per_slot ) ;
// Insert tree one slot at a time in a random order
let mut slots : Vec < _ > = ( 0 .. num_slots ) . collect ( ) ;
// Get shreds for the slot
slots . shuffle ( & mut thread_rng ( ) ) ;
for slot in slots {
// Get shreds for the slot "slot"
let slot_shreds = & mut shreds
[ ( slot * entries_per_slot ) as usize .. ( ( slot + 1 ) * entries_per_slot ) as usize ] ;
for shred in slot_shreds . iter_mut ( ) {
// Get the parent slot of the slot in the tree
let slot_parent = {
if slot = = 0 {
0
} else {
( slot - 1 ) / branching_factor
}
} ;
shred . set_parent ( slot_parent ) ;
}
2019-02-07 15:10:54 -08:00
2019-09-03 21:32:51 -07:00
let shared_shreds : Vec < _ > = slot_shreds
. iter ( )
. cloned ( )
. map ( | shred | Arc ::new ( RwLock ::new ( shred ) ) )
. collect ( ) ;
let mut coding_generator = CodingGenerator ::new_from_config ( & erasure_config ) ;
let coding_shreds = coding_generator . next ( & shared_shreds ) ;
assert_eq! ( coding_shreds . len ( ) , erasure_config . num_coding ( ) ) ;
2019-02-13 15:01:56 -08:00
2019-09-03 21:32:51 -07:00
let mut rng = thread_rng ( ) ;
2019-02-13 15:01:56 -08:00
2019-09-03 21:32:51 -07:00
// Randomly pick whether to insert erasure or coding shreds first
if rng . gen_bool ( 0.5 ) {
2020-01-13 13:13:52 -08:00
blockstore . write_shreds ( slot_shreds ) . unwrap ( ) ;
blockstore . put_shared_coding_shreds ( & coding_shreds ) . unwrap ( ) ;
2019-09-03 21:32:51 -07:00
} else {
2020-01-13 13:13:52 -08:00
blockstore . put_shared_coding_shreds ( & coding_shreds ) . unwrap ( ) ;
blockstore . write_shreds ( slot_shreds ) . unwrap ( ) ;
2019-09-03 21:32:51 -07:00
}
}
// Make sure everything chains correctly
let last_level =
( branching_factor . pow ( num_tree_levels - 1 ) - 1 ) / ( branching_factor - 1 ) ;
for slot in 0 .. num_slots {
2020-01-13 13:13:52 -08:00
let slot_meta = blockstore . meta ( slot ) . unwrap ( ) . unwrap ( ) ;
2019-09-03 21:32:51 -07:00
assert_eq! ( slot_meta . consumed , entries_per_slot ) ;
assert_eq! ( slot_meta . received , entries_per_slot ) ;
assert! ( slot_meta . is_connected ) ;
2019-02-13 15:01:56 -08:00
let slot_parent = {
2019-03-05 14:18:29 -08:00
if slot = = 0 {
2019-02-13 15:01:56 -08:00
0
} else {
2019-03-05 14:18:29 -08:00
( slot - 1 ) / branching_factor
2019-02-13 15:01:56 -08:00
}
} ;
2019-09-03 21:32:51 -07:00
assert_eq! ( slot_meta . parent_slot , slot_parent ) ;
2019-02-13 15:01:56 -08:00
2019-09-03 21:32:51 -07:00
let expected_children : HashSet < _ > = {
if slot > = last_level {
HashSet ::new ( )
} else {
let first_child_slot = min ( num_slots - 1 , slot * branching_factor + 1 ) ;
let last_child_slot = min ( num_slots - 1 , ( slot + 1 ) * branching_factor ) ;
( first_child_slot .. last_child_slot + 1 ) . collect ( )
}
} ;
2019-02-13 15:01:56 -08:00
2019-09-03 21:32:51 -07:00
let result : HashSet < _ > = slot_meta . next_slots . iter ( ) . cloned ( ) . collect ( ) ;
if expected_children . len ( ) ! = 0 {
assert_eq! ( slot_meta . next_slots . len ( ) , branching_factor as usize ) ;
2019-02-13 15:01:56 -08:00
} else {
2019-09-03 21:32:51 -07:00
assert_eq! ( slot_meta . next_slots . len ( ) , 0 ) ;
2019-02-13 15:01:56 -08:00
}
2019-09-03 21:32:51 -07:00
assert_eq! ( expected_children , result ) ;
2019-02-13 15:01:56 -08:00
}
2019-09-03 21:32:51 -07:00
// No orphan slots should exist
2020-01-13 13:13:52 -08:00
assert! ( blockstore . orphans_cf . is_empty ( ) . unwrap ( ) )
2019-02-13 15:01:56 -08:00
}
2019-03-29 16:07:24 -07:00
2020-01-13 13:13:52 -08:00
Blockstore ::destroy ( & blockstore_path ) . expect ( " Expected successful database destruction " ) ;
2019-02-13 15:01:56 -08:00
}
2019-09-03 21:32:51 -07:00
* /
2019-02-07 15:10:54 -08:00
#[test]
pub fn test_get_slots_since() {
    // `get_slots_since` maps each requested slot that has a SlotMeta to that
    // meta's `next_slots` list; requested slots with no meta are omitted
    // (checked below via the [0, 1] query).
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();

        // Small helper to build the expected map from (slot, children) pairs.
        let expected_map = |pairs: &[(u64, Vec<u64>)]| -> HashMap<u64, Vec<u64>> {
            pairs.iter().cloned().collect()
        };

        // Slot doesn't exist
        assert!(blockstore.get_slots_since(&[0]).unwrap().is_empty());

        let mut meta0 = SlotMeta::new(0, 0);
        blockstore.meta_cf.put(0, &meta0).unwrap();

        // Slot exists, chains to nothing
        assert_eq!(
            blockstore.get_slots_since(&[0]).unwrap(),
            expected_map(&[(0, vec![])])
        );

        meta0.next_slots = vec![1, 2];
        blockstore.meta_cf.put(0, &meta0).unwrap();

        // Slot exists, chains to some other slots
        let expected = expected_map(&[(0, vec![1, 2])]);
        assert_eq!(blockstore.get_slots_since(&[0]).unwrap(), expected);
        // Slot 1 has no meta, so asking for it adds nothing to the result.
        assert_eq!(blockstore.get_slots_since(&[0, 1]).unwrap(), expected);

        let mut meta3 = SlotMeta::new(3, 1);
        meta3.next_slots = vec![10, 5];
        blockstore.meta_cf.put(3, &meta3).unwrap();
        assert_eq!(
            blockstore.get_slots_since(&[0, 1, 3]).unwrap(),
            expected_map(&[(0, vec![1, 2]), (3, vec![10, 5])])
        );
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2019-03-29 16:07:24 -07:00
#[test]
fn test_orphans() {
    // Exercises the orphans column family: a slot is an orphan while its
    // parent is absent from the blockstore, and leaves the orphans cf as soon
    // as the parent's shreds arrive.
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();

        // Create shreds and entries for slots 0..3 (one entry per slot);
        // `shreds` is laid out as [slot0 | slot1 | slot2].
        let entries_per_slot = 1;
        let (mut shreds, _) = make_many_slot_entries(0, 3, entries_per_slot);
        let shreds_per_slot = shreds.len() / 3;

        // Write slot 2, which chains to slot 1. We're missing slot 0,
        // so slot 1 is the orphan
        let shreds_for_slot = shreds.drain((shreds_per_slot * 2)..).collect_vec();
        blockstore
            .insert_shreds(shreds_for_slot, None, false)
            .unwrap();
        let meta = blockstore
            .meta(1)
            .expect("Expect database get to succeed")
            .unwrap();
        assert!(is_orphan(&meta));
        assert_eq!(
            blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(),
            vec![1]
        );

        // Write slot 1 which chains to slot 0, so now slot 0 is the
        // orphan, and slot 1 is no longer the orphan.
        let shreds_for_slot = shreds.drain(shreds_per_slot..).collect_vec();
        blockstore
            .insert_shreds(shreds_for_slot, None, false)
            .unwrap();
        let meta = blockstore
            .meta(1)
            .expect("Expect database get to succeed")
            .unwrap();
        assert!(!is_orphan(&meta));
        let meta = blockstore
            .meta(0)
            .expect("Expect database get to succeed")
            .unwrap();
        assert!(is_orphan(&meta));
        assert_eq!(
            blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(),
            vec![0]
        );

        // Write some slot that also chains to existing slots and orphan,
        // nothing should change
        let (shred4, _) = make_slot_entries(4, 0, 1);
        let (shred5, _) = make_slot_entries(5, 1, 1);
        blockstore.insert_shreds(shred4, None, false).unwrap();
        blockstore.insert_shreds(shred5, None, false).unwrap();
        assert_eq!(
            blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(),
            vec![0]
        );

        // Write zeroth slot, no more orphans
        blockstore.insert_shreds(shreds, None, false).unwrap();
        for i in 0..3 {
            let meta = blockstore
                .meta(i)
                .expect("Expect database get to succeed")
                .unwrap();
            assert!(!is_orphan(&meta));
        }
        // Orphans cf is empty
        assert!(blockstore.orphans_cf.is_empty().unwrap())
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2019-08-20 17:16:06 -07:00
fn test_insert_data_shreds_slots ( name : & str , should_bulk_write : bool ) {
2020-01-13 13:13:52 -08:00
let blockstore_path = get_ledger_path_from_name ( name ) ;
2019-02-07 15:10:54 -08:00
{
2020-01-13 13:13:52 -08:00
let blockstore = Blockstore ::open ( & blockstore_path ) . unwrap ( ) ;
2019-02-07 15:10:54 -08:00
2019-09-03 21:32:51 -07:00
// Create shreds and entries
2020-12-13 17:26:34 -08:00
let num_entries = 20_ u64 ;
2019-02-12 19:54:18 -08:00
let mut entries = vec! [ ] ;
2019-08-20 17:16:06 -07:00
let mut shreds = vec! [ ] ;
let mut num_shreds_per_slot = 0 ;
2019-03-05 14:18:29 -08:00
for slot in 0 .. num_entries {
2019-02-12 19:54:18 -08:00
let parent_slot = {
2019-03-05 14:18:29 -08:00
if slot = = 0 {
2019-02-12 19:54:18 -08:00
0
} else {
2019-03-05 14:18:29 -08:00
slot - 1
2019-02-12 19:54:18 -08:00
}
} ;
2019-09-03 21:32:51 -07:00
let ( mut shred , entry ) = make_slot_entries ( slot , parent_slot , 1 ) ;
2019-08-20 17:16:06 -07:00
num_shreds_per_slot = shred . len ( ) as u64 ;
shred
. iter_mut ( )
. enumerate ( )
2019-10-21 16:15:10 -07:00
. for_each ( | ( _ , shred ) | shred . set_index ( 0 ) ) ;
2019-08-20 17:16:06 -07:00
shreds . extend ( shred ) ;
2019-02-12 19:54:18 -08:00
entries . extend ( entry ) ;
2019-02-07 15:10:54 -08:00
}
2019-08-20 17:16:06 -07:00
let num_shreds = shreds . len ( ) ;
2019-09-03 21:32:51 -07:00
// Write shreds to the database
2019-02-07 15:10:54 -08:00
if should_bulk_write {
2020-01-13 13:13:52 -08:00
blockstore . insert_shreds ( shreds , None , false ) . unwrap ( ) ;
2019-02-07 15:10:54 -08:00
} else {
2019-08-20 17:16:06 -07:00
for _ in 0 .. num_shreds {
let shred = shreds . remove ( 0 ) ;
2020-01-13 13:13:52 -08:00
blockstore . insert_shreds ( vec! [ shred ] , None , false ) . unwrap ( ) ;
2019-02-07 15:10:54 -08:00
}
}
for i in 0 .. num_entries - 1 {
assert_eq! (
2020-04-09 13:09:59 -07:00
blockstore . get_slot_entries ( i , 0 ) . unwrap ( ) [ 0 ] ,
2019-02-12 19:54:18 -08:00
entries [ i as usize ]
2019-02-07 15:10:54 -08:00
) ;
2020-01-13 13:13:52 -08:00
let meta = blockstore . meta ( i ) . unwrap ( ) . unwrap ( ) ;
2019-10-21 16:15:10 -07:00
assert_eq! ( meta . received , 1 ) ;
assert_eq! ( meta . last_index , 0 ) ;
2019-02-07 15:10:54 -08:00
if i ! = 0 {
2019-02-12 19:54:18 -08:00
assert_eq! ( meta . parent_slot , i - 1 ) ;
2019-10-21 16:15:10 -07:00
assert_eq! ( meta . consumed , 1 ) ;
2019-02-07 15:10:54 -08:00
} else {
2019-02-12 19:54:18 -08:00
assert_eq! ( meta . parent_slot , 0 ) ;
2019-08-20 17:16:06 -07:00
assert_eq! ( meta . consumed , num_shreds_per_slot ) ;
2019-02-07 15:10:54 -08:00
}
}
}
2020-01-13 13:13:52 -08:00
Blockstore ::destroy ( & blockstore_path ) . expect ( " Expected successful database destruction " ) ;
2019-02-07 15:10:54 -08:00
}
2019-02-12 19:54:18 -08:00
2019-03-27 23:55:51 -07:00
#[test]
fn test_find_missing_data_indexes() {
    // Inserts shreds at indexes 0, gap, 2*gap, ... and checks that
    // `find_missing_data_indexes` reports exactly the indexes in between,
    // honoring the start/end bounds and the max-results limit.
    let slot = 0;
    let blockstore_path = get_tmp_ledger_path!();
    let blockstore = Blockstore::open(&blockstore_path).unwrap();

    // Write entries
    let gap: u64 = 10;
    assert!(gap > 3);
    // Create enough entries to ensure there are at least two shreds created
    let num_entries = max_ticks_per_n_shreds(1, None) + 1;
    let entries = create_ticks(num_entries, 0, Hash::default());
    let mut shreds = entries_to_test_shreds(entries, slot, 0, true, 0);
    let num_shreds = shreds.len();
    assert!(num_shreds > 1);
    // Spread the shreds out so that shred k lands at index k * gap.
    for (i, s) in shreds.iter_mut().enumerate() {
        s.set_index(i as u32 * gap as u32);
        s.set_slot(slot);
    }
    blockstore.insert_shreds(shreds, None, false).unwrap();

    // Index of the first shred is 0
    // Index of the second shred is "gap"
    // Thus, the missing indexes should then be [1, gap - 1] for the input index
    // range of [0, gap)
    let expected: Vec<u64> = (1..gap).collect();
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, 0, 0, gap, gap as usize),
        expected
    );
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, 0, 1, gap, (gap - 1) as usize),
        expected,
    );
    // A tighter end bound drops the last missing index...
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, 0, 0, gap - 1, (gap - 1) as usize),
        &expected[..expected.len() - 1],
    );
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, 0, gap - 2, gap, gap as usize),
        vec![gap - 2, gap - 1],
    );
    // ...and a max-results limit of 1 truncates the answer.
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, 0, gap - 2, gap, 1),
        vec![gap - 2],
    );
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, 0, 0, gap, 1),
        vec![1],
    );

    // Test with a range that encompasses a shred with index == gap which was
    // already inserted.
    let mut expected: Vec<u64> = (1..gap).collect();
    expected.push(gap + 1);
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, 0, 0, gap + 2, (gap + 2) as usize),
        expected,
    );
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, 0, 0, gap + 2, (gap - 1) as usize),
        &expected[..expected.len() - 1],
    );

    // Exhaustively check every [j*gap, i*gap) window: the missing indexes are
    // everything in the window except the multiples of `gap` themselves.
    for i in 0..num_shreds as u64 {
        for j in 0..i {
            let expected: Vec<u64> = (j..i)
                .flat_map(|k| {
                    let begin = k * gap + 1;
                    let end = (k + 1) * gap;
                    begin..end
                })
                .collect();
            assert_eq!(
                blockstore.find_missing_data_indexes(
                    slot,
                    0,
                    j * gap,
                    i * gap,
                    ((i - j) * gap) as usize
                ),
                expected,
            );
        }
    }
    drop(blockstore);
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2019-11-07 11:08:09 -08:00
#[test]
fn test_find_missing_data_indexes_timeout() {
    let slot = 0;
    let ledger_path = get_tmp_ledger_path!();
    let blockstore = Blockstore::open(&ledger_path).unwrap();

    // Insert shreds at indexes 0, 10, 20, ... leaving a gap of `gap` between
    // consecutive data shreds.
    let gap: u64 = 10;
    let shreds: Vec<_> = (0..64)
        .map(|i| {
            Shred::new_from_data(
                slot,
                (i * gap) as u32,
                0,
                None,
                false,
                false,
                i as u8,
                0,
                (i * gap) as u32,
            )
        })
        .collect();
    blockstore.insert_shreds(shreds, None, false).unwrap();

    // With `first_timestamp` set to "now", no insertion is old enough to be
    // reported missing yet.
    let empty: Vec<u64> = vec![];
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, timestamp(), 0, 50, 1),
        empty
    );
    // Backdating the reference timestamp makes the gaps visible.
    let expected: Vec<_> = (1..=9).collect();
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, timestamp() - 400, 0, 50, 9),
        expected
    );

    drop(blockstore);
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
2019-03-27 23:55:51 -07:00
#[test]
fn test_find_missing_data_indexes_sanity() {
    let slot = 0;
    let ledger_path = get_tmp_ledger_path!();
    let blockstore = Blockstore::open(&ledger_path).unwrap();

    // Early-exit conditions: empty or inverted ranges, or max_missing == 0,
    // all yield no results.
    let empty: Vec<u64> = vec![];
    assert_eq!(blockstore.find_missing_data_indexes(slot, 0, 0, 0, 1), empty);
    assert_eq!(blockstore.find_missing_data_indexes(slot, 0, 5, 5, 1), empty);
    assert_eq!(blockstore.find_missing_data_indexes(slot, 0, 4, 3, 1), empty);
    assert_eq!(blockstore.find_missing_data_indexes(slot, 0, 1, 2, 0), empty);

    // Keep only two shreds and park them at indexes ONE and OTHER.
    let entries = create_ticks(100, 0, Hash::default());
    let mut shreds = entries_to_test_shreds(entries, slot, 0, true, 0);
    assert!(shreds.len() > 2);
    shreds.drain(2..);

    const ONE: u64 = 1;
    const OTHER: u64 = 4;

    shreds[0].set_index(ONE as u32);
    shreds[1].set_index(OTHER as u32);

    blockstore.insert_shreds(shreds, None, false).unwrap();

    const STARTS: u64 = OTHER * 2;
    const END: u64 = OTHER * 3;
    const MAX: usize = 10;

    // Only indexes ONE and OTHER are present, so every other index in
    // [start, END) must be reported missing.
    for start in 0..STARTS {
        let result = blockstore.find_missing_data_indexes(
            slot, 0, start, // start
            END,  // end
            MAX,  // max
        );
        let expected: Vec<u64> = (start..END).filter(|i| *i != ONE && *i != OTHER).collect();
        assert_eq!(result, expected);
    }

    drop(blockstore);
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_no_missing_shred_indexes() {
    let slot = 0;
    let ledger_path = get_tmp_ledger_path!();
    let blockstore = Blockstore::open(&ledger_path).unwrap();

    // Write a contiguous run of shreds: no index should ever be missing.
    let num_entries = 10;
    let entries = create_ticks(num_entries, 0, Hash::default());
    let shreds = entries_to_test_shreds(entries, slot, 0, true, 0);
    let num_shreds = shreds.len();
    blockstore.insert_shreds(shreds, None, false).unwrap();

    // Check every sub-range [j, i) of the inserted indexes.
    let empty: Vec<u64> = vec![];
    for i in 0..num_shreds as u64 {
        for j in 0..i {
            assert_eq!(
                blockstore.find_missing_data_indexes(slot, 0, j, i, (i - j) as usize),
                empty
            );
        }
    }

    drop(blockstore);
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
2019-04-25 00:04:49 -07:00
#[test]
pub fn test_should_insert_data_shred() {
    solana_logger::setup();
    let (mut shreds, _) = make_slot_entries(0, 0, 200);
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        let last_root = RwLock::new(0);

        // Insert the first 5 shreds; there is no "is_last" shred yet.
        blockstore
            .insert_shreds(shreds[0..5].to_vec(), None, false)
            .unwrap();

        let slot_meta = blockstore.meta(0).unwrap().unwrap();
        // A shred whose declared size exceeds what a shred may carry must be
        // rejected.
        let mut shred5 = shreds[5].clone();
        shred5.payload.push(10);
        shred5.data_header.size = shred5.payload.len() as u16;
        assert!(!blockstore.should_insert_data_shred(
            &shred5,
            &slot_meta,
            &HashMap::new(),
            &last_root,
            None,
            ShredSource::Turbine
        ));

        // An empty shred (one with no data) must be accepted: such shreds may
        // be used as signals (broadcast emits one to mark an interrupted
        // slot). Reuse shred5's header values to avoid a false negative.
        let mut empty_shred = Shred::new_from_data(
            shred5.common_header.slot,
            shred5.common_header.index,
            shred5.data_header.parent_offset,
            None, // data
            true, // is_last_data
            true, // is_last_in_slot
            0,    // reference_tick
            shred5.common_header.version,
            shred5.common_header.fec_set_index,
        );
        assert!(blockstore.should_insert_data_shred(
            &empty_shred,
            &slot_meta,
            &HashMap::new(),
            &last_root,
            None,
            ShredSource::Repaired,
        ));
        empty_shred.data_header.size = 0;
        assert!(!blockstore.should_insert_data_shred(
            &empty_shred,
            &slot_meta,
            &HashMap::new(),
            &last_root,
            None,
            ShredSource::Recovered,
        ));

        // Skip over shred 7 and insert shred 8 so `received` advances to 9.
        blockstore
            .insert_shreds(shreds[8..9].to_vec(), None, false)
            .unwrap();
        let slot_meta = blockstore.meta(0).unwrap().unwrap();
        assert_eq!(slot_meta.received, 9);

        // An "is_last" shred whose index is below the received index must be
        // rejected and the slot flagged as containing duplicates.
        let shred7 = {
            if shreds[7].is_data() {
                shreds[7].set_last_in_slot();
                shreds[7].clone()
            } else {
                panic!("Shred in unexpected format")
            }
        };
        assert!(!blockstore.should_insert_data_shred(
            &shred7,
            &slot_meta,
            &HashMap::new(),
            &last_root,
            None,
            ShredSource::Repaired,
        ));
        assert!(blockstore.has_duplicate_shreds_in_slot(0));

        // Insert all pending shreds
        let mut shred8 = shreds[8].clone();
        blockstore.insert_shreds(shreds, None, false).unwrap();
        let slot_meta = blockstore.meta(0).unwrap().unwrap();

        // Trying to insert a shred with index > the "is_last" shred should fail.
        // NOTE(review): `shred8` is mutated here but the assertion below checks
        // `shred7` again, so the mutation has no observable effect — looks
        // like a copy-paste slip (and `set_slot` vs. the stated "index" is
        // also suspect); confirm the intended shred/setter before changing.
        if shred8.is_data() {
            shred8.set_slot(slot_meta.last_index + 1);
        } else {
            panic!("Shred in unexpected format")
        }
        assert!(!blockstore.should_insert_data_shred(
            &shred7,
            &slot_meta,
            &HashMap::new(),
            &last_root,
            None,
            ShredSource::Repaired,
        ));
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_is_data_shred_present() {
    let (shreds, _) = make_slot_entries(0, 0, 200);
    let ledger_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&ledger_path).unwrap();
        let index_cf = blockstore.db.column::<cf::Index>();
        blockstore
            .insert_shreds(shreds[0..5].to_vec(), None, false)
            .unwrap();

        // A shred whose index is below `slot_meta.consumed` is present by
        // definition.
        let slot_meta = blockstore.meta(0).unwrap().unwrap();
        let index = index_cf.get(0).unwrap().unwrap();
        assert_eq!(slot_meta.consumed, 5);
        assert!(Blockstore::is_data_shred_present(
            &shreds[1],
            &slot_meta,
            index.data(),
        ));

        // A shred past `consumed` is found through the index column instead.
        blockstore
            .insert_shreds(shreds[6..7].to_vec(), None, false)
            .unwrap();
        let slot_meta = blockstore.meta(0).unwrap().unwrap();
        let index = index_cf.get(0).unwrap().unwrap();
        assert!(Blockstore::is_data_shred_present(
            &shreds[6],
            &slot_meta,
            index.data()
        ));
    }
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
2020-12-09 23:14:31 -08:00
#[test]
pub fn test_check_cache_coding_shred() {
    let ledger_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&ledger_path).unwrap();

        let slot = 1;
        let (shred, coding) = Shredder::new_coding_shred_header(slot, 11, 11, 11, 11, 0);
        let coding_shred = Shred::new_empty_from_header(shred, DataShredHeader::default(), coding);

        let mut erasure_metas = HashMap::new();
        let mut index_working_set = HashMap::new();
        let mut just_received_coding_shreds = HashMap::new();
        let mut index_meta_time = 0;

        // First insertion into the cache succeeds and must not invoke the
        // duplicate callback.
        assert!(blockstore.check_cache_coding_shred(
            coding_shred.clone(),
            &mut erasure_metas,
            &mut index_working_set,
            &mut just_received_coding_shreds,
            &mut index_meta_time,
            &|_shred| {
                panic!("no dupes");
            },
            false,
            false,
        ));

        // Re-inserting the same shred fails and fires the duplicate callback
        // exactly once.
        use std::sync::atomic::{AtomicUsize, Ordering};
        let counter = AtomicUsize::new(0);
        assert!(!blockstore.check_cache_coding_shred(
            coding_shred,
            &mut erasure_metas,
            &mut index_working_set,
            &mut just_received_coding_shreds,
            &mut index_meta_time,
            &|_shred| {
                counter.fetch_add(1, Ordering::Relaxed);
            },
            false,
            false,
        ));
        assert_eq!(counter.load(Ordering::Relaxed), 1);
    }
}
2019-09-04 17:14:42 -07:00
#[test]
pub fn test_should_insert_coding_shred() {
    let ledger_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&ledger_path).unwrap();
        let last_root = RwLock::new(0);

        let slot = 1;
        let (mut shred, coding) = Shredder::new_coding_shred_header(slot, 11, 11, 11, 11, 0);
        let coding_shred = Shred::new_empty_from_header(
            shred.clone(),
            DataShredHeader::default(),
            coding.clone(),
        );

        // A well-formed coding shred passes the check...
        assert!(Blockstore::should_insert_coding_shred(
            &coding_shred,
            &last_root
        ));
        // ...and inserts cleanly.
        blockstore
            .insert_shreds(vec![coding_shred.clone()], None, false)
            .unwrap();

        // The same shred passes again: this check does not look for a
        // duplicate index.
        {
            assert!(Blockstore::should_insert_coding_shred(
                &coding_shred,
                &last_root
            ));
        }

        shred.index += 1;

        // Establish a baseline header that works.
        {
            let coding_shred = Shred::new_empty_from_header(
                shred.clone(),
                DataShredHeader::default(),
                coding.clone(),
            );
            assert!(Blockstore::should_insert_coding_shred(
                &coding_shred,
                &last_root
            ));
        }

        // An index below the shred's position within its FEC set must fail.
        {
            let mut coding_shred = Shred::new_empty_from_header(
                shred.clone(),
                DataShredHeader::default(),
                coding.clone(),
            );
            let index = coding_shred.index() - coding_shred.common_header.fec_set_index - 1;
            coding_shred.set_index(index as u32);
            assert!(!Blockstore::should_insert_coding_shred(
                &coding_shred,
                &last_root
            ));
        }

        // num_coding == 0 must fail.
        {
            let mut coding_shred = Shred::new_empty_from_header(
                shred.clone(),
                DataShredHeader::default(),
                coding.clone(),
            );
            coding_shred.coding_header.num_coding_shreds = 0;
            assert!(!Blockstore::should_insert_coding_shred(
                &coding_shred,
                &last_root
            ));
        }

        // pos >= num_coding must fail.
        {
            let mut coding_shred = Shred::new_empty_from_header(
                shred.clone(),
                DataShredHeader::default(),
                coding.clone(),
            );
            let num_coding_shreds =
                coding_shred.common_header.index - coding_shred.common_header.fec_set_index;
            coding_shred.coding_header.num_coding_shreds = num_coding_shreds as u16;
            assert!(!Blockstore::should_insert_coding_shred(
                &coding_shred,
                &last_root
            ));
        }

        // A fec_set_index/num_coding combination that would imply the last
        // shred has index > u32::MAX must fail.
        {
            let mut coding_shred = Shred::new_empty_from_header(
                shred.clone(),
                DataShredHeader::default(),
                coding.clone(),
            );
            coding_shred.common_header.fec_set_index = std::u32::MAX - 1;
            coding_shred.coding_header.num_coding_shreds = 3;
            coding_shred.common_header.index = std::u32::MAX - 1;
            assert!(!Blockstore::should_insert_coding_shred(
                &coding_shred,
                &last_root
            ));
            coding_shred.coding_header.num_coding_shreds = 2000;
            assert!(!Blockstore::should_insert_coding_shred(
                &coding_shred,
                &last_root
            ));
            // Decreasing num_coding_shreds back within the limit makes the
            // shred valid...
            coding_shred.coding_header.num_coding_shreds = 2;
            assert!(Blockstore::should_insert_coding_shred(
                &coding_shred,
                &last_root
            ));
            // ...and insertable.
            blockstore
                .insert_shreds(vec![coding_shred], None, false)
                .unwrap();
        }

        // A coding shred for a slot at or below the last root must fail.
        {
            let mut coding_shred =
                Shred::new_empty_from_header(shred, DataShredHeader::default(), coding);
            coding_shred.set_slot(*last_root.read().unwrap());
            assert!(!Blockstore::should_insert_coding_shred(
                &coding_shred,
                &last_root
            ));
        }
    }
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_insert_multiple_is_last() {
    solana_logger::setup();
    let (shreds, _) = make_slot_entries(0, 0, 20);
    let num_shreds = shreds.len() as u64;
    let ledger_path = get_tmp_ledger_path!();
    let blockstore = Blockstore::open(&ledger_path).unwrap();

    // First insertion completes the slot.
    blockstore.insert_shreds(shreds, None, false).unwrap();
    let slot_meta = blockstore.meta(0).unwrap().unwrap();
    assert_eq!(slot_meta.consumed, num_shreds);
    assert_eq!(slot_meta.received, num_shreds);
    assert_eq!(slot_meta.last_index, num_shreds - 1);
    assert!(slot_meta.is_full());

    // Re-shredding the same slot with a different entry count yields a
    // second "is_last" shred; the slot meta must be unchanged and the slot
    // flagged as containing duplicates.
    let (shreds, _) = make_slot_entries(0, 0, 22);
    blockstore.insert_shreds(shreds, None, false).unwrap();
    let slot_meta = blockstore.meta(0).unwrap().unwrap();
    assert_eq!(slot_meta.consumed, num_shreds);
    assert_eq!(slot_meta.received, num_shreds);
    assert_eq!(slot_meta.last_index, num_shreds - 1);
    assert!(slot_meta.is_full());
    assert!(blockstore.has_duplicate_shreds_in_slot(0));

    drop(blockstore);
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
2019-05-13 22:04:54 -07:00
#[test]
fn test_slot_data_iterator() {
    // Build shreds for a handful of chained slots.
    let ledger_path = get_tmp_ledger_path!();
    let blockstore = Blockstore::open(&ledger_path).unwrap();
    let shreds_per_slot = 10;
    let slots = vec![2, 4, 8, 12];
    let all_shreds = make_chaining_slot_entries(&slots, shreds_per_slot);
    let slot_8_shreds = all_shreds[2].0.clone();
    for (slot_shreds, _) in all_shreds {
        blockstore.insert_shreds(slot_shreds, None, false).unwrap();
    }

    // Iterating a slot that doesn't exist is Ok but yields nothing.
    let shred_iter = blockstore.slot_data_iterator(5, 0).unwrap();
    let result: Vec<_> = shred_iter.collect();
    assert_eq!(result, vec![]);

    // The iterator for slot 8 yields exactly what was inserted for it.
    let shred_iter = blockstore.slot_data_iterator(8, 0).unwrap();
    let result: Vec<Shred> = shred_iter
        .filter_map(|(_, bytes)| Shred::new_from_serialized_shred(bytes.to_vec()).ok())
        .collect();
    assert_eq!(result.len(), slot_8_shreds.len());
    assert_eq!(result, slot_8_shreds);

    drop(blockstore);
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
2019-05-20 19:04:18 -07:00
#[test]
fn test_set_roots() {
    let ledger_path = get_tmp_ledger_path!();
    let blockstore = Blockstore::open(&ledger_path).unwrap();
    let chained_slots = vec![0, 2, 4, 7, 12, 15];

    // A fresh ledger reports 0 as its last root.
    assert_eq!(blockstore.last_root(), 0);

    blockstore.set_roots(&chained_slots).unwrap();

    // The last root advances to the highest slot given, and every slot in
    // the list becomes a root.
    assert_eq!(blockstore.last_root(), 15);
    for root in chained_slots {
        assert!(blockstore.is_root(root));
    }

    drop(blockstore);
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
2020-12-16 12:40:36 -08:00
#[test]
fn test_is_skipped() {
    let ledger_path = get_tmp_ledger_path!();
    let blockstore = Blockstore::open(&ledger_path).unwrap();
    let roots = vec![2, 4, 7, 12, 15];
    blockstore.set_roots(&roots).unwrap();

    // Non-root slots strictly between the lowest (2) and highest (15) root
    // were skipped; roots themselves and slots outside that span are not.
    for slot in 0..20 {
        let outside_span = slot < 2 || slot > 15;
        if outside_span || roots.contains(&slot) {
            assert!(!blockstore.is_skipped(slot));
        } else {
            assert!(blockstore.is_skipped(slot));
        }
    }

    drop(blockstore);
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
2019-07-17 14:42:29 -07:00
#[test]
fn test_iter_bounds() {
    let ledger_path = get_tmp_ledger_path!();
    let blockstore = Blockstore::open(&ledger_path).unwrap();

    // Slot 5 does not exist: the iterator must be Ok yet yield nothing.
    blockstore
        .slot_meta_iterator(5)
        .unwrap()
        .for_each(|_| panic!());

    drop(blockstore);
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
2019-10-21 16:15:10 -07:00
#[test]
fn test_get_completed_data_ranges() {
    let completed_data_end_indexes = vec![2, 4, 9, 11];

    // Consumed is 1, so shred index 1 is still missing: no complete ranges.
    let start_index = 0;
    let consumed = 1;
    assert_eq!(
        Blockstore::get_completed_data_ranges(
            start_index,
            &completed_data_end_indexes[..],
            consumed
        ),
        vec![]
    );

    // With everything up to index 2 consumed, the first data block [0, 2]
    // is complete.
    let start_index = 0;
    let consumed = 3;
    assert_eq!(
        Blockstore::get_completed_data_ranges(
            start_index,
            &completed_data_end_indexes[..],
            consumed
        ),
        vec![(0, 2)]
    );

    // Exhaustively test every (i, j) pair:
    //
    // `consumed == completed_data_end_indexes[j] + 1` means all shreds up to
    // that index are present, so the completed data blocks cover the range
    // [start_index, completed_data_end_indexes[j]] ==
    // [completed_data_end_indexes[i], completed_data_end_indexes[j]].
    for i in 0..completed_data_end_indexes.len() {
        for j in i..completed_data_end_indexes.len() {
            let start_index = completed_data_end_indexes[i];
            let consumed = completed_data_end_indexes[j] + 1;
            // start_index == completed_data_end_indexes[i] means that shred
            // is a single-shred data block, so its start index is also its
            // end index.
            let mut expected = vec![(start_index, start_index)];
            expected.extend(
                completed_data_end_indexes[i..=j]
                    .windows(2)
                    .map(|ends| (ends[0] + 1, ends[1])),
            );
            assert_eq!(
                Blockstore::get_completed_data_ranges(
                    start_index,
                    &completed_data_end_indexes[..],
                    consumed
                ),
                expected
            );
        }
    }
}
#[test]
fn test_get_slot_entries_with_shred_count_corruption() {
    let ledger_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&ledger_path).unwrap();
        let num_ticks = 8;
        let entries = create_ticks(num_ticks, 0, Hash::default());
        let slot = 1;
        let shreds = entries_to_test_shreds(entries, slot, 0, false, 0);
        let next_shred_index = shreds.len();
        blockstore
            .insert_shreds(shreds, None, false)
            .expect("Expected successful write of shreds");
        assert_eq!(
            blockstore.get_slot_entries(slot, 0).unwrap().len() as u64,
            num_ticks
        );

        // Append a final shred whose payload won't deshred into entries.
        let shreds = vec![Shred::new_from_data(
            slot,
            next_shred_index as u32,
            1,
            Some(&[1, 1, 1]),
            true,
            true,
            0,
            0,
            next_shred_index as u32,
        )];
        blockstore
            .insert_shreds(shreds, None, false)
            .expect("Expected successful write of shreds");

        // With the corruption present, nothing is returned — even though the
        // earlier data block was valid on its own.
        assert!(blockstore.get_slot_entries(slot, 0).is_err());
    }
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
fn test_no_insert_but_modify_slot_meta() {
    // Verify the SlotMeta stays correct when a shred is filtered out by the
    // insertion checks while other shreds in the same batch still apply.
    let (shreds0, _) = make_slot_entries(0, 0, 200);
    let ledger_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&ledger_path).unwrap();
        // Insert the first 5 shreds; there is no "is_last" shred yet.
        blockstore
            .insert_shreds(shreds0[0..5].to_vec(), None, false)
            .unwrap();

        // Mix a repeated slot-0 shred into the batches for slots 2 and 3.
        // The repeat is ignored, but the chaining update to slot 0's
        // SlotMeta must still land.
        let (mut shreds2, _) = make_slot_entries(2, 0, 200);
        let (mut shreds3, _) = make_slot_entries(3, 0, 200);
        shreds2.push(shreds0[1].clone());
        shreds3.insert(0, shreds0[1].clone());
        blockstore.insert_shreds(shreds2, None, false).unwrap();
        let slot_meta = blockstore.meta(0).unwrap().unwrap();
        assert_eq!(slot_meta.next_slots, vec![2]);
        blockstore.insert_shreds(shreds3, None, false).unwrap();
        let slot_meta = blockstore.meta(0).unwrap().unwrap();
        assert_eq!(slot_meta.next_slots, vec![2, 3]);
    }
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
2019-11-14 00:32:07 -08:00
#[test]
fn test_trusted_insert_shreds() {
    // Make shreds for slot 1.
    let (shreds1, _) = make_slot_entries(1, 0, 1);
    let ledger_path = get_tmp_ledger_path!();
    let last_root = 100;
    {
        let blockstore = Blockstore::open(&ledger_path).unwrap();
        blockstore.set_roots(&[last_root]).unwrap();

        // A normal insert of a slot below the last root is silently ignored:
        // the call itself succeeds, but nothing is written.
        blockstore
            .insert_shreds(shreds1[..].to_vec(), None, false)
            .unwrap();
        assert!(blockstore.get_data_shred(1, 0).unwrap().is_none());

        // The trusted path (is_trusted = true) bypasses that check and the
        // write lands.
        blockstore
            .insert_shreds(shreds1[..].to_vec(), None, true)
            .unwrap();
        assert!(blockstore.get_data_shred(1, 0).unwrap().is_some());
    }
}
2019-11-14 15:34:39 -08:00
/// Exercises `get_rooted_block` / `get_complete_block` end to end:
/// - an empty-but-rooted slot returns `SlotUnavailable`,
/// - a rooted slot whose parent has no entries returns
///   `ParentEntriesUnavailable` unless `require_previous_blockhash` is false,
/// - rooted and unrooted slots return exact expected block contents,
/// - `block_time` / `block_height` are surfaced once written to their columns.
#[test]
fn test_get_rooted_block() {
    let slot = 10;
    let entries = make_slot_entries_with_transactions(100);
    let blockhash = get_last_hash(entries.iter()).unwrap();
    let shreds = entries_to_test_shreds(entries.clone(), slot, slot - 1, true, 0);
    let more_shreds = entries_to_test_shreds(entries.clone(), slot + 1, slot, true, 0);
    let unrooted_shreds = entries_to_test_shreds(entries.clone(), slot + 2, slot + 1, true, 0);
    let ledger_path = get_tmp_ledger_path!();
    let ledger = Blockstore::open(&ledger_path).unwrap();
    ledger.insert_shreds(shreds, None, false).unwrap();
    ledger.insert_shreds(more_shreds, None, false).unwrap();
    ledger.insert_shreds(unrooted_shreds, None, false).unwrap();
    ledger.set_roots(&[slot - 1, slot, slot + 1]).unwrap();

    // Give `slot - 1` a meta entry with no parent (snapshot-root style) so it
    // is rooted but holds no entries of its own.
    let parent_meta = SlotMeta {
        parent_slot: std::u64::MAX,
        ..SlotMeta::default()
    };
    ledger
        .put_meta_bytes(slot - 1, &serialize(&parent_meta).unwrap())
        .unwrap();

    let expected_transactions: Vec<TransactionWithStatusMeta> = entries
        .iter()
        .cloned()
        .filter(|entry| !entry.is_tick())
        .flat_map(|entry| entry.transactions)
        .map(|transaction| {
            let mut pre_balances: Vec<u64> = vec![];
            let mut post_balances: Vec<u64> = vec![];
            for (i, _account_key) in transaction.message.account_keys.iter().enumerate() {
                pre_balances.push(i as u64 * 10);
                post_balances.push(i as u64 * 11);
            }
            let signature = transaction.signatures[0];
            // The same status record is written under slot, slot + 1, and
            // slot + 2; build it fresh for each write (and for the expected
            // meta) instead of repeating the literal four times.
            let make_status_meta = || TransactionStatusMeta {
                status: Ok(()),
                fee: 42,
                pre_balances: pre_balances.clone(),
                post_balances: post_balances.clone(),
                inner_instructions: Some(vec![]),
                log_messages: Some(vec![]),
                pre_token_balances: Some(vec![]),
                post_token_balances: Some(vec![]),
                rewards: Some(vec![]),
            };
            for status_slot in slot..=slot + 2 {
                ledger
                    .transaction_status_cf
                    .put_protobuf((0, signature, status_slot), &make_status_meta().into())
                    .unwrap();
            }
            TransactionWithStatusMeta {
                transaction,
                meta: Some(make_status_meta()),
            }
        })
        .collect();

    // Even if marked as root, a slot that is empty of entries should return an error
    let confirmed_block_err = ledger.get_rooted_block(slot - 1, true).unwrap_err();
    assert_matches!(confirmed_block_err, BlockstoreError::SlotUnavailable);

    // The previous_blockhash of `expected_block` is default because its parent slot is a root,
    // but empty of entries (eg. snapshot root slots). This now returns an error.
    let confirmed_block_err = ledger.get_rooted_block(slot, true).unwrap_err();
    assert_matches!(
        confirmed_block_err,
        BlockstoreError::ParentEntriesUnavailable
    );

    // Test if require_previous_blockhash is false
    let confirmed_block = ledger.get_rooted_block(slot, false).unwrap();
    assert_eq!(confirmed_block.transactions.len(), 100);
    let expected_block = ConfirmedBlock {
        transactions: expected_transactions.clone(),
        parent_slot: slot - 1,
        blockhash: blockhash.to_string(),
        previous_blockhash: Hash::default().to_string(),
        rewards: vec![],
        block_time: None,
        block_height: None,
    };
    assert_eq!(confirmed_block, expected_block);

    let confirmed_block = ledger.get_rooted_block(slot + 1, true).unwrap();
    assert_eq!(confirmed_block.transactions.len(), 100);
    let mut expected_block = ConfirmedBlock {
        transactions: expected_transactions.clone(),
        parent_slot: slot,
        blockhash: blockhash.to_string(),
        previous_blockhash: blockhash.to_string(),
        rewards: vec![],
        block_time: None,
        block_height: None,
    };
    assert_eq!(confirmed_block, expected_block);

    // slot + 2 is not rooted, so the rooted getter must refuse it...
    let not_root = ledger.get_rooted_block(slot + 2, true).unwrap_err();
    assert_matches!(not_root, BlockstoreError::SlotNotRooted);

    // ... but the complete-block getter serves it.
    let complete_block = ledger.get_complete_block(slot + 2, true).unwrap();
    assert_eq!(complete_block.transactions.len(), 100);
    let mut expected_complete_block = ConfirmedBlock {
        transactions: expected_transactions,
        parent_slot: slot + 1,
        blockhash: blockhash.to_string(),
        previous_blockhash: blockhash.to_string(),
        rewards: vec![],
        block_time: None,
        block_height: None,
    };
    assert_eq!(complete_block, expected_complete_block);

    // Test block_time & block_height return, if available
    let timestamp = 1_576_183_541;
    ledger.blocktime_cf.put(slot + 1, &timestamp).unwrap();
    expected_block.block_time = Some(timestamp);
    let block_height = slot - 2;
    ledger.block_height_cf.put(slot + 1, &block_height).unwrap();
    expected_block.block_height = Some(block_height);

    let confirmed_block = ledger.get_rooted_block(slot + 1, true).unwrap();
    assert_eq!(confirmed_block, expected_block);

    let timestamp = 1_576_183_542;
    ledger.blocktime_cf.put(slot + 2, &timestamp).unwrap();
    expected_complete_block.block_time = Some(timestamp);
    let block_height = slot - 1;
    ledger.block_height_cf.put(slot + 2, &block_height).unwrap();
    expected_complete_block.block_height = Some(block_height);

    let complete_block = ledger.get_complete_block(slot + 2, true).unwrap();
    assert_eq!(complete_block, expected_complete_block);

    drop(ledger);
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
2019-11-17 08:26:01 -08:00
2019-12-14 11:23:02 -08:00
/// Round-trips `TransactionStatusMeta` through the TransactionStatus column:
/// a missing key reads back as `None`, and both an `Err` and an `Ok` status
/// are recoverable field-for-field after a protobuf write.
#[test]
fn test_persist_transaction_status() {
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        let transaction_status_cf = blockstore.db.column::<cf::TransactionStatus>();

        let pre_balances_vec = vec![1, 2, 3];
        let post_balances_vec = vec![3, 2, 1];
        let inner_instructions_vec = vec![InnerInstructions {
            index: 0,
            instructions: vec![CompiledInstruction::new(1, &(), vec![0])],
        }];
        let log_messages_vec = vec![String::from("Test message\n")];
        let pre_token_balances_vec = vec![];
        let post_token_balances_vec = vec![];
        let rewards_vec = vec![];

        // Both stored metas share the same balances/instructions fixtures;
        // only the execution result and fee differ per case.
        let make_status_meta = |status, fee| TransactionStatusMeta {
            status,
            fee,
            pre_balances: pre_balances_vec.clone(),
            post_balances: post_balances_vec.clone(),
            inner_instructions: Some(inner_instructions_vec.clone()),
            log_messages: Some(log_messages_vec.clone()),
            pre_token_balances: Some(pre_token_balances_vec.clone()),
            post_token_balances: Some(post_token_balances_vec.clone()),
            rewards: Some(rewards_vec.clone()),
        };

        // result not found
        assert!(transaction_status_cf
            .get_protobuf_or_bincode::<StoredTransactionStatusMeta>((
                0,
                Signature::default(),
                0
            ))
            .unwrap()
            .is_none());

        // insert value
        let status = make_status_meta(Err(TransactionError::AccountNotFound), 5u64).into();
        assert!(transaction_status_cf
            .put_protobuf((0, Signature::default(), 0), &status)
            .is_ok());
        // result found
        let TransactionStatusMeta {
            status,
            fee,
            pre_balances,
            post_balances,
            inner_instructions,
            log_messages,
            pre_token_balances,
            post_token_balances,
            rewards,
        } = transaction_status_cf
            .get_protobuf_or_bincode::<StoredTransactionStatusMeta>((
                0,
                Signature::default(),
                0,
            ))
            .unwrap()
            .unwrap()
            .try_into()
            .unwrap();
        assert_eq!(status, Err(TransactionError::AccountNotFound));
        assert_eq!(fee, 5u64);
        assert_eq!(pre_balances, pre_balances_vec);
        assert_eq!(post_balances, post_balances_vec);
        assert_eq!(inner_instructions.unwrap(), inner_instructions_vec);
        assert_eq!(log_messages.unwrap(), log_messages_vec);
        assert_eq!(pre_token_balances.unwrap(), pre_token_balances_vec);
        assert_eq!(post_token_balances.unwrap(), post_token_balances_vec);
        assert_eq!(rewards.unwrap(), rewards_vec);

        // insert value
        let status = make_status_meta(Ok(()), 9u64).into();
        assert!(transaction_status_cf
            .put_protobuf((0, Signature::new(&[2u8; 64]), 9), &status)
            .is_ok());
        // result found
        let TransactionStatusMeta {
            status,
            fee,
            pre_balances,
            post_balances,
            inner_instructions,
            log_messages,
            pre_token_balances,
            post_token_balances,
            rewards,
        } = transaction_status_cf
            .get_protobuf_or_bincode::<StoredTransactionStatusMeta>((
                0,
                Signature::new(&[2u8; 64]),
                9,
            ))
            .unwrap()
            .unwrap()
            .try_into()
            .unwrap();
        // deserialize
        assert_eq!(status, Ok(()));
        assert_eq!(fee, 9u64);
        assert_eq!(pre_balances, pre_balances_vec);
        assert_eq!(post_balances, post_balances_vec);
        assert_eq!(inner_instructions.unwrap(), inner_instructions_vec);
        assert_eq!(log_messages.unwrap(), log_messages_vec);
        assert_eq!(pre_token_balances.unwrap(), pre_token_balances_vec);
        assert_eq!(post_token_balances.unwrap(), post_token_balances_vec);
        assert_eq!(rewards.unwrap(), rewards_vec);
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2019-11-17 19:17:15 -08:00
2020-04-04 20:24:06 -07:00
/// Verifies the two-primary-index purge scheme for transaction statuses and
/// address signatures: writes land in the active index, the first purge
/// freezes it (new writes roll over to the other index), and a second purge
/// wipes the frozen index and toggles the roles.
#[test]
#[allow(clippy::cognitive_complexity)]
fn test_transaction_status_index() {
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        let transaction_status_index_cf = blockstore.db.column::<cf::TransactionStatusIndex>();
        let slot0 = 10;

        // Primary index column is initialized on Blockstore::open
        assert!(transaction_status_index_cf.get(0).unwrap().is_some());
        assert!(transaction_status_index_cf.get(1).unwrap().is_some());

        // Writes five random statuses (and their address-signature records)
        // at `slot`.
        let write_random_statuses = |slot| {
            for _ in 0..5 {
                let random_bytes: Vec<u8> = (0..64).map(|_| rand::random::<u8>()).collect();
                blockstore
                    .write_transaction_status(
                        slot,
                        Signature::new(&random_bytes),
                        vec![&Pubkey::new(&random_bytes[0..32])],
                        vec![&Pubkey::new(&random_bytes[32..])],
                        TransactionStatusMeta::default(),
                    )
                    .unwrap();
            }
        };
        // Key of the first TransactionStatus entry at or after primary `index`.
        let first_status_key = |index| {
            blockstore
                .db
                .iter::<cf::TransactionStatus>(IteratorMode::From(
                    cf::TransactionStatus::as_index(index),
                    IteratorDirection::Forward,
                ))
                .unwrap()
                .next()
                .unwrap()
                .0
        };
        // Key of the first AddressSignatures entry at or after primary `index`.
        let first_address_key = |index| {
            blockstore
                .db
                .iter::<cf::AddressSignatures>(IteratorMode::From(
                    cf::AddressSignatures::as_index(index),
                    IteratorDirection::Forward,
                ))
                .unwrap()
                .next()
                .unwrap()
                .0
        };

        write_random_statuses(slot0);
        // New statuses bump index 0 max_slot
        assert_eq!(
            transaction_status_index_cf.get(0).unwrap().unwrap(),
            TransactionStatusIndexMeta {
                max_slot: slot0,
                frozen: false,
            }
        );
        assert_eq!(
            transaction_status_index_cf.get(1).unwrap().unwrap(),
            TransactionStatusIndexMeta::default()
        );
        let first_status_entry = first_status_key(0);
        assert_eq!(first_status_entry.0, 0);
        assert_eq!(first_status_entry.2, slot0);
        let first_address_entry = first_address_key(0);
        assert_eq!(first_address_entry.0, 0);
        assert_eq!(first_address_entry.2, slot0);

        blockstore.run_purge(0, 8, PurgeType::PrimaryIndex).unwrap();
        // First successful prune freezes index 0
        assert_eq!(
            transaction_status_index_cf.get(0).unwrap().unwrap(),
            TransactionStatusIndexMeta {
                max_slot: slot0,
                frozen: true,
            }
        );
        assert_eq!(
            transaction_status_index_cf.get(1).unwrap().unwrap(),
            TransactionStatusIndexMeta::default()
        );

        let slot1 = 20;
        write_random_statuses(slot1);
        assert_eq!(
            transaction_status_index_cf.get(0).unwrap().unwrap(),
            TransactionStatusIndexMeta {
                max_slot: slot0,
                frozen: true,
            }
        );
        // Index 0 is frozen, so new statuses bump index 1 max_slot
        assert_eq!(
            transaction_status_index_cf.get(1).unwrap().unwrap(),
            TransactionStatusIndexMeta {
                max_slot: slot1,
                frozen: false,
            }
        );

        // Index 0 statuses and address records still exist
        let first_status_entry = first_status_key(0);
        assert_eq!(first_status_entry.0, 0);
        assert_eq!(first_status_entry.2, slot0);
        let first_address_entry = first_address_key(0);
        assert_eq!(first_address_entry.0, 0);
        assert_eq!(first_address_entry.2, slot0);
        // New statuses and address records are stored in index 1
        let index1_first_status_entry = first_status_key(1);
        assert_eq!(index1_first_status_entry.0, 1);
        assert_eq!(index1_first_status_entry.2, slot1);
        let index1_first_address_entry = first_address_key(1);
        assert_eq!(index1_first_address_entry.0, 1);
        assert_eq!(index1_first_address_entry.2, slot1);

        blockstore
            .run_purge(0, 18, PurgeType::PrimaryIndex)
            .unwrap();
        // Successful prune toggles TransactionStatusIndex
        assert_eq!(
            transaction_status_index_cf.get(0).unwrap().unwrap(),
            TransactionStatusIndexMeta {
                max_slot: 0,
                frozen: false,
            }
        );
        assert_eq!(
            transaction_status_index_cf.get(1).unwrap().unwrap(),
            TransactionStatusIndexMeta {
                max_slot: slot1,
                frozen: true,
            }
        );

        // Index 0 has been pruned, so first status and address entries are now index 1
        let first_status_entry = first_status_key(0);
        assert_eq!(first_status_entry.0, 1);
        assert_eq!(first_status_entry.2, slot1);
        let first_address_entry = first_address_key(0);
        assert_eq!(first_address_entry.0, 1);
        assert_eq!(first_address_entry.2, slot1);
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2020-04-06 03:04:54 -07:00
/// Checks `get_transaction_status_with_counter` across both primary indexes
/// and a forked chain: rooted entries are preferred, unrooted entries can be
/// matched via the confirmed-slots argument, and the returned counter
/// reflects how many candidate entries were examined.
#[test]
fn test_get_transaction_status() {
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        // TransactionStatus column opens initialized with one entry at index 2
        let transaction_status_cf = blockstore.db.column::<cf::TransactionStatus>();

        let pre_balances_vec = vec![1, 2, 3];
        let post_balances_vec = vec![3, 2, 1];
        let status = TransactionStatusMeta {
            status: solana_sdk::transaction::Result::<()>::Ok(()),
            fee: 42u64,
            pre_balances: pre_balances_vec,
            post_balances: post_balances_vec,
            inner_instructions: Some(vec![]),
            log_messages: Some(vec![]),
            pre_token_balances: Some(vec![]),
            post_token_balances: Some(vec![]),
            rewards: Some(vec![]),
        }
        .into();

        let signature1 = Signature::new(&[1u8; 64]);
        let signature2 = Signature::new(&[2u8; 64]);
        let signature3 = Signature::new(&[3u8; 64]);
        let signature4 = Signature::new(&[4u8; 64]);
        let signature5 = Signature::new(&[5u8; 64]);
        let signature6 = Signature::new(&[6u8; 64]);
        let signature7 = Signature::new(&[7u8; 64]);

        // Insert slots with fork
        //   0 (root)
        //  / \
        // 1   |
        //     2 (root)
        //     |
        //     3
        let meta0 = SlotMeta::new(0, 0);
        blockstore.meta_cf.put(0, &meta0).unwrap();
        let meta1 = SlotMeta::new(1, 0);
        blockstore.meta_cf.put(1, &meta1).unwrap();
        let meta2 = SlotMeta::new(2, 0);
        blockstore.meta_cf.put(2, &meta2).unwrap();
        let meta3 = SlotMeta::new(3, 2);
        blockstore.meta_cf.put(3, &meta3).unwrap();
        blockstore.set_roots(&[0, 2]).unwrap();

        // Writes the shared `status` record under `(index, signature, slot)`.
        let put_status = |index, signature, slot| {
            transaction_status_cf
                .put_protobuf((index, signature, slot), &status)
                .unwrap()
        };
        // Initialize index 0, including:
        //   signature2 in non-root and root,
        //   signature4 in non-root,
        //   signature5 in skipped slot and non-root,
        //   signature6 in skipped slot,
        put_status(0, signature2, 1);
        put_status(0, signature2, 2);
        put_status(0, signature4, 1);
        put_status(0, signature5, 1);
        put_status(0, signature5, 3);
        put_status(0, signature6, 1);
        // Initialize index 1, including:
        //   signature4 in root,
        //   signature6 in non-root,
        //   signature5 extra entries
        put_status(1, signature4, 2);
        put_status(1, signature5, 4);
        put_status(1, signature5, 5);
        put_status(1, signature6, 3);

        // NOTE(review): the `if let` checks below silently pass when no
        // status is returned — consider asserting the `Some` case instead.
        // TODO confirm every branch does return `Some` before tightening.

        // Signature exists, root found in index 0
        if let (Some((slot, _status)), counter) = blockstore
            .get_transaction_status_with_counter(signature2, &[])
            .unwrap()
        {
            assert_eq!(slot, 2);
            assert_eq!(counter, 2);
        }
        // Signature exists, root found although not required
        if let (Some((slot, _status)), counter) = blockstore
            .get_transaction_status_with_counter(signature2, &[3])
            .unwrap()
        {
            assert_eq!(slot, 2);
            assert_eq!(counter, 2);
        }
        // Signature exists, root found in index 1
        if let (Some((slot, _status)), counter) = blockstore
            .get_transaction_status_with_counter(signature4, &[])
            .unwrap()
        {
            assert_eq!(slot, 2);
            assert_eq!(counter, 3);
        }
        // Signature exists, root found although not required, in index 1
        if let (Some((slot, _status)), counter) = blockstore
            .get_transaction_status_with_counter(signature4, &[3])
            .unwrap()
        {
            assert_eq!(slot, 2);
            assert_eq!(counter, 3);
        }
        // Signature exists, no root found
        let (status, counter) = blockstore
            .get_transaction_status_with_counter(signature5, &[])
            .unwrap();
        assert_eq!(status, None);
        assert_eq!(counter, 6);
        // Signature exists, root not required
        if let (Some((slot, _status)), counter) = blockstore
            .get_transaction_status_with_counter(signature5, &[3])
            .unwrap()
        {
            assert_eq!(slot, 3);
            assert_eq!(counter, 2);
        }

        // Signatures that do not exist — smaller than (signature1), between
        // (signature3), and larger than (signature7) the existing entries —
        // are never found, with or without confirmed unrooted slots.
        for missing_signature in &[signature1, signature3, signature7] {
            let (status, counter) = blockstore
                .get_transaction_status_with_counter(*missing_signature, &[])
                .unwrap();
            assert_eq!(status, None);
            assert_eq!(counter, 2);
            let (status, counter) = blockstore
                .get_transaction_status_with_counter(*missing_signature, &[3])
                .unwrap();
            assert_eq!(status, None);
            assert_eq!(counter, 2);
        }
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2021-05-28 00:42:56 -07:00
fn do_test_lowest_cleanup_slot_and_special_cfs (
simulate_compaction : bool ,
simulate_ledger_cleanup_service : bool ,
) {
solana_logger ::setup ( ) ;
let blockstore_path = get_tmp_ledger_path! ( ) ;
{
let blockstore = Blockstore ::open ( & blockstore_path ) . unwrap ( ) ;
// TransactionStatus column opens initialized with one entry at index 2
let transaction_status_cf = blockstore . db . column ::< cf ::TransactionStatus > ( ) ;
let pre_balances_vec = vec! [ 1 , 2 , 3 ] ;
let post_balances_vec = vec! [ 3 , 2 , 1 ] ;
let status = TransactionStatusMeta {
status : solana_sdk ::transaction ::Result ::< ( ) > ::Ok ( ( ) ) ,
fee : 42 u64 ,
pre_balances : pre_balances_vec ,
post_balances : post_balances_vec ,
inner_instructions : Some ( vec! [ ] ) ,
log_messages : Some ( vec! [ ] ) ,
pre_token_balances : Some ( vec! [ ] ) ,
post_token_balances : Some ( vec! [ ] ) ,
rewards : Some ( vec! [ ] ) ,
}
. into ( ) ;
let signature1 = Signature ::new ( & [ 2 u8 ; 64 ] ) ;
let signature2 = Signature ::new ( & [ 3 u8 ; 64 ] ) ;
// Insert rooted slots 0..=3 with no fork
let meta0 = SlotMeta ::new ( 0 , 0 ) ;
blockstore . meta_cf . put ( 0 , & meta0 ) . unwrap ( ) ;
let meta1 = SlotMeta ::new ( 1 , 0 ) ;
blockstore . meta_cf . put ( 1 , & meta1 ) . unwrap ( ) ;
let meta2 = SlotMeta ::new ( 2 , 1 ) ;
blockstore . meta_cf . put ( 2 , & meta2 ) . unwrap ( ) ;
let meta3 = SlotMeta ::new ( 3 , 2 ) ;
blockstore . meta_cf . put ( 3 , & meta3 ) . unwrap ( ) ;
blockstore . set_roots ( & [ 0 , 1 , 2 , 3 ] ) . unwrap ( ) ;
let lowest_cleanup_slot = 1 ;
let lowest_available_slot = lowest_cleanup_slot + 1 ;
transaction_status_cf
. put_protobuf ( ( 0 , signature1 , lowest_cleanup_slot ) , & status )
. unwrap ( ) ;
transaction_status_cf
. put_protobuf ( ( 0 , signature2 , lowest_available_slot ) , & status )
. unwrap ( ) ;
let address0 = solana_sdk ::pubkey ::new_rand ( ) ;
let address1 = solana_sdk ::pubkey ::new_rand ( ) ;
blockstore
. write_transaction_status (
lowest_cleanup_slot ,
signature1 ,
vec! [ & address0 ] ,
vec! [ ] ,
TransactionStatusMeta ::default ( ) ,
)
. unwrap ( ) ;
blockstore
. write_transaction_status (
lowest_available_slot ,
signature2 ,
vec! [ & address1 ] ,
vec! [ ] ,
TransactionStatusMeta ::default ( ) ,
)
. unwrap ( ) ;
let check_for_missing = | | {
(
blockstore
. get_transaction_status_with_counter ( signature1 , & [ ] )
. unwrap ( )
. 0
. is_none ( ) ,
blockstore
. find_address_signatures_for_slot ( address0 , lowest_cleanup_slot )
. unwrap ( )
. is_empty ( ) ,
blockstore
. find_address_signatures ( address0 , lowest_cleanup_slot , lowest_cleanup_slot )
. unwrap ( )
. is_empty ( ) ,
)
} ;
let assert_existing_always = | | {
let are_existing_always = (
blockstore
. get_transaction_status_with_counter ( signature2 , & [ ] )
. unwrap ( )
. 0
. is_some ( ) ,
! blockstore
. find_address_signatures_for_slot ( address1 , lowest_available_slot )
. unwrap ( )
. is_empty ( ) ,
! blockstore
. find_address_signatures (
address1 ,
lowest_available_slot ,
lowest_available_slot ,
)
. unwrap ( )
. is_empty ( ) ,
) ;
assert_eq! ( are_existing_always , ( true , true , true ) ) ;
} ;
let are_missing = check_for_missing ( ) ;
// should never be missing before the conditional compaction & simulation...
assert_eq! ( are_missing , ( false , false , false ) ) ;
assert_existing_always ( ) ;
if simulate_compaction {
blockstore . set_max_expired_slot ( lowest_cleanup_slot ) ;
// force compaction filters to run across whole key range.
blockstore
. compact_storage ( Slot ::min_value ( ) , Slot ::max_value ( ) )
. unwrap ( ) ;
}
if simulate_ledger_cleanup_service {
* blockstore . lowest_cleanup_slot . write ( ) . unwrap ( ) = lowest_cleanup_slot ;
}
let are_missing = check_for_missing ( ) ;
if simulate_compaction | | simulate_ledger_cleanup_service {
// ... when either simulation (or both) is effective, we should observe to be missing
// consistently
assert_eq! ( are_missing , ( true , true , true ) ) ;
} else {
// ... otherwise, we should observe to be existing...
assert_eq! ( are_missing , ( false , false , false ) ) ;
}
assert_existing_always ( ) ;
}
Blockstore ::destroy ( & blockstore_path ) . expect ( " Expected successful database destruction " ) ;
}
#[ test ]
fn test_lowest_cleanup_slot_and_special_cfs_with_compact_with_ledger_cleanup_service_simulation (
) {
do_test_lowest_cleanup_slot_and_special_cfs ( true , true ) ;
}
#[ test ]
fn test_lowest_cleanup_slot_and_special_cfs_with_compact_without_ledger_cleanup_service_simulation (
) {
do_test_lowest_cleanup_slot_and_special_cfs ( true , false ) ;
}
#[ test ]
fn test_lowest_cleanup_slot_and_special_cfs_without_compact_with_ledger_cleanup_service_simulation (
) {
do_test_lowest_cleanup_slot_and_special_cfs ( false , true ) ;
}
#[ test ]
fn test_lowest_cleanup_slot_and_special_cfs_without_compact_without_ledger_cleanup_service_simulation (
) {
do_test_lowest_cleanup_slot_and_special_cfs ( false , false ) ;
}
2020-04-08 23:57:30 -07:00
#[test]
fn test_get_rooted_transaction() {
    // Writes transactions plus synthetic statuses into a rooted slot, checks
    // that both get_rooted_transaction() and get_complete_transaction() return
    // them, then purges the slot and checks both lookups return None.
    let slot = 2;
    let entries = make_slot_entries_with_transactions(5);
    let shreds = entries_to_test_shreds(entries.clone(), slot, slot - 1, true, 0);
    let ledger_path = get_tmp_ledger_path!();
    let blockstore = Blockstore::open(&ledger_path).unwrap();
    blockstore.insert_shreds(shreds, None, false).unwrap();
    // Root the slot (and its parent) so get_rooted_transaction() can see it.
    blockstore.set_roots(&[slot - 1, slot]).unwrap();

    let expected_transactions: Vec<TransactionWithStatusMeta> = entries
        .iter()
        .cloned()
        .filter(|entry| !entry.is_tick())
        .flat_map(|entry| entry.transactions)
        .map(|transaction| {
            // Synthesize distinguishable pre/post balances per account index.
            let mut pre_balances: Vec<u64> = vec![];
            let mut post_balances: Vec<u64> = vec![];
            for (i, _account_key) in transaction.message.account_keys.iter().enumerate() {
                pre_balances.push(i as u64 * 10);
                post_balances.push(i as u64 * 11);
            }
            let inner_instructions = Some(vec![InnerInstructions {
                index: 0,
                instructions: vec![CompiledInstruction::new(1, &(), vec![0])],
            }]);
            let log_messages = Some(vec![String::from("Test message\n")]);
            let pre_token_balances = Some(vec![]);
            let post_token_balances = Some(vec![]);
            let rewards = Some(vec![]);
            let signature = transaction.signatures[0];
            // `.into()` converts the meta into the stored (protobuf) form
            // before it is written to the status column family.
            let status = TransactionStatusMeta {
                status: Ok(()),
                fee: 42,
                pre_balances: pre_balances.clone(),
                post_balances: post_balances.clone(),
                inner_instructions: inner_instructions.clone(),
                log_messages: log_messages.clone(),
                pre_token_balances: pre_token_balances.clone(),
                post_token_balances: post_token_balances.clone(),
                rewards: rewards.clone(),
            }
            .into();
            blockstore
                .transaction_status_cf
                .put_protobuf((0, signature, slot), &status)
                .unwrap();
            // Keep an owned copy of what the lookup is expected to return.
            TransactionWithStatusMeta {
                transaction,
                meta: Some(TransactionStatusMeta {
                    status: Ok(()),
                    fee: 42,
                    pre_balances,
                    post_balances,
                    inner_instructions,
                    log_messages,
                    pre_token_balances,
                    post_token_balances,
                    rewards,
                }),
            }
        })
        .collect();

    // Every written transaction must be retrievable both as rooted and as
    // complete (with a highest-confirmed slot above `slot`).
    for transaction in expected_transactions.clone() {
        let signature = transaction.transaction.signatures[0];
        assert_eq!(
            blockstore.get_rooted_transaction(signature).unwrap(),
            Some(ConfirmedTransaction {
                slot,
                transaction: transaction.clone(),
                block_time: None
            })
        );
        assert_eq!(
            blockstore
                .get_complete_transaction(signature, slot + 1)
                .unwrap(),
            Some(ConfirmedTransaction {
                slot,
                transaction,
                block_time: None
            })
        );
    }

    // After purging slots 0..=2 and raising lowest_cleanup_slot, both lookup
    // paths must report the transactions as gone.
    blockstore.run_purge(0, 2, PurgeType::PrimaryIndex).unwrap();
    *blockstore.lowest_cleanup_slot.write().unwrap() = slot;
    for TransactionWithStatusMeta { transaction, .. } in expected_transactions {
        let signature = transaction.signatures[0];
        assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None,);
        assert_eq!(
            blockstore
                .get_complete_transaction(signature, slot + 1)
                .unwrap(),
            None,
        );
    }
}
#[test]
fn test_get_complete_transaction() {
    // Same shape as test_get_rooted_transaction, but the slot is never
    // rooted: get_complete_transaction() must find the transactions while
    // get_rooted_transaction() must not.
    let slot = 2;
    let entries = make_slot_entries_with_transactions(5);
    let shreds = entries_to_test_shreds(entries.clone(), slot, slot - 1, true, 0);
    let ledger_path = get_tmp_ledger_path!();
    let blockstore = Blockstore::open(&ledger_path).unwrap();
    blockstore.insert_shreds(shreds, None, false).unwrap();

    let expected_transactions: Vec<TransactionWithStatusMeta> = entries
        .iter()
        .cloned()
        .filter(|entry| !entry.is_tick())
        .flat_map(|entry| entry.transactions)
        .map(|transaction| {
            // Synthesize distinguishable pre/post balances per account index.
            let mut pre_balances: Vec<u64> = vec![];
            let mut post_balances: Vec<u64> = vec![];
            for (i, _account_key) in transaction.message.account_keys.iter().enumerate() {
                pre_balances.push(i as u64 * 10);
                post_balances.push(i as u64 * 11);
            }
            let inner_instructions = Some(vec![InnerInstructions {
                index: 0,
                instructions: vec![CompiledInstruction::new(1, &(), vec![0])],
            }]);
            let log_messages = Some(vec![String::from("Test message\n")]);
            let pre_token_balances = Some(vec![]);
            let post_token_balances = Some(vec![]);
            let rewards = Some(vec![]);
            let signature = transaction.signatures[0];
            // `.into()` converts to the stored (protobuf) representation.
            let status = TransactionStatusMeta {
                status: Ok(()),
                fee: 42,
                pre_balances: pre_balances.clone(),
                post_balances: post_balances.clone(),
                inner_instructions: inner_instructions.clone(),
                log_messages: log_messages.clone(),
                pre_token_balances: pre_token_balances.clone(),
                post_token_balances: post_token_balances.clone(),
                rewards: rewards.clone(),
            }
            .into();
            blockstore
                .transaction_status_cf
                .put_protobuf((0, signature, slot), &status)
                .unwrap();
            // Owned copy of the expected lookup result.
            TransactionWithStatusMeta {
                transaction,
                meta: Some(TransactionStatusMeta {
                    status: Ok(()),
                    fee: 42,
                    pre_balances,
                    post_balances,
                    inner_instructions,
                    log_messages,
                    pre_token_balances,
                    post_token_balances,
                    rewards,
                }),
            }
        })
        .collect();

    for transaction in expected_transactions.clone() {
        let signature = transaction.transaction.signatures[0];
        assert_eq!(
            blockstore
                .get_complete_transaction(signature, slot)
                .unwrap(),
            Some(ConfirmedTransaction {
                slot,
                transaction,
                block_time: None
            })
        );
        // Slot was never rooted, so the rooted lookup must miss.
        assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None);
    }

    // After purging and raising lowest_cleanup_slot, both lookups must miss.
    blockstore.run_purge(0, 2, PurgeType::PrimaryIndex).unwrap();
    *blockstore.lowest_cleanup_slot.write().unwrap() = slot;
    for TransactionWithStatusMeta { transaction, .. } in expected_transactions {
        let signature = transaction.signatures[0];
        assert_eq!(
            blockstore
                .get_complete_transaction(signature, slot)
                .unwrap(),
            None,
        );
        assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None,);
    }
}
2020-08-06 15:21:46 -07:00
#[test]
fn test_empty_transaction_status() {
    // A freshly created blockstore with a rooted slot but no status entries
    // must report no transaction for any signature.
    let ledger_path = get_tmp_ledger_path!();
    let blockstore = Blockstore::open(&ledger_path).unwrap();
    blockstore.set_roots(&[0]).unwrap();
    let lookup = blockstore
        .get_rooted_transaction(Signature::default())
        .unwrap();
    assert!(lookup.is_none());
}
2020-04-09 20:21:31 -07:00
#[test]
fn test_get_confirmed_signatures_for_address() {
    // Exercises get_confirmed_signatures_for_address() across both primary
    // indexes: signatures 1..=4 land in index 0 (frozen by a purge), 5..=8 in
    // index 1, and the lookup must merge and slot-order them.
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();

        let address0 = solana_sdk::pubkey::new_rand();
        let address1 = solana_sdk::pubkey::new_rand();

        // Write 4 deterministic signatures for slot0 into primary index 0.
        let slot0 = 10;
        for x in 1..5 {
            let signature = Signature::new(&[x; 64]);
            blockstore
                .write_transaction_status(
                    slot0,
                    signature,
                    vec![&address0],
                    vec![&address1],
                    TransactionStatusMeta::default(),
                )
                .unwrap();
        }
        // Purge to freeze index 0
        blockstore.run_purge(0, 1, PurgeType::PrimaryIndex).unwrap();
        // Write 4 more signatures for slot1; these go to primary index 1.
        let slot1 = 20;
        for x in 5..9 {
            let signature = Signature::new(&[x; 64]);
            blockstore
                .write_transaction_status(
                    slot1,
                    signature,
                    vec![&address0],
                    vec![&address1],
                    TransactionStatusMeta::default(),
                )
                .unwrap();
        }
        blockstore.set_roots(&[slot0, slot1]).unwrap();

        // A full-range query must return all 8 signatures in write order.
        let all0 = blockstore
            .get_confirmed_signatures_for_address(address0, 0, 50)
            .unwrap();
        assert_eq!(all0.len(), 8);
        for x in 1..9 {
            let expected_signature = Signature::new(&[x; 64]);
            assert_eq!(all0[x as usize - 1], expected_signature);
        }
        // Range filters: [20, 50) and [0, 10] each cover exactly one slot.
        assert_eq!(
            blockstore
                .get_confirmed_signatures_for_address(address0, 20, 50)
                .unwrap()
                .len(),
            4
        );
        assert_eq!(
            blockstore
                .get_confirmed_signatures_for_address(address0, 0, 10)
                .unwrap()
                .len(),
            4
        );
        // [1, 5) covers neither slot.
        assert!(blockstore
            .get_confirmed_signatures_for_address(address0, 1, 5)
            .unwrap()
            .is_empty());
        assert_eq!(
            blockstore
                .get_confirmed_signatures_for_address(address0, 1, 15)
                .unwrap()
                .len(),
            4
        );
        // address1 was written alongside address0 and must see the same set.
        let all1 = blockstore
            .get_confirmed_signatures_for_address(address1, 0, 50)
            .unwrap();
        assert_eq!(all1.len(), 8);
        for x in 1..9 {
            let expected_signature = Signature::new(&[x; 64]);
            assert_eq!(all1[x as usize - 1], expected_signature);
        }

        // Purge index 0
        blockstore
            .run_purge(0, 10, PurgeType::PrimaryIndex)
            .unwrap();
        // Only index-1 entries (slot1) survive the purge of slots 0..=10.
        assert_eq!(
            blockstore
                .get_confirmed_signatures_for_address(address0, 0, 50)
                .unwrap()
                .len(),
            4
        );
        assert_eq!(
            blockstore
                .get_confirmed_signatures_for_address(address0, 20, 50)
                .unwrap()
                .len(),
            4
        );
        assert!(blockstore
            .get_confirmed_signatures_for_address(address0, 0, 10)
            .unwrap()
            .is_empty());
        assert!(blockstore
            .get_confirmed_signatures_for_address(address0, 1, 5)
            .unwrap()
            .is_empty());
        assert_eq!(
            blockstore
                .get_confirmed_signatures_for_address(address0, 1, 25)
                .unwrap()
                .len(),
            4
        );

        // Test sort, regardless of entry order or signature value
        for slot in (21..25).rev() {
            let random_bytes: Vec<u8> = (0..64).map(|_| rand::random::<u8>()).collect();
            let signature = Signature::new(&random_bytes);
            blockstore
                .write_transaction_status(
                    slot,
                    signature,
                    vec![&address0],
                    vec![&address1],
                    TransactionStatusMeta::default(),
                )
                .unwrap();
        }
        blockstore.set_roots(&[21, 22, 23, 24]).unwrap();
        // Results must come back in non-decreasing slot order even though the
        // writes above happened in reverse slot order.
        let mut past_slot = 0;
        for (slot, _) in blockstore.find_address_signatures(address0, 1, 25).unwrap() {
            assert!(slot >= past_slot);
            past_slot = slot;
        }
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2021-03-31 21:35:57 -07:00
#[test]
fn test_find_address_signatures_for_slot() {
    // Exercises find_address_signatures_for_slot() across a primary-index
    // boundary: slot2's signatures are split between index 0 and index 1 by
    // the purge below, and the per-slot lookup must still return all of them.
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        let address0 = solana_sdk::pubkey::new_rand();
        let address1 = solana_sdk::pubkey::new_rand();
        // Slot 1: signatures 1..=4, all in primary index 0.
        let slot1 = 1;
        for x in 1..5 {
            let signature = Signature::new(&[x; 64]);
            blockstore
                .write_transaction_status(
                    slot1,
                    signature,
                    vec![&address0],
                    vec![&address1],
                    TransactionStatusMeta::default(),
                )
                .unwrap();
        }
        // Slot 2: signatures 5..=6 in index 0 ...
        let slot2 = 2;
        for x in 5..7 {
            let signature = Signature::new(&[x; 64]);
            blockstore
                .write_transaction_status(
                    slot2,
                    signature,
                    vec![&address0],
                    vec![&address1],
                    TransactionStatusMeta::default(),
                )
                .unwrap();
        }
        // Purge to freeze index 0
        blockstore.run_purge(0, 1, PurgeType::PrimaryIndex).unwrap();
        // ... and signatures 7..=8 in index 1.
        for x in 7..9 {
            let signature = Signature::new(&[x; 64]);
            blockstore
                .write_transaction_status(
                    slot2,
                    signature,
                    vec![&address0],
                    vec![&address1],
                    TransactionStatusMeta::default(),
                )
                .unwrap();
        }
        // Slot 3: signatures 9..=12, all in index 1.
        let slot3 = 3;
        for x in 9..13 {
            let signature = Signature::new(&[x; 64]);
            blockstore
                .write_transaction_status(
                    slot3,
                    signature,
                    vec![&address0],
                    vec![&address1],
                    TransactionStatusMeta::default(),
                )
                .unwrap();
        }
        blockstore.set_roots(&[slot1]).unwrap();

        // Assert the lengths explicitly so an empty result cannot let the
        // element-wise loops below pass vacuously.
        let slot1_signatures = blockstore
            .find_address_signatures_for_slot(address0, 1)
            .unwrap();
        assert_eq!(slot1_signatures.len(), 4);
        for (i, (slot, signature)) in slot1_signatures.iter().enumerate() {
            assert_eq!(*slot, slot1);
            assert_eq!(*signature, Signature::new(&[i as u8 + 1; 64]));
        }
        // Slot 2's four signatures span both primary indexes.
        let slot2_signatures = blockstore
            .find_address_signatures_for_slot(address0, 2)
            .unwrap();
        assert_eq!(slot2_signatures.len(), 4);
        for (i, (slot, signature)) in slot2_signatures.iter().enumerate() {
            assert_eq!(*slot, slot2);
            assert_eq!(*signature, Signature::new(&[i as u8 + 5; 64]));
        }
        let slot3_signatures = blockstore
            .find_address_signatures_for_slot(address0, 3)
            .unwrap();
        assert_eq!(slot3_signatures.len(), 4);
        for (i, (slot, signature)) in slot3_signatures.iter().enumerate() {
            assert_eq!(*slot, slot3);
            assert_eq!(*signature, Signature::new(&[i as u8 + 9; 64]));
        }
    }
}
2020-07-27 11:42:49 -07:00
#[test]
fn test_get_confirmed_signatures_for_address2() {
    // End-to-end pagination test for get_confirmed_signatures_for_address2():
    // builds slots 2..=10 each containing 2 transactions per address, roots
    // all but slot 3, then exercises `before`/`until`/limit paging both with
    // the highest rooted slot and with the highest confirmed (unrooted) slot.
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();

        // Helper: one transaction entry per address, each followed by a tick
        // (tick hash derived from the address so entries are distinct).
        fn make_slot_entries_with_transaction_addresses(addresses: &[Pubkey]) -> Vec<Entry> {
            let mut entries: Vec<Entry> = Vec::new();
            for address in addresses {
                let transaction = Transaction::new_with_compiled_instructions(
                    &[&Keypair::new()],
                    &[*address],
                    Hash::default(),
                    vec![solana_sdk::pubkey::new_rand()],
                    vec![CompiledInstruction::new(1, &(), vec![0])],
                );
                entries.push(next_entry_mut(&mut Hash::default(), 0, vec![transaction]));
                let mut tick = create_ticks(1, 0, hash(&serialize(address).unwrap()));
                entries.append(&mut tick);
            }
            entries
        }

        let address0 = solana_sdk::pubkey::new_rand();
        let address1 = solana_sdk::pubkey::new_rand();

        // Slots 2..=8: two transactions per address per slot.
        for slot in 2..=8 {
            let entries = make_slot_entries_with_transaction_addresses(&[
                address0, address1, address0, address1,
            ]);
            let shreds = entries_to_test_shreds(entries.clone(), slot, slot - 1, true, 0);
            blockstore.insert_shreds(shreds, None, false).unwrap();

            for (i, entry) in entries.iter().enumerate() {
                if slot == 4 && i == 2 {
                    // Purge to freeze index 0 and write address-signatures in new primary index
                    blockstore.run_purge(0, 1, PurgeType::PrimaryIndex).unwrap();
                }
                for transaction in &entry.transactions {
                    assert_eq!(transaction.signatures.len(), 1);
                    blockstore
                        .write_transaction_status(
                            slot,
                            transaction.signatures[0],
                            transaction.message.account_keys.iter().collect(),
                            vec![],
                            TransactionStatusMeta::default(),
                        )
                        .unwrap();
                }
            }
        }

        // Add 2 slots that both descend from slot 8
        for slot in 9..=10 {
            let entries = make_slot_entries_with_transaction_addresses(&[
                address0, address1, address0, address1,
            ]);
            let shreds = entries_to_test_shreds(entries.clone(), slot, 8, true, 0);
            blockstore.insert_shreds(shreds, None, false).unwrap();
            for entry in entries.iter() {
                for transaction in &entry.transactions {
                    assert_eq!(transaction.signatures.len(), 1);
                    blockstore
                        .write_transaction_status(
                            slot,
                            transaction.signatures[0],
                            transaction.message.account_keys.iter().collect(),
                            vec![],
                            TransactionStatusMeta::default(),
                        )
                        .unwrap();
                }
            }
        }

        // Leave one slot unrooted to test only returns confirmed signatures
        blockstore.set_roots(&[1, 2, 4, 5, 6, 7, 8]).unwrap();
        let highest_confirmed_root = 8;

        // Fetch all rooted signatures for address 0 at once...
        // 6 rooted slots (2, 4..=8; slot 3 is unrooted) x 2 per slot = 12.
        let all0 = blockstore
            .get_confirmed_signatures_for_address2(
                address0,
                highest_confirmed_root,
                None,
                None,
                usize::MAX,
            )
            .unwrap();
        assert_eq!(all0.len(), 12);

        // Fetch all rooted signatures for address 1 at once...
        let all1 = blockstore
            .get_confirmed_signatures_for_address2(
                address1,
                highest_confirmed_root,
                None,
                None,
                usize::MAX,
            )
            .unwrap();
        assert_eq!(all1.len(), 12);

        // Fetch all signatures for address 0 individually
        for i in 0..all0.len() {
            let results = blockstore
                .get_confirmed_signatures_for_address2(
                    address0,
                    highest_confirmed_root,
                    if i == 0 {
                        None
                    } else {
                        Some(all0[i - 1].signature)
                    },
                    None,
                    1,
                )
                .unwrap();
            assert_eq!(results.len(), 1);
            assert_eq!(results[0], all0[i], "Unexpected result for {}", i);
        }
        // Fetch all signatures for address 0 individually using `until`
        for i in 0..all0.len() {
            let results = blockstore
                .get_confirmed_signatures_for_address2(
                    address0,
                    highest_confirmed_root,
                    if i == 0 {
                        None
                    } else {
                        Some(all0[i - 1].signature)
                    },
                    if i == all0.len() - 1 || i == all0.len() {
                        None
                    } else {
                        Some(all0[i + 1].signature)
                    },
                    10,
                )
                .unwrap();
            assert_eq!(results.len(), 1);
            assert_eq!(results[0], all0[i], "Unexpected result for {}", i);
        }
        // Paging past the last signature yields nothing.
        assert!(blockstore
            .get_confirmed_signatures_for_address2(
                address0,
                highest_confirmed_root,
                Some(all0[all0.len() - 1].signature),
                None,
                1,
            )
            .unwrap()
            .is_empty());
        // `until` at the first signature excludes everything.
        assert!(blockstore
            .get_confirmed_signatures_for_address2(
                address0,
                highest_confirmed_root,
                None,
                Some(all0[0].signature),
                2,
            )
            .unwrap()
            .is_empty());

        // Fetch all signatures for address 0, three at a time
        assert!(all0.len() % 3 == 0);
        for i in (0..all0.len()).step_by(3) {
            let results = blockstore
                .get_confirmed_signatures_for_address2(
                    address0,
                    highest_confirmed_root,
                    if i == 0 {
                        None
                    } else {
                        Some(all0[i - 1].signature)
                    },
                    None,
                    3,
                )
                .unwrap();
            assert_eq!(results.len(), 3);
            assert_eq!(results[0], all0[i]);
            assert_eq!(results[1], all0[i + 1]);
            assert_eq!(results[2], all0[i + 2]);
        }

        // Ensure that the signatures within a slot are reverse ordered by signature
        // (current limitation of the .get_confirmed_signatures_for_address2())
        for i in (0..all1.len()).step_by(2) {
            let results = blockstore
                .get_confirmed_signatures_for_address2(
                    address1,
                    highest_confirmed_root,
                    if i == 0 {
                        None
                    } else {
                        Some(all1[i - 1].signature)
                    },
                    None,
                    2,
                )
                .unwrap();
            assert_eq!(results.len(), 2);
            assert_eq!(results[0].slot, results[1].slot);
            assert!(results[0].signature >= results[1].signature);
            assert_eq!(results[0], all1[i]);
            assert_eq!(results[1], all1[i + 1]);
        }

        // A search for address 0 with `before` and/or `until` signatures from address1 should also work
        let results = blockstore
            .get_confirmed_signatures_for_address2(
                address0,
                highest_confirmed_root,
                Some(all1[0].signature),
                None,
                usize::MAX,
            )
            .unwrap();
        // The exact number of results returned is variable, based on the sort order of the
        // random signatures that are generated
        assert!(!results.is_empty());

        let results2 = blockstore
            .get_confirmed_signatures_for_address2(
                address0,
                highest_confirmed_root,
                Some(all1[0].signature),
                Some(all1[4].signature),
                usize::MAX,
            )
            .unwrap();
        assert!(results2.len() < results.len());

        // Duplicate all tests using confirmed signatures
        // Raising the limit to slot 10 adds the 2 unrooted descendants of
        // slot 8 (slots 9 and 10), so counts go from 12 to 14.
        let highest_confirmed_slot = 10;
        // Fetch all signatures for address 0 at once...
        let all0 = blockstore
            .get_confirmed_signatures_for_address2(
                address0,
                highest_confirmed_slot,
                None,
                None,
                usize::MAX,
            )
            .unwrap();
        assert_eq!(all0.len(), 14);
        // Fetch all signatures for address 1 at once...
        let all1 = blockstore
            .get_confirmed_signatures_for_address2(
                address1,
                highest_confirmed_slot,
                None,
                None,
                usize::MAX,
            )
            .unwrap();
        assert_eq!(all1.len(), 14);
        // Fetch all signatures for address 0 individually
        for i in 0..all0.len() {
            let results = blockstore
                .get_confirmed_signatures_for_address2(
                    address0,
                    highest_confirmed_slot,
                    if i == 0 {
                        None
                    } else {
                        Some(all0[i - 1].signature)
                    },
                    None,
                    1,
                )
                .unwrap();
            assert_eq!(results.len(), 1);
            assert_eq!(results[0], all0[i], "Unexpected result for {}", i);
        }
        // Fetch all signatures for address 0 individually using `until`
        for i in 0..all0.len() {
            let results = blockstore
                .get_confirmed_signatures_for_address2(
                    address0,
                    highest_confirmed_slot,
                    if i == 0 {
                        None
                    } else {
                        Some(all0[i - 1].signature)
                    },
                    if i == all0.len() - 1 || i == all0.len() {
                        None
                    } else {
                        Some(all0[i + 1].signature)
                    },
                    10,
                )
                .unwrap();
            assert_eq!(results.len(), 1);
            assert_eq!(results[0], all0[i], "Unexpected result for {}", i);
        }
        assert!(blockstore
            .get_confirmed_signatures_for_address2(
                address0,
                highest_confirmed_slot,
                Some(all0[all0.len() - 1].signature),
                None,
                1,
            )
            .unwrap()
            .is_empty());
        assert!(blockstore
            .get_confirmed_signatures_for_address2(
                address0,
                highest_confirmed_slot,
                None,
                Some(all0[0].signature),
                2,
            )
            .unwrap()
            .is_empty());
        // Fetch all signatures for address 0, three at a time
        // (14 % 3 == 2, so the final page is a partial one of length 2).
        assert!(all0.len() % 3 == 2);
        for i in (0..all0.len()).step_by(3) {
            let results = blockstore
                .get_confirmed_signatures_for_address2(
                    address0,
                    highest_confirmed_slot,
                    if i == 0 {
                        None
                    } else {
                        Some(all0[i - 1].signature)
                    },
                    None,
                    3,
                )
                .unwrap();
            if i < 12 {
                assert_eq!(results.len(), 3);
                assert_eq!(results[2], all0[i + 2]);
            } else {
                assert_eq!(results.len(), 2);
            }
            assert_eq!(results[0], all0[i]);
            assert_eq!(results[1], all0[i + 1]);
        }
        // Ensure that the signatures within a slot are reverse ordered by signature
        // (current limitation of the .get_confirmed_signatures_for_address2())
        for i in (0..all1.len()).step_by(2) {
            let results = blockstore
                .get_confirmed_signatures_for_address2(
                    address1,
                    highest_confirmed_slot,
                    if i == 0 {
                        None
                    } else {
                        Some(all1[i - 1].signature)
                    },
                    None,
                    2,
                )
                .unwrap();
            assert_eq!(results.len(), 2);
            assert_eq!(results[0].slot, results[1].slot);
            assert!(results[0].signature >= results[1].signature);
            assert_eq!(results[0], all1[i]);
            assert_eq!(results[1], all1[i + 1]);
        }
        // A search for address 0 with `before` and/or `until` signatures from address1 should also work
        let results = blockstore
            .get_confirmed_signatures_for_address2(
                address0,
                highest_confirmed_slot,
                Some(all1[0].signature),
                None,
                usize::MAX,
            )
            .unwrap();
        // The exact number of results returned is variable, based on the sort order of the
        // random signatures that are generated
        assert!(!results.is_empty());
        let results2 = blockstore
            .get_confirmed_signatures_for_address2(
                address0,
                highest_confirmed_slot,
                Some(all1[0].signature),
                Some(all1[4].signature),
                usize::MAX,
            )
            .unwrap();
        assert!(results2.len() < results.len());
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2019-11-17 19:17:15 -08:00
#[test]
#[allow(clippy::same_item_push)]
fn test_get_last_hash() {
    // An empty iterator yields no last hash.
    let mut entries: Vec<Entry> = vec![];
    assert!(get_last_hash(entries.iter()).is_none());

    // Build a chain of 10 entries; get_last_hash() must report the hash of
    // the final entry in the chain.
    let mut prev_hash = hash::hash(&[42u8]);
    let mut remaining = 10;
    while remaining > 0 {
        let entry = next_entry(&prev_hash, 1, vec![]);
        prev_hash = entry.hash;
        entries.push(entry);
        remaining -= 1;
    }
    assert_eq!(get_last_hash(entries.iter()).unwrap(), entries[9].hash);
}
2019-11-18 08:12:42 -08:00
#[test]
fn test_map_transactions_to_statuses() {
    // map_transactions_to_statuses() must pair each transaction with its
    // stored status, and yield `meta: None` for a transaction with no status.
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        let transaction_status_cf = blockstore.db.column::<cf::TransactionStatus>();
        let slot = 0;
        let mut transactions: Vec<Transaction> = vec![];
        for x in 0..4 {
            let transaction = Transaction::new_with_compiled_instructions(
                &[&Keypair::new()],
                &[solana_sdk::pubkey::new_rand()],
                Hash::default(),
                vec![solana_sdk::pubkey::new_rand()],
                vec![CompiledInstruction::new(1, &(), vec![0])],
            );
            // Use the loop index as the fee so each status is distinguishable
            // in the assertions below. `.into()` converts to the stored
            // (protobuf) representation.
            let status = TransactionStatusMeta {
                status: solana_sdk::transaction::Result::<()>::Err(
                    TransactionError::AccountNotFound,
                ),
                fee: x,
                pre_balances: vec![],
                post_balances: vec![],
                inner_instructions: Some(vec![]),
                log_messages: Some(vec![]),
                pre_token_balances: Some(vec![]),
                post_token_balances: Some(vec![]),
                rewards: Some(vec![]),
            }
            .into();
            transaction_status_cf
                .put_protobuf((0, transaction.signatures[0], slot), &status)
                .unwrap();
            transactions.push(transaction);
        }
        // Push transaction that will not have matching status, as a test case
        transactions.push(Transaction::new_with_compiled_instructions(
            &[&Keypair::new()],
            &[solana_sdk::pubkey::new_rand()],
            Hash::default(),
            vec![solana_sdk::pubkey::new_rand()],
            vec![CompiledInstruction::new(1, &(), vec![0])],
        ));

        let map = blockstore.map_transactions_to_statuses(slot, transactions.into_iter());
        assert_eq!(map.len(), 5);
        // The first four entries carry their stored metas (fee == index)...
        for (x, m) in map.iter().take(4).enumerate() {
            assert_eq!(m.meta.as_ref().unwrap().fee, x as u64);
        }
        // ... and the status-less transaction maps to None.
        assert_eq!(map[4].meta, None);
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_write_get_perf_samples() {
    // Round-trip perf samples through the blockstore: get_recent_perf_samples(k)
    // must return the k newest samples, sorted by slot descending.
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        let num_entries: usize = 10;
        let perf_samples: Vec<(Slot, PerfSample)> = (1..=num_entries)
            .map(|i| {
                (
                    i as u64 * 50,
                    PerfSample {
                        num_transactions: 1000 + i as u64,
                        num_slots: 50,
                        sample_period_secs: 20,
                    },
                )
            })
            .collect();
        for (slot, sample) in &perf_samples {
            blockstore.write_perf_sample(*slot, sample).unwrap();
        }
        // Ask for 1, 2, ... num_entries samples and compare against the
        // expected newest-first suffix of the written set.
        for k in 0..num_entries {
            let mut expected = perf_samples[num_entries - 1 - k..].to_vec();
            expected.sort_by_key(|(slot, _)| std::cmp::Reverse(*slot));
            assert_eq!(blockstore.get_recent_perf_samples(k + 1).unwrap(), expected);
        }
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2019-12-05 11:25:13 -08:00
#[test]
fn test_lowest_slot() {
    // lowest_slot() reports the lowest non-zero slot with data and must move
    // up after a purge.
    let ledger_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&ledger_path).unwrap();
        for slot in 0..10 {
            let (shreds, _) = make_slot_entries(slot, 0, 1);
            blockstore.insert_shreds(shreds, None, false).unwrap();
        }
        // Slot 0 is not counted, so the floor starts at 1.
        assert_eq!(blockstore.lowest_slot(), 1);
        // Purging slots 0..=5 raises the floor to 6.
        blockstore.run_purge(0, 5, PurgeType::PrimaryIndex).unwrap();
        assert_eq!(blockstore.lowest_slot(), 6);
    }
    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
2019-12-09 00:13:36 -08:00
#[test]
fn test_recovery() {
    // Insert only the coding shreds for a slot; erasure recovery must
    // reconstruct every data shred with a byte-identical payload.
    let slot = 1;
    let (data_shreds, coding_shreds, leader_schedule_cache) =
        setup_erasure_shreds(slot, 0, 100);
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        blockstore
            .insert_shreds(coding_shreds, Some(&leader_schedule_cache), false)
            .unwrap();
        let shred_bufs: Vec<_> = data_shreds
            .iter()
            .map(|shred| shred.payload.clone())
            .collect();
        // Check all the data shreds were recovered
        for (s, buf) in data_shreds.iter().zip(shred_bufs) {
            assert_eq!(
                blockstore
                    .get_data_shred(s.slot(), s.index() as u64)
                    .unwrap()
                    .unwrap(),
                buf
            );
        }

        // Data and coding columns must agree with the index column after
        // recovery.
        verify_index_integrity(&blockstore, slot);
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_index_integrity() {
    let slot = 1;
    let num_entries = 100;
    let (data_shreds, coding_shreds, leader_schedule_cache) =
        setup_erasure_shreds(slot, 0, num_entries);
    assert!(data_shreds.len() > 3);
    assert!(coding_shreds.len() > 3);

    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        let num_data = data_shreds.len();
        let num_coding = coding_shreds.len();

        // Insert each batch in order, verify that the shred index matches the
        // stored shreds exactly, then wipe the slot so every scenario starts
        // from a clean state.
        let run_scenario = |batches: Vec<Vec<Shred>>| {
            for batch in batches {
                blockstore
                    .insert_shreds(batch, Some(&leader_schedule_cache), false)
                    .unwrap();
            }
            verify_index_integrity(&blockstore, slot);
            blockstore.purge_and_compact_slots(0, slot);
        };

        // All data and coding shreds at once.
        run_scenario(vec![data_shreds
            .iter()
            .cloned()
            .chain(coding_shreds.iter().cloned())
            .collect()]);

        // Just the coding shreds: enough for recovery.
        run_scenario(vec![coding_shreds.clone()]);

        // Some coding shreds, but not enough for recovery.
        run_scenario(vec![coding_shreds[..num_coding - 1].to_vec()]);

        // The coding shreds plus some data: enough for recovery.
        run_scenario(vec![data_shreds[..num_data - 1]
            .iter()
            .cloned()
            .chain(coding_shreds[..num_coding - 1].iter().cloned())
            .collect()]);

        // Some coding and some data, but still enough for recovery.
        run_scenario(vec![data_shreds[..num_data / 2 - 1]
            .iter()
            .cloned()
            .chain(coding_shreds[..num_coding / 2 - 1].iter().cloned())
            .collect()]);

        // All shreds split across two insert rounds; nothing may be lost.
        run_scenario(vec![
            data_shreds[..num_data / 2 - 1]
                .iter()
                .cloned()
                .chain(coding_shreds[..num_coding / 2 - 1].iter().cloned())
                .collect(),
            data_shreds[num_data / 2 - 1..]
                .iter()
                .cloned()
                .chain(coding_shreds[num_coding / 2 - 1..].iter().cloned())
                .collect(),
        ]);

        // Not all shreds, but enough across two rounds to trigger recovery;
        // nothing may be lost.
        run_scenario(vec![
            data_shreds[..num_data / 2 - 1]
                .iter()
                .cloned()
                .chain(coding_shreds[..num_coding / 2 - 1].iter().cloned())
                .collect(),
            data_shreds[num_data / 2 - 1..num_data / 2]
                .iter()
                .cloned()
                .chain(
                    coding_shreds[num_coding / 2 - 1..num_coding / 2]
                        .iter()
                        .cloned(),
                )
                .collect(),
        ]);

        // Two rounds that never reach the recovery threshold; nothing may be
        // lost.
        run_scenario(vec![
            data_shreds[..num_data / 2 - 2]
                .iter()
                .cloned()
                .chain(coding_shreds[..num_coding / 2 - 2].iter().cloned())
                .collect(),
            data_shreds[num_data / 2 - 2..num_data / 2 - 1]
                .iter()
                .cloned()
                .chain(
                    coding_shreds[num_coding / 2 - 2..num_coding / 2 - 1]
                        .iter()
                        .cloned(),
                )
                .collect(),
        ]);
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
fn setup_erasure_shreds (
slot : u64 ,
parent_slot : u64 ,
num_entries : u64 ,
) -> ( Vec < Shred > , Vec < Shred > , Arc < LeaderScheduleCache > ) {
let entries = make_slot_entries_with_transactions ( num_entries ) ;
let leader_keypair = Arc ::new ( Keypair ::new ( ) ) ;
2021-06-21 13:12:38 -07:00
let shredder = Shredder ::new ( slot , parent_slot , 0 , 0 ) . unwrap ( ) ;
let ( data_shreds , coding_shreds , _ ) =
shredder . entries_to_shreds ( & leader_keypair , & entries , true , 0 ) ;
2019-12-09 00:13:36 -08:00
let genesis_config = create_genesis_config ( 2 ) . genesis_config ;
let bank = Arc ::new ( Bank ::new ( & genesis_config ) ) ;
let mut leader_schedule_cache = LeaderScheduleCache ::new_from_bank ( & bank ) ;
let fixed_schedule = FixedSchedule {
leader_schedule : Arc ::new ( LeaderSchedule ::new_from_schedule ( vec! [
leader_keypair . pubkey ( )
] ) ) ,
start_epoch : 0 ,
} ;
leader_schedule_cache . set_fixed_leader_schedule ( Some ( fixed_schedule ) ) ;
( data_shreds , coding_shreds , Arc ::new ( leader_schedule_cache ) )
}
2020-01-13 13:13:52 -08:00
fn verify_index_integrity ( blockstore : & Blockstore , slot : u64 ) {
2021-06-14 11:23:31 -07:00
let shred_index = blockstore . get_index ( slot ) . unwrap ( ) . unwrap ( ) ;
2020-04-24 15:04:23 -07:00
let data_iter = blockstore . slot_data_iterator ( slot , 0 ) . unwrap ( ) ;
2019-12-09 00:13:36 -08:00
let mut num_data = 0 ;
2020-04-24 15:04:23 -07:00
for ( ( slot , index ) , _ ) in data_iter {
num_data + = 1 ;
2021-06-14 11:23:31 -07:00
// Test that iterator and individual shred lookup yield same set
2020-04-24 15:04:23 -07:00
assert! ( blockstore . get_data_shred ( slot , index ) . unwrap ( ) . is_some ( ) ) ;
2021-06-14 11:23:31 -07:00
// Test that the data index has current shred accounted for
assert! ( shred_index . data ( ) . is_present ( index ) ) ;
2019-12-09 00:13:36 -08:00
}
// Test the data index doesn't have anything extra
2021-06-14 11:23:31 -07:00
let num_data_in_index = shred_index . data ( ) . num_shreds ( ) ;
2019-12-09 00:13:36 -08:00
assert_eq! ( num_data_in_index , num_data ) ;
2020-03-19 23:35:01 -07:00
let coding_iter = blockstore . slot_coding_iterator ( slot , 0 ) . unwrap ( ) ;
2019-12-09 00:13:36 -08:00
let mut num_coding = 0 ;
for ( ( slot , index ) , _ ) in coding_iter {
num_coding + = 1 ;
2021-06-14 11:23:31 -07:00
// Test that the iterator and individual shred lookup yield same set
2020-01-13 13:13:52 -08:00
assert! ( blockstore . get_coding_shred ( slot , index ) . unwrap ( ) . is_some ( ) ) ;
2021-06-14 11:23:31 -07:00
// Test that the coding index has current shred accounted for
assert! ( shred_index . coding ( ) . is_present ( index ) ) ;
2019-12-09 00:13:36 -08:00
}
// Test the data index doesn't have anything extra
2021-06-14 11:23:31 -07:00
let num_coding_in_index = shred_index . coding ( ) . num_shreds ( ) ;
2019-12-09 00:13:36 -08:00
assert_eq! ( num_coding_in_index , num_coding ) ;
}
2020-01-13 17:21:39 -08:00
#[test]
fn test_duplicate_slot() {
    let slot = 0;
    // Two different entry sets shredded for the same slot produce
    // conflicting shreds at identical (slot, index) positions.
    let entries1 = make_slot_entries_with_transactions(1);
    let entries2 = make_slot_entries_with_transactions(1);
    let leader_keypair = Arc::new(Keypair::new());
    let shredder = Shredder::new(slot, 0, 0, 0).unwrap();
    let (shreds, _, _) = shredder.entries_to_shreds(&leader_keypair, &entries1, true, 0);
    let (duplicate_shreds, _, _) =
        shredder.entries_to_shreds(&leader_keypair, &entries2, true, 0);
    let shred = shreds[0].clone();
    let duplicate_shred = duplicate_shreds[0].clone();
    let non_duplicate_shred = shred.clone();

    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        blockstore
            .insert_shreds(vec![shred.clone()], None, false)
            .unwrap();

        // No duplicate shreds exist yet
        assert!(!blockstore.has_duplicate_shreds_in_slot(slot));

        // Check if shreds are duplicated: a conflicting payload at the same
        // position yields the stored payload, an identical payload yields None.
        assert_eq!(
            blockstore.is_shred_duplicate(
                slot,
                0,
                &duplicate_shred.payload,
                duplicate_shred.is_data()
            ),
            Some(shred.payload.to_vec())
        );
        assert!(blockstore
            .is_shred_duplicate(
                slot,
                0,
                &non_duplicate_shred.payload,
                duplicate_shred.is_data()
            )
            .is_none());

        // Store a duplicate shred
        blockstore
            .store_duplicate_slot(slot, shred.payload.clone(), duplicate_shred.payload.clone())
            .unwrap();

        // Slot is now marked as duplicate
        assert!(blockstore.has_duplicate_shreds_in_slot(slot));

        // Check ability to fetch the duplicates
        let duplicate_proof = blockstore.get_duplicate_slot(slot).unwrap();
        assert_eq!(duplicate_proof.shred1, shred.payload);
        assert_eq!(duplicate_proof.shred2, duplicate_shred.payload);
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2020-05-05 14:07:21 -07:00
#[test]
fn test_clear_unconfirmed_slot() {
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        let unconfirmed_slot = 9;
        let unconfirmed_child_slot = 10;
        let slots = vec![2, unconfirmed_slot, unconfirmed_child_slot];

        // Build the chain 2 -> 9 -> 10 with one entry per slot and insert it.
        let shreds: Vec<_> = make_chaining_slot_entries(&slots, 1)
            .into_iter()
            .flat_map(|(slot_shreds, _entries)| slot_shreds)
            .collect();
        blockstore.insert_shreds(shreds, None, false).unwrap();

        // Slot 9 should hold exactly one shred; then mark it dead.
        assert!(blockstore
            .get_data_shred(unconfirmed_slot, 0)
            .unwrap()
            .is_some());
        assert!(blockstore
            .get_data_shred(unconfirmed_slot, 1)
            .unwrap()
            .is_none());
        blockstore.set_dead_slot(unconfirmed_slot).unwrap();

        // Clearing the slot removes its dead flag and shred data, but the
        // meta keeps pointing at the child slot.
        blockstore.clear_unconfirmed_slot(unconfirmed_slot);
        assert!(!blockstore.is_dead(unconfirmed_slot));
        assert_eq!(
            blockstore
                .meta(unconfirmed_slot)
                .unwrap()
                .unwrap()
                .next_slots,
            vec![unconfirmed_child_slot]
        );
        assert!(blockstore
            .get_data_shred(unconfirmed_slot, 0)
            .unwrap()
            .is_none());
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2020-09-01 22:06:06 -07:00
#[test]
fn test_update_completed_data_indexes() {
    let mut completed_data_indexes: Vec<u32> = vec![];
    let mut shred_index = ShredIndex::default();
    // Inserting data-complete shreds 0..10 in order: each insertion closes
    // the single-shred range [idx, idx] and extends the completed list.
    for idx in 0..10 {
        shred_index.set_present(u64::from(idx), true);
        assert_eq!(
            update_completed_data_indexes(true, idx, &shred_index, &mut completed_data_indexes),
            vec![(idx, idx)]
        );
        assert_eq!(completed_data_indexes, (0..=idx).collect::<Vec<u32>>());
    }
}
#[ test ]
fn test_update_completed_data_indexes_out_of_order ( ) {
let mut completed_data_indexes = vec! [ ] ;
let mut shred_index = ShredIndex ::default ( ) ;
shred_index . set_present ( 4 , true ) ;
assert! (
update_completed_data_indexes ( false , 4 , & shred_index , & mut completed_data_indexes )
. is_empty ( )
) ;
assert! ( completed_data_indexes . is_empty ( ) ) ;
shred_index . set_present ( 2 , true ) ;
assert! (
update_completed_data_indexes ( false , 2 , & shred_index , & mut completed_data_indexes )
. is_empty ( )
) ;
assert! ( completed_data_indexes . is_empty ( ) ) ;
shred_index . set_present ( 3 , true ) ;
assert! (
update_completed_data_indexes ( true , 3 , & shred_index , & mut completed_data_indexes )
. is_empty ( )
) ;
assert_eq! ( completed_data_indexes , vec! [ 3 ] ) ;
// Inserting data complete shred 1 now confirms the range of shreds [2, 3]
// is part of the same data set
shred_index . set_present ( 1 , true ) ;
assert_eq! (
update_completed_data_indexes ( true , 1 , & shred_index , & mut completed_data_indexes ) ,
vec! [ ( 2 , 3 ) ]
) ;
assert_eq! ( completed_data_indexes , vec! [ 1 , 3 ] ) ;
// Inserting data complete shred 0 now confirms the range of shreds [0]
// is part of the same data set
shred_index . set_present ( 0 , true ) ;
assert_eq! (
update_completed_data_indexes ( true , 0 , & shred_index , & mut completed_data_indexes ) ,
vec! [ ( 0 , 0 ) , ( 1 , 1 ) ]
) ;
assert_eq! ( completed_data_indexes , vec! [ 0 , 1 , 3 ] ) ;
}
2020-10-15 17:04:10 -07:00
#[test]
fn test_rewards_protobuf_backward_compatability() {
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        let rewards: Rewards = (0..100)
            .map(|i| Reward {
                pubkey: solana_sdk::pubkey::new_rand().to_string(),
                lamports: 42 + i,
                post_balance: std::u64::MAX,
                reward_type: Some(RewardType::Fee),
            })
            .collect();
        let protobuf_rewards: generated::Rewards = rewards.into();
        let deprecated_rewards: StoredExtendedRewards = protobuf_rewards.clone().into();

        // Slots 0-1 are written with the deprecated bincode encoding,
        // slots 2-3 with the protobuf encoding.
        for slot in 0..4 {
            if slot < 2 {
                let data = serialize(&deprecated_rewards).unwrap();
                blockstore.rewards_cf.put_bytes(slot, &data).unwrap();
            } else {
                blockstore
                    .rewards_cf
                    .put_protobuf(slot, &protobuf_rewards)
                    .unwrap();
            }
        }

        // Both encodings must deserialize to the same protobuf value.
        for slot in 0..4 {
            assert_eq!(
                blockstore
                    .rewards_cf
                    .get_protobuf_or_bincode::<StoredExtendedRewards>(slot)
                    .unwrap()
                    .unwrap(),
                protobuf_rewards
            );
        }
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2020-11-16 21:30:38 -08:00
2021-03-05 08:05:35 -08:00
#[test]
fn test_transaction_status_protobuf_backward_compatability() {
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        // A fully-populated status so every optional field round-trips.
        let status = TransactionStatusMeta {
            status: Ok(()),
            fee: 42,
            pre_balances: vec![1, 2, 3],
            post_balances: vec![1, 2, 3],
            inner_instructions: Some(vec![]),
            log_messages: Some(vec![]),
            pre_token_balances: Some(vec![TransactionTokenBalance {
                account_index: 0,
                mint: Pubkey::new_unique().to_string(),
                ui_token_amount: UiTokenAmount {
                    ui_amount: Some(1.1),
                    decimals: 1,
                    amount: "11".to_string(),
                    ui_amount_string: "1.1".to_string(),
                },
            }]),
            post_token_balances: Some(vec![TransactionTokenBalance {
                account_index: 0,
                mint: Pubkey::new_unique().to_string(),
                ui_token_amount: UiTokenAmount {
                    ui_amount: None,
                    decimals: 1,
                    amount: "11".to_string(),
                    ui_amount_string: "1.1".to_string(),
                },
            }]),
            rewards: Some(vec![Reward {
                pubkey: "My11111111111111111111111111111111111111111".to_string(),
                lamports: -42,
                post_balance: 42,
                reward_type: Some(RewardType::Rent),
            }]),
        };
        let deprecated_status: StoredTransactionStatusMeta = status.clone().into();
        let protobuf_status: generated::TransactionStatusMeta = status.into();

        // Slots 0-1 are written with the deprecated bincode encoding,
        // slots 2-3 with the protobuf encoding.
        for slot in 0..2 {
            let data = serialize(&deprecated_status).unwrap();
            blockstore
                .transaction_status_cf
                .put_bytes((0, Signature::default(), slot), &data)
                .unwrap();
        }
        for slot in 2..4 {
            blockstore
                .transaction_status_cf
                .put_protobuf((0, Signature::default(), slot), &protobuf_status)
                .unwrap();
        }
        // Both encodings must read back as the same protobuf value.
        for slot in 0..4 {
            assert_eq!(
                blockstore
                    .transaction_status_cf
                    .get_protobuf_or_bincode::<StoredTransactionStatusMeta>((
                        0,
                        Signature::default(),
                        slot
                    ))
                    .unwrap()
                    .unwrap(),
                protobuf_status
            );
        }
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2020-11-16 21:30:38 -08:00
#[ test ]
fn test_remove_shred_data_complete_flag ( ) {
let ( mut shreds , entries ) = make_slot_entries ( 0 , 0 , 1 ) ;
let ledger_path = get_tmp_ledger_path! ( ) ;
let ledger = Blockstore ::open ( & ledger_path ) . unwrap ( ) ;
// Remove the data complete flag from the last shred
shreds [ 0 ] . unset_data_complete ( ) ;
ledger . insert_shreds ( shreds , None , false ) . unwrap ( ) ;
// Check that the `data_complete` flag was unset in the stored shred, but the
// `last_in_slot` flag is set.
let stored_shred = & ledger . get_data_shreds_for_slot ( 0 , 0 ) . unwrap ( ) [ 0 ] ;
assert! ( ! stored_shred . data_complete ( ) ) ;
assert! ( stored_shred . last_in_slot ( ) ) ;
assert_eq! ( entries , ledger . get_any_valid_slot_entries ( 0 , 0 ) ) ;
}
2020-12-09 23:14:31 -08:00
/// Builds a single entry containing `num_txs` random one-lamport transfers.
/// Used to force slots large enough to span multiple shreds in the erasure
/// tests below.
fn make_large_tx_entry(num_txs: usize) -> Entry {
    // `Range` is already an `Iterator`; the original `.into_iter()` call was
    // a redundant identity conversion (clippy::useless_conversion).
    let txs: Vec<_> = (0..num_txs)
        .map(|_| {
            let from = Keypair::new();
            let to = solana_sdk::pubkey::new_rand();
            solana_sdk::system_transaction::transfer(&from, &to, 1, Hash::default())
        })
        .collect();
    Entry::new(&Hash::default(), 1, txs)
}
#[test]
fn erasure_multiple_config() {
    solana_logger::setup();
    let slot = 1;
    let parent = 0;
    let num_txs = 20;
    let entry = make_large_tx_entry(num_txs);
    let shreds = entries_to_test_shreds(vec![entry], slot, parent, true, 0);
    assert!(shreds.len() > 1);

    let ledger_path = get_tmp_ledger_path!();
    let ledger = Blockstore::open(&ledger_path).unwrap();

    // Generate two coding sets over the same data shreds with different
    // configurations (the boolean flag differs between the two calls).
    let coding1 = Shredder::generate_coding_shreds(&shreds, false);
    let coding2 = Shredder::generate_coding_shreds(&shreds, true);
    for shred in &shreds {
        info!("shred {:?}", shred);
    }
    for shred in &coding1 {
        info!("coding1 {:?}", shred);
    }
    for shred in &coding2 {
        info!("coding2 {:?}", shred);
    }
    ledger
        .insert_shreds(shreds[..shreds.len() - 2].to_vec(), None, false)
        .unwrap();
    // Inserting coding shreds from differently-configured sets should flag
    // the slot as containing duplicates.
    ledger
        .insert_shreds(vec![coding1[0].clone(), coding2[1].clone()], None, false)
        .unwrap();
    assert!(ledger.has_duplicate_shreds_in_slot(slot));
}
#[test]
fn test_large_num_coding() {
    solana_logger::setup();
    let slot = 1;
    let (_data_shreds, mut coding_shreds, leader_schedule_cache) =
        setup_erasure_shreds(slot, 0, 100);
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        // Corrupt the header so it claims an absurd number of coding shreds.
        coding_shreds[1].coding_header.num_coding_shreds = u16::MAX;
        blockstore
            .insert_shreds(
                vec![coding_shreds[1].clone()],
                Some(&leader_schedule_cache),
                false,
            )
            .unwrap();
        // The bogus shred must be rejected: nothing lands in the coding column.
        assert!(blockstore
            .get_coding_shreds_for_slot(slot, 0)
            .unwrap()
            .is_empty());
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2021-03-22 16:18:22 -07:00
2021-05-26 16:12:57 -07:00
#[ test ]
pub fn test_insert_data_shreds_same_slot_last_index ( ) {
// Create RocksDb ledger
let blockstore_path = get_tmp_ledger_path! ( ) ;
{
let blockstore = Blockstore ::open ( & blockstore_path ) . unwrap ( ) ;
// Create enough entries to ensure there are at least two shreds created
let num_unique_entries = max_ticks_per_n_shreds ( 1 , None ) + 1 ;
let ( mut original_shreds , original_entries ) =
make_slot_entries ( 0 , 0 , num_unique_entries ) ;
// Discard first shred, so that the slot is not full
assert! ( original_shreds . len ( ) > 1 ) ;
let last_index = original_shreds . last ( ) . unwrap ( ) . index ( ) as u64 ;
original_shreds . remove ( 0 ) ;
// Insert the same shreds, including the last shred specifically, multiple
// times
for _ in 0 .. 10 {
blockstore
. insert_shreds ( original_shreds . clone ( ) , None , false )
. unwrap ( ) ;
let meta = blockstore . meta ( 0 ) . unwrap ( ) . unwrap ( ) ;
assert! ( ! blockstore . is_dead ( 0 ) ) ;
assert_eq! ( blockstore . get_slot_entries ( 0 , 0 ) . unwrap ( ) , vec! [ ] ) ;
assert_eq! ( meta . consumed , 0 ) ;
assert_eq! ( meta . received , last_index + 1 ) ;
assert_eq! ( meta . parent_slot , 0 ) ;
assert_eq! ( meta . last_index , last_index ) ;
assert! ( ! blockstore . is_full ( 0 ) ) ;
}
let duplicate_shreds = entries_to_test_shreds ( original_entries . clone ( ) , 0 , 0 , true , 0 ) ;
let num_shreds = duplicate_shreds . len ( ) as u64 ;
blockstore
. insert_shreds ( duplicate_shreds , None , false )
. unwrap ( ) ;
assert_eq! ( blockstore . get_slot_entries ( 0 , 0 ) . unwrap ( ) , original_entries ) ;
let meta = blockstore . meta ( 0 ) . unwrap ( ) . unwrap ( ) ;
assert_eq! ( meta . consumed , num_shreds ) ;
assert_eq! ( meta . received , num_shreds ) ;
assert_eq! ( meta . parent_slot , 0 ) ;
assert_eq! ( meta . last_index , num_shreds - 1 ) ;
assert! ( blockstore . is_full ( 0 ) ) ;
assert! ( ! blockstore . is_dead ( 0 ) ) ;
}
Blockstore ::destroy ( & blockstore_path ) . expect ( " Expected successful database destruction " ) ;
}
2021-03-22 16:18:22 -07:00
#[test]
fn test_duplicate_last_index() {
    let num_shreds = 2;
    let num_entries = max_ticks_per_n_shreds(num_shreds, None);
    let slot = 1;
    let (mut shreds, _) = make_slot_entries(slot, 0, num_entries);

    // Mark the first two shreds both as "last in slot": two distinct shreds
    // claiming to end the slot constitute a duplicate proof.
    for shred in shreds.iter_mut().take(2) {
        shred.set_last_in_slot();
    }

    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        blockstore.insert_shreds(shreds, None, false).unwrap();
        assert!(blockstore.get_duplicate_slot(slot).is_some());
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2021-05-26 16:12:57 -07:00
#[test]
fn test_duplicate_last_index_mark_dead() {
    let num_shreds = 10;
    let smaller_last_shred_index = 5;
    let larger_last_shred_index = 8;

    // Builds a slot's worth of shreds in which both index 5 and index 8 claim
    // to be the "last" shred of the slot (a duplicate-last-index conflict).
    let setup_test_shreds = |slot: Slot| -> Vec<Shred> {
        let num_entries = max_ticks_per_n_shreds(num_shreds, None);
        let (mut shreds, _) = make_slot_entries(slot, 0, num_entries);
        shreds[smaller_last_shred_index].set_last_in_slot();
        shreds[larger_last_shred_index].set_last_in_slot();
        shreds
    };

    // Inserts `shreds` into a clean slot, records the resulting SlotMeta and
    // Index as the expected baseline, then purges the slot so later cases can
    // compare their own state against this baseline.
    let get_expected_slot_meta_and_index_meta =
        |blockstore: &Blockstore, shreds: Vec<Shred>| -> (SlotMeta, Index) {
            let slot = shreds[0].slot();
            blockstore
                .insert_shreds(shreds.clone(), None, false)
                .unwrap();
            let meta = blockstore.meta(slot).unwrap().unwrap();
            assert_eq!(meta.consumed, shreds.len() as u64);
            let shreds_index = blockstore.get_index(slot).unwrap().unwrap();
            for i in 0..shreds.len() as u64 {
                assert!(shreds_index.data().is_present(i));
            }

            // Cleanup the slot
            blockstore
                .run_purge(slot, slot, PurgeType::PrimaryIndex)
                .expect("Purge database operations failed");
            assert!(blockstore.meta(slot).unwrap().is_none());

            (meta, shreds_index)
        };

    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        let mut slot = 0;
        let shreds = setup_test_shreds(slot);

        // Case 1: Insert in the same batch. Since we're inserting the shreds in order,
        // any shreds > smaller_last_shred_index will not be inserted. Slot is not marked
        // as dead because no slots > the first "last" index shred are inserted before
        // the "last" index shred itself is inserted.
        let (expected_slot_meta, expected_index) = get_expected_slot_meta_and_index_meta(
            &blockstore,
            shreds[..=smaller_last_shred_index].to_vec(),
        );
        blockstore
            .insert_shreds(shreds.clone(), None, false)
            .unwrap();
        assert!(blockstore.get_duplicate_slot(slot).is_some());
        assert!(!blockstore.is_dead(slot));
        for i in 0..num_shreds {
            if i <= smaller_last_shred_index as u64 {
                assert_eq!(
                    blockstore.get_data_shred(slot, i).unwrap().unwrap(),
                    shreds[i as usize].payload
                );
            } else {
                assert!(blockstore.get_data_shred(slot, i).unwrap().is_none());
            }
        }
        let mut meta = blockstore.meta(slot).unwrap().unwrap();
        // Timestamps differ between runs; normalize before comparing metas.
        meta.first_shred_timestamp = expected_slot_meta.first_shred_timestamp;
        assert_eq!(meta, expected_slot_meta);
        assert_eq!(blockstore.get_index(slot).unwrap().unwrap(), expected_index);

        // Case 2: Inserting a duplicate with an even smaller last shred index should not
        // mark the slot as dead since the Slotmeta is full.
        let mut even_smaller_last_shred_duplicate =
            shreds[smaller_last_shred_index - 1].clone();
        even_smaller_last_shred_duplicate.set_last_in_slot();
        // Flip a byte to create a duplicate shred
        even_smaller_last_shred_duplicate.payload[0] =
            std::u8::MAX - even_smaller_last_shred_duplicate.payload[0];
        assert!(blockstore
            .is_shred_duplicate(
                slot,
                even_smaller_last_shred_duplicate.index(),
                &even_smaller_last_shred_duplicate.payload,
                true
            )
            .is_some());
        blockstore
            .insert_shreds(vec![even_smaller_last_shred_duplicate], None, false)
            .unwrap();
        assert!(!blockstore.is_dead(slot));
        for i in 0..num_shreds {
            if i <= smaller_last_shred_index as u64 {
                assert_eq!(
                    blockstore.get_data_shred(slot, i).unwrap().unwrap(),
                    shreds[i as usize].payload
                );
            } else {
                assert!(blockstore.get_data_shred(slot, i).unwrap().is_none());
            }
        }
        let mut meta = blockstore.meta(slot).unwrap().unwrap();
        meta.first_shred_timestamp = expected_slot_meta.first_shred_timestamp;
        assert_eq!(meta, expected_slot_meta);
        assert_eq!(blockstore.get_index(slot).unwrap().unwrap(), expected_index);

        // Case 3: Insert shreds in reverse so that consumed will not be updated. Now on insert, the
        // the slot should be marked as dead
        slot += 1;
        let mut shreds = setup_test_shreds(slot);
        shreds.reverse();
        blockstore
            .insert_shreds(shreds.clone(), None, false)
            .unwrap();
        assert!(blockstore.is_dead(slot));
        // All the shreds other than the two last index shreds because those two
        // are marked as last, but less than the first received index == 10.
        // The others will be inserted even after the slot is marked dead on attempted
        // insert of the first last_index shred since dead slots can still be
        // inserted into.
        for i in 0..num_shreds {
            let shred_to_check = &shreds[i as usize];
            let shred_index = shred_to_check.index() as u64;
            if shred_index != smaller_last_shred_index as u64
                && shred_index != larger_last_shred_index as u64
            {
                assert_eq!(
                    blockstore
                        .get_data_shred(slot, shred_index)
                        .unwrap()
                        .unwrap(),
                    shred_to_check.payload
                );
            } else {
                assert!(blockstore
                    .get_data_shred(slot, shred_index)
                    .unwrap()
                    .is_none());
            }
        }

        // Case 4: Same as Case 3, but this time insert the shreds one at a time to test that the clearing
        // of data shreds works even after they've been committed
        slot += 1;
        let mut shreds = setup_test_shreds(slot);
        shreds.reverse();
        for shred in shreds.clone() {
            blockstore.insert_shreds(vec![shred], None, false).unwrap();
        }
        assert!(blockstore.is_dead(slot));
        // All the shreds will be inserted since dead slots can still be inserted into.
        for i in 0..num_shreds {
            let shred_to_check = &shreds[i as usize];
            let shred_index = shred_to_check.index() as u64;
            if shred_index != smaller_last_shred_index as u64
                && shred_index != larger_last_shred_index as u64
            {
                assert_eq!(
                    blockstore
                        .get_data_shred(slot, shred_index)
                        .unwrap()
                        .unwrap(),
                    shred_to_check.payload
                );
            } else {
                assert!(blockstore
                    .get_data_shred(slot, shred_index)
                    .unwrap()
                    .is_none());
            }
        }
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_get_slot_entries_dead_slot_race() {
    // Regression test for a race between `get_slot_entries` and a concurrent
    // shred insertion that marks the same slot both dead and full: the reader
    // must either see a not-full result or a `DeadSlot` error, never a
    // "full" result for a dead slot.
    let setup_test_shreds = move |slot: Slot| -> Vec<Shred> {
        let num_shreds = 10;
        let middle_shred_index = 5;
        let num_entries = max_ticks_per_n_shreds(num_shreds, None);
        let (shreds, _) = make_slot_entries(slot, 0, num_entries);
        // Reverse shreds so that last shred gets inserted first and sets meta.received
        let mut shreds: Vec<Shred> = shreds.into_iter().rev().collect();
        // Push the real middle shred to the end of the shreds list
        shreds.push(shreds[middle_shred_index].clone());
        // Set the middle shred as a last shred to cause the slot to be marked dead
        shreds[middle_shred_index].set_last_in_slot();
        shreds
    };
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Arc::new(Blockstore::open(&blockstore_path).unwrap());
        let (slot_sender, slot_receiver) = channel();
        let (shred_sender, shred_receiver) = channel::<Vec<Shred>>();
        let (signal_sender, signal_receiver) = channel();

        // Reader thread: fetches entries for each slot it is told about and
        // reports an error through `signal_sender` if it ever observes a
        // dead slot as full.
        let t_entry_getter = {
            let blockstore = blockstore.clone();
            let signal_sender = signal_sender.clone();
            Builder::new()
                .spawn(move || {
                    while let Ok(slot) = slot_receiver.recv() {
                        match blockstore.get_slot_entries_with_shred_info(slot, 0, false) {
                            Ok((_entries, _num_shreds, is_full)) => {
                                if is_full {
                                    signal_sender
                                        .send(Err(IoError::new(
                                            ErrorKind::Other,
                                            "got full slot entries for dead slot",
                                        )))
                                        .unwrap();
                                }
                            }
                            Err(err) => {
                                // The only acceptable failure is the slot
                                // having been marked dead by the inserter.
                                assert_matches!(err, BlockstoreError::DeadSlot);
                            }
                        }
                        signal_sender.send(Ok(())).unwrap();
                    }
                })
                .unwrap()
        };

        // Writer thread: inserts the crafted shreds, which mark the slot
        // dead, duplicate, and full all at once.
        let t_shred_inserter = Builder::new()
            .spawn(move || {
                while let Ok(shreds) = shred_receiver.recv() {
                    let slot = shreds[0].slot();
                    // Grab this lock to block `get_slot_entries` before it fetches completed datasets
                    // and then mark the slot as dead, but full, by inserting carefully crafted shreds.
                    let _lowest_cleanup_slot = blockstore.lowest_cleanup_slot.write().unwrap();
                    blockstore.insert_shreds(shreds, None, false).unwrap();
                    assert!(blockstore.get_duplicate_slot(slot).is_some());
                    assert!(blockstore.is_dead(slot));
                    assert!(blockstore.meta(slot).unwrap().unwrap().is_full());
                    signal_sender.send(Ok(())).unwrap();
                }
            })
            .unwrap();

        // Drive both threads for many slots to give the race a chance to fire.
        for slot in 0..100 {
            let shreds = setup_test_shreds(slot);
            // Start a task on each thread to trigger a race condition
            slot_sender.send(slot).unwrap();
            shred_sender.send(shreds).unwrap();
            // Check that each thread processed their task before continuing
            for _ in 1..=2 {
                let res = signal_receiver.recv().unwrap();
                assert!(res.is_ok(), "race condition: {:?}", res);
            }
        }

        // Close the channels so both worker threads exit their recv loops.
        drop(slot_sender);
        drop(shred_sender);

        let handles = vec![t_entry_getter, t_shred_inserter];
        for handle in handles {
            assert!(handle.join().is_ok());
        }
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2021-07-01 09:32:41 -07:00
#[test]
fn test_read_write_cost_table() {
    // Round-trips the program-cost column: write a batch of records, read
    // them back, overwrite them (update-in-place), add one new record, then
    // delete it — verifying the db contents after each step.
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        let num_entries: usize = 10;
        let mut cost_table: HashMap<Pubkey, u64> = HashMap::new();
        for x in 1..=num_entries {
            cost_table.insert(Pubkey::new_unique(), (x + 100) as u64);
        }

        // write to db
        for (key, cost) in cost_table.iter() {
            blockstore
                .write_program_cost(key, cost)
                .expect("write a program");
        }

        // read back from db and verify every record matches what was written
        let read_back = blockstore.read_program_costs().expect("read programs");
        assert_eq!(read_back.len(), cost_table.len());
        for (read_key, read_cost) in read_back {
            assert_eq!(read_cost, *cost_table.get(&read_key).unwrap());
        }

        // update every value and rewrite; writes to existing keys must overwrite
        for val in cost_table.values_mut() {
            *val += 100;
        }
        for (key, cost) in cost_table.iter() {
            blockstore
                .write_program_cost(key, cost)
                .expect("write a program");
        }

        // add a new record
        let new_program_key = Pubkey::new_unique();
        let new_program_cost = 999;
        blockstore
            .write_program_cost(&new_program_key, &new_program_cost)
            .unwrap();

        // confirm the updated values and the new record are all visible
        let read_back = blockstore.read_program_costs().expect("read programs");
        assert_eq!(read_back.len(), cost_table.len() + 1);
        for (key, cost) in cost_table.iter() {
            assert_eq!(*cost, read_back.iter().find(|(k, _v)| k == key).unwrap().1);
        }
        assert_eq!(
            new_program_cost,
            read_back
                .iter()
                .find(|(k, _v)| *k == new_program_key)
                .unwrap()
                .1
        );

        // test delete: removing the new record must leave exactly the
        // original keys behind
        blockstore
            .delete_program_cost(&new_program_key)
            .expect("delete a program"); // fixed typo: was "delete a progrma"
        let read_back = blockstore.read_program_costs().expect("read programs");
        assert_eq!(read_back.len(), cost_table.len());
        for (read_key, read_cost) in read_back {
            assert_eq!(read_cost, *cost_table.get(&read_key).unwrap());
        }
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_delete_old_records_from_cost_table() {
    // Verifies that db records no longer present in the in-memory cost table
    // can be identified and deleted, and that the db then matches the table.
    let blockstore_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        let num_entries: usize = 10;

        // Seed the table with unique program keys carrying costs 101..=110.
        let mut cost_table: HashMap<Pubkey, u64> = (1..=num_entries)
            .map(|x| (Pubkey::new_unique(), (x + 100) as u64))
            .collect();

        // Persist every entry to the db.
        for (key, cost) in cost_table.iter() {
            blockstore
                .write_program_cost(key, cost)
                .expect("write a program");
        }

        // Drop the entry whose cost is 101 from the in-memory table only
        // (falling back to a fresh key — a no-op removal — if absent).
        let removed_key = cost_table
            .iter()
            .find_map(|(key, cost)| (*cost == 101_u64).then(|| *key))
            .unwrap_or_else(Pubkey::new_unique);
        cost_table.remove(&removed_key);

        // Purge any db record that is no longer tracked in cost_table; the
        // removed key must be the only one that qualifies.
        let db_records = blockstore.read_program_costs().expect("read programs");
        for (pubkey, _) in db_records.iter() {
            if !cost_table.iter().any(|(key, _)| key == pubkey) {
                assert_eq!(*pubkey, removed_key);
                blockstore
                    .delete_program_cost(pubkey)
                    .expect("delete old program");
            }
        }

        // The db and the in-memory table must now agree exactly.
        let read_back = blockstore.read_program_costs().expect("read programs");
        assert_eq!(read_back.len(), cost_table.len());
        for (read_key, read_cost) in read_back {
            assert_eq!(read_cost, *cost_table.get(&read_key).unwrap());
        }
    }
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
2018-11-15 15:53:31 -08:00
}