2020-01-13 13:13:52 -08:00
//! The `blockstore` module provides functions for parallel verification of the
2018-11-15 15:53:31 -08:00
//! Proof of History ledger as well as iterative read, append write, and random
//! access read to a persistent file-based ledger.
2021-08-13 12:05:18 -07:00
use {
crate ::{
ancestor_iterator ::AncestorIterator ,
blockstore_db ::{
2022-03-23 20:51:49 -07:00
columns as cf , AccessType , BlockstoreOptions , Column , Database , IteratorDirection ,
IteratorMode , LedgerColumn , LedgerColumnOptions , Result , ShredStorageType , WriteBatch ,
2021-08-13 12:05:18 -07:00
} ,
blockstore_meta ::* ,
leader_schedule_cache ::LeaderScheduleCache ,
next_slots_iterator ::NextSlotsIterator ,
2022-04-28 16:42:37 -07:00
shred ::{ self , max_ticks_per_n_shreds , ErasureSetId , Shred , ShredId , ShredType , Shredder } ,
2022-03-25 12:32:22 -07:00
slot_stats ::{ ShredSource , SlotsStats } ,
2021-08-13 12:05:18 -07:00
} ,
bincode ::deserialize ,
2022-01-11 02:44:46 -08:00
crossbeam_channel ::{ bounded , Receiver , Sender , TrySendError } ,
2021-08-13 12:05:18 -07:00
log ::* ,
rayon ::{
iter ::{ IntoParallelRefIterator , ParallelIterator } ,
ThreadPool ,
} ,
rocksdb ::DBRawIterator ,
solana_entry ::entry ::{ create_ticks , Entry } ,
solana_measure ::measure ::Measure ,
2022-03-30 07:04:49 -07:00
solana_metrics ::{
datapoint_debug , datapoint_error ,
poh_timing_point ::{ send_poh_timing_point , PohTimingSender , SlotPohTimingInfo } ,
} ,
2022-05-05 13:00:50 -07:00
solana_rayon_threadlimit ::get_max_thread_count ,
2021-08-13 12:05:18 -07:00
solana_runtime ::hardened_unpack ::{ unpack_genesis_archive , MAX_GENESIS_ARCHIVE_UNPACKED_SIZE } ,
solana_sdk ::{
clock ::{ Slot , UnixTimestamp , DEFAULT_TICKS_PER_SECOND , MS_PER_TICK } ,
genesis_config ::{ GenesisConfig , DEFAULT_GENESIS_ARCHIVE , DEFAULT_GENESIS_FILE } ,
hash ::Hash ,
pubkey ::Pubkey ,
signature ::{ Keypair , Signature , Signer } ,
timing ::timestamp ,
2021-08-17 15:17:56 -07:00
transaction ::VersionedTransaction ,
2019-11-14 15:34:39 -08:00
} ,
2021-08-13 12:05:18 -07:00
solana_storage_proto ::{ StoredExtendedRewards , StoredTransactionStatusMeta } ,
solana_transaction_status ::{
2022-02-09 21:28:18 -08:00
ConfirmedTransactionStatusWithSignature , ConfirmedTransactionWithStatusMeta , Rewards ,
TransactionStatusMeta , TransactionWithStatusMeta , VersionedConfirmedBlock ,
2022-01-13 23:24:41 -08:00
VersionedTransactionWithStatusMeta ,
2021-08-13 12:05:18 -07:00
} ,
std ::{
borrow ::Cow ,
cell ::RefCell ,
cmp ,
2022-03-25 12:32:22 -07:00
collections ::{ hash_map ::Entry as HashMapEntry , BTreeSet , HashMap , HashSet } ,
2021-08-13 12:05:18 -07:00
convert ::TryInto ,
2022-05-22 19:10:48 -07:00
fmt ::Write ,
2021-08-13 12:05:18 -07:00
fs ,
io ::{ Error as IoError , ErrorKind } ,
path ::{ Path , PathBuf } ,
rc ::Rc ,
sync ::{
atomic ::{ AtomicBool , Ordering } ,
Arc , Mutex , RwLock , RwLockWriteGuard ,
} ,
} ,
2021-09-10 05:33:08 -07:00
tempfile ::{ Builder , TempDir } ,
2021-08-13 12:05:18 -07:00
thiserror ::Error ,
trees ::{ Tree , TreeWalk } ,
2019-11-14 15:34:39 -08:00
} ;
2022-04-08 09:46:12 -07:00
pub mod blockstore_purge ;
2022-02-24 10:12:29 -08:00
pub use {
2022-05-24 12:03:28 -07:00
crate ::{
blockstore_db ::BlockstoreError ,
blockstore_meta ::{ OptimisticSlotMetaVersioned , SlotMeta } ,
} ,
2022-04-08 09:46:12 -07:00
blockstore_purge ::PurgeType ,
2022-02-24 10:12:29 -08:00
rocksdb ::properties as RocksProperties ,
} ;
2021-07-21 11:15:08 -07:00
2022-03-02 18:30:22 -08:00
/// Directory name (under the ledger path) for the level-compaction blockstore.
pub const BLOCKSTORE_DIRECTORY_ROCKS_LEVEL: &str = "rocksdb";
/// Directory name (under the ledger path) for the FIFO-compaction blockstore.
pub const BLOCKSTORE_DIRECTORY_ROCKS_FIFO: &str = "rocksdb_fifo";
2018-12-20 11:16:07 -08:00
2022-05-05 13:00:50 -07:00
// get_max_thread_count to match number of threads in the old code.
// see: https://github.com/solana-labs/solana/pull/24853
lazy_static! {
    // Bounded pool for parallel blockstore work.
    static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new()
        .num_threads(get_max_thread_count())
        .thread_name(|i| format!("blockstore_{}", i))
        .build()
        .unwrap();
    // Pool spanning every logical CPU.
    static ref PAR_THREAD_POOL_ALL_CPUS: ThreadPool = rayon::ThreadPoolBuilder::new()
        .num_threads(num_cpus::get())
        .thread_name(|i| format!("blockstore_{}", i))
        .build()
        .unwrap();
}
2020-06-02 18:49:31 -07:00
2022-04-05 06:57:12 -07:00
pub const MAX_REPLAY_WAKE_UP_SIGNALS : usize = 1 ;
2019-05-09 14:10:04 -07:00
pub const MAX_COMPLETED_SLOTS_IN_CHANNEL : usize = 100_000 ;
2019-11-26 16:21:02 -08:00
pub const MAX_TURBINE_PROPAGATION_IN_MS : u64 = 100 ;
pub const MAX_TURBINE_DELAY_IN_TICKS : u64 = MAX_TURBINE_PROPAGATION_IN_MS / MS_PER_TICK ;
2019-05-09 14:10:04 -07:00
2019-12-30 07:42:09 -08:00
// An upper bound on maximum number of data shreds we can handle in a slot
// 32K shreds would allow ~320K peak TPS
// (32K shreds per slot * 4 TX per shred * 2.5 slots per sec)
pub const MAX_DATA_SHREDS_PER_SLOT : usize = 32_768 ;
2022-01-11 02:44:46 -08:00
pub type CompletedSlotsSender = Sender < Vec < Slot > > ;
2021-06-02 17:20:00 -07:00
pub type CompletedSlotsReceiver = Receiver < Vec < Slot > > ;
2020-06-02 18:49:31 -07:00
type CompletedRanges = Vec < ( u32 , u32 ) > ;
2021-12-29 09:25:10 -08:00
/// Result of an address-signature lookup: matching confirmed signatures plus
/// whether the `before` anchor signature was found.
#[derive(Default)]
pub struct SignatureInfosForAddress {
    pub infos: Vec<ConfirmedTransactionStatusWithSignature>,
    pub found_before: bool,
}
2020-07-03 17:44:32 -07:00
#[ derive(Error, Debug) ]
pub enum InsertDataShredError {
Exists ,
InvalidShred ,
BlockstoreError ( #[ from ] BlockstoreError ) ,
}
impl std ::fmt ::Display for InsertDataShredError {
fn fmt ( & self , f : & mut std ::fmt ::Formatter < '_ > ) -> std ::fmt ::Result {
write! ( f , " insert data shred error " )
}
}
2020-09-01 22:06:06 -07:00
/// Identifies a completed data set: a slot plus the inclusive range of data
/// shred indices that make up the set.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct CompletedDataSetInfo {
    pub slot: Slot,
    pub start_index: u32,
    pub end_index: u32,
}
pub struct BlockstoreSignals {
pub blockstore : Blockstore ,
pub ledger_signal_receiver : Receiver < bool > ,
2021-06-02 17:20:00 -07:00
pub completed_slots_receiver : CompletedSlotsReceiver ,
2020-09-01 22:06:06 -07:00
}
2018-11-15 15:53:31 -08:00
// ledger window
2020-01-13 13:13:52 -08:00
pub struct Blockstore {
2021-02-26 13:44:38 -08:00
ledger_path : PathBuf ,
2019-05-03 14:46:02 -07:00
db : Arc < Database > ,
2019-04-02 14:58:07 -07:00
meta_cf : LedgerColumn < cf ::SlotMeta > ,
2019-06-20 15:50:41 -07:00
dead_slots_cf : LedgerColumn < cf ::DeadSlots > ,
2020-01-13 17:21:39 -08:00
duplicate_slots_cf : LedgerColumn < cf ::DuplicateSlots > ,
2019-04-11 14:14:57 -07:00
erasure_meta_cf : LedgerColumn < cf ::ErasureMeta > ,
2019-04-06 19:41:22 -07:00
orphans_cf : LedgerColumn < cf ::Orphans > ,
2019-07-10 11:08:17 -07:00
index_cf : LedgerColumn < cf ::Index > ,
2020-04-24 15:04:23 -07:00
data_shred_cf : LedgerColumn < cf ::ShredData > ,
code_shred_cf : LedgerColumn < cf ::ShredCode > ,
2019-11-17 08:26:01 -08:00
transaction_status_cf : LedgerColumn < cf ::TransactionStatus > ,
2020-04-08 12:50:39 -07:00
address_signatures_cf : LedgerColumn < cf ::AddressSignatures > ,
2021-09-02 23:28:52 -07:00
transaction_memos_cf : LedgerColumn < cf ::TransactionMemos > ,
2020-04-04 20:24:06 -07:00
transaction_status_index_cf : LedgerColumn < cf ::TransactionStatusIndex > ,
active_transaction_status_index : RwLock < u64 > ,
2020-02-04 18:50:24 -08:00
rewards_cf : LedgerColumn < cf ::Rewards > ,
2020-09-09 08:33:14 -07:00
blocktime_cf : LedgerColumn < cf ::Blocktime > ,
2020-09-22 12:26:32 -07:00
perf_samples_cf : LedgerColumn < cf ::PerfSamples > ,
2021-05-26 21:16:16 -07:00
block_height_cf : LedgerColumn < cf ::BlockHeight > ,
2021-07-01 09:32:41 -07:00
program_costs_cf : LedgerColumn < cf ::ProgramCosts > ,
2021-07-12 20:59:16 -07:00
bank_hash_cf : LedgerColumn < cf ::BankHash > ,
2022-05-20 16:46:23 -07:00
optimistic_slots_cf : LedgerColumn < cf ::OptimisticSlots > ,
2022-03-17 12:43:57 -07:00
last_root : RwLock < Slot > ,
insert_shreds_lock : Mutex < ( ) > ,
2022-04-04 09:38:05 -07:00
new_shreds_signals : Mutex < Vec < Sender < bool > > > ,
completed_slots_senders : Mutex < Vec < CompletedSlotsSender > > ,
2022-03-30 07:04:49 -07:00
pub shred_timing_point_sender : Option < PohTimingSender > ,
2022-03-17 12:43:57 -07:00
pub lowest_cleanup_slot : RwLock < Slot > ,
2020-03-23 08:42:32 -07:00
no_compaction : bool ,
2022-04-04 14:44:21 -07:00
pub slots_stats : SlotsStats ,
2021-06-30 09:20:07 -07:00
}
2019-10-30 16:48:59 -07:00
pub struct IndexMetaWorkingSetEntry {
index : Index ,
2019-10-31 14:03:41 -07:00
// true only if at least one shred for this Index was inserted since the time this
// struct was created
did_insert_occur : bool ,
}
2022-02-11 10:04:26 -08:00
/// The in-memory data structure for updating entries in the column family
/// [`cf::SlotMeta`].
2019-10-31 14:03:41 -07:00
pub struct SlotMetaWorkingSetEntry {
2022-02-11 10:04:26 -08:00
/// The dirty version of the `SlotMeta` which might not be persisted
/// to the blockstore yet.
2019-10-31 14:03:41 -07:00
new_slot_meta : Rc < RefCell < SlotMeta > > ,
2022-02-11 10:04:26 -08:00
/// The latest version of the `SlotMeta` that was persisted in the
/// blockstore. If None, it means the current slot is new to the
/// blockstore.
2019-10-31 14:03:41 -07:00
old_slot_meta : Option < SlotMeta > ,
2022-02-11 10:04:26 -08:00
/// True only if at least one shred for this SlotMeta was inserted since
/// this struct was created.
2019-10-30 16:48:59 -07:00
did_insert_occur : bool ,
}
2020-03-26 12:51:41 -07:00
/// Counters and timings accumulated over one `insert_shreds` call; reported
/// via [`BlockstoreInsertionMetrics::report_metrics`].
#[derive(Default)]
pub struct BlockstoreInsertionMetrics {
    pub num_shreds: usize,
    pub insert_lock_elapsed: u64,
    pub insert_shreds_elapsed: u64,
    pub shred_recovery_elapsed: u64,
    pub chaining_elapsed: u64,
    pub commit_working_sets_elapsed: u64,
    pub write_batch_elapsed: u64,
    pub total_elapsed: u64,
    pub num_inserted: u64,
    pub num_repair: u64,
    pub num_recovered: usize,
    num_recovered_blockstore_error: usize,
    pub num_recovered_inserted: usize,
    pub num_recovered_failed_sig: usize,
    pub num_recovered_failed_invalid: usize,
    pub num_recovered_exists: usize,
    pub index_meta_time: u64,
    num_data_shreds_exists: usize,
    num_data_shreds_invalid: usize,
    num_data_shreds_blockstore_error: usize,
    num_coding_shreds_exists: usize,
    num_coding_shreds_invalid: usize,
    num_coding_shreds_invalid_erasure_config: usize,
    num_coding_shreds_inserted: usize,
}
2019-10-31 14:03:41 -07:00
impl SlotMetaWorkingSetEntry {
2022-02-11 10:04:26 -08:00
/// Construct a new SlotMetaWorkingSetEntry with the specified `new_slot_meta`
/// and `old_slot_meta`. `did_insert_occur` is set to false.
2019-10-31 14:03:41 -07:00
fn new ( new_slot_meta : Rc < RefCell < SlotMeta > > , old_slot_meta : Option < SlotMeta > ) -> Self {
Self {
new_slot_meta ,
old_slot_meta ,
did_insert_occur : false ,
}
}
}
2020-01-13 13:13:52 -08:00
impl BlockstoreInsertionMetrics {
2019-10-26 16:15:59 -07:00
pub fn report_metrics ( & self , metric_name : & 'static str ) {
2020-03-26 12:51:41 -07:00
datapoint_info! (
2019-10-26 16:15:59 -07:00
metric_name ,
( " num_shreds " , self . num_shreds as i64 , i64 ) ,
( " total_elapsed " , self . total_elapsed as i64 , i64 ) ,
( " insert_lock_elapsed " , self . insert_lock_elapsed as i64 , i64 ) ,
(
" insert_shreds_elapsed " ,
self . insert_shreds_elapsed as i64 ,
i64
) ,
(
" shred_recovery_elapsed " ,
self . shred_recovery_elapsed as i64 ,
i64
) ,
( " chaining_elapsed " , self . chaining_elapsed as i64 , i64 ) ,
(
" commit_working_sets_elapsed " ,
self . commit_working_sets_elapsed as i64 ,
i64
) ,
( " write_batch_elapsed " , self . write_batch_elapsed as i64 , i64 ) ,
( " num_inserted " , self . num_inserted as i64 , i64 ) ,
2020-09-29 14:13:21 -07:00
( " num_repair " , self . num_repair as i64 , i64 ) ,
2019-10-26 16:15:59 -07:00
( " num_recovered " , self . num_recovered as i64 , i64 ) ,
2020-07-03 17:44:32 -07:00
(
" num_recovered_inserted " ,
self . num_recovered_inserted as i64 ,
i64
) ,
(
" num_recovered_failed_sig " ,
self . num_recovered_failed_sig as i64 ,
i64
) ,
(
" num_recovered_failed_invalid " ,
self . num_recovered_failed_invalid as i64 ,
i64
) ,
(
" num_recovered_exists " ,
self . num_recovered_exists as i64 ,
i64
) ,
2021-10-17 08:02:34 -07:00
(
" num_recovered_blockstore_error " ,
self . num_recovered_blockstore_error ,
i64
) ,
( " num_data_shreds_exists " , self . num_data_shreds_exists , i64 ) ,
( " num_data_shreds_invalid " , self . num_data_shreds_invalid , i64 ) ,
(
" num_data_shreds_blockstore_error " ,
self . num_data_shreds_blockstore_error ,
i64
) ,
(
" num_coding_shreds_exists " ,
self . num_coding_shreds_exists ,
i64
) ,
(
" num_coding_shreds_invalid " ,
self . num_coding_shreds_invalid ,
i64
) ,
(
" num_coding_shreds_invalid_erasure_config " ,
self . num_coding_shreds_invalid_erasure_config ,
i64
) ,
(
" num_coding_shreds_inserted " ,
self . num_coding_shreds_inserted ,
i64
) ,
2019-10-26 16:15:59 -07:00
) ;
}
}
2020-01-13 13:13:52 -08:00
impl Blockstore {
2019-12-12 15:54:50 -08:00
/// Consumes the `Blockstore` and returns the shared database handle.
pub fn db(self) -> Arc<Database> {
    self.db
}
2022-03-02 18:30:22 -08:00
/// The path to the ledger store
2021-08-02 14:33:28 -07:00
pub fn ledger_path ( & self ) -> & PathBuf {
2021-02-26 13:44:38 -08:00
& self . ledger_path
}
2022-03-02 18:30:22 -08:00
/// The directory under `ledger_path` to the underlying blockstore.
pub fn blockstore_directory ( shred_storage_type : & ShredStorageType ) -> & str {
match shred_storage_type {
ShredStorageType ::RocksLevel = > BLOCKSTORE_DIRECTORY_ROCKS_LEVEL ,
ShredStorageType ::RocksFifo ( _ ) = > BLOCKSTORE_DIRECTORY_ROCKS_FIFO ,
}
}
2019-09-03 21:32:51 -07:00
/// Opens a Ledger in directory, provides "infinite" window of shreds
2020-01-13 13:13:52 -08:00
pub fn open ( ledger_path : & Path ) -> Result < Blockstore > {
2022-01-03 20:30:45 -08:00
Self ::do_open ( ledger_path , BlockstoreOptions ::default ( ) )
2020-06-02 21:32:44 -07:00
}
2022-01-07 12:11:43 -08:00
pub fn open_with_options ( ledger_path : & Path , options : BlockstoreOptions ) -> Result < Blockstore > {
2022-01-03 20:30:45 -08:00
Self ::do_open ( ledger_path , options )
2020-06-02 21:32:44 -07:00
}
2022-01-03 20:30:45 -08:00
fn do_open ( ledger_path : & Path , options : BlockstoreOptions ) -> Result < Blockstore > {
2019-04-02 14:58:07 -07:00
fs ::create_dir_all ( & ledger_path ) ? ;
2022-03-11 15:17:34 -08:00
let blockstore_path = ledger_path . join ( Self ::blockstore_directory (
2022-03-18 11:13:35 -07:00
& options . column_options . shred_storage_type ,
2022-03-11 15:17:34 -08:00
) ) ;
2019-04-02 14:58:07 -07:00
2022-01-03 20:30:45 -08:00
adjust_ulimit_nofile ( options . enforce_ulimit_nofile ) ? ;
2019-11-05 11:18:49 -08:00
2019-04-02 14:58:07 -07:00
// Open the database
2019-11-25 21:48:49 -08:00
let mut measure = Measure ::start ( " open " ) ;
2020-04-03 12:51:44 -07:00
info! ( " Opening database at {:?} " , blockstore_path ) ;
2022-01-03 20:30:45 -08:00
let db = Database ::open ( & blockstore_path , options ) ? ;
2019-04-02 14:58:07 -07:00
// Create the metadata column family
2019-04-26 08:52:10 -07:00
let meta_cf = db . column ( ) ;
2019-04-02 14:58:07 -07:00
2019-06-20 15:50:41 -07:00
// Create the dead slots column family
let dead_slots_cf = db . column ( ) ;
2020-01-13 17:21:39 -08:00
let duplicate_slots_cf = db . column ( ) ;
2019-04-26 08:52:10 -07:00
let erasure_meta_cf = db . column ( ) ;
2019-04-02 14:58:07 -07:00
2019-04-06 19:41:22 -07:00
// Create the orphans column family. An "orphan" is defined as
// the head of a detached chain of slots, i.e. a slot with no
// known parent
2019-04-26 08:52:10 -07:00
let orphans_cf = db . column ( ) ;
2019-07-10 11:08:17 -07:00
let index_cf = db . column ( ) ;
2019-04-02 14:58:07 -07:00
2020-04-24 15:04:23 -07:00
let data_shred_cf = db . column ( ) ;
let code_shred_cf = db . column ( ) ;
2019-11-17 08:26:01 -08:00
let transaction_status_cf = db . column ( ) ;
2020-04-08 12:50:39 -07:00
let address_signatures_cf = db . column ( ) ;
2021-09-02 23:28:52 -07:00
let transaction_memos_cf = db . column ( ) ;
2020-04-04 20:24:06 -07:00
let transaction_status_index_cf = db . column ( ) ;
2020-02-04 18:50:24 -08:00
let rewards_cf = db . column ( ) ;
2020-09-09 08:33:14 -07:00
let blocktime_cf = db . column ( ) ;
2020-09-22 12:26:32 -07:00
let perf_samples_cf = db . column ( ) ;
2021-05-26 21:16:16 -07:00
let block_height_cf = db . column ( ) ;
2021-07-01 09:32:41 -07:00
let program_costs_cf = db . column ( ) ;
2021-07-12 20:59:16 -07:00
let bank_hash_cf = db . column ( ) ;
2022-05-20 16:46:23 -07:00
let optimistic_slots_cf = db . column ( ) ;
2019-08-12 10:03:57 -07:00
2019-05-03 14:46:02 -07:00
let db = Arc ::new ( db ) ;
2019-04-26 08:52:10 -07:00
2019-08-27 15:09:41 -07:00
// Get max root or 0 if it doesn't exist
let max_root = db
. iter ::< cf ::Root > ( IteratorMode ::End ) ?
. next ( )
. map ( | ( slot , _ ) | slot )
. unwrap_or ( 0 ) ;
2022-03-17 12:43:57 -07:00
let last_root = RwLock ::new ( max_root ) ;
2019-08-27 15:09:41 -07:00
2020-04-04 20:24:06 -07:00
// Get active transaction-status index or 0
let active_transaction_status_index = db
. iter ::< cf ::TransactionStatusIndex > ( IteratorMode ::Start ) ?
2020-04-08 12:50:39 -07:00
. next ( ) ;
let initialize_transaction_status_index = active_transaction_status_index . is_none ( ) ;
let active_transaction_status_index = active_transaction_status_index
2020-04-04 20:24:06 -07:00
. and_then ( | ( _ , data ) | {
let index0 : TransactionStatusIndexMeta = deserialize ( & data ) . unwrap ( ) ;
if index0 . frozen {
Some ( 1 )
} else {
None
}
} )
. unwrap_or ( 0 ) ;
2019-11-25 21:20:30 -08:00
measure . stop ( ) ;
2020-01-13 13:13:52 -08:00
info! ( " {:?} {} " , blockstore_path , measure ) ;
2020-01-28 13:45:41 -08:00
let blockstore = Blockstore {
2021-02-26 13:44:38 -08:00
ledger_path : ledger_path . to_path_buf ( ) ,
2019-04-02 14:58:07 -07:00
db ,
meta_cf ,
2019-06-20 15:50:41 -07:00
dead_slots_cf ,
2020-01-13 17:21:39 -08:00
duplicate_slots_cf ,
2019-04-11 14:14:57 -07:00
erasure_meta_cf ,
2019-04-06 19:41:22 -07:00
orphans_cf ,
2019-07-10 11:08:17 -07:00
index_cf ,
2020-04-24 15:04:23 -07:00
data_shred_cf ,
code_shred_cf ,
2019-11-17 08:26:01 -08:00
transaction_status_cf ,
2020-04-08 12:50:39 -07:00
address_signatures_cf ,
2021-09-02 23:28:52 -07:00
transaction_memos_cf ,
2020-04-04 20:24:06 -07:00
transaction_status_index_cf ,
active_transaction_status_index : RwLock ::new ( active_transaction_status_index ) ,
2020-02-04 18:50:24 -08:00
rewards_cf ,
2020-09-09 08:33:14 -07:00
blocktime_cf ,
2020-09-22 12:26:32 -07:00
perf_samples_cf ,
2021-05-26 21:16:16 -07:00
block_height_cf ,
2021-07-01 09:32:41 -07:00
program_costs_cf ,
2021-07-12 20:59:16 -07:00
bank_hash_cf ,
2022-05-20 16:46:23 -07:00
optimistic_slots_cf ,
2022-04-04 09:38:05 -07:00
new_shreds_signals : Mutex ::default ( ) ,
completed_slots_senders : Mutex ::default ( ) ,
2022-03-30 07:04:49 -07:00
shred_timing_point_sender : None ,
2022-03-17 12:43:57 -07:00
insert_shreds_lock : Mutex ::< ( ) > ::default ( ) ,
2019-08-27 15:09:41 -07:00
last_root ,
2022-03-17 12:43:57 -07:00
lowest_cleanup_slot : RwLock ::< Slot > ::default ( ) ,
2020-03-23 08:42:32 -07:00
no_compaction : false ,
2022-04-04 14:44:21 -07:00
slots_stats : SlotsStats ::default ( ) ,
2020-01-28 13:45:41 -08:00
} ;
2020-04-08 12:50:39 -07:00
if initialize_transaction_status_index {
blockstore . initialize_transaction_status_index ( ) ? ;
}
2020-01-28 13:45:41 -08:00
Ok ( blockstore )
2019-04-02 14:58:07 -07:00
}
2019-05-09 14:10:04 -07:00
pub fn open_with_signal (
2019-07-30 15:53:41 -07:00
ledger_path : & Path ,
2022-01-03 20:30:45 -08:00
options : BlockstoreOptions ,
2020-09-01 22:06:06 -07:00
) -> Result < BlockstoreSignals > {
2022-04-04 09:38:05 -07:00
let blockstore = Self ::open_with_options ( ledger_path , options ) ? ;
2022-04-05 06:57:12 -07:00
let ( ledger_signal_sender , ledger_signal_receiver ) = bounded ( MAX_REPLAY_WAKE_UP_SIGNALS ) ;
2021-06-02 17:20:00 -07:00
let ( completed_slots_sender , completed_slots_receiver ) =
2022-01-11 02:44:46 -08:00
bounded ( MAX_COMPLETED_SLOTS_IN_CHANNEL ) ;
2021-03-12 05:44:06 -08:00
2022-04-04 09:38:05 -07:00
blockstore . add_new_shred_signal ( ledger_signal_sender ) ;
blockstore . add_completed_slots_signal ( completed_slots_sender ) ;
2019-02-04 15:33:43 -08:00
2020-09-01 22:06:06 -07:00
Ok ( BlockstoreSignals {
blockstore ,
ledger_signal_receiver ,
2021-06-02 17:20:00 -07:00
completed_slots_receiver ,
2020-09-01 22:06:06 -07:00
} )
2019-02-04 15:33:43 -08:00
}
2020-08-19 22:04:38 -07:00
pub fn add_tree (
& self ,
forks : Tree < Slot > ,
is_orphan : bool ,
is_slot_complete : bool ,
num_ticks : u64 ,
starting_hash : Hash ,
) {
2020-06-23 12:05:00 -07:00
let mut walk = TreeWalk ::from ( forks ) ;
2020-08-19 22:04:38 -07:00
let mut blockhashes = HashMap ::new ( ) ;
2020-06-23 12:05:00 -07:00
while let Some ( visit ) = walk . get ( ) {
2021-06-17 15:45:09 -07:00
let slot = * visit . node ( ) . data ( ) ;
2020-07-06 22:49:40 -07:00
if self . meta ( slot ) . unwrap ( ) . is_some ( ) & & self . orphan ( slot ) . unwrap ( ) . is_none ( ) {
2020-08-19 22:04:38 -07:00
// If slot exists in blockstore and is not an orphan, then skip it
2020-06-23 12:05:00 -07:00
walk . forward ( ) ;
continue ;
}
2021-06-17 15:45:09 -07:00
let parent = walk . get_parent ( ) . map ( | n | * n . data ( ) ) ;
2020-06-23 12:05:00 -07:00
if parent . is_some ( ) | | ! is_orphan {
2020-08-19 22:04:38 -07:00
let parent_hash = parent
// parent won't exist for first node in a tree where
// `is_orphan == true`
. and_then ( | parent | blockhashes . get ( & parent ) )
. unwrap_or ( & starting_hash ) ;
let mut entries = create_ticks (
num_ticks * ( std ::cmp ::max ( 1 , slot - parent . unwrap_or ( slot ) ) ) ,
0 ,
* parent_hash ,
) ;
blockhashes . insert ( slot , entries . last ( ) . unwrap ( ) . hash ) ;
if ! is_slot_complete {
entries . pop ( ) . unwrap ( ) ;
}
2020-07-02 14:33:04 -07:00
let shreds = entries_to_test_shreds (
2021-12-24 12:32:43 -08:00
& entries ,
2020-07-02 14:33:04 -07:00
slot ,
parent . unwrap_or ( slot ) ,
is_slot_complete ,
0 ,
) ;
2020-06-23 12:05:00 -07:00
self . insert_shreds ( shreds , None , false ) . unwrap ( ) ;
}
walk . forward ( ) ;
}
}
2022-01-28 22:25:07 -08:00
/// Whether to disable compaction in [`compact_storage`], which is used
/// by the ledger cleanup service and [`backup_and_clear_blockstore`].
///
/// Note that this setting is not related to the RocksDB's background
/// compaction.
///
/// To disable RocksDB's background compaction, open the Blockstore
2022-04-29 18:05:39 -07:00
/// with AccessType::PrimaryForMaintenance.
2020-03-23 08:42:32 -07:00
pub fn set_no_compaction ( & mut self , no_compaction : bool ) {
self . no_compaction = no_compaction ;
}
2022-01-28 22:25:07 -08:00
/// Deletes the blockstore at the specified path.
2022-03-02 18:30:22 -08:00
///
/// Note that if the `ledger_path` has multiple rocksdb instances, this
/// function will destroy all.
2019-07-30 15:53:41 -07:00
pub fn destroy ( ledger_path : & Path ) -> Result < ( ) > {
2022-03-02 18:30:22 -08:00
// Database::destroy() fails if the root directory doesn't exist
2019-04-02 14:58:07 -07:00
fs ::create_dir_all ( ledger_path ) ? ;
2022-03-02 18:30:22 -08:00
Database ::destroy ( & Path ::new ( ledger_path ) . join ( BLOCKSTORE_DIRECTORY_ROCKS_LEVEL ) ) . and (
Database ::destroy ( & Path ::new ( ledger_path ) . join ( BLOCKSTORE_DIRECTORY_ROCKS_FIFO ) ) ,
)
2019-04-02 14:58:07 -07:00
}
2022-01-28 22:25:07 -08:00
/// Returns the SlotMeta of the specified slot.
2019-11-02 00:38:30 -07:00
pub fn meta ( & self , slot : Slot ) -> Result < Option < SlotMeta > > {
2019-05-03 14:46:02 -07:00
self . meta_cf . get ( slot )
2019-04-26 08:52:10 -07:00
}
2022-01-28 22:25:07 -08:00
/// Returns true if the specified slot is full.
2019-11-02 00:38:30 -07:00
pub fn is_full ( & self , slot : Slot ) -> bool {
2020-12-13 17:26:34 -08:00
if let Ok ( Some ( meta ) ) = self . meta_cf . get ( slot ) {
return meta . is_full ( ) ;
2019-05-29 15:01:20 -07:00
}
false
}
2021-12-16 06:18:55 -08:00
/// Fetches the `ErasureMeta` stored under the given erasure set's key.
fn erasure_meta(&self, erasure_set: ErasureSetId) -> Result<Option<ErasureMeta>> {
    self.erasure_meta_cf.get(erasure_set.store_key())
}
2022-01-28 22:25:07 -08:00
/// Check whether the specified slot is an orphan slot which does not
/// have a parent slot.
///
/// Returns true if the specified slot does not have a parent slot.
/// For other return values, it means either the slot is not in the
/// blockstore or the slot isn't an orphan slot.
2019-11-02 00:38:30 -07:00
pub fn orphan ( & self , slot : Slot ) -> Result < Option < bool > > {
2019-05-03 14:46:02 -07:00
self . orphans_cf . get ( slot )
2019-03-29 16:07:24 -07:00
}
2022-01-28 22:25:07 -08:00
/// Returns the max root or 0 if it does not exist.
2020-08-19 22:04:38 -07:00
pub fn max_root ( & self ) -> Slot {
self . db
. iter ::< cf ::Root > ( IteratorMode ::End )
. expect ( " Couldn't get rooted iterator for max_root() " )
. next ( )
. map ( | ( slot , _ ) | slot )
. unwrap_or ( 0 )
}
2020-12-13 17:26:34 -08:00
pub fn slot_meta_iterator (
& self ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
2020-12-13 17:26:34 -08:00
) -> Result < impl Iterator < Item = ( Slot , SlotMeta ) > + '_ > {
2019-08-27 15:09:41 -07:00
let meta_iter = self
. db
. iter ::< cf ::SlotMeta > ( IteratorMode ::From ( slot , IteratorDirection ::Forward ) ) ? ;
2019-05-15 18:28:23 -07:00
Ok ( meta_iter . map ( | ( slot , slot_meta_bytes ) | {
(
slot ,
2021-02-03 06:42:34 -08:00
deserialize ( & slot_meta_bytes ) . unwrap_or_else ( | e | {
panic! ( " Could not deserialize SlotMeta for slot {} : {:?} " , slot , e )
} ) ,
2019-05-15 18:28:23 -07:00
)
} ) )
2019-02-07 15:10:54 -08:00
}
2020-03-05 10:58:00 -08:00
#[ allow(dead_code) ]
2020-12-13 17:26:34 -08:00
pub fn live_slots_iterator ( & self , root : Slot ) -> impl Iterator < Item = ( Slot , SlotMeta ) > + '_ {
2020-03-05 10:58:00 -08:00
let root_forks = NextSlotsIterator ::new ( root , self ) ;
let orphans_iter = self . orphans_iterator ( root + 1 ) . unwrap ( ) ;
root_forks . chain ( orphans_iter . flat_map ( move | orphan | NextSlotsIterator ::new ( orphan , self ) ) )
}
2020-12-13 17:26:34 -08:00
pub fn slot_data_iterator (
& self ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
2020-03-19 23:35:01 -07:00
index : u64 ,
2020-12-13 17:26:34 -08:00
) -> Result < impl Iterator < Item = ( ( u64 , u64 ) , Box < [ u8 ] > ) > + '_ > {
2020-04-24 15:04:23 -07:00
let slot_iterator = self . db . iter ::< cf ::ShredData > ( IteratorMode ::From (
( slot , index ) ,
IteratorDirection ::Forward ,
) ) ? ;
Ok ( slot_iterator . take_while ( move | ( ( shred_slot , _ ) , _ ) | * shred_slot = = slot ) )
2019-05-13 22:04:54 -07:00
}
2020-12-13 17:26:34 -08:00
pub fn slot_coding_iterator (
& self ,
2019-12-09 00:13:36 -08:00
slot : Slot ,
2020-03-19 23:35:01 -07:00
index : u64 ,
2020-12-13 17:26:34 -08:00
) -> Result < impl Iterator < Item = ( ( u64 , u64 ) , Box < [ u8 ] > ) > + '_ > {
2020-04-24 15:04:23 -07:00
let slot_iterator = self . db . iter ::< cf ::ShredCode > ( IteratorMode ::From (
( slot , index ) ,
IteratorDirection ::Forward ,
) ) ? ;
Ok ( slot_iterator . take_while ( move | ( ( shred_slot , _ ) , _ ) | * shred_slot = = slot ) )
2019-12-09 00:13:36 -08:00
}
2020-12-13 17:26:34 -08:00
pub fn rooted_slot_iterator ( & self , slot : Slot ) -> Result < impl Iterator < Item = u64 > + '_ > {
2020-01-07 19:51:28 -08:00
let slot_iterator = self
. db
. iter ::< cf ::Root > ( IteratorMode ::From ( slot , IteratorDirection ::Forward ) ) ? ;
Ok ( slot_iterator . map ( move | ( rooted_slot , _ ) | rooted_slot ) )
}
2021-11-12 11:16:48 -08:00
fn get_recovery_data_shreds < ' a > (
index : & ' a Index ,
2020-05-13 10:09:38 -07:00
slot : Slot ,
2021-11-12 11:16:48 -08:00
erasure_meta : & ' a ErasureMeta ,
2021-12-14 09:34:02 -08:00
prev_inserted_shreds : & ' a HashMap < ShredId , Shred > ,
2021-11-12 11:16:48 -08:00
data_cf : & ' a LedgerColumn < cf ::ShredData > ,
) -> impl Iterator < Item = Shred > + ' a {
erasure_meta . data_shreds_indices ( ) . filter_map ( move | i | {
2021-12-14 09:34:02 -08:00
let key = ShredId ::new ( slot , u32 ::try_from ( i ) . unwrap ( ) , ShredType ::Data ) ;
if let Some ( shred ) = prev_inserted_shreds . get ( & key ) {
return Some ( shred . clone ( ) ) ;
2021-11-12 11:16:48 -08:00
}
2021-12-16 11:17:32 -08:00
if ! index . data ( ) . contains ( i ) {
2021-11-12 11:16:48 -08:00
return None ;
}
match data_cf . get_bytes ( ( slot , i ) ) . unwrap ( ) {
None = > {
warn! ( " Data shred deleted while reading for recovery " ) ;
None
2020-05-13 10:09:38 -07:00
}
2021-11-12 11:16:48 -08:00
Some ( data ) = > Shred ::new_from_serialized_shred ( data ) . ok ( ) ,
2020-05-13 10:09:38 -07:00
}
2021-11-12 11:16:48 -08:00
} )
2020-05-13 10:09:38 -07:00
}
2021-11-12 11:16:48 -08:00
fn get_recovery_coding_shreds < ' a > (
2021-12-14 09:34:02 -08:00
index : & ' a Index ,
2020-05-13 10:09:38 -07:00
slot : Slot ,
2021-11-12 11:16:48 -08:00
erasure_meta : & ' a ErasureMeta ,
2021-12-14 09:34:02 -08:00
prev_inserted_shreds : & ' a HashMap < ShredId , Shred > ,
2021-11-12 11:16:48 -08:00
code_cf : & ' a LedgerColumn < cf ::ShredCode > ,
) -> impl Iterator < Item = Shred > + ' a {
erasure_meta . coding_shreds_indices ( ) . filter_map ( move | i | {
2021-12-14 09:34:02 -08:00
let key = ShredId ::new ( slot , u32 ::try_from ( i ) . unwrap ( ) , ShredType ::Code ) ;
if let Some ( shred ) = prev_inserted_shreds . get ( & key ) {
2021-11-16 11:26:34 -08:00
return Some ( shred . clone ( ) ) ;
2021-11-12 11:16:48 -08:00
}
2021-12-16 11:17:32 -08:00
if ! index . coding ( ) . contains ( i ) {
2021-11-12 11:16:48 -08:00
return None ;
}
match code_cf . get_bytes ( ( slot , i ) ) . unwrap ( ) {
None = > {
warn! ( " Code shred deleted while reading for recovery " ) ;
None
2020-05-13 10:09:38 -07:00
}
2021-11-12 11:16:48 -08:00
Some ( code ) = > Shred ::new_from_serialized_shred ( code ) . ok ( ) ,
}
} )
2020-05-13 10:09:38 -07:00
}
fn recover_shreds (
index : & mut Index ,
erasure_meta : & ErasureMeta ,
2021-12-14 09:34:02 -08:00
prev_inserted_shreds : & HashMap < ShredId , Shred > ,
2020-05-13 10:09:38 -07:00
recovered_data_shreds : & mut Vec < Shred > ,
data_cf : & LedgerColumn < cf ::ShredData > ,
code_cf : & LedgerColumn < cf ::ShredCode > ,
) {
// Find shreds for this erasure set and try recovery
let slot = index . slot ;
2021-12-14 09:34:02 -08:00
let available_shreds : Vec < _ > = Self ::get_recovery_data_shreds (
index ,
slot ,
erasure_meta ,
prev_inserted_shreds ,
data_cf ,
)
. chain ( Self ::get_recovery_coding_shreds (
2020-05-13 10:09:38 -07:00
index ,
slot ,
erasure_meta ,
2021-12-14 09:34:02 -08:00
prev_inserted_shreds ,
2020-05-13 10:09:38 -07:00
code_cf ,
2021-12-14 09:34:02 -08:00
) )
. collect ( ) ;
2021-11-10 13:19:03 -08:00
if let Ok ( mut result ) = Shredder ::try_recovery ( available_shreds ) {
2021-11-12 11:16:48 -08:00
Self ::submit_metrics ( slot , erasure_meta , true , " complete " . into ( ) , result . len ( ) ) ;
2020-05-13 10:09:38 -07:00
recovered_data_shreds . append ( & mut result ) ;
} else {
2021-11-12 11:16:48 -08:00
Self ::submit_metrics ( slot , erasure_meta , true , " incomplete " . into ( ) , 0 ) ;
2020-05-13 10:09:38 -07:00
}
}
fn submit_metrics (
slot : Slot ,
erasure_meta : & ErasureMeta ,
attempted : bool ,
status : String ,
recovered : usize ,
) {
2021-11-12 11:16:48 -08:00
let mut data_shreds_indices = erasure_meta . data_shreds_indices ( ) ;
let start_index = data_shreds_indices . next ( ) . unwrap_or_default ( ) ;
let end_index = data_shreds_indices . last ( ) . unwrap_or ( start_index ) ;
2020-05-13 10:09:38 -07:00
datapoint_debug! (
" blockstore-erasure " ,
( " slot " , slot as i64 , i64 ) ,
2021-11-12 11:16:48 -08:00
( " start_index " , start_index , i64 ) ,
( " end_index " , end_index + 1 , i64 ) ,
2020-05-13 10:09:38 -07:00
( " recovery_attempted " , attempted , bool ) ,
( " recovery_status " , status , String ) ,
( " recovered " , recovered as i64 , i64 ) ,
) ;
}
/// Collects and reports [`BlockstoreRocksDbColumnFamilyMetrics`] for all
/// of the column families.
pub fn submit_rocksdb_cf_metrics_for_all_cfs(&self) {
    self.meta_cf.submit_rocksdb_cf_metrics();
    self.dead_slots_cf.submit_rocksdb_cf_metrics();
    self.duplicate_slots_cf.submit_rocksdb_cf_metrics();
    self.erasure_meta_cf.submit_rocksdb_cf_metrics();
    self.orphans_cf.submit_rocksdb_cf_metrics();
    self.index_cf.submit_rocksdb_cf_metrics();
    self.data_shred_cf.submit_rocksdb_cf_metrics();
    self.code_shred_cf.submit_rocksdb_cf_metrics();
    self.transaction_status_cf.submit_rocksdb_cf_metrics();
    self.address_signatures_cf.submit_rocksdb_cf_metrics();
    self.transaction_memos_cf.submit_rocksdb_cf_metrics();
    self.transaction_status_index_cf.submit_rocksdb_cf_metrics();
    self.rewards_cf.submit_rocksdb_cf_metrics();
    self.blocktime_cf.submit_rocksdb_cf_metrics();
    self.perf_samples_cf.submit_rocksdb_cf_metrics();
    self.block_height_cf.submit_rocksdb_cf_metrics();
    self.program_costs_cf.submit_rocksdb_cf_metrics();
    self.bank_hash_cf.submit_rocksdb_cf_metrics();
    self.optimistic_slots_cf.submit_rocksdb_cf_metrics();
}
/// Attempts erasure recovery for every erasure set in `erasure_metas` whose
/// status is `CanRecover`, returning all recovered data shreds.
///
/// For sets that cannot (or need not) be recovered, a metrics datapoint is
/// emitted instead via `submit_metrics`.
fn try_shred_recovery(
    db: &Database,
    erasure_metas: &HashMap<ErasureSetId, ErasureMeta>,
    index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
    prev_inserted_shreds: &HashMap<ShredId, Shred>,
) -> Vec<Shred> {
    let data_cf = db.column::<cf::ShredData>();
    let code_cf = db.column::<cf::ShredCode>();
    let mut recovered_data_shreds = vec![];
    // Recovery rules:
    // 1. Only try recovery around indexes for which new data or coding shreds are received
    // 2. For new data shreds, check if an erasure set exists. If not, don't try recovery
    // 3. Before trying recovery, check if enough number of shreds have been received
    // 3a. Enough number of shreds = (#data + #coding shreds) > erasure.num_data
    for (erasure_set, erasure_meta) in erasure_metas.iter() {
        let slot = erasure_set.slot();
        // The working set is expected to contain an entry for every slot that
        // appears in `erasure_metas`; a miss here indicates a logic bug upstream.
        let index_meta_entry = index_working_set.get_mut(&slot).expect("Index");
        let index = &mut index_meta_entry.index;
        match erasure_meta.status(index) {
            ErasureMetaStatus::CanRecover => {
                Self::recover_shreds(
                    index,
                    erasure_meta,
                    prev_inserted_shreds,
                    &mut recovered_data_shreds,
                    &data_cf,
                    &code_cf,
                );
            }
            ErasureMetaStatus::DataFull => {
                // All data shreds already present; nothing to recover.
                Self::submit_metrics(slot, erasure_meta, false, "complete".into(), 0);
            }
            ErasureMetaStatus::StillNeed(needed) => {
                // Not enough shreds received yet to attempt recovery.
                Self::submit_metrics(
                    slot,
                    erasure_meta,
                    false,
                    format!("still need: {}", needed),
                    0,
                );
            }
        };
    }
    recovered_data_shreds
}
/// The main helper function that performs the shred insertion logic
/// and updates corresponding meta-data.
///
/// This function updates the following column families:
/// - [`cf::DeadSlots`]: mark a shred as "dead" if its meta-data indicates
///   there is no need to replay this shred.  Specifically when both the
///   following conditions satisfy,
///   - We get a new shred N marked as the last shred in the slot S,
///     but N.index() is less than the current slot_meta.received
///     for slot S.
///   - The slot is not currently full
///   It means there's an alternate version of this slot. See
///   `check_insert_data_shred` for more details.
/// - [`cf::ShredData`]: stores data shreds (in check_insert_data_shreds).
/// - [`cf::ShredCode`]: stores coding shreds (in check_insert_coding_shreds).
/// - [`cf::SlotMeta`]: the SlotMeta of the input `shreds` and their related
///   shreds are updated.  Specifically:
///   - `handle_chaining()` updates `cf::SlotMeta` in two ways.  First, it
///     updates the in-memory slot_meta_working_set, which will later be
///     persisted in commit_slot_meta_working_set().  Second, for the newly
///     chained slots (updated inside handle_chaining_for_slot()), it will
///     directly persist their slot-meta into `cf::SlotMeta`.
///   - In `commit_slot_meta_working_set()`, persists everything stored
///     in the in-memory structure slot_meta_working_set, which is updated
///     by both `check_insert_data_shred()` and `handle_chaining()`.
/// - [`cf::Orphans`]: add or remove the ID of a slot to `cf::Orphans`
///   if it becomes / is no longer an orphan slot in `handle_chaining()`.
/// - [`cf::ErasureMeta`]: the associated ErasureMeta of the coding and data
///   shreds inside `shreds` will be updated and committed to
///   `cf::ErasureMeta`.
/// - [`cf::Index`]: stores (slot id, index to the index_working_set_entry)
///   pair to the `cf::Index` column family for each index_working_set_entry
///   for which an insert occurred in this function call.
///
/// Arguments:
/// - `shreds`: the shreds to be inserted.
/// - `is_repaired`: a boolean vector aligned with `shreds` where each
///   boolean indicates whether the corresponding shred is repaired or not.
/// - `leader_schedule`: the leader schedule
/// - `is_trusted`: whether the shreds come from a trusted source. If this
///   is set to true, then the function will skip the shred duplication and
///   integrity checks.
/// - `retransmit_sender`: the sender for transmitting any recovered
///   data shreds.
/// - `handle_duplicate`: a function for handling shreds that have the same slot
///   and index.
/// - `metrics`: the metric for reporting detailed stats
///
/// On success, the function returns an Ok result with a vector of
/// `CompletedDataSetInfo` and a vector of its corresponding index in the
/// input `shreds` vector.
pub fn insert_shreds_handle_duplicate<F>(
    &self,
    shreds: Vec<Shred>,
    is_repaired: Vec<bool>,
    leader_schedule: Option<&LeaderScheduleCache>,
    is_trusted: bool,
    retransmit_sender: Option<&Sender<Vec<Shred>>>,
    handle_duplicate: &F,
    metrics: &mut BlockstoreInsertionMetrics,
) -> Result<(Vec<CompletedDataSetInfo>, Vec<usize>)>
where
    F: Fn(Shred),
{
    // `is_repaired` must be parallel to `shreds`; a mismatch is a caller bug.
    assert_eq!(shreds.len(), is_repaired.len());
    let mut total_start = Measure::start("Total elapsed");
    let mut start = Measure::start("Blockstore lock");
    // Serialize all read-modify-write operations on the metadata column
    // families (see also `clear_unconfirmed_slot`, which takes the same lock).
    let _lock = self.insert_shreds_lock.lock().unwrap();
    start.stop();
    metrics.insert_lock_elapsed += start.as_us();

    let db = &*self.db;
    // All column-family updates are staged here and committed atomically below.
    let mut write_batch = db.batch()?;

    // Shreds inserted during this call, keyed by ShredId; consulted by the
    // duplicate / conflict checks and by recovery.
    let mut just_inserted_shreds = HashMap::with_capacity(shreds.len());
    // Dirty in-memory copies of metadata; flushed into `write_batch` at the end.
    let mut erasure_metas = HashMap::new();
    let mut slot_meta_working_set = HashMap::new();
    let mut index_working_set = HashMap::new();

    metrics.num_shreds += shreds.len();
    let mut start = Measure::start("Shred insertion");
    let mut index_meta_time = 0;
    let mut newly_completed_data_sets: Vec<CompletedDataSetInfo> = vec![];
    let mut inserted_indices = Vec::new();
    for (i, (shred, is_repaired)) in shreds.into_iter().zip(is_repaired).enumerate() {
        let shred_source = if is_repaired {
            ShredSource::Repaired
        } else {
            ShredSource::Turbine
        };
        match shred.shred_type() {
            ShredType::Data => {
                match self.check_insert_data_shred(
                    shred,
                    &mut erasure_metas,
                    &mut index_working_set,
                    &mut slot_meta_working_set,
                    &mut write_batch,
                    &mut just_inserted_shreds,
                    &mut index_meta_time,
                    is_trusted,
                    handle_duplicate,
                    leader_schedule,
                    shred_source,
                ) {
                    Err(InsertDataShredError::Exists) => metrics.num_data_shreds_exists += 1,
                    Err(InsertDataShredError::InvalidShred) => {
                        metrics.num_data_shreds_invalid += 1
                    }
                    Err(InsertDataShredError::BlockstoreError(err)) => {
                        metrics.num_data_shreds_blockstore_error += 1;
                        error!("blockstore error: {}", err);
                    }
                    Ok(completed_data_sets) => {
                        newly_completed_data_sets.extend(completed_data_sets);
                        inserted_indices.push(i);
                        metrics.num_inserted += 1;
                    }
                };
            }
            ShredType::Code => {
                self.check_insert_coding_shred(
                    shred,
                    &mut erasure_metas,
                    &mut index_working_set,
                    &mut write_batch,
                    &mut just_inserted_shreds,
                    &mut index_meta_time,
                    handle_duplicate,
                    is_trusted,
                    shred_source,
                    metrics,
                );
            }
        };
    }
    start.stop();

    metrics.insert_shreds_elapsed += start.as_us();
    let mut start = Measure::start("Shred recovery");
    // Erasure recovery is only attempted when a leader-schedule cache is
    // supplied, since recovered shreds must be signature-verified against
    // the slot leader before insertion.
    if let Some(leader_schedule_cache) = leader_schedule {
        let recovered_data_shreds = Self::try_shred_recovery(
            db,
            &erasure_metas,
            &mut index_working_set,
            &just_inserted_shreds,
        );

        metrics.num_recovered += recovered_data_shreds.len();
        let recovered_data_shreds: Vec<_> = recovered_data_shreds
            .into_iter()
            .filter_map(|shred| {
                let leader =
                    leader_schedule_cache.slot_leader_at(shred.slot(), /*bank=*/ None)?;
                if !shred.verify(&leader) {
                    metrics.num_recovered_failed_sig += 1;
                    return None;
                }
                match self.check_insert_data_shred(
                    shred.clone(),
                    &mut erasure_metas,
                    &mut index_working_set,
                    &mut slot_meta_working_set,
                    &mut write_batch,
                    &mut just_inserted_shreds,
                    &mut index_meta_time,
                    is_trusted,
                    &handle_duplicate,
                    leader_schedule,
                    ShredSource::Recovered,
                ) {
                    Err(InsertDataShredError::Exists) => {
                        metrics.num_recovered_exists += 1;
                        None
                    }
                    Err(InsertDataShredError::InvalidShred) => {
                        metrics.num_recovered_failed_invalid += 1;
                        None
                    }
                    Err(InsertDataShredError::BlockstoreError(err)) => {
                        metrics.num_recovered_blockstore_error += 1;
                        error!("blockstore error: {}", err);
                        None
                    }
                    Ok(completed_data_sets) => {
                        newly_completed_data_sets.extend(completed_data_sets);
                        metrics.num_recovered_inserted += 1;
                        Some(shred)
                    }
                }
            })
            // Always collect recovered-shreds so that above insert code is
            // executed even if retransmit-sender is None.
            .collect();
        if !recovered_data_shreds.is_empty() {
            if let Some(retransmit_sender) = retransmit_sender {
                // Best-effort: a closed channel is not an insertion failure.
                let _ = retransmit_sender.send(recovered_data_shreds);
            }
        }
    }
    start.stop();
    metrics.shred_recovery_elapsed += start.as_us();

    // NOTE(review): this Measure label looks copy-pasted from the recovery
    // section above; it actually times chaining — consider renaming to
    // "Shred chaining". The label itself is unused (only `chaining_elapsed`
    // is reported), so behavior is unaffected.
    let mut start = Measure::start("Shred recovery");
    // Handle chaining for the members of the slot_meta_working_set that were inserted into,
    // drop the others
    handle_chaining(&self.db, &mut write_batch, &mut slot_meta_working_set)?;
    start.stop();
    metrics.chaining_elapsed += start.as_us();

    let mut start = Measure::start("Commit Working Sets");
    let (should_signal, newly_completed_slots) = commit_slot_meta_working_set(
        &slot_meta_working_set,
        &self.completed_slots_senders.lock().unwrap(),
        &mut write_batch,
    )?;

    // Flush the dirty erasure metas accumulated during insertion.
    for (erasure_set, erasure_meta) in erasure_metas {
        write_batch.put::<cf::ErasureMeta>(erasure_set.store_key(), &erasure_meta)?;
    }

    // Persist only the index entries that were actually modified.
    for (&slot, index_working_set_entry) in index_working_set.iter() {
        if index_working_set_entry.did_insert_occur {
            write_batch.put::<cf::Index>(slot, &index_working_set_entry.index)?;
        }
    }
    start.stop();
    metrics.commit_working_sets_elapsed += start.as_us();

    let mut start = Measure::start("Write Batch");
    // Atomically commit every staged update.
    self.db.write(write_batch)?;
    start.stop();
    metrics.write_batch_elapsed += start.as_us();

    send_signals(
        &self.new_shreds_signals.lock().unwrap(),
        &self.completed_slots_senders.lock().unwrap(),
        should_signal,
        newly_completed_slots,
    );

    total_start.stop();
    metrics.total_elapsed += total_start.as_us();
    metrics.index_meta_time += index_meta_time;
    Ok((newly_completed_data_sets, inserted_indices))
}
/// Registers a sender to be notified on new-shred insertion.
pub fn add_new_shred_signal(&self, s: Sender<bool>) {
    self.new_shreds_signals.lock().unwrap().push(s);
}
/// Registers a sender to be notified when slots complete.
pub fn add_completed_slots_signal(&self, s: CompletedSlotsSender) {
    self.completed_slots_senders.lock().unwrap().push(s);
}
/// Returns the number of registered new-shred signal senders.
pub fn get_new_shred_signals_len(&self) -> usize {
    self.new_shreds_signals.lock().unwrap().len()
}
/// Returns a clone of the new-shred signal sender at `index`, if any.
pub fn get_new_shred_signal(&self, index: usize) -> Option<Sender<bool>> {
    self.new_shreds_signals.lock().unwrap().get(index).cloned()
}
/// Removes all registered new-shred and completed-slots signal senders.
pub fn drop_signal(&self) {
    self.new_shreds_signals.lock().unwrap().clear();
    self.completed_slots_senders.lock().unwrap().clear();
}
/// Range-delete all entries which prefix matches the specified `slot` and
/// clear all the related `SlotMeta` except its next_slots.
///
/// This function currently requires `insert_shreds_lock`, as both
/// `clear_unconfirmed_slot()` and `insert_shreds_handle_duplicate()`
/// try to perform read-modify-write operation on [`cf::SlotMeta`] column
/// family.
pub fn clear_unconfirmed_slot(&self, slot: Slot) {
    let _lock = self.insert_shreds_lock.lock().unwrap();
    if let Some(mut slot_meta) = self
        .meta(slot)
        .expect("Couldn't fetch from SlotMeta column family")
    {
        // Clear all slot related information
        self.run_purge(slot, slot, PurgeType::PrimaryIndex)
            .expect("Purge database operations failed");
        // Reinsert parts of `slot_meta` that are important to retain, like the `next_slots`
        // field.
        slot_meta.clear_unconfirmed_slot();
        self.meta_cf
            .put(slot, &slot_meta)
            .expect("Couldn't insert into SlotMeta column family");
    } else {
        // Nothing to clear; log loudly since the caller expected a SlotMeta.
        error!(
            "clear_unconfirmed_slot() called on slot {} with no SlotMeta",
            slot
        );
    }
}
2020-01-14 15:37:53 -08:00
pub fn insert_shreds (
& self ,
shreds : Vec < Shred > ,
2021-08-13 07:47:02 -07:00
leader_schedule : Option < & LeaderScheduleCache > ,
2020-01-14 15:37:53 -08:00
is_trusted : bool ,
2020-09-29 14:13:21 -07:00
) -> Result < ( Vec < CompletedDataSetInfo > , Vec < usize > ) > {
2021-06-30 09:20:07 -07:00
let shreds_len = shreds . len ( ) ;
2020-03-26 12:51:41 -07:00
self . insert_shreds_handle_duplicate (
shreds ,
2021-06-30 09:20:07 -07:00
vec! [ false ; shreds_len ] ,
2020-03-26 12:51:41 -07:00
leader_schedule ,
is_trusted ,
2021-08-13 11:11:37 -07:00
None , // retransmit-sender
& | _ | { } , // handle-duplicates
2020-03-26 12:51:41 -07:00
& mut BlockstoreInsertionMetrics ::default ( ) ,
)
2020-01-14 15:37:53 -08:00
}
/// Validates a coding shred and, if acceptable, stages its insertion into
/// `write_batch` while updating the in-memory index and erasure-meta
/// working sets. Returns `true` iff the shred was inserted.
///
/// Arguments mirror `check_insert_data_shred`; see its documentation for
/// the role of each working set. `is_trusted == true` skips the duplicate
/// and sanity checks.
#[allow(clippy::too_many_arguments)]
fn check_insert_coding_shred<F>(
    &self,
    shred: Shred,
    erasure_metas: &mut HashMap<ErasureSetId, ErasureMeta>,
    index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
    write_batch: &mut WriteBatch,
    just_received_shreds: &mut HashMap<ShredId, Shred>,
    index_meta_time: &mut u64,
    handle_duplicate: &F,
    is_trusted: bool,
    shred_source: ShredSource,
    metrics: &mut BlockstoreInsertionMetrics,
) -> bool
where
    F: Fn(Shred),
{
    let slot = shred.slot();
    let shred_index = u64::from(shred.index());

    let index_meta_working_set_entry =
        get_index_meta_entry(&self.db, slot, index_working_set, index_meta_time);

    let index_meta = &mut index_meta_working_set_entry.index;

    // This gives the index of first coding shred in this FEC block
    // So, all coding shreds in a given FEC block will have the same set index

    if !is_trusted {
        // Already have this coding shred: report it as a duplicate and bail.
        if index_meta.coding().contains(shred_index) {
            metrics.num_coding_shreds_exists += 1;
            handle_duplicate(shred);
            return false;
        }

        if !Blockstore::should_insert_coding_shred(&shred, &self.last_root) {
            metrics.num_coding_shreds_invalid += 1;
            return false;
        }
    }

    // Load (or lazily create from this shred) the erasure meta for the set.
    let erasure_set = shred.erasure_set();
    let erasure_meta = erasure_metas.entry(erasure_set).or_insert_with(|| {
        self.erasure_meta(erasure_set)
            .expect("Expect database get to succeed")
            .unwrap_or_else(|| ErasureMeta::from_coding_shred(&shred).unwrap())
    });

    // TODO: handle_duplicate is not invoked and so duplicate shreds are
    // not gossiped to the rest of cluster.
    if !erasure_meta.check_coding_shred(&shred) {
        // The shred disagrees with the stored erasure config — record the
        // conflicting pair as duplicate-slot evidence when possible.
        metrics.num_coding_shreds_invalid_erasure_config += 1;
        let conflicting_shred = self.find_conflicting_coding_shred(
            &shred,
            slot,
            erasure_meta,
            just_received_shreds,
        );
        if let Some(conflicting_shred) = conflicting_shred {
            if self
                .store_duplicate_if_not_existing(
                    slot,
                    conflicting_shred,
                    shred.payload().clone(),
                )
                .is_err()
            {
                warn!("bad duplicate store..");
            }
        } else {
            datapoint_info!("bad-conflict-shred", ("slot", slot, i64));
        }
        // ToDo: This is a potential slashing condition
        warn!("Received multiple erasure configs for the same erasure set!!!");
        warn!(
            "Slot: {}, shred index: {}, erasure_set: {:?}, \
            is_duplicate: {}, stored config: {:#?}, new shred: {:#?}",
            slot,
            shred.index(),
            erasure_set,
            self.has_duplicate_shreds_in_slot(slot),
            erasure_meta.config(),
            shred,
        );
        return false;
    }

    self.slots_stats
        .record_shred(shred.slot(), shred.fec_set_index(), shred_source, None);

    // insert coding shred into rocks
    let result = self
        .insert_coding_shred(index_meta, &shred, write_batch)
        .is_ok();
    if result {
        index_meta_working_set_entry.did_insert_occur = true;
        metrics.num_inserted += 1;
    }

    // Remember this shred for the rest of the batch (conflict checks,
    // recovery) unless an identical ShredId was already recorded.
    if let HashMapEntry::Vacant(entry) = just_received_shreds.entry(shred.id()) {
        metrics.num_coding_shreds_inserted += 1;
        entry.insert(shred);
    }

    result
}
/// Searches for a previously-seen coding shred of the same erasure set whose
/// erasure config conflicts with `shred`, returning its payload if found.
///
/// Looks first in the backing store, then in the current insertion batch
/// (`just_received_shreds`).
fn find_conflicting_coding_shred(
    &self,
    shred: &Shred,
    slot: Slot,
    erasure_meta: &ErasureMeta,
    just_received_shreds: &HashMap<ShredId, Shred>,
) -> Option<Vec<u8>> {
    // Search for the shred which set the initial erasure config, either inserted,
    // or in the current batch in just_received_shreds.
    for coding_index in erasure_meta.coding_shreds_indices() {
        let maybe_shred = self.get_coding_shred(slot, coding_index);
        if let Ok(Some(shred_data)) = maybe_shred {
            let potential_shred = Shred::new_from_serialized_shred(shred_data).unwrap();
            if shred.erasure_mismatch(&potential_shred).unwrap() {
                return Some(potential_shred.into_payload());
            }
        } else if let Some(potential_shred) = {
            let key = ShredId::new(slot, u32::try_from(coding_index).unwrap(), ShredType::Code);
            just_received_shreds.get(&key)
        } {
            if shred.erasure_mismatch(potential_shred).unwrap() {
                return Some(potential_shred.payload().clone());
            }
        }
    }
    // No conflicting shred located in either source.
    None
}
/// Create an entry to the specified `write_batch` that performs shred
/// insertion and associated metadata update.  The function also updates
/// its in-memory copy of the associated metadata.
///
/// Currently, this function must be invoked while holding
/// `insert_shreds_lock` as it performs read-modify-write operations
/// on multiple column families.
///
/// The resulting `write_batch` may include updates to [`cf::DeadSlots`]
/// and [`cf::ShredData`].  Note that it will also update the in-memory copy
/// of `erasure_metas` and `index_working_set`, which will later be
/// used to update other column families such as [`cf::ErasureMeta`] and
/// [`cf::Index`].
///
/// Arguments:
/// - `shred`: the shred to be inserted
/// - `erasure_metas`: the in-memory hash-map that maintains the dirty
///   copy of the erasure meta.  It will later be written to
///   `cf::ErasureMeta` in insert_shreds_handle_duplicate().
/// - `index_working_set`: the in-memory hash-map that maintains the
///   dirty copy of the index meta.  It will later be written to
///   `cf::Index` in insert_shreds_handle_duplicate().
/// - `slot_meta_working_set`: the in-memory hash-map that maintains
///   the dirty copy of the slot meta.  It will later be written to
///   `cf::SlotMeta` in insert_shreds_handle_duplicate().
/// - `write_batch`: the collection of the current writes which will
///   be committed atomically.
/// - `just_inserted_shreds`: a ShredId-to-shred map which maintains which
///   shreds have been inserted during this batch.
/// - `index_meta_time`: the time spent on loading or creating the
///   index meta entry from the db.
/// - `is_trusted`: if false, this function will check whether the
///   input shred is duplicate.
/// - `handle_duplicate`: the function that handles duplication.
/// - `leader_schedule`: the leader schedule will be used to check
///   whether it is okay to insert the input shred.
/// - `shred_source`: the source of the shred.
#[allow(clippy::too_many_arguments)]
fn check_insert_data_shred<F>(
    &self,
    shred: Shred,
    erasure_metas: &mut HashMap<ErasureSetId, ErasureMeta>,
    index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
    slot_meta_working_set: &mut HashMap<u64, SlotMetaWorkingSetEntry>,
    write_batch: &mut WriteBatch,
    just_inserted_shreds: &mut HashMap<ShredId, Shred>,
    index_meta_time: &mut u64,
    is_trusted: bool,
    handle_duplicate: &F,
    leader_schedule: Option<&LeaderScheduleCache>,
    shred_source: ShredSource,
) -> std::result::Result<Vec<CompletedDataSetInfo>, InsertDataShredError>
where
    F: Fn(Shred),
{
    let slot = shred.slot();
    let shred_index = u64::from(shred.index());

    let index_meta_working_set_entry =
        get_index_meta_entry(&self.db, slot, index_working_set, index_meta_time);
    let index_meta = &mut index_meta_working_set_entry.index;

    let slot_meta_entry = get_slot_meta_entry(
        &self.db,
        slot_meta_working_set,
        slot,
        // A shred with an unreadable parent cannot be placed in the tree.
        shred
            .parent()
            .map_err(|_| InsertDataShredError::InvalidShred)?,
    );

    let slot_meta = &mut slot_meta_entry.new_slot_meta.borrow_mut();

    if !is_trusted {
        if Self::is_data_shred_present(&shred, slot_meta, index_meta.data()) {
            handle_duplicate(shred);
            return Err(InsertDataShredError::Exists);
        }
        if shred.last_in_slot() && shred_index < slot_meta.received && !slot_meta.is_full() {
            // We got a last shred < slot_meta.received, which signals there's an alternative,
            // shorter version of the slot. Because also `!slot_meta.is_full()`, then this
            // means, for the current version of the slot, we might never get all the
            // shreds < the current last index, never replay this slot, and make no
            // progress (for instance if a leader sends an additional detached "last index"
            // shred with a very high index, but none of the intermediate shreds). Ideally, we would
            // just purge all shreds > the new last index slot, but because replay may have already
            // replayed entries past the newly detected "last" shred, then mark the slot as dead
            // and wait for replay to dump and repair the correct version.
            warn!("Received *last* shred index {} less than previous shred index {}, and slot {} is not full, marking slot dead", shred_index, slot_meta.received, slot);
            write_batch.put::<cf::DeadSlots>(slot, &true).unwrap();
        }
        if !self.should_insert_data_shred(
            &shred,
            slot_meta,
            just_inserted_shreds,
            &self.last_root,
            leader_schedule,
            shred_source,
        ) {
            return Err(InsertDataShredError::InvalidShred);
        }
    }

    let erasure_set = shred.erasure_set();
    let newly_completed_data_sets = self.insert_data_shred(
        slot_meta,
        index_meta.data_mut(),
        &shred,
        write_batch,
        shred_source,
    )?;
    // Record the insertion for the rest of this batch and mark both
    // working-set entries dirty so they get persisted.
    just_inserted_shreds.insert(shred.id(), shred);
    index_meta_working_set_entry.did_insert_occur = true;
    slot_meta_entry.did_insert_occur = true;
    // Pull the erasure meta into the working set (if it exists in the db)
    // so recovery can consider this erasure set.
    if let HashMapEntry::Vacant(entry) = erasure_metas.entry(erasure_set) {
        if let Some(meta) = self.erasure_meta(erasure_set).unwrap() {
            entry.insert(meta);
        }
    }
    Ok(newly_completed_data_sets)
}
2020-12-09 23:14:31 -08:00
fn should_insert_coding_shred ( shred : & Shred , last_root : & RwLock < u64 > ) -> bool {
2022-04-25 16:19:37 -07:00
shred . is_code ( ) & & shred . sanitize ( ) . is_ok ( ) & & shred . slot ( ) > * last_root . read ( ) . unwrap ( )
2019-09-04 17:14:42 -07:00
}
2020-04-24 15:04:23 -07:00
fn insert_coding_shred (
& self ,
index_meta : & mut Index ,
shred : & Shred ,
write_batch : & mut WriteBatch ,
) -> Result < ( ) > {
2019-09-04 17:14:42 -07:00
let slot = shred . slot ( ) ;
let shred_index = u64 ::from ( shred . index ( ) ) ;
// Assert guaranteed by integrity checks on the shred that happen before
// `insert_coding_shred` is called
2022-04-25 16:19:37 -07:00
assert! ( shred . is_code ( ) & & shred . sanitize ( ) . is_ok ( ) ) ;
2019-09-04 17:14:42 -07:00
// Commit step: commit all changes to the mutable structures at once, or none at all.
// We don't want only a subset of these changes going through.
2022-04-25 05:43:22 -07:00
write_batch . put_bytes ::< cf ::ShredCode > ( ( slot , shred_index ) , shred . payload ( ) ) ? ;
2021-12-16 11:17:32 -08:00
index_meta . coding_mut ( ) . insert ( shred_index ) ;
2019-09-04 17:14:42 -07:00
2019-09-17 18:22:46 -07:00
Ok ( ( ) )
2019-09-04 17:14:42 -07:00
}
2020-01-14 15:37:53 -08:00
fn is_data_shred_present ( shred : & Shred , slot_meta : & SlotMeta , data_index : & ShredIndex ) -> bool {
let shred_index = u64 ::from ( shred . index ( ) ) ;
// Check that the shred doesn't already exist in blockstore
2021-12-16 11:17:32 -08:00
shred_index < slot_meta . consumed | | data_index . contains ( shred_index )
2020-01-14 15:37:53 -08:00
}
2021-03-22 16:18:22 -07:00
fn get_data_shred_from_just_inserted_or_db < ' a > (
& ' a self ,
2021-12-14 09:34:02 -08:00
just_inserted_shreds : & ' a HashMap < ShredId , Shred > ,
2021-03-22 16:18:22 -07:00
slot : Slot ,
index : u64 ,
) -> Cow < ' a , Vec < u8 > > {
2021-12-14 09:34:02 -08:00
let key = ShredId ::new ( slot , u32 ::try_from ( index ) . unwrap ( ) , ShredType ::Data ) ;
if let Some ( shred ) = just_inserted_shreds . get ( & key ) {
2022-04-25 05:43:22 -07:00
Cow ::Borrowed ( shred . payload ( ) )
2021-03-22 16:18:22 -07:00
} else {
// If it doesn't exist in the just inserted set, it must exist in
// the backing store
Cow ::Owned ( self . get_data_shred ( slot , index ) . unwrap ( ) . unwrap ( ) )
}
}
2019-09-04 17:14:42 -07:00
fn should_insert_data_shred (
2020-12-09 23:14:31 -08:00
& self ,
2019-09-18 16:24:30 -07:00
shred : & Shred ,
2019-09-04 17:14:42 -07:00
slot_meta : & SlotMeta ,
2021-12-14 09:34:02 -08:00
just_inserted_shreds : & HashMap < ShredId , Shred > ,
2019-09-04 17:14:42 -07:00
last_root : & RwLock < u64 > ,
2021-08-13 07:47:02 -07:00
leader_schedule : Option < & LeaderScheduleCache > ,
2021-06-30 09:20:07 -07:00
shred_source : ShredSource ,
2019-09-04 17:14:42 -07:00
) -> bool {
let shred_index = u64 ::from ( shred . index ( ) ) ;
let slot = shred . slot ( ) ;
2019-09-16 10:28:28 -07:00
let last_in_slot = if shred . last_in_slot ( ) {
2019-09-04 17:14:42 -07:00
debug! ( " got last in slot " ) ;
true
} else {
false
} ;
2022-04-25 16:19:37 -07:00
if let Err ( err ) = shred . sanitize ( ) {
2021-04-30 08:38:15 -07:00
let leader_pubkey = leader_schedule
. and_then ( | leader_schedule | leader_schedule . slot_leader_at ( slot , None ) ) ;
datapoint_error! (
" blockstore_error " ,
(
" error " ,
format! (
2022-04-25 16:19:37 -07:00
" Leader {:?}, slot {}: received invalid shred: {:?} " ,
leader_pubkey , slot , err ,
2021-04-30 08:38:15 -07:00
) ,
String
)
) ;
2021-04-27 15:40:41 -07:00
return false ;
}
2019-09-04 17:14:42 -07:00
// Check that we do not receive shred_index >= than the last_index
// for the slot
let last_index = slot_meta . last_index ;
2021-12-11 06:47:20 -08:00
if last_index . map ( | ix | shred_index > = ix ) . unwrap_or_default ( ) {
2020-05-29 04:35:20 -07:00
let leader_pubkey = leader_schedule
2021-04-30 08:38:15 -07:00
. and_then ( | leader_schedule | leader_schedule . slot_leader_at ( slot , None ) ) ;
2020-12-09 23:14:31 -08:00
2021-03-22 16:18:22 -07:00
let ending_shred : Cow < Vec < u8 > > = self . get_data_shred_from_just_inserted_or_db (
2021-12-14 09:34:02 -08:00
just_inserted_shreds ,
2021-03-22 16:18:22 -07:00
slot ,
2021-12-11 06:47:20 -08:00
last_index . unwrap ( ) ,
2021-03-22 16:18:22 -07:00
) ;
2020-12-09 23:14:31 -08:00
if self
2021-03-22 16:18:22 -07:00
. store_duplicate_if_not_existing (
slot ,
ending_shred . into_owned ( ) ,
2022-04-25 05:43:22 -07:00
shred . payload ( ) . clone ( ) ,
2021-03-22 16:18:22 -07:00
)
2020-12-09 23:14:31 -08:00
. is_err ( )
{
warn! ( " store duplicate error " ) ;
}
2019-09-04 17:14:42 -07:00
datapoint_error! (
2021-04-30 08:38:15 -07:00
" blockstore_error " ,
(
" error " ,
format! (
2021-12-11 06:47:20 -08:00
" Leader {:?}, slot {}: received index {} >= slot.last_index {:?}, shred_source: {:?} " ,
2021-06-30 09:20:07 -07:00
leader_pubkey , slot , shred_index , last_index , shred_source
2021-04-30 08:38:15 -07:00
) ,
String
)
) ;
2019-09-04 17:14:42 -07:00
return false ;
2019-08-26 18:27:45 -07:00
}
2019-11-14 11:49:31 -08:00
// Check that we do not receive a shred with "last_index" true, but shred_index
2019-09-04 17:14:42 -07:00
// less than our current received
if last_in_slot & & shred_index < slot_meta . received {
2020-05-29 04:35:20 -07:00
let leader_pubkey = leader_schedule
2021-04-30 08:38:15 -07:00
. and_then ( | leader_schedule | leader_schedule . slot_leader_at ( slot , None ) ) ;
2020-12-09 23:14:31 -08:00
2021-03-22 16:18:22 -07:00
let ending_shred : Cow < Vec < u8 > > = self . get_data_shred_from_just_inserted_or_db (
2021-12-14 09:34:02 -08:00
just_inserted_shreds ,
2021-03-22 16:18:22 -07:00
slot ,
slot_meta . received - 1 ,
) ;
2020-12-09 23:14:31 -08:00
if self
2021-03-22 16:18:22 -07:00
. store_duplicate_if_not_existing (
slot ,
ending_shred . into_owned ( ) ,
2022-04-25 05:43:22 -07:00
shred . payload ( ) . clone ( ) ,
2021-03-22 16:18:22 -07:00
)
2020-12-09 23:14:31 -08:00
. is_err ( )
{
warn! ( " store duplicate error " ) ;
}
2019-09-04 17:14:42 -07:00
datapoint_error! (
2021-04-30 08:38:15 -07:00
" blockstore_error " ,
(
" error " ,
format! (
2021-06-30 09:20:07 -07:00
" Leader {:?}, slot {}: received shred_index {} < slot.received {}, shred_source: {:?} " ,
leader_pubkey , slot , shred_index , slot_meta . received , shred_source
2021-04-30 08:38:15 -07:00
) ,
String
)
) ;
2019-09-04 17:14:42 -07:00
return false ;
}
let last_root = * last_root . read ( ) . unwrap ( ) ;
2021-12-14 10:57:11 -08:00
// TODO Shouldn't this use shred.parent() instead and update
// slot_meta.parent_slot accordingly?
slot_meta
. parent_slot
. map ( | parent_slot | verify_shred_slots ( slot , parent_slot , last_root ) )
. unwrap_or_default ( )
2019-08-26 18:27:45 -07:00
}
2022-03-30 07:04:49 -07:00
/// Reports a slot-full timing point to the poh_timing_report service, if a
/// sender was configured.
fn send_slot_full_timing(&self, slot: Slot) {
    let sender = match self.shred_timing_point_sender {
        Some(ref sender) => sender,
        None => return,
    };
    send_poh_timing_point(
        sender,
        SlotPohTimingInfo::new_slot_full_poh_time_point(
            slot,
            Some(self.last_root()),
            solana_sdk::timing::timestamp(),
        ),
    );
}
2019-08-20 17:16:06 -07:00
fn insert_data_shred (
2019-08-27 15:09:41 -07:00
& self ,
2019-09-04 17:14:42 -07:00
slot_meta : & mut SlotMeta ,
2020-01-13 12:03:19 -08:00
data_index : & mut ShredIndex ,
2019-09-18 16:24:30 -07:00
shred : & Shred ,
2020-04-24 15:04:23 -07:00
write_batch : & mut WriteBatch ,
2021-06-30 09:20:07 -07:00
shred_source : ShredSource ,
2021-08-13 12:05:18 -07:00
) -> Result < Vec < CompletedDataSetInfo > > {
2019-08-20 17:16:06 -07:00
let slot = shred . slot ( ) ;
let index = u64 ::from ( shred . index ( ) ) ;
2019-09-16 10:28:28 -07:00
let last_in_slot = if shred . last_in_slot ( ) {
2019-08-20 17:16:06 -07:00
debug! ( " got last in slot " ) ;
true
} else {
false
} ;
2019-10-21 16:15:10 -07:00
let last_in_data = if shred . data_complete ( ) {
debug! ( " got last in data " ) ;
true
} else {
false
} ;
2019-10-31 14:03:41 -07:00
// Parent for slot meta should have been set by this point
assert! ( ! is_orphan ( slot_meta ) ) ;
2019-08-20 17:16:06 -07:00
2019-09-04 17:14:42 -07:00
let new_consumed = if slot_meta . consumed = = index {
let mut current_index = index + 1 ;
2019-08-26 18:27:45 -07:00
2021-12-16 11:17:32 -08:00
while data_index . contains ( current_index ) {
2019-09-04 17:14:42 -07:00
current_index + = 1 ;
}
current_index
} else {
slot_meta . consumed
} ;
2019-08-20 17:16:06 -07:00
2019-09-04 17:14:42 -07:00
// Commit step: commit all changes to the mutable structures at once, or none at all.
// We don't want only a subset of these changes going through.
2022-04-25 05:43:22 -07:00
write_batch . put_bytes ::< cf ::ShredData > ( ( slot , index ) , shred . bytes_to_store ( ) ) ? ;
2021-12-16 11:17:32 -08:00
data_index . insert ( index ) ;
2020-09-01 22:06:06 -07:00
let newly_completed_data_sets = update_slot_meta (
2019-10-21 16:15:10 -07:00
last_in_slot ,
last_in_data ,
slot_meta ,
index as u32 ,
new_consumed ,
2019-11-07 11:08:09 -08:00
shred . reference_tick ( ) ,
2021-06-18 06:34:46 -07:00
data_index ,
2021-08-13 12:05:18 -07:00
)
. into_iter ( )
. map ( | ( start_index , end_index ) | CompletedDataSetInfo {
slot ,
start_index ,
end_index ,
} )
. collect ( ) ;
2022-04-04 14:44:21 -07:00
self . slots_stats . record_shred (
shred . slot ( ) ,
shred . fec_set_index ( ) ,
shred_source ,
Some ( slot_meta ) ,
) ;
2022-03-30 07:04:49 -07:00
// slot is full, send slot full timing to poh_timing_report service.
if slot_meta . is_full ( ) {
self . send_slot_full_timing ( slot ) ;
}
2019-09-04 17:14:42 -07:00
trace! ( " inserted shred into slot {:?} and index {:?} " , slot , index ) ;
2022-03-30 07:04:49 -07:00
2020-09-01 22:06:06 -07:00
Ok ( newly_completed_data_sets )
2019-08-20 17:16:06 -07:00
}
2019-11-02 00:38:30 -07:00
pub fn get_data_shred ( & self , slot : Slot , index : u64 ) -> Result < Option < Vec < u8 > > > {
2022-04-28 16:42:37 -07:00
let shred = self . data_shred_cf . get_bytes ( ( slot , index ) ) ? ;
let shred = shred . map ( Shred ::resize_stored_shred ) . transpose ( ) ;
shred . map_err ( | err | {
let err = format! ( " Invalid stored shred: {} " , err ) ;
let err = Box ::new ( bincode ::ErrorKind ::Custom ( err ) ) ;
BlockstoreError ::InvalidShredData ( err )
2021-04-27 15:40:41 -07:00
} )
2019-08-20 17:16:06 -07:00
}
2020-03-19 23:35:01 -07:00
pub fn get_data_shreds_for_slot (
& self ,
slot : Slot ,
start_index : u64 ,
2022-04-25 16:19:37 -07:00
) -> std ::result ::Result < Vec < Shred > , shred ::Error > {
2020-04-24 15:04:23 -07:00
self . slot_data_iterator ( slot , start_index )
. expect ( " blockstore couldn't fetch iterator " )
. map ( | data | Shred ::new_from_serialized_shred ( data . 1. to_vec ( ) ) )
. collect ( )
2020-03-19 23:35:01 -07:00
}
2021-12-11 06:47:20 -08:00
#[cfg(test)]
/// Copies the data shreds of a full `slot` in `[from_index, to_index)` into
/// `buffer`, returning the last copied index and the bytes written. Returns
/// `(0, 0)` without copying when the slot is not yet full.
fn get_data_shreds(
    &self,
    slot: Slot,
    from_index: u64,
    to_index: u64,
    buffer: &mut [u8],
) -> Result<(u64, usize)> {
    let _lock = self.check_lowest_cleanup_slot(slot)?;
    let meta_cf = self.db.column::<cf::SlotMeta>();
    let mut buffer_offset = 0;
    let mut last_index = 0;
    if let Some(meta) = meta_cf.get(slot)? {
        if !meta.is_full() {
            warn!("The slot is not yet full. Will not return any shreds");
            return Ok((last_index, buffer_offset));
        }
        let to_index = cmp::min(to_index, meta.consumed);
        for index in from_index..to_index {
            let shred_data = match self.get_data_shred(slot, index)? {
                Some(shred_data) => shred_data,
                None => continue,
            };
            let shred_len = shred_data.len();
            // Stop once the next shred no longer fits in the buffer.
            if buffer.len().saturating_sub(buffer_offset) < shred_len {
                break;
            }
            buffer[buffer_offset..buffer_offset + shred_len]
                .copy_from_slice(&shred_data[..shred_len]);
            buffer_offset += shred_len;
            last_index = index;
            // All shreds are of the same length.
            // Let's check if we have scope to accommodate another shred
            // If not, let's break right away, as it'll save on 1 DB read
            if buffer.len().saturating_sub(buffer_offset) < shred_len {
                break;
            }
        }
    }
    Ok((last_index, buffer_offset))
}
2019-11-02 00:38:30 -07:00
/// Returns the raw stored bytes of the coding shred at (`slot`, `index`), if any.
pub fn get_coding_shred ( & self , slot : Slot , index : u64 ) -> Result < Option < Vec < u8 > > > {
2020-04-24 15:04:23 -07:00
self . code_shred_cf . get_bytes ( ( slot , index ) )
2018-11-15 15:53:31 -08:00
}
2020-03-19 23:35:01 -07:00
pub fn get_coding_shreds_for_slot (
& self ,
slot : Slot ,
start_index : u64 ,
2022-04-25 16:19:37 -07:00
) -> std ::result ::Result < Vec < Shred > , shred ::Error > {
2020-04-24 15:04:23 -07:00
self . slot_coding_iterator ( slot , start_index )
. expect ( " blockstore couldn't fetch iterator " )
. map ( | code | Shred ::new_from_serialized_shred ( code . 1. to_vec ( ) ) )
. collect ( )
2020-03-19 23:35:01 -07:00
}
2019-11-18 18:05:02 -08:00
// Only used by tests
#[ allow(clippy::too_many_arguments) ]
2021-04-21 05:47:50 -07:00
pub ( crate ) fn write_entries (
2019-08-20 17:16:06 -07:00
& self ,
2019-11-02 00:38:30 -07:00
start_slot : Slot ,
2019-08-20 17:16:06 -07:00
num_ticks_in_start_slot : u64 ,
2019-10-08 00:42:51 -07:00
start_index : u32 ,
2019-08-20 17:16:06 -07:00
ticks_per_slot : u64 ,
parent : Option < u64 > ,
is_full_slot : bool ,
2022-03-17 12:43:57 -07:00
keypair : & Keypair ,
2019-10-08 00:42:51 -07:00
entries : Vec < Entry > ,
2019-11-18 18:05:02 -08:00
version : u16 ,
2021-04-21 05:47:50 -07:00
) -> Result < usize /* num of data shreds */ > {
2019-10-31 13:38:50 -07:00
let mut parent_slot = parent . map_or ( start_slot . saturating_sub ( 1 ) , | v | v ) ;
let num_slots = ( start_slot - parent_slot ) . max ( 1 ) ; // Note: slot 0 has parent slot 0
assert! ( num_ticks_in_start_slot < num_slots * ticks_per_slot ) ;
let mut remaining_ticks_in_slot = num_slots * ticks_per_slot - num_ticks_in_start_slot ;
2019-08-20 17:16:06 -07:00
let mut current_slot = start_slot ;
2021-06-21 13:12:38 -07:00
let mut shredder = Shredder ::new ( current_slot , parent_slot , 0 , version ) . unwrap ( ) ;
2019-08-20 17:16:06 -07:00
let mut all_shreds = vec! [ ] ;
2019-10-08 00:42:51 -07:00
let mut slot_entries = vec! [ ] ;
2019-08-20 17:16:06 -07:00
// Find all the entries for start_slot
2019-10-08 00:42:51 -07:00
for entry in entries . into_iter ( ) {
2019-08-20 17:16:06 -07:00
if remaining_ticks_in_slot = = 0 {
current_slot + = 1 ;
parent_slot = current_slot - 1 ;
remaining_ticks_in_slot = ticks_per_slot ;
2019-10-08 00:42:51 -07:00
let mut current_entries = vec! [ ] ;
std ::mem ::swap ( & mut slot_entries , & mut current_entries ) ;
let start_index = {
if all_shreds . is_empty ( ) {
start_index
} else {
0
}
} ;
2021-12-19 14:37:55 -08:00
let ( mut data_shreds , mut coding_shreds ) = shredder . entries_to_shreds (
keypair ,
& current_entries ,
true , // is_last_in_slot
start_index , // next_shred_index
start_index , // next_code_index
) ;
2019-10-08 00:42:51 -07:00
all_shreds . append ( & mut data_shreds ) ;
all_shreds . append ( & mut coding_shreds ) ;
2019-11-06 13:27:58 -08:00
shredder = Shredder ::new (
current_slot ,
parent_slot ,
( ticks_per_slot - remaining_ticks_in_slot ) as u8 ,
2019-11-18 18:05:02 -08:00
version ,
2019-11-06 13:27:58 -08:00
)
2021-04-21 05:47:50 -07:00
. unwrap ( ) ;
2019-08-20 17:16:06 -07:00
}
2019-10-08 00:42:51 -07:00
if entry . is_tick ( ) {
2019-08-20 17:16:06 -07:00
remaining_ticks_in_slot - = 1 ;
}
2019-10-08 00:42:51 -07:00
slot_entries . push ( entry ) ;
2019-08-20 17:16:06 -07:00
}
2019-10-08 00:42:51 -07:00
if ! slot_entries . is_empty ( ) {
2021-12-19 14:37:55 -08:00
let ( mut data_shreds , mut coding_shreds ) = shredder . entries_to_shreds (
keypair ,
& slot_entries ,
is_full_slot ,
0 , // next_shred_index
0 , // next_code_index
) ;
2019-10-08 00:42:51 -07:00
all_shreds . append ( & mut data_shreds ) ;
all_shreds . append ( & mut coding_shreds ) ;
2019-08-20 17:16:06 -07:00
}
2021-04-21 05:47:50 -07:00
let num_data = all_shreds . iter ( ) . filter ( | shred | shred . is_data ( ) ) . count ( ) ;
2019-11-14 00:32:07 -08:00
self . insert_shreds ( all_shreds , None , false ) ? ;
2021-04-21 05:47:50 -07:00
Ok ( num_data )
2019-08-20 17:16:06 -07:00
}
2019-11-02 00:38:30 -07:00
/// Returns the shred `Index` record for `slot`, if one has been stored.
pub fn get_index ( & self , slot : Slot ) -> Result < Option < Index > > {
2019-07-10 11:08:17 -07:00
self . index_cf . get ( slot )
}
2019-07-17 14:42:29 -07:00
/// Manually update the meta for a slot.
/// Can interfere with automatic meta update and potentially break chaining.
/// Dangerous. Use with care.
///
/// NOTE(review): `bytes` is written verbatim to the SlotMeta column; it is
/// presumably a serialized `SlotMeta` — confirm at call sites.
2019-11-02 00:38:30 -07:00
pub fn put_meta_bytes ( & self , slot : Slot , bytes : & [ u8 ] ) -> Result < ( ) > {
2019-07-17 14:42:29 -07:00
self . meta_cf . put_bytes ( slot , bytes )
}
2020-04-24 15:04:23 -07:00
// Given a start and end entry index, find all the missing
// indexes in the ledger in the range [start_index, end_index)
// for the slot with the specified slot
fn find_missing_indexes < C > (
db_iterator : & mut DBRawIterator ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
2020-04-24 15:04:23 -07:00
first_timestamp : u64 ,
2019-01-08 15:53:44 -08:00
start_index : u64 ,
end_index : u64 ,
max_missing : usize ,
2020-04-24 15:04:23 -07:00
) -> Vec < u64 >
where
C : Column < Index = ( u64 , u64 ) > ,
{
if start_index > = end_index | | max_missing = = 0 {
return vec! [ ] ;
}
let mut missing_indexes = vec! [ ] ;
let ticks_since_first_insert =
DEFAULT_TICKS_PER_SECOND * ( timestamp ( ) - first_timestamp ) / 1000 ;
// Seek to the first shred with index >= start_index
db_iterator . seek ( & C ::key ( ( slot , start_index ) ) ) ;
// The index of the first missing shred in the slot
let mut prev_index = start_index ;
' outer : loop {
if ! db_iterator . valid ( ) {
for i in prev_index .. end_index {
missing_indexes . push ( i ) ;
if missing_indexes . len ( ) = = max_missing {
break ;
2019-01-08 15:53:44 -08:00
}
2020-04-24 15:04:23 -07:00
}
break ;
}
2021-06-18 06:34:46 -07:00
let ( current_slot , index ) = C ::index ( db_iterator . key ( ) . expect ( " Expect a valid key " ) ) ;
2020-04-24 15:04:23 -07:00
let current_index = {
if current_slot > slot {
end_index
2019-02-07 15:10:54 -08:00
} else {
2020-04-24 15:04:23 -07:00
index
2019-01-08 15:53:44 -08:00
}
2020-04-24 15:04:23 -07:00
} ;
let upper_index = cmp ::min ( current_index , end_index ) ;
// the tick that will be used to figure out the timeout for this hole
2022-05-18 06:23:04 -07:00
let data = db_iterator . value ( ) . expect ( " couldn't read value " ) ;
let reference_tick = u64 ::from ( Shred ::reference_tick_from_data ( data ) . unwrap ( ) ) ;
2020-04-24 15:04:23 -07:00
if ticks_since_first_insert < reference_tick + MAX_TURBINE_DELAY_IN_TICKS {
// The higher index holes have not timed out yet
break 'outer ;
}
for i in prev_index .. upper_index {
missing_indexes . push ( i ) ;
if missing_indexes . len ( ) = = max_missing {
break 'outer ;
}
}
if current_slot > slot {
break ;
}
if current_index > = end_index {
break ;
}
prev_index = current_index + 1 ;
db_iterator . next ( ) ;
2019-01-08 15:53:44 -08:00
}
2020-04-24 15:04:23 -07:00
missing_indexes
2019-01-08 15:53:44 -08:00
}
2020-04-24 15:04:23 -07:00
pub fn find_missing_data_indexes (
2019-01-08 15:53:44 -08:00
& self ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
2020-04-24 15:04:23 -07:00
first_timestamp : u64 ,
2019-01-08 15:53:44 -08:00
start_index : u64 ,
end_index : u64 ,
max_missing : usize ,
) -> Vec < u64 > {
2020-04-24 15:04:23 -07:00
if let Ok ( mut db_iterator ) = self
. db
. raw_iterator_cf ( self . db . cf_handle ::< cf ::ShredData > ( ) )
{
Self ::find_missing_indexes ::< cf ::ShredData > (
& mut db_iterator ,
slot ,
first_timestamp ,
start_index ,
end_index ,
max_missing ,
)
} else {
vec! [ ]
2019-04-02 14:58:07 -07:00
}
2019-01-08 15:53:44 -08:00
}
2020-09-09 08:33:14 -07:00
/// Returns the cached block time for `slot`, or `Ok(None)` if not yet written.
pub fn get_block_time ( & self , slot : Slot ) -> Result < Option < UnixTimestamp > > {
2022-01-21 16:01:22 -08:00
datapoint_info! ( " blockstore-rpc-api " , ( " method " , " get_block_time " , String ) ) ;
2021-06-04 15:40:27 -07:00
// Hold the read guard so LedgerCleanupService cannot purge the slot mid-read.
let _lock = self . check_lowest_cleanup_slot ( slot ) ? ;
2020-09-09 08:33:14 -07:00
self . blocktime_cf . get ( slot )
}
2020-10-26 12:23:45 -07:00
/// Writes `timestamp` as the block time of `slot` to the Blocktime column.
pub fn cache_block_time ( & self , slot : Slot , timestamp : UnixTimestamp ) -> Result < ( ) > {
2021-05-26 21:16:16 -07:00
self . blocktime_cf . put ( slot , & timestamp )
}
/// Returns the cached block height for `slot`, or `Ok(None)` if not yet written.
pub fn get_block_height ( & self , slot : Slot ) -> Result < Option < u64 > > {
2022-01-21 16:01:22 -08:00
datapoint_info! ( " blockstore-rpc-api " , ( " method " , " get_block_height " , String ) ) ;
2021-06-04 15:40:27 -07:00
// Hold the read guard so LedgerCleanupService cannot purge the slot mid-read.
let _lock = self . check_lowest_cleanup_slot ( slot ) ? ;
2021-05-26 21:16:16 -07:00
self . block_height_cf . get ( slot )
}
/// Writes `block_height` for `slot` to the BlockHeight column.
pub fn cache_block_height ( & self , slot : Slot , block_height : u64 ) -> Result < ( ) > {
self . block_height_cf . put ( slot , & block_height )
2020-10-26 12:23:45 -07:00
}
2022-02-28 22:57:41 -08:00
/// The first complete block that is available in the Blockstore ledger
2020-04-22 13:33:06 -07:00
pub fn get_first_available_block ( & self ) -> Result < Slot > {
2022-04-27 23:36:19 -07:00
let mut root_iterator = self . rooted_slot_iterator ( self . lowest_slot_with_genesis ( ) ) ? ;
let first_root = root_iterator . next ( ) . unwrap_or_default ( ) ;
// If the first root is slot 0, it is genesis. Genesis is always complete, so it is correct
// to return it as first-available.
if first_root = = 0 {
return Ok ( first_root ) ;
}
// Otherwise, the block at root-index 0 cannot ever be complete, because it is missing its
// parent blockhash. A parent blockhash must be calculated from the entries of the previous
// block. Therefore, the first available complete block is that at root-index 1.
Ok ( root_iterator . next ( ) . unwrap_or_default ( ) )
2020-04-22 13:33:06 -07:00
}
2021-03-26 15:47:35 -07:00
pub fn get_rooted_block (
2021-02-17 17:04:52 -08:00
& self ,
slot : Slot ,
require_previous_blockhash : bool ,
2022-01-13 23:24:41 -08:00
) -> Result < VersionedConfirmedBlock > {
2022-01-21 16:01:22 -08:00
datapoint_info! ( " blockstore-rpc-api " , ( " method " , " get_rooted_block " , String ) ) ;
2021-06-04 15:40:27 -07:00
let _lock = self . check_lowest_cleanup_slot ( slot ) ? ;
2019-11-14 15:34:39 -08:00
if self . is_root ( slot ) {
2021-03-26 15:47:35 -07:00
return self . get_complete_block ( slot , require_previous_blockhash ) ;
}
Err ( BlockstoreError ::SlotNotRooted )
}
2019-11-17 19:17:15 -08:00
2021-03-26 15:47:35 -07:00
pub fn get_complete_block (
& self ,
slot : Slot ,
require_previous_blockhash : bool ,
2022-01-13 23:24:41 -08:00
) -> Result < VersionedConfirmedBlock > {
2021-03-26 15:47:35 -07:00
let slot_meta_cf = self . db . column ::< cf ::SlotMeta > ( ) ;
let slot_meta = match slot_meta_cf . get ( slot ) ? {
Some ( slot_meta ) = > slot_meta ,
None = > {
info! ( " SlotMeta not found for slot {} " , slot ) ;
return Err ( BlockstoreError ::SlotUnavailable ) ;
}
} ;
if slot_meta . is_full ( ) {
2020-04-09 13:09:59 -07:00
let slot_entries = self . get_slot_entries ( slot , 0 ) ? ;
2019-12-11 14:06:54 -08:00
if ! slot_entries . is_empty ( ) {
2021-08-17 15:17:56 -07:00
let blockhash = slot_entries
. last ( )
. map ( | entry | entry . hash )
. unwrap_or_else ( | | panic! ( " Rooted slot {:?} must have blockhash " , slot ) ) ;
2019-12-11 14:06:54 -08:00
let slot_transaction_iterator = slot_entries
2021-08-17 15:17:56 -07:00
. into_iter ( )
2021-02-13 22:32:43 -08:00
. flat_map ( | entry | entry . transactions )
. map ( | transaction | {
2022-05-06 12:19:50 -07:00
if let Err ( err ) = transaction . sanitize (
// Don't enable additional sanitization checks until
// all clusters have activated the static program id
// feature gate so that bigtable upload isn't affected
false , // require_static_program_ids
) {
2021-02-13 22:32:43 -08:00
warn! (
2021-03-26 15:47:35 -07:00
" Blockstore::get_block sanitize failed: {:?}, \
2021-02-13 22:32:43 -08:00
slot : { :? } , \
{ :? } " ,
err , slot , transaction ,
) ;
}
transaction
} ) ;
2021-12-14 10:57:11 -08:00
let parent_slot_entries = slot_meta
. parent_slot
. and_then ( | parent_slot | {
self . get_slot_entries ( parent_slot , /* shred_start_index: */ 0 )
. ok ( )
} )
2020-04-22 13:33:06 -07:00
. unwrap_or_default ( ) ;
2021-02-17 17:04:52 -08:00
if parent_slot_entries . is_empty ( ) & & require_previous_blockhash {
return Err ( BlockstoreError ::ParentEntriesUnavailable ) ;
}
2019-12-11 14:06:54 -08:00
let previous_blockhash = if ! parent_slot_entries . is_empty ( ) {
get_last_hash ( parent_slot_entries . iter ( ) ) . unwrap ( )
} else {
Hash ::default ( )
} ;
2020-10-15 17:04:10 -07:00
let rewards = self
. rewards_cf
. get_protobuf_or_bincode ::< StoredExtendedRewards > ( slot ) ?
. unwrap_or_default ( )
. into ( ) ;
2021-05-26 21:16:16 -07:00
// The Blocktime and BlockHeight column families are updated asynchronously; they
// may not be written by the time the complete slot entries are available. In this
// case, these fields will be `None`.
2020-09-09 08:33:14 -07:00
let block_time = self . blocktime_cf . get ( slot ) ? ;
2021-05-26 21:16:16 -07:00
let block_height = self . block_height_cf . get ( slot ) ? ;
2020-02-04 18:50:24 -08:00
2022-01-13 23:24:41 -08:00
let block = VersionedConfirmedBlock {
2020-01-12 21:34:30 -08:00
previous_blockhash : previous_blockhash . to_string ( ) ,
blockhash : blockhash . to_string ( ) ,
2021-12-14 10:57:11 -08:00
// If the slot is full it should have parent_slot populated
// from shreds received.
parent_slot : slot_meta . parent_slot . unwrap ( ) ,
2020-09-23 22:10:29 -07:00
transactions : self
2021-08-17 15:17:56 -07:00
. map_transactions_to_statuses ( slot , slot_transaction_iterator ) ? ,
2020-02-04 18:50:24 -08:00
rewards ,
2020-09-09 08:33:14 -07:00
block_time ,
2021-05-26 21:16:16 -07:00
block_height ,
2019-12-11 14:06:54 -08:00
} ;
return Ok ( block ) ;
}
2019-11-14 15:34:39 -08:00
}
2021-03-26 15:47:35 -07:00
Err ( BlockstoreError ::SlotUnavailable )
2019-11-14 15:34:39 -08:00
}
2021-08-17 15:17:56 -07:00
pub fn map_transactions_to_statuses (
2019-11-17 19:17:15 -08:00
& self ,
2019-11-18 08:12:42 -08:00
slot : Slot ,
2021-08-17 15:17:56 -07:00
iterator : impl Iterator < Item = VersionedTransaction > ,
2022-01-13 23:24:41 -08:00
) -> Result < Vec < VersionedTransactionWithStatusMeta > > {
2019-11-18 08:12:42 -08:00
iterator
2022-01-13 23:24:41 -08:00
. map ( | transaction | {
let signature = transaction . signatures [ 0 ] ;
Ok ( VersionedTransactionWithStatusMeta {
transaction ,
meta : self
2022-02-09 21:28:18 -08:00
. read_transaction_status ( ( signature , slot ) ) ?
. ok_or ( BlockstoreError ::MissingTransactionMetadata ) ? ,
2022-01-13 23:24:41 -08:00
} )
2019-11-18 08:12:42 -08:00
} )
. collect ( )
2019-11-17 19:17:15 -08:00
}
2020-08-10 09:27:38 -07:00
/// Initializes the TransactionStatusIndex column family with two records, `0` and `1`,
/// which are used as the primary index for entries in the TransactionStatus and
/// AddressSignatures columns. At any given time, one primary index is active (ie. new records
/// are stored under this index), the other is frozen.
2020-04-04 20:24:06 -07:00
fn initialize_transaction_status_index ( & self ) -> Result < ( ) > {
self . transaction_status_index_cf
. put ( 0 , & TransactionStatusIndexMeta ::default ( ) ) ? ;
self . transaction_status_index_cf
. put ( 1 , & TransactionStatusIndexMeta ::default ( ) ) ? ;
// This dummy status improves compaction performance
2021-03-05 08:05:35 -08:00
let default_status = TransactionStatusMeta ::default ( ) . into ( ) ;
self . transaction_status_cf
. put_protobuf ( cf ::TransactionStatus ::as_index ( 2 ) , & default_status ) ? ;
2020-04-08 12:50:39 -07:00
self . address_signatures_cf . put (
cf ::AddressSignatures ::as_index ( 2 ) ,
& AddressSignatureMeta ::default ( ) ,
2020-04-04 20:24:06 -07:00
)
}
2020-08-10 09:27:38 -07:00
/// Toggles the active primary index between `0` and `1`, and clears the stored max-slot of the
/// frozen index in preparation for pruning.
2020-04-04 20:24:06 -07:00
fn toggle_transaction_status_index (
& self ,
batch : & mut WriteBatch ,
w_active_transaction_status_index : & mut u64 ,
to_slot : Slot ,
) -> Result < Option < u64 > > {
let index0 = self . transaction_status_index_cf . get ( 0 ) ? ;
if index0 . is_none ( ) {
return Ok ( None ) ;
}
let mut index0 = index0 . unwrap ( ) ;
let mut index1 = self . transaction_status_index_cf . get ( 1 ) ? . unwrap ( ) ;
if ! index0 . frozen & & ! index1 . frozen {
index0 . frozen = true ;
* w_active_transaction_status_index = 1 ;
batch . put ::< cf ::TransactionStatusIndex > ( 0 , & index0 ) ? ;
Ok ( None )
} else {
2021-05-28 00:42:56 -07:00
let purge_target_primary_index = if index0 . frozen & & to_slot > index0 . max_slot {
info! (
" Pruning expired primary index 0 up to slot {} (max requested: {}) " ,
index0 . max_slot , to_slot
) ;
2020-04-04 20:24:06 -07:00
Some ( 0 )
} else if index1 . frozen & & to_slot > index1 . max_slot {
2021-05-28 00:42:56 -07:00
info! (
" Pruning expired primary index 1 up to slot {} (max requested: {}) " ,
index1 . max_slot , to_slot
) ;
2020-04-04 20:24:06 -07:00
Some ( 1 )
} else {
None
} ;
2021-05-28 00:42:56 -07:00
if let Some ( purge_target_primary_index ) = purge_target_primary_index {
* w_active_transaction_status_index = purge_target_primary_index ;
2020-04-04 20:24:06 -07:00
if index0 . frozen {
index0 . max_slot = 0
} ;
index0 . frozen = ! index0 . frozen ;
batch . put ::< cf ::TransactionStatusIndex > ( 0 , & index0 ) ? ;
if index1 . frozen {
index1 . max_slot = 0
} ;
index1 . frozen = ! index1 . frozen ;
batch . put ::< cf ::TransactionStatusIndex > ( 1 , & index1 ) ? ;
}
2021-05-28 00:42:56 -07:00
Ok ( purge_target_primary_index )
2020-04-04 20:24:06 -07:00
}
}
2021-05-28 00:42:56 -07:00
fn get_primary_index_to_write (
2020-04-04 20:24:06 -07:00
& self ,
2020-04-08 12:50:39 -07:00
slot : Slot ,
2021-05-28 00:42:56 -07:00
// take WriteGuard to require critical section semantics at call site
w_active_transaction_status_index : & RwLockWriteGuard < Slot > ,
2020-04-08 12:50:39 -07:00
) -> Result < u64 > {
2021-05-28 00:42:56 -07:00
let i = * * w_active_transaction_status_index ;
2020-04-04 20:24:06 -07:00
let mut index_meta = self . transaction_status_index_cf . get ( i ) ? . unwrap ( ) ;
if slot > index_meta . max_slot {
assert! ( ! index_meta . frozen ) ;
index_meta . max_slot = slot ;
self . transaction_status_index_cf . put ( i , & index_meta ) ? ;
}
2020-04-08 12:50:39 -07:00
Ok ( i )
2020-04-04 20:24:06 -07:00
}
2020-03-23 12:49:21 -07:00
pub fn read_transaction_status (
& self ,
2020-04-04 20:24:06 -07:00
index : ( Signature , Slot ) ,
2020-03-26 13:29:30 -07:00
) -> Result < Option < TransactionStatusMeta > > {
2020-04-04 20:24:06 -07:00
let ( signature , slot ) = index ;
2021-03-05 08:05:35 -08:00
let result = self
. transaction_status_cf
. get_protobuf_or_bincode ::< StoredTransactionStatusMeta > ( ( 0 , signature , slot ) ) ? ;
2020-04-04 20:24:06 -07:00
if result . is_none ( ) {
2021-03-05 08:05:35 -08:00
Ok ( self
. transaction_status_cf
. get_protobuf_or_bincode ::< StoredTransactionStatusMeta > ( ( 1 , signature , slot ) ) ?
. and_then ( | meta | meta . try_into ( ) . ok ( ) ) )
2020-04-04 20:24:06 -07:00
} else {
2021-03-05 08:05:35 -08:00
Ok ( result . and_then ( | meta | meta . try_into ( ) . ok ( ) ) )
2020-04-04 20:24:06 -07:00
}
2020-03-23 12:49:21 -07:00
}
2019-11-20 15:43:10 -08:00
pub fn write_transaction_status (
& self ,
2020-04-08 12:50:39 -07:00
slot : Slot ,
signature : Signature ,
writable_keys : Vec < & Pubkey > ,
readonly_keys : Vec < & Pubkey > ,
2021-03-05 08:05:35 -08:00
status : TransactionStatusMeta ,
2019-11-20 15:43:10 -08:00
) -> Result < ( ) > {
2021-03-05 08:05:35 -08:00
let status = status . into ( ) ;
2020-04-08 12:50:39 -07:00
// This write lock prevents interleaving issues with the transaction_status_index_cf by gating
// writes to that column
2021-05-28 00:42:56 -07:00
let w_active_transaction_status_index =
2020-04-04 20:24:06 -07:00
self . active_transaction_status_index . write ( ) . unwrap ( ) ;
2021-05-28 00:42:56 -07:00
let primary_index =
self . get_primary_index_to_write ( slot , & w_active_transaction_status_index ) ? ;
2020-04-08 12:50:39 -07:00
self . transaction_status_cf
2021-03-05 08:05:35 -08:00
. put_protobuf ( ( primary_index , signature , slot ) , & status ) ? ;
2020-04-08 12:50:39 -07:00
for address in writable_keys {
self . address_signatures_cf . put (
2020-04-09 20:21:31 -07:00
( primary_index , * address , slot , signature ) ,
& AddressSignatureMeta { writeable : true } ,
2020-04-08 12:50:39 -07:00
) ? ;
}
for address in readonly_keys {
self . address_signatures_cf . put (
2020-04-09 20:21:31 -07:00
( primary_index , * address , slot , signature ) ,
& AddressSignatureMeta { writeable : false } ,
2020-04-08 12:50:39 -07:00
) ? ;
}
Ok ( ( ) )
2019-11-20 15:43:10 -08:00
}
2021-09-02 23:28:52 -07:00
/// Returns the memo string stored for `signature`, if any.
pub fn read_transaction_memos ( & self , signature : Signature ) -> Result < Option < String > > {
self . transaction_memos_cf . get ( signature )
}
/// Stores `memos` keyed by the transaction `signature`.
pub fn write_transaction_memos ( & self , signature : & Signature , memos : String ) -> Result < ( ) > {
self . transaction_memos_cf . put ( * signature , & memos )
}
2021-06-04 15:40:27 -07:00
fn check_lowest_cleanup_slot ( & self , slot : Slot ) -> Result < std ::sync ::RwLockReadGuard < Slot > > {
// lowest_cleanup_slot is the last slot that was not cleaned up by LedgerCleanupService
let lowest_cleanup_slot = self . lowest_cleanup_slot . read ( ) . unwrap ( ) ;
if * lowest_cleanup_slot > 0 & & * lowest_cleanup_slot > = slot {
return Err ( BlockstoreError ::SlotCleanedUp ) ;
}
// Make caller hold this lock properly; otherwise LedgerCleanupService can purge/compact
// needed slots here at any given moment
Ok ( lowest_cleanup_slot )
}
2021-05-28 00:42:56 -07:00
fn ensure_lowest_cleanup_slot ( & self ) -> ( std ::sync ::RwLockReadGuard < Slot > , Slot ) {
// Ensures consistent result by using lowest_cleanup_slot as the lower bound
// for reading columns that do not employ strong read consistency with slot-based
// delete_range
let lowest_cleanup_slot = self . lowest_cleanup_slot . read ( ) . unwrap ( ) ;
let lowest_available_slot = ( * lowest_cleanup_slot )
. checked_add ( 1 )
. expect ( " overflow from trusted value " ) ;
// Make caller hold this lock properly; otherwise LedgerCleanupService can purge/compact
// needed slots here at any given moment.
// Blockstore callers, like rpc, can process concurrent read queries
( lowest_cleanup_slot , lowest_available_slot )
}
2021-03-26 15:47:35 -07:00
// Returns a transaction status, as well as a loop counter for unit testing
2020-04-06 03:04:54 -07:00
fn get_transaction_status_with_counter (
& self ,
signature : Signature ,
2021-03-31 20:04:00 -07:00
confirmed_unrooted_slots : & [ Slot ] ,
2020-04-06 03:04:54 -07:00
) -> Result < ( Option < ( Slot , TransactionStatusMeta ) > , u64 ) > {
let mut counter = 0 ;
2021-05-28 00:42:56 -07:00
let ( lock , lowest_available_slot ) = self . ensure_lowest_cleanup_slot ( ) ;
2020-04-06 03:04:54 -07:00
for transaction_status_cf_primary_index in 0 ..= 1 {
let index_iterator = self . transaction_status_cf . iter ( IteratorMode ::From (
2021-05-28 00:42:56 -07:00
(
transaction_status_cf_primary_index ,
signature ,
lowest_available_slot ,
) ,
2020-04-06 03:04:54 -07:00
IteratorDirection ::Forward ,
) ) ? ;
2021-03-05 08:05:35 -08:00
for ( ( i , sig , slot ) , _data ) in index_iterator {
2020-04-06 03:04:54 -07:00
counter + = 1 ;
2020-08-06 15:21:46 -07:00
if i ! = transaction_status_cf_primary_index | | sig ! = signature {
2020-04-06 03:04:54 -07:00
break ;
}
2021-03-31 20:04:00 -07:00
if ! self . is_root ( slot ) & & ! confirmed_unrooted_slots . contains ( & slot ) {
2021-03-26 15:47:35 -07:00
continue ;
2020-04-06 03:04:54 -07:00
}
2021-03-26 15:47:35 -07:00
let status = self
. transaction_status_cf
. get_protobuf_or_bincode ::< StoredTransactionStatusMeta > ( ( i , sig , slot ) ) ?
. and_then ( | status | status . try_into ( ) . ok ( ) )
. map ( | status | ( slot , status ) ) ;
return Ok ( ( status , counter ) ) ;
2020-04-06 03:04:54 -07:00
}
}
2021-05-28 00:42:56 -07:00
drop ( lock ) ;
2020-04-06 03:04:54 -07:00
Ok ( ( None , counter ) )
}
2021-03-31 20:04:00 -07:00
/// Returns a transaction status
pub fn get_rooted_transaction_status (
& self ,
signature : Signature ,
) -> Result < Option < ( Slot , TransactionStatusMeta ) > > {
datapoint_info! (
" blockstore-rpc-api " ,
2022-01-21 16:01:22 -08:00
( " method " , " get_rooted_transaction_status " , String )
2021-03-31 20:04:00 -07:00
) ;
self . get_transaction_status ( signature , & [ ] )
}
2021-03-26 15:47:35 -07:00
/// Returns a transaction status
2020-04-06 03:04:54 -07:00
pub fn get_transaction_status (
& self ,
signature : Signature ,
2021-03-31 20:04:00 -07:00
confirmed_unrooted_slots : & [ Slot ] ,
2020-04-06 03:04:54 -07:00
) -> Result < Option < ( Slot , TransactionStatusMeta ) > > {
2020-04-15 17:09:14 -07:00
datapoint_info! (
" blockstore-rpc-api " ,
2022-01-21 16:01:22 -08:00
( " method " , " get_transaction_status " , String )
2020-04-15 17:09:14 -07:00
) ;
2021-03-31 20:04:00 -07:00
self . get_transaction_status_with_counter ( signature , confirmed_unrooted_slots )
2020-04-06 03:04:54 -07:00
. map ( | ( status , _ ) | status )
}
2020-04-08 23:57:30 -07:00
/// Returns a complete transaction if it was processed in a root
2021-03-31 20:04:00 -07:00
pub fn get_rooted_transaction (
2020-04-08 23:57:30 -07:00
& self ,
signature : Signature ,
2022-02-09 21:28:18 -08:00
) -> Result < Option < ConfirmedTransactionWithStatusMeta > > {
2020-04-15 17:09:14 -07:00
datapoint_info! (
" blockstore-rpc-api " ,
2022-01-21 16:01:22 -08:00
( " method " , " get_rooted_transaction " , String )
2020-04-15 17:09:14 -07:00
) ;
2021-03-31 20:04:00 -07:00
self . get_transaction_with_status ( signature , & [ ] )
2021-03-26 15:47:35 -07:00
}
/// Returns a complete transaction
pub fn get_complete_transaction (
& self ,
signature : Signature ,
2021-03-31 20:04:00 -07:00
highest_confirmed_slot : Slot ,
2022-02-09 21:28:18 -08:00
) -> Result < Option < ConfirmedTransactionWithStatusMeta > > {
2021-03-26 15:47:35 -07:00
datapoint_info! (
" blockstore-rpc-api " ,
2022-01-21 16:01:22 -08:00
( " method " , " get_complete_transaction " , String )
2021-03-26 15:47:35 -07:00
) ;
2021-04-04 21:14:02 -07:00
let last_root = self . last_root ( ) ;
2021-03-31 20:04:00 -07:00
let confirmed_unrooted_slots : Vec < _ > =
AncestorIterator ::new_inclusive ( highest_confirmed_slot , self )
2021-04-04 21:14:02 -07:00
. take_while ( | & slot | slot > last_root )
2021-03-31 20:04:00 -07:00
. collect ( ) ;
self . get_transaction_with_status ( signature , & confirmed_unrooted_slots )
2021-03-26 15:47:35 -07:00
}
fn get_transaction_with_status (
& self ,
signature : Signature ,
2021-03-31 20:04:00 -07:00
confirmed_unrooted_slots : & [ Slot ] ,
2022-02-09 21:28:18 -08:00
) -> Result < Option < ConfirmedTransactionWithStatusMeta > > {
if let Some ( ( slot , meta ) ) =
2021-03-31 20:04:00 -07:00
self . get_transaction_status ( signature , confirmed_unrooted_slots ) ?
{
2020-08-06 15:21:46 -07:00
let transaction = self
. find_transaction_in_slot ( slot , signature ) ?
. ok_or ( BlockstoreError ::TransactionStatusSlotMismatch ) ? ; // Should not happen
2021-08-17 15:17:56 -07:00
2021-01-20 22:10:35 -08:00
let block_time = self . get_block_time ( slot ) ? ;
2022-02-09 21:28:18 -08:00
Ok ( Some ( ConfirmedTransactionWithStatusMeta {
2020-04-08 23:57:30 -07:00
slot ,
2022-02-09 21:28:18 -08:00
tx_with_meta : TransactionWithStatusMeta ::Complete (
VersionedTransactionWithStatusMeta { transaction , meta } ,
) ,
2021-01-20 22:10:35 -08:00
block_time ,
2020-04-08 23:57:30 -07:00
} ) )
} else {
Ok ( None )
}
}
fn find_transaction_in_slot (
& self ,
slot : Slot ,
signature : Signature ,
2021-08-17 15:17:56 -07:00
) -> Result < Option < VersionedTransaction > > {
2020-04-09 13:09:59 -07:00
let slot_entries = self . get_slot_entries ( slot , 0 ) ? ;
2020-04-08 23:57:30 -07:00
Ok ( slot_entries
. iter ( )
. cloned ( )
. flat_map ( | entry | entry . transactions )
2021-02-13 22:32:43 -08:00
. map ( | transaction | {
2022-05-06 12:19:50 -07:00
if let Err ( err ) = transaction . sanitize (
true , // require_static_program_ids
) {
2021-02-13 22:32:43 -08:00
warn! (
" Blockstore::find_transaction_in_slot sanitize failed: {:?}, \
slot : { :? } , \
{ :? } " ,
err , slot , transaction ,
) ;
}
transaction
} )
2020-04-08 23:57:30 -07:00
. find ( | transaction | transaction . signatures [ 0 ] = = signature ) )
}
2021-03-26 15:47:35 -07:00
// Returns all rooted signatures for an address, ordered by slot that the transaction was
// processed in. Within each slot the transactions will be ordered by signature, and NOT by
2020-08-05 11:21:22 -07:00
// the order in which the transactions exist in the block
2021-03-31 21:35:57 -07:00
//
// DEPRECATED
2020-04-09 20:21:31 -07:00
fn find_address_signatures (
& self ,
pubkey : Pubkey ,
start_slot : Slot ,
end_slot : Slot ,
) -> Result < Vec < ( Slot , Signature ) > > {
2021-05-28 00:42:56 -07:00
let ( lock , lowest_available_slot ) = self . ensure_lowest_cleanup_slot ( ) ;
2020-04-09 20:21:31 -07:00
let mut signatures : Vec < ( Slot , Signature ) > = vec! [ ] ;
for transaction_status_cf_primary_index in 0 ..= 1 {
let index_iterator = self . address_signatures_cf . iter ( IteratorMode ::From (
(
transaction_status_cf_primary_index ,
pubkey ,
2021-05-28 00:42:56 -07:00
start_slot . max ( lowest_available_slot ) ,
2020-04-09 20:21:31 -07:00
Signature ::default ( ) ,
) ,
IteratorDirection ::Forward ,
) ) ? ;
for ( ( i , address , slot , signature ) , _ ) in index_iterator {
if i ! = transaction_status_cf_primary_index | | slot > end_slot | | address ! = pubkey
{
break ;
}
if self . is_root ( slot ) {
signatures . push ( ( slot , signature ) ) ;
}
}
}
2021-05-28 00:42:56 -07:00
drop ( lock ) ;
2020-08-10 09:27:38 -07:00
signatures . sort_by ( | a , b | a . 0. partial_cmp ( & b . 0 ) . unwrap ( ) . then ( a . 1. cmp ( & b . 1 ) ) ) ;
2020-04-09 20:21:31 -07:00
Ok ( signatures )
}
2021-03-31 21:35:57 -07:00
// Returns all signatures for an address in a particular slot, regardless of whether that slot
// has been rooted. The transactions will be ordered by signature, and NOT by the order in
// which the transactions exist in the block
fn find_address_signatures_for_slot (
& self ,
pubkey : Pubkey ,
slot : Slot ,
) -> Result < Vec < ( Slot , Signature ) > > {
2021-05-28 00:42:56 -07:00
let ( lock , lowest_available_slot ) = self . ensure_lowest_cleanup_slot ( ) ;
2021-03-31 21:35:57 -07:00
let mut signatures : Vec < ( Slot , Signature ) > = vec! [ ] ;
for transaction_status_cf_primary_index in 0 ..= 1 {
let index_iterator = self . address_signatures_cf . iter ( IteratorMode ::From (
(
transaction_status_cf_primary_index ,
pubkey ,
2021-05-28 00:42:56 -07:00
slot . max ( lowest_available_slot ) ,
2021-03-31 21:35:57 -07:00
Signature ::default ( ) ,
) ,
IteratorDirection ::Forward ,
) ) ? ;
for ( ( i , address , transaction_slot , signature ) , _ ) in index_iterator {
if i ! = transaction_status_cf_primary_index
| | transaction_slot > slot
| | address ! = pubkey
{
break ;
}
signatures . push ( ( slot , signature ) ) ;
}
}
2021-05-28 00:42:56 -07:00
drop ( lock ) ;
2021-03-31 21:35:57 -07:00
signatures . sort_by ( | a , b | a . 0. partial_cmp ( & b . 0 ) . unwrap ( ) . then ( a . 1. cmp ( & b . 1 ) ) ) ;
Ok ( signatures )
}
// DEPRECATED
2020-04-09 20:21:31 -07:00
pub fn get_confirmed_signatures_for_address (
& self ,
pubkey : Pubkey ,
start_slot : Slot ,
end_slot : Slot ,
) -> Result < Vec < Signature > > {
2020-04-15 17:09:14 -07:00
datapoint_info! (
" blockstore-rpc-api " ,
2022-01-21 16:01:22 -08:00
( " method " , " get_confirmed_signatures_for_address " , String )
2020-04-15 17:09:14 -07:00
) ;
2020-04-09 20:21:31 -07:00
self . find_address_signatures ( pubkey , start_slot , end_slot )
. map ( | signatures | signatures . iter ( ) . map ( | ( _ , signature ) | * signature ) . collect ( ) )
}
2021-10-13 00:55:19 -07:00
fn get_sorted_block_signatures ( & self , slot : Slot ) -> Result < Vec < Signature > > {
let block = self . get_complete_block ( slot , false ) . map_err ( | err | {
BlockstoreError ::Io ( IoError ::new (
ErrorKind ::Other ,
format! ( " Unable to get block: {} " , err ) ,
) )
} ) ? ;
// Load all signatures for the block
let mut slot_signatures : Vec < _ > = block
. transactions
. into_iter ( )
. filter_map ( | transaction_with_meta | {
transaction_with_meta
. transaction
. signatures
. into_iter ( )
. next ( )
} )
. collect ( ) ;
// Reverse sort signatures as a way to entire a stable ordering within a slot, as
// the AddressSignatures column is ordered by signatures within a slot,
// not by block ordering
slot_signatures . sort_unstable_by ( | a , b | b . cmp ( a ) ) ;
Ok ( slot_signatures )
}
2020-07-27 11:42:49 -07:00
pub fn get_confirmed_signatures_for_address2 (
& self ,
address : Pubkey ,
2021-03-31 21:35:57 -07:00
highest_slot : Slot , // highest_confirmed_root or highest_confirmed_slot
2020-08-05 11:30:21 -07:00
before : Option < Signature > ,
2020-08-15 09:42:17 -07:00
until : Option < Signature > ,
2020-07-27 11:42:49 -07:00
limit : usize ,
2021-12-29 09:25:10 -08:00
) -> Result < SignatureInfosForAddress > {
2020-07-27 11:42:49 -07:00
datapoint_info! (
" blockstore-rpc-api " ,
2022-01-21 16:01:22 -08:00
( " method " , " get_confirmed_signatures_for_address2 " , String )
2020-07-27 11:42:49 -07:00
) ;
2021-04-04 21:14:02 -07:00
let last_root = self . last_root ( ) ;
2021-03-31 21:35:57 -07:00
let confirmed_unrooted_slots : Vec < _ > = AncestorIterator ::new_inclusive ( highest_slot , self )
2021-04-04 21:14:02 -07:00
. take_while ( | & slot | slot > last_root )
2021-03-31 21:35:57 -07:00
. collect ( ) ;
2020-07-27 11:42:49 -07:00
2020-08-05 11:21:22 -07:00
// Figure the `slot` to start listing signatures at, based on the ledger location of the
2020-08-05 11:30:21 -07:00
// `before` signature if present. Also generate a HashSet of signatures that should
2020-08-05 11:21:22 -07:00
// be excluded from the results.
2020-08-10 09:27:38 -07:00
let mut get_before_slot_timer = Measure ::start ( " get_before_slot_timer " ) ;
2020-08-15 09:42:17 -07:00
let ( slot , mut before_excluded_signatures ) = match before {
2021-03-31 21:35:57 -07:00
None = > ( highest_slot , None ) ,
2020-08-05 11:30:21 -07:00
Some ( before ) = > {
2021-03-31 21:35:57 -07:00
let transaction_status =
self . get_transaction_status ( before , & confirmed_unrooted_slots ) ? ;
2020-08-05 11:21:22 -07:00
match transaction_status {
2021-12-29 09:25:10 -08:00
None = > return Ok ( SignatureInfosForAddress ::default ( ) ) ,
2020-08-05 11:21:22 -07:00
Some ( ( slot , _ ) ) = > {
2021-10-13 00:55:19 -07:00
let mut slot_signatures = self . get_sorted_block_signatures ( slot ) ? ;
2020-08-05 11:30:21 -07:00
if let Some ( pos ) = slot_signatures . iter ( ) . position ( | & x | x = = before ) {
2020-08-05 11:21:22 -07:00
slot_signatures . truncate ( pos + 1 ) ;
2020-07-27 11:42:49 -07:00
}
2020-08-05 11:21:22 -07:00
(
slot ,
Some ( slot_signatures . into_iter ( ) . collect ::< HashSet < _ > > ( ) ) ,
)
2020-07-27 11:42:49 -07:00
}
}
}
} ;
2020-08-10 09:27:38 -07:00
get_before_slot_timer . stop ( ) ;
2020-07-27 11:42:49 -07:00
2020-08-15 09:42:17 -07:00
// Generate a HashSet of signatures that should be excluded from the results based on
// `until` signature
let mut get_until_slot_timer = Measure ::start ( " get_until_slot_timer " ) ;
let ( lowest_slot , until_excluded_signatures ) = match until {
None = > ( 0 , HashSet ::new ( ) ) ,
Some ( until ) = > {
2021-03-31 21:35:57 -07:00
let transaction_status =
self . get_transaction_status ( until , & confirmed_unrooted_slots ) ? ;
2020-08-15 09:42:17 -07:00
match transaction_status {
None = > ( 0 , HashSet ::new ( ) ) ,
Some ( ( slot , _ ) ) = > {
2021-10-13 00:55:19 -07:00
let mut slot_signatures = self . get_sorted_block_signatures ( slot ) ? ;
2020-08-15 09:42:17 -07:00
if let Some ( pos ) = slot_signatures . iter ( ) . position ( | & x | x = = until ) {
slot_signatures = slot_signatures . split_off ( pos ) ;
}
( slot , slot_signatures . into_iter ( ) . collect ::< HashSet < _ > > ( ) )
}
}
}
} ;
get_until_slot_timer . stop ( ) ;
2020-07-27 11:42:49 -07:00
// Fetch the list of signatures that affect the given address
let first_available_block = self . get_first_available_block ( ) ? ;
let mut address_signatures = vec! [ ] ;
2020-08-10 09:27:38 -07:00
// Get signatures in `slot`
let mut get_initial_slot_timer = Measure ::start ( " get_initial_slot_timer " ) ;
2021-03-31 21:35:57 -07:00
let mut signatures = self . find_address_signatures_for_slot ( address , slot ) ? ;
2020-08-10 09:27:38 -07:00
signatures . reverse ( ) ;
2020-08-15 09:42:17 -07:00
if let Some ( excluded_signatures ) = before_excluded_signatures . take ( ) {
2020-08-10 09:27:38 -07:00
address_signatures . extend (
signatures
. into_iter ( )
2021-06-18 06:34:46 -07:00
. filter ( | ( _ , signature ) | ! excluded_signatures . contains ( signature ) ) ,
2020-08-10 09:27:38 -07:00
)
} else {
address_signatures . append ( & mut signatures ) ;
}
get_initial_slot_timer . stop ( ) ;
// Check the active_transaction_status_index to see if it contains slot. If so, start with
// that index, as it will contain higher slots
let starting_primary_index = * self . active_transaction_status_index . read ( ) . unwrap ( ) ;
let next_primary_index = if starting_primary_index = = 0 { 1 } else { 0 } ;
let next_max_slot = self
. transaction_status_index_cf
. get ( next_primary_index ) ?
. unwrap ( )
. max_slot ;
let mut starting_primary_index_iter_timer = Measure ::start ( " starting_primary_index_iter " ) ;
if slot > next_max_slot {
let mut starting_iterator = self . address_signatures_cf . iter ( IteratorMode ::From (
( starting_primary_index , address , slot , Signature ::default ( ) ) ,
IteratorDirection ::Reverse ,
) ) ? ;
// Iterate through starting_iterator until limit is reached
while address_signatures . len ( ) < limit {
if let Some ( ( ( i , key_address , slot , signature ) , _ ) ) = starting_iterator . next ( ) {
2020-08-15 09:42:17 -07:00
if slot = = next_max_slot | | slot < lowest_slot {
2020-08-10 09:27:38 -07:00
break ;
}
if i = = starting_primary_index
& & key_address = = address
& & slot > = first_available_block
{
2021-03-31 21:35:57 -07:00
if self . is_root ( slot ) | | confirmed_unrooted_slots . contains ( & slot ) {
2020-08-13 10:07:42 -07:00
address_signatures . push ( ( slot , signature ) ) ;
}
2020-08-10 09:27:38 -07:00
continue ;
}
}
2020-07-27 11:42:49 -07:00
break ;
}
2020-08-10 09:27:38 -07:00
// Handle slots that cross primary indexes
2020-08-15 09:42:17 -07:00
if next_max_slot > = lowest_slot {
let mut signatures =
2021-03-31 21:35:57 -07:00
self . find_address_signatures_for_slot ( address , next_max_slot ) ? ;
2020-08-15 09:42:17 -07:00
signatures . reverse ( ) ;
address_signatures . append ( & mut signatures ) ;
}
2020-08-10 09:27:38 -07:00
}
starting_primary_index_iter_timer . stop ( ) ;
2020-07-27 11:42:49 -07:00
2020-08-10 09:27:38 -07:00
// Iterate through next_iterator until limit is reached
let mut next_primary_index_iter_timer = Measure ::start ( " next_primary_index_iter_timer " ) ;
let mut next_iterator = self . address_signatures_cf . iter ( IteratorMode ::From (
( next_primary_index , address , slot , Signature ::default ( ) ) ,
IteratorDirection ::Reverse ,
) ) ? ;
while address_signatures . len ( ) < limit {
if let Some ( ( ( i , key_address , slot , signature ) , _ ) ) = next_iterator . next ( ) {
// Skip next_max_slot, which is already included
if slot = = next_max_slot {
continue ;
}
2020-08-15 09:42:17 -07:00
if slot < lowest_slot {
break ;
}
2020-08-10 09:27:38 -07:00
if i = = next_primary_index
& & key_address = = address
& & slot > = first_available_block
{
2021-03-31 21:35:57 -07:00
if self . is_root ( slot ) | | confirmed_unrooted_slots . contains ( & slot ) {
2020-08-13 10:07:42 -07:00
address_signatures . push ( ( slot , signature ) ) ;
}
2020-08-10 09:27:38 -07:00
continue ;
}
2020-07-27 11:42:49 -07:00
}
2020-08-10 09:27:38 -07:00
break ;
2020-07-27 11:42:49 -07:00
}
2020-08-10 09:27:38 -07:00
next_primary_index_iter_timer . stop ( ) ;
2020-08-15 09:42:17 -07:00
let mut address_signatures : Vec < ( Slot , Signature ) > = address_signatures
. into_iter ( )
2021-06-18 06:34:46 -07:00
. filter ( | ( _ , signature ) | ! until_excluded_signatures . contains ( signature ) )
2020-08-15 09:42:17 -07:00
. collect ( ) ;
2020-07-27 11:42:49 -07:00
address_signatures . truncate ( limit ) ;
// Fill in the status information for each found transaction
2020-08-10 09:27:38 -07:00
let mut get_status_info_timer = Measure ::start ( " get_status_info_timer " ) ;
2020-07-27 11:42:49 -07:00
let mut infos = vec! [ ] ;
for ( slot , signature ) in address_signatures . into_iter ( ) {
2021-03-31 21:35:57 -07:00
let transaction_status =
self . get_transaction_status ( signature , & confirmed_unrooted_slots ) ? ;
2021-04-08 11:40:37 -07:00
let err = transaction_status . and_then ( | ( _slot , status ) | status . status . err ( ) ) ;
2021-09-02 23:28:52 -07:00
let memo = self . read_transaction_memos ( signature ) ? ;
2021-01-20 22:10:35 -08:00
let block_time = self . get_block_time ( slot ) ? ;
2020-07-27 11:42:49 -07:00
infos . push ( ConfirmedTransactionStatusWithSignature {
signature ,
slot ,
err ,
2021-09-02 23:28:52 -07:00
memo ,
2021-01-20 22:10:35 -08:00
block_time ,
2020-07-27 11:42:49 -07:00
} ) ;
}
2020-08-10 09:27:38 -07:00
get_status_info_timer . stop ( ) ;
datapoint_info! (
" blockstore-get-conf-sigs-for-addr-2 " ,
(
" get_before_slot_us " ,
get_before_slot_timer . as_us ( ) as i64 ,
i64
) ,
(
" get_initial_slot_us " ,
get_initial_slot_timer . as_us ( ) as i64 ,
i64
) ,
(
" starting_primary_index_iter_us " ,
starting_primary_index_iter_timer . as_us ( ) as i64 ,
i64
) ,
(
" next_primary_index_iter_us " ,
next_primary_index_iter_timer . as_us ( ) as i64 ,
i64
) ,
(
" get_status_info_us " ,
get_status_info_timer . as_us ( ) as i64 ,
i64
2020-08-15 09:42:17 -07:00
) ,
(
" get_until_slot_us " ,
get_until_slot_timer . as_us ( ) as i64 ,
i64
2020-08-10 09:27:38 -07:00
)
) ;
2020-07-27 11:42:49 -07:00
2021-12-29 09:25:10 -08:00
Ok ( SignatureInfosForAddress {
infos ,
found_before : true , // if `before` signature was not found, this method returned early
} )
2020-07-27 11:42:49 -07:00
}
2020-03-26 13:29:30 -07:00
pub fn read_rewards ( & self , index : Slot ) -> Result < Option < Rewards > > {
2020-10-15 17:04:10 -07:00
self . rewards_cf
. get_protobuf_or_bincode ::< Rewards > ( index )
. map ( | result | result . map ( | option | option . into ( ) ) )
2020-03-23 12:49:21 -07:00
}
2020-03-26 13:29:30 -07:00
pub fn write_rewards ( & self , index : Slot , rewards : Rewards ) -> Result < ( ) > {
2020-10-15 17:04:10 -07:00
let rewards = rewards . into ( ) ;
self . rewards_cf . put_protobuf ( index , & rewards )
2020-02-04 18:50:24 -08:00
}
2020-09-22 12:26:32 -07:00
pub fn get_recent_perf_samples ( & self , num : usize ) -> Result < Vec < ( Slot , PerfSample ) > > {
Ok ( self
. db
. iter ::< cf ::PerfSamples > ( IteratorMode ::End ) ?
. take ( num )
. map ( | ( slot , data ) | {
let perf_sample = deserialize ( & data ) . unwrap ( ) ;
( slot , perf_sample )
} )
. collect ( ) )
}
/// Stores a performance sample keyed by slot.
pub fn write_perf_sample(&self, index: Slot, perf_sample: &PerfSample) -> Result<()> {
    self.perf_samples_cf.put(index, perf_sample)
}
2021-07-01 09:32:41 -07:00
pub fn read_program_costs ( & self ) -> Result < Vec < ( Pubkey , u64 ) > > {
Ok ( self
. db
. iter ::< cf ::ProgramCosts > ( IteratorMode ::End ) ?
. map ( | ( pubkey , data ) | {
let program_cost : ProgramCost = deserialize ( & data ) . unwrap ( ) ;
( pubkey , program_cost . cost )
} )
. collect ( ) )
}
/// Stores the compute cost recorded for program `key`.
pub fn write_program_cost(&self, key: &Pubkey, value: &u64) -> Result<()> {
    let cost = ProgramCost { cost: *value };
    self.program_costs_cf.put(*key, &cost)
}
/// Removes the cost entry for program `key`.
pub fn delete_program_cost(&self, key: &Pubkey) -> Result<()> {
    self.program_costs_cf.delete(*key)
}
2019-09-03 21:32:51 -07:00
/// Returns the entry vector for the slot starting with `shred_start_index`
2020-04-09 13:09:59 -07:00
pub fn get_slot_entries ( & self , slot : Slot , shred_start_index : u64 ) -> Result < Vec < Entry > > {
2020-04-09 20:10:51 -07:00
self . get_slot_entries_with_shred_info ( slot , shred_start_index , false )
2019-02-26 21:57:45 -08:00
. map ( | x | x . 0 )
2019-02-07 15:10:54 -08:00
}
2019-11-08 17:21:54 -08:00
/// Returns the entry vector for the slot starting with `shred_start_index`, the number of
/// shreds that comprise the entry vector, and whether the slot is full (consumed all shreds).
pub fn get_slot_entries_with_shred_info (
2019-08-20 17:16:06 -07:00
& self ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
2019-10-21 16:15:10 -07:00
start_index : u64 ,
2020-04-09 20:10:51 -07:00
allow_dead_slots : bool ,
2020-01-14 17:15:26 -08:00
) -> Result < ( Vec < Entry > , u64 , bool ) > {
2021-05-26 16:12:57 -07:00
let ( completed_ranges , slot_meta ) = self . get_completed_ranges ( slot , start_index ) ? ;
// Check if the slot is dead *after* fetching completed ranges to avoid a race
// where a slot is marked dead by another thread before the completed range query finishes.
// This should be sufficient because full slots will never be marked dead from another thread,
// this can only happen during entry processing during replay stage.
2020-04-09 20:10:51 -07:00
if self . is_dead ( slot ) & & ! allow_dead_slots {
2020-01-13 13:13:52 -08:00
return Err ( BlockstoreError ::DeadSlot ) ;
2021-05-26 16:12:57 -07:00
} else if completed_ranges . is_empty ( ) {
2019-11-08 17:21:54 -08:00
return Ok ( ( vec! [ ] , 0 , false ) ) ;
2019-10-21 16:15:10 -07:00
}
2021-05-26 16:12:57 -07:00
2020-06-02 18:49:31 -07:00
let slot_meta = slot_meta . unwrap ( ) ;
2019-10-21 16:15:10 -07:00
let num_shreds = completed_ranges
. last ( )
2019-11-08 17:21:54 -08:00
. map ( | ( _ , end_index ) | u64 ::from ( * end_index ) - start_index + 1 )
2020-01-14 17:15:26 -08:00
. unwrap_or ( 0 ) ;
2019-10-21 16:15:10 -07:00
2022-05-05 13:00:50 -07:00
let entries : Result < Vec < Vec < Entry > > > = PAR_THREAD_POOL . install ( | | {
completed_ranges
. par_iter ( )
. map ( | ( start_index , end_index ) | {
self . get_entries_in_data_block ( slot , * start_index , * end_index , Some ( & slot_meta ) )
} )
. collect ( )
2019-10-21 16:15:10 -07:00
} ) ;
2019-11-08 17:21:54 -08:00
let entries : Vec < Entry > = entries ? . into_iter ( ) . flatten ( ) . collect ( ) ;
Ok ( ( entries , num_shreds , slot_meta . is_full ( ) ) )
2019-10-21 16:15:10 -07:00
}
2020-06-02 18:49:31 -07:00
fn get_completed_ranges (
& self ,
slot : Slot ,
start_index : u64 ,
) -> Result < ( CompletedRanges , Option < SlotMeta > ) > {
2021-06-04 15:40:27 -07:00
let _lock = self . check_lowest_cleanup_slot ( slot ) ? ;
2020-06-02 18:49:31 -07:00
let slot_meta_cf = self . db . column ::< cf ::SlotMeta > ( ) ;
let slot_meta = slot_meta_cf . get ( slot ) ? ;
if slot_meta . is_none ( ) {
return Ok ( ( vec! [ ] , slot_meta ) ) ;
}
let slot_meta = slot_meta . unwrap ( ) ;
// Find all the ranges for the completed data blocks
let completed_ranges = Self ::get_completed_data_ranges (
start_index as u32 ,
2021-10-31 05:56:25 -07:00
& slot_meta . completed_data_indexes ,
2020-06-02 18:49:31 -07:00
slot_meta . consumed as u32 ,
) ;
Ok ( ( completed_ranges , Some ( slot_meta ) ) )
}
2019-10-21 16:15:10 -07:00
// Get the range of indexes [start_index, end_index] of every completed data block
fn get_completed_data_ranges (
2021-10-31 05:56:25 -07:00
start_index : u32 ,
completed_data_indexes : & BTreeSet < u32 > ,
2019-10-21 16:15:10 -07:00
consumed : u32 ,
2020-06-02 18:49:31 -07:00
) -> CompletedRanges {
2021-10-31 05:56:25 -07:00
// `consumed` is the next missing shred index, but shred `i` existing in
// completed_data_end_indexes implies it's not missing
assert! ( ! completed_data_indexes . contains ( & consumed ) ) ;
completed_data_indexes
. range ( start_index .. consumed )
. scan ( start_index , | begin , index | {
let out = ( * begin , * index ) ;
* begin = index + 1 ;
Some ( out )
} )
. collect ( )
2019-10-16 14:32:18 -07:00
}
2019-08-20 17:16:06 -07:00
2020-09-01 22:06:06 -07:00
pub fn get_entries_in_data_block (
2019-10-16 14:32:18 -07:00
& self ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
2019-10-21 16:15:10 -07:00
start_index : u32 ,
end_index : u32 ,
2020-09-01 22:06:06 -07:00
slot_meta : Option < & SlotMeta > ,
2019-10-21 16:15:10 -07:00
) -> Result < Vec < Entry > > {
2020-04-24 15:04:23 -07:00
let data_shred_cf = self . db . column ::< cf ::ShredData > ( ) ;
2019-10-21 16:15:10 -07:00
// Short circuit on first error
let data_shreds : Result < Vec < Shred > > = ( start_index ..= end_index )
. map ( | i | {
2020-04-24 15:04:23 -07:00
data_shred_cf
. get_bytes ( ( slot , u64 ::from ( i ) ) )
2019-10-21 16:15:10 -07:00
. and_then ( | serialized_shred | {
2020-09-01 22:06:06 -07:00
if serialized_shred . is_none ( ) {
if let Some ( slot_meta ) = slot_meta {
panic! (
" Shred with
slot : { } ,
index : { } ,
consumed : { } ,
completed_indexes : { :? }
must exist if shred index was included in a range : { } { } " ,
slot ,
i ,
slot_meta . consumed ,
slot_meta . completed_data_indexes ,
start_index ,
end_index
) ;
} else {
return Err ( BlockstoreError ::InvalidShredData ( Box ::new (
bincode ::ErrorKind ::Custom ( format! (
" Missing shred for slot {}, index {} " ,
slot , i
) ) ,
) ) ) ;
}
}
Shred ::new_from_serialized_shred ( serialized_shred . unwrap ( ) ) . map_err ( | err | {
2020-01-13 13:13:52 -08:00
BlockstoreError ::InvalidShredData ( Box ::new ( bincode ::ErrorKind ::Custom (
2019-11-05 18:40:00 -08:00
format! (
" Could not reconstruct shred from shred payload: {:?} " ,
err
) ,
2019-10-21 16:15:10 -07:00
) ) )
} )
} )
} )
. collect ( ) ;
let data_shreds = data_shreds ? ;
2020-11-16 21:30:38 -08:00
let last_shred = data_shreds . last ( ) . unwrap ( ) ;
assert! ( last_shred . data_complete ( ) | | last_shred . last_in_slot ( ) ) ;
2019-10-21 16:15:10 -07:00
2020-05-19 12:38:18 -07:00
let deshred_payload = Shredder ::deshred ( & data_shreds ) . map_err ( | e | {
BlockstoreError ::InvalidShredData ( Box ::new ( bincode ::ErrorKind ::Custom ( format! (
" Could not reconstruct data block from constituent shreds, error: {:?} " ,
e
) ) ) )
2019-10-21 16:15:10 -07:00
} ) ? ;
debug! ( " {:?} shreds in last FEC set " , data_shreds . len ( ) , ) ;
2021-02-03 06:42:34 -08:00
bincode ::deserialize ::< Vec < Entry > > ( & deshred_payload ) . map_err ( | e | {
BlockstoreError ::InvalidShredData ( Box ::new ( bincode ::ErrorKind ::Custom ( format! (
" could not reconstruct entries: {:?} " ,
e
) ) ) )
2019-10-21 16:15:10 -07:00
} )
2019-08-20 17:16:06 -07:00
}
2020-06-02 18:49:31 -07:00
fn get_any_valid_slot_entries ( & self , slot : Slot , start_index : u64 ) -> Vec < Entry > {
let ( completed_ranges , slot_meta ) = self
. get_completed_ranges ( slot , start_index )
. unwrap_or_default ( ) ;
if completed_ranges . is_empty ( ) {
return vec! [ ] ;
}
let slot_meta = slot_meta . unwrap ( ) ;
2022-05-05 13:00:50 -07:00
let entries : Vec < Vec < Entry > > = PAR_THREAD_POOL_ALL_CPUS . install ( | | {
completed_ranges
. par_iter ( )
. map ( | ( start_index , end_index ) | {
self . get_entries_in_data_block ( slot , * start_index , * end_index , Some ( & slot_meta ) )
2020-09-01 22:06:06 -07:00
. unwrap_or_default ( )
2022-05-05 13:00:50 -07:00
} )
. collect ( )
2020-06-02 18:49:31 -07:00
} ) ;
entries . into_iter ( ) . flatten ( ) . collect ( )
}
2019-03-05 14:18:29 -08:00
// Returns slots connecting to any element of the list `slots`.
pub fn get_slots_since ( & self , slots : & [ u64 ] ) -> Result < HashMap < u64 , Vec < u64 > > > {
2019-02-07 15:10:54 -08:00
// Return error if there was a database error during lookup of any of the
// slot indexes
2019-03-05 14:18:29 -08:00
let slot_metas : Result < Vec < Option < SlotMeta > > > =
slots . iter ( ) . map ( | slot | self . meta ( * slot ) ) . collect ( ) ;
2019-02-07 15:10:54 -08:00
2019-02-28 19:49:22 -08:00
let slot_metas = slot_metas ? ;
2019-03-05 14:18:29 -08:00
let result : HashMap < u64 , Vec < u64 > > = slots
2019-02-28 19:49:22 -08:00
. iter ( )
. zip ( slot_metas )
2021-05-25 13:43:47 -07:00
. filter_map ( | ( height , meta ) | meta . map ( | meta | ( * height , meta . next_slots . to_vec ( ) ) ) )
2019-02-07 15:10:54 -08:00
. collect ( ) ;
2019-02-28 19:49:22 -08:00
Ok ( result )
2019-02-07 15:10:54 -08:00
}
2019-01-08 15:53:44 -08:00
2019-11-02 00:38:30 -07:00
pub fn is_root ( & self , slot : Slot ) -> bool {
2020-08-01 08:44:32 -07:00
matches! ( self . db . get ::< cf ::Root > ( slot ) , Ok ( Some ( true ) ) )
2019-04-15 13:12:28 -07:00
}
2020-12-16 12:40:36 -08:00
/// Returns true if a slot is between the rooted slot bounds of the ledger, but has not itself
/// been rooted. This is either because the slot was skipped, or due to a gap in ledger data,
/// as when booting from a newer snapshot.
pub fn is_skipped ( & self , slot : Slot ) -> bool {
let lowest_root = self
. rooted_slot_iterator ( 0 )
. ok ( )
. and_then ( | mut iter | iter . next ( ) )
. unwrap_or_default ( ) ;
match self . db . get ::< cf ::Root > ( slot ) . ok ( ) . flatten ( ) {
Some ( _ ) = > false ,
None = > slot < self . max_root ( ) & & slot > lowest_root ,
}
}
2021-07-12 20:59:16 -07:00
pub fn insert_bank_hash ( & self , slot : Slot , frozen_hash : Hash , is_duplicate_confirmed : bool ) {
if let Some ( prev_value ) = self . bank_hash_cf . get ( slot ) . unwrap ( ) {
if prev_value . frozen_hash ( ) = = frozen_hash & & prev_value . is_duplicate_confirmed ( ) {
// Don't overwrite is_duplicate_confirmed == true with is_duplicate_confirmed == false,
// which may happen on startup when procesing from blockstore processor because the
// blocks may not reflect earlier observed gossip votes from before the restart.
return ;
}
}
let data = FrozenHashVersioned ::Current ( FrozenHashStatus {
frozen_hash ,
is_duplicate_confirmed ,
} ) ;
self . bank_hash_cf . put ( slot , & data ) . unwrap ( )
}
pub fn get_bank_hash ( & self , slot : Slot ) -> Option < Hash > {
self . bank_hash_cf
. get ( slot )
. unwrap ( )
. map ( | versioned | versioned . frozen_hash ( ) )
}
pub fn is_duplicate_confirmed ( & self , slot : Slot ) -> bool {
self . bank_hash_cf
. get ( slot )
. unwrap ( )
. map ( | versioned | versioned . is_duplicate_confirmed ( ) )
. unwrap_or ( false )
}
2022-05-24 12:03:28 -07:00
pub fn insert_optimistic_slot (
& self ,
slot : Slot ,
hash : & Hash ,
timestamp : UnixTimestamp ,
) -> Result < ( ) > {
let slot_data = OptimisticSlotMetaVersioned ::new ( * hash , timestamp ) ;
self . optimistic_slots_cf . put ( slot , & slot_data )
}
pub fn get_latest_optimistic_slots (
& self ,
num : usize ,
) -> Result < Vec < ( Slot , Hash , UnixTimestamp ) > > {
Ok ( self
. db
. iter ::< cf ::OptimisticSlots > ( IteratorMode ::End ) ?
. take ( num )
. map ( | ( slot , data ) | {
let meta : OptimisticSlotMetaVersioned = deserialize ( & data ) . unwrap ( ) ;
( slot , meta . hash ( ) , meta . timestamp ( ) )
} )
. collect ( ) )
}
2021-07-12 20:59:16 -07:00
pub fn set_duplicate_confirmed_slots_and_hashes (
& self ,
duplicate_confirmed_slot_hashes : impl Iterator < Item = ( Slot , Hash ) > ,
) -> Result < ( ) > {
let mut write_batch = self . db . batch ( ) ? ;
for ( slot , frozen_hash ) in duplicate_confirmed_slot_hashes {
let data = FrozenHashVersioned ::Current ( FrozenHashStatus {
frozen_hash ,
is_duplicate_confirmed : true ,
} ) ;
write_batch . put ::< cf ::BankHash > ( slot , & data ) ? ;
}
self . db . write ( write_batch ) ? ;
Ok ( ( ) )
}
2021-07-01 20:02:40 -07:00
pub fn set_roots < ' a > ( & self , rooted_slots : impl Iterator < Item = & ' a Slot > ) -> Result < ( ) > {
2019-10-24 10:30:53 -07:00
let mut write_batch = self . db . batch ( ) ? ;
2021-07-01 20:02:40 -07:00
let mut max_new_rooted_slot = 0 ;
2019-10-24 10:30:53 -07:00
for slot in rooted_slots {
2021-07-01 20:02:40 -07:00
max_new_rooted_slot = std ::cmp ::max ( max_new_rooted_slot , * slot ) ;
2019-10-24 10:30:53 -07:00
write_batch . put ::< cf ::Root > ( * slot , & true ) ? ;
2019-05-20 19:04:18 -07:00
}
2019-08-27 15:09:41 -07:00
2019-10-24 10:30:53 -07:00
self . db . write ( write_batch ) ? ;
2019-08-27 15:09:41 -07:00
let mut last_root = self . last_root . write ( ) . unwrap ( ) ;
if * last_root = = std ::u64 ::MAX {
* last_root = 0 ;
}
2021-07-01 20:02:40 -07:00
* last_root = cmp ::max ( max_new_rooted_slot , * last_root ) ;
2019-05-20 19:04:18 -07:00
Ok ( ( ) )
2019-05-03 14:46:02 -07:00
}
2019-11-02 00:38:30 -07:00
pub fn is_dead ( & self , slot : Slot ) -> bool {
2020-08-01 08:44:32 -07:00
matches! (
self . db
. get ::< cf ::DeadSlots > ( slot )
. expect ( " fetch from DeadSlots column family failed " ) ,
Some ( true )
)
2019-06-20 15:50:41 -07:00
}
2019-11-02 00:38:30 -07:00
pub fn set_dead_slot ( & self , slot : Slot ) -> Result < ( ) > {
2019-06-20 15:50:41 -07:00
self . dead_slots_cf . put ( slot , & true )
}
2021-10-19 20:23:16 -07:00
pub fn remove_dead_slot ( & self , slot : Slot ) -> Result < ( ) > {
self . dead_slots_cf . delete ( slot )
}
2020-12-09 23:14:31 -08:00
pub fn store_duplicate_if_not_existing (
& self ,
slot : Slot ,
shred1 : Vec < u8 > ,
shred2 : Vec < u8 > ,
) -> Result < ( ) > {
if ! self . has_duplicate_shreds_in_slot ( slot ) {
self . store_duplicate_slot ( slot , shred1 , shred2 )
} else {
Ok ( ( ) )
}
}
2020-01-13 17:21:39 -08:00
pub fn store_duplicate_slot ( & self , slot : Slot , shred1 : Vec < u8 > , shred2 : Vec < u8 > ) -> Result < ( ) > {
let duplicate_slot_proof = DuplicateSlotProof ::new ( shred1 , shred2 ) ;
self . duplicate_slots_cf . put ( slot , & duplicate_slot_proof )
}
pub fn get_duplicate_slot ( & self , slot : u64 ) -> Option < DuplicateSlotProof > {
self . duplicate_slots_cf
. get ( slot )
. expect ( " fetch from DuplicateSlots column family failed " )
}
2020-06-17 20:54:52 -07:00
// `new_shred` is assumed to have slot and index equal to the given slot and index.
2020-01-16 15:27:54 -08:00
// Returns the existing shred if `new_shred` is not equal to the existing shred at the
// given slot and index as this implies the leader generated two different shreds with
2020-01-13 17:21:39 -08:00
// the same slot and index
2022-04-28 16:42:37 -07:00
pub fn is_shred_duplicate ( & self , shred : ShredId , payload : Vec < u8 > ) -> Option < Vec < u8 > > {
2021-12-14 09:34:02 -08:00
let ( slot , index , shred_type ) = shred . unwrap ( ) ;
2021-11-18 06:58:56 -08:00
let existing_shred = match shred_type {
ShredType ::Data = > self . get_data_shred ( slot , index as u64 ) ,
ShredType ::Code = > self . get_coding_shred ( slot , index as u64 ) ,
}
. expect ( " fetch from DuplicateSlots column family failed " ) ? ;
2021-04-27 15:40:41 -07:00
let new_shred = Shred ::new_from_serialized_shred ( payload ) . unwrap ( ) ;
2022-04-25 05:43:22 -07:00
( existing_shred ! = * new_shred . payload ( ) ) . then ( | | existing_shred )
2020-01-13 17:21:39 -08:00
}
pub fn has_duplicate_shreds_in_slot ( & self , slot : Slot ) -> bool {
self . duplicate_slots_cf
. get ( slot )
. expect ( " fetch from DuplicateSlots column family failed " )
. is_some ( )
}
2020-12-13 17:26:34 -08:00
pub fn orphans_iterator ( & self , slot : Slot ) -> Result < impl Iterator < Item = u64 > + '_ > {
2020-03-04 18:10:30 -08:00
let orphans_iter = self
2019-10-18 08:18:36 -07:00
. db
2020-03-04 18:10:30 -08:00
. iter ::< cf ::Orphans > ( IteratorMode ::From ( slot , IteratorDirection ::Forward ) ) ? ;
Ok ( orphans_iter . map ( | ( slot , _ ) | slot ) )
2019-04-06 19:41:22 -07:00
}
2020-12-13 17:26:34 -08:00
pub fn dead_slots_iterator ( & self , slot : Slot ) -> Result < impl Iterator < Item = Slot > + '_ > {
2020-05-05 14:07:21 -07:00
let dead_slots_iterator = self
. db
. iter ::< cf ::DeadSlots > ( IteratorMode ::From ( slot , IteratorDirection ::Forward ) ) ? ;
Ok ( dead_slots_iterator . map ( | ( slot , _ ) | slot ) )
}
2021-04-02 21:48:44 -07:00
pub fn duplicate_slots_iterator ( & self , slot : Slot ) -> Result < impl Iterator < Item = Slot > + '_ > {
let duplicate_slots_iterator = self
. db
. iter ::< cf ::DuplicateSlots > ( IteratorMode ::From ( slot , IteratorDirection ::Forward ) ) ? ;
Ok ( duplicate_slots_iterator . map ( | ( slot , _ ) | slot ) )
}
2019-12-05 11:25:13 -08:00
pub fn last_root ( & self ) -> Slot {
2019-08-27 15:09:41 -07:00
* self . last_root . read ( ) . unwrap ( )
}
2019-12-05 11:25:13 -08:00
2020-01-13 13:13:52 -08:00
// find the first available slot in blockstore that has some data in it
2019-12-05 11:25:13 -08:00
pub fn lowest_slot ( & self ) -> Slot {
for ( slot , meta ) in self
. slot_meta_iterator ( 0 )
. expect ( " unable to iterate over meta " )
{
if slot > 0 & & meta . received > 0 {
return slot ;
}
}
2020-01-13 13:13:52 -08:00
// This means blockstore is empty, should never get here aside from right at boot.
2019-12-05 11:25:13 -08:00
self . last_root ( )
}
2019-12-06 19:32:45 -08:00
2022-04-27 23:36:19 -07:00
fn lowest_slot_with_genesis ( & self ) -> Slot {
for ( slot , meta ) in self
. slot_meta_iterator ( 0 )
. expect ( " unable to iterate over meta " )
{
if meta . received > 0 {
return slot ;
}
}
// This means blockstore is empty, should never get here aside from right at boot.
self . last_root ( )
}
2021-05-24 12:24:47 -07:00
pub fn lowest_cleanup_slot ( & self ) -> Slot {
* self . lowest_cleanup_slot . read ( ) . unwrap ( )
}
2019-12-12 11:55:30 -08:00
pub fn storage_size ( & self ) -> Result < u64 > {
2020-04-24 15:04:23 -07:00
self . db . storage_size ( )
2019-12-06 19:32:45 -08:00
}
2020-06-02 21:32:44 -07:00
2022-01-19 19:31:19 -08:00
/// Returns the total physical storage size contributed by all data shreds.
///
/// Note that the reported size does not include those recently inserted
/// shreds that are still in memory.
2022-03-05 16:13:03 -08:00
pub fn total_data_shred_storage_size ( & self ) -> Result < i64 > {
2022-01-19 19:31:19 -08:00
let shred_data_cf = self . db . column ::< cf ::ShredData > ( ) ;
2022-02-23 21:02:11 -08:00
shred_data_cf . get_int_property ( RocksProperties ::TOTAL_SST_FILES_SIZE )
2022-01-19 19:31:19 -08:00
}
/// Returns the total physical storage size contributed by all coding shreds.
///
/// Note that the reported size does not include those recently inserted
/// shreds that are still in memory.
2022-03-05 16:13:03 -08:00
pub fn total_coding_shred_storage_size ( & self ) -> Result < i64 > {
2022-01-19 19:31:19 -08:00
let shred_code_cf = self . db . column ::< cf ::ShredCode > ( ) ;
2022-02-23 21:02:11 -08:00
shred_code_cf . get_int_property ( RocksProperties ::TOTAL_SST_FILES_SIZE )
2022-01-19 19:31:19 -08:00
}
2022-04-29 18:05:39 -07:00
/// Returns whether the blockstore has primary (read and write) access
2020-06-02 21:32:44 -07:00
pub fn is_primary_access ( & self ) -> bool {
self . db . is_primary_access ( )
}
2021-05-24 12:24:47 -07:00
2022-03-17 12:43:57 -07:00
pub fn scan_and_fix_roots ( & self , exit : & AtomicBool ) -> Result < ( ) > {
2021-06-18 06:34:46 -07:00
let ancestor_iterator = AncestorIterator ::new ( self . last_root ( ) , self )
2021-05-24 12:24:47 -07:00
. take_while ( | & slot | slot > = self . lowest_cleanup_slot ( ) ) ;
let mut find_missing_roots = Measure ::start ( " find_missing_roots " ) ;
let mut roots_to_fix = vec! [ ] ;
for slot in ancestor_iterator . filter ( | slot | ! self . is_root ( * slot ) ) {
if exit . load ( Ordering ::Relaxed ) {
return Ok ( ( ) ) ;
}
roots_to_fix . push ( slot ) ;
}
find_missing_roots . stop ( ) ;
let mut fix_roots = Measure ::start ( " fix_roots " ) ;
if ! roots_to_fix . is_empty ( ) {
info! ( " {} slots to be rooted " , roots_to_fix . len ( ) ) ;
for chunk in roots_to_fix . chunks ( 100 ) {
if exit . load ( Ordering ::Relaxed ) {
return Ok ( ( ) ) ;
}
trace! ( " {:?} " , chunk ) ;
2021-07-01 20:02:40 -07:00
self . set_roots ( chunk . iter ( ) ) ? ;
2021-05-24 12:24:47 -07:00
}
} else {
debug! (
" No missing roots found in range {} to {} " ,
self . lowest_cleanup_slot ( ) ,
self . last_root ( )
) ;
}
fix_roots . stop ( ) ;
datapoint_info! (
" blockstore-scan_and_fix_roots " ,
(
" find_missing_roots_us " ,
find_missing_roots . as_us ( ) as i64 ,
i64
) ,
( " num_roots_to_fix " , roots_to_fix . len ( ) as i64 , i64 ) ,
( " fix_roots_us " , fix_roots . as_us ( ) as i64 , i64 ) ,
) ;
Ok ( ( ) )
}
2019-04-26 08:52:10 -07:00
}
2019-02-07 15:10:54 -08:00
2020-09-01 22:06:06 -07:00
// Update the `completed_data_indexes` with a new shred `new_shred_index`. If a
// data set is complete, return the range of shred indexes [start_index, end_index]
// for that completed data set.
fn update_completed_data_indexes (
is_last_in_data : bool ,
new_shred_index : u32 ,
received_data_shreds : & ShredIndex ,
2021-10-31 05:56:25 -07:00
// Shreds indices which are marked data complete.
completed_data_indexes : & mut BTreeSet < u32 > ,
2020-09-01 22:06:06 -07:00
) -> Vec < ( u32 , u32 ) > {
2021-10-31 05:56:25 -07:00
let start_shred_index = completed_data_indexes
. range ( .. new_shred_index )
. next_back ( )
. map ( | index | index + 1 )
. unwrap_or_default ( ) ;
2020-09-01 22:06:06 -07:00
// Consecutive entries i, k, j in this vector represent potential ranges [i, k),
// [k, j) that could be completed data ranges
2021-10-31 05:56:25 -07:00
let mut shred_indices = vec! [ start_shred_index ] ;
2020-09-01 22:06:06 -07:00
// `new_shred_index` is data complete, so need to insert here into the
// `completed_data_indexes`
if is_last_in_data {
2021-10-31 05:56:25 -07:00
completed_data_indexes . insert ( new_shred_index ) ;
shred_indices . push ( new_shred_index + 1 ) ;
}
if let Some ( index ) = completed_data_indexes . range ( new_shred_index + 1 .. ) . next ( ) {
shred_indices . push ( index + 1 ) ;
}
shred_indices
. windows ( 2 )
. filter ( | ix | {
let ( begin , end ) = ( ix [ 0 ] as u64 , ix [ 1 ] as u64 ) ;
let num_shreds = ( end - begin ) as usize ;
2021-12-16 11:17:32 -08:00
received_data_shreds . range ( begin .. end ) . count ( ) = = num_shreds
2021-10-31 05:56:25 -07:00
} )
. map ( | ix | ( ix [ 0 ] , ix [ 1 ] - 1 ) )
. collect ( )
2020-09-01 22:06:06 -07:00
}
2019-08-20 17:16:06 -07:00
fn update_slot_meta (
is_last_in_slot : bool ,
2019-10-21 16:15:10 -07:00
is_last_in_data : bool ,
2019-08-20 17:16:06 -07:00
slot_meta : & mut SlotMeta ,
2019-10-21 16:15:10 -07:00
index : u32 ,
2019-08-20 17:16:06 -07:00
new_consumed : u64 ,
2019-11-07 11:08:09 -08:00
reference_tick : u8 ,
2020-09-01 22:06:06 -07:00
received_data_shreds : & ShredIndex ,
) -> Vec < ( u32 , u32 ) > {
2019-11-07 11:08:09 -08:00
let maybe_first_insert = slot_meta . received = = 0 ;
2019-04-26 08:52:10 -07:00
// Index is zero-indexed, while the "received" height starts from 1,
2019-09-03 21:32:51 -07:00
// so received = index + 1 for the same shred.
2019-10-21 16:15:10 -07:00
slot_meta . received = cmp ::max ( ( u64 ::from ( index ) + 1 ) as u64 , slot_meta . received ) ;
2019-11-07 11:08:09 -08:00
if maybe_first_insert & & slot_meta . received > 0 {
// predict the timestamp of what would have been the first shred in this slot
let slot_time_elapsed = u64 ::from ( reference_tick ) * 1000 / DEFAULT_TICKS_PER_SECOND ;
slot_meta . first_shred_timestamp = timestamp ( ) - slot_time_elapsed ;
}
2019-04-26 08:52:10 -07:00
slot_meta . consumed = new_consumed ;
2021-12-11 06:47:20 -08:00
// If the last index in the slot hasn't been set before, then
// set it to this shred index
if is_last_in_slot & & slot_meta . last_index . is_none ( ) {
slot_meta . last_index = Some ( u64 ::from ( index ) ) ;
}
2020-09-01 22:06:06 -07:00
update_completed_data_indexes (
is_last_in_slot | | is_last_in_data ,
index ,
received_data_shreds ,
& mut slot_meta . completed_data_indexes ,
)
2019-08-20 17:16:06 -07:00
}
2019-09-04 17:14:42 -07:00
fn get_index_meta_entry < ' a > (
2019-08-20 17:16:06 -07:00
db : & Database ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
2019-10-30 16:48:59 -07:00
index_working_set : & ' a mut HashMap < u64 , IndexMetaWorkingSetEntry > ,
index_meta_time : & mut u64 ,
) -> & ' a mut IndexMetaWorkingSetEntry {
2019-09-04 17:14:42 -07:00
let index_cf = db . column ::< cf ::Index > ( ) ;
2019-10-30 16:48:59 -07:00
let mut total_start = Measure ::start ( " Total elapsed " ) ;
let res = index_working_set . entry ( slot ) . or_insert_with ( | | {
let newly_inserted_meta = index_cf
. get ( slot )
. unwrap ( )
. unwrap_or_else ( | | Index ::new ( slot ) ) ;
IndexMetaWorkingSetEntry {
index : newly_inserted_meta ,
did_insert_occur : false ,
}
} ) ;
total_start . stop ( ) ;
* index_meta_time + = total_start . as_us ( ) ;
res
2019-04-26 08:52:10 -07:00
}
2019-02-07 15:10:54 -08:00
2021-12-15 13:12:38 -08:00
/// Obtain the SlotMeta from the in-memory slot_meta_working_set or load
/// it from the database if it does not exist in slot_meta_working_set.
///
/// In case none of the above has the specified SlotMeta, a new one will
/// be created.
///
/// Note that this function will also update the parent slot of the specified
/// slot.
///
/// Arguments:
/// - `db`: the database
/// - `slot_meta_working_set`: a in-memory structure for storing the cached
/// SlotMeta.
/// - `slot`: the slot for loading its meta.
/// - `parent_slot`: the parent slot to be assigned to the specified slot meta
///
/// This function returns the matched `SlotMetaWorkingSetEntry`. If such entry
/// does not exist in the database, a new entry will be created.
2019-09-04 17:14:42 -07:00
fn get_slot_meta_entry < ' a > (
db : & Database ,
slot_meta_working_set : & ' a mut HashMap < u64 , SlotMetaWorkingSetEntry > ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
parent_slot : Slot ,
2019-10-31 14:03:41 -07:00
) -> & ' a mut SlotMetaWorkingSetEntry {
2019-09-04 17:14:42 -07:00
let meta_cf = db . column ::< cf ::SlotMeta > ( ) ;
2019-08-27 15:09:41 -07:00
2019-11-14 11:49:31 -08:00
// Check if we've already inserted the slot metadata for this shred's slot
2019-10-31 14:03:41 -07:00
slot_meta_working_set . entry ( slot ) . or_insert_with ( | | {
// Store a 2-tuple of the metadata (working copy, backup copy)
if let Some ( mut meta ) = meta_cf . get ( slot ) . expect ( " Expect database get to succeed " ) {
let backup = Some ( meta . clone ( ) ) ;
2021-12-14 10:57:11 -08:00
// If parent_slot == None, then this is one of the orphans inserted
2019-10-31 14:03:41 -07:00
// during the chaining process, see the function find_slot_meta_in_cached_state()
// for details. Slots that are orphans are missing a parent_slot, so we should
// fill in the parent now that we know it.
if is_orphan ( & meta ) {
2021-12-14 10:57:11 -08:00
meta . parent_slot = Some ( parent_slot ) ;
2019-09-04 17:14:42 -07:00
}
2019-10-31 14:03:41 -07:00
SlotMetaWorkingSetEntry ::new ( Rc ::new ( RefCell ::new ( meta ) ) , backup )
} else {
SlotMetaWorkingSetEntry ::new (
2021-12-14 10:57:11 -08:00
Rc ::new ( RefCell ::new ( SlotMeta ::new ( slot , Some ( parent_slot ) ) ) ) ,
2019-10-31 14:03:41 -07:00
None ,
)
}
} )
2019-04-26 08:52:10 -07:00
}
2019-11-17 19:17:15 -08:00
fn get_last_hash < ' a > ( iterator : impl Iterator < Item = & ' a Entry > + ' a ) -> Option < Hash > {
iterator . last ( ) . map ( | entry | entry . hash )
}
2019-11-02 00:38:30 -07:00
fn is_valid_write_to_slot_0 ( slot_to_write : u64 , parent_slot : Slot , last_root : u64 ) -> bool {
2019-08-27 15:09:41 -07:00
slot_to_write = = 0 & & last_root = = 0 & & parent_slot = = 0
}
2019-07-10 11:08:17 -07:00
fn send_signals (
2022-01-11 02:44:46 -08:00
new_shreds_signals : & [ Sender < bool > ] ,
completed_slots_senders : & [ Sender < Vec < u64 > > ] ,
2019-07-10 11:08:17 -07:00
should_signal : bool ,
newly_completed_slots : Vec < u64 > ,
2020-12-13 17:26:34 -08:00
) {
2019-07-10 11:08:17 -07:00
if should_signal {
2019-09-03 21:32:51 -07:00
for signal in new_shreds_signals {
2022-04-05 06:57:12 -07:00
match signal . try_send ( true ) {
Ok ( _ ) = > { }
Err ( TrySendError ::Full ( _ ) ) = > {
trace! ( " replay wake up signal channel is full. " )
}
Err ( TrySendError ::Disconnected ( _ ) ) = > {
trace! ( " replay wake up signal channel is disconnected. " )
}
}
2019-07-10 11:08:17 -07:00
}
}
if ! completed_slots_senders . is_empty ( ) & & ! newly_completed_slots . is_empty ( ) {
let mut slots : Vec < _ > = ( 0 .. completed_slots_senders . len ( ) - 1 )
. map ( | _ | newly_completed_slots . clone ( ) )
. collect ( ) ;
slots . push ( newly_completed_slots ) ;
for ( signal , slots ) in completed_slots_senders . iter ( ) . zip ( slots . into_iter ( ) ) {
let res = signal . try_send ( slots ) ;
if let Err ( TrySendError ::Full ( _ ) ) = res {
2019-09-07 12:48:45 -07:00
datapoint_error! (
2020-01-13 13:13:52 -08:00
" blockstore_error " ,
2019-09-07 12:48:45 -07:00
(
" error " ,
2022-01-21 16:01:22 -08:00
" Unable to send newly completed slot because channel is full " ,
2019-09-07 12:48:45 -07:00
String
) ,
2019-07-10 11:08:17 -07:00
) ;
}
}
}
}
2021-12-15 13:12:50 -08:00
/// For each slot in the slot_meta_working_set which has any change, include
/// corresponding updates to cf::SlotMeta via the specified `write_batch`.
/// The `write_batch` will later be atomically committed to the blockstore.
///
/// Arguments:
/// - `slot_meta_working_set`: a map that maintains slot-id to its `SlotMeta`
/// mapping.
/// - `completed_slot_senders`: the units which are responsible for sending
/// signals for completed slots.
/// - `write_batch`: the write batch which includes all the updates of the
/// the current write and ensures their atomicity.
///
/// On success, the function returns an Ok result with <should_signal,
/// newly_completed_slots> pair where:
/// - `should_signal`: a boolean flag indicating whether to send signal.
/// - `newly_completed_slots`: a subset of slot_meta_working_set which are
/// newly completed.
2019-09-04 17:14:42 -07:00
fn commit_slot_meta_working_set (
slot_meta_working_set : & HashMap < u64 , SlotMetaWorkingSetEntry > ,
2022-01-11 02:44:46 -08:00
completed_slots_senders : & [ Sender < Vec < u64 > > ] ,
2019-07-10 11:08:17 -07:00
write_batch : & mut WriteBatch ,
) -> Result < ( bool , Vec < u64 > ) > {
let mut should_signal = false ;
let mut newly_completed_slots = vec! [ ] ;
// Check if any metadata was changed, if so, insert the new version of the
// metadata into the write batch
2019-10-31 14:03:41 -07:00
for ( slot , slot_meta_entry ) in slot_meta_working_set . iter ( ) {
// Any slot that wasn't written to should have been filtered out by now.
assert! ( slot_meta_entry . did_insert_occur ) ;
let meta : & SlotMeta = & RefCell ::borrow ( & * slot_meta_entry . new_slot_meta ) ;
let meta_backup = & slot_meta_entry . old_slot_meta ;
2019-07-10 11:08:17 -07:00
if ! completed_slots_senders . is_empty ( ) & & is_newly_completed_slot ( meta , meta_backup ) {
newly_completed_slots . push ( * slot ) ;
}
// Check if the working copy of the metadata has changed
if Some ( meta ) ! = meta_backup . as_ref ( ) {
2021-06-18 06:34:46 -07:00
should_signal = should_signal | | slot_has_updates ( meta , meta_backup ) ;
write_batch . put ::< cf ::SlotMeta > ( * slot , meta ) ? ;
2019-07-10 11:08:17 -07:00
}
}
Ok ( ( should_signal , newly_completed_slots ) )
}
2022-01-28 12:07:09 -08:00
/// Returns the `SlotMeta` with the specified `slot_index`. The resulting
/// `SlotMeta` could be either from the cache or from the DB. Specifically,
/// the function:
///
/// 1) Finds the slot metadata in the cache of dirty slot metadata we've
/// previously touched, otherwise:
2022-02-10 19:43:44 -08:00
/// 2) Searches the database for that slot metadata. If still no luck, then:
2022-01-28 12:07:09 -08:00
/// 3) Create a dummy orphan slot in the database.
///
/// Also see [`find_slot_meta_in_cached_state`] and [`find_slot_meta_in_db_else_create`].
2019-04-26 08:52:10 -07:00
fn find_slot_meta_else_create < ' a > (
db : & Database ,
2019-09-04 17:14:42 -07:00
working_set : & ' a HashMap < u64 , SlotMetaWorkingSetEntry > ,
2019-04-26 08:52:10 -07:00
chained_slots : & ' a mut HashMap < u64 , Rc < RefCell < SlotMeta > > > ,
slot_index : u64 ,
) -> Result < Rc < RefCell < SlotMeta > > > {
2020-12-13 17:26:34 -08:00
let result = find_slot_meta_in_cached_state ( working_set , chained_slots , slot_index ) ;
2019-04-26 08:52:10 -07:00
if let Some ( slot ) = result {
Ok ( slot )
} else {
find_slot_meta_in_db_else_create ( db , slot_index , chained_slots )
2019-02-07 15:10:54 -08:00
}
2019-04-26 08:52:10 -07:00
}
2019-02-07 15:10:54 -08:00
2022-01-28 12:07:09 -08:00
/// A helper function to [`find_slot_meta_else_create`] that searches the
/// `SlotMeta` based on the specified `slot` in `db` and updates `insert_map`.
///
/// If the specified `db` does not contain a matched entry, then it will create
/// a dummy orphan slot in the database.
2020-12-13 17:26:34 -08:00
fn find_slot_meta_in_db_else_create (
2019-04-26 08:52:10 -07:00
db : & Database ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
2020-12-13 17:26:34 -08:00
insert_map : & mut HashMap < u64 , Rc < RefCell < SlotMeta > > > ,
2019-04-26 08:52:10 -07:00
) -> Result < Rc < RefCell < SlotMeta > > > {
2019-05-03 14:46:02 -07:00
if let Some ( slot_meta ) = db . column ::< cf ::SlotMeta > ( ) . get ( slot ) ? {
2019-04-26 08:52:10 -07:00
insert_map . insert ( slot , Rc ::new ( RefCell ::new ( slot_meta ) ) ) ;
} else {
// If this slot doesn't exist, make a orphan slot. This way we
2019-09-03 21:32:51 -07:00
// remember which slots chained to this one when we eventually get a real shred
2019-04-26 08:52:10 -07:00
// for this slot
2020-05-05 14:07:21 -07:00
insert_map . insert ( slot , Rc ::new ( RefCell ::new ( SlotMeta ::new_orphan ( slot ) ) ) ) ;
2019-04-25 00:04:49 -07:00
}
2021-08-23 06:56:03 -07:00
Ok ( insert_map . get ( & slot ) . unwrap ( ) . clone ( ) )
2019-04-26 08:52:10 -07:00
}
2019-04-25 00:04:49 -07:00
2022-01-28 12:07:09 -08:00
/// Returns the `SlotMeta` of the specified `slot` from the two cached states:
/// `working_set` and `chained_slots`. If both contain the `SlotMeta`, then
/// the latest one from the `working_set` will be returned.
2019-04-26 08:52:10 -07:00
fn find_slot_meta_in_cached_state < ' a > (
2019-09-04 17:14:42 -07:00
working_set : & ' a HashMap < u64 , SlotMetaWorkingSetEntry > ,
2019-04-26 08:52:10 -07:00
chained_slots : & ' a HashMap < u64 , Rc < RefCell < SlotMeta > > > ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
2020-12-13 17:26:34 -08:00
) -> Option < Rc < RefCell < SlotMeta > > > {
2019-10-31 14:03:41 -07:00
if let Some ( entry ) = working_set . get ( & slot ) {
2020-12-13 17:26:34 -08:00
Some ( entry . new_slot_meta . clone ( ) )
2019-04-26 08:52:10 -07:00
} else {
2021-04-08 11:40:37 -07:00
chained_slots . get ( & slot ) . cloned ( )
2019-03-29 16:07:24 -07:00
}
2019-04-26 08:52:10 -07:00
}
2019-03-29 16:07:24 -07:00
2021-12-21 22:36:24 -08:00
/// For each entry in `working_set` whose `did_insert_occur` is true, this
/// function handles its chaining effect by updating the SlotMeta of both
/// the slot and its parent slot to reflect the slot descends from the
/// parent slot. In addition, when a slot is newly connected, it also
/// checks whether any of its direct and indirect children slots are connected
/// or not.
///
/// This function may update column families [`cf::SlotMeta`] and
/// [`cf::Orphans`].
///
/// For more information about the chaining, check the previous discussion here:
/// https://github.com/solana-labs/solana/pull/2253
///
/// Arguments:
/// - `db`: the blockstore db that stores both shreds and their metadata.
/// - `write_batch`: the write batch which includes all the updates of the
/// the current write and ensures their atomicity.
/// - `working_set`: a slot-id to SlotMetaWorkingSetEntry map. This function
/// will remove all entries which insertion did not actually occur.
2019-04-26 08:52:10 -07:00
fn handle_chaining (
db : & Database ,
write_batch : & mut WriteBatch ,
2019-10-31 14:03:41 -07:00
working_set : & mut HashMap < u64 , SlotMetaWorkingSetEntry > ,
2019-04-26 08:52:10 -07:00
) -> Result < ( ) > {
2019-10-31 14:03:41 -07:00
// Handle chaining for all the SlotMetas that were inserted into
working_set . retain ( | _ , entry | entry . did_insert_occur ) ;
2019-04-26 08:52:10 -07:00
let mut new_chained_slots = HashMap ::new ( ) ;
2019-10-31 14:03:41 -07:00
let working_set_slots : Vec < _ > = working_set . keys ( ) . collect ( ) ;
2019-04-26 08:52:10 -07:00
for slot in working_set_slots {
2019-10-31 14:03:41 -07:00
handle_chaining_for_slot ( db , write_batch , working_set , & mut new_chained_slots , * slot ) ? ;
2019-02-07 15:10:54 -08:00
}
2019-04-26 08:52:10 -07:00
// Write all the newly changed slots in new_chained_slots to the write_batch
for ( slot , meta ) in new_chained_slots . iter ( ) {
let meta : & SlotMeta = & RefCell ::borrow ( & * meta ) ;
write_batch . put ::< cf ::SlotMeta > ( * slot , meta ) ? ;
2019-02-07 15:10:54 -08:00
}
2019-04-26 08:52:10 -07:00
Ok ( ( ) )
}
2019-02-07 15:10:54 -08:00
2021-12-21 22:36:24 -08:00
/// A helper function of handle_chaining which handles the chaining based
/// on the `SlotMetaWorkingSetEntry` of the specified `slot`. Specifically,
/// it handles the following two things:
///
/// 1. based on the `SlotMetaWorkingSetEntry` for `slot`, check if `slot`
/// did not previously have a parent slot but does now. If `slot` satisfies
/// this condition, update the Orphan property of both `slot` and its parent
/// slot based on their current orphan status. Specifically:
/// - updates the orphan property of slot to no longer be an orphan because
/// it has a parent.
/// - adds the parent to the orphan column family if the parent's parent is
/// currently unknown.
///
/// 2. if the `SlotMetaWorkingSetEntry` for `slot` indicates this slot
/// is newly connected to a parent slot, then this function will update
/// the is_connected property of all its direct and indirect children slots.
///
/// This function may update column family [`cf::Orphans`] and indirectly
/// update SlotMeta from its output parameter `new_chained_slots`.
///
/// Arguments:
/// `db`: the underlying db for blockstore
/// `write_batch`: the write batch which includes all the updates of the
/// the current write and ensures their atomicity.
/// `working_set`: the working set which include the specified `slot`
/// `new_chained_slots`: an output parameter which includes all the slots
/// which connectivity have been updated.
/// `slot`: the slot which we want to handle its chaining effect.
2019-04-26 08:52:10 -07:00
fn handle_chaining_for_slot (
db : & Database ,
write_batch : & mut WriteBatch ,
2019-09-04 17:14:42 -07:00
working_set : & HashMap < u64 , SlotMetaWorkingSetEntry > ,
2019-04-26 08:52:10 -07:00
new_chained_slots : & mut HashMap < u64 , Rc < RefCell < SlotMeta > > > ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
2019-04-26 08:52:10 -07:00
) -> Result < ( ) > {
2019-10-31 14:03:41 -07:00
let slot_meta_entry = working_set
2019-04-26 08:52:10 -07:00
. get ( & slot )
. expect ( " Slot must exist in the working_set hashmap " ) ;
2019-04-24 15:53:01 -07:00
2019-10-31 14:03:41 -07:00
let meta = & slot_meta_entry . new_slot_meta ;
let meta_backup = & slot_meta_entry . old_slot_meta ;
2019-04-26 08:52:10 -07:00
{
let mut meta_mut = meta . borrow_mut ( ) ;
2019-05-09 14:10:04 -07:00
let was_orphan_slot = meta_backup . is_some ( ) & & is_orphan ( meta_backup . as_ref ( ) . unwrap ( ) ) ;
2019-04-24 15:53:01 -07:00
2019-04-26 08:52:10 -07:00
// If:
// 1) This is a new slot
// 2) slot != 0
// then try to chain this slot to a previous slot
2021-12-14 10:57:11 -08:00
if slot ! = 0 & & meta_mut . parent_slot . is_some ( ) {
let prev_slot = meta_mut . parent_slot . unwrap ( ) ;
2019-04-24 15:53:01 -07:00
2019-04-26 08:52:10 -07:00
// Check if the slot represented by meta_mut is either a new slot or a orphan.
// In both cases we need to run the chaining logic b/c the parent on the slot was
// previously unknown.
2019-05-09 14:10:04 -07:00
if meta_backup . is_none ( ) | | was_orphan_slot {
2019-04-26 08:52:10 -07:00
let prev_slot_meta =
find_slot_meta_else_create ( db , working_set , new_chained_slots , prev_slot ) ? ;
2019-02-07 15:10:54 -08:00
2019-05-09 14:10:04 -07:00
// This is a newly inserted slot/orphan so run the chaining logic to link it to a
// newly discovered parent
2019-04-26 08:52:10 -07:00
chain_new_slot_to_prev_slot ( & mut prev_slot_meta . borrow_mut ( ) , slot , & mut meta_mut ) ;
2019-02-07 15:10:54 -08:00
2019-05-09 14:10:04 -07:00
// If the parent of `slot` is a newly inserted orphan, insert it into the orphans
// column family
2019-04-26 08:52:10 -07:00
if is_orphan ( & RefCell ::borrow ( & * prev_slot_meta ) ) {
write_batch . put ::< cf ::Orphans > ( prev_slot , & true ) ? ;
2019-02-12 16:06:23 -08:00
}
}
2019-04-26 08:52:10 -07:00
}
2019-05-09 14:10:04 -07:00
// At this point this slot has received a parent, so it's no longer an orphan
if was_orphan_slot {
2019-04-26 08:52:10 -07:00
write_batch . delete ::< cf ::Orphans > ( slot ) ? ;
}
2019-02-07 15:10:54 -08:00
}
2019-05-09 14:10:04 -07:00
// If this is a newly inserted slot, then we know the children of this slot were not previously
// connected to the trunk of the ledger. Thus if slot.is_connected is now true, we need to
// update all child slots with `is_connected` = true because these children are also now newly
2019-08-21 17:46:59 -07:00
// connected to trunk of the ledger
2019-04-26 08:52:10 -07:00
let should_propagate_is_connected =
is_newly_completed_slot ( & RefCell ::borrow ( & * meta ) , meta_backup )
& & RefCell ::borrow ( & * meta ) . is_connected ;
2019-04-11 14:14:57 -07:00
2019-04-26 08:52:10 -07:00
if should_propagate_is_connected {
// slot_function returns a boolean indicating whether to explore the children
// of the input slot
let slot_function = | slot : & mut SlotMeta | {
slot . is_connected = true ;
2019-04-18 21:56:43 -07:00
2019-04-26 08:52:10 -07:00
// We don't want to set the is_connected flag on the children of non-full
// slots
slot . is_full ( )
} ;
traverse_children_mut (
db ,
slot ,
2021-06-18 06:34:46 -07:00
meta ,
2019-04-26 08:52:10 -07:00
working_set ,
new_chained_slots ,
slot_function ,
) ? ;
}
Ok ( ( ) )
}
2021-12-21 22:36:24 -08:00
/// Traverse all the direct and indirect children slots and apply the specified
/// `slot_function`.
///
/// Arguments:
/// `db`: the blockstore db that stores shreds and their metadata.
/// `slot`: starting slot to traverse.
/// `slot_meta`: the SlotMeta of the above `slot`.
/// `working_set`: a slot-id to SlotMetaWorkingSetEntry map which is used
/// to traverse the graph.
2022-02-10 19:43:44 -08:00
/// `passed_visited_slots`: all the traversed slots which have passed the
2021-12-21 22:36:24 -08:00
/// slot_function. This may also include the input `slot`.
/// `slot_function`: a function which updates the SlotMeta of the visisted
/// slots and determine whether to further traverse the children slots of
/// a given slot.
2019-04-26 08:52:10 -07:00
fn traverse_children_mut < F > (
db : & Database ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
2019-11-19 20:15:37 -08:00
slot_meta : & Rc < RefCell < SlotMeta > > ,
2019-09-04 17:14:42 -07:00
working_set : & HashMap < u64 , SlotMetaWorkingSetEntry > ,
2021-12-21 22:36:24 -08:00
passed_visisted_slots : & mut HashMap < u64 , Rc < RefCell < SlotMeta > > > ,
2019-04-26 08:52:10 -07:00
slot_function : F ,
) -> Result < ( ) >
where
F : Fn ( & mut SlotMeta ) -> bool ,
{
2019-11-19 20:15:37 -08:00
let mut next_slots : Vec < ( u64 , Rc < RefCell < SlotMeta > > ) > = vec! [ ( slot , slot_meta . clone ( ) ) ] ;
2019-04-26 08:52:10 -07:00
while ! next_slots . is_empty ( ) {
let ( _ , current_slot ) = next_slots . pop ( ) . unwrap ( ) ;
// Check whether we should explore the children of this slot
if slot_function ( & mut current_slot . borrow_mut ( ) ) {
let current_slot = & RefCell ::borrow ( & * current_slot ) ;
for next_slot_index in current_slot . next_slots . iter ( ) {
let next_slot = find_slot_meta_else_create (
db ,
working_set ,
2021-12-21 22:36:24 -08:00
passed_visisted_slots ,
2019-04-26 08:52:10 -07:00
* next_slot_index ,
) ? ;
next_slots . push ( ( * next_slot_index , next_slot ) ) ;
2019-04-11 14:14:57 -07:00
}
}
2019-04-26 08:52:10 -07:00
}
2019-04-11 14:14:57 -07:00
2019-04-26 08:52:10 -07:00
Ok ( ( ) )
}
2019-04-11 14:14:57 -07:00
2019-04-26 08:52:10 -07:00
/// Returns true when `meta`'s parent slot is unknown, i.e. this slot is the
/// head of a detached chain (an "orphan").
fn is_orphan(meta: &SlotMeta) -> bool {
    // If we have no parent, then this is the head of a detached chain of
    // slots
    meta.parent_slot.is_none()
}
2019-04-18 21:56:43 -07:00
2019-04-26 08:52:10 -07:00
// 1) Chain current_slot to the previous slot defined by prev_slot_meta
// 2) Determine whether to set the is_connected flag
fn chain_new_slot_to_prev_slot (
prev_slot_meta : & mut SlotMeta ,
2019-11-02 00:38:30 -07:00
current_slot : Slot ,
2019-04-26 08:52:10 -07:00
current_slot_meta : & mut SlotMeta ,
) {
prev_slot_meta . next_slots . push ( current_slot ) ;
current_slot_meta . is_connected = prev_slot_meta . is_connected & & prev_slot_meta . is_full ( ) ;
}
2019-04-11 14:14:57 -07:00
2019-04-26 08:52:10 -07:00
fn is_newly_completed_slot ( slot_meta : & SlotMeta , backup_slot_meta : & Option < SlotMeta > ) -> bool {
slot_meta . is_full ( )
& & ( backup_slot_meta . is_none ( )
| | slot_meta . consumed ! = backup_slot_meta . as_ref ( ) . unwrap ( ) . consumed )
}
2019-04-11 14:14:57 -07:00
2019-04-26 08:52:10 -07:00
fn slot_has_updates ( slot_meta : & SlotMeta , slot_meta_backup : & Option < SlotMeta > ) -> bool {
// We should signal that there are updates if we extended the chain of consecutive blocks starting
// from block 0, which is true iff:
// 1) The block with index prev_block_index is itself part of the trunk of consecutive blocks
// starting from block 0,
slot_meta . is_connected & &
// AND either:
// 1) The slot didn't exist in the database before, and now we have a consecutive
// block for that slot
( ( slot_meta_backup . is_none ( ) & & slot_meta . consumed ! = 0 ) | |
// OR
// 2) The slot did exist, but now we have a new consecutive block for that slot
( slot_meta_backup . is_some ( ) & & slot_meta_backup . as_ref ( ) . unwrap ( ) . consumed ! = slot_meta . consumed ) )
2018-12-11 09:14:23 -08:00
}
2019-02-26 16:35:00 -08:00
// Creates a new ledger with slot 0 full of ticks (and only ticks).
//
// Returns the blockhash that can be used to append entries with.
pub fn create_new_ledger(
    ledger_path: &Path,
    genesis_config: &GenesisConfig,
    max_genesis_archive_unpacked_size: u64,
    column_options: LedgerColumnOptions,
) -> Result<Hash> {
    // Start from a clean slate: remove any blockstore already at this path,
    // then persist the genesis config there.
    Blockstore::destroy(ledger_path)?;
    genesis_config.write(ledger_path)?;

    // Fill slot 0 with ticks that link back to the genesis_config to bootstrap the ledger.
    let blockstore_dir = Blockstore::blockstore_directory(&column_options.shred_storage_type);
    let blockstore = Blockstore::open_with_options(
        ledger_path,
        BlockstoreOptions {
            access_type: AccessType::Primary,
            recovery_mode: None,
            enforce_ulimit_nofile: false,
            column_options: column_options.clone(),
        },
    )?;
    let ticks_per_slot = genesis_config.ticks_per_slot;
    let hashes_per_tick = genesis_config.poh_config.hashes_per_tick.unwrap_or(0);
    let entries = create_ticks(ticks_per_slot, hashes_per_tick, genesis_config.hash());
    let last_hash = entries.last().unwrap().hash;
    // Shred version is derived from the last hash so that ledgers created
    // from different genesis configs get distinct versions.
    let version = solana_sdk::shred_version::version_from_hash(&last_hash);

    let shredder = Shredder::new(0, 0, 0, version).unwrap();
    let shreds = shredder
        .entries_to_shreds(
            &Keypair::new(),
            &entries,
            true, // is_last_in_slot
            0,    // next_shred_index
            0,    // next_code_index
        )
        .0;
    assert!(shreds.last().unwrap().last_in_slot());

    blockstore.insert_shreds(shreds, None, false)?;
    blockstore.set_roots(std::iter::once(&0))?;
    // Explicitly close the blockstore before we create the archived genesis file
    drop(blockstore);

    // Archive genesis + the shred directory with tar
    // (j=bzip2, c=create, f=file, h=follow symlinks, S=sparse).
    let archive_path = ledger_path.join(DEFAULT_GENESIS_ARCHIVE);
    let args = vec![
        "jcfhS",
        archive_path.to_str().unwrap(),
        "-C",
        ledger_path.to_str().unwrap(),
        DEFAULT_GENESIS_FILE,
        blockstore_dir,
    ];
    let output = std::process::Command::new("tar")
        .args(&args)
        .output()
        .unwrap();
    if !output.status.success() {
        use std::str::from_utf8;
        error!("tar stdout: {}", from_utf8(&output.stdout).unwrap_or("?"));
        error!("tar stderr: {}", from_utf8(&output.stderr).unwrap_or("?"));

        return Err(BlockstoreError::Io(IoError::new(
            ErrorKind::Other,
            format!(
                "Error trying to generate snapshot archive: {}",
                output.status
            ),
        )));
    }

    // ensure the genesis archive can be unpacked and it is under
    // max_genesis_archive_unpacked_size, immediately after creating it above.
    {
        let temp_dir = tempfile::tempdir_in(ledger_path).unwrap();
        // unpack into a temp dir, while completely discarding the unpacked files
        let unpack_check = unpack_genesis_archive(
            &archive_path,
            temp_dir.path(),
            max_genesis_archive_unpacked_size,
        );
        if let Err(unpack_err) = unpack_check {
            // stash problematic original archived genesis related files to
            // examine them later and to prevent validator and ledger-tool from
            // naively consuming them
            let mut error_messages = String::new();
            fs::rename(
                &ledger_path.join(DEFAULT_GENESIS_ARCHIVE),
                ledger_path.join(format!("{}.failed", DEFAULT_GENESIS_ARCHIVE)),
            )
            .unwrap_or_else(|e| {
                // best-effort: record the rename failure instead of aborting
                let _ = write!(
                    &mut error_messages,
                    "/failed to stash problematic {}: {}",
                    DEFAULT_GENESIS_ARCHIVE, e
                );
            });
            fs::rename(
                &ledger_path.join(DEFAULT_GENESIS_FILE),
                ledger_path.join(format!("{}.failed", DEFAULT_GENESIS_FILE)),
            )
            .unwrap_or_else(|e| {
                let _ = write!(
                    &mut error_messages,
                    "/failed to stash problematic {}: {}",
                    DEFAULT_GENESIS_FILE, e
                );
            });
            fs::rename(
                &ledger_path.join(blockstore_dir),
                ledger_path.join(format!("{}.failed", blockstore_dir)),
            )
            .unwrap_or_else(|e| {
                let _ = write!(
                    &mut error_messages,
                    "/failed to stash problematic {}: {}",
                    blockstore_dir, e
                );
            });

            return Err(BlockstoreError::Io(IoError::new(
                ErrorKind::Other,
                format!(
                    "Error checking to unpack genesis archive: {}{}",
                    unpack_err, error_messages
                ),
            )));
        }
    }

    Ok(last_hash)
}
2019-02-26 17:11:26 -08:00
/// Expands to a unique-ish ledger name derived from the call site
/// (source file + line number).
#[macro_export]
macro_rules! tmp_ledger_name {
    () => {
        &format!("{}-{}", file!(), line!())
    };
}
/// Expands to a fresh temporary ledger path named after the call site.
/// The caller is responsible for removing the directory when done.
#[macro_export]
macro_rules! get_tmp_ledger_path {
    () => {
        $crate::blockstore::get_ledger_path_from_name($crate::tmp_ledger_name!())
    };
}
2021-07-21 11:15:08 -07:00
/// Like `get_tmp_ledger_path!` but returns a `TempDir` that deletes the
/// directory automatically when dropped.
#[macro_export]
macro_rules! get_tmp_ledger_path_auto_delete {
    () => {
        $crate::blockstore::get_ledger_path_from_name_auto_delete($crate::tmp_ledger_name!())
    };
}
pub fn get_ledger_path_from_name_auto_delete ( name : & str ) -> TempDir {
2021-09-10 05:33:08 -07:00
let mut path = get_ledger_path_from_name ( name ) ;
// path is a directory so .file_name() returns the last component of the path
let last = path . file_name ( ) . unwrap ( ) . to_str ( ) . unwrap ( ) . to_string ( ) ;
path . pop ( ) ;
2021-07-21 11:15:08 -07:00
fs ::create_dir_all ( & path ) . unwrap ( ) ;
2021-09-10 05:33:08 -07:00
Builder ::new ( )
. prefix ( & last )
. rand_bytes ( 0 )
. tempdir_in ( path )
. unwrap ( )
2021-07-21 11:15:08 -07:00
}
2019-11-13 07:14:09 -08:00
pub fn get_ledger_path_from_name ( name : & str ) -> PathBuf {
2019-01-09 14:33:44 -08:00
use std ::env ;
2019-07-17 14:27:58 -07:00
let out_dir = env ::var ( " FARF_DIR " ) . unwrap_or_else ( | _ | " farf " . to_string ( ) ) ;
2019-01-09 14:33:44 -08:00
let keypair = Keypair ::new ( ) ;
2019-07-30 15:53:41 -07:00
let path = [
out_dir ,
" ledger " . to_string ( ) ,
format! ( " {} - {} " , name , keypair . pubkey ( ) ) ,
]
. iter ( )
. collect ( ) ;
2019-01-09 14:33:44 -08:00
// whack any possible collision
2019-01-24 12:04:04 -08:00
let _ignored = fs ::remove_dir_all ( & path ) ;
2019-01-09 14:33:44 -08:00
path
}
2019-02-26 19:19:34 -08:00
/// Creates a ledger named after the call site with default column options;
/// expands to `(PathBuf, Hash)`. The caller owns cleanup of the directory.
#[macro_export]
macro_rules! create_new_tmp_ledger {
    ($genesis_config:expr) => {
        $crate::blockstore::create_new_ledger_from_name(
            $crate::tmp_ledger_name!(),
            $genesis_config,
            $crate::blockstore_db::LedgerColumnOptions::default(),
        )
    };
}
2021-09-10 05:33:08 -07:00
/// Like `create_new_tmp_ledger!` but expands to `(TempDir, Hash)` so the
/// ledger directory is deleted automatically when dropped.
#[macro_export]
macro_rules! create_new_tmp_ledger_auto_delete {
    ($genesis_config:expr) => {
        $crate::blockstore::create_new_ledger_from_name_auto_delete(
            $crate::tmp_ledger_name!(),
            $genesis_config,
            $crate::blockstore_db::LedgerColumnOptions::default(),
        )
    };
}
/// Like `create_new_tmp_ledger_auto_delete!` but configures the shred columns
/// to use the RocksFifo storage backend (with default FIFO options).
#[macro_export]
macro_rules! create_new_tmp_ledger_fifo_auto_delete {
    ($genesis_config:expr) => {
        $crate::blockstore::create_new_ledger_from_name_auto_delete(
            $crate::tmp_ledger_name!(),
            $genesis_config,
            $crate::blockstore_db::LedgerColumnOptions {
                shred_storage_type: $crate::blockstore_db::ShredStorageType::RocksFifo(
                    $crate::blockstore_db::BlockstoreRocksFifoOptions::default(),
                ),
                ..$crate::blockstore_db::LedgerColumnOptions::default()
            },
        )
    };
}
2019-12-05 11:25:13 -08:00
pub fn verify_shred_slots ( slot : Slot , parent_slot : Slot , last_root : Slot ) -> bool {
2019-09-16 13:13:53 -07:00
if ! is_valid_write_to_slot_0 ( slot , parent_slot , last_root ) {
// Check that the parent_slot < slot
if parent_slot > = slot {
return false ;
}
2019-11-14 11:49:31 -08:00
// Ignore shreds that chain to slots before the last root
2019-09-16 13:13:53 -07:00
if parent_slot < last_root {
return false ;
}
// Above two checks guarantee that by this point, slot > last_root
}
true
}
2019-02-26 16:35:00 -08:00
// Same as `create_new_ledger()` but use a temporary ledger name based on the provided `name`
//
// Note: like `create_new_ledger` the returned ledger will have slot 0 full of ticks (and only
// ticks)
pub fn create_new_ledger_from_name(
    name: &str,
    genesis_config: &GenesisConfig,
    column_options: LedgerColumnOptions,
) -> (PathBuf, Hash) {
    let (ledger_path, blockhash) =
        create_new_ledger_from_name_auto_delete(name, genesis_config, column_options);
    // into_path() releases the TempDir guard: the directory persists and the
    // caller becomes responsible for removing it.
    (ledger_path.into_path(), blockhash)
}
// Same as `create_new_ledger()` but use a temporary ledger name based on the provided `name`
//
// Note: like `create_new_ledger` the returned ledger will have slot 0 full of ticks (and only
// ticks)
//
// The returned TempDir deletes the ledger directory when dropped.
pub fn create_new_ledger_from_name_auto_delete(
    name: &str,
    genesis_config: &GenesisConfig,
    column_options: LedgerColumnOptions,
) -> (TempDir, Hash) {
    let ledger_path = get_ledger_path_from_name_auto_delete(name);
    let blockhash = create_new_ledger(
        ledger_path.path(),
        genesis_config,
        MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
        column_options,
    )
    .unwrap();
    (ledger_path, blockhash)
}
2019-09-03 21:32:51 -07:00
pub fn entries_to_test_shreds (
2021-12-24 12:32:43 -08:00
entries : & [ Entry ] ,
2019-11-02 00:38:30 -07:00
slot : Slot ,
parent_slot : Slot ,
2019-09-03 21:32:51 -07:00
is_full_slot : bool ,
2019-11-18 18:05:02 -08:00
version : u16 ,
2019-09-18 16:24:30 -07:00
) -> Vec < Shred > {
2021-06-21 13:12:38 -07:00
Shredder ::new ( slot , parent_slot , 0 , version )
2021-04-21 05:47:50 -07:00
. unwrap ( )
2021-12-19 14:37:55 -08:00
. entries_to_shreds (
& Keypair ::new ( ) ,
2021-12-24 12:32:43 -08:00
entries ,
2021-12-19 14:37:55 -08:00
is_full_slot ,
0 , // next_shred_index,
0 , // next_code_index
)
2021-04-21 05:47:50 -07:00
. 0
2019-09-03 21:32:51 -07:00
}
2019-10-31 13:38:50 -07:00
// used for tests only
2019-10-18 09:28:51 -07:00
pub fn make_slot_entries (
2019-11-02 00:38:30 -07:00
slot : Slot ,
parent_slot : Slot ,
2019-10-18 09:28:51 -07:00
num_entries : u64 ,
) -> ( Vec < Shred > , Vec < Entry > ) {
2019-10-31 13:38:50 -07:00
let entries = create_ticks ( num_entries , 0 , Hash ::default ( ) ) ;
2021-12-24 12:32:43 -08:00
let shreds = entries_to_test_shreds ( & entries , slot , parent_slot , true , 0 ) ;
2019-10-18 09:28:51 -07:00
( shreds , entries )
}
2019-10-31 13:38:50 -07:00
// used for tests only
2019-10-18 09:28:51 -07:00
pub fn make_many_slot_entries (
2019-11-02 00:38:30 -07:00
start_slot : Slot ,
2019-10-18 09:28:51 -07:00
num_slots : u64 ,
entries_per_slot : u64 ,
) -> ( Vec < Shred > , Vec < Entry > ) {
let mut shreds = vec! [ ] ;
let mut entries = vec! [ ] ;
for slot in start_slot .. start_slot + num_slots {
let parent_slot = if slot = = 0 { 0 } else { slot - 1 } ;
let ( slot_shreds , slot_entries ) = make_slot_entries ( slot , parent_slot , entries_per_slot ) ;
shreds . extend ( slot_shreds ) ;
entries . extend ( slot_entries ) ;
}
( shreds , entries )
}
2022-03-10 13:31:43 -08:00
// test-only: check that all columns are either empty or start at `min_slot`
//
// For each column family the first key in iteration order is the smallest
// slot present; an empty column trivially passes. The TransactionStatus and
// AddressSignatures columns key on a primary_index prefix, and entries under
// primary index 2 at slot 0 are exempted from the check.
pub fn test_all_empty_or_min(blockstore: &Blockstore, min_slot: Slot) {
    let condition_met = blockstore
        .db
        .iter::<cf::SlotMeta>(IteratorMode::Start)
        .unwrap()
        .next()
        .map(|(slot, _)| slot >= min_slot)
        .unwrap_or(true)
        & blockstore
            .db
            .iter::<cf::Root>(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|(slot, _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .db
            .iter::<cf::ShredData>(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|((slot, _), _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .db
            .iter::<cf::ShredCode>(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|((slot, _), _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .db
            .iter::<cf::DeadSlots>(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|(slot, _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .db
            .iter::<cf::DuplicateSlots>(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|(slot, _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .db
            .iter::<cf::ErasureMeta>(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|((slot, _), _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .db
            .iter::<cf::Orphans>(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|(slot, _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .db
            .iter::<cf::Index>(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|(slot, _)| slot >= min_slot)
            .unwrap_or(true)
        & blockstore
            .db
            .iter::<cf::TransactionStatus>(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|((primary_index, _, slot), _)| {
                slot >= min_slot || (primary_index == 2 && slot == 0)
            })
            .unwrap_or(true)
        & blockstore
            .db
            .iter::<cf::AddressSignatures>(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|((primary_index, _, slot, _), _)| {
                slot >= min_slot || (primary_index == 2 && slot == 0)
            })
            .unwrap_or(true)
        & blockstore
            .db
            .iter::<cf::Rewards>(IteratorMode::Start)
            .unwrap()
            .next()
            .map(|(slot, _)| slot >= min_slot)
            .unwrap_or(true);
    assert!(condition_met);
}
2021-12-13 21:34:43 -08:00
// used for tests only
// Create `num_shreds` shreds for [start_slot, start_slot + num_slot) slots
pub fn make_many_slot_shreds(
    start_slot: u64,
    num_slots: u64,
    num_shreds_per_slot: u64,
) -> (Vec<Shred>, Vec<Entry>) {
    // Use `None` as shred_size so the default (full) value is used
    let num_entries = max_ticks_per_n_shreds(num_shreds_per_slot, None);
    make_many_slot_entries(start_slot, num_slots, num_entries)
}
2019-10-18 09:28:51 -07:00
// Create shreds for slots that have a parent-child relationship defined by the input `chain`
2019-10-31 13:38:50 -07:00
// used for tests only
2019-10-18 09:28:51 -07:00
pub fn make_chaining_slot_entries (
chain : & [ u64 ] ,
entries_per_slot : u64 ,
) -> Vec < ( Vec < Shred > , Vec < Entry > ) > {
let mut slots_shreds_and_entries = vec! [ ] ;
for ( i , slot ) in chain . iter ( ) . enumerate ( ) {
let parent_slot = {
if * slot = = 0 | | i = = 0 {
0
} else {
chain [ i - 1 ]
}
} ;
let result = make_slot_entries ( * slot , parent_slot , entries_per_slot ) ;
slots_shreds_and_entries . push ( result ) ;
}
slots_shreds_and_entries
}
2019-11-05 11:18:49 -08:00
// No-op on non-unix platforms: there is no RLIMIT_NOFILE to adjust.
#[cfg(not(unix))]
fn adjust_ulimit_nofile(_enforce_ulimit_nofile: bool) -> Result<()> {
    Ok(())
}
2019-11-05 11:18:49 -08:00
// Raise the process's open-file-descriptor soft limit to `desired_nofile`.
// When `enforce_ulimit_nofile` is true, failure to raise the limit is a hard
// error; otherwise it is merely logged.
#[cfg(unix)]
fn adjust_ulimit_nofile(enforce_ulimit_nofile: bool) -> Result<()> {
    // Rocks DB likes to have many open files. The default open file descriptor limit is
    // usually not enough
    let desired_nofile = 500000;

    // Read the current RLIMIT_NOFILE; on getrlimit failure returns zeros and
    // logs a warning.
    fn get_nofile() -> libc::rlimit {
        let mut nofile = libc::rlimit {
            rlim_cur: 0,
            rlim_max: 0,
        };
        if unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut nofile) } != 0 {
            warn!("getrlimit(RLIMIT_NOFILE) failed");
        }
        nofile
    }

    let mut nofile = get_nofile();
    if nofile.rlim_cur < desired_nofile {
        nofile.rlim_cur = desired_nofile;
        if unsafe { libc::setrlimit(libc::RLIMIT_NOFILE, &nofile) } != 0 {
            error!(
                "Unable to increase the maximum open file descriptor limit to {}",
                desired_nofile
            );

            if cfg!(target_os = "macos") {
                error!(
                    "On mac OS you may need to run |sudo launchctl limit maxfiles {} {}| first",
                    desired_nofile, desired_nofile,
                );
            }
            if enforce_ulimit_nofile {
                return Err(BlockstoreError::UnableToSetOpenFileDescriptorLimit);
            }
        }

        // Re-read so the log below reports the limit actually in effect.
        nofile = get_nofile();
    }
    info!("Maximum open file descriptors: {}", nofile.rlim_cur);
    Ok(())
}
2018-11-15 15:53:31 -08:00
#[ cfg(test) ]
2019-02-18 18:41:31 -08:00
pub mod tests {
2021-12-03 09:00:31 -08:00
use {
super ::* ,
crate ::{
2022-03-02 18:30:22 -08:00
blockstore_db ::BlockstoreRocksFifoOptions ,
2021-12-03 09:00:31 -08:00
genesis_utils ::{ create_genesis_config , GenesisConfigInfo } ,
leader_schedule ::{ FixedSchedule , LeaderSchedule } ,
2022-05-02 16:33:53 -07:00
shred ::{ max_ticks_per_n_shreds , ShredFlags } ,
2021-12-03 09:00:31 -08:00
} ,
assert_matches ::assert_matches ,
bincode ::serialize ,
2022-01-11 02:44:46 -08:00
crossbeam_channel ::unbounded ,
2021-12-03 09:00:31 -08:00
itertools ::Itertools ,
rand ::{ seq ::SliceRandom , thread_rng } ,
solana_account_decoder ::parse_token ::UiTokenAmount ,
solana_entry ::entry ::{ next_entry , next_entry_mut } ,
solana_runtime ::bank ::{ Bank , RewardType } ,
solana_sdk ::{
hash ::{ self , hash , Hash } ,
instruction ::CompiledInstruction ,
2022-01-13 23:24:41 -08:00
message ::v0 ::LoadedAddresses ,
2021-12-03 09:00:31 -08:00
packet ::PACKET_DATA_SIZE ,
pubkey ::Pubkey ,
signature ::Signature ,
transaction ::{ Transaction , TransactionError } ,
2022-03-22 15:17:05 -07:00
transaction_context ::TransactionReturnData ,
2021-12-03 09:00:31 -08:00
} ,
solana_storage_proto ::convert ::generated ,
solana_transaction_status ::{ InnerInstructions , Reward , Rewards , TransactionTokenBalance } ,
2022-01-11 02:44:46 -08:00
std ::{ thread ::Builder , time ::Duration } ,
2019-11-14 15:34:39 -08:00
} ;
// used for tests only
2020-06-02 18:49:31 -07:00
pub ( crate ) fn make_slot_entries_with_transactions ( num_entries : u64 ) -> Vec < Entry > {
2019-11-14 15:34:39 -08:00
let mut entries : Vec < Entry > = Vec ::new ( ) ;
2019-12-11 14:06:54 -08:00
for x in 0 .. num_entries {
2019-11-14 15:34:39 -08:00
let transaction = Transaction ::new_with_compiled_instructions (
& [ & Keypair ::new ( ) ] ,
2020-10-19 12:12:08 -07:00
& [ solana_sdk ::pubkey ::new_rand ( ) ] ,
2019-11-14 15:34:39 -08:00
Hash ::default ( ) ,
2020-10-19 12:12:08 -07:00
vec! [ solana_sdk ::pubkey ::new_rand ( ) ] ,
2019-11-14 15:34:39 -08:00
vec! [ CompiledInstruction ::new ( 1 , & ( ) , vec! [ 0 ] ) ] ,
) ;
entries . push ( next_entry_mut ( & mut Hash ::default ( ) , 0 , vec! [ transaction ] ) ) ;
2019-12-11 14:06:54 -08:00
let mut tick = create_ticks ( 1 , 0 , hash ( & serialize ( & x ) . unwrap ( ) ) ) ;
2019-11-14 15:34:39 -08:00
entries . append ( & mut tick ) ;
}
2019-12-09 00:13:36 -08:00
entries
2019-11-14 15:34:39 -08:00
}
2018-11-15 15:53:31 -08:00
2019-10-08 00:42:51 -07:00
    #[test]
    fn test_create_new_ledger() {
        solana_logger::setup();
        let mint_total = 1_000_000_000_000;
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(mint_total);
        let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        let blockstore = Blockstore::open(ledger_path.path()).unwrap(); //FINDME

        // Slot 0 of a fresh ledger must contain exactly the bootstrap ticks.
        let ticks = create_ticks(genesis_config.ticks_per_slot, 0, genesis_config.hash());
        let entries = blockstore.get_slot_entries(0, 0).unwrap();

        assert_eq!(ticks, entries);
        // The default (RocksLevel) shred directory must exist on disk.
        assert!(Path::new(ledger_path.path())
            .join(Blockstore::blockstore_directory(
                &ShredStorageType::RocksLevel,
            ))
            .exists());
    }
    #[test]
    fn test_create_new_ledger_with_options_fifo() {
        solana_logger::setup();
        let mint_total = 1_000_000_000_000;
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(mint_total);
        let (ledger_path, _blockhash) = create_new_tmp_ledger_fifo_auto_delete!(&genesis_config);
        // Re-open the FIFO-backed ledger with matching column options.
        let blockstore = Blockstore::open_with_options(
            ledger_path.path(),
            BlockstoreOptions {
                column_options: LedgerColumnOptions {
                    shred_storage_type: ShredStorageType::RocksFifo(
                        BlockstoreRocksFifoOptions::default(),
                    ),
                    ..LedgerColumnOptions::default()
                },
                ..BlockstoreOptions::default()
            },
        )
        .unwrap();

        // Slot 0 of a fresh ledger must contain exactly the bootstrap ticks.
        let ticks = create_ticks(genesis_config.ticks_per_slot, 0, genesis_config.hash());
        let entries = blockstore.get_slot_entries(0, 0).unwrap();
        assert_eq!(ticks, entries);
        // The FIFO shred directory must exist on disk.
        assert!(Path::new(ledger_path.path())
            .join(Blockstore::blockstore_directory(
                &ShredStorageType::RocksFifo(BlockstoreRocksFifoOptions::default())
            ))
            .exists());
    }
    #[test]
    fn test_rocksdb_directory() {
        // Each shred storage backend maps to its own on-disk directory name.
        assert_eq!(
            Blockstore::blockstore_directory(&ShredStorageType::RocksLevel),
            BLOCKSTORE_DIRECTORY_ROCKS_LEVEL
        );
        assert_eq!(
            Blockstore::blockstore_directory(&ShredStorageType::RocksFifo(
                BlockstoreRocksFifoOptions::default()
            )),
            BLOCKSTORE_DIRECTORY_ROCKS_FIFO
        );
    }
    #[test]
    fn test_insert_get_bytes() {
        // Create enough entries to ensure there are at least two shreds created
        let num_entries = max_ticks_per_n_shreds(1, None) + 1;
        assert!(num_entries > 1);

        let (mut shreds, _) = make_slot_entries(0, 0, num_entries);

        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Insert last shred, test we can retrieve it
        let last_shred = shreds.pop().unwrap();
        assert!(last_shred.index() > 0);
        blockstore
            .insert_shreds(vec![last_shred.clone()], None, false)
            .unwrap();

        // Raw bytes stored under (slot, index) must round-trip through
        // deserialization back to the inserted shred.
        let serialized_shred = blockstore
            .data_shred_cf
            .get_bytes((0, last_shred.index() as u64))
            .unwrap()
            .unwrap();
        let deserialized_shred = Shred::new_from_serialized_shred(serialized_shred).unwrap();

        assert_eq!(last_shred, deserialized_shred);
    }
2019-02-18 19:49:43 -08:00
    #[test]
    fn test_write_entries() {
        solana_logger::setup();
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let ticks_per_slot = 10;
        let num_slots = 10;
        let mut ticks = vec![];
        let mut shreds_per_slot = vec![];

        // Write `ticks_per_slot` ticks into each of `num_slots` chained slots.
        for i in 0..num_slots {
            let mut new_ticks = create_ticks(ticks_per_slot, 0, Hash::default());
            let num_shreds = blockstore
                .write_entries(
                    i,
                    0,
                    0,
                    ticks_per_slot,
                    Some(i.saturating_sub(1)),
                    true,
                    &Arc::new(Keypair::new()),
                    new_ticks.clone(),
                    0,
                )
                .unwrap() as u64;
            shreds_per_slot.push(num_shreds);
            ticks.append(&mut new_ticks);
        }

        // Verify each slot's metadata: fully consumed, correctly chained to
        // its parent and child, and entries read back intact.
        for i in 0..num_slots {
            let meta = blockstore.meta(i).unwrap().unwrap();
            let num_shreds = shreds_per_slot[i as usize];
            assert_eq!(meta.consumed, num_shreds);
            assert_eq!(meta.received, num_shreds);
            assert_eq!(meta.last_index, Some(num_shreds - 1));
            if i == num_slots - 1 {
                assert!(meta.next_slots.is_empty());
            } else {
                assert_eq!(meta.next_slots, vec![i + 1]);
            }
            if i == 0 {
                // Slot 0 is recorded as its own parent.
                assert_eq!(meta.parent_slot, Some(0));
            } else {
                assert_eq!(meta.parent_slot, Some(i - 1));
            }

            assert_eq!(
                &ticks[(i * ticks_per_slot) as usize..((i + 1) * ticks_per_slot) as usize],
                &blockstore.get_slot_entries(i, 0).unwrap()[..]
            );
        }

        /*
            // Simulate writing to the end of a slot with existing ticks
            blockstore
                .write_entries(
                    num_slots,
                    ticks_per_slot - 1,
                    ticks_per_slot - 2,
                    ticks_per_slot,
                    &ticks[0..2],
                )
                .unwrap();

            let meta = blockstore.meta(num_slots).unwrap().unwrap();
            assert_eq!(meta.consumed, 0);
            // received shred was ticks_per_slot - 2, so received should be ticks_per_slot - 2 + 1
            assert_eq!(meta.received, ticks_per_slot - 1);
            // last shred index ticks_per_slot - 2 because that's the shred that made tick_height == ticks_per_slot
            // for the slot
            assert_eq!(meta.last_index, ticks_per_slot - 2);
            assert_eq!(meta.parent_slot, num_slots - 1);
            assert_eq!(meta.next_slots, vec![num_slots + 1]);
            assert_eq!(
                &ticks[0..1],
                &blockstore
                    .get_slot_entries(num_slots, ticks_per_slot - 2)
                    .unwrap()[..]
            );

            // We wrote two entries, the second should spill into slot num_slots + 1
            let meta = blockstore.meta(num_slots + 1).unwrap().unwrap();
            assert_eq!(meta.consumed, 1);
            assert_eq!(meta.received, 1);
            assert_eq!(meta.last_index, std::u64::MAX);
            assert_eq!(meta.parent_slot, num_slots);
            assert!(meta.next_slots.is_empty());

            assert_eq!(
                &ticks[1..2],
                &blockstore.get_slot_entries(num_slots + 1, 0).unwrap()[..]
            );
        */
    }
2020-04-24 15:04:23 -07:00
    #[test]
    fn test_put_get_simple() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Test meta column family
        let meta = SlotMeta::new(0, Some(1));
        blockstore.meta_cf.put(0, &meta).unwrap();
        let result = blockstore
            .meta_cf
            .get(0)
            .unwrap()
            .expect("Expected meta object to exist");

        assert_eq!(result, meta);

        // Test erasure column family
        let erasure = vec![1u8; 16];
        let erasure_key = (0, 0);
        blockstore
            .code_shred_cf
            .put_bytes(erasure_key, &erasure)
            .unwrap();

        let result = blockstore
            .code_shred_cf
            .get_bytes(erasure_key)
            .unwrap()
            .expect("Expected erasure object to exist");

        assert_eq!(result, erasure);

        // Test data column family
        let data = vec![2u8; 16];
        let data_key = (0, 0);
        blockstore.data_shred_cf.put_bytes(data_key, &data).unwrap();

        let result = blockstore
            .data_shred_cf
            .get_bytes(data_key)
            .unwrap()
            .expect("Expected data object to exist");

        assert_eq!(result, data);
    }
2018-11-15 15:53:31 -08:00
#[test]
fn test_read_shred_bytes() {
    let slot = 0;
    let (shreds, _) = make_slot_entries(slot, 0, 100);
    let num_shreds = shreds.len() as u64;
    // Keep a copy of every payload so reads can be compared byte-for-byte.
    let shred_payloads: Vec<_> = shreds.iter().map(Shred::payload).cloned().collect();

    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    blockstore.insert_shreds(shreds, None, false).unwrap();

    // Read a single shred into a large buffer.
    let mut buf = [0; 4096];
    let (_, bytes) = blockstore.get_data_shreds(slot, 0, 1, &mut buf).unwrap();
    assert_eq!(buf[..bytes], shred_payloads[0][..bytes]);

    // Read two shreds; both payloads should land back-to-back in the buffer.
    let (last_index, bytes2) = blockstore.get_data_shreds(slot, 0, 2, &mut buf).unwrap();
    assert_eq!(last_index, 1);
    assert!(bytes2 > bytes);
    {
        let first_payload = &buf[..bytes];
        assert_eq!(first_payload, &shred_payloads[0][..bytes]);

        let second_payload = &buf[bytes..bytes2];
        assert_eq!(second_payload, &shred_payloads[1][..bytes2 - bytes]);
    }

    // buf size part-way into shred[1], should just return shred[0]
    let mut buf = vec![0; bytes + 1];
    let (last_index, bytes3) = blockstore.get_data_shreds(slot, 0, 2, &mut buf).unwrap();
    assert_eq!(last_index, 0);
    assert_eq!(bytes3, bytes);

    // Even one byte short of two shreds only yields the first shred.
    let mut buf = vec![0; bytes2 - 1];
    let (last_index, bytes4) = blockstore.get_data_shreds(slot, 0, 2, &mut buf).unwrap();
    assert_eq!(last_index, 0);
    assert_eq!(bytes4, bytes);

    // Read the final shred of the slot.
    let mut buf = vec![0; bytes * 2];
    let (last_index, bytes6) = blockstore
        .get_data_shreds(slot, num_shreds - 1, num_shreds, &mut buf)
        .unwrap();
    assert_eq!(last_index, num_shreds - 1);
    {
        let payload = &buf[..bytes6];
        assert_eq!(payload, &shred_payloads[(num_shreds - 1) as usize][..bytes6]);
    }

    // Read out of range
    let (last_index, bytes6) = blockstore
        .get_data_shreds(slot, num_shreds, num_shreds + 2, &mut buf)
        .unwrap();
    assert_eq!(last_index, 0);
    assert_eq!(bytes6, 0);
}
2020-04-08 18:47:16 -07:00
#[test]
fn test_shred_cleanup_check() {
    let slot = 1;
    let (shreds, _) = make_slot_entries(slot, 0, 100);

    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    blockstore.insert_shreds(shreds, None, false).unwrap();

    // Before any purge, the slot's data shreds are readable.
    let mut buf = [0; 4096];
    assert!(blockstore.get_data_shreds(slot, 0, 1, &mut buf).is_ok());

    // Purge through the slot and advance the cleanup watermark.
    let max_purge_slot = 1;
    blockstore
        .run_purge(0, max_purge_slot, PurgeType::PrimaryIndex)
        .unwrap();
    *blockstore.lowest_cleanup_slot.write().unwrap() = max_purge_slot;

    // Reads at or below the cleanup watermark must now fail.
    let mut buf = [0; 4096];
    assert!(blockstore.get_data_shreds(slot, 0, 1, &mut buf).is_err());
}
2018-11-15 15:53:31 -08:00
#[test]
fn test_insert_data_shreds_basic() {
    // Create enough entries to ensure there are at least two shreds created
    let num_entries = max_ticks_per_n_shreds(1, None) + 1;
    assert!(num_entries > 1);

    let (mut shreds, entries) = make_slot_entries(0, 0, num_entries);
    let num_shreds = shreds.len() as u64;

    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    // Insert only the last shred; with the rest missing there is no
    // consecutive run starting at (slot 0, index 0), so no entries yet.
    assert!(shreds.len() > 1);
    let last_shred = shreds.pop().unwrap();
    blockstore
        .insert_shreds(vec![last_shred], None, false)
        .unwrap();
    assert!(blockstore.get_slot_entries(0, 0).unwrap().is_empty());

    let meta = blockstore
        .meta(0)
        .unwrap()
        .expect("Expected new metadata object to be created");
    assert!(meta.consumed == 0 && meta.received == num_shreds);

    // Insert the remaining shreds; the full entry set becomes readable.
    blockstore.insert_shreds(shreds, None, false).unwrap();
    assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), entries);

    let meta = blockstore
        .meta(0)
        .unwrap()
        .expect("Expected new metadata object to exist");
    assert_eq!(meta.consumed, num_shreds);
    assert_eq!(meta.received, num_shreds);
    assert_eq!(meta.parent_slot, Some(0));
    assert_eq!(meta.last_index, Some(num_shreds - 1));
    assert!(meta.next_slots.is_empty());
    assert!(meta.is_connected);
}
#[test]
fn test_insert_data_shreds_reverse() {
    let num_shreds = 10;
    let num_entries = max_ticks_per_n_shreds(num_shreds, None);
    let (mut shreds, entries) = make_slot_entries(0, 0, num_entries);
    let num_shreds = shreds.len() as u64;

    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    // Insert shreds one at a time from highest index to lowest; entries only
    // become readable once index 0 lands and the run is consecutive.
    for idx in (0..num_shreds).rev() {
        let shred = shreds.pop().unwrap();
        blockstore.insert_shreds(vec![shred], None, false).unwrap();
        let fetched = blockstore.get_slot_entries(0, 0).unwrap();

        let meta = blockstore
            .meta(0)
            .unwrap()
            .expect("Expected metadata object to exist");
        assert_eq!(meta.last_index, Some(num_shreds - 1));
        if idx == 0 {
            // Final (lowest-index) insert completes the slot.
            assert_eq!(meta.parent_slot, Some(0));
            assert_eq!(fetched, entries);
            assert!(meta.consumed == num_shreds as u64 && meta.received == num_shreds as u64);
        } else {
            // Still missing lower indices: nothing consumable yet.
            assert_eq!(fetched.len(), 0);
            assert!(meta.consumed == 0 && meta.received == num_shreds as u64);
        }
    }
}
2018-11-22 01:35:19 -08:00
2018-12-12 15:58:29 -08:00
#[test]
fn test_insert_slots() {
    // Exercise the shared helper in both non-bulk and bulk modes.
    for bulk in [false, true] {
        test_insert_data_shreds_slots(bulk);
    }
}
2019-09-03 21:32:51 -07:00
/*
#[ test ]
pub fn test_iteration_order ( ) {
let slot = 0 ;
2021-09-10 05:33:08 -07:00
let ledger_path = get_tmp_ledger_path_auto_delete! ( ) ;
let blockstore = Blockstore ::open ( ledger_path . path ( ) ) . unwrap ( ) ;
2018-11-22 01:35:19 -08:00
2021-09-10 05:33:08 -07:00
// Write entries
let num_entries = 8 ;
let entries = make_tiny_test_entries ( num_entries ) ;
let mut shreds = entries . to_single_entry_shreds ( ) ;
2018-11-22 01:35:19 -08:00
2021-09-10 05:33:08 -07:00
for ( i , b ) in shreds . iter_mut ( ) . enumerate ( ) {
b . set_index ( 1 < < ( i * 8 ) ) ;
b . set_slot ( 0 ) ;
}
2018-11-22 01:35:19 -08:00
2021-09-10 05:33:08 -07:00
blockstore
. write_shreds ( & shreds )
. expect ( " Expected successful write of shreds " ) ;
let mut db_iterator = blockstore
. db
. cursor ::< cf ::Data > ( )
. expect ( " Expected to be able to open database iterator " ) ;
db_iterator . seek ( ( slot , 1 ) ) ;
// Iterate through blockstore
for i in 0 .. num_entries {
assert! ( db_iterator . valid ( ) ) ;
let ( _ , current_index ) = db_iterator . key ( ) . expect ( " Expected a valid key " ) ;
assert_eq! ( current_index , ( 1 as u64 ) < < ( i * 8 ) ) ;
db_iterator . next ( ) ;
2018-11-22 01:35:19 -08:00
}
2021-09-10 05:33:08 -07:00
2018-11-22 01:35:19 -08:00
}
2019-09-03 21:32:51 -07:00
* /
2018-12-11 09:14:23 -08:00
2019-02-04 15:33:43 -08:00
#[test]
pub fn test_get_slot_entries1() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    let entries = create_ticks(8, 0, Hash::default());

    // First half of the entries goes in with default indexing.
    let first_half = entries_to_test_shreds(&entries[0..4], 1, 0, false, 0);
    blockstore
        .insert_shreds(first_half, None, false)
        .expect("Expected successful write of shreds");

    // Second half is re-indexed to start at shred index 8.
    let mut second_half = entries_to_test_shreds(&entries[4..], 1, 0, false, 0);
    for (i, shred) in second_half.iter_mut().enumerate() {
        shred.set_index(8 + i as u32);
    }
    blockstore
        .insert_shreds(second_half, None, false)
        .expect("Expected successful write of shreds");

    assert_eq!(
        blockstore.get_slot_entries(1, 0).unwrap()[2..4],
        entries[2..4],
    );
}
2019-08-21 20:07:51 -07:00
// This test seems to be unnecessary with introduction of data shreds. There are no
// guarantees that a particular shred index contains a complete entry
#[test]
#[ignore]
pub fn test_get_slot_entries2() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    // Write entries across several slots, assigning each shred a globally
    // increasing index, then read back the last entry of each slot.
    let num_slots = 5_u64;
    let mut index = 0;
    for slot in 0..num_slots {
        let entries = create_ticks(slot + 1, 0, Hash::default());
        let last_entry = entries.last().unwrap().clone();
        let mut shreds =
            entries_to_test_shreds(&entries, slot, slot.saturating_sub(1), false, 0);
        for shred in shreds.iter_mut() {
            shred.set_index(index);
            shred.set_slot(slot as u64);
            index += 1;
        }
        blockstore
            .insert_shreds(shreds, None, false)
            .expect("Expected successful write of shreds");
        assert_eq!(
            blockstore
                .get_slot_entries(slot, u64::from(index - 1))
                .unwrap(),
            vec![last_entry],
        );
    }
}
2019-03-17 18:48:23 -07:00
#[test]
pub fn test_get_slot_entries3() {
    // Test inserting/fetching shreds which contain multiple entries per shred
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    let num_slots = 5_u64;
    let shreds_per_slot = 5_u64;
    // Size entries_per_slot so each slot spans at least shreds_per_slot shreds.
    let entry_serialized_size =
        bincode::serialized_size(&create_ticks(1, 0, Hash::default())).unwrap();
    let entries_per_slot = (shreds_per_slot * PACKET_DATA_SIZE as u64) / entry_serialized_size;

    for slot in 0..num_slots {
        let entries = create_ticks(entries_per_slot, 0, Hash::default());
        let shreds = entries_to_test_shreds(&entries, slot, slot.saturating_sub(1), false, 0);
        assert!(shreds.len() as u64 >= shreds_per_slot);
        blockstore
            .insert_shreds(shreds, None, false)
            .expect("Expected successful write of shreds");
        assert_eq!(blockstore.get_slot_entries(slot, 0).unwrap(), entries);
    }
}
2018-12-19 16:11:47 -08:00
#[test]
pub fn test_insert_data_shreds_consecutive() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    // Create enough entries to ensure there are at least two shreds created
    let min_entries = max_ticks_per_n_shreds(1, None) + 1;
    for i in 0..4 {
        let slot = i;
        let parent_slot = if i == 0 { 0 } else { i - 1 };
        // Write entries
        let num_entries = min_entries * (i + 1);
        let (shreds, original_entries) = make_slot_entries(slot, parent_slot, num_entries);

        let num_shreds = shreds.len() as u64;
        assert!(num_shreds > 1);

        // Split the slot's shreds by index parity.
        let mut even_shreds = vec![];
        let mut odd_shreds = vec![];
        for (idx, shred) in shreds.into_iter().enumerate() {
            if idx % 2 == 0 {
                even_shreds.push(shred);
            } else {
                odd_shreds.push(shred);
            }
        }

        // Odd-index shreds alone never form a consecutive run from index 0.
        blockstore.insert_shreds(odd_shreds, None, false).unwrap();
        assert_eq!(blockstore.get_slot_entries(slot, 0).unwrap(), vec![]);

        let meta = blockstore.meta(slot).unwrap().unwrap();
        if num_shreds % 2 == 0 {
            // Last (odd) index was inserted, so it bounds `received` and sets
            // the known last index.
            assert_eq!(meta.received, num_shreds);
        } else {
            trace!("got here");
            assert_eq!(meta.received, num_shreds - 1);
        }
        assert_eq!(meta.consumed, 0);
        if num_shreds % 2 == 0 {
            assert_eq!(meta.last_index, Some(num_shreds - 1));
        } else {
            assert_eq!(meta.last_index, None);
        }

        // Filling in the even indices completes the slot.
        blockstore.insert_shreds(even_shreds, None, false).unwrap();
        assert_eq!(
            blockstore.get_slot_entries(slot, 0).unwrap(),
            original_entries,
        );
        let meta = blockstore.meta(slot).unwrap().unwrap();
        assert_eq!(meta.received, num_shreds);
        assert_eq!(meta.consumed, num_shreds);
        assert_eq!(meta.parent_slot, Some(parent_slot));
        assert_eq!(meta.last_index, Some(num_shreds - 1));
    }
}
2020-09-01 22:06:06 -07:00
#[test]
fn test_data_set_completed_on_insert() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let BlockstoreSignals { blockstore, .. } =
        Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();

    // Create enough entries to fill 2 shreds, only the later one is data complete
    let slot = 0;
    let num_entries = max_ticks_per_n_shreds(1, None) + 1;
    let entries = create_ticks(num_entries, slot, Hash::default());
    let shreds = entries_to_test_shreds(&entries, slot, 0, true, 0);
    let num_shreds = shreds.len();
    assert!(num_shreds > 1);

    // Without shred 0 the data set is incomplete: no notification.
    let (completed, _) = blockstore
        .insert_shreds(shreds[1..].to_vec(), None, false)
        .unwrap();
    assert!(completed.is_empty());

    // Inserting shred 0 closes the set and produces exactly one notification.
    let (completed, _) = blockstore
        .insert_shreds(vec![shreds[0].clone()], None, false)
        .unwrap();
    assert_eq!(
        completed,
        vec![CompletedDataSetInfo {
            slot,
            start_index: 0,
            end_index: num_shreds as u32 - 1
        }]
    );

    // Inserting shreds again doesn't trigger notification
    let (completed, _) = blockstore.insert_shreds(shreds, None, false).unwrap();
    assert!(completed.is_empty());
}
2019-02-07 15:10:54 -08:00
#[test]
pub fn test_new_shreds_signal() {
    // Initialize blockstore
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let BlockstoreSignals {
        blockstore,
        ledger_signal_receiver: recvr,
        ..
    } = Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();

    let entries_per_slot = 50;
    // Create entries for slot 0
    let (mut shreds, _) = make_slot_entries(0, 0, entries_per_slot);
    let shreds_per_slot = shreds.len() as u64;

    // Insert the second shred only; without the first there is no consecutive
    // run from (slot 0, index 0), so no signal should fire.
    blockstore
        .insert_shreds(vec![shreds.remove(1)], None, false)
        .unwrap();
    let timer = Duration::new(1, 0);
    assert!(recvr.recv_timeout(timer).is_err());

    // Insert the first shred: now a consecutive block exists and exactly one
    // update is signaled.
    blockstore
        .insert_shreds(vec![shreds.remove(0)], None, false)
        .unwrap();
    assert!(recvr.recv_timeout(timer).is_ok());
    assert!(recvr.try_recv().is_err());

    // Insert the rest of the ticks — again exactly one update.
    blockstore.insert_shreds(shreds, None, false).unwrap();
    assert!(recvr.recv_timeout(timer).is_ok());
    assert!(recvr.try_recv().is_err());

    // Create some other slots, and send batches of ticks for each slot such
    // that each slot is missing the shred at index == slot - 1. Thus, no
    // consecutive blocks will be formed.
    let num_slots = shreds_per_slot;
    let mut shreds = vec![];
    let mut missing_shreds = vec![];
    for slot in 1..num_slots + 1 {
        let (mut slot_shreds, _) = make_slot_entries(slot, slot - 1, entries_per_slot);
        let missing_shred = slot_shreds.remove(slot as usize - 1);
        shreds.extend(slot_shreds);
        missing_shreds.push(missing_shred);
    }
    // Should be no updates, since no new chains from block 0 were formed
    blockstore.insert_shreds(shreds, None, false).unwrap();
    assert!(recvr.recv_timeout(timer).is_err());

    // Insert one extra, non-contiguous shred per slot; still no updates.
    let shreds: Vec<_> = (1..num_slots + 1)
        .flat_map(|slot| {
            let (mut shred, _) = make_slot_entries(slot, slot - 1, 1);
            shred[0].set_index(2 * num_slots as u32);
            shred
        })
        .collect();
    blockstore.insert_shreds(shreds, None, false).unwrap();
    assert!(recvr.recv_timeout(timer).is_err());

    // For slots 1..num_slots/2, fill in the holes in one batch insertion,
    // so we should only get one signal
    let missing_shreds2: Vec<_> = missing_shreds.drain((num_slots / 2) as usize..).collect();
    blockstore
        .insert_shreds(missing_shreds, None, false)
        .unwrap();
    assert!(recvr.recv_timeout(timer).is_ok());
    assert!(recvr.try_recv().is_err());

    // Fill in the holes for each of the remaining slots, we should get a single update
    // for each
    blockstore
        .insert_shreds(missing_shreds2, None, false)
        .unwrap();
}
2019-05-09 14:10:04 -07:00
#[test]
pub fn test_completed_shreds_signal() {
    // Initialize blockstore
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let BlockstoreSignals {
        blockstore,
        completed_slots_receiver: recvr,
        ..
    } = Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();

    let entries_per_slot = 10;
    // Create shreds for slot 0
    let (mut shreds, _) = make_slot_entries(0, 0, entries_per_slot);
    let first_shred = shreds.remove(0);

    // With the first shred withheld the slot is incomplete: no signal.
    blockstore.insert_shreds(shreds, None, false).unwrap();
    assert!(recvr.try_recv().is_err());

    // Inserting the first shred completes the slot and signals it.
    blockstore
        .insert_shreds(vec![first_shred], None, false)
        .unwrap();
    assert_eq!(recvr.try_recv().unwrap(), vec![0]);
}
#[test]
pub fn test_completed_shreds_signal_orphans() {
    // Initialize blockstore
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let BlockstoreSignals {
        blockstore,
        completed_slots_receiver: recvr,
        ..
    } = Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();

    let entries_per_slot = 10;
    let slots = vec![2, 5, 10];
    let mut all_shreds = make_chaining_slot_entries(&slots[..], entries_per_slot);

    // Get the shreds for slot 10, chaining to slot 5
    let (mut orphan_child, _) = all_shreds.remove(2);
    // Get the shreds for slot 5 chaining to slot 2
    let (mut orphan_shreds, _) = all_shreds.remove(1);

    // Insert all but the first shred of slot 10: incomplete, no signal —
    // even though its parent is absent (orphan).
    let orphan_child0 = orphan_child.remove(0);
    blockstore.insert_shreds(orphan_child, None, false).unwrap();
    assert!(recvr.try_recv().is_err());

    // Completing slot 10 signals it, regardless of the missing parent.
    blockstore
        .insert_shreds(vec![orphan_child0], None, false)
        .unwrap();
    assert_eq!(recvr.try_recv().unwrap(), vec![slots[2]]);

    // Same dance for the orphan slot 5 itself.
    let orphan_shred0 = orphan_shreds.remove(0);
    blockstore
        .insert_shreds(orphan_shreds, None, false)
        .unwrap();
    assert!(recvr.try_recv().is_err());

    blockstore
        .insert_shreds(vec![orphan_shred0], None, false)
        .unwrap();
    assert_eq!(recvr.try_recv().unwrap(), vec![slots[1]]);
}
#[test]
pub fn test_completed_shreds_signal_many() {
    // Initialize blockstore
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let BlockstoreSignals {
        blockstore,
        completed_slots_receiver: recvr,
        ..
    } = Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();

    let entries_per_slot = 10;
    let mut slots = vec![2, 5, 10];
    let mut all_shreds = make_chaining_slot_entries(&slots[..], entries_per_slot);
    // Slot 4 chains to slot 1, which is never inserted — a disconnected slot.
    let disconnected_slot = 4;

    let (shreds0, _) = all_shreds.remove(0);
    let (shreds1, _) = all_shreds.remove(0);
    let (shreds2, _) = all_shreds.remove(0);
    let (shreds3, _) = make_slot_entries(disconnected_slot, 1, entries_per_slot);

    // Interleave all four slots' shreds in random order; one batched insert
    // should still report every completed slot.
    let mut all_shreds: Vec<_> = vec![shreds0, shreds1, shreds2, shreds3]
        .into_iter()
        .flatten()
        .collect();
    all_shreds.shuffle(&mut thread_rng());
    blockstore.insert_shreds(all_shreds, None, false).unwrap();

    let mut result = recvr.try_recv().unwrap();
    result.sort_unstable();
    slots.push(disconnected_slot);
    slots.sort_unstable();
    assert_eq!(result, slots);
}
2019-02-07 15:10:54 -08:00
#[test]
pub fn test_handle_chaining_basic() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    let entries_per_slot = 5;
    let num_slots = 3;
    // Construct the shreds
    let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
    let shreds_per_slot = shreds.len() / num_slots as usize;

    // 1) Write to the first slot
    let slot1_shreds: Vec<_> = shreds.drain(shreds_per_slot..2 * shreds_per_slot).collect();
    blockstore.insert_shreds(slot1_shreds, None, false).unwrap();
    let s1 = blockstore.meta(1).unwrap().unwrap();
    assert!(s1.next_slots.is_empty());
    // Slot 1 is not trunk because slot 0 hasn't been inserted yet
    assert!(!s1.is_connected);
    assert_eq!(s1.parent_slot, Some(0));
    assert_eq!(s1.last_index, Some(shreds_per_slot as u64 - 1));

    // 2) Write to the second slot
    let slot2_shreds: Vec<_> = shreds.drain(shreds_per_slot..2 * shreds_per_slot).collect();
    blockstore.insert_shreds(slot2_shreds, None, false).unwrap();
    let s2 = blockstore.meta(2).unwrap().unwrap();
    assert!(s2.next_slots.is_empty());
    // Slot 2 is not trunk because slot 0 hasn't been inserted yet
    assert!(!s2.is_connected);
    assert_eq!(s2.parent_slot, Some(1));
    assert_eq!(s2.last_index, Some(shreds_per_slot as u64 - 1));

    // Check the first slot again, it should chain to the second slot,
    // but still isn't part of the trunk
    let s1 = blockstore.meta(1).unwrap().unwrap();
    assert_eq!(s1.next_slots, vec![2]);
    assert!(!s1.is_connected);
    assert_eq!(s1.parent_slot, Some(0));
    assert_eq!(s1.last_index, Some(shreds_per_slot as u64 - 1));

    // 3) Write to the zeroth slot, check that every slot
    // is now part of the trunk
    blockstore.insert_shreds(shreds, None, false).unwrap();
    for slot in 0..3 {
        let meta = blockstore.meta(slot).unwrap().unwrap();
        // The last slot will not chain to any other slots
        if slot != 2 {
            assert_eq!(meta.next_slots, vec![slot + 1]);
        }
        let expected_parent = if slot == 0 { 0 } else { slot - 1 };
        assert_eq!(meta.parent_slot, Some(expected_parent));
        assert_eq!(meta.last_index, Some(shreds_per_slot as u64 - 1));
        assert!(meta.is_connected);
    }
}
#[test]
pub fn test_handle_chaining_missing_slots() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    let num_slots = 30;
    let entries_per_slot = 5;

    // Split the shreds: odd slots are inserted first, even slots are held back.
    let mut inserted_shreds = vec![];
    let mut deferred_shreds = vec![];
    let mut shreds_per_slot = 2;
    for slot in 0..num_slots {
        // Slot 0 is its own parent; every other slot chains to its predecessor.
        let parent_slot = slot.saturating_sub(1);
        let (slot_shreds, _) = make_slot_entries(slot, parent_slot, entries_per_slot);
        shreds_per_slot = slot_shreds.len();

        if slot % 2 == 1 {
            inserted_shreds.extend(slot_shreds);
        } else {
            deferred_shreds.extend(slot_shreds);
        }
    }

    // Write the shreds for every other slot
    blockstore.insert_shreds(inserted_shreds, None, false).unwrap();

    // Check metadata
    for i in 0..num_slots {
        // If "i" is the index of a slot we just inserted, then next_slots should be empty
        // for slot "i" because no slots chain to that slot, because slot i + 1 is missing.
        // However, if it's a slot we haven't inserted, aka one of the gaps, then one of the
        // slots we just inserted will chain to that gap, so next_slots for that orphan slot
        // won't be empty; its parent is unknown, so parent_slot is None.
        let meta = blockstore.meta(i).unwrap().unwrap();
        if i % 2 == 0 {
            assert_eq!(meta.next_slots, vec![i + 1]);
            assert_eq!(meta.parent_slot, None);
        } else {
            assert!(meta.next_slots.is_empty());
            assert_eq!(meta.parent_slot, Some(i - 1));
        }

        // Only slot 0 can be connected before the gaps are filled.
        if i == 0 {
            assert!(meta.is_connected);
        } else {
            assert!(!meta.is_connected);
        }
    }

    // Write the shreds for the other half of the slots that we didn't insert earlier
    blockstore.insert_shreds(deferred_shreds, None, false).unwrap();

    for i in 0..num_slots {
        // Check that all the slots chain correctly once the missing slots
        // have been filled
        let meta = blockstore.meta(i).unwrap().unwrap();
        if i != num_slots - 1 {
            assert_eq!(meta.next_slots, vec![i + 1]);
        } else {
            assert!(meta.next_slots.is_empty());
        }

        if i == 0 {
            assert_eq!(meta.parent_slot, Some(0));
        } else {
            assert_eq!(meta.parent_slot, Some(i - 1));
        }
        assert_eq!(meta.last_index, Some(shreds_per_slot as u64 - 1));
        assert!(meta.is_connected);
    }
}
#[test]
#[allow(clippy::cognitive_complexity)]
pub fn test_forward_chaining_is_connected() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    let num_slots = 15;
    // Create enough entries to ensure there are at least two shreds created
    let entries_per_slot = max_ticks_per_n_shreds(1, None) + 1;
    assert!(entries_per_slot > 1);

    let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
    let shreds_per_slot = shreds.len() / num_slots as usize;
    assert!(shreds_per_slot > 1);

    // Insert the slots, withholding the first shred of every 3rd slot so those
    // slots start with a gap.
    let mut held_back_shreds = vec![];
    for slot in 0..num_slots {
        let mut slot_shreds = shreds.drain(..shreds_per_slot).collect_vec();
        if slot % 3 == 0 {
            held_back_shreds.push(slot_shreds.remove(0));
        }
        blockstore.insert_shreds(slot_shreds, None, false).unwrap();
    }

    // Check metadata
    for i in 0..num_slots {
        let meta = blockstore.meta(i).unwrap().unwrap();
        // The last slot will not chain to any other slots
        if i != num_slots - 1 {
            assert_eq!(meta.next_slots, vec![i + 1]);
        } else {
            assert!(meta.next_slots.is_empty());
        }

        if i == 0 {
            assert_eq!(meta.parent_slot, Some(0));
        } else {
            assert_eq!(meta.parent_slot, Some(i - 1));
        }

        assert_eq!(meta.last_index, Some(shreds_per_slot as u64 - 1));

        // Other than slot 0, no slots should be part of the trunk
        if i == 0 {
            assert!(meta.is_connected);
        } else {
            assert!(!meta.is_connected);
        }
    }

    // Iteratively finish every 3rd slot, and check that all slots up to and including
    // slot_index + 3 become part of the trunk
    for slot_index in 0..num_slots {
        if slot_index % 3 != 0 {
            continue;
        }
        let missing_shred = held_back_shreds.remove(0);
        blockstore
            .insert_shreds(vec![missing_shred], None, false)
            .unwrap();

        for i in 0..num_slots {
            let meta = blockstore.meta(i).unwrap().unwrap();
            if i != num_slots - 1 {
                assert_eq!(meta.next_slots, vec![i + 1]);
            } else {
                assert!(meta.next_slots.is_empty());
            }
            // Completing slot `slot_index` connects everything through the next
            // gap, i.e. through slot_index + 3.
            if i <= slot_index + 3 {
                assert!(meta.is_connected);
            } else {
                assert!(!meta.is_connected);
            }

            if i == 0 {
                assert_eq!(meta.parent_slot, Some(0));
            } else {
                assert_eq!(meta.parent_slot, Some(i - 1));
            }

            assert_eq!(meta.last_index, Some(shreds_per_slot as u64 - 1));
        }
    }
}
2019-09-03 21:32:51 -07:00
/*
#[ test ]
pub fn test_chaining_tree ( ) {
2021-09-10 05:33:08 -07:00
let ledger_path = get_tmp_ledger_path_auto_delete! ( ) ;
let blockstore = Blockstore ::open ( ledger_path . path ( ) ) . unwrap ( ) ;
let num_tree_levels = 6 ;
assert! ( num_tree_levels > 1 ) ;
let branching_factor : u64 = 4 ;
// Number of slots that will be in the tree
let num_slots = ( branching_factor . pow ( num_tree_levels ) - 1 ) / ( branching_factor - 1 ) ;
let erasure_config = ErasureConfig ::default ( ) ;
let entries_per_slot = erasure_config . num_data ( ) as u64 ;
assert! ( entries_per_slot > 1 ) ;
2019-02-07 15:10:54 -08:00
2021-09-10 05:33:08 -07:00
let ( mut shreds , _ ) = make_many_slot_entries ( 0 , num_slots , entries_per_slot ) ;
2019-09-03 21:32:51 -07:00
2021-09-10 05:33:08 -07:00
// Insert tree one slot at a time in a random order
let mut slots : Vec < _ > = ( 0 .. num_slots ) . collect ( ) ;
// Get shreds for the slot
slots . shuffle ( & mut thread_rng ( ) ) ;
for slot in slots {
// Get shreds for the slot "slot"
let slot_shreds = & mut shreds
[ ( slot * entries_per_slot ) as usize .. ( ( slot + 1 ) * entries_per_slot ) as usize ] ;
for shred in slot_shreds . iter_mut ( ) {
// Get the parent slot of the slot in the tree
2019-02-13 15:01:56 -08:00
let slot_parent = {
2019-03-05 14:18:29 -08:00
if slot = = 0 {
2019-02-13 15:01:56 -08:00
0
} else {
2019-03-05 14:18:29 -08:00
( slot - 1 ) / branching_factor
2019-02-13 15:01:56 -08:00
}
} ;
2021-09-10 05:33:08 -07:00
shred . set_parent ( slot_parent ) ;
}
2019-02-13 15:01:56 -08:00
2021-09-10 05:33:08 -07:00
let shared_shreds : Vec < _ > = slot_shreds
. iter ( )
. cloned ( )
. map ( | shred | Arc ::new ( RwLock ::new ( shred ) ) )
. collect ( ) ;
let mut coding_generator = CodingGenerator ::new_from_config ( & erasure_config ) ;
let coding_shreds = coding_generator . next ( & shared_shreds ) ;
assert_eq! ( coding_shreds . len ( ) , erasure_config . num_coding ( ) ) ;
let mut rng = thread_rng ( ) ;
// Randomly pick whether to insert erasure or coding shreds first
if rng . gen_bool ( 0.5 ) {
blockstore . write_shreds ( slot_shreds ) . unwrap ( ) ;
blockstore . put_shared_coding_shreds ( & coding_shreds ) . unwrap ( ) ;
} else {
blockstore . put_shared_coding_shreds ( & coding_shreds ) . unwrap ( ) ;
blockstore . write_shreds ( slot_shreds ) . unwrap ( ) ;
}
}
2019-02-13 15:01:56 -08:00
2021-09-10 05:33:08 -07:00
// Make sure everything chains correctly
let last_level =
( branching_factor . pow ( num_tree_levels - 1 ) - 1 ) / ( branching_factor - 1 ) ;
for slot in 0 .. num_slots {
let slot_meta = blockstore . meta ( slot ) . unwrap ( ) . unwrap ( ) ;
assert_eq! ( slot_meta . consumed , entries_per_slot ) ;
assert_eq! ( slot_meta . received , entries_per_slot ) ;
assert! ( slot_meta . is_connected ) ;
let slot_parent = {
if slot = = 0 {
0
2019-02-13 15:01:56 -08:00
} else {
2021-09-10 05:33:08 -07:00
( slot - 1 ) / branching_factor
2019-02-13 15:01:56 -08:00
}
2021-09-10 05:33:08 -07:00
} ;
2021-12-14 10:57:11 -08:00
assert_eq! ( slot_meta . parent_slot , Some ( slot_parent ) ) ;
2021-09-10 05:33:08 -07:00
let expected_children : HashSet < _ > = {
if slot > = last_level {
HashSet ::new ( )
} else {
let first_child_slot = min ( num_slots - 1 , slot * branching_factor + 1 ) ;
let last_child_slot = min ( num_slots - 1 , ( slot + 1 ) * branching_factor ) ;
( first_child_slot .. last_child_slot + 1 ) . collect ( )
}
} ;
2019-09-03 21:32:51 -07:00
2021-09-10 05:33:08 -07:00
let result : HashSet < _ > = slot_meta . next_slots . iter ( ) . cloned ( ) . collect ( ) ;
if expected_children . len ( ) ! = 0 {
assert_eq! ( slot_meta . next_slots . len ( ) , branching_factor as usize ) ;
} else {
assert_eq! ( slot_meta . next_slots . len ( ) , 0 ) ;
}
assert_eq! ( expected_children , result ) ;
2019-02-13 15:01:56 -08:00
}
2019-03-29 16:07:24 -07:00
2021-09-10 05:33:08 -07:00
// No orphan slots should exist
assert! ( blockstore . orphans_cf . is_empty ( ) . unwrap ( ) )
2019-02-13 15:01:56 -08:00
}
2019-09-03 21:32:51 -07:00
* /
2019-02-07 15:10:54 -08:00
#[test]
pub fn test_get_slots_since() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    // Unknown slot: the result is empty.
    assert!(blockstore.get_slots_since(&[0]).unwrap().is_empty());

    let mut meta0 = SlotMeta::new(0, Some(0));
    blockstore.meta_cf.put(0, &meta0).unwrap();

    // Slot exists but chains to nothing yet.
    let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![])].into_iter().collect();
    assert_eq!(blockstore.get_slots_since(&[0]).unwrap(), expected);

    // Slot now chains to two children; querying an unknown slot (1) alongside
    // a known one changes nothing.
    meta0.next_slots = vec![1, 2];
    blockstore.meta_cf.put(0, &meta0).unwrap();
    let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![1, 2])].into_iter().collect();
    assert_eq!(blockstore.get_slots_since(&[0]).unwrap(), expected);
    assert_eq!(blockstore.get_slots_since(&[0, 1]).unwrap(), expected);

    // Add a second slot with children; both known slots are reported.
    let mut meta3 = SlotMeta::new(3, Some(1));
    meta3.next_slots = vec![10, 5];
    blockstore.meta_cf.put(3, &meta3).unwrap();
    let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![1, 2]), (3, vec![10, 5])]
        .into_iter()
        .collect();
    assert_eq!(blockstore.get_slots_since(&[0, 1, 3]).unwrap(), expected);
}
2019-03-29 16:07:24 -07:00
#[test]
fn test_orphans() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    // Create shreds and entries
    let entries_per_slot = 1;
    let (mut shreds, _) = make_many_slot_entries(0, 3, entries_per_slot);
    let shreds_per_slot = shreds.len() / 3;

    // Write slot 2, which chains to slot 1. We're missing slot 0,
    // so slot 1 is the orphan
    let slot_2_shreds = shreds.drain((shreds_per_slot * 2)..).collect_vec();
    blockstore.insert_shreds(slot_2_shreds, None, false).unwrap();
    let meta = blockstore
        .meta(1)
        .expect("Expect database get to succeed")
        .unwrap();
    assert!(is_orphan(&meta));
    assert_eq!(
        blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(),
        vec![1]
    );

    // Write slot 1 which chains to slot 0, so now slot 0 is the
    // orphan, and slot 1 is no longer the orphan.
    let slot_1_shreds = shreds.drain(shreds_per_slot..).collect_vec();
    blockstore.insert_shreds(slot_1_shreds, None, false).unwrap();
    let meta = blockstore
        .meta(1)
        .expect("Expect database get to succeed")
        .unwrap();
    assert!(!is_orphan(&meta));
    let meta = blockstore
        .meta(0)
        .expect("Expect database get to succeed")
        .unwrap();
    assert!(is_orphan(&meta));
    assert_eq!(
        blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(),
        vec![0]
    );

    // Write some slot that also chains to existing slots and orphan,
    // nothing should change
    let (shred4, _) = make_slot_entries(4, 0, 1);
    let (shred5, _) = make_slot_entries(5, 1, 1);
    blockstore.insert_shreds(shred4, None, false).unwrap();
    blockstore.insert_shreds(shred5, None, false).unwrap();
    assert_eq!(
        blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(),
        vec![0]
    );

    // Write zeroth slot, no more orphans
    blockstore.insert_shreds(shreds, None, false).unwrap();
    for slot in 0..3 {
        let meta = blockstore
            .meta(slot)
            .expect("Expect database get to succeed")
            .unwrap();
        assert!(!is_orphan(&meta));
    }

    // Orphans cf is empty
    assert!(blockstore.orphans_cf.is_empty().unwrap());
}
2021-09-10 05:33:08 -07:00
fn test_insert_data_shreds_slots ( should_bulk_write : bool ) {
let ledger_path = get_tmp_ledger_path_auto_delete! ( ) ;
let blockstore = Blockstore ::open ( ledger_path . path ( ) ) . unwrap ( ) ;
2019-02-07 15:10:54 -08:00
2021-09-10 05:33:08 -07:00
// Create shreds and entries
let num_entries = 20_ u64 ;
let mut entries = vec! [ ] ;
let mut shreds = vec! [ ] ;
let mut num_shreds_per_slot = 0 ;
for slot in 0 .. num_entries {
let parent_slot = {
if slot = = 0 {
0
} else {
slot - 1
2019-02-07 15:10:54 -08:00
}
2021-09-10 05:33:08 -07:00
} ;
let ( mut shred , entry ) = make_slot_entries ( slot , parent_slot , 1 ) ;
num_shreds_per_slot = shred . len ( ) as u64 ;
shred
. iter_mut ( )
. enumerate ( )
. for_each ( | ( _ , shred ) | shred . set_index ( 0 ) ) ;
shreds . extend ( shred ) ;
entries . extend ( entry ) ;
}
let num_shreds = shreds . len ( ) ;
// Write shreds to the database
if should_bulk_write {
blockstore . insert_shreds ( shreds , None , false ) . unwrap ( ) ;
} else {
for _ in 0 .. num_shreds {
let shred = shreds . remove ( 0 ) ;
blockstore . insert_shreds ( vec! [ shred ] , None , false ) . unwrap ( ) ;
2019-02-07 15:10:54 -08:00
}
2021-09-10 05:33:08 -07:00
}
2019-02-07 15:10:54 -08:00
2021-09-10 05:33:08 -07:00
for i in 0 .. num_entries - 1 {
assert_eq! (
blockstore . get_slot_entries ( i , 0 ) . unwrap ( ) [ 0 ] ,
entries [ i as usize ]
) ;
2019-02-07 15:10:54 -08:00
2021-09-10 05:33:08 -07:00
let meta = blockstore . meta ( i ) . unwrap ( ) . unwrap ( ) ;
assert_eq! ( meta . received , 1 ) ;
2021-12-11 06:47:20 -08:00
assert_eq! ( meta . last_index , Some ( 0 ) ) ;
2021-09-10 05:33:08 -07:00
if i ! = 0 {
2021-12-14 10:57:11 -08:00
assert_eq! ( meta . parent_slot , Some ( i - 1 ) ) ;
2021-09-10 05:33:08 -07:00
assert_eq! ( meta . consumed , 1 ) ;
} else {
2021-12-14 10:57:11 -08:00
assert_eq! ( meta . parent_slot , Some ( 0 ) ) ;
2021-09-10 05:33:08 -07:00
assert_eq! ( meta . consumed , num_shreds_per_slot ) ;
2019-02-07 15:10:54 -08:00
}
}
}
2019-02-12 19:54:18 -08:00
2019-03-27 23:55:51 -07:00
#[test]
fn test_find_missing_data_indexes() {
    let slot = 0;
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    // Write entries spaced `gap` indexes apart.
    let gap: u64 = 10;
    assert!(gap > 3);
    // Create enough entries to ensure there are at least two shreds created
    let num_entries = max_ticks_per_n_shreds(1, None) + 1;
    let entries = create_ticks(num_entries, 0, Hash::default());
    let mut shreds = entries_to_test_shreds(&entries, slot, 0, true, 0);
    let num_shreds = shreds.len();
    assert!(num_shreds > 1);
    for (i, shred) in shreds.iter_mut().enumerate() {
        shred.set_index(i as u32 * gap as u32);
        shred.set_slot(slot);
    }
    blockstore.insert_shreds(shreds, None, false).unwrap();

    // Index of the first shred is 0, index of the second shred is "gap".
    // Thus, the missing indexes should then be [1, gap - 1] for the input index
    // range of [0, gap)
    let expected: Vec<u64> = (1..gap).collect();
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, 0, 0, gap, gap as usize),
        expected
    );
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, 0, 1, gap, (gap - 1) as usize),
        expected,
    );
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, 0, 0, gap - 1, (gap - 1) as usize),
        &expected[..expected.len() - 1],
    );
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, 0, gap - 2, gap, gap as usize),
        vec![gap - 2, gap - 1],
    );
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, 0, gap - 2, gap, 1),
        vec![gap - 2],
    );
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, 0, 0, gap, 1),
        vec![1],
    );

    // Test with a range that encompasses a shred with index == gap which was
    // already inserted.
    let mut expected: Vec<u64> = (1..gap).collect();
    expected.push(gap + 1);
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, 0, 0, gap + 2, (gap + 2) as usize),
        expected,
    );
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, 0, 0, gap + 2, (gap - 1) as usize),
        &expected[..expected.len() - 1],
    );

    // Exhaustively check every sub-range of whole gaps.
    for i in 0..num_shreds as u64 {
        for j in 0..i {
            // Every index strictly between consecutive multiples of `gap` is missing.
            let expected: Vec<u64> = (j..i)
                .flat_map(|k| (k * gap + 1)..((k + 1) * gap))
                .collect();
            assert_eq!(
                blockstore.find_missing_data_indexes(
                    slot,
                    0,
                    j * gap,
                    i * gap,
                    ((i - j) * gap) as usize
                ),
                expected,
            );
        }
    }
}
2019-11-07 11:08:09 -08:00
#[test]
fn test_find_missing_data_indexes_timeout() {
    let slot = 0;
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    // Write 64 data shreds spaced `gap` indexes apart.
    let gap: u64 = 10;
    let shreds: Vec<_> = (0..64)
        .map(|i| {
            Shred::new_from_data(
                slot,
                (i * gap) as u32,
                0,
                &[],
                ShredFlags::empty(),
                i as u8,
                0,
                (i * gap) as u32,
            )
        })
        .collect();
    blockstore.insert_shreds(shreds, None, false).unwrap();

    let no_results: Vec<u64> = vec![];
    // A first-timestamp of "now" means nothing is considered missing yet.
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, timestamp(), 0, 50, 1),
        no_results
    );
    // Backdating the first-timestamp makes the gap visible.
    let expected: Vec<_> = (1..=9).collect();
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, timestamp() - 400, 0, 50, 9),
        expected
    );
}
2019-03-27 23:55:51 -07:00
#[test]
fn test_find_missing_data_indexes_sanity() {
    let slot = 0;
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    // Early exit conditions: empty range, inverted range, zero max.
    let no_results: Vec<u64> = vec![];
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, 0, 0, 0, 1),
        no_results
    );
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, 0, 5, 5, 1),
        no_results
    );
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, 0, 4, 3, 1),
        no_results
    );
    assert_eq!(
        blockstore.find_missing_data_indexes(slot, 0, 1, 2, 0),
        no_results
    );

    // Keep only two shreds and park them at indexes ONE and OTHER.
    let entries = create_ticks(100, 0, Hash::default());
    let mut shreds = entries_to_test_shreds(&entries, slot, 0, true, 0);
    assert!(shreds.len() > 2);
    shreds.truncate(2);

    const ONE: u64 = 1;
    const OTHER: u64 = 4;
    shreds[0].set_index(ONE as u32);
    shreds[1].set_index(OTHER as u32);

    // Insert one shred at index = first_index
    blockstore.insert_shreds(shreds, None, false).unwrap();

    const STARTS: u64 = OTHER * 2;
    const END: u64 = OTHER * 3;
    const MAX: usize = 10;
    // For every start, everything in [start, END) except the two inserted
    // indexes should be reported missing.
    for start in 0..STARTS {
        let result = blockstore.find_missing_data_indexes(
            slot, 0, start, // start
            END,  // end
            MAX,  // max
        );
        let expected: Vec<u64> = (start..END)
            .filter(|&i| i != ONE && i != OTHER)
            .collect();
        assert_eq!(result, expected);
    }
}
#[test]
pub fn test_no_missing_shred_indexes() {
    let slot = 0;
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    // Write a contiguous run of shreds.
    let num_entries = 10;
    let entries = create_ticks(num_entries, 0, Hash::default());
    let shreds = entries_to_test_shreds(&entries, slot, 0, true, 0);
    let num_shreds = shreds.len();
    blockstore.insert_shreds(shreds, None, false).unwrap();

    // With no gaps, every sub-range reports nothing missing.
    let no_gaps: Vec<u64> = vec![];
    for end in 0..num_shreds as u64 {
        for start in 0..end {
            assert_eq!(
                blockstore.find_missing_data_indexes(slot, 0, start, end, (end - start) as usize),
                no_gaps
            );
        }
    }
}
2019-04-25 00:04:49 -07:00
/// Exercises `Blockstore::should_insert_data_shred` across three scenarios:
/// empty (data-less) shreds are accepted, an "is_last" shred with an index
/// below the already-received max is rejected (and flags a duplicate), and a
/// shred with index beyond the recorded `last_index` is rejected.
#[test]
pub fn test_should_insert_data_shred() {
    solana_logger::setup();
    let (mut shreds, _) = make_slot_entries(0, 0, 200);
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    let last_root = RwLock::new(0);

    // Insert the first 5 shreds, we don't have a "is_last" shred yet
    blockstore
        .insert_shreds(shreds[0..5].to_vec(), None, false)
        .unwrap();

    let slot_meta = blockstore.meta(0).unwrap().unwrap();
    let shred5 = shreds[5].clone();

    // Ensure that an empty shred (one with no data) would get inserted. Such shreds
    // may be used as signals (broadcast does so to indicate a slot was interrupted)
    // Reuse shred5's header values to avoid a false negative result
    let empty_shred = Shred::new_from_data(
        shred5.slot(),
        shred5.index(),
        {
            let parent_offset = shred5.slot() - shred5.parent().unwrap();
            parent_offset as u16
        },
        &[], // data
        ShredFlags::LAST_SHRED_IN_SLOT,
        0, // reference_tick
        shred5.version(),
        shred5.fec_set_index(),
    );
    assert!(blockstore.should_insert_data_shred(
        &empty_shred,
        &slot_meta,
        &HashMap::new(),
        &last_root,
        None,
        ShredSource::Repaired,
    ));

    // Trying to insert another "is_last" shred with index < the received index should fail
    // skip over shred 7
    blockstore
        .insert_shreds(shreds[8..9].to_vec(), None, false)
        .unwrap();
    let slot_meta = blockstore.meta(0).unwrap().unwrap();
    assert_eq!(slot_meta.received, 9);
    let shred7 = {
        if shreds[7].is_data() {
            shreds[7].set_last_in_slot();
            shreds[7].clone()
        } else {
            panic!("Shred in unexpected format")
        }
    };
    assert!(!blockstore.should_insert_data_shred(
        &shred7,
        &slot_meta,
        &HashMap::new(),
        &last_root,
        None,
        ShredSource::Repaired,
    ));
    // The rejected "is_last" shred above is evidence of a duplicate slot.
    assert!(blockstore.has_duplicate_shreds_in_slot(0));

    // Insert all pending shreds
    let mut shred8 = shreds[8].clone();
    blockstore.insert_shreds(shreds, None, false).unwrap();
    let slot_meta = blockstore.meta(0).unwrap().unwrap();

    // Trying to insert a shred with index > the "is_last" shred should fail.
    // (Bug fix: the original mutated shred8's *slot* via set_slot and then
    // re-asserted on shred7, so this scenario was never actually exercised.)
    if shred8.is_data() {
        shred8.set_index((slot_meta.last_index.unwrap() + 1) as u32);
    } else {
        panic!("Shred in unexpected format")
    }
    assert!(!blockstore.should_insert_data_shred(
        &shred8,
        &slot_meta,
        &HashMap::new(),
        &last_root,
        None,
        ShredSource::Repaired,
    ));
}
#[test]
pub fn test_is_data_shred_present() {
    let (shreds, _) = make_slot_entries(0, 0, 200);
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    let index_cf = &blockstore.index_cf;

    blockstore
        .insert_shreds(shreds[0..5].to_vec(), None, false)
        .unwrap();

    // A shred below `slot_meta.consumed` is reported as already present.
    let slot_meta = blockstore.meta(0).unwrap().unwrap();
    let index = index_cf.get(0).unwrap().unwrap();
    assert_eq!(slot_meta.consumed, 5);
    assert!(Blockstore::is_data_shred_present(
        &shreds[1],
        &slot_meta,
        index.data(),
    ));

    // A shred past the consumed range is present once it has been inserted.
    blockstore
        .insert_shreds(shreds[6..7].to_vec(), None, false)
        .unwrap();
    let slot_meta = blockstore.meta(0).unwrap().unwrap();
    let index = index_cf.get(0).unwrap().unwrap();
    assert!(Blockstore::is_data_shred_present(
        &shreds[6],
        &slot_meta,
        index.data()
    ));
}
2020-12-09 23:14:31 -08:00
#[test]
pub fn test_check_insert_coding_shred() {
    use std::sync::atomic::{AtomicUsize, Ordering};

    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    let slot = 1;
    let coding_shred = Shred::new_from_parity_shard(
        slot,
        11,  // index
        &[], // parity_shard
        11,  // fec_set_index
        11,  // num_data_shreds
        11,  // num_coding_shreds
        8,   // position
        0,   // version
    );

    let mut erasure_metas = HashMap::new();
    let mut index_working_set = HashMap::new();
    let mut just_received_shreds = HashMap::new();
    let mut write_batch = blockstore.db.batch().unwrap();
    let mut index_meta_time = 0;

    // First insertion succeeds; the duplicate callback must not fire.
    assert!(blockstore.check_insert_coding_shred(
        coding_shred.clone(),
        &mut erasure_metas,
        &mut index_working_set,
        &mut write_batch,
        &mut just_received_shreds,
        &mut index_meta_time,
        &|_shred| {
            panic!("no dupes");
        },
        false,
        ShredSource::Turbine,
        &mut BlockstoreInsertionMetrics::default(),
    ));

    // insert again fails on dupe
    let dupe_count = AtomicUsize::new(0);
    assert!(!blockstore.check_insert_coding_shred(
        coding_shred,
        &mut erasure_metas,
        &mut index_working_set,
        &mut write_batch,
        &mut just_received_shreds,
        &mut index_meta_time,
        &|_shred| {
            dupe_count.fetch_add(1, Ordering::Relaxed);
        },
        false,
        ShredSource::Turbine,
        &mut BlockstoreInsertionMetrics::default(),
    ));
    assert_eq!(dupe_count.load(Ordering::Relaxed), 1);
}
2019-09-04 17:14:42 -07:00
#[ test ]
pub fn test_should_insert_coding_shred ( ) {
2021-09-10 05:33:08 -07:00
let ledger_path = get_tmp_ledger_path_auto_delete! ( ) ;
let blockstore = Blockstore ::open ( ledger_path . path ( ) ) . unwrap ( ) ;
let last_root = RwLock ::new ( 0 ) ;
let slot = 1 ;
2022-04-27 11:04:10 -07:00
let mut coding_shred = Shred ::new_from_parity_shard (
slot ,
11 , // index
& [ ] , // parity_shard
11 , // fec_set_index
11 , // num_data_shreds
11 , // num_coding_shreds
8 , // position
0 , // version
2021-12-05 06:42:09 -08:00
) ;
2021-09-10 05:33:08 -07:00
// Insert a good coding shred
assert! ( Blockstore ::should_insert_coding_shred (
& coding_shred ,
& last_root
) ) ;
// Insertion should succeed
blockstore
. insert_shreds ( vec! [ coding_shred . clone ( ) ] , None , false )
. unwrap ( ) ;
// Trying to insert the same shred again should pass since this doesn't check for
// duplicate index
2019-09-04 17:14:42 -07:00
{
2021-09-10 05:33:08 -07:00
assert! ( Blockstore ::should_insert_coding_shred (
& coding_shred ,
& last_root
) ) ;
}
// Establish a baseline that works
2022-04-25 05:43:22 -07:00
coding_shred . set_index ( coding_shred . index ( ) + 1 ) ;
assert! ( Blockstore ::should_insert_coding_shred (
& coding_shred ,
& last_root
) ) ;
2019-09-04 17:14:42 -07:00
2021-09-10 05:33:08 -07:00
// Trying to insert a shred with index < position should fail
{
2022-04-25 05:43:22 -07:00
let mut coding_shred = coding_shred . clone ( ) ;
2021-12-10 12:08:04 -08:00
let index = coding_shred . index ( ) - coding_shred . fec_set_index ( ) - 1 ;
2021-09-10 05:33:08 -07:00
coding_shred . set_index ( index as u32 ) ;
2019-09-04 17:14:42 -07:00
2021-09-10 05:33:08 -07:00
assert! ( ! Blockstore ::should_insert_coding_shred (
& coding_shred ,
& last_root
) ) ;
}
2019-09-04 17:14:42 -07:00
2021-09-10 05:33:08 -07:00
// Trying to insert value into slot <= than last root should fail
{
2022-04-25 05:43:22 -07:00
let mut coding_shred = coding_shred . clone ( ) ;
2021-09-10 05:33:08 -07:00
coding_shred . set_slot ( * last_root . read ( ) . unwrap ( ) ) ;
assert! ( ! Blockstore ::should_insert_coding_shred (
& coding_shred ,
& last_root
) ) ;
}
2019-04-25 00:04:49 -07:00
}
#[ test ]
pub fn test_insert_multiple_is_last ( ) {
2020-12-09 23:14:31 -08:00
solana_logger ::setup ( ) ;
2019-09-03 21:32:51 -07:00
let ( shreds , _ ) = make_slot_entries ( 0 , 0 , 20 ) ;
let num_shreds = shreds . len ( ) as u64 ;
2021-09-10 05:33:08 -07:00
let ledger_path = get_tmp_ledger_path_auto_delete! ( ) ;
let blockstore = Blockstore ::open ( ledger_path . path ( ) ) . unwrap ( ) ;
2019-04-25 00:04:49 -07:00
2020-01-13 13:13:52 -08:00
blockstore . insert_shreds ( shreds , None , false ) . unwrap ( ) ;
let slot_meta = blockstore . meta ( 0 ) . unwrap ( ) . unwrap ( ) ;
2019-09-03 21:32:51 -07:00
assert_eq! ( slot_meta . consumed , num_shreds ) ;
assert_eq! ( slot_meta . received , num_shreds ) ;
2021-12-11 06:47:20 -08:00
assert_eq! ( slot_meta . last_index , Some ( num_shreds - 1 ) ) ;
2019-09-03 21:32:51 -07:00
assert! ( slot_meta . is_full ( ) ) ;
2019-04-25 00:04:49 -07:00
2019-09-03 21:32:51 -07:00
let ( shreds , _ ) = make_slot_entries ( 0 , 0 , 22 ) ;
2020-01-13 13:13:52 -08:00
blockstore . insert_shreds ( shreds , None , false ) . unwrap ( ) ;
let slot_meta = blockstore . meta ( 0 ) . unwrap ( ) . unwrap ( ) ;
2019-04-25 00:04:49 -07:00
2019-09-03 21:32:51 -07:00
assert_eq! ( slot_meta . consumed , num_shreds ) ;
assert_eq! ( slot_meta . received , num_shreds ) ;
2021-12-11 06:47:20 -08:00
assert_eq! ( slot_meta . last_index , Some ( num_shreds - 1 ) ) ;
2019-04-25 00:04:49 -07:00
assert! ( slot_meta . is_full ( ) ) ;
2020-12-09 23:14:31 -08:00
assert! ( blockstore . has_duplicate_shreds_in_slot ( 0 ) ) ;
2019-04-25 00:04:49 -07:00
}
2019-05-13 22:04:54 -07:00
#[ test ]
fn test_slot_data_iterator ( ) {
2019-09-03 21:32:51 -07:00
// Construct the shreds
2021-09-10 05:33:08 -07:00
let ledger_path = get_tmp_ledger_path_auto_delete! ( ) ;
let blockstore = Blockstore ::open ( ledger_path . path ( ) ) . unwrap ( ) ;
2019-09-03 21:32:51 -07:00
let shreds_per_slot = 10 ;
2019-05-13 22:04:54 -07:00
let slots = vec! [ 2 , 4 , 8 , 12 ] ;
2019-09-03 21:32:51 -07:00
let all_shreds = make_chaining_slot_entries ( & slots , shreds_per_slot ) ;
2019-09-17 18:22:46 -07:00
let slot_8_shreds = all_shreds [ 2 ] . 0. clone ( ) ;
2019-09-03 21:32:51 -07:00
for ( slot_shreds , _ ) in all_shreds {
2020-01-13 13:13:52 -08:00
blockstore . insert_shreds ( slot_shreds , None , false ) . unwrap ( ) ;
2019-05-13 22:04:54 -07:00
}
// Slot doesnt exist, iterator should be empty
2020-04-24 15:04:23 -07:00
let shred_iter = blockstore . slot_data_iterator ( 5 , 0 ) . unwrap ( ) ;
let result : Vec < _ > = shred_iter . collect ( ) ;
assert_eq! ( result , vec! [ ] ) ;
2019-05-13 22:04:54 -07:00
// Test that the iterator for slot 8 contains what was inserted earlier
2020-03-19 23:35:01 -07:00
let shred_iter = blockstore . slot_data_iterator ( 8 , 0 ) . unwrap ( ) ;
2019-09-18 16:24:30 -07:00
let result : Vec < Shred > = shred_iter
. filter_map ( | ( _ , bytes ) | Shred ::new_from_serialized_shred ( bytes . to_vec ( ) ) . ok ( ) )
2019-09-03 21:32:51 -07:00
. collect ( ) ;
2019-09-17 18:22:46 -07:00
assert_eq! ( result . len ( ) , slot_8_shreds . len ( ) ) ;
assert_eq! ( result , slot_8_shreds ) ;
2019-05-13 22:04:54 -07:00
}
2019-05-20 19:04:18 -07:00
#[ test ]
2019-05-29 09:43:22 -07:00
fn test_set_roots ( ) {
2021-09-10 05:33:08 -07:00
let ledger_path = get_tmp_ledger_path_auto_delete! ( ) ;
let blockstore = Blockstore ::open ( ledger_path . path ( ) ) . unwrap ( ) ;
2019-05-20 19:04:18 -07:00
let chained_slots = vec! [ 0 , 2 , 4 , 7 , 12 , 15 ] ;
2020-01-13 13:13:52 -08:00
assert_eq! ( blockstore . last_root ( ) , 0 ) ;
2019-05-20 19:04:18 -07:00
2021-07-01 20:02:40 -07:00
blockstore . set_roots ( chained_slots . iter ( ) ) . unwrap ( ) ;
2019-05-20 19:04:18 -07:00
2020-01-13 13:13:52 -08:00
assert_eq! ( blockstore . last_root ( ) , 15 ) ;
2019-08-27 15:09:41 -07:00
2019-05-20 19:04:18 -07:00
for i in chained_slots {
2020-01-13 13:13:52 -08:00
assert! ( blockstore . is_root ( i ) ) ;
2019-05-20 19:04:18 -07:00
}
}
2020-12-16 12:40:36 -08:00
#[ test ]
fn test_is_skipped ( ) {
2021-09-10 05:33:08 -07:00
let ledger_path = get_tmp_ledger_path_auto_delete! ( ) ;
let blockstore = Blockstore ::open ( ledger_path . path ( ) ) . unwrap ( ) ;
2020-12-16 12:40:36 -08:00
let roots = vec! [ 2 , 4 , 7 , 12 , 15 ] ;
2021-07-01 20:02:40 -07:00
blockstore . set_roots ( roots . iter ( ) ) . unwrap ( ) ;
2020-12-16 12:40:36 -08:00
for i in 0 .. 20 {
if i < 2 | | roots . contains ( & i ) | | i > 15 {
assert! ( ! blockstore . is_skipped ( i ) ) ;
} else {
assert! ( blockstore . is_skipped ( i ) ) ;
}
}
}
2019-07-17 14:42:29 -07:00
#[ test ]
fn test_iter_bounds ( ) {
2021-09-10 05:33:08 -07:00
let ledger_path = get_tmp_ledger_path_auto_delete! ( ) ;
let blockstore = Blockstore ::open ( ledger_path . path ( ) ) . unwrap ( ) ;
2019-07-17 14:42:29 -07:00
// slot 5 does not exist, iter should be ok and should be a noop
2020-01-13 13:13:52 -08:00
blockstore
2019-07-17 14:42:29 -07:00
. slot_meta_iterator ( 5 )
. unwrap ( )
2020-05-15 09:35:43 -07:00
. for_each ( | _ | panic! ( ) ) ;
2019-07-17 14:42:29 -07:00
}
2019-10-21 16:15:10 -07:00
#[ test ]
fn test_get_completed_data_ranges ( ) {
2021-10-31 05:56:25 -07:00
let completed_data_end_indexes = [ 2 , 4 , 9 , 11 ] . iter ( ) . copied ( ) . collect ( ) ;
2019-10-21 16:15:10 -07:00
// Consumed is 1, which means we're missing shred with index 1, should return empty
let start_index = 0 ;
let consumed = 1 ;
assert_eq! (
2020-01-13 13:13:52 -08:00
Blockstore ::get_completed_data_ranges (
2019-10-21 16:15:10 -07:00
start_index ,
2021-10-31 05:56:25 -07:00
& completed_data_end_indexes ,
2019-10-21 16:15:10 -07:00
consumed
) ,
vec! [ ]
) ;
let start_index = 0 ;
let consumed = 3 ;
assert_eq! (
2020-01-13 13:13:52 -08:00
Blockstore ::get_completed_data_ranges (
2019-10-21 16:15:10 -07:00
start_index ,
2021-10-31 05:56:25 -07:00
& completed_data_end_indexes ,
2019-10-21 16:15:10 -07:00
consumed
) ,
vec! [ ( 0 , 2 ) ]
) ;
// Test all possible ranges:
//
// `consumed == completed_data_end_indexes[j] + 1`, means we have all the shreds up to index
// `completed_data_end_indexes[j] + 1`. Thus the completed data blocks is everything in the
// range:
// [start_index, completed_data_end_indexes[j]] ==
// [completed_data_end_indexes[i], completed_data_end_indexes[j]],
2021-10-31 05:56:25 -07:00
let completed_data_end_indexes : Vec < _ > = completed_data_end_indexes . into_iter ( ) . collect ( ) ;
2019-10-21 16:15:10 -07:00
for i in 0 .. completed_data_end_indexes . len ( ) {
for j in i .. completed_data_end_indexes . len ( ) {
let start_index = completed_data_end_indexes [ i ] ;
let consumed = completed_data_end_indexes [ j ] + 1 ;
// When start_index == completed_data_end_indexes[i], then that means
// the shred with index == start_index is a single-shred data block,
// so the start index is the end index for that data block.
let mut expected = vec! [ ( start_index , start_index ) ] ;
expected . extend (
completed_data_end_indexes [ i ..= j ]
. windows ( 2 )
. map ( | end_indexes | ( end_indexes [ 0 ] + 1 , end_indexes [ 1 ] ) ) ,
) ;
2021-10-31 05:56:25 -07:00
let completed_data_end_indexes =
completed_data_end_indexes . iter ( ) . copied ( ) . collect ( ) ;
2019-10-21 16:15:10 -07:00
assert_eq! (
2020-01-13 13:13:52 -08:00
Blockstore ::get_completed_data_ranges (
2019-10-21 16:15:10 -07:00
start_index ,
2021-10-31 05:56:25 -07:00
& completed_data_end_indexes ,
2019-10-21 16:15:10 -07:00
consumed
) ,
expected
) ;
}
}
}
#[ test ]
fn test_get_slot_entries_with_shred_count_corruption ( ) {
2021-09-10 05:33:08 -07:00
let ledger_path = get_tmp_ledger_path_auto_delete! ( ) ;
let blockstore = Blockstore ::open ( ledger_path . path ( ) ) . unwrap ( ) ;
let num_ticks = 8 ;
let entries = create_ticks ( num_ticks , 0 , Hash ::default ( ) ) ;
let slot = 1 ;
2021-12-24 12:32:43 -08:00
let shreds = entries_to_test_shreds ( & entries , slot , 0 , false , 0 ) ;
2021-09-10 05:33:08 -07:00
let next_shred_index = shreds . len ( ) ;
blockstore
. insert_shreds ( shreds , None , false )
. expect ( " Expected successful write of shreds " ) ;
assert_eq! (
blockstore . get_slot_entries ( slot , 0 ) . unwrap ( ) . len ( ) as u64 ,
num_ticks
) ;
2019-10-21 16:15:10 -07:00
2021-09-10 05:33:08 -07:00
// Insert an empty shred that won't deshred into entries
let shreds = vec! [ Shred ::new_from_data (
slot ,
next_shred_index as u32 ,
1 ,
2022-04-27 11:04:10 -07:00
& [ 1 , 1 , 1 ] ,
2022-05-02 16:33:53 -07:00
ShredFlags ::LAST_SHRED_IN_SLOT ,
2021-09-10 05:33:08 -07:00
0 ,
0 ,
next_shred_index as u32 ,
) ] ;
2019-10-21 16:15:10 -07:00
2021-09-10 05:33:08 -07:00
// With the corruption, nothing should be returned, even though an
// earlier data block was valid
blockstore
. insert_shreds ( shreds , None , false )
. expect ( " Expected successful write of shreds " ) ;
assert! ( blockstore . get_slot_entries ( slot , 0 ) . is_err ( ) ) ;
2019-10-31 14:03:41 -07:00
}
#[ test ]
fn test_no_insert_but_modify_slot_meta ( ) {
// This tests correctness of the SlotMeta in various cases in which a shred
// that gets filtered out by checks
let ( shreds0 , _ ) = make_slot_entries ( 0 , 0 , 200 ) ;
2021-09-10 05:33:08 -07:00
let ledger_path = get_tmp_ledger_path_auto_delete! ( ) ;
let blockstore = Blockstore ::open ( ledger_path . path ( ) ) . unwrap ( ) ;
2019-10-31 14:03:41 -07:00
2021-09-10 05:33:08 -07:00
// Insert the first 5 shreds, we don't have a "is_last" shred yet
blockstore
. insert_shreds ( shreds0 [ 0 .. 5 ] . to_vec ( ) , None , false )
. unwrap ( ) ;
2019-10-31 14:03:41 -07:00
2021-09-10 05:33:08 -07:00
// Insert a repetitive shred for slot 's', should get ignored, but also
// insert shreds that chains to 's', should see the update in the SlotMeta
// for 's'.
let ( mut shreds2 , _ ) = make_slot_entries ( 2 , 0 , 200 ) ;
let ( mut shreds3 , _ ) = make_slot_entries ( 3 , 0 , 200 ) ;
shreds2 . push ( shreds0 [ 1 ] . clone ( ) ) ;
shreds3 . insert ( 0 , shreds0 [ 1 ] . clone ( ) ) ;
blockstore . insert_shreds ( shreds2 , None , false ) . unwrap ( ) ;
let slot_meta = blockstore . meta ( 0 ) . unwrap ( ) . unwrap ( ) ;
assert_eq! ( slot_meta . next_slots , vec! [ 2 ] ) ;
blockstore . insert_shreds ( shreds3 , None , false ) . unwrap ( ) ;
let slot_meta = blockstore . meta ( 0 ) . unwrap ( ) . unwrap ( ) ;
assert_eq! ( slot_meta . next_slots , vec! [ 2 , 3 ] ) ;
2019-10-21 16:15:10 -07:00
}
2019-11-14 00:32:07 -08:00
#[ test ]
fn test_trusted_insert_shreds ( ) {
2021-09-10 05:33:08 -07:00
let ledger_path = get_tmp_ledger_path_auto_delete! ( ) ;
let blockstore = Blockstore ::open ( ledger_path . path ( ) ) . unwrap ( ) ;
2019-11-14 00:32:07 -08:00
// Make shred for slot 1
let ( shreds1 , _ ) = make_slot_entries ( 1 , 0 , 1 ) ;
let last_root = 100 ;
2021-09-10 05:33:08 -07:00
blockstore . set_roots ( std ::iter ::once ( & last_root ) ) . unwrap ( ) ;
2019-11-14 00:32:07 -08:00
2021-09-10 05:33:08 -07:00
// Insert will fail, slot < root
blockstore
. insert_shreds ( shreds1 [ .. ] . to_vec ( ) , None , false )
. unwrap ( ) ;
assert! ( blockstore . get_data_shred ( 1 , 0 ) . unwrap ( ) . is_none ( ) ) ;
// Insert through trusted path will succeed
blockstore
. insert_shreds ( shreds1 [ .. ] . to_vec ( ) , None , true )
. unwrap ( ) ;
assert! ( blockstore . get_data_shred ( 1 , 0 ) . unwrap ( ) . is_some ( ) ) ;
2019-11-14 00:32:07 -08:00
}
2019-11-14 15:34:39 -08:00
2022-04-27 23:36:19 -07:00
#[ test ]
fn test_get_first_available_block ( ) {
let mint_total = 1_000_000_000_000 ;
let GenesisConfigInfo { genesis_config , .. } = create_genesis_config ( mint_total ) ;
let ( ledger_path , _blockhash ) = create_new_tmp_ledger_auto_delete! ( & genesis_config ) ;
let blockstore = Blockstore ::open ( ledger_path . path ( ) ) . unwrap ( ) ;
assert_eq! ( blockstore . get_first_available_block ( ) . unwrap ( ) , 0 ) ;
assert_eq! ( blockstore . lowest_slot_with_genesis ( ) , 0 ) ;
assert_eq! ( blockstore . lowest_slot ( ) , 0 ) ;
for slot in 1 .. 4 {
let entries = make_slot_entries_with_transactions ( 100 ) ;
let shreds = entries_to_test_shreds ( & entries , slot , slot - 1 , true , 0 ) ;
blockstore . insert_shreds ( shreds , None , false ) . unwrap ( ) ;
blockstore . set_roots ( vec! [ slot ] . iter ( ) ) . unwrap ( ) ;
}
assert_eq! ( blockstore . get_first_available_block ( ) . unwrap ( ) , 0 ) ;
assert_eq! ( blockstore . lowest_slot_with_genesis ( ) , 0 ) ;
assert_eq! ( blockstore . lowest_slot ( ) , 1 ) ;
blockstore . purge_slots ( 0 , 1 , PurgeType ::CompactionFilter ) ;
assert_eq! ( blockstore . get_first_available_block ( ) . unwrap ( ) , 3 ) ;
assert_eq! ( blockstore . lowest_slot_with_genesis ( ) , 2 ) ;
assert_eq! ( blockstore . lowest_slot ( ) , 2 ) ;
}
2019-11-14 15:34:39 -08:00
#[ test ]
2021-03-26 15:47:35 -07:00
fn test_get_rooted_block ( ) {
2019-12-11 14:06:54 -08:00
let slot = 10 ;
2019-12-09 00:13:36 -08:00
let entries = make_slot_entries_with_transactions ( 100 ) ;
2019-12-11 14:06:54 -08:00
let blockhash = get_last_hash ( entries . iter ( ) ) . unwrap ( ) ;
2021-12-24 12:32:43 -08:00
let shreds = entries_to_test_shreds ( & entries , slot , slot - 1 , true , 0 ) ;
let more_shreds = entries_to_test_shreds ( & entries , slot + 1 , slot , true , 0 ) ;
let unrooted_shreds = entries_to_test_shreds ( & entries , slot + 2 , slot + 1 , true , 0 ) ;
2021-09-10 05:33:08 -07:00
let ledger_path = get_tmp_ledger_path_auto_delete! ( ) ;
let blockstore = Blockstore ::open ( ledger_path . path ( ) ) . unwrap ( ) ;
blockstore . insert_shreds ( shreds , None , false ) . unwrap ( ) ;
blockstore . insert_shreds ( more_shreds , None , false ) . unwrap ( ) ;
blockstore
. insert_shreds ( unrooted_shreds , None , false )
. unwrap ( ) ;
blockstore
2021-07-01 20:02:40 -07:00
. set_roots ( vec! [ slot - 1 , slot , slot + 1 ] . iter ( ) )
. unwrap ( ) ;
2019-12-11 14:06:54 -08:00
2021-12-14 10:57:11 -08:00
let parent_meta = SlotMeta ::default ( ) ;
2021-09-10 05:33:08 -07:00
blockstore
2019-12-11 14:06:54 -08:00
. put_meta_bytes ( slot - 1 , & serialize ( & parent_meta ) . unwrap ( ) )
. unwrap ( ) ;
2019-11-14 15:34:39 -08:00
2022-01-13 23:24:41 -08:00
let expected_transactions : Vec < VersionedTransactionWithStatusMeta > = entries
2019-11-14 15:34:39 -08:00
. iter ( )
. cloned ( )
. filter ( | entry | ! entry . is_tick ( ) )
. flat_map ( | entry | entry . transactions )
2019-11-18 08:12:42 -08:00
. map ( | transaction | {
2019-12-18 09:56:29 -08:00
let mut pre_balances : Vec < u64 > = vec! [ ] ;
let mut post_balances : Vec < u64 > = vec! [ ] ;
2022-02-05 04:00:31 -08:00
for i in 0 .. transaction . message . static_account_keys ( ) . len ( ) {
2019-12-18 09:56:29 -08:00
pre_balances . push ( i as u64 * 10 ) ;
post_balances . push ( i as u64 * 11 ) ;
}
2019-11-18 08:12:42 -08:00
let signature = transaction . signatures [ 0 ] ;
2021-03-05 08:05:35 -08:00
let status = TransactionStatusMeta {
status : Ok ( ( ) ) ,
fee : 42 ,
pre_balances : pre_balances . clone ( ) ,
post_balances : post_balances . clone ( ) ,
inner_instructions : Some ( vec! [ ] ) ,
log_messages : Some ( vec! [ ] ) ,
pre_token_balances : Some ( vec! [ ] ) ,
post_token_balances : Some ( vec! [ ] ) ,
2021-05-26 14:43:15 -07:00
rewards : Some ( vec! [ ] ) ,
2022-01-13 23:24:41 -08:00
loaded_addresses : LoadedAddresses ::default ( ) ,
2022-03-22 15:17:05 -07:00
return_data : Some ( TransactionReturnData ::default ( ) ) ,
2021-03-05 08:05:35 -08:00
}
. into ( ) ;
2021-09-10 05:33:08 -07:00
blockstore
2019-11-18 08:12:42 -08:00
. transaction_status_cf
2021-03-05 08:05:35 -08:00
. put_protobuf ( ( 0 , signature , slot ) , & status )
2019-11-18 08:12:42 -08:00
. unwrap ( ) ;
2021-03-05 08:05:35 -08:00
let status = TransactionStatusMeta {
status : Ok ( ( ) ) ,
fee : 42 ,
pre_balances : pre_balances . clone ( ) ,
post_balances : post_balances . clone ( ) ,
inner_instructions : Some ( vec! [ ] ) ,
log_messages : Some ( vec! [ ] ) ,
pre_token_balances : Some ( vec! [ ] ) ,
post_token_balances : Some ( vec! [ ] ) ,
2021-05-26 14:43:15 -07:00
rewards : Some ( vec! [ ] ) ,
2022-01-13 23:24:41 -08:00
loaded_addresses : LoadedAddresses ::default ( ) ,
2022-03-22 15:17:05 -07:00
return_data : Some ( TransactionReturnData ::default ( ) ) ,
2021-03-05 08:05:35 -08:00
}
. into ( ) ;
2021-09-10 05:33:08 -07:00
blockstore
2019-12-11 14:06:54 -08:00
. transaction_status_cf
2021-03-05 08:05:35 -08:00
. put_protobuf ( ( 0 , signature , slot + 1 ) , & status )
2019-12-11 14:06:54 -08:00
. unwrap ( ) ;
2021-03-26 15:47:35 -07:00
let status = TransactionStatusMeta {
status : Ok ( ( ) ) ,
fee : 42 ,
pre_balances : pre_balances . clone ( ) ,
post_balances : post_balances . clone ( ) ,
inner_instructions : Some ( vec! [ ] ) ,
log_messages : Some ( vec! [ ] ) ,
pre_token_balances : Some ( vec! [ ] ) ,
post_token_balances : Some ( vec! [ ] ) ,
2021-05-26 14:43:15 -07:00
rewards : Some ( vec! [ ] ) ,
2022-01-13 23:24:41 -08:00
loaded_addresses : LoadedAddresses ::default ( ) ,
2022-03-22 15:17:05 -07:00
return_data : Some ( TransactionReturnData ::default ( ) ) ,
2021-03-26 15:47:35 -07:00
}
. into ( ) ;
2021-09-10 05:33:08 -07:00
blockstore
2021-03-26 15:47:35 -07:00
. transaction_status_cf
. put_protobuf ( ( 0 , signature , slot + 2 ) , & status )
. unwrap ( ) ;
2022-01-13 23:24:41 -08:00
VersionedTransactionWithStatusMeta {
2019-11-18 08:12:42 -08:00
transaction ,
2022-02-09 21:28:18 -08:00
meta : TransactionStatusMeta {
2020-09-23 22:10:29 -07:00
status : Ok ( ( ) ) ,
fee : 42 ,
pre_balances ,
post_balances ,
2020-09-24 07:36:22 -07:00
inner_instructions : Some ( vec! [ ] ) ,
2020-10-08 12:06:15 -07:00
log_messages : Some ( vec! [ ] ) ,
2020-12-10 19:25:07 -08:00
pre_token_balances : Some ( vec! [ ] ) ,
post_token_balances : Some ( vec! [ ] ) ,
2021-05-26 14:43:15 -07:00
rewards : Some ( vec! [ ] ) ,
2022-01-13 23:24:41 -08:00
loaded_addresses : LoadedAddresses ::default ( ) ,
2022-03-22 15:17:05 -07:00
return_data : Some ( TransactionReturnData ::default ( ) ) ,
2022-02-09 21:28:18 -08:00
} ,
2020-09-23 22:10:29 -07:00
}
2019-11-18 08:12:42 -08:00
} )
2019-11-14 15:34:39 -08:00
. collect ( ) ;
2019-12-11 14:06:54 -08:00
// Even if marked as root, a slot that is empty of entries should return an error
2022-02-09 21:28:18 -08:00
assert_matches! (
blockstore . get_rooted_block ( slot - 1 , true ) ,
Err ( BlockstoreError ::SlotUnavailable )
) ;
2019-12-11 14:06:54 -08:00
2021-02-17 17:04:52 -08:00
// The previous_blockhash of `expected_block` is default because its parent slot is a root,
// but empty of entries (eg. snapshot root slots). This now returns an error.
assert_matches! (
2022-02-09 21:28:18 -08:00
blockstore . get_rooted_block ( slot , true ) ,
Err ( BlockstoreError ::ParentEntriesUnavailable )
2021-02-17 17:04:52 -08:00
) ;
2019-12-11 14:06:54 -08:00
2021-02-17 17:04:52 -08:00
// Test if require_previous_blockhash is false
2021-09-10 05:33:08 -07:00
let confirmed_block = blockstore . get_rooted_block ( slot , false ) . unwrap ( ) ;
2021-02-17 17:04:52 -08:00
assert_eq! ( confirmed_block . transactions . len ( ) , 100 ) ;
2022-01-13 23:24:41 -08:00
let expected_block = VersionedConfirmedBlock {
2020-09-23 22:10:29 -07:00
transactions : expected_transactions . clone ( ) ,
2020-01-12 21:34:30 -08:00
parent_slot : slot - 1 ,
blockhash : blockhash . to_string ( ) ,
previous_blockhash : Hash ::default ( ) . to_string ( ) ,
2020-02-04 18:50:24 -08:00
rewards : vec ! [ ] ,
2020-07-09 21:47:29 -07:00
block_time : None ,
2021-05-26 21:16:16 -07:00
block_height : None ,
2020-01-12 21:34:30 -08:00
} ;
2019-12-11 14:06:54 -08:00
assert_eq! ( confirmed_block , expected_block ) ;
2021-09-10 05:33:08 -07:00
let confirmed_block = blockstore . get_rooted_block ( slot + 1 , true ) . unwrap ( ) ;
2019-11-18 08:12:42 -08:00
assert_eq! ( confirmed_block . transactions . len ( ) , 100 ) ;
2022-01-13 23:24:41 -08:00
let mut expected_block = VersionedConfirmedBlock {
2021-03-26 15:47:35 -07:00
transactions : expected_transactions . clone ( ) ,
2020-01-12 21:34:30 -08:00
parent_slot : slot ,
blockhash : blockhash . to_string ( ) ,
previous_blockhash : blockhash . to_string ( ) ,
2020-02-04 18:50:24 -08:00
rewards : vec ! [ ] ,
2020-07-09 21:47:29 -07:00
block_time : None ,
2021-05-26 21:16:16 -07:00
block_height : None ,
2020-01-12 21:34:30 -08:00
} ;
2019-11-17 19:17:15 -08:00
assert_eq! ( confirmed_block , expected_block ) ;
2021-09-10 05:33:08 -07:00
let not_root = blockstore . get_rooted_block ( slot + 2 , true ) . unwrap_err ( ) ;
2020-01-13 13:13:52 -08:00
assert_matches! ( not_root , BlockstoreError ::SlotNotRooted ) ;
2019-11-14 15:34:39 -08:00
2021-09-10 05:33:08 -07:00
let complete_block = blockstore . get_complete_block ( slot + 2 , true ) . unwrap ( ) ;
2021-03-26 15:47:35 -07:00
assert_eq! ( complete_block . transactions . len ( ) , 100 ) ;
2022-01-13 23:24:41 -08:00
let mut expected_complete_block = VersionedConfirmedBlock {
2021-03-26 15:47:35 -07:00
transactions : expected_transactions ,
parent_slot : slot + 1 ,
blockhash : blockhash . to_string ( ) ,
previous_blockhash : blockhash . to_string ( ) ,
rewards : vec ! [ ] ,
block_time : None ,
2021-05-26 21:16:16 -07:00
block_height : None ,
2021-03-26 15:47:35 -07:00
} ;
assert_eq! ( complete_block , expected_complete_block ) ;
2021-05-26 21:16:16 -07:00
// Test block_time & block_height return, if available
2020-09-09 08:33:14 -07:00
let timestamp = 1_576_183_541 ;
2021-09-10 05:33:08 -07:00
blockstore . blocktime_cf . put ( slot + 1 , & timestamp ) . unwrap ( ) ;
2020-09-09 08:33:14 -07:00
expected_block . block_time = Some ( timestamp ) ;
2021-05-26 21:16:16 -07:00
let block_height = slot - 2 ;
2021-09-10 05:33:08 -07:00
blockstore
. block_height_cf
. put ( slot + 1 , & block_height )
. unwrap ( ) ;
2021-05-26 21:16:16 -07:00
expected_block . block_height = Some ( block_height ) ;
2020-09-09 08:33:14 -07:00
2021-09-10 05:33:08 -07:00
let confirmed_block = blockstore . get_rooted_block ( slot + 1 , true ) . unwrap ( ) ;
2020-09-09 08:33:14 -07:00
assert_eq! ( confirmed_block , expected_block ) ;
2021-03-26 15:47:35 -07:00
let timestamp = 1_576_183_542 ;
2021-09-10 05:33:08 -07:00
blockstore . blocktime_cf . put ( slot + 2 , & timestamp ) . unwrap ( ) ;
2021-03-26 15:47:35 -07:00
expected_complete_block . block_time = Some ( timestamp ) ;
2021-05-26 21:16:16 -07:00
let block_height = slot - 1 ;
2021-09-10 05:33:08 -07:00
blockstore
. block_height_cf
. put ( slot + 2 , & block_height )
. unwrap ( ) ;
2021-05-26 21:16:16 -07:00
expected_complete_block . block_height = Some ( block_height ) ;
2021-03-26 15:47:35 -07:00
2021-09-10 05:33:08 -07:00
let complete_block = blockstore . get_complete_block ( slot + 2 , true ) . unwrap ( ) ;
2021-03-26 15:47:35 -07:00
assert_eq! ( complete_block , expected_complete_block ) ;
2019-11-14 15:34:39 -08:00
}
2019-11-17 08:26:01 -08:00
2019-12-14 11:23:02 -08:00
#[ test ]
fn test_persist_transaction_status ( ) {
2021-09-10 05:33:08 -07:00
let ledger_path = get_tmp_ledger_path_auto_delete! ( ) ;
let blockstore = Blockstore ::open ( ledger_path . path ( ) ) . unwrap ( ) ;
let transaction_status_cf = & blockstore . transaction_status_cf ;
let pre_balances_vec = vec! [ 1 , 2 , 3 ] ;
let post_balances_vec = vec! [ 3 , 2 , 1 ] ;
let inner_instructions_vec = vec! [ InnerInstructions {
index : 0 ,
instructions : vec ! [ CompiledInstruction ::new ( 1 , & ( ) , vec! [ 0 ] ) ] ,
} ] ;
let log_messages_vec = vec! [ String ::from ( " Test message \n " ) ] ;
let pre_token_balances_vec = vec! [ ] ;
let post_token_balances_vec = vec! [ ] ;
let rewards_vec = vec! [ ] ;
2022-01-13 23:24:41 -08:00
let test_loaded_addresses = LoadedAddresses {
writable : vec ! [ Pubkey ::new_unique ( ) ] ,
readonly : vec ! [ Pubkey ::new_unique ( ) ] ,
} ;
2022-03-22 15:17:05 -07:00
let test_return_data = TransactionReturnData {
program_id : Pubkey ::new_unique ( ) ,
data : vec ! [ 1 , 2 , 3 ] ,
} ;
2021-09-10 05:33:08 -07:00
// result not found
assert! ( transaction_status_cf
. get_protobuf_or_bincode ::< StoredTransactionStatusMeta > ( ( 0 , Signature ::default ( ) , 0 ) )
. unwrap ( )
. is_none ( ) ) ;
// insert value
let status = TransactionStatusMeta {
status : solana_sdk ::transaction ::Result ::< ( ) > ::Err ( TransactionError ::AccountNotFound ) ,
fee : 5 u64 ,
pre_balances : pre_balances_vec . clone ( ) ,
post_balances : post_balances_vec . clone ( ) ,
inner_instructions : Some ( inner_instructions_vec . clone ( ) ) ,
log_messages : Some ( log_messages_vec . clone ( ) ) ,
pre_token_balances : Some ( pre_token_balances_vec . clone ( ) ) ,
post_token_balances : Some ( post_token_balances_vec . clone ( ) ) ,
rewards : Some ( rewards_vec . clone ( ) ) ,
2022-01-13 23:24:41 -08:00
loaded_addresses : test_loaded_addresses . clone ( ) ,
2022-03-22 15:17:05 -07:00
return_data : Some ( test_return_data . clone ( ) ) ,
2021-09-10 05:33:08 -07:00
}
. into ( ) ;
assert! ( transaction_status_cf
. put_protobuf ( ( 0 , Signature ::default ( ) , 0 ) , & status , )
. is_ok ( ) ) ;
// result found
let TransactionStatusMeta {
status ,
fee ,
pre_balances ,
post_balances ,
inner_instructions ,
log_messages ,
pre_token_balances ,
post_token_balances ,
rewards ,
2022-01-13 23:24:41 -08:00
loaded_addresses ,
2022-03-22 15:17:05 -07:00
return_data ,
2021-09-10 05:33:08 -07:00
} = transaction_status_cf
. get_protobuf_or_bincode ::< StoredTransactionStatusMeta > ( ( 0 , Signature ::default ( ) , 0 ) )
. unwrap ( )
. unwrap ( )
. try_into ( )
. unwrap ( ) ;
assert_eq! ( status , Err ( TransactionError ::AccountNotFound ) ) ;
assert_eq! ( fee , 5 u64 ) ;
assert_eq! ( pre_balances , pre_balances_vec ) ;
assert_eq! ( post_balances , post_balances_vec ) ;
assert_eq! ( inner_instructions . unwrap ( ) , inner_instructions_vec ) ;
assert_eq! ( log_messages . unwrap ( ) , log_messages_vec ) ;
assert_eq! ( pre_token_balances . unwrap ( ) , pre_token_balances_vec ) ;
assert_eq! ( post_token_balances . unwrap ( ) , post_token_balances_vec ) ;
assert_eq! ( rewards . unwrap ( ) , rewards_vec ) ;
2022-01-13 23:24:41 -08:00
assert_eq! ( loaded_addresses , test_loaded_addresses ) ;
2022-03-22 15:17:05 -07:00
assert_eq! ( return_data . unwrap ( ) , test_return_data ) ;
2021-09-10 05:33:08 -07:00
// insert value
let status = TransactionStatusMeta {
status : solana_sdk ::transaction ::Result ::< ( ) > ::Ok ( ( ) ) ,
fee : 9 u64 ,
pre_balances : pre_balances_vec . clone ( ) ,
post_balances : post_balances_vec . clone ( ) ,
inner_instructions : Some ( inner_instructions_vec . clone ( ) ) ,
log_messages : Some ( log_messages_vec . clone ( ) ) ,
pre_token_balances : Some ( pre_token_balances_vec . clone ( ) ) ,
post_token_balances : Some ( post_token_balances_vec . clone ( ) ) ,
rewards : Some ( rewards_vec . clone ( ) ) ,
2022-01-13 23:24:41 -08:00
loaded_addresses : test_loaded_addresses . clone ( ) ,
2022-03-22 15:17:05 -07:00
return_data : Some ( test_return_data . clone ( ) ) ,
2021-09-10 05:33:08 -07:00
}
. into ( ) ;
assert! ( transaction_status_cf
. put_protobuf ( ( 0 , Signature ::new ( & [ 2 u8 ; 64 ] ) , 9 ) , & status , )
. is_ok ( ) ) ;
// result found
let TransactionStatusMeta {
status ,
fee ,
pre_balances ,
post_balances ,
inner_instructions ,
log_messages ,
pre_token_balances ,
post_token_balances ,
rewards ,
2022-01-13 23:24:41 -08:00
loaded_addresses ,
2022-03-22 15:17:05 -07:00
return_data ,
2021-09-10 05:33:08 -07:00
} = transaction_status_cf
. get_protobuf_or_bincode ::< StoredTransactionStatusMeta > ( (
0 ,
Signature ::new ( & [ 2 u8 ; 64 ] ) ,
9 ,
) )
. unwrap ( )
. unwrap ( )
. try_into ( )
. unwrap ( ) ;
2019-11-17 08:26:01 -08:00
2021-09-10 05:33:08 -07:00
// deserialize
assert_eq! ( status , Ok ( ( ) ) ) ;
assert_eq! ( fee , 9 u64 ) ;
assert_eq! ( pre_balances , pre_balances_vec ) ;
assert_eq! ( post_balances , post_balances_vec ) ;
assert_eq! ( inner_instructions . unwrap ( ) , inner_instructions_vec ) ;
assert_eq! ( log_messages . unwrap ( ) , log_messages_vec ) ;
assert_eq! ( pre_token_balances . unwrap ( ) , pre_token_balances_vec ) ;
assert_eq! ( post_token_balances . unwrap ( ) , post_token_balances_vec ) ;
assert_eq! ( rewards . unwrap ( ) , rewards_vec ) ;
2022-01-13 23:24:41 -08:00
assert_eq! ( loaded_addresses , test_loaded_addresses ) ;
2022-03-22 15:17:05 -07:00
assert_eq! ( return_data . unwrap ( ) , test_return_data ) ;
2019-11-17 08:26:01 -08:00
}
2019-11-17 19:17:15 -08:00
2020-04-04 20:24:06 -07:00
#[test]
#[allow(clippy::cognitive_complexity)]
fn test_transaction_status_index() {
    // Exercises the two-primary-index scheme for the TransactionStatus and
    // AddressSignatures columns: writes land in the active index, a purge
    // freezes it, and the next purge wipes it and toggles the active index.
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    let transaction_status_index_cf = &blockstore.transaction_status_index_cf;
    let slot0 = 10;

    // Primary index column is initialized on Blockstore::open
    assert!(transaction_status_index_cf.get(0).unwrap().is_some());
    assert!(transaction_status_index_cf.get(1).unwrap().is_some());

    // Writes five statuses with random signatures/addresses into `slot`.
    let write_random_statuses = |slot| {
        for _ in 0..5 {
            let random_bytes: Vec<u8> = (0..64).map(|_| rand::random::<u8>()).collect();
            blockstore
                .write_transaction_status(
                    slot,
                    Signature::new(&random_bytes),
                    vec![&Pubkey::new(&random_bytes[0..32])],
                    vec![&Pubkey::new(&random_bytes[32..])],
                    TransactionStatusMeta::default(),
                )
                .unwrap();
        }
    };
    // Key of the first TransactionStatus entry at or after primary `index`.
    let first_status_key = |index| {
        blockstore
            .db
            .iter::<cf::TransactionStatus>(IteratorMode::From(
                cf::TransactionStatus::as_index(index),
                IteratorDirection::Forward,
            ))
            .unwrap()
            .next()
            .unwrap()
            .0
    };
    // Key of the first AddressSignatures entry at or after primary `index`.
    let first_address_key = |index| {
        blockstore
            .db
            .iter::<cf::AddressSignatures>(IteratorMode::From(
                cf::AddressSignatures::as_index(index),
                IteratorDirection::Forward,
            ))
            .unwrap()
            .next()
            .unwrap()
            .0
    };

    write_random_statuses(slot0);
    // New statuses bump index 0 max_slot
    assert_eq!(
        transaction_status_index_cf.get(0).unwrap().unwrap(),
        TransactionStatusIndexMeta {
            max_slot: slot0,
            frozen: false,
        }
    );
    assert_eq!(
        transaction_status_index_cf.get(1).unwrap().unwrap(),
        TransactionStatusIndexMeta::default()
    );

    let first_status_entry = first_status_key(0);
    assert_eq!(first_status_entry.0, 0);
    assert_eq!(first_status_entry.2, slot0);
    let first_address_entry = first_address_key(0);
    assert_eq!(first_address_entry.0, 0);
    assert_eq!(first_address_entry.2, slot0);

    blockstore.run_purge(0, 8, PurgeType::PrimaryIndex).unwrap();
    // First successful prune freezes index 0
    assert_eq!(
        transaction_status_index_cf.get(0).unwrap().unwrap(),
        TransactionStatusIndexMeta {
            max_slot: slot0,
            frozen: true,
        }
    );
    assert_eq!(
        transaction_status_index_cf.get(1).unwrap().unwrap(),
        TransactionStatusIndexMeta::default()
    );

    let slot1 = 20;
    write_random_statuses(slot1);

    // Index 0 stays frozen at its old max_slot ...
    assert_eq!(
        transaction_status_index_cf.get(0).unwrap().unwrap(),
        TransactionStatusIndexMeta {
            max_slot: slot0,
            frozen: true,
        }
    );
    // ... so new statuses bump index 1 max_slot
    assert_eq!(
        transaction_status_index_cf.get(1).unwrap().unwrap(),
        TransactionStatusIndexMeta {
            max_slot: slot1,
            frozen: false,
        }
    );

    // Index 0 statuses and address records still exist
    let first_status_entry = first_status_key(0);
    assert_eq!(first_status_entry.0, 0);
    assert_eq!(first_status_entry.2, 10);
    let first_address_entry = first_address_key(0);
    assert_eq!(first_address_entry.0, 0);
    assert_eq!(first_address_entry.2, slot0);
    // New statuses and address records are stored in index 1
    let index1_first_status_entry = first_status_key(1);
    assert_eq!(index1_first_status_entry.0, 1);
    assert_eq!(index1_first_status_entry.2, slot1);
    let index1_first_address_entry = first_address_key(1);
    assert_eq!(index1_first_address_entry.0, 1);
    assert_eq!(index1_first_address_entry.2, slot1);

    blockstore
        .run_purge(0, 18, PurgeType::PrimaryIndex)
        .unwrap();
    // Successful prune toggles TransactionStatusIndex: index 0 is reset and
    // becomes active again, index 1 is frozen.
    assert_eq!(
        transaction_status_index_cf.get(0).unwrap().unwrap(),
        TransactionStatusIndexMeta {
            max_slot: 0,
            frozen: false,
        }
    );
    assert_eq!(
        transaction_status_index_cf.get(1).unwrap().unwrap(),
        TransactionStatusIndexMeta {
            max_slot: slot1,
            frozen: true,
        }
    );

    // Index 0 has been pruned, so first status and address entries are now index 1
    let first_status_entry = first_status_key(0);
    assert_eq!(first_status_entry.0, 1);
    assert_eq!(first_status_entry.2, slot1);
    let first_address_entry = first_address_key(0);
    assert_eq!(first_address_entry.0, 1);
    assert_eq!(first_address_entry.2, slot1);
}
2020-04-06 03:04:54 -07:00
2021-09-10 05:33:08 -07:00
#[test]
fn test_get_transaction_status() {
    // Verifies `get_transaction_status_with_counter` across both primary
    // indexes, a forked slot layout, and signatures that are absent, present
    // only in non-root slots, or present in rooted slots.
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    // TransactionStatus column opens initialized with one entry at index 2
    let transaction_status_cf = &blockstore.transaction_status_cf;

    let pre_balances_vec = vec![1, 2, 3];
    let post_balances_vec = vec![3, 2, 1];
    let status = TransactionStatusMeta {
        status: solana_sdk::transaction::Result::<()>::Ok(()),
        fee: 42u64,
        pre_balances: pre_balances_vec,
        post_balances: post_balances_vec,
        inner_instructions: Some(vec![]),
        log_messages: Some(vec![]),
        pre_token_balances: Some(vec![]),
        post_token_balances: Some(vec![]),
        rewards: Some(vec![]),
        loaded_addresses: LoadedAddresses::default(),
        return_data: Some(TransactionReturnData::default()),
    }
    .into();

    let signature1 = Signature::new(&[1u8; 64]);
    let signature2 = Signature::new(&[2u8; 64]);
    let signature3 = Signature::new(&[3u8; 64]);
    let signature4 = Signature::new(&[4u8; 64]);
    let signature5 = Signature::new(&[5u8; 64]);
    let signature6 = Signature::new(&[6u8; 64]);
    let signature7 = Signature::new(&[7u8; 64]);

    // Insert slots with fork
    //   0 (root)
    //  / \
    // 1   |
    //     2 (root)
    //     |
    //     3
    for (slot, parent) in [(0, 0), (1, 0), (2, 0), (3, 2)] {
        let meta = SlotMeta::new(slot, Some(parent));
        blockstore.meta_cf.put(slot, &meta).unwrap();
    }
    blockstore.set_roots(vec![0, 2].iter()).unwrap();

    // Initialize index 0, including:
    //   signature2 in non-root and root,
    //   signature4 in non-root,
    //   signature5 in skipped slot and non-root,
    //   signature6 in skipped slot,
    // and index 1, including:
    //   signature4 in root,
    //   signature6 in non-root,
    //   signature5 extra entries
    for (primary_index, signature, slot) in [
        (0, signature2, 1),
        (0, signature2, 2),
        (0, signature4, 1),
        (0, signature5, 1),
        (0, signature5, 3),
        (0, signature6, 1),
        (1, signature4, 2),
        (1, signature5, 4),
        (1, signature5, 5),
        (1, signature6, 3),
    ] {
        transaction_status_cf
            .put_protobuf((primary_index, signature, slot), &status)
            .unwrap();
    }

    // Signature exists, root found in index 0
    if let (Some((slot, _status)), counter) = blockstore
        .get_transaction_status_with_counter(signature2, &[])
        .unwrap()
    {
        assert_eq!(slot, 2);
        assert_eq!(counter, 2);
    }

    // Signature exists, root found although not required
    if let (Some((slot, _status)), counter) = blockstore
        .get_transaction_status_with_counter(signature2, &[3])
        .unwrap()
    {
        assert_eq!(slot, 2);
        assert_eq!(counter, 2);
    }

    // Signature exists, root found in index 1
    if let (Some((slot, _status)), counter) = blockstore
        .get_transaction_status_with_counter(signature4, &[])
        .unwrap()
    {
        assert_eq!(slot, 2);
        assert_eq!(counter, 3);
    }

    // Signature exists, root found although not required, in index 1
    if let (Some((slot, _status)), counter) = blockstore
        .get_transaction_status_with_counter(signature4, &[3])
        .unwrap()
    {
        assert_eq!(slot, 2);
        assert_eq!(counter, 3);
    }

    // Signature exists, no root found
    let (status, counter) = blockstore
        .get_transaction_status_with_counter(signature5, &[])
        .unwrap();
    assert_eq!(status, None);
    assert_eq!(counter, 6);

    // Signature exists, root not required
    if let (Some((slot, _status)), counter) = blockstore
        .get_transaction_status_with_counter(signature5, &[3])
        .unwrap()
    {
        assert_eq!(slot, 3);
        assert_eq!(counter, 2);
    }

    // Signature does not exist, smaller than existing entries
    let (status, counter) = blockstore
        .get_transaction_status_with_counter(signature1, &[])
        .unwrap();
    assert_eq!(status, None);
    assert_eq!(counter, 2);
    let (status, counter) = blockstore
        .get_transaction_status_with_counter(signature1, &[3])
        .unwrap();
    assert_eq!(status, None);
    assert_eq!(counter, 2);

    // Signature does not exist, between existing entries
    let (status, counter) = blockstore
        .get_transaction_status_with_counter(signature3, &[])
        .unwrap();
    assert_eq!(status, None);
    assert_eq!(counter, 2);
    let (status, counter) = blockstore
        .get_transaction_status_with_counter(signature3, &[3])
        .unwrap();
    assert_eq!(status, None);
    assert_eq!(counter, 2);

    // Signature does not exist, larger than existing entries
    let (status, counter) = blockstore
        .get_transaction_status_with_counter(signature7, &[])
        .unwrap();
    assert_eq!(status, None);
    assert_eq!(counter, 2);
    let (status, counter) = blockstore
        .get_transaction_status_with_counter(signature7, &[3])
        .unwrap();
    assert_eq!(status, None);
    assert_eq!(counter, 2);
}
2021-05-28 00:42:56 -07:00
fn do_test_lowest_cleanup_slot_and_special_cfs (
simulate_compaction : bool ,
simulate_ledger_cleanup_service : bool ,
) {
solana_logger ::setup ( ) ;
2021-09-10 05:33:08 -07:00
let ledger_path = get_tmp_ledger_path_auto_delete! ( ) ;
let blockstore = Blockstore ::open ( ledger_path . path ( ) ) . unwrap ( ) ;
// TransactionStatus column opens initialized with one entry at index 2
let transaction_status_cf = & blockstore . transaction_status_cf ;
let pre_balances_vec = vec! [ 1 , 2 , 3 ] ;
let post_balances_vec = vec! [ 3 , 2 , 1 ] ;
let status = TransactionStatusMeta {
status : solana_sdk ::transaction ::Result ::< ( ) > ::Ok ( ( ) ) ,
fee : 42 u64 ,
pre_balances : pre_balances_vec ,
post_balances : post_balances_vec ,
inner_instructions : Some ( vec! [ ] ) ,
log_messages : Some ( vec! [ ] ) ,
pre_token_balances : Some ( vec! [ ] ) ,
post_token_balances : Some ( vec! [ ] ) ,
rewards : Some ( vec! [ ] ) ,
2022-01-13 23:24:41 -08:00
loaded_addresses : LoadedAddresses ::default ( ) ,
2022-03-22 15:17:05 -07:00
return_data : Some ( TransactionReturnData ::default ( ) ) ,
2021-09-10 05:33:08 -07:00
}
. into ( ) ;
let signature1 = Signature ::new ( & [ 2 u8 ; 64 ] ) ;
let signature2 = Signature ::new ( & [ 3 u8 ; 64 ] ) ;
// Insert rooted slots 0..=3 with no fork
2021-12-14 10:57:11 -08:00
let meta0 = SlotMeta ::new ( 0 , Some ( 0 ) ) ;
2021-09-10 05:33:08 -07:00
blockstore . meta_cf . put ( 0 , & meta0 ) . unwrap ( ) ;
2021-12-14 10:57:11 -08:00
let meta1 = SlotMeta ::new ( 1 , Some ( 0 ) ) ;
2021-09-10 05:33:08 -07:00
blockstore . meta_cf . put ( 1 , & meta1 ) . unwrap ( ) ;
2021-12-14 10:57:11 -08:00
let meta2 = SlotMeta ::new ( 2 , Some ( 1 ) ) ;
2021-09-10 05:33:08 -07:00
blockstore . meta_cf . put ( 2 , & meta2 ) . unwrap ( ) ;
2021-12-14 10:57:11 -08:00
let meta3 = SlotMeta ::new ( 3 , Some ( 2 ) ) ;
2021-09-10 05:33:08 -07:00
blockstore . meta_cf . put ( 3 , & meta3 ) . unwrap ( ) ;
blockstore . set_roots ( vec! [ 0 , 1 , 2 , 3 ] . iter ( ) ) . unwrap ( ) ;
let lowest_cleanup_slot = 1 ;
let lowest_available_slot = lowest_cleanup_slot + 1 ;
transaction_status_cf
. put_protobuf ( ( 0 , signature1 , lowest_cleanup_slot ) , & status )
. unwrap ( ) ;
2021-05-28 00:42:56 -07:00
2021-09-10 05:33:08 -07:00
transaction_status_cf
. put_protobuf ( ( 0 , signature2 , lowest_available_slot ) , & status )
. unwrap ( ) ;
2021-05-28 00:42:56 -07:00
2021-09-10 05:33:08 -07:00
let address0 = solana_sdk ::pubkey ::new_rand ( ) ;
let address1 = solana_sdk ::pubkey ::new_rand ( ) ;
blockstore
. write_transaction_status (
lowest_cleanup_slot ,
signature1 ,
vec! [ & address0 ] ,
vec! [ ] ,
TransactionStatusMeta ::default ( ) ,
)
. unwrap ( ) ;
blockstore
. write_transaction_status (
lowest_available_slot ,
signature2 ,
vec! [ & address1 ] ,
vec! [ ] ,
TransactionStatusMeta ::default ( ) ,
)
. unwrap ( ) ;
2021-05-28 00:42:56 -07:00
2021-09-10 05:33:08 -07:00
let check_for_missing = | | {
(
blockstore
. get_transaction_status_with_counter ( signature1 , & [ ] )
. unwrap ( )
. 0
. is_none ( ) ,
blockstore
. find_address_signatures_for_slot ( address0 , lowest_cleanup_slot )
. unwrap ( )
. is_empty ( ) ,
blockstore
. find_address_signatures ( address0 , lowest_cleanup_slot , lowest_cleanup_slot )
. unwrap ( )
. is_empty ( ) ,
)
} ;
2021-05-28 00:42:56 -07:00
2021-09-10 05:33:08 -07:00
let assert_existing_always = | | {
let are_existing_always = (
blockstore
. get_transaction_status_with_counter ( signature2 , & [ ] )
. unwrap ( )
. 0
. is_some ( ) ,
! blockstore
. find_address_signatures_for_slot ( address1 , lowest_available_slot )
. unwrap ( )
. is_empty ( ) ,
! blockstore
. find_address_signatures ( address1 , lowest_available_slot , lowest_available_slot )
. unwrap ( )
. is_empty ( ) ,
) ;
assert_eq! ( are_existing_always , ( true , true , true ) ) ;
} ;
2021-05-28 00:42:56 -07:00
2021-09-10 05:33:08 -07:00
let are_missing = check_for_missing ( ) ;
// should never be missing before the conditional compaction & simulation...
assert_eq! ( are_missing , ( false , false , false ) ) ;
assert_existing_always ( ) ;
2021-05-28 00:42:56 -07:00
2021-09-10 05:33:08 -07:00
if simulate_compaction {
blockstore . set_max_expired_slot ( lowest_cleanup_slot ) ;
// force compaction filters to run across whole key range.
2021-05-28 00:42:56 -07:00
blockstore
2021-09-10 05:33:08 -07:00
. compact_storage ( Slot ::min_value ( ) , Slot ::max_value ( ) )
2021-05-28 00:42:56 -07:00
. unwrap ( ) ;
2021-09-10 05:33:08 -07:00
}
2021-05-28 00:42:56 -07:00
2021-09-10 05:33:08 -07:00
if simulate_ledger_cleanup_service {
* blockstore . lowest_cleanup_slot . write ( ) . unwrap ( ) = lowest_cleanup_slot ;
}
2021-05-28 00:42:56 -07:00
2021-09-10 05:33:08 -07:00
let are_missing = check_for_missing ( ) ;
if simulate_compaction | | simulate_ledger_cleanup_service {
// ... when either simulation (or both) is effective, we should observe to be missing
// consistently
assert_eq! ( are_missing , ( true , true , true ) ) ;
} else {
// ... otherwise, we should observe to be existing...
2021-05-28 00:42:56 -07:00
assert_eq! ( are_missing , ( false , false , false ) ) ;
}
2021-09-10 05:33:08 -07:00
assert_existing_always ( ) ;
2021-05-28 00:42:56 -07:00
}
// The four combinations of (simulate_compaction, simulate_ledger_cleanup_service).
#[test]
fn test_lowest_cleanup_slot_and_special_cfs_with_compact_with_ledger_cleanup_service_simulation() {
    do_test_lowest_cleanup_slot_and_special_cfs(true, true);
}

#[test]
fn test_lowest_cleanup_slot_and_special_cfs_with_compact_without_ledger_cleanup_service_simulation()
{
    do_test_lowest_cleanup_slot_and_special_cfs(true, false);
}

#[test]
fn test_lowest_cleanup_slot_and_special_cfs_without_compact_with_ledger_cleanup_service_simulation()
{
    do_test_lowest_cleanup_slot_and_special_cfs(false, true);
}

#[test]
fn test_lowest_cleanup_slot_and_special_cfs_without_compact_without_ledger_cleanup_service_simulation(
) {
    do_test_lowest_cleanup_slot_and_special_cfs(false, false);
}
2020-04-08 23:57:30 -07:00
#[test]
fn test_get_rooted_transaction() {
    // Writes transactions into a rooted slot, checks that both
    // `get_rooted_transaction` and `get_complete_transaction` return them,
    // then purges and confirms both return `None`.
    let slot = 2;
    let entries = make_slot_entries_with_transactions(5);
    let shreds = entries_to_test_shreds(&entries, slot, slot - 1, true, 0);
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    blockstore.insert_shreds(shreds, None, false).unwrap();
    blockstore.set_roots(vec![slot - 1, slot].iter()).unwrap();

    let expected_transactions: Vec<VersionedTransactionWithStatusMeta> = entries
        .iter()
        .cloned()
        .filter(|entry| !entry.is_tick())
        .flat_map(|entry| entry.transactions)
        .map(|transaction| {
            // Synthesize deterministic balances from the account-key count.
            let num_keys = transaction.message.static_account_keys().len() as u64;
            let pre_balances: Vec<u64> = (0..num_keys).map(|i| i * 10).collect();
            let post_balances: Vec<u64> = (0..num_keys).map(|i| i * 11).collect();
            let inner_instructions = Some(vec![InnerInstructions {
                index: 0,
                instructions: vec![CompiledInstruction::new(1, &(), vec![0])],
            }]);
            let log_messages = Some(vec![String::from("Test message\n")]);
            let pre_token_balances = Some(vec![]);
            let post_token_balances = Some(vec![]);
            let rewards = Some(vec![]);
            let signature = transaction.signatures[0];
            let return_data = Some(TransactionReturnData {
                program_id: Pubkey::new_unique(),
                data: vec![1, 2, 3],
            });
            let status = TransactionStatusMeta {
                status: Ok(()),
                fee: 42,
                pre_balances: pre_balances.clone(),
                post_balances: post_balances.clone(),
                inner_instructions: inner_instructions.clone(),
                log_messages: log_messages.clone(),
                pre_token_balances: pre_token_balances.clone(),
                post_token_balances: post_token_balances.clone(),
                rewards: rewards.clone(),
                loaded_addresses: LoadedAddresses::default(),
                return_data: return_data.clone(),
            }
            .into();
            blockstore
                .transaction_status_cf
                .put_protobuf((0, signature, slot), &status)
                .unwrap();
            VersionedTransactionWithStatusMeta {
                transaction,
                meta: TransactionStatusMeta {
                    status: Ok(()),
                    fee: 42,
                    pre_balances,
                    post_balances,
                    inner_instructions,
                    log_messages,
                    pre_token_balances,
                    post_token_balances,
                    rewards,
                    loaded_addresses: LoadedAddresses::default(),
                    return_data,
                },
            }
        })
        .collect();

    for tx_with_meta in expected_transactions.clone() {
        let signature = tx_with_meta.transaction.signatures[0];
        assert_eq!(
            blockstore.get_rooted_transaction(signature).unwrap(),
            Some(ConfirmedTransactionWithStatusMeta {
                slot,
                tx_with_meta: TransactionWithStatusMeta::Complete(tx_with_meta.clone()),
                block_time: None
            })
        );
        assert_eq!(
            blockstore
                .get_complete_transaction(signature, slot + 1)
                .unwrap(),
            Some(ConfirmedTransactionWithStatusMeta {
                slot,
                tx_with_meta: TransactionWithStatusMeta::Complete(tx_with_meta),
                block_time: None
            })
        );
    }

    // After the purge, neither lookup path should find the transactions.
    blockstore.run_purge(0, 2, PurgeType::PrimaryIndex).unwrap();
    *blockstore.lowest_cleanup_slot.write().unwrap() = slot;
    for VersionedTransactionWithStatusMeta { transaction, .. } in expected_transactions {
        let signature = transaction.signatures[0];
        assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None);
        assert_eq!(
            blockstore
                .get_complete_transaction(signature, slot + 1)
                .unwrap(),
            None
        );
    }
}
#[test]
fn test_get_complete_transaction() {
    // Like test_get_rooted_transaction, but the slot is NOT rooted:
    // `get_complete_transaction` must find the transactions while
    // `get_rooted_transaction` must not.
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    let slot = 2;
    let entries = make_slot_entries_with_transactions(5);
    let shreds = entries_to_test_shreds(&entries, slot, slot - 1, true, 0);
    blockstore.insert_shreds(shreds, None, false).unwrap();

    let expected_transactions: Vec<VersionedTransactionWithStatusMeta> = entries
        .iter()
        .cloned()
        .filter(|entry| !entry.is_tick())
        .flat_map(|entry| entry.transactions)
        .map(|transaction| {
            // Synthesize deterministic balances from the account-key count.
            let num_keys = transaction.message.static_account_keys().len() as u64;
            let pre_balances: Vec<u64> = (0..num_keys).map(|i| i * 10).collect();
            let post_balances: Vec<u64> = (0..num_keys).map(|i| i * 11).collect();
            let inner_instructions = Some(vec![InnerInstructions {
                index: 0,
                instructions: vec![CompiledInstruction::new(1, &(), vec![0])],
            }]);
            let log_messages = Some(vec![String::from("Test message\n")]);
            let pre_token_balances = Some(vec![]);
            let post_token_balances = Some(vec![]);
            let rewards = Some(vec![]);
            let return_data = Some(TransactionReturnData {
                program_id: Pubkey::new_unique(),
                data: vec![1, 2, 3],
            });
            let signature = transaction.signatures[0];
            let status = TransactionStatusMeta {
                status: Ok(()),
                fee: 42,
                pre_balances: pre_balances.clone(),
                post_balances: post_balances.clone(),
                inner_instructions: inner_instructions.clone(),
                log_messages: log_messages.clone(),
                pre_token_balances: pre_token_balances.clone(),
                post_token_balances: post_token_balances.clone(),
                rewards: rewards.clone(),
                loaded_addresses: LoadedAddresses::default(),
                return_data: return_data.clone(),
            }
            .into();
            blockstore
                .transaction_status_cf
                .put_protobuf((0, signature, slot), &status)
                .unwrap();
            VersionedTransactionWithStatusMeta {
                transaction,
                meta: TransactionStatusMeta {
                    status: Ok(()),
                    fee: 42,
                    pre_balances,
                    post_balances,
                    inner_instructions,
                    log_messages,
                    pre_token_balances,
                    post_token_balances,
                    rewards,
                    loaded_addresses: LoadedAddresses::default(),
                    return_data,
                },
            }
        })
        .collect();

    for tx_with_meta in expected_transactions.clone() {
        let signature = tx_with_meta.transaction.signatures[0];
        assert_eq!(
            blockstore
                .get_complete_transaction(signature, slot)
                .unwrap(),
            Some(ConfirmedTransactionWithStatusMeta {
                slot,
                tx_with_meta: TransactionWithStatusMeta::Complete(tx_with_meta),
                block_time: None
            })
        );
        assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None);
    }

    // After the purge, neither lookup path should find the transactions.
    blockstore.run_purge(0, 2, PurgeType::PrimaryIndex).unwrap();
    *blockstore.lowest_cleanup_slot.write().unwrap() = slot;
    for VersionedTransactionWithStatusMeta { transaction, .. } in expected_transactions {
        let signature = transaction.signatures[0];
        assert_eq!(
            blockstore
                .get_complete_transaction(signature, slot)
                .unwrap(),
            None,
        );
        assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None,);
    }
}
2020-08-06 15:21:46 -07:00
#[test]
fn test_empty_transaction_status() {
    // A rooted blockstore with no statuses written must report `None` (not an
    // error) for an unknown signature.
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    blockstore.set_roots(std::iter::once(&0)).unwrap();
    assert!(blockstore
        .get_rooted_transaction(Signature::default())
        .unwrap()
        .is_none());
}
2020-04-09 20:21:31 -07:00
#[test]
fn test_get_confirmed_signatures_for_address() {
    // Writes statuses referencing two addresses across two rooted slots, then
    // checks slot-range filtering, behavior after a primary-index purge, and
    // that results come back sorted by slot.
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    let address0 = solana_sdk::pubkey::new_rand();
    let address1 = solana_sdk::pubkey::new_rand();

    // Signatures [1..5) land in slot 10; [5..9) land in slot 20.
    let slot0 = 10;
    for x in 1..5 {
        blockstore
            .write_transaction_status(
                slot0,
                Signature::new(&[x; 64]),
                vec![&address0],
                vec![&address1],
                TransactionStatusMeta::default(),
            )
            .unwrap();
    }
    let slot1 = 20;
    for x in 5..9 {
        blockstore
            .write_transaction_status(
                slot1,
                Signature::new(&[x; 64]),
                vec![&address0],
                vec![&address1],
                TransactionStatusMeta::default(),
            )
            .unwrap();
    }
    blockstore.set_roots(vec![slot0, slot1].iter()).unwrap();

    // Number of signatures found for `address` within [start, end).
    let count_for = |address, start, end| {
        blockstore
            .get_confirmed_signatures_for_address(address, start, end)
            .unwrap()
            .len()
    };

    let all0 = blockstore
        .get_confirmed_signatures_for_address(address0, 0, 50)
        .unwrap();
    assert_eq!(all0.len(), 8);
    for x in 1..9 {
        let expected_signature = Signature::new(&[x; 64]);
        assert_eq!(all0[x as usize - 1], expected_signature);
    }
    assert_eq!(count_for(address0, 20, 50), 4);
    assert_eq!(count_for(address0, 0, 10), 4);
    assert_eq!(count_for(address0, 1, 5), 0);
    assert_eq!(count_for(address0, 1, 15), 4);

    let all1 = blockstore
        .get_confirmed_signatures_for_address(address1, 0, 50)
        .unwrap();
    assert_eq!(all1.len(), 8);
    for x in 1..9 {
        let expected_signature = Signature::new(&[x; 64]);
        assert_eq!(all1[x as usize - 1], expected_signature);
    }

    // Purge index 0; only the slot1 (index 1) entries remain visible.
    blockstore
        .run_purge(0, 10, PurgeType::PrimaryIndex)
        .unwrap();
    assert_eq!(count_for(address0, 0, 50), 4);
    assert_eq!(count_for(address0, 20, 50), 4);
    assert_eq!(count_for(address0, 0, 10), 0);
    assert_eq!(count_for(address0, 1, 5), 0);
    assert_eq!(count_for(address0, 1, 25), 4);

    // Test sort, regardless of entry order or signature value
    for slot in (21..25).rev() {
        let random_bytes: Vec<u8> = (0..64).map(|_| rand::random::<u8>()).collect();
        blockstore
            .write_transaction_status(
                slot,
                Signature::new(&random_bytes),
                vec![&address0],
                vec![&address1],
                TransactionStatusMeta::default(),
            )
            .unwrap();
    }
    blockstore.set_roots(vec![21, 22, 23, 24].iter()).unwrap();
    let mut past_slot = 0;
    for (slot, _) in blockstore.find_address_signatures(address0, 1, 25).unwrap() {
        assert!(slot >= past_slot);
        past_slot = slot;
    }
}
2021-03-31 21:35:57 -07:00
#[test]
fn test_find_address_signatures_for_slot() {
    // Verifies that find_address_signatures_for_slot() returns exactly the
    // signatures recorded for the requested slot, in write order, regardless
    // of which slots are rooted (only slot1 is rooted below).
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    let address0 = solana_sdk::pubkey::new_rand();
    let address1 = solana_sdk::pubkey::new_rand();

    // Slot 1 gets signatures [1..5); the byte value doubles as an identifier.
    let slot1 = 1;
    for x in 1..5 {
        let signature = Signature::new(&[x; 64]);
        blockstore
            .write_transaction_status(
                slot1,
                signature,
                vec![&address0],
                vec![&address1],
                TransactionStatusMeta::default(),
            )
            .unwrap();
    }
    // Slot 2 gets signatures [5..9), written in two batches.
    let slot2 = 2;
    for x in 5..7 {
        let signature = Signature::new(&[x; 64]);
        blockstore
            .write_transaction_status(
                slot2,
                signature,
                vec![&address0],
                vec![&address1],
                TransactionStatusMeta::default(),
            )
            .unwrap();
    }
    for x in 7..9 {
        let signature = Signature::new(&[x; 64]);
        blockstore
            .write_transaction_status(
                slot2,
                signature,
                vec![&address0],
                vec![&address1],
                TransactionStatusMeta::default(),
            )
            .unwrap();
    }
    // Slot 3 gets signatures [9..13).
    let slot3 = 3;
    for x in 9..13 {
        let signature = Signature::new(&[x; 64]);
        blockstore
            .write_transaction_status(
                slot3,
                signature,
                vec![&address0],
                vec![&address1],
                TransactionStatusMeta::default(),
            )
            .unwrap();
    }
    blockstore.set_roots(std::iter::once(&slot1)).unwrap();

    // Each per-slot query must return only that slot's signatures, and the
    // signature byte values must match the ones written above.
    let slot1_signatures = blockstore
        .find_address_signatures_for_slot(address0, 1)
        .unwrap();
    for (i, (slot, signature)) in slot1_signatures.iter().enumerate() {
        assert_eq!(*slot, slot1);
        assert_eq!(*signature, Signature::new(&[i as u8 + 1; 64]));
    }
    let slot2_signatures = blockstore
        .find_address_signatures_for_slot(address0, 2)
        .unwrap();
    for (i, (slot, signature)) in slot2_signatures.iter().enumerate() {
        assert_eq!(*slot, slot2);
        assert_eq!(*signature, Signature::new(&[i as u8 + 5; 64]));
    }
    let slot3_signatures = blockstore
        .find_address_signatures_for_slot(address0, 3)
        .unwrap();
    for (i, (slot, signature)) in slot3_signatures.iter().enumerate() {
        assert_eq!(*slot, slot3);
        assert_eq!(*signature, Signature::new(&[i as u8 + 9; 64]));
    }
}
#[test]
fn test_get_confirmed_signatures_for_address2() {
    // End-to-end exercise of get_confirmed_signatures_for_address2():
    // pagination via `before`/`until`, the `found_before` flag, per-slot
    // reverse-signature ordering, and the difference between querying at the
    // highest confirmed *root* (8) vs. the highest confirmed *slot* (10).
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    let (shreds, _) = make_slot_entries(1, 0, 4);
    blockstore.insert_shreds(shreds, None, false).unwrap();

    // Build entries containing one single-signature transaction per address,
    // each followed by a tick keyed off the address so entry hashes differ.
    fn make_slot_entries_with_transaction_addresses(addresses: &[Pubkey]) -> Vec<Entry> {
        let mut entries: Vec<Entry> = Vec::new();
        for address in addresses {
            let transaction = Transaction::new_with_compiled_instructions(
                &[&Keypair::new()],
                &[*address],
                Hash::default(),
                vec![solana_sdk::pubkey::new_rand()],
                vec![CompiledInstruction::new(1, &(), vec![0])],
            );
            entries.push(next_entry_mut(&mut Hash::default(), 0, vec![transaction]));
            let mut tick = create_ticks(1, 0, hash(&serialize(address).unwrap()));
            entries.append(&mut tick);
        }
        entries
    }

    let address0 = solana_sdk::pubkey::new_rand();
    let address1 = solana_sdk::pubkey::new_rand();

    // Slots 2..=8 form a chain; each slot has 2 transactions per address.
    for slot in 2..=8 {
        let entries = make_slot_entries_with_transaction_addresses(&[
            address0, address1, address0, address1,
        ]);
        let shreds = entries_to_test_shreds(&entries, slot, slot - 1, true, 0);
        blockstore.insert_shreds(shreds, None, false).unwrap();

        for entry in entries.into_iter() {
            for transaction in entry.transactions {
                assert_eq!(transaction.signatures.len(), 1);
                blockstore
                    .write_transaction_status(
                        slot,
                        transaction.signatures[0],
                        transaction.message.static_account_keys().iter().collect(),
                        vec![],
                        TransactionStatusMeta::default(),
                    )
                    .unwrap();
            }
        }
    }

    // Add 2 slots that both descend from slot 8
    for slot in 9..=10 {
        let entries = make_slot_entries_with_transaction_addresses(&[
            address0, address1, address0, address1,
        ]);
        let shreds = entries_to_test_shreds(&entries, slot, 8, true, 0);
        blockstore.insert_shreds(shreds, None, false).unwrap();
        for entry in entries.into_iter() {
            for transaction in entry.transactions {
                assert_eq!(transaction.signatures.len(), 1);
                blockstore
                    .write_transaction_status(
                        slot,
                        transaction.signatures[0],
                        transaction.message.static_account_keys().iter().collect(),
                        vec![],
                        TransactionStatusMeta::default(),
                    )
                    .unwrap();
            }
        }
    }

    // Leave one slot unrooted to test only returns confirmed signatures
    blockstore
        .set_roots(vec![1, 2, 4, 5, 6, 7, 8].iter())
        .unwrap();
    let highest_confirmed_root = 8;

    // Fetch all rooted signatures for address 0 at once...
    let sig_infos = blockstore
        .get_confirmed_signatures_for_address2(
            address0,
            highest_confirmed_root,
            None,
            None,
            usize::MAX,
        )
        .unwrap();
    assert!(sig_infos.found_before);
    let all0 = sig_infos.infos;
    // 6 rooted slots with statuses (2..=8 minus unrooted 3... slot 1 has no
    // statuses) x 2 txs per address = 12.
    assert_eq!(all0.len(), 12);

    // Fetch all rooted signatures for address 1 at once...
    let all1 = blockstore
        .get_confirmed_signatures_for_address2(
            address1,
            highest_confirmed_root,
            None,
            None,
            usize::MAX,
        )
        .unwrap()
        .infos;
    assert_eq!(all1.len(), 12);

    // Fetch all signatures for address 0 individually
    for i in 0..all0.len() {
        let sig_infos = blockstore
            .get_confirmed_signatures_for_address2(
                address0,
                highest_confirmed_root,
                if i == 0 {
                    None
                } else {
                    Some(all0[i - 1].signature)
                },
                None,
                1,
            )
            .unwrap();
        assert!(sig_infos.found_before);
        let results = sig_infos.infos;
        assert_eq!(results.len(), 1);
        assert_eq!(results[0], all0[i], "Unexpected result for {}", i);
    }
    // Fetch all signatures for address 0 individually using `until`
    for i in 0..all0.len() {
        let results = blockstore
            .get_confirmed_signatures_for_address2(
                address0,
                highest_confirmed_root,
                if i == 0 {
                    None
                } else {
                    Some(all0[i - 1].signature)
                },
                if i == all0.len() - 1 || i == all0.len() {
                    None
                } else {
                    Some(all0[i + 1].signature)
                },
                10,
            )
            .unwrap()
            .infos;
        assert_eq!(results.len(), 1);
        assert_eq!(results[0], all0[i], "Unexpected result for {}", i);
    }

    // Paging past the last signature yields an empty (but found) result.
    let sig_infos = blockstore
        .get_confirmed_signatures_for_address2(
            address0,
            highest_confirmed_root,
            Some(all0[all0.len() - 1].signature),
            None,
            1,
        )
        .unwrap();
    assert!(sig_infos.found_before);
    assert!(sig_infos.infos.is_empty());

    // `until` at the newest signature excludes everything.
    assert!(blockstore
        .get_confirmed_signatures_for_address2(
            address0,
            highest_confirmed_root,
            None,
            Some(all0[0].signature),
            2,
        )
        .unwrap()
        .infos
        .is_empty());
    // Fetch all signatures for address 0, three at a time
    assert!(all0.len() % 3 == 0);
    for i in (0..all0.len()).step_by(3) {
        let results = blockstore
            .get_confirmed_signatures_for_address2(
                address0,
                highest_confirmed_root,
                if i == 0 {
                    None
                } else {
                    Some(all0[i - 1].signature)
                },
                None,
                3,
            )
            .unwrap()
            .infos;
        assert_eq!(results.len(), 3);
        assert_eq!(results[0], all0[i]);
        assert_eq!(results[1], all0[i + 1]);
        assert_eq!(results[2], all0[i + 2]);
    }

    // Ensure that the signatures within a slot are reverse ordered by signature
    // (current limitation of the .get_confirmed_signatures_for_address2())
    for i in (0..all1.len()).step_by(2) {
        let results = blockstore
            .get_confirmed_signatures_for_address2(
                address1,
                highest_confirmed_root,
                if i == 0 {
                    None
                } else {
                    Some(all1[i - 1].signature)
                },
                None,
                2,
            )
            .unwrap()
            .infos;
        assert_eq!(results.len(), 2);
        assert_eq!(results[0].slot, results[1].slot);
        assert!(results[0].signature >= results[1].signature);
        assert_eq!(results[0], all1[i]);
        assert_eq!(results[1], all1[i + 1]);
    }
    // A search for address 0 with `before` and/or `until` signatures from address1 should also work
    let sig_infos = blockstore
        .get_confirmed_signatures_for_address2(
            address0,
            highest_confirmed_root,
            Some(all1[0].signature),
            None,
            usize::MAX,
        )
        .unwrap();
    assert!(sig_infos.found_before);
    let results = sig_infos.infos;
    // The exact number of results returned is variable, based on the sort order of the
    // random signatures that are generated
    assert!(!results.is_empty());
    let results2 = blockstore
        .get_confirmed_signatures_for_address2(
            address0,
            highest_confirmed_root,
            Some(all1[0].signature),
            Some(all1[4].signature),
            usize::MAX,
        )
        .unwrap()
        .infos;
    assert!(results2.len() < results.len());

    // Duplicate all tests using confirmed signatures
    let highest_confirmed_slot = 10;

    // Fetch all signatures for address 0 at once...
    let all0 = blockstore
        .get_confirmed_signatures_for_address2(
            address0,
            highest_confirmed_slot,
            None,
            None,
            usize::MAX,
        )
        .unwrap()
        .infos;
    // One extra (unrooted but confirmed) slot on the 9/10 fork adds 2 more.
    assert_eq!(all0.len(), 14);

    // Fetch all signatures for address 1 at once...
    let all1 = blockstore
        .get_confirmed_signatures_for_address2(
            address1,
            highest_confirmed_slot,
            None,
            None,
            usize::MAX,
        )
        .unwrap()
        .infos;
    assert_eq!(all1.len(), 14);

    // Fetch all signatures for address 0 individually
    for i in 0..all0.len() {
        let results = blockstore
            .get_confirmed_signatures_for_address2(
                address0,
                highest_confirmed_slot,
                if i == 0 {
                    None
                } else {
                    Some(all0[i - 1].signature)
                },
                None,
                1,
            )
            .unwrap()
            .infos;
        assert_eq!(results.len(), 1);
        assert_eq!(results[0], all0[i], "Unexpected result for {}", i);
    }
    // Fetch all signatures for address 0 individually using `until`
    for i in 0..all0.len() {
        let results = blockstore
            .get_confirmed_signatures_for_address2(
                address0,
                highest_confirmed_slot,
                if i == 0 {
                    None
                } else {
                    Some(all0[i - 1].signature)
                },
                if i == all0.len() - 1 || i == all0.len() {
                    None
                } else {
                    Some(all0[i + 1].signature)
                },
                10,
            )
            .unwrap()
            .infos;
        assert_eq!(results.len(), 1);
        assert_eq!(results[0], all0[i], "Unexpected result for {}", i);
    }

    assert!(blockstore
        .get_confirmed_signatures_for_address2(
            address0,
            highest_confirmed_slot,
            Some(all0[all0.len() - 1].signature),
            None,
            1,
        )
        .unwrap()
        .infos
        .is_empty());
    assert!(blockstore
        .get_confirmed_signatures_for_address2(
            address0,
            highest_confirmed_slot,
            None,
            Some(all0[0].signature),
            2,
        )
        .unwrap()
        .infos
        .is_empty());

    // Fetch all signatures for address 0, three at a time
    assert!(all0.len() % 3 == 2);
    for i in (0..all0.len()).step_by(3) {
        let results = blockstore
            .get_confirmed_signatures_for_address2(
                address0,
                highest_confirmed_slot,
                if i == 0 {
                    None
                } else {
                    Some(all0[i - 1].signature)
                },
                None,
                3,
            )
            .unwrap()
            .infos;
        if i < 12 {
            assert_eq!(results.len(), 3);
            assert_eq!(results[2], all0[i + 2]);
        } else {
            // The final page is short: 14 % 3 == 2.
            assert_eq!(results.len(), 2);
        }
        assert_eq!(results[0], all0[i]);
        assert_eq!(results[1], all0[i + 1]);
    }

    // Ensure that the signatures within a slot are reverse ordered by signature
    // (current limitation of the .get_confirmed_signatures_for_address2())
    for i in (0..all1.len()).step_by(2) {
        let results = blockstore
            .get_confirmed_signatures_for_address2(
                address1,
                highest_confirmed_slot,
                if i == 0 {
                    None
                } else {
                    Some(all1[i - 1].signature)
                },
                None,
                2,
            )
            .unwrap()
            .infos;
        assert_eq!(results.len(), 2);
        assert_eq!(results[0].slot, results[1].slot);
        assert!(results[0].signature >= results[1].signature);
        assert_eq!(results[0], all1[i]);
        assert_eq!(results[1], all1[i + 1]);
    }
    // A search for address 0 with `before` and/or `until` signatures from address1 should also work
    let results = blockstore
        .get_confirmed_signatures_for_address2(
            address0,
            highest_confirmed_slot,
            Some(all1[0].signature),
            None,
            usize::MAX,
        )
        .unwrap()
        .infos;
    // The exact number of results returned is variable, based on the sort order of the
    // random signatures that are generated
    assert!(!results.is_empty());
    let results2 = blockstore
        .get_confirmed_signatures_for_address2(
            address0,
            highest_confirmed_slot,
            Some(all1[0].signature),
            Some(all1[4].signature),
            usize::MAX,
        )
        .unwrap()
        .infos;
    assert!(results2.len() < results.len());

    // Remove signature: querying `before` a signature whose index entry is
    // gone must report found_before == false and no results.
    blockstore
        .address_signatures_cf
        .delete((0, address0, 2, all0[0].signature))
        .unwrap();
    let sig_infos = blockstore
        .get_confirmed_signatures_for_address2(
            address0,
            highest_confirmed_root,
            Some(all0[0].signature),
            None,
            usize::MAX,
        )
        .unwrap();
    assert!(!sig_infos.found_before);
    assert!(sig_infos.infos.is_empty());
}
#[test]
#[allow(clippy::same_item_push)]
fn test_get_last_hash() {
    // get_last_hash() over an empty iterator yields None; over a chain of
    // entries it yields the hash of the final entry.
    let mut entries: Vec<Entry> = vec![];
    let empty_entries_iterator = entries.iter();
    assert!(get_last_hash(empty_entries_iterator).is_none());

    let mut prev_hash = hash::hash(&[42u8]);
    for _ in 0..10 {
        let entry = next_entry(&prev_hash, 1, vec![]);
        prev_hash = entry.hash;
        entries.push(entry);
    }
    let entries_iterator = entries.iter();
    assert_eq!(get_last_hash(entries_iterator).unwrap(), entries[9].hash);
}
#[test]
fn test_map_transactions_to_statuses() {
    // Writes a status for each of 4 transactions, then checks that
    // map_transactions_to_statuses() pairs them up, and that a transaction
    // without a stored status yields MissingTransactionMetadata.
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    let transaction_status_cf = &blockstore.transaction_status_cf;

    let slot = 0;
    let mut transactions: Vec<VersionedTransaction> = vec![];
    for x in 0..4 {
        let transaction = Transaction::new_with_compiled_instructions(
            &[&Keypair::new()],
            &[solana_sdk::pubkey::new_rand()],
            Hash::default(),
            vec![solana_sdk::pubkey::new_rand()],
            vec![CompiledInstruction::new(1, &(), vec![0])],
        );
        // Use the loop index as the fee so each status is distinguishable.
        let status = TransactionStatusMeta {
            status: solana_sdk::transaction::Result::<()>::Err(
                TransactionError::AccountNotFound,
            ),
            fee: x,
            pre_balances: vec![],
            post_balances: vec![],
            inner_instructions: Some(vec![]),
            log_messages: Some(vec![]),
            pre_token_balances: Some(vec![]),
            post_token_balances: Some(vec![]),
            rewards: Some(vec![]),
            loaded_addresses: LoadedAddresses::default(),
            return_data: Some(TransactionReturnData::default()),
        }
        .into();
        transaction_status_cf
            .put_protobuf((0, transaction.signatures[0], slot), &status)
            .unwrap();
        transactions.push(transaction.into());
    }

    let map_result =
        blockstore.map_transactions_to_statuses(slot, transactions.clone().into_iter());
    assert!(map_result.is_ok());
    let map = map_result.unwrap();
    assert_eq!(map.len(), 4);
    for (x, m) in map.iter().enumerate() {
        assert_eq!(m.meta.fee, x as u64);
    }

    // Push transaction that will not have matching status, as a test case
    transactions.push(
        Transaction::new_with_compiled_instructions(
            &[&Keypair::new()],
            &[solana_sdk::pubkey::new_rand()],
            Hash::default(),
            vec![solana_sdk::pubkey::new_rand()],
            vec![CompiledInstruction::new(1, &(), vec![0])],
        )
        .into(),
    );

    let map_result =
        blockstore.map_transactions_to_statuses(slot, transactions.clone().into_iter());
    assert_matches!(map_result, Err(BlockstoreError::MissingTransactionMetadata));
}
#[test]
fn test_write_get_perf_samples() {
    // Writes 10 perf samples at slots 50, 100, ..., 500 and verifies that
    // get_recent_perf_samples(k) returns the k newest samples sorted by
    // descending slot.
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    let num_entries: usize = 10;
    let mut perf_samples: Vec<(Slot, PerfSample)> = vec![];
    for x in 1..num_entries + 1 {
        perf_samples.push((
            x as u64 * 50,
            PerfSample {
                num_transactions: 1000 + x as u64,
                num_slots: 50,
                sample_period_secs: 20,
            },
        ));
    }
    for (slot, sample) in perf_samples.iter() {
        blockstore.write_perf_sample(*slot, sample).unwrap();
    }
    for x in 0..num_entries {
        // The newest (x + 1) samples, ordered newest-first.
        let mut expected_samples = perf_samples[num_entries - 1 - x..].to_vec();
        expected_samples.sort_by(|a, b| b.0.cmp(&a.0));
        assert_eq!(
            blockstore.get_recent_perf_samples(x + 1).unwrap(),
            expected_samples
        );
    }
}
#[test]
fn test_lowest_slot() {
    // lowest_slot() reports the lowest non-empty, non-zero slot, and moves up
    // after a purge removes the lower slots.
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    for i in 0..10 {
        let slot = i;
        let (shreds, _) = make_slot_entries(slot, 0, 1);
        blockstore.insert_shreds(shreds, None, false).unwrap();
    }
    assert_eq!(blockstore.lowest_slot(), 1);
    blockstore.run_purge(0, 5, PurgeType::PrimaryIndex).unwrap();
    assert_eq!(blockstore.lowest_slot(), 6);
}
#[test]
fn test_recovery() {
    // Inserting only the coding shreds must be sufficient to erasure-recover
    // every data shred of the slot, byte-for-byte.
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    let slot = 1;
    let (data_shreds, coding_shreds, leader_schedule_cache) =
        setup_erasure_shreds(slot, 0, 100);

    blockstore
        .insert_shreds(coding_shreds, Some(&leader_schedule_cache), false)
        .unwrap();
    let shred_bufs: Vec<_> = data_shreds.iter().map(Shred::payload).cloned().collect();

    // Check all the data shreds were recovered
    for (s, buf) in data_shreds.iter().zip(shred_bufs) {
        assert_eq!(
            blockstore
                .get_data_shred(s.slot(), s.index() as u64)
                .unwrap()
                .unwrap(),
            buf
        );
    }

    verify_index_integrity(&blockstore, slot);
}
#[test]
fn test_index_integrity() {
    // Inserts various subsets of a slot's data/coding shreds — in one or two
    // rounds, with and without enough shreds to trigger erasure recovery —
    // and verifies after each insertion that the shred index exactly matches
    // the stored shreds. The slot is purged between scenarios.
    let slot = 1;
    let num_entries = 100;
    let (data_shreds, coding_shreds, leader_schedule_cache) =
        setup_erasure_shreds(slot, 0, num_entries);
    assert!(data_shreds.len() > 3);
    assert!(coding_shreds.len() > 3);

    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    // Test inserting all the shreds
    let all_shreds: Vec<_> = data_shreds
        .iter()
        .cloned()
        .chain(coding_shreds.iter().cloned())
        .collect();
    blockstore
        .insert_shreds(all_shreds, Some(&leader_schedule_cache), false)
        .unwrap();
    verify_index_integrity(&blockstore, slot);
    blockstore.purge_and_compact_slots(0, slot);

    // Test inserting just the codes, enough for recovery
    blockstore
        .insert_shreds(coding_shreds.clone(), Some(&leader_schedule_cache), false)
        .unwrap();
    verify_index_integrity(&blockstore, slot);
    blockstore.purge_and_compact_slots(0, slot);

    // Test inserting some codes, but not enough for recovery
    blockstore
        .insert_shreds(
            coding_shreds[..coding_shreds.len() - 1].to_vec(),
            Some(&leader_schedule_cache),
            false,
        )
        .unwrap();
    verify_index_integrity(&blockstore, slot);
    blockstore.purge_and_compact_slots(0, slot);

    // Test inserting just the codes, and some data, enough for recovery
    let shreds: Vec<_> = data_shreds[..data_shreds.len() - 1]
        .iter()
        .cloned()
        .chain(coding_shreds[..coding_shreds.len() - 1].iter().cloned())
        .collect();
    blockstore
        .insert_shreds(shreds, Some(&leader_schedule_cache), false)
        .unwrap();
    verify_index_integrity(&blockstore, slot);
    blockstore.purge_and_compact_slots(0, slot);

    // Test inserting some codes, and some data, but enough for recovery
    let shreds: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1]
        .iter()
        .cloned()
        .chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned())
        .collect();
    blockstore
        .insert_shreds(shreds, Some(&leader_schedule_cache), false)
        .unwrap();
    verify_index_integrity(&blockstore, slot);
    blockstore.purge_and_compact_slots(0, slot);

    // Test inserting all shreds in 2 rounds, make sure nothing is lost
    let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1]
        .iter()
        .cloned()
        .chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned())
        .collect();
    let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 1..]
        .iter()
        .cloned()
        .chain(coding_shreds[coding_shreds.len() / 2 - 1..].iter().cloned())
        .collect();
    blockstore
        .insert_shreds(shreds1, Some(&leader_schedule_cache), false)
        .unwrap();
    blockstore
        .insert_shreds(shreds2, Some(&leader_schedule_cache), false)
        .unwrap();
    verify_index_integrity(&blockstore, slot);
    blockstore.purge_and_compact_slots(0, slot);

    // Test not all, but enough data and coding shreds in 2 rounds to trigger recovery,
    // make sure nothing is lost
    let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1]
        .iter()
        .cloned()
        .chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned())
        .collect();
    let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 1..data_shreds.len() / 2]
        .iter()
        .cloned()
        .chain(
            coding_shreds[coding_shreds.len() / 2 - 1..coding_shreds.len() / 2]
                .iter()
                .cloned(),
        )
        .collect();
    blockstore
        .insert_shreds(shreds1, Some(&leader_schedule_cache), false)
        .unwrap();
    blockstore
        .insert_shreds(shreds2, Some(&leader_schedule_cache), false)
        .unwrap();
    verify_index_integrity(&blockstore, slot);
    blockstore.purge_and_compact_slots(0, slot);

    // Test insert shreds in 2 rounds, but not enough to trigger
    // recovery, make sure nothing is lost
    let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 2]
        .iter()
        .cloned()
        .chain(coding_shreds[..coding_shreds.len() / 2 - 2].iter().cloned())
        .collect();
    let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 2..data_shreds.len() / 2 - 1]
        .iter()
        .cloned()
        .chain(
            coding_shreds[coding_shreds.len() / 2 - 2..coding_shreds.len() / 2 - 1]
                .iter()
                .cloned(),
        )
        .collect();
    blockstore
        .insert_shreds(shreds1, Some(&leader_schedule_cache), false)
        .unwrap();
    blockstore
        .insert_shreds(shreds2, Some(&leader_schedule_cache), false)
        .unwrap();
    verify_index_integrity(&blockstore, slot);
    blockstore.purge_and_compact_slots(0, slot);
}
fn setup_erasure_shreds (
slot : u64 ,
parent_slot : u64 ,
num_entries : u64 ,
) -> ( Vec < Shred > , Vec < Shred > , Arc < LeaderScheduleCache > ) {
let entries = make_slot_entries_with_transactions ( num_entries ) ;
let leader_keypair = Arc ::new ( Keypair ::new ( ) ) ;
2021-06-21 13:12:38 -07:00
let shredder = Shredder ::new ( slot , parent_slot , 0 , 0 ) . unwrap ( ) ;
2021-12-19 14:37:55 -08:00
let ( data_shreds , coding_shreds ) = shredder . entries_to_shreds (
& leader_keypair ,
& entries ,
true , // is_last_in_slot
0 , // next_shred_index
0 , // next_code_index
) ;
2019-12-09 00:13:36 -08:00
let genesis_config = create_genesis_config ( 2 ) . genesis_config ;
2021-08-05 06:42:38 -07:00
let bank = Arc ::new ( Bank ::new_for_tests ( & genesis_config ) ) ;
2019-12-09 00:13:36 -08:00
let mut leader_schedule_cache = LeaderScheduleCache ::new_from_bank ( & bank ) ;
let fixed_schedule = FixedSchedule {
leader_schedule : Arc ::new ( LeaderSchedule ::new_from_schedule ( vec! [
leader_keypair . pubkey ( )
] ) ) ,
} ;
leader_schedule_cache . set_fixed_leader_schedule ( Some ( fixed_schedule ) ) ;
( data_shreds , coding_shreds , Arc ::new ( leader_schedule_cache ) )
}
2020-01-13 13:13:52 -08:00
fn verify_index_integrity ( blockstore : & Blockstore , slot : u64 ) {
2021-06-14 11:23:31 -07:00
let shred_index = blockstore . get_index ( slot ) . unwrap ( ) . unwrap ( ) ;
2020-04-24 15:04:23 -07:00
let data_iter = blockstore . slot_data_iterator ( slot , 0 ) . unwrap ( ) ;
2019-12-09 00:13:36 -08:00
let mut num_data = 0 ;
2020-04-24 15:04:23 -07:00
for ( ( slot , index ) , _ ) in data_iter {
num_data + = 1 ;
2021-06-14 11:23:31 -07:00
// Test that iterator and individual shred lookup yield same set
2020-04-24 15:04:23 -07:00
assert! ( blockstore . get_data_shred ( slot , index ) . unwrap ( ) . is_some ( ) ) ;
2021-06-14 11:23:31 -07:00
// Test that the data index has current shred accounted for
2021-12-16 11:17:32 -08:00
assert! ( shred_index . data ( ) . contains ( index ) ) ;
2019-12-09 00:13:36 -08:00
}
// Test the data index doesn't have anything extra
2021-06-14 11:23:31 -07:00
let num_data_in_index = shred_index . data ( ) . num_shreds ( ) ;
2019-12-09 00:13:36 -08:00
assert_eq! ( num_data_in_index , num_data ) ;
2020-03-19 23:35:01 -07:00
let coding_iter = blockstore . slot_coding_iterator ( slot , 0 ) . unwrap ( ) ;
2019-12-09 00:13:36 -08:00
let mut num_coding = 0 ;
for ( ( slot , index ) , _ ) in coding_iter {
num_coding + = 1 ;
2021-06-14 11:23:31 -07:00
// Test that the iterator and individual shred lookup yield same set
2020-01-13 13:13:52 -08:00
assert! ( blockstore . get_coding_shred ( slot , index ) . unwrap ( ) . is_some ( ) ) ;
2021-06-14 11:23:31 -07:00
// Test that the coding index has current shred accounted for
2021-12-16 11:17:32 -08:00
assert! ( shred_index . coding ( ) . contains ( index ) ) ;
2019-12-09 00:13:36 -08:00
}
// Test the data index doesn't have anything extra
2021-06-14 11:23:31 -07:00
let num_coding_in_index = shred_index . coding ( ) . num_shreds ( ) ;
2019-12-09 00:13:36 -08:00
assert_eq! ( num_coding_in_index , num_coding ) ;
}
#[test]
fn test_duplicate_slot() {
    // Shred two different entry batches for the same slot so that the shreds
    // at index 0 share a ShredId but carry different payloads.
    let slot = 0;
    let entries1 = make_slot_entries_with_transactions(1);
    let entries2 = make_slot_entries_with_transactions(1);
    let leader_keypair = Arc::new(Keypair::new());
    let shredder = Shredder::new(slot, 0, 0, 0).unwrap();
    let (shreds, _) = shredder.entries_to_shreds(
        &leader_keypair,
        &entries1,
        true, // is_last_in_slot
        0,    // next_shred_index
        0,    // next_code_index
    );
    let (duplicate_shreds, _) = shredder.entries_to_shreds(
        &leader_keypair,
        &entries2,
        true, // is_last_in_slot
        0,    // next_shred_index
        0,    // next_code_index
    );
    let shred = shreds[0].clone();
    let duplicate_shred = duplicate_shreds[0].clone();
    let non_duplicate_shred = shred.clone();

    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    blockstore
        .insert_shreds(vec![shred.clone()], None, false)
        .unwrap();

    // No duplicate shreds exist yet.
    assert!(!blockstore.has_duplicate_shreds_in_slot(slot));

    // A conflicting payload for the stored (slot, index, type) is reported as
    // a duplicate; the stored payload comes back as evidence.
    assert_eq!(
        blockstore.is_shred_duplicate(
            ShredId::new(slot, /* index: */ 0, duplicate_shred.shred_type()),
            duplicate_shred.payload().clone(),
        ),
        Some(shred.payload().clone())
    );
    // An identical payload is not considered a duplicate.
    assert!(blockstore
        .is_shred_duplicate(
            ShredId::new(slot, /* index: */ 0, non_duplicate_shred.shred_type()),
            non_duplicate_shred.into_payload(),
        )
        .is_none());

    // Persist a duplicate-slot proof.
    blockstore
        .store_duplicate_slot(
            slot,
            shred.payload().clone(),
            duplicate_shred.payload().clone(),
        )
        .unwrap();

    // The slot is now marked as duplicate.
    assert!(blockstore.has_duplicate_shreds_in_slot(slot));

    // The stored proof round-trips intact.
    let duplicate_proof = blockstore.get_duplicate_slot(slot).unwrap();
    assert_eq!(duplicate_proof.shred1, *shred.payload());
    assert_eq!(duplicate_proof.shred2, *duplicate_shred.payload());
}
#[test]
fn test_clear_unconfirmed_slot() {
    // Purging an unconfirmed (dead) slot must clear its shreds and dead mark
    // while keeping its links to child slots in the SlotMeta.
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    let unconfirmed_slot = 9;
    let unconfirmed_child_slot = 10;
    let slots = vec![2, unconfirmed_slot, unconfirmed_child_slot];

    // Insert into slot 9, mark it as dead.
    let shreds: Vec<_> = make_chaining_slot_entries(&slots, 1)
        .into_iter()
        .flat_map(|x| x.0)
        .collect();
    blockstore.insert_shreds(shreds, None, false).unwrap();
    // Should only be one shred in slot 9.
    assert!(blockstore
        .get_data_shred(unconfirmed_slot, 0)
        .unwrap()
        .is_some());
    assert!(blockstore
        .get_data_shred(unconfirmed_slot, 1)
        .unwrap()
        .is_none());
    blockstore.set_dead_slot(unconfirmed_slot).unwrap();

    // Purge the slot.
    blockstore.clear_unconfirmed_slot(unconfirmed_slot);
    assert!(!blockstore.is_dead(unconfirmed_slot));
    // The child link survives the purge.
    assert_eq!(
        blockstore
            .meta(unconfirmed_slot)
            .unwrap()
            .unwrap()
            .next_slots,
        vec![unconfirmed_child_slot]
    );
    // The shred itself is gone.
    assert!(blockstore
        .get_data_shred(unconfirmed_slot, 0)
        .unwrap()
        .is_none());
}
#[test]
fn test_update_completed_data_indexes() {
    let mut completed_data_indexes = BTreeSet::default();
    let mut shred_index = ShredIndex::default();
    // Insert shreds 0..10 in order, each flagged as completing a data set:
    // every insertion should complete exactly the one-shred range [i, i] and
    // accumulate i into the completed set.
    for i in 0..10 {
        shred_index.insert(i as u64);
        assert_eq!(
            update_completed_data_indexes(true, i, &shred_index, &mut completed_data_indexes),
            vec![(i, i)]
        );
        assert!(completed_data_indexes.iter().copied().eq(0..=i));
    }
}
#[test]
fn test_update_completed_data_indexes_out_of_order() {
    let mut completed_data_indexes = BTreeSet::default();
    let mut shred_index = ShredIndex::default();

    // Non-terminal shreds arriving out of order complete nothing.
    shred_index.insert(4);
    assert!(
        update_completed_data_indexes(false, 4, &shred_index, &mut completed_data_indexes)
            .is_empty()
    );
    assert!(completed_data_indexes.is_empty());

    shred_index.insert(2);
    assert!(
        update_completed_data_indexes(false, 2, &shred_index, &mut completed_data_indexes)
            .is_empty()
    );
    assert!(completed_data_indexes.is_empty());

    // A terminal shred at 3 is recorded, but the range is still open because
    // shred 1 (the previous boundary) is missing.
    shred_index.insert(3);
    assert!(
        update_completed_data_indexes(true, 3, &shred_index, &mut completed_data_indexes)
            .is_empty()
    );
    assert!(completed_data_indexes.iter().eq([3].iter()));

    // Inserting data complete shred 1 now confirms the range of shreds [2, 3]
    // is part of the same data set.
    shred_index.insert(1);
    assert_eq!(
        update_completed_data_indexes(true, 1, &shred_index, &mut completed_data_indexes),
        vec![(2, 3)]
    );
    assert!(completed_data_indexes.iter().eq([1, 3].iter()));

    // Inserting data complete shred 0 now confirms the range of shreds [0]
    // is part of the same data set.
    shred_index.insert(0);
    assert_eq!(
        update_completed_data_indexes(true, 0, &shred_index, &mut completed_data_indexes),
        vec![(0, 0), (1, 1)]
    );
    assert!(completed_data_indexes.iter().eq([0, 1, 3].iter()));
}
#[test]
fn test_rewards_protobuf_backward_compatability() {
    // The rewards column must read back both the legacy bincode encoding and
    // the current protobuf encoding as the same protobuf value.
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    let rewards: Rewards = (0..100)
        .map(|i| Reward {
            pubkey: solana_sdk::pubkey::new_rand().to_string(),
            lamports: 42 + i,
            post_balance: u64::MAX,
            reward_type: Some(RewardType::Fee),
            commission: None,
        })
        .collect();
    let protobuf_rewards: generated::Rewards = rewards.into();
    let deprecated_rewards: StoredExtendedRewards = protobuf_rewards.clone().into();

    // Slots 0..2 store raw bincode bytes; slots 2..4 store protobuf.
    for slot in 0..2 {
        let data = serialize(&deprecated_rewards).unwrap();
        blockstore.rewards_cf.put_bytes(slot, &data).unwrap();
    }
    for slot in 2..4 {
        blockstore
            .rewards_cf
            .put_protobuf(slot, &protobuf_rewards)
            .unwrap();
    }

    // Both encodings deserialize identically.
    for slot in 0..4 {
        assert_eq!(
            blockstore
                .rewards_cf
                .get_protobuf_or_bincode::<StoredExtendedRewards>(slot)
                .unwrap()
                .unwrap(),
            protobuf_rewards
        );
    }
}
#[test]
fn test_transaction_status_protobuf_backward_compatability() {
    // The transaction-status column must read back both the legacy bincode
    // encoding and the current protobuf encoding as the same protobuf value.
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    let status = TransactionStatusMeta {
        status: Ok(()),
        fee: 42,
        pre_balances: vec![1, 2, 3],
        post_balances: vec![1, 2, 3],
        inner_instructions: Some(vec![]),
        log_messages: Some(vec![]),
        pre_token_balances: Some(vec![TransactionTokenBalance {
            account_index: 0,
            mint: Pubkey::new_unique().to_string(),
            ui_token_amount: UiTokenAmount {
                ui_amount: Some(1.1),
                decimals: 1,
                amount: "11".to_string(),
                ui_amount_string: "1.1".to_string(),
            },
            owner: Pubkey::new_unique().to_string(),
            program_id: Pubkey::new_unique().to_string(),
        }]),
        post_token_balances: Some(vec![TransactionTokenBalance {
            account_index: 0,
            mint: Pubkey::new_unique().to_string(),
            ui_token_amount: UiTokenAmount {
                ui_amount: None,
                decimals: 1,
                amount: "11".to_string(),
                ui_amount_string: "1.1".to_string(),
            },
            owner: Pubkey::new_unique().to_string(),
            program_id: Pubkey::new_unique().to_string(),
        }]),
        rewards: Some(vec![Reward {
            pubkey: "My11111111111111111111111111111111111111111".to_string(),
            lamports: -42,
            post_balance: 42,
            reward_type: Some(RewardType::Rent),
            commission: None,
        }]),
        loaded_addresses: LoadedAddresses::default(),
        return_data: Some(TransactionReturnData {
            program_id: Pubkey::new_unique(),
            data: vec![1, 2, 3],
        }),
    };
    let deprecated_status: StoredTransactionStatusMeta = status.clone().try_into().unwrap();
    let protobuf_status: generated::TransactionStatusMeta = status.into();

    // Slots 0..2 store raw bincode bytes; slots 2..4 store protobuf.
    for slot in 0..2 {
        let data = serialize(&deprecated_status).unwrap();
        blockstore
            .transaction_status_cf
            .put_bytes((0, Signature::default(), slot), &data)
            .unwrap();
    }
    for slot in 2..4 {
        blockstore
            .transaction_status_cf
            .put_protobuf((0, Signature::default(), slot), &protobuf_status)
            .unwrap();
    }

    // Both encodings deserialize identically.
    for slot in 0..4 {
        assert_eq!(
            blockstore
                .transaction_status_cf
                .get_protobuf_or_bincode::<StoredTransactionStatusMeta>((
                    0,
                    Signature::default(),
                    slot
                ))
                .unwrap()
                .unwrap(),
            protobuf_status
        );
    }
}
2020-12-09 23:14:31 -08:00
fn make_large_tx_entry ( num_txs : usize ) -> Entry {
let txs : Vec < _ > = ( 0 .. num_txs )
. into_iter ( )
. map ( | _ | {
let keypair0 = Keypair ::new ( ) ;
let to = solana_sdk ::pubkey ::new_rand ( ) ;
solana_sdk ::system_transaction ::transfer ( & keypair0 , & to , 1 , Hash ::default ( ) )
} )
. collect ( ) ;
Entry ::new ( & Hash ::default ( ) , 1 , txs )
}
#[test]
fn erasure_multiple_config() {
    solana_logger::setup();
    let slot = 1;
    let parent = 0;
    let num_txs = 20;
    let entry = make_large_tx_entry(num_txs);
    let shreds = entries_to_test_shreds(&[entry], slot, parent, true, 0);
    assert!(shreds.len() > 1);

    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    // Generate two coding sets from the same data shreds with different
    // is_last_in_slot flags; mixing shreds from both into one slot should be
    // detected as a duplicate-slot condition below.
    let coding1 = Shredder::generate_coding_shreds(
        &shreds, false, // is_last_in_slot
        0,     // next_code_index
    );
    let coding2 = Shredder::generate_coding_shreds(
        &shreds, true, // is_last_in_slot
        0,    // next_code_index
    );
    for shred in &shreds {
        info!("shred {:?}", shred);
    }
    for shred in &coding1 {
        info!("coding1 {:?}", shred);
    }
    for shred in &coding2 {
        info!("coding2 {:?}", shred);
    }

    blockstore
        .insert_shreds(shreds[..shreds.len() - 2].to_vec(), None, false)
        .unwrap();
    blockstore
        .insert_shreds(vec![coding1[0].clone(), coding2[1].clone()], None, false)
        .unwrap();
    assert!(blockstore.has_duplicate_shreds_in_slot(slot));
}
#[test]
pub fn test_insert_data_shreds_same_slot_last_index() {
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    // Create enough entries to ensure there are at least two shreds created.
    let num_unique_entries = max_ticks_per_n_shreds(1, None) + 1;
    let (mut original_shreds, original_entries) = make_slot_entries(0, 0, num_unique_entries);

    // Discard first shred, so that the slot is not full.
    assert!(original_shreds.len() > 1);
    let last_index = original_shreds.last().unwrap().index() as u64;
    original_shreds.remove(0);

    // Re-insert the same incomplete shred set (including the last shred)
    // repeatedly; the slot must stay incomplete, alive, and unchanged.
    for _ in 0..10 {
        blockstore
            .insert_shreds(original_shreds.clone(), None, false)
            .unwrap();
        let meta = blockstore.meta(0).unwrap().unwrap();
        assert!(!blockstore.is_dead(0));
        assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), vec![]);
        assert_eq!(meta.consumed, 0);
        assert_eq!(meta.received, last_index + 1);
        assert_eq!(meta.parent_slot, Some(0));
        assert_eq!(meta.last_index, Some(last_index));
        assert!(!blockstore.is_full(0));
    }

    // Inserting a complete shred set for the same entries fills the slot.
    let duplicate_shreds = entries_to_test_shreds(&original_entries, 0, 0, true, 0);
    let num_shreds = duplicate_shreds.len() as u64;
    blockstore
        .insert_shreds(duplicate_shreds, None, false)
        .unwrap();
    assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), original_entries);
    let meta = blockstore.meta(0).unwrap().unwrap();
    assert_eq!(meta.consumed, num_shreds);
    assert_eq!(meta.received, num_shreds);
    assert_eq!(meta.parent_slot, Some(0));
    assert_eq!(meta.last_index, Some(num_shreds - 1));
    assert!(blockstore.is_full(0));
    assert!(!blockstore.is_dead(0));
}
#[test]
fn test_duplicate_last_index() {
    // Two shreds both flagged last-in-slot constitute a duplicate-slot proof.
    let num_shreds = 2;
    let num_entries = max_ticks_per_n_shreds(num_shreds, None);
    let slot = 1;
    let (mut shreds, _) = make_slot_entries(slot, 0, num_entries);

    // Mark both as last shred.
    shreds[0].set_last_in_slot();
    shreds[1].set_last_in_slot();

    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    blockstore.insert_shreds(shreds, None, false).unwrap();
    assert!(blockstore.get_duplicate_slot(slot).is_some());
}
#[test]
fn test_duplicate_last_index_mark_dead() {
    let num_shreds = 10;
    let smaller_last_shred_index = 5;
    let larger_last_shred_index = 8;

    // Build shreds for `slot` with two conflicting "last in slot" flags.
    let setup_test_shreds = |slot: Slot| -> Vec<Shred> {
        let num_entries = max_ticks_per_n_shreds(num_shreds, None);
        let (mut shreds, _) = make_slot_entries(slot, 0, num_entries);
        shreds[smaller_last_shred_index].set_last_in_slot();
        shreds[larger_last_shred_index].set_last_in_slot();
        shreds
    };

    // Insert the given shreds, capture the resulting SlotMeta/Index for later
    // comparison, then purge the slot so the blockstore is clean again.
    let get_expected_slot_meta_and_index_meta =
        |blockstore: &Blockstore, shreds: Vec<Shred>| -> (SlotMeta, Index) {
            let slot = shreds[0].slot();
            blockstore
                .insert_shreds(shreds.clone(), None, false)
                .unwrap();
            let meta = blockstore.meta(slot).unwrap().unwrap();
            assert_eq!(meta.consumed, shreds.len() as u64);
            let shreds_index = blockstore.get_index(slot).unwrap().unwrap();
            for i in 0..shreds.len() as u64 {
                assert!(shreds_index.data().contains(i));
            }
            // Cleanup the slot
            blockstore
                .run_purge(slot, slot, PurgeType::PrimaryIndex)
                .expect("Purge database operations failed");
            assert!(blockstore.meta(slot).unwrap().is_none());
            (meta, shreds_index)
        };

    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();
    let mut slot = 0;
    let shreds = setup_test_shreds(slot);

    // Case 1: Insert in the same batch. Since we're inserting the shreds in order,
    // any shreds > smaller_last_shred_index will not be inserted. Slot is not marked
    // as dead because no slots > the first "last" index shred are inserted before
    // the "last" index shred itself is inserted.
    let (expected_slot_meta, expected_index) = get_expected_slot_meta_and_index_meta(
        &blockstore,
        shreds[..=smaller_last_shred_index].to_vec(),
    );
    blockstore
        .insert_shreds(shreds.clone(), None, false)
        .unwrap();
    assert!(blockstore.get_duplicate_slot(slot).is_some());
    assert!(!blockstore.is_dead(slot));
    for i in 0..num_shreds {
        if i <= smaller_last_shred_index as u64 {
            assert_eq!(
                blockstore.get_data_shred(slot, i).unwrap().unwrap(),
                *shreds[i as usize].payload()
            );
        } else {
            assert!(blockstore.get_data_shred(slot, i).unwrap().is_none());
        }
    }
    let mut meta = blockstore.meta(slot).unwrap().unwrap();
    meta.first_shred_timestamp = expected_slot_meta.first_shred_timestamp;
    assert_eq!(meta, expected_slot_meta);
    assert_eq!(blockstore.get_index(slot).unwrap().unwrap(), expected_index);

    // Case 2: Inserting a duplicate with an even smaller last shred index should not
    // mark the slot as dead since the Slotmeta is full.
    let even_smaller_last_shred_duplicate = {
        let mut payload = shreds[smaller_last_shred_index - 1].payload().clone();
        // Flip a byte to create a duplicate shred
        payload[0] = std::u8::MAX - payload[0];
        let mut shred = Shred::new_from_serialized_shred(payload).unwrap();
        shred.set_last_in_slot();
        shred
    };
    assert!(blockstore
        .is_shred_duplicate(
            ShredId::new(
                slot,
                even_smaller_last_shred_duplicate.index(),
                ShredType::Data
            ),
            even_smaller_last_shred_duplicate.payload().clone(),
        )
        .is_some());
    blockstore
        .insert_shreds(vec![even_smaller_last_shred_duplicate], None, false)
        .unwrap();
    assert!(!blockstore.is_dead(slot));
    for i in 0..num_shreds {
        if i <= smaller_last_shred_index as u64 {
            assert_eq!(
                blockstore.get_data_shred(slot, i).unwrap().unwrap(),
                *shreds[i as usize].payload()
            );
        } else {
            assert!(blockstore.get_data_shred(slot, i).unwrap().is_none());
        }
    }
    let mut meta = blockstore.meta(slot).unwrap().unwrap();
    meta.first_shred_timestamp = expected_slot_meta.first_shred_timestamp;
    assert_eq!(meta, expected_slot_meta);
    assert_eq!(blockstore.get_index(slot).unwrap().unwrap(), expected_index);

    // Case 3: Insert shreds in reverse so that consumed will not be updated. Now on insert,
    // the slot should be marked as dead.
    slot += 1;
    let mut shreds = setup_test_shreds(slot);
    shreds.reverse();
    blockstore
        .insert_shreds(shreds.clone(), None, false)
        .unwrap();
    assert!(blockstore.is_dead(slot));
    // All the shreds other than the two last index shreds because those two
    // are marked as last, but less than the first received index == 10.
    // The others will be inserted even after the slot is marked dead on attempted
    // insert of the first last_index shred since dead slots can still be
    // inserted into.
    for i in 0..num_shreds {
        let shred_to_check = &shreds[i as usize];
        let shred_index = shred_to_check.index() as u64;
        if shred_index != smaller_last_shred_index as u64
            && shred_index != larger_last_shred_index as u64
        {
            assert_eq!(
                blockstore
                    .get_data_shred(slot, shred_index)
                    .unwrap()
                    .unwrap(),
                *shred_to_check.payload()
            );
        } else {
            assert!(blockstore
                .get_data_shred(slot, shred_index)
                .unwrap()
                .is_none());
        }
    }

    // Case 4: Same as Case 3, but this time insert the shreds one at a time to test that the
    // clearing of data shreds works even after they've been committed.
    slot += 1;
    let mut shreds = setup_test_shreds(slot);
    shreds.reverse();
    for shred in shreds.clone() {
        blockstore.insert_shreds(vec![shred], None, false).unwrap();
    }
    assert!(blockstore.is_dead(slot));
    // All the shreds will be inserted since dead slots can still be inserted into.
    for i in 0..num_shreds {
        let shred_to_check = &shreds[i as usize];
        let shred_index = shred_to_check.index() as u64;
        if shred_index != smaller_last_shred_index as u64
            && shred_index != larger_last_shred_index as u64
        {
            assert_eq!(
                blockstore
                    .get_data_shred(slot, shred_index)
                    .unwrap()
                    .unwrap(),
                *shred_to_check.payload()
            );
        } else {
            assert!(blockstore
                .get_data_shred(slot, shred_index)
                .unwrap()
                .is_none());
        }
    }
}
#[test]
fn test_get_slot_entries_dead_slot_race() {
    let setup_test_shreds = move |slot: Slot| -> Vec<Shred> {
        let num_shreds = 10;
        let middle_shred_index = 5;
        let num_entries = max_ticks_per_n_shreds(num_shreds, None);
        let (shreds, _) = make_slot_entries(slot, 0, num_entries);
        // Reverse shreds so that last shred gets inserted first and sets meta.received
        let mut shreds: Vec<Shred> = shreds.into_iter().rev().collect();
        // Push the real middle shred to the end of the shreds list
        shreds.push(shreds[middle_shred_index].clone());
        // Set the middle shred as a last shred to cause the slot to be marked dead
        shreds[middle_shred_index].set_last_in_slot();
        shreds
    };

    let ledger_path = get_tmp_ledger_path_auto_delete!();
    {
        let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap());
        let (slot_sender, slot_receiver) = unbounded();
        let (shred_sender, shred_receiver) = unbounded::<Vec<Shred>>();
        let (signal_sender, signal_receiver) = unbounded();

        // Reader thread: fetching entries for a dead slot must either error
        // with DeadSlot or report the slot as not full.
        let t_entry_getter = {
            let blockstore = blockstore.clone();
            let signal_sender = signal_sender.clone();
            Builder::new()
                .spawn(move || {
                    while let Ok(slot) = slot_receiver.recv() {
                        match blockstore.get_slot_entries_with_shred_info(slot, 0, false) {
                            Ok((_entries, _num_shreds, is_full)) => {
                                if is_full {
                                    signal_sender
                                        .send(Err(IoError::new(
                                            ErrorKind::Other,
                                            "got full slot entries for dead slot",
                                        )))
                                        .unwrap();
                                }
                            }
                            Err(err) => {
                                assert_matches!(err, BlockstoreError::DeadSlot);
                            }
                        }
                        signal_sender.send(Ok(())).unwrap();
                    }
                })
                .unwrap()
        };

        // Writer thread: inserts crafted shreds that mark the slot dead-but-full.
        let t_shred_inserter = {
            let blockstore = blockstore.clone();
            Builder::new()
                .spawn(move || {
                    while let Ok(shreds) = shred_receiver.recv() {
                        let slot = shreds[0].slot();
                        // Grab this lock to block `get_slot_entries` before it fetches completed datasets
                        // and then mark the slot as dead, but full, by inserting carefully crafted shreds.
                        let _lowest_cleanup_slot = blockstore.lowest_cleanup_slot.write().unwrap();
                        blockstore.insert_shreds(shreds, None, false).unwrap();
                        assert!(blockstore.get_duplicate_slot(slot).is_some());
                        assert!(blockstore.is_dead(slot));
                        assert!(blockstore.meta(slot).unwrap().unwrap().is_full());
                        signal_sender.send(Ok(())).unwrap();
                    }
                })
                .unwrap()
        };

        for slot in 0..100 {
            let shreds = setup_test_shreds(slot);
            // Start a task on each thread to trigger a race condition
            slot_sender.send(slot).unwrap();
            shred_sender.send(shreds).unwrap();
            // Check that each thread processed their task before continuing
            for _ in 1..=2 {
                let res = signal_receiver.recv().unwrap();
                assert!(res.is_ok(), "race condition: {:?}", res);
            }
        }

        // Closing the channels lets both worker loops terminate.
        drop(slot_sender);
        drop(shred_sender);
        for handle in [t_entry_getter, t_shred_inserter] {
            assert!(handle.join().is_ok());
        }

        assert!(Arc::strong_count(&blockstore) == 1);
    }
}
#[test]
fn test_read_write_cost_table() {
    // Exercises the program-cost column: write, read-back, update, insert a
    // new record, and delete.
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    let num_entries: usize = 10;
    let mut cost_table: HashMap<Pubkey, u64> = HashMap::new();
    for x in 1..=num_entries {
        cost_table.insert(Pubkey::new_unique(), (x + 100) as u64);
    }

    // write to db
    for (key, cost) in cost_table.iter() {
        blockstore
            .write_program_cost(key, cost)
            .expect("write a program");
    }

    // read back from db
    let read_back = blockstore.read_program_costs().expect("read programs");
    // verify
    assert_eq!(read_back.len(), cost_table.len());
    for (read_key, read_cost) in read_back {
        assert_eq!(read_cost, *cost_table.get(&read_key).unwrap());
    }

    // update value, write to db
    for val in cost_table.values_mut() {
        *val += 100;
    }
    for (key, cost) in cost_table.iter() {
        blockstore
            .write_program_cost(key, cost)
            .expect("write a program");
    }
    // add a new record
    let new_program_key = Pubkey::new_unique();
    let new_program_cost = 999;
    blockstore
        .write_program_cost(&new_program_key, &new_program_cost)
        .unwrap();

    // confirm values updated and the new record present
    let read_back = blockstore.read_program_costs().expect("read programs");
    assert_eq!(read_back.len(), cost_table.len() + 1);
    for (key, cost) in cost_table.iter() {
        assert_eq!(*cost, read_back.iter().find(|(k, _v)| k == key).unwrap().1);
    }
    assert_eq!(
        new_program_cost,
        read_back
            .iter()
            .find(|(k, _v)| *k == new_program_key)
            .unwrap()
            .1
    );

    // test delete
    blockstore
        .delete_program_cost(&new_program_key)
        .expect("delete a program");
    let read_back = blockstore.read_program_costs().expect("read programs");
    // verify only the original records remain
    assert_eq!(read_back.len(), cost_table.len());
    for (read_key, read_cost) in read_back {
        assert_eq!(read_cost, *cost_table.get(&read_key).unwrap());
    }
}
#[test]
fn test_delete_old_records_from_cost_table() {
    // Records dropped from the in-memory cost table should be deletable from
    // the blockstore, leaving the two in sync.
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path()).unwrap();

    let num_entries: usize = 10;
    let mut cost_table: HashMap<Pubkey, u64> = HashMap::new();
    for x in 1..num_entries + 1 {
        cost_table.insert(Pubkey::new_unique(), (x + 100) as u64);
    }

    // write to db
    for (key, cost) in cost_table.iter() {
        blockstore
            .write_program_cost(key, cost)
            .expect("write a program");
    }

    // Drop the record whose cost is 101 (the x == 1 entry) from the table.
    let removed_key = cost_table
        .iter()
        .find(|(_, cost)| **cost == 101_u64)
        .map(|(key, _)| *key)
        .unwrap_or_else(Pubkey::new_unique);
    cost_table.remove(&removed_key);

    // Delete records from blockstore if they are no longer in cost_table;
    // only the removed key should qualify.
    let db_records = blockstore.read_program_costs().expect("read programs");
    db_records.iter().for_each(|(pubkey, _)| {
        if !cost_table.iter().any(|(key, _)| key == pubkey) {
            assert_eq!(*pubkey, removed_key);
            blockstore
                .delete_program_cost(pubkey)
                .expect("delete old program");
        }
    });

    // read back from db and verify it matches the trimmed table
    let read_back = blockstore.read_program_costs().expect("read programs");
    assert_eq!(read_back.len(), cost_table.len());
    for (read_key, read_cost) in read_back {
        assert_eq!(read_cost, *cost_table.get(&read_key).unwrap());
    }
}
2018-11-15 15:53:31 -08:00
}