Add block height to ConfirmedBlock structs (#17523)

* Add BlockHeight CF to blockstore

* Rename CacheBlockTimeService to be more general

* Cache block-height using service

* Fixup previous proto mishandling

* Add block_height to block structs

* Add block-height to solana block

* Fall back to BankForks if block time or block height are not yet written to Blockstore

* Add docs

* Review comments
Tyera Eulberg 2021-05-26 22:16:16 -06:00 committed by GitHub
parent 9541411c15
commit ab581dafc2
21 changed files with 184 additions and 80 deletions
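For RPC consumers, the practical effect is that getConfirmedBlock responses now carry a blockHeight field alongside blockTime; it can still be null for ledger data written before this change or not yet cached. A minimal client-side sketch of reading the new field with solana_client's RpcClient (the endpoint URL is a placeholder, and the node is assumed to run with --enable-rpc-transaction-history so full blocks are served):

use solana_client::rpc_client::RpcClient;

fn main() {
    // Placeholder endpoint; point this at a node that serves full block data.
    let client = RpcClient::new("http://localhost:8899".to_string());
    let slot = client.get_slot().expect("get_slot failed");
    match client.get_confirmed_block(slot) {
        // `block_height` is Option<u64>: None when the BlockHeight column
        // family has no entry for this slot (e.g. pre-existing ledger data).
        Ok(block) => println!(
            "slot {}: height {:?}, time {:?}",
            slot, block.block_height, block.block_time
        ),
        Err(err) => eprintln!("getConfirmedBlock failed for slot {}: {}", slot, err),
    }
}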

View File

@ -2126,6 +2126,9 @@ impl fmt::Display for CliBlock {
if let Some(block_time) = self.encoded_confirmed_block.block_time {
writeln!(f, "Block Time: {:?}", Local.timestamp(block_time, 0))?;
}
if let Some(block_height) = self.encoded_confirmed_block.block_height {
writeln!(f, "Block Height: {:?}", block_height)?;
}
if !self.encoded_confirmed_block.rewards.is_empty() {
let mut rewards = self.encoded_confirmed_block.rewards.clone();
rewards.sort_by(|a, b| a.pubkey.cmp(&b.pubkey));

View File

@ -8,10 +8,10 @@ use std::{
use tokio::runtime::Runtime;
// Delay uploading the largest confirmed root for this many slots. This is done in an attempt to
// ensure that the `CacheBlockTimeService` has had enough time to add the block time for the root
// ensure that the `CacheBlockMetaService` has had enough time to add the block time for the root
// before it's uploaded to BigTable.
//
// A more direct connection between CacheBlockTimeService and BigTableUploadService would be
// A more direct connection between CacheBlockMetaService and BigTableUploadService would be
// preferable...
const LARGEST_CONFIRMED_ROOT_UPLOAD_DELAY: usize = 100;

View File

@ -1,4 +1,4 @@
pub use solana_ledger::blockstore_processor::CacheBlockTimeSender;
pub use solana_ledger::blockstore_processor::CacheBlockMetaSender;
use {
crossbeam_channel::{Receiver, RecvTimeoutError},
solana_ledger::blockstore::Blockstore,
@ -14,18 +14,18 @@ use {
},
};
pub type CacheBlockTimeReceiver = Receiver<Arc<Bank>>;
pub type CacheBlockMetaReceiver = Receiver<Arc<Bank>>;
pub struct CacheBlockTimeService {
pub struct CacheBlockMetaService {
thread_hdl: JoinHandle<()>,
}
const CACHE_BLOCK_TIME_WARNING_MS: u64 = 150;
impl CacheBlockTimeService {
impl CacheBlockMetaService {
#[allow(clippy::new_ret_no_self)]
pub fn new(
cache_block_time_receiver: CacheBlockTimeReceiver,
cache_block_meta_receiver: CacheBlockMetaReceiver,
blockstore: Arc<Blockstore>,
exit: &Arc<AtomicBool>,
) -> Self {
@ -36,19 +36,19 @@ impl CacheBlockTimeService {
if exit.load(Ordering::Relaxed) {
break;
}
let recv_result = cache_block_time_receiver.recv_timeout(Duration::from_secs(1));
let recv_result = cache_block_meta_receiver.recv_timeout(Duration::from_secs(1));
match recv_result {
Err(RecvTimeoutError::Disconnected) => {
break;
}
Ok(bank) => {
let mut cache_block_time_timer = Measure::start("cache_block_time_timer");
Self::cache_block_time(bank, &blockstore);
cache_block_time_timer.stop();
if cache_block_time_timer.as_ms() > CACHE_BLOCK_TIME_WARNING_MS {
let mut cache_block_meta_timer = Measure::start("cache_block_meta_timer");
Self::cache_block_meta(bank, &blockstore);
cache_block_meta_timer.stop();
if cache_block_meta_timer.as_ms() > CACHE_BLOCK_TIME_WARNING_MS {
warn!(
"cache_block_time operation took: {}ms",
cache_block_time_timer.as_ms()
"cache_block_meta operation took: {}ms",
cache_block_meta_timer.as_ms()
);
}
}
@ -59,10 +59,13 @@ impl CacheBlockTimeService {
Self { thread_hdl }
}
fn cache_block_time(bank: Arc<Bank>, blockstore: &Arc<Blockstore>) {
fn cache_block_meta(bank: Arc<Bank>, blockstore: &Arc<Blockstore>) {
if let Err(e) = blockstore.cache_block_time(bank.slot(), bank.clock().unix_timestamp) {
error!("cache_block_time failed: slot {:?} {:?}", bank.slot(), e);
}
if let Err(e) = blockstore.cache_block_height(bank.slot(), bank.block_height()) {
error!("cache_block_height failed: slot {:?} {:?}", bank.slot(), e);
}
}
pub fn join(self) -> thread::Result<()> {

View File

@ -11,7 +11,7 @@ pub mod accounts_hash_verifier;
pub mod banking_stage;
pub mod bigtable_upload_service;
pub mod broadcast_stage;
pub mod cache_block_time_service;
pub mod cache_block_meta_service;
pub mod cluster_info_vote_listener;
pub mod cluster_slot_state_verifier;
pub mod cluster_slots;

View File

@ -2,7 +2,7 @@
use crate::{
broadcast_stage::RetransmitSlotsSender,
cache_block_time_service::CacheBlockTimeSender,
cache_block_meta_service::CacheBlockMetaSender,
cluster_info_vote_listener::{
GossipDuplicateConfirmedSlotsReceiver, GossipVerifiedVoteHashReceiver, VoteTracker,
},
@ -123,7 +123,7 @@ pub struct ReplayStageConfig {
pub block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
pub transaction_status_sender: Option<TransactionStatusSender>,
pub rewards_recorder_sender: Option<RewardsRecorderSender>,
pub cache_block_time_sender: Option<CacheBlockTimeSender>,
pub cache_block_meta_sender: Option<CacheBlockMetaSender>,
pub bank_notification_sender: Option<BankNotificationSender>,
pub wait_for_vote_to_start_leader: bool,
}
@ -305,7 +305,7 @@ impl ReplayStage {
block_commitment_cache,
transaction_status_sender,
rewards_recorder_sender,
cache_block_time_sender,
cache_block_meta_sender,
bank_notification_sender,
wait_for_vote_to_start_leader,
} = config;
@ -375,7 +375,7 @@ impl ReplayStage {
&vote_account,
&mut progress,
transaction_status_sender.as_ref(),
cache_block_time_sender.as_ref(),
cache_block_meta_sender.as_ref(),
&verify_recyclers,
&mut heaviest_subtree_fork_choice,
&replay_vote_sender,
@ -1626,7 +1626,7 @@ impl ReplayStage {
vote_account: &Pubkey,
progress: &mut ProgressMap,
transaction_status_sender: Option<&TransactionStatusSender>,
cache_block_time_sender: Option<&CacheBlockTimeSender>,
cache_block_meta_sender: Option<&CacheBlockMetaSender>,
verify_recyclers: &VerifyRecyclers,
heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
replay_vote_sender: &ReplayVoteSender,
@ -1751,7 +1751,7 @@ impl ReplayStage {
.send(BankNotification::Frozen(bank.clone()))
.unwrap_or_else(|err| warn!("bank_notification_sender failed: {:?}", err));
}
blockstore_processor::cache_block_time(&bank, cache_block_time_sender);
blockstore_processor::cache_block_meta(&bank, cache_block_meta_sender);
let bank_hash = bank.hash();
if let Some(new_frozen_voters) =

View File

@ -953,7 +953,21 @@ impl JsonRpcRequestProcessor {
.load(Ordering::SeqCst)
{
let result = self.blockstore.get_complete_block(slot, true);
return Ok(result.ok().map(|confirmed_block| {
return Ok(result.ok().map(|mut confirmed_block| {
if confirmed_block.block_time.is_none()
|| confirmed_block.block_height.is_none()
{
let r_bank_forks = self.bank_forks.read().unwrap();
let bank = r_bank_forks.get(slot).cloned();
if let Some(bank) = bank {
if confirmed_block.block_time.is_none() {
confirmed_block.block_time = Some(bank.clock().unix_timestamp);
}
if confirmed_block.block_height.is_none() {
confirmed_block.block_height = Some(bank.block_height());
}
}
}
confirmed_block.configure(encoding, transaction_details, show_rewards)
}));
}

View File

@ -4,7 +4,7 @@
use crate::{
accounts_hash_verifier::AccountsHashVerifier,
broadcast_stage::RetransmitSlotsSender,
cache_block_time_service::CacheBlockTimeSender,
cache_block_meta_service::CacheBlockMetaSender,
cluster_info_vote_listener::{
GossipDuplicateConfirmedSlotsReceiver, GossipVerifiedVoteHashReceiver,
VerifiedVoteReceiver, VoteTracker,
@ -116,7 +116,7 @@ impl Tvu {
cfg: Option<Arc<AtomicBool>>,
transaction_status_sender: Option<TransactionStatusSender>,
rewards_recorder_sender: Option<RewardsRecorderSender>,
cache_block_time_sender: Option<CacheBlockTimeSender>,
cache_block_meta_sender: Option<CacheBlockMetaSender>,
snapshot_config_and_pending_package: Option<(SnapshotConfig, PendingSnapshotPackage)>,
vote_tracker: Arc<VoteTracker>,
retransmit_slots_sender: RetransmitSlotsSender,
@ -267,7 +267,7 @@ impl Tvu {
block_commitment_cache,
transaction_status_sender,
rewards_recorder_sender,
cache_block_time_sender,
cache_block_meta_sender,
bank_notification_sender,
wait_for_vote_to_start_leader: tvu_config.wait_for_vote_to_start_leader,
};

View File

@ -2,7 +2,7 @@
use crate::{
broadcast_stage::BroadcastStageType,
cache_block_time_service::{CacheBlockTimeSender, CacheBlockTimeService},
cache_block_meta_service::{CacheBlockMetaSender, CacheBlockMetaService},
cluster_info_vote_listener::VoteTracker,
completed_data_sets_service::CompletedDataSetsService,
consensus::{reconcile_blockstore_roots_with_tower, Tower},
@ -259,8 +259,8 @@ struct TransactionHistoryServices {
max_complete_transaction_status_slot: Arc<AtomicU64>,
rewards_recorder_sender: Option<RewardsRecorderSender>,
rewards_recorder_service: Option<RewardsRecorderService>,
cache_block_time_sender: Option<CacheBlockTimeSender>,
cache_block_time_service: Option<CacheBlockTimeService>,
cache_block_meta_sender: Option<CacheBlockMetaSender>,
cache_block_meta_service: Option<CacheBlockMetaService>,
}
pub struct Validator {
@ -270,7 +270,7 @@ pub struct Validator {
optimistically_confirmed_bank_tracker: Option<OptimisticallyConfirmedBankTracker>,
transaction_status_service: Option<TransactionStatusService>,
rewards_recorder_service: Option<RewardsRecorderService>,
cache_block_time_service: Option<CacheBlockTimeService>,
cache_block_meta_service: Option<CacheBlockMetaService>,
sample_performance_service: Option<SamplePerformanceService>,
gossip_service: GossipService,
serve_repair_service: ServeRepairService,
@ -396,8 +396,8 @@ impl Validator {
max_complete_transaction_status_slot,
rewards_recorder_sender,
rewards_recorder_service,
cache_block_time_sender,
cache_block_time_service,
cache_block_meta_sender,
cache_block_meta_service,
},
tower,
) = new_banks_from_ledger(
@ -724,7 +724,7 @@ impl Validator {
config.enable_partition.clone(),
transaction_status_sender.clone(),
rewards_recorder_sender,
cache_block_time_sender,
cache_block_meta_sender,
snapshot_config_and_pending_package,
vote_tracker.clone(),
retransmit_slots_sender,
@ -787,7 +787,7 @@ impl Validator {
optimistically_confirmed_bank_tracker,
transaction_status_service,
rewards_recorder_service,
cache_block_time_service,
cache_block_meta_service,
sample_performance_service,
snapshot_packager_service,
completed_data_sets_service,
@ -867,10 +867,10 @@ impl Validator {
.expect("rewards_recorder_service");
}
if let Some(cache_block_time_service) = self.cache_block_time_service {
cache_block_time_service
if let Some(cache_block_meta_service) = self.cache_block_meta_service {
cache_block_meta_service
.join()
.expect("cache_block_time_service");
.expect("cache_block_meta_service");
}
if let Some(sample_performance_service) = self.sample_performance_service {
@ -1144,7 +1144,7 @@ fn new_banks_from_ledger(
.transaction_status_sender
.as_ref(),
transaction_history_services
.cache_block_time_sender
.cache_block_meta_sender
.as_ref(),
)
.unwrap_or_else(|err| {
@ -1331,10 +1331,10 @@ fn initialize_rpc_transaction_history_services(
exit,
));
let (cache_block_time_sender, cache_block_time_receiver) = unbounded();
let cache_block_time_sender = Some(cache_block_time_sender);
let cache_block_time_service = Some(CacheBlockTimeService::new(
cache_block_time_receiver,
let (cache_block_meta_sender, cache_block_meta_receiver) = unbounded();
let cache_block_meta_sender = Some(cache_block_meta_sender);
let cache_block_meta_service = Some(CacheBlockMetaService::new(
cache_block_meta_receiver,
blockstore,
exit,
));
@ -1344,8 +1344,8 @@ fn initialize_rpc_transaction_history_services(
max_complete_transaction_status_slot,
rewards_recorder_sender,
rewards_recorder_service,
cache_block_time_sender,
cache_block_time_service,
cache_block_meta_sender,
cache_block_meta_service,
}
}

View File

@ -394,6 +394,7 @@ The result field will be an object with the following fields:
- `postBalance: <u64>` - account balance in lamports after the reward was applied
- `rewardType: <string|undefined>` - type of reward: "fee", "rent", "voting", "staking"
- `blockTime: <i64 | null>` - estimated production time, as Unix timestamp (seconds since the Unix epoch). null if not available
- `blockHeight: <u64 | null>` - the number of blocks beneath this block
#### Example:
@ -409,6 +410,7 @@ Result:
{
"jsonrpc": "2.0",
"result": {
"blockHeight": 428,
"blockTime": null,
"blockhash": "3Eq21vXNB5s86c62bVuUfTeaMif1N2kUqRPBmGRJhyTA",
"parentSlot": 429,
@ -492,6 +494,7 @@ Result:
{
"jsonrpc": "2.0",
"result": {
"blockHeight": 428,
"blockTime": null,
"blockhash": "3Eq21vXNB5s86c62bVuUfTeaMif1N2kUqRPBmGRJhyTA",
"parentSlot": 429,

View File

@ -1,7 +1,7 @@
use crate::{
blockstore::Blockstore,
blockstore_processor::{
self, BlockstoreProcessorError, BlockstoreProcessorResult, CacheBlockTimeSender,
self, BlockstoreProcessorError, BlockstoreProcessorResult, CacheBlockMetaSender,
ProcessOptions, TransactionStatusSender,
},
entry::VerifyRecyclers,
@ -37,7 +37,7 @@ pub fn load(
snapshot_config: Option<&SnapshotConfig>,
process_options: ProcessOptions,
transaction_status_sender: Option<&TransactionStatusSender>,
cache_block_time_sender: Option<&CacheBlockTimeSender>,
cache_block_meta_sender: Option<&CacheBlockMetaSender>,
) -> LoadResult {
if let Some(snapshot_config) = snapshot_config.as_ref() {
info!(
@ -102,7 +102,7 @@ pub fn load(
&process_options,
&VerifyRecyclers::default(),
transaction_status_sender,
cache_block_time_sender,
cache_block_meta_sender,
),
Some(deserialized_snapshot_hash),
);
@ -120,7 +120,7 @@ pub fn load(
&blockstore,
account_paths,
process_options,
cache_block_time_sender,
cache_block_meta_sender,
),
None,
)

View File

@ -139,6 +139,7 @@ pub struct Blockstore {
rewards_cf: LedgerColumn<cf::Rewards>,
blocktime_cf: LedgerColumn<cf::Blocktime>,
perf_samples_cf: LedgerColumn<cf::PerfSamples>,
block_height_cf: LedgerColumn<cf::BlockHeight>,
last_root: Arc<RwLock<Slot>>,
insert_shreds_lock: Arc<Mutex<()>>,
pub new_shreds_signals: Vec<SyncSender<bool>>,
@ -309,6 +310,7 @@ impl Blockstore {
let rewards_cf = db.column();
let blocktime_cf = db.column();
let perf_samples_cf = db.column();
let block_height_cf = db.column();
let db = Arc::new(db);
@ -356,6 +358,7 @@ impl Blockstore {
rewards_cf,
blocktime_cf,
perf_samples_cf,
block_height_cf,
new_shreds_signals: vec![],
completed_slots_senders: vec![],
insert_shreds_lock: Arc::new(Mutex::new(())),
@ -1773,11 +1776,25 @@ impl Blockstore {
}
pub fn cache_block_time(&self, slot: Slot, timestamp: UnixTimestamp) -> Result<()> {
if self.get_block_time(slot).unwrap_or_default().is_none() {
self.blocktime_cf.put(slot, &timestamp)
} else {
Ok(())
self.blocktime_cf.put(slot, &timestamp)
}
pub fn get_block_height(&self, slot: Slot) -> Result<Option<u64>> {
datapoint_info!(
"blockstore-rpc-api",
("method", "get_block_height".to_string(), String)
);
let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap();
// lowest_cleanup_slot is the last slot that was not cleaned up by
// LedgerCleanupService
if *lowest_cleanup_slot > 0 && *lowest_cleanup_slot >= slot {
return Err(BlockstoreError::SlotCleanedUp);
}
self.block_height_cf.get(slot)
}
pub fn cache_block_height(&self, slot: Slot, block_height: u64) -> Result<()> {
self.block_height_cf.put(slot, &block_height)
}
pub fn get_first_available_block(&self) -> Result<Slot> {
@ -1857,7 +1874,12 @@ impl Blockstore {
.get_protobuf_or_bincode::<StoredExtendedRewards>(slot)?
.unwrap_or_default()
.into();
// The Blocktime and BlockHeight column families are updated asynchronously; they
// may not be written by the time the complete slot entries are available. In this
// case, these fields will be `None`.
let block_time = self.blocktime_cf.get(slot)?;
let block_height = self.block_height_cf.get(slot)?;
let block = ConfirmedBlock {
previous_blockhash: previous_blockhash.to_string(),
@ -1867,6 +1889,7 @@ impl Blockstore {
.map_transactions_to_statuses(slot, slot_transaction_iterator),
rewards,
block_time,
block_height,
};
return Ok(block);
}
@ -6063,6 +6086,7 @@ pub mod tests {
previous_blockhash: Hash::default().to_string(),
rewards: vec![],
block_time: None,
block_height: None,
};
assert_eq!(confirmed_block, expected_block);
@ -6076,6 +6100,7 @@ pub mod tests {
previous_blockhash: blockhash.to_string(),
rewards: vec![],
block_time: None,
block_height: None,
};
assert_eq!(confirmed_block, expected_block);
@ -6092,13 +6117,17 @@ pub mod tests {
previous_blockhash: blockhash.to_string(),
rewards: vec![],
block_time: None,
block_height: None,
};
assert_eq!(complete_block, expected_complete_block);
// Test block_time returns, if available
// Test block_time & block_height return, if available
let timestamp = 1_576_183_541;
ledger.blocktime_cf.put(slot + 1, &timestamp).unwrap();
expected_block.block_time = Some(timestamp);
let block_height = slot - 2;
ledger.block_height_cf.put(slot + 1, &block_height).unwrap();
expected_block.block_height = Some(block_height);
let confirmed_block = ledger.get_rooted_block(slot + 1, true).unwrap();
assert_eq!(confirmed_block, expected_block);
@ -6106,6 +6135,9 @@ pub mod tests {
let timestamp = 1_576_183_542;
ledger.blocktime_cf.put(slot + 2, &timestamp).unwrap();
expected_complete_block.block_time = Some(timestamp);
let block_height = slot - 1;
ledger.block_height_cf.put(slot + 2, &block_height).unwrap();
expected_complete_block.block_height = Some(block_height);
let complete_block = ledger.get_complete_block(slot + 2, true).unwrap();
assert_eq!(complete_block, expected_complete_block);

View File

@ -55,6 +55,8 @@ const REWARDS_CF: &str = "rewards";
const BLOCKTIME_CF: &str = "blocktime";
/// Column family for Performance Samples
const PERF_SAMPLES_CF: &str = "perf_samples";
/// Column family for BlockHeight
const BLOCK_HEIGHT_CF: &str = "block_height";
#[derive(Error, Debug)]
pub enum BlockstoreError {
@ -151,6 +153,10 @@ pub mod columns {
#[derive(Debug)]
/// The performance samples column
pub struct PerfSamples;
#[derive(Debug)]
/// The block height column
pub struct BlockHeight;
}
pub enum AccessType {
@ -212,9 +218,9 @@ impl Rocks {
recovery_mode: Option<BlockstoreRecoveryMode>,
) -> Result<Rocks> {
use columns::{
AddressSignatures, Blocktime, DeadSlots, DuplicateSlots, ErasureMeta, Index, Orphans,
PerfSamples, Rewards, Root, ShredCode, ShredData, SlotMeta, TransactionStatus,
TransactionStatusIndex,
AddressSignatures, BlockHeight, Blocktime, DeadSlots, DuplicateSlots, ErasureMeta,
Index, Orphans, PerfSamples, Rewards, Root, ShredCode, ShredData, SlotMeta,
TransactionStatus, TransactionStatusIndex,
};
fs::create_dir_all(&path)?;
@ -259,6 +265,8 @@ impl Rocks {
ColumnFamilyDescriptor::new(Blocktime::NAME, get_cf_options(&access_type));
let perf_samples_cf_descriptor =
ColumnFamilyDescriptor::new(PerfSamples::NAME, get_cf_options(&access_type));
let block_height_cf_descriptor =
ColumnFamilyDescriptor::new(BlockHeight::NAME, get_cf_options(&access_type));
let cfs = vec![
(SlotMeta::NAME, meta_cf_descriptor),
@ -279,6 +287,7 @@ impl Rocks {
(Rewards::NAME, rewards_cf_descriptor),
(Blocktime::NAME, blocktime_cf_descriptor),
(PerfSamples::NAME, perf_samples_cf_descriptor),
(BlockHeight::NAME, block_height_cf_descriptor),
];
// Open the database
@ -316,9 +325,9 @@ impl Rocks {
fn columns(&self) -> Vec<&'static str> {
use columns::{
AddressSignatures, Blocktime, DeadSlots, DuplicateSlots, ErasureMeta, Index, Orphans,
PerfSamples, Rewards, Root, ShredCode, ShredData, SlotMeta, TransactionStatus,
TransactionStatusIndex,
AddressSignatures, BlockHeight, Blocktime, DeadSlots, DuplicateSlots, ErasureMeta,
Index, Orphans, PerfSamples, Rewards, Root, ShredCode, ShredData, SlotMeta,
TransactionStatus, TransactionStatusIndex,
};
vec![
@ -337,6 +346,7 @@ impl Rocks {
Rewards::NAME,
Blocktime::NAME,
PerfSamples::NAME,
BlockHeight::NAME,
]
}
@ -579,6 +589,14 @@ impl TypedColumn for columns::PerfSamples {
type Type = blockstore_meta::PerfSample;
}
impl SlotColumn for columns::BlockHeight {}
impl ColumnName for columns::BlockHeight {
const NAME: &'static str = BLOCK_HEIGHT_CF;
}
impl TypedColumn for columns::BlockHeight {
type Type = u64;
}
impl Column for columns::ShredCode {
type Index = (u64, u64);

View File

@ -380,7 +380,7 @@ pub fn process_blockstore(
blockstore: &Blockstore,
account_paths: Vec<PathBuf>,
opts: ProcessOptions,
cache_block_time_sender: Option<&CacheBlockTimeSender>,
cache_block_meta_sender: Option<&CacheBlockMetaSender>,
) -> BlockstoreProcessorResult {
if let Some(num_threads) = opts.override_num_threads {
PAR_THREAD_POOL.with(|pool| {
@ -409,7 +409,7 @@ pub fn process_blockstore(
blockstore,
&opts,
&recyclers,
cache_block_time_sender,
cache_block_meta_sender,
);
do_process_blockstore_from_root(
blockstore,
@ -417,7 +417,7 @@ pub fn process_blockstore(
&opts,
&recyclers,
None,
cache_block_time_sender,
cache_block_meta_sender,
)
}
@ -428,7 +428,7 @@ pub(crate) fn process_blockstore_from_root(
opts: &ProcessOptions,
recyclers: &VerifyRecyclers,
transaction_status_sender: Option<&TransactionStatusSender>,
cache_block_time_sender: Option<&CacheBlockTimeSender>,
cache_block_meta_sender: Option<&CacheBlockMetaSender>,
) -> BlockstoreProcessorResult {
do_process_blockstore_from_root(
blockstore,
@ -436,7 +436,7 @@ pub(crate) fn process_blockstore_from_root(
opts,
recyclers,
transaction_status_sender,
cache_block_time_sender,
cache_block_meta_sender,
)
}
@ -446,7 +446,7 @@ fn do_process_blockstore_from_root(
opts: &ProcessOptions,
recyclers: &VerifyRecyclers,
transaction_status_sender: Option<&TransactionStatusSender>,
cache_block_time_sender: Option<&CacheBlockTimeSender>,
cache_block_meta_sender: Option<&CacheBlockMetaSender>,
) -> BlockstoreProcessorResult {
info!("processing ledger from slot {}...", bank.slot());
@ -507,7 +507,7 @@ fn do_process_blockstore_from_root(
opts,
recyclers,
transaction_status_sender,
cache_block_time_sender,
cache_block_meta_sender,
&mut timing,
)?;
initial_forks.sort_by_key(|bank| bank.slot());
@ -815,7 +815,7 @@ fn process_bank_0(
blockstore: &Blockstore,
opts: &ProcessOptions,
recyclers: &VerifyRecyclers,
cache_block_time_sender: Option<&CacheBlockTimeSender>,
cache_block_meta_sender: Option<&CacheBlockMetaSender>,
) {
assert_eq!(bank0.slot(), 0);
let mut progress = ConfirmationProgress::new(bank0.last_blockhash());
@ -831,7 +831,7 @@ fn process_bank_0(
)
.expect("processing for bank 0 must succeed");
bank0.freeze();
cache_block_time(bank0, cache_block_time_sender);
cache_block_meta(bank0, cache_block_meta_sender);
}
// Given a bank, add its children to the pending slots queue if those children slots are
@ -899,7 +899,7 @@ fn load_frozen_forks(
opts: &ProcessOptions,
recyclers: &VerifyRecyclers,
transaction_status_sender: Option<&TransactionStatusSender>,
cache_block_time_sender: Option<&CacheBlockTimeSender>,
cache_block_meta_sender: Option<&CacheBlockMetaSender>,
timing: &mut ExecuteTimings,
) -> result::Result<Vec<Arc<Bank>>, BlockstoreProcessorError> {
let mut initial_forks = HashMap::new();
@ -954,7 +954,7 @@ fn load_frozen_forks(
recyclers,
&mut progress,
transaction_status_sender,
cache_block_time_sender,
cache_block_meta_sender,
None,
timing,
)
@ -1128,7 +1128,7 @@ fn process_single_slot(
recyclers: &VerifyRecyclers,
progress: &mut ConfirmationProgress,
transaction_status_sender: Option<&TransactionStatusSender>,
cache_block_time_sender: Option<&CacheBlockTimeSender>,
cache_block_meta_sender: Option<&CacheBlockMetaSender>,
replay_vote_sender: Option<&ReplayVoteSender>,
timing: &mut ExecuteTimings,
) -> result::Result<(), BlockstoreProcessorError> {
@ -1148,7 +1148,7 @@ fn process_single_slot(
})?;
bank.freeze(); // all banks handled by this routine are created from complete slots
cache_block_time(bank, cache_block_time_sender);
cache_block_meta(bank, cache_block_meta_sender);
Ok(())
}
@ -1226,13 +1226,13 @@ impl TransactionStatusSender {
}
}
pub type CacheBlockTimeSender = Sender<Arc<Bank>>;
pub type CacheBlockMetaSender = Sender<Arc<Bank>>;
pub fn cache_block_time(bank: &Arc<Bank>, cache_block_time_sender: Option<&CacheBlockTimeSender>) {
if let Some(cache_block_time_sender) = cache_block_time_sender {
cache_block_time_sender
pub fn cache_block_meta(bank: &Arc<Bank>, cache_block_meta_sender: Option<&CacheBlockMetaSender>) {
if let Some(cache_block_meta_sender) = cache_block_meta_sender {
cache_block_meta_sender
.send(bank.clone())
.unwrap_or_else(|err| warn!("cache_block_time_sender failed: {:?}", err));
.unwrap_or_else(|err| warn!("cache_block_meta_sender failed: {:?}", err));
}
}

View File

@ -694,6 +694,7 @@ mod tests {
previous_blockhash: Hash::default().to_string(),
rewards: vec![],
block_time: Some(1_234_567_890),
block_height: Some(1),
};
let bincode_block = compress_best(
&bincode::serialize::<StoredConfirmedBlock>(&block.clone().into()).unwrap(),

View File

@ -89,6 +89,7 @@ struct StoredConfirmedBlock {
transactions: Vec<StoredConfirmedBlockTransaction>,
rewards: StoredConfirmedBlockRewards,
block_time: Option<UnixTimestamp>,
block_height: Option<u64>,
}
impl From<ConfirmedBlock> for StoredConfirmedBlock {
@ -100,6 +101,7 @@ impl From<ConfirmedBlock> for StoredConfirmedBlock {
transactions,
rewards,
block_time,
block_height,
} = confirmed_block;
Self {
@ -109,6 +111,7 @@ impl From<ConfirmedBlock> for StoredConfirmedBlock {
transactions: transactions.into_iter().map(|tx| tx.into()).collect(),
rewards: rewards.into_iter().map(|reward| reward.into()).collect(),
block_time,
block_height,
}
}
}
@ -122,6 +125,7 @@ impl From<StoredConfirmedBlock> for ConfirmedBlock {
transactions,
rewards,
block_time,
block_height,
} = confirmed_block;
Self {
@ -131,6 +135,7 @@ impl From<StoredConfirmedBlock> for ConfirmedBlock {
transactions: transactions.into_iter().map(|tx| tx.into()).collect(),
rewards: rewards.into_iter().map(|reward| reward.into()).collect(),
block_time,
block_height,
}
}
}

View File

@ -12,6 +12,8 @@ pub struct ConfirmedBlock {
pub rewards: ::prost::alloc::vec::Vec<Reward>,
#[prost(message, optional, tag = "6")]
pub block_time: ::core::option::Option<UnixTimestamp>,
#[prost(message, optional, tag = "7")]
pub block_height: ::core::option::Option<BlockHeight>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConfirmedTransaction {
@ -130,6 +132,11 @@ pub struct UnixTimestamp {
#[prost(int64, tag = "1")]
pub timestamp: i64,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BlockHeight {
#[prost(uint64, tag = "1")]
pub block_height: u64,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum RewardType {

View File

@ -66,7 +66,7 @@ pub enum TransactionErrorType {
InvalidProgramForExecution = 13,
SanitizeFailure = 14,
ClusterMaintenance = 15,
AccountBorrowOutstanding = 16,
AccountBorrowOutstandingTx = 16,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]

View File

@ -9,6 +9,7 @@ message ConfirmedBlock {
repeated ConfirmedTransaction transactions = 4;
repeated Reward rewards = 5;
UnixTimestamp block_time = 6;
BlockHeight block_height = 7;
}
message ConfirmedTransaction {
@ -96,3 +97,7 @@ message Rewards {
message UnixTimestamp {
int64 timestamp = 1;
}
message BlockHeight {
uint64 block_height = 1;
}

View File

@ -118,6 +118,7 @@ impl From<ConfirmedBlock> for generated::ConfirmedBlock {
transactions,
rewards,
block_time,
block_height,
} = confirmed_block;
Self {
@ -127,6 +128,7 @@ impl From<ConfirmedBlock> for generated::ConfirmedBlock {
transactions: transactions.into_iter().map(|tx| tx.into()).collect(),
rewards: rewards.into_iter().map(|r| r.into()).collect(),
block_time: block_time.map(|timestamp| generated::UnixTimestamp { timestamp }),
block_height: block_height.map(|block_height| generated::BlockHeight { block_height }),
}
}
}
@ -143,6 +145,7 @@ impl TryFrom<generated::ConfirmedBlock> for ConfirmedBlock {
transactions,
rewards,
block_time,
block_height,
} = confirmed_block;
Ok(Self {
@ -155,6 +158,7 @@ impl TryFrom<generated::ConfirmedBlock> for ConfirmedBlock {
.collect::<std::result::Result<Vec<TransactionWithStatusMeta>, Self::Error>>()?,
rewards: rewards.into_iter().map(|r| r.into()).collect(),
block_time: block_time.map(|generated::UnixTimestamp { timestamp }| timestamp),
block_height: block_height.map(|generated::BlockHeight { block_height }| block_height),
})
}
}
@ -596,7 +600,7 @@ impl From<TransactionError> for tx_by_addr::TransactionError {
tx_by_addr::TransactionErrorType::InstructionError
}
TransactionError::AccountBorrowOutstanding => {
tx_by_addr::TransactionErrorType::AccountBorrowOutstanding
tx_by_addr::TransactionErrorType::AccountBorrowOutstandingTx
}
} as i32,
instruction_error: match transaction_error {

View File

@ -30,7 +30,7 @@ enum TransactionErrorType {
PROGRAM_ACCOUNT_NOT_FOUND = 3;
INSUFFICIENT_FUNDS_FOR_FEE = 4;
INVALID_ACCOUNT_FOR_FEE = 5;
DUPLICATE_SIGNATURE = 6;
ALREADY_PROCESSED = 6;
BLOCKHASH_NOT_FOUND = 7;
INSTRUCTION_ERROR = 8;
CALL_CHAIN_TOO_DEEP = 9;
@ -40,6 +40,7 @@ enum TransactionErrorType {
INVALID_PROGRAM_FOR_EXECUTION = 13;
SANITIZE_FAILURE = 14;
CLUSTER_MAINTENANCE = 15;
ACCOUNT_BORROW_OUTSTANDING_TX = 16;
}
message InstructionError {
@ -97,6 +98,7 @@ enum InstructionErrorType {
ACCOUNT_NOT_RENT_EXEMPT = 45;
INVALID_ACCOUNT_OWNER = 46;
ARITHMETIC_OVERFLOW = 47;
UNSUPPORTED_SYSVAR = 48;
}
message UnixTimestamp {

View File

@ -352,6 +352,7 @@ pub struct ConfirmedBlock {
pub transactions: Vec<TransactionWithStatusMeta>,
pub rewards: Rewards,
pub block_time: Option<UnixTimestamp>,
pub block_height: Option<u64>,
}
impl ConfirmedBlock {
@ -367,6 +368,7 @@ impl ConfirmedBlock {
.collect(),
rewards: self.rewards,
block_time: self.block_time,
block_height: self.block_height,
}
}
@ -409,6 +411,7 @@ impl ConfirmedBlock {
None
},
block_time: self.block_time,
block_height: self.block_height,
}
}
}
@ -422,6 +425,7 @@ pub struct EncodedConfirmedBlock {
pub transactions: Vec<EncodedTransactionWithStatusMeta>,
pub rewards: Rewards,
pub block_time: Option<UnixTimestamp>,
pub block_height: Option<u64>,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
@ -437,6 +441,7 @@ pub struct UiConfirmedBlock {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub rewards: Option<Rewards>,
pub block_time: Option<UnixTimestamp>,
pub block_height: Option<u64>,
}
impl From<EncodedConfirmedBlock> for UiConfirmedBlock {
@ -449,6 +454,7 @@ impl From<EncodedConfirmedBlock> for UiConfirmedBlock {
signatures: None,
rewards: Some(block.rewards),
block_time: block.block_time,
block_height: block.block_height,
}
}
}
@ -462,6 +468,7 @@ impl From<UiConfirmedBlock> for EncodedConfirmedBlock {
transactions: block.transactions.unwrap_or_default(),
rewards: block.rewards.unwrap_or_default(),
block_time: block.block_time,
block_height: block.block_height,
}
}
}