Add block height to ConfirmedBlock structs (#17523)

* Add BlockHeight CF to blockstore

* Rename CacheBlockTimeService to be more general

* Cache block-height using service

* Fixup previous proto mishandling

* Add block_height to block structs

* Add block-height to solana block

* Fallback to BankForks if block time or block height are not yet written to Blockstore

* Add docs

* Review comments
Tyera Eulberg 2021-05-26 22:16:16 -06:00 committed by GitHub
parent 9541411c15
commit ab581dafc2
21 changed files with 184 additions and 80 deletions
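For orientation before the per-file diffs: the renamed CacheBlockMetaService consumes frozen banks from a channel and now writes both the block time and the block height of each rooted slot to the Blockstore. A minimal wiring sketch using the names introduced below; the exact crate paths and the surrounding setup are assumptions for illustration, not part of this commit:

// Hedged sketch of how the service is created and fed; it mirrors the
// initialize_rpc_transaction_history_services change in validator.rs below.
use std::sync::{atomic::AtomicBool, Arc};
use solana_core::cache_block_meta_service::{CacheBlockMetaSender, CacheBlockMetaService};
use solana_ledger::blockstore::Blockstore;

fn wire_cache_block_meta(
    blockstore: Arc<Blockstore>,
    exit: &Arc<AtomicBool>,
) -> (CacheBlockMetaSender, CacheBlockMetaService) {
    let (cache_block_meta_sender, cache_block_meta_receiver) = crossbeam_channel::unbounded();
    let service = CacheBlockMetaService::new(cache_block_meta_receiver, blockstore, exit);
    // ReplayStage later calls blockstore_processor::cache_block_meta(&bank, ...) for each
    // frozen bank, which hands the bank to this service for caching time and height.
    (cache_block_meta_sender, service)
}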

View File

@@ -2126,6 +2126,9 @@ impl fmt::Display for CliBlock {
         if let Some(block_time) = self.encoded_confirmed_block.block_time {
             writeln!(f, "Block Time: {:?}", Local.timestamp(block_time, 0))?;
         }
+        if let Some(block_height) = self.encoded_confirmed_block.block_height {
+            writeln!(f, "Block Height: {:?}", block_height)?;
+        }
         if !self.encoded_confirmed_block.rewards.is_empty() {
             let mut rewards = self.encoded_confirmed_block.rewards.clone();
             rewards.sort_by(|a, b| a.pubkey.cmp(&b.pubkey));

View File

@@ -8,10 +8,10 @@ use std::{
 use tokio::runtime::Runtime;

 // Delay uploading the largest confirmed root for this many slots. This is done in an attempt to
-// ensure that the `CacheBlockTimeService` has had enough time to add the block time for the root
+// ensure that the `CacheBlockMetaService` has had enough time to add the block time for the root
 // before it's uploaded to BigTable.
 //
-// A more direct connection between CacheBlockTimeService and BigTableUploadService would be
+// A more direct connection between CacheBlockMetaService and BigTableUploadService would be
 // preferable...
 const LARGEST_CONFIRMED_ROOT_UPLOAD_DELAY: usize = 100;

View File

@@ -1,4 +1,4 @@
-pub use solana_ledger::blockstore_processor::CacheBlockTimeSender;
+pub use solana_ledger::blockstore_processor::CacheBlockMetaSender;
 use {
     crossbeam_channel::{Receiver, RecvTimeoutError},
     solana_ledger::blockstore::Blockstore,
@@ -14,18 +14,18 @@ use {
     },
 };

-pub type CacheBlockTimeReceiver = Receiver<Arc<Bank>>;
+pub type CacheBlockMetaReceiver = Receiver<Arc<Bank>>;

-pub struct CacheBlockTimeService {
+pub struct CacheBlockMetaService {
     thread_hdl: JoinHandle<()>,
 }

 const CACHE_BLOCK_TIME_WARNING_MS: u64 = 150;

-impl CacheBlockTimeService {
+impl CacheBlockMetaService {
     #[allow(clippy::new_ret_no_self)]
     pub fn new(
-        cache_block_time_receiver: CacheBlockTimeReceiver,
+        cache_block_meta_receiver: CacheBlockMetaReceiver,
         blockstore: Arc<Blockstore>,
         exit: &Arc<AtomicBool>,
     ) -> Self {
@@ -36,19 +36,19 @@ impl CacheBlockTimeService {
                 if exit.load(Ordering::Relaxed) {
                     break;
                 }
-                let recv_result = cache_block_time_receiver.recv_timeout(Duration::from_secs(1));
+                let recv_result = cache_block_meta_receiver.recv_timeout(Duration::from_secs(1));
                 match recv_result {
                     Err(RecvTimeoutError::Disconnected) => {
                         break;
                     }
                     Ok(bank) => {
-                        let mut cache_block_time_timer = Measure::start("cache_block_time_timer");
-                        Self::cache_block_time(bank, &blockstore);
-                        cache_block_time_timer.stop();
-                        if cache_block_time_timer.as_ms() > CACHE_BLOCK_TIME_WARNING_MS {
+                        let mut cache_block_meta_timer = Measure::start("cache_block_meta_timer");
+                        Self::cache_block_meta(bank, &blockstore);
+                        cache_block_meta_timer.stop();
+                        if cache_block_meta_timer.as_ms() > CACHE_BLOCK_TIME_WARNING_MS {
                             warn!(
-                                "cache_block_time operation took: {}ms",
-                                cache_block_time_timer.as_ms()
+                                "cache_block_meta operation took: {}ms",
+                                cache_block_meta_timer.as_ms()
                             );
                         }
                     }
@@ -59,10 +59,13 @@ impl CacheBlockTimeService {
         Self { thread_hdl }
     }

-    fn cache_block_time(bank: Arc<Bank>, blockstore: &Arc<Blockstore>) {
+    fn cache_block_meta(bank: Arc<Bank>, blockstore: &Arc<Blockstore>) {
         if let Err(e) = blockstore.cache_block_time(bank.slot(), bank.clock().unix_timestamp) {
             error!("cache_block_time failed: slot {:?} {:?}", bank.slot(), e);
         }
+        if let Err(e) = blockstore.cache_block_height(bank.slot(), bank.block_height()) {
+            error!("cache_block_height failed: slot {:?} {:?}", bank.slot(), e);
+        }
     }

     pub fn join(self) -> thread::Result<()> {

View File

@@ -11,7 +11,7 @@ pub mod accounts_hash_verifier;
 pub mod banking_stage;
 pub mod bigtable_upload_service;
 pub mod broadcast_stage;
-pub mod cache_block_time_service;
+pub mod cache_block_meta_service;
 pub mod cluster_info_vote_listener;
 pub mod cluster_slot_state_verifier;
 pub mod cluster_slots;

View File

@@ -2,7 +2,7 @@
 use crate::{
     broadcast_stage::RetransmitSlotsSender,
-    cache_block_time_service::CacheBlockTimeSender,
+    cache_block_meta_service::CacheBlockMetaSender,
     cluster_info_vote_listener::{
         GossipDuplicateConfirmedSlotsReceiver, GossipVerifiedVoteHashReceiver, VoteTracker,
     },
@@ -123,7 +123,7 @@ pub struct ReplayStageConfig {
     pub block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
     pub transaction_status_sender: Option<TransactionStatusSender>,
     pub rewards_recorder_sender: Option<RewardsRecorderSender>,
-    pub cache_block_time_sender: Option<CacheBlockTimeSender>,
+    pub cache_block_meta_sender: Option<CacheBlockMetaSender>,
     pub bank_notification_sender: Option<BankNotificationSender>,
     pub wait_for_vote_to_start_leader: bool,
 }
@@ -305,7 +305,7 @@ impl ReplayStage {
             block_commitment_cache,
             transaction_status_sender,
             rewards_recorder_sender,
-            cache_block_time_sender,
+            cache_block_meta_sender,
             bank_notification_sender,
             wait_for_vote_to_start_leader,
         } = config;
@@ -375,7 +375,7 @@ impl ReplayStage {
                     &vote_account,
                     &mut progress,
                     transaction_status_sender.as_ref(),
-                    cache_block_time_sender.as_ref(),
+                    cache_block_meta_sender.as_ref(),
                     &verify_recyclers,
                     &mut heaviest_subtree_fork_choice,
                     &replay_vote_sender,
@@ -1626,7 +1626,7 @@ impl ReplayStage {
         vote_account: &Pubkey,
         progress: &mut ProgressMap,
         transaction_status_sender: Option<&TransactionStatusSender>,
-        cache_block_time_sender: Option<&CacheBlockTimeSender>,
+        cache_block_meta_sender: Option<&CacheBlockMetaSender>,
         verify_recyclers: &VerifyRecyclers,
         heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
         replay_vote_sender: &ReplayVoteSender,
@@ -1751,7 +1751,7 @@ impl ReplayStage {
                     .send(BankNotification::Frozen(bank.clone()))
                     .unwrap_or_else(|err| warn!("bank_notification_sender failed: {:?}", err));
             }
-            blockstore_processor::cache_block_time(&bank, cache_block_time_sender);
+            blockstore_processor::cache_block_meta(&bank, cache_block_meta_sender);

             let bank_hash = bank.hash();
             if let Some(new_frozen_voters) =

View File

@@ -953,7 +953,21 @@ impl JsonRpcRequestProcessor {
                 .load(Ordering::SeqCst)
             {
                 let result = self.blockstore.get_complete_block(slot, true);
-                return Ok(result.ok().map(|confirmed_block| {
+                return Ok(result.ok().map(|mut confirmed_block| {
+                    if confirmed_block.block_time.is_none()
+                        || confirmed_block.block_height.is_none()
+                    {
+                        let r_bank_forks = self.bank_forks.read().unwrap();
+                        let bank = r_bank_forks.get(slot).cloned();
+                        if let Some(bank) = bank {
+                            if confirmed_block.block_time.is_none() {
+                                confirmed_block.block_time = Some(bank.clock().unix_timestamp);
+                            }
+                            if confirmed_block.block_height.is_none() {
+                                confirmed_block.block_height = Some(bank.block_height());
+                            }
+                        }
+                    }
                     confirmed_block.configure(encoding, transaction_details, show_rewards)
                 }));
             }

View File

@@ -4,7 +4,7 @@
 use crate::{
     accounts_hash_verifier::AccountsHashVerifier,
     broadcast_stage::RetransmitSlotsSender,
-    cache_block_time_service::CacheBlockTimeSender,
+    cache_block_meta_service::CacheBlockMetaSender,
     cluster_info_vote_listener::{
         GossipDuplicateConfirmedSlotsReceiver, GossipVerifiedVoteHashReceiver,
         VerifiedVoteReceiver, VoteTracker,
@@ -116,7 +116,7 @@ impl Tvu {
         cfg: Option<Arc<AtomicBool>>,
         transaction_status_sender: Option<TransactionStatusSender>,
         rewards_recorder_sender: Option<RewardsRecorderSender>,
-        cache_block_time_sender: Option<CacheBlockTimeSender>,
+        cache_block_meta_sender: Option<CacheBlockMetaSender>,
         snapshot_config_and_pending_package: Option<(SnapshotConfig, PendingSnapshotPackage)>,
         vote_tracker: Arc<VoteTracker>,
         retransmit_slots_sender: RetransmitSlotsSender,
@@ -267,7 +267,7 @@ impl Tvu {
             block_commitment_cache,
             transaction_status_sender,
             rewards_recorder_sender,
-            cache_block_time_sender,
+            cache_block_meta_sender,
             bank_notification_sender,
             wait_for_vote_to_start_leader: tvu_config.wait_for_vote_to_start_leader,
         };

View File

@@ -2,7 +2,7 @@
 use crate::{
     broadcast_stage::BroadcastStageType,
-    cache_block_time_service::{CacheBlockTimeSender, CacheBlockTimeService},
+    cache_block_meta_service::{CacheBlockMetaSender, CacheBlockMetaService},
     cluster_info_vote_listener::VoteTracker,
     completed_data_sets_service::CompletedDataSetsService,
     consensus::{reconcile_blockstore_roots_with_tower, Tower},
@@ -259,8 +259,8 @@ struct TransactionHistoryServices {
     max_complete_transaction_status_slot: Arc<AtomicU64>,
     rewards_recorder_sender: Option<RewardsRecorderSender>,
     rewards_recorder_service: Option<RewardsRecorderService>,
-    cache_block_time_sender: Option<CacheBlockTimeSender>,
-    cache_block_time_service: Option<CacheBlockTimeService>,
+    cache_block_meta_sender: Option<CacheBlockMetaSender>,
+    cache_block_meta_service: Option<CacheBlockMetaService>,
 }

 pub struct Validator {
@@ -270,7 +270,7 @@ pub struct Validator {
     optimistically_confirmed_bank_tracker: Option<OptimisticallyConfirmedBankTracker>,
     transaction_status_service: Option<TransactionStatusService>,
     rewards_recorder_service: Option<RewardsRecorderService>,
-    cache_block_time_service: Option<CacheBlockTimeService>,
+    cache_block_meta_service: Option<CacheBlockMetaService>,
     sample_performance_service: Option<SamplePerformanceService>,
     gossip_service: GossipService,
     serve_repair_service: ServeRepairService,
@@ -396,8 +396,8 @@ impl Validator {
                 max_complete_transaction_status_slot,
                 rewards_recorder_sender,
                 rewards_recorder_service,
-                cache_block_time_sender,
-                cache_block_time_service,
+                cache_block_meta_sender,
+                cache_block_meta_service,
             },
             tower,
         ) = new_banks_from_ledger(
@@ -724,7 +724,7 @@ impl Validator {
             config.enable_partition.clone(),
             transaction_status_sender.clone(),
             rewards_recorder_sender,
-            cache_block_time_sender,
+            cache_block_meta_sender,
             snapshot_config_and_pending_package,
             vote_tracker.clone(),
             retransmit_slots_sender,
@@ -787,7 +787,7 @@ impl Validator {
             optimistically_confirmed_bank_tracker,
             transaction_status_service,
             rewards_recorder_service,
-            cache_block_time_service,
+            cache_block_meta_service,
             sample_performance_service,
             snapshot_packager_service,
             completed_data_sets_service,
@@ -867,10 +867,10 @@ impl Validator {
                 .expect("rewards_recorder_service");
         }

-        if let Some(cache_block_time_service) = self.cache_block_time_service {
-            cache_block_time_service
+        if let Some(cache_block_meta_service) = self.cache_block_meta_service {
+            cache_block_meta_service
                 .join()
-                .expect("cache_block_time_service");
+                .expect("cache_block_meta_service");
         }

         if let Some(sample_performance_service) = self.sample_performance_service {
@@ -1144,7 +1144,7 @@ fn new_banks_from_ledger(
             .transaction_status_sender
             .as_ref(),
         transaction_history_services
-            .cache_block_time_sender
+            .cache_block_meta_sender
             .as_ref(),
     )
     .unwrap_or_else(|err| {
@@ -1331,10 +1331,10 @@ fn initialize_rpc_transaction_history_services(
         exit,
     ));

-    let (cache_block_time_sender, cache_block_time_receiver) = unbounded();
-    let cache_block_time_sender = Some(cache_block_time_sender);
-    let cache_block_time_service = Some(CacheBlockTimeService::new(
-        cache_block_time_receiver,
+    let (cache_block_meta_sender, cache_block_meta_receiver) = unbounded();
+    let cache_block_meta_sender = Some(cache_block_meta_sender);
+    let cache_block_meta_service = Some(CacheBlockMetaService::new(
+        cache_block_meta_receiver,
         blockstore,
         exit,
     ));
@@ -1344,8 +1344,8 @@ fn initialize_rpc_transaction_history_services(
         max_complete_transaction_status_slot,
         rewards_recorder_sender,
         rewards_recorder_service,
-        cache_block_time_sender,
-        cache_block_time_service,
+        cache_block_meta_sender,
+        cache_block_meta_service,
     }
 }

View File

@@ -394,6 +394,7 @@ The result field will be an object with the following fields:
 - `postBalance: <u64>` - account balance in lamports after the reward was applied
 - `rewardType: <string|undefined>` - type of reward: "fee", "rent", "voting", "staking"
 - `blockTime: <i64 | null>` - estimated production time, as Unix timestamp (seconds since the Unix epoch). null if not available
+- `blockHeight: <u64 | null>` - the number of blocks beneath this block

 #### Example:

@@ -409,6 +410,7 @@ Result:
 {
   "jsonrpc": "2.0",
   "result": {
+    "blockHeight": 428,
     "blockTime": null,
     "blockhash": "3Eq21vXNB5s86c62bVuUfTeaMif1N2kUqRPBmGRJhyTA",
     "parentSlot": 429,

@@ -492,6 +494,7 @@ Result:
 {
   "jsonrpc": "2.0",
   "result": {
+    "blockHeight": 428,
     "blockTime": null,
     "blockhash": "3Eq21vXNB5s86c62bVuUfTeaMif1N2kUqRPBmGRJhyTA",
     "parentSlot": 429,

View File

@@ -1,7 +1,7 @@
 use crate::{
     blockstore::Blockstore,
     blockstore_processor::{
-        self, BlockstoreProcessorError, BlockstoreProcessorResult, CacheBlockTimeSender,
+        self, BlockstoreProcessorError, BlockstoreProcessorResult, CacheBlockMetaSender,
         ProcessOptions, TransactionStatusSender,
     },
     entry::VerifyRecyclers,
@@ -37,7 +37,7 @@ pub fn load(
     snapshot_config: Option<&SnapshotConfig>,
     process_options: ProcessOptions,
     transaction_status_sender: Option<&TransactionStatusSender>,
-    cache_block_time_sender: Option<&CacheBlockTimeSender>,
+    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
 ) -> LoadResult {
     if let Some(snapshot_config) = snapshot_config.as_ref() {
         info!(
@@ -102,7 +102,7 @@ pub fn load(
                 &process_options,
                 &VerifyRecyclers::default(),
                 transaction_status_sender,
-                cache_block_time_sender,
+                cache_block_meta_sender,
             ),
             Some(deserialized_snapshot_hash),
         );
@@ -120,7 +120,7 @@ pub fn load(
             &blockstore,
             account_paths,
             process_options,
-            cache_block_time_sender,
+            cache_block_meta_sender,
         ),
         None,
     )

View File

@@ -139,6 +139,7 @@ pub struct Blockstore {
     rewards_cf: LedgerColumn<cf::Rewards>,
     blocktime_cf: LedgerColumn<cf::Blocktime>,
     perf_samples_cf: LedgerColumn<cf::PerfSamples>,
+    block_height_cf: LedgerColumn<cf::BlockHeight>,
     last_root: Arc<RwLock<Slot>>,
     insert_shreds_lock: Arc<Mutex<()>>,
     pub new_shreds_signals: Vec<SyncSender<bool>>,
@@ -309,6 +310,7 @@ impl Blockstore {
         let rewards_cf = db.column();
         let blocktime_cf = db.column();
         let perf_samples_cf = db.column();
+        let block_height_cf = db.column();

         let db = Arc::new(db);
@@ -356,6 +358,7 @@ impl Blockstore {
             rewards_cf,
             blocktime_cf,
             perf_samples_cf,
+            block_height_cf,
             new_shreds_signals: vec![],
             completed_slots_senders: vec![],
             insert_shreds_lock: Arc::new(Mutex::new(())),
@@ -1773,11 +1776,25 @@ impl Blockstore {
     }

     pub fn cache_block_time(&self, slot: Slot, timestamp: UnixTimestamp) -> Result<()> {
-        if self.get_block_time(slot).unwrap_or_default().is_none() {
-            self.blocktime_cf.put(slot, &timestamp)
-        } else {
-            Ok(())
+        self.blocktime_cf.put(slot, &timestamp)
+    }
+
+    pub fn get_block_height(&self, slot: Slot) -> Result<Option<u64>> {
+        datapoint_info!(
+            "blockstore-rpc-api",
+            ("method", "get_block_height".to_string(), String)
+        );
+        let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap();
+        // lowest_cleanup_slot is the last slot that was not cleaned up by
+        // LedgerCleanupService
+        if *lowest_cleanup_slot > 0 && *lowest_cleanup_slot >= slot {
+            return Err(BlockstoreError::SlotCleanedUp);
         }
+        self.block_height_cf.get(slot)
+    }
+
+    pub fn cache_block_height(&self, slot: Slot, block_height: u64) -> Result<()> {
+        self.block_height_cf.put(slot, &block_height)
     }

     pub fn get_first_available_block(&self) -> Result<Slot> {
@@ -1857,7 +1874,12 @@
                     .get_protobuf_or_bincode::<StoredExtendedRewards>(slot)?
                     .unwrap_or_default()
                     .into();
+
+                // The Blocktime and BlockHeight column families are updated asynchronously; they
+                // may not be written by the time the complete slot entries are available. In this
+                // case, these fields will be `None`.
                 let block_time = self.blocktime_cf.get(slot)?;
+                let block_height = self.block_height_cf.get(slot)?;

                 let block = ConfirmedBlock {
                     previous_blockhash: previous_blockhash.to_string(),
@@ -1867,6 +1889,7 @@
                         .map_transactions_to_statuses(slot, slot_transaction_iterator),
                     rewards,
                     block_time,
+                    block_height,
                 };
                 return Ok(block);
             }
@@ -6063,6 +6086,7 @@ pub mod tests {
            previous_blockhash: Hash::default().to_string(),
            rewards: vec![],
            block_time: None,
+            block_height: None,
        };

        assert_eq!(confirmed_block, expected_block);
@@ -6076,6 +6100,7 @@
            previous_blockhash: blockhash.to_string(),
            rewards: vec![],
            block_time: None,
+            block_height: None,
        };

        assert_eq!(confirmed_block, expected_block);
@@ -6092,13 +6117,17 @@
            previous_blockhash: blockhash.to_string(),
            rewards: vec![],
            block_time: None,
+            block_height: None,
        };

        assert_eq!(complete_block, expected_complete_block);

-        // Test block_time returns, if available
+        // Test block_time & block_height return, if available
        let timestamp = 1_576_183_541;
        ledger.blocktime_cf.put(slot + 1, &timestamp).unwrap();
        expected_block.block_time = Some(timestamp);
+        let block_height = slot - 2;
+        ledger.block_height_cf.put(slot + 1, &block_height).unwrap();
+        expected_block.block_height = Some(block_height);

        let confirmed_block = ledger.get_rooted_block(slot + 1, true).unwrap();
        assert_eq!(confirmed_block, expected_block);
@@ -6106,6 +6135,9 @@
        let timestamp = 1_576_183_542;
        ledger.blocktime_cf.put(slot + 2, &timestamp).unwrap();
        expected_complete_block.block_time = Some(timestamp);
+        let block_height = slot - 1;
+        ledger.block_height_cf.put(slot + 2, &block_height).unwrap();
+        expected_complete_block.block_height = Some(block_height);

        let complete_block = ledger.get_complete_block(slot + 2, true).unwrap();
        assert_eq!(complete_block, expected_complete_block);
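The new column pairs a write path (`cache_block_height`) with a read path (`get_block_height`). A minimal usage sketch, assuming an already-opened `Blockstore`; in the validator these writes happen asynchronously via `CacheBlockMetaService` rather than inline:

// Hedged sketch: cache a block height for a slot, then read it back.
use solana_ledger::blockstore::Blockstore;

fn cache_and_read(blockstore: &Blockstore) {
    let slot = 42;
    let block_height = 40;

    blockstore
        .cache_block_height(slot, block_height)
        .expect("cache_block_height");

    // Returns Ok(None) if the height was never cached for this slot, and
    // Err(BlockstoreError::SlotCleanedUp) once the slot has been purged.
    let cached = blockstore.get_block_height(slot).expect("get_block_height");
    assert_eq!(cached, Some(block_height));
}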

View File

@@ -55,6 +55,8 @@ const REWARDS_CF: &str = "rewards";
 const BLOCKTIME_CF: &str = "blocktime";
 /// Column family for Performance Samples
 const PERF_SAMPLES_CF: &str = "perf_samples";
+/// Column family for BlockHeight
+const BLOCK_HEIGHT_CF: &str = "block_height";

 #[derive(Error, Debug)]
 pub enum BlockstoreError {
@@ -151,6 +153,10 @@ pub mod columns {
     #[derive(Debug)]
     /// The performance samples column
     pub struct PerfSamples;
+
+    #[derive(Debug)]
+    /// The block height column
+    pub struct BlockHeight;
 }

 pub enum AccessType {
@@ -212,9 +218,9 @@ impl Rocks {
         recovery_mode: Option<BlockstoreRecoveryMode>,
     ) -> Result<Rocks> {
         use columns::{
-            AddressSignatures, Blocktime, DeadSlots, DuplicateSlots, ErasureMeta, Index, Orphans,
-            PerfSamples, Rewards, Root, ShredCode, ShredData, SlotMeta, TransactionStatus,
-            TransactionStatusIndex,
+            AddressSignatures, BlockHeight, Blocktime, DeadSlots, DuplicateSlots, ErasureMeta,
+            Index, Orphans, PerfSamples, Rewards, Root, ShredCode, ShredData, SlotMeta,
+            TransactionStatus, TransactionStatusIndex,
         };

         fs::create_dir_all(&path)?;
@@ -259,6 +265,8 @@ impl Rocks {
             ColumnFamilyDescriptor::new(Blocktime::NAME, get_cf_options(&access_type));
         let perf_samples_cf_descriptor =
             ColumnFamilyDescriptor::new(PerfSamples::NAME, get_cf_options(&access_type));
+        let block_height_cf_descriptor =
+            ColumnFamilyDescriptor::new(BlockHeight::NAME, get_cf_options(&access_type));

         let cfs = vec![
             (SlotMeta::NAME, meta_cf_descriptor),
@@ -279,6 +287,7 @@ impl Rocks {
             (Rewards::NAME, rewards_cf_descriptor),
             (Blocktime::NAME, blocktime_cf_descriptor),
             (PerfSamples::NAME, perf_samples_cf_descriptor),
+            (BlockHeight::NAME, block_height_cf_descriptor),
         ];

         // Open the database
@@ -316,9 +325,9 @@ impl Rocks {
     fn columns(&self) -> Vec<&'static str> {
         use columns::{
-            AddressSignatures, Blocktime, DeadSlots, DuplicateSlots, ErasureMeta, Index, Orphans,
-            PerfSamples, Rewards, Root, ShredCode, ShredData, SlotMeta, TransactionStatus,
-            TransactionStatusIndex,
+            AddressSignatures, BlockHeight, Blocktime, DeadSlots, DuplicateSlots, ErasureMeta,
+            Index, Orphans, PerfSamples, Rewards, Root, ShredCode, ShredData, SlotMeta,
+            TransactionStatus, TransactionStatusIndex,
         };

         vec![
@@ -337,6 +346,7 @@ impl Rocks {
             Rewards::NAME,
             Blocktime::NAME,
             PerfSamples::NAME,
+            BlockHeight::NAME,
         ]
     }
@@ -579,6 +589,14 @@ impl TypedColumn for columns::PerfSamples {
     type Type = blockstore_meta::PerfSample;
 }

+impl SlotColumn for columns::BlockHeight {}
+impl ColumnName for columns::BlockHeight {
+    const NAME: &'static str = BLOCK_HEIGHT_CF;
+}
+impl TypedColumn for columns::BlockHeight {
+    type Type = u64;
+}
+
 impl Column for columns::ShredCode {
     type Index = (u64, u64);

View File

@@ -380,7 +380,7 @@ pub fn process_blockstore(
     blockstore: &Blockstore,
     account_paths: Vec<PathBuf>,
     opts: ProcessOptions,
-    cache_block_time_sender: Option<&CacheBlockTimeSender>,
+    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
 ) -> BlockstoreProcessorResult {
     if let Some(num_threads) = opts.override_num_threads {
         PAR_THREAD_POOL.with(|pool| {
@@ -409,7 +409,7 @@ pub fn process_blockstore(
         blockstore,
         &opts,
         &recyclers,
-        cache_block_time_sender,
+        cache_block_meta_sender,
     );
     do_process_blockstore_from_root(
         blockstore,
@@ -417,7 +417,7 @@ pub fn process_blockstore(
         &opts,
         &recyclers,
         None,
-        cache_block_time_sender,
+        cache_block_meta_sender,
     )
 }
@@ -428,7 +428,7 @@ pub(crate) fn process_blockstore_from_root(
     opts: &ProcessOptions,
     recyclers: &VerifyRecyclers,
     transaction_status_sender: Option<&TransactionStatusSender>,
-    cache_block_time_sender: Option<&CacheBlockTimeSender>,
+    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
 ) -> BlockstoreProcessorResult {
     do_process_blockstore_from_root(
         blockstore,
@@ -436,7 +436,7 @@ pub(crate) fn process_blockstore_from_root(
         opts,
         recyclers,
         transaction_status_sender,
-        cache_block_time_sender,
+        cache_block_meta_sender,
     )
 }
@@ -446,7 +446,7 @@ fn do_process_blockstore_from_root(
     opts: &ProcessOptions,
     recyclers: &VerifyRecyclers,
     transaction_status_sender: Option<&TransactionStatusSender>,
-    cache_block_time_sender: Option<&CacheBlockTimeSender>,
+    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
 ) -> BlockstoreProcessorResult {
     info!("processing ledger from slot {}...", bank.slot());
@@ -507,7 +507,7 @@ fn do_process_blockstore_from_root(
             opts,
             recyclers,
             transaction_status_sender,
-            cache_block_time_sender,
+            cache_block_meta_sender,
             &mut timing,
         )?;
         initial_forks.sort_by_key(|bank| bank.slot());
@@ -815,7 +815,7 @@ fn process_bank_0(
     blockstore: &Blockstore,
     opts: &ProcessOptions,
     recyclers: &VerifyRecyclers,
-    cache_block_time_sender: Option<&CacheBlockTimeSender>,
+    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
 ) {
     assert_eq!(bank0.slot(), 0);
     let mut progress = ConfirmationProgress::new(bank0.last_blockhash());
@@ -831,7 +831,7 @@ fn process_bank_0(
     )
     .expect("processing for bank 0 must succeed");
     bank0.freeze();
-    cache_block_time(bank0, cache_block_time_sender);
+    cache_block_meta(bank0, cache_block_meta_sender);
 }

 // Given a bank, add its children to the pending slots queue if those children slots are
@@ -899,7 +899,7 @@ fn load_frozen_forks(
     opts: &ProcessOptions,
     recyclers: &VerifyRecyclers,
     transaction_status_sender: Option<&TransactionStatusSender>,
-    cache_block_time_sender: Option<&CacheBlockTimeSender>,
+    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
     timing: &mut ExecuteTimings,
 ) -> result::Result<Vec<Arc<Bank>>, BlockstoreProcessorError> {
     let mut initial_forks = HashMap::new();
@@ -954,7 +954,7 @@ fn load_frozen_forks(
                         recyclers,
                         &mut progress,
                         transaction_status_sender,
-                        cache_block_time_sender,
+                        cache_block_meta_sender,
                         None,
                         timing,
                     )
@@ -1128,7 +1128,7 @@ fn process_single_slot(
     recyclers: &VerifyRecyclers,
     progress: &mut ConfirmationProgress,
     transaction_status_sender: Option<&TransactionStatusSender>,
-    cache_block_time_sender: Option<&CacheBlockTimeSender>,
+    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
     replay_vote_sender: Option<&ReplayVoteSender>,
     timing: &mut ExecuteTimings,
 ) -> result::Result<(), BlockstoreProcessorError> {
@@ -1148,7 +1148,7 @@ fn process_single_slot(
     })?;

     bank.freeze(); // all banks handled by this routine are created from complete slots
-    cache_block_time(bank, cache_block_time_sender);
+    cache_block_meta(bank, cache_block_meta_sender);

     Ok(())
 }
@@ -1226,13 +1226,13 @@ impl TransactionStatusSender {
     }
 }

-pub type CacheBlockTimeSender = Sender<Arc<Bank>>;
+pub type CacheBlockMetaSender = Sender<Arc<Bank>>;

-pub fn cache_block_time(bank: &Arc<Bank>, cache_block_time_sender: Option<&CacheBlockTimeSender>) {
-    if let Some(cache_block_time_sender) = cache_block_time_sender {
-        cache_block_time_sender
+pub fn cache_block_meta(bank: &Arc<Bank>, cache_block_meta_sender: Option<&CacheBlockMetaSender>) {
+    if let Some(cache_block_meta_sender) = cache_block_meta_sender {
+        cache_block_meta_sender
             .send(bank.clone())
-            .unwrap_or_else(|err| warn!("cache_block_time_sender failed: {:?}", err));
+            .unwrap_or_else(|err| warn!("cache_block_meta_sender failed: {:?}", err));
     }
 }

View File

@@ -694,6 +694,7 @@ mod tests {
            previous_blockhash: Hash::default().to_string(),
            rewards: vec![],
            block_time: Some(1_234_567_890),
+            block_height: Some(1),
        };
        let bincode_block = compress_best(
            &bincode::serialize::<StoredConfirmedBlock>(&block.clone().into()).unwrap(),

View File

@@ -89,6 +89,7 @@ struct StoredConfirmedBlock {
     transactions: Vec<StoredConfirmedBlockTransaction>,
     rewards: StoredConfirmedBlockRewards,
     block_time: Option<UnixTimestamp>,
+    block_height: Option<u64>,
 }

 impl From<ConfirmedBlock> for StoredConfirmedBlock {
@@ -100,6 +101,7 @@ impl From<ConfirmedBlock> for StoredConfirmedBlock {
             transactions,
             rewards,
             block_time,
+            block_height,
         } = confirmed_block;

         Self {
@@ -109,6 +111,7 @@ impl From<ConfirmedBlock> for StoredConfirmedBlock {
             transactions: transactions.into_iter().map(|tx| tx.into()).collect(),
             rewards: rewards.into_iter().map(|reward| reward.into()).collect(),
             block_time,
+            block_height,
         }
     }
 }
@@ -122,6 +125,7 @@ impl From<StoredConfirmedBlock> for ConfirmedBlock {
             transactions,
             rewards,
             block_time,
+            block_height,
         } = confirmed_block;

         Self {
@@ -131,6 +135,7 @@ impl From<StoredConfirmedBlock> for ConfirmedBlock {
             transactions: transactions.into_iter().map(|tx| tx.into()).collect(),
             rewards: rewards.into_iter().map(|reward| reward.into()).collect(),
             block_time,
+            block_height,
         }
     }
 }

View File

@@ -12,6 +12,8 @@ pub struct ConfirmedBlock {
     pub rewards: ::prost::alloc::vec::Vec<Reward>,
     #[prost(message, optional, tag = "6")]
     pub block_time: ::core::option::Option<UnixTimestamp>,
+    #[prost(message, optional, tag = "7")]
+    pub block_height: ::core::option::Option<BlockHeight>,
 }
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct ConfirmedTransaction {
@@ -130,6 +132,11 @@ pub struct UnixTimestamp {
     #[prost(int64, tag = "1")]
     pub timestamp: i64,
 }
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlockHeight {
+    #[prost(uint64, tag = "1")]
+    pub block_height: u64,
+}
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
 #[repr(i32)]
 pub enum RewardType {

View File

@@ -66,7 +66,7 @@ pub enum TransactionErrorType {
     InvalidProgramForExecution = 13,
     SanitizeFailure = 14,
     ClusterMaintenance = 15,
-    AccountBorrowOutstanding = 16,
+    AccountBorrowOutstandingTx = 16,
 }
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
 #[repr(i32)]

View File

@@ -9,6 +9,7 @@ message ConfirmedBlock {
   repeated ConfirmedTransaction transactions = 4;
   repeated Reward rewards = 5;
   UnixTimestamp block_time = 6;
+  BlockHeight block_height = 7;
 }

 message ConfirmedTransaction {
@@ -96,3 +97,7 @@ message Rewards {
 message UnixTimestamp {
   int64 timestamp = 1;
 }
+
+message BlockHeight {
+  uint64 block_height = 1;
+}

View File

@@ -118,6 +118,7 @@ impl From<ConfirmedBlock> for generated::ConfirmedBlock {
             transactions,
             rewards,
             block_time,
+            block_height,
         } = confirmed_block;

         Self {
@@ -127,6 +128,7 @@ impl From<ConfirmedBlock> for generated::ConfirmedBlock {
             transactions: transactions.into_iter().map(|tx| tx.into()).collect(),
             rewards: rewards.into_iter().map(|r| r.into()).collect(),
             block_time: block_time.map(|timestamp| generated::UnixTimestamp { timestamp }),
+            block_height: block_height.map(|block_height| generated::BlockHeight { block_height }),
         }
     }
 }
@@ -143,6 +145,7 @@ impl TryFrom<generated::ConfirmedBlock> for ConfirmedBlock {
             transactions,
             rewards,
             block_time,
+            block_height,
         } = confirmed_block;

         Ok(Self {
@@ -155,6 +158,7 @@ impl TryFrom<generated::ConfirmedBlock> for ConfirmedBlock {
                 .collect::<std::result::Result<Vec<TransactionWithStatusMeta>, Self::Error>>()?,
             rewards: rewards.into_iter().map(|r| r.into()).collect(),
             block_time: block_time.map(|generated::UnixTimestamp { timestamp }| timestamp),
+            block_height: block_height.map(|generated::BlockHeight { block_height }| block_height),
         })
     }
 }
@@ -596,7 +600,7 @@ impl From<TransactionError> for tx_by_addr::TransactionError {
                     tx_by_addr::TransactionErrorType::InstructionError
                 }
                 TransactionError::AccountBorrowOutstanding => {
-                    tx_by_addr::TransactionErrorType::AccountBorrowOutstanding
+                    tx_by_addr::TransactionErrorType::AccountBorrowOutstandingTx
                 }
             } as i32,
             instruction_error: match transaction_error {

View File

@@ -30,7 +30,7 @@ enum TransactionErrorType {
   PROGRAM_ACCOUNT_NOT_FOUND = 3;
   INSUFFICIENT_FUNDS_FOR_FEE = 4;
   INVALID_ACCOUNT_FOR_FEE = 5;
-  DUPLICATE_SIGNATURE = 6;
+  ALREADY_PROCESSED = 6;
   BLOCKHASH_NOT_FOUND = 7;
   INSTRUCTION_ERROR = 8;
   CALL_CHAIN_TOO_DEEP = 9;
@@ -40,6 +40,7 @@ enum TransactionErrorType {
   INVALID_PROGRAM_FOR_EXECUTION = 13;
   SANITIZE_FAILURE = 14;
   CLUSTER_MAINTENANCE = 15;
+  ACCOUNT_BORROW_OUTSTANDING_TX = 16;
 }

 message InstructionError {
@@ -97,6 +98,7 @@ enum InstructionErrorType {
   ACCOUNT_NOT_RENT_EXEMPT = 45;
   INVALID_ACCOUNT_OWNER = 46;
   ARITHMETIC_OVERFLOW = 47;
+  UNSUPPORTED_SYSVAR = 48;
 }

 message UnixTimestamp {

View File

@@ -352,6 +352,7 @@ pub struct ConfirmedBlock {
     pub transactions: Vec<TransactionWithStatusMeta>,
     pub rewards: Rewards,
     pub block_time: Option<UnixTimestamp>,
+    pub block_height: Option<u64>,
 }

 impl ConfirmedBlock {
@@ -367,6 +368,7 @@ impl ConfirmedBlock {
                 .collect(),
             rewards: self.rewards,
             block_time: self.block_time,
+            block_height: self.block_height,
         }
     }
@@ -409,6 +411,7 @@ impl ConfirmedBlock {
                 None
             },
             block_time: self.block_time,
+            block_height: self.block_height,
         }
     }
 }
@@ -422,6 +425,7 @@ pub struct EncodedConfirmedBlock {
     pub transactions: Vec<EncodedTransactionWithStatusMeta>,
     pub rewards: Rewards,
     pub block_time: Option<UnixTimestamp>,
+    pub block_height: Option<u64>,
 }

 #[derive(Debug, PartialEq, Serialize, Deserialize)]
@@ -437,6 +441,7 @@ pub struct UiConfirmedBlock {
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub rewards: Option<Rewards>,
     pub block_time: Option<UnixTimestamp>,
+    pub block_height: Option<u64>,
 }

 impl From<EncodedConfirmedBlock> for UiConfirmedBlock {
@@ -449,6 +454,7 @@ impl From<EncodedConfirmedBlock> for UiConfirmedBlock {
             signatures: None,
             rewards: Some(block.rewards),
             block_time: block.block_time,
+            block_height: block.block_height,
         }
     }
 }
@@ -462,6 +468,7 @@ impl From<UiConfirmedBlock> for EncodedConfirmedBlock {
             transactions: block.transactions.unwrap_or_default(),
             rewards: block.rewards.unwrap_or_default(),
             block_time: block.block_time,
+            block_height: block.block_height,
         }
     }
 }