2021-02-16 13:48:20 -08:00
|
|
|
#![allow(clippy::integer_arithmetic)]
|
2021-07-09 13:06:06 -07:00
|
|
|
use {
|
|
|
|
log::*,
|
|
|
|
serde::{Deserialize, Serialize},
|
2021-11-22 15:22:46 -08:00
|
|
|
solana_metrics::inc_new_counter_debug,
|
2021-07-09 13:06:06 -07:00
|
|
|
solana_sdk::{
|
|
|
|
clock::{Slot, UnixTimestamp},
|
|
|
|
deserialize_utils::default_on_eof,
|
2022-01-13 23:24:41 -08:00
|
|
|
message::v0::LoadedAddresses,
|
2021-07-09 13:06:06 -07:00
|
|
|
pubkey::Pubkey,
|
|
|
|
signature::Signature,
|
|
|
|
sysvar::is_sysvar_id,
|
2022-01-13 23:24:41 -08:00
|
|
|
transaction::{TransactionError, VersionedTransaction},
|
2021-07-09 13:06:06 -07:00
|
|
|
},
|
|
|
|
solana_storage_proto::convert::{generated, tx_by_addr},
|
|
|
|
solana_transaction_status::{
|
2022-02-09 21:28:18 -08:00
|
|
|
extract_and_fmt_memos, ConfirmedBlock, ConfirmedTransactionStatusWithSignature,
|
|
|
|
ConfirmedTransactionWithStatusMeta, Reward, TransactionByAddrInfo,
|
|
|
|
TransactionConfirmationStatus, TransactionStatus, TransactionStatusMeta,
|
|
|
|
TransactionWithStatusMeta, VersionedConfirmedBlock, VersionedTransactionWithStatusMeta,
|
2021-07-09 13:06:06 -07:00
|
|
|
},
|
2021-09-16 16:37:45 -07:00
|
|
|
std::{
|
|
|
|
collections::{HashMap, HashSet},
|
|
|
|
convert::TryInto,
|
|
|
|
},
|
2021-07-09 13:06:06 -07:00
|
|
|
thiserror::Error,
|
2020-07-20 11:56:44 -07:00
|
|
|
};
|
|
|
|
|
2021-11-22 15:22:46 -08:00
|
|
|
#[macro_use]
|
|
|
|
extern crate solana_metrics;
|
|
|
|
|
2020-07-20 11:56:44 -07:00
|
|
|
#[macro_use]
|
|
|
|
extern crate serde_derive;
|
|
|
|
|
2020-07-17 13:31:10 -07:00
|
|
|
mod access_token;
|
2020-07-20 11:56:44 -07:00
|
|
|
mod bigtable;
|
2020-07-20 12:15:53 -07:00
|
|
|
mod compression;
|
2020-07-17 13:36:11 -07:00
|
|
|
mod root_ca_certificate;
|
2020-07-20 12:15:53 -07:00
|
|
|
|
2020-07-20 11:56:44 -07:00
|
|
|
/// Errors produced while reading from or writing to the bigtable ledger
/// storage.
#[derive(Debug, Error)]
pub enum Error {
    /// A lower-level failure reported by the bigtable client
    #[error("BigTable: {0}")]
    BigTableError(bigtable::Error),

    /// An I/O failure surfaced while moving data in or out of storage
    #[error("I/O Error: {0}")]
    IoError(std::io::Error),

    /// The stored transaction could not be decoded into a supported encoding
    #[error("Transaction encoded is not supported")]
    UnsupportedTransactionEncoding,

    /// No block row exists for the requested slot
    #[error("Block not found: {0}")]
    BlockNotFound(Slot),

    /// No transaction row exists for the requested signature
    #[error("Signature not found")]
    SignatureNotFound,
}
|
|
|
|
|
|
|
|
impl std::convert::From<bigtable::Error> for Error {
|
|
|
|
fn from(err: bigtable::Error) -> Self {
|
|
|
|
Self::BigTableError(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl std::convert::From<std::io::Error> for Error {
|
|
|
|
fn from(err: std::io::Error) -> Self {
|
|
|
|
Self::IoError(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Convenience alias: fallible operations in this crate use the local [`Error`].
pub type Result<T> = std::result::Result<T, Error>;
|
|
|
|
|
|
|
|
// Convert a slot to its bucket representation whereby lower slots are always lexically ordered
// before higher slots
fn slot_to_key(slot: Slot) -> String {
    // 16 zero-padded lowercase hex digits cover the full u64 range, so the
    // lexical order of keys matches the numeric order of slots
    format!("{:016x}", slot)
}
|
|
|
|
|
2021-09-21 08:46:46 -07:00
|
|
|
// Row key for the `blocks` table: plain ascending slot order
fn slot_to_blocks_key(slot: Slot) -> String {
    slot_to_key(slot)
}
|
|
|
|
|
|
|
|
// Row key component for the `tx-by-addr` table: the slot is bitwise-negated so
// that newer slots sort lexically first (descending ledger order); readers
// must negate again to recover the slot
fn slot_to_tx_by_addr_key(slot: Slot) -> String {
    slot_to_key(!slot)
}
|
|
|
|
|
2020-07-20 11:56:44 -07:00
|
|
|
// Reverse of `slot_to_key`
|
|
|
|
fn key_to_slot(key: &str) -> Option<Slot> {
|
|
|
|
match Slot::from_str_radix(key, 16) {
|
|
|
|
Ok(slot) => Some(slot),
|
|
|
|
Err(err) => {
|
|
|
|
// bucket data is probably corrupt
|
|
|
|
warn!("Failed to parse object key as a slot: {}: {}", key, err);
|
|
|
|
None
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// A serialized `StoredConfirmedBlock` is stored in the `block` table
//
// StoredConfirmedBlock holds the same contents as ConfirmedBlock, but is slightly compressed and avoids
// some serde JSON directives that cause issues with bincode
//
// Note: in order to continue to support old bincode-serialized bigtable entries, if new fields are
// added to ConfirmedBlock, they must either be excluded or set to `default_on_eof` here
//
#[derive(Serialize, Deserialize)]
struct StoredConfirmedBlock {
    previous_blockhash: String,
    blockhash: String,
    parent_slot: Slot,
    transactions: Vec<StoredConfirmedBlockTransaction>,
    rewards: StoredConfirmedBlockRewards,
    block_time: Option<UnixTimestamp>,
    // Added after initial deployment: older bincode entries end before this
    // field, so deserialization falls back to the default (None) on EOF
    #[serde(deserialize_with = "default_on_eof")]
    block_height: Option<u64>,
}
|
|
|
|
|
2022-02-09 21:28:18 -08:00
|
|
|
// Test-only conversion: production code only reads this format back out of
// bigtable (see the reverse `From` impl below).
#[cfg(test)]
impl From<ConfirmedBlock> for StoredConfirmedBlock {
    fn from(confirmed_block: ConfirmedBlock) -> Self {
        Self {
            previous_blockhash: confirmed_block.previous_blockhash,
            blockhash: confirmed_block.blockhash,
            parent_slot: confirmed_block.parent_slot,
            transactions: confirmed_block
                .transactions
                .into_iter()
                .map(Into::into)
                .collect(),
            rewards: confirmed_block
                .rewards
                .into_iter()
                .map(Into::into)
                .collect(),
            block_time: confirmed_block.block_time,
            block_height: confirmed_block.block_height,
        }
    }
}
|
|
|
|
|
2022-02-09 21:28:18 -08:00
|
|
|
impl From<StoredConfirmedBlock> for ConfirmedBlock {
    /// Expand a stored (bincode) block back into the in-memory representation.
    fn from(confirmed_block: StoredConfirmedBlock) -> Self {
        Self {
            previous_blockhash: confirmed_block.previous_blockhash,
            blockhash: confirmed_block.blockhash,
            parent_slot: confirmed_block.parent_slot,
            transactions: confirmed_block
                .transactions
                .into_iter()
                .map(Into::into)
                .collect(),
            rewards: confirmed_block
                .rewards
                .into_iter()
                .map(Into::into)
                .collect(),
            block_time: confirmed_block.block_time,
            block_height: confirmed_block.block_height,
        }
    }
}
|
|
|
|
|
|
|
|
// One transaction row within a `StoredConfirmedBlock`
#[derive(Serialize, Deserialize)]
struct StoredConfirmedBlockTransaction {
    transaction: VersionedTransaction,
    // None for entries uploaded before status metadata was recorded
    meta: Option<StoredConfirmedBlockTransactionStatusMeta>,
}
|
|
|
|
|
2022-02-09 21:28:18 -08:00
|
|
|
// Test-only conversion used when round-tripping blocks through the legacy
// bincode format.
#[cfg(test)]
impl From<TransactionWithStatusMeta> for StoredConfirmedBlockTransaction {
    fn from(value: TransactionWithStatusMeta) -> Self {
        let (transaction, meta) = match value {
            TransactionWithStatusMeta::MissingMetadata(tx) => {
                (VersionedTransaction::from(tx), None)
            }
            TransactionWithStatusMeta::Complete(VersionedTransactionWithStatusMeta {
                transaction,
                meta,
            }) => (transaction, Some(meta.into())),
        };
        Self { transaction, meta }
    }
}
|
|
|
|
|
2022-02-09 21:28:18 -08:00
|
|
|
impl From<StoredConfirmedBlockTransaction> for TransactionWithStatusMeta {
|
|
|
|
fn from(tx_with_meta: StoredConfirmedBlockTransaction) -> Self {
|
|
|
|
let StoredConfirmedBlockTransaction { transaction, meta } = tx_with_meta;
|
|
|
|
match meta {
|
|
|
|
None => Self::MissingMetadata(
|
|
|
|
transaction
|
|
|
|
.into_legacy_transaction()
|
|
|
|
.expect("versioned transactions always have meta"),
|
|
|
|
),
|
|
|
|
Some(meta) => Self::Complete(VersionedTransactionWithStatusMeta {
|
|
|
|
transaction,
|
|
|
|
meta: meta.into(),
|
|
|
|
}),
|
2020-09-23 22:10:29 -07:00
|
|
|
}
|
2020-07-20 11:56:44 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Legacy (bincode) status metadata persisted with each transaction in a
// `StoredConfirmedBlock`; only these earliest metadata fields are stored
#[derive(Serialize, Deserialize)]
struct StoredConfirmedBlockTransactionStatusMeta {
    // None if the transaction executed successfully
    err: Option<TransactionError>,
    fee: u64,
    // Account balances before and after the transaction, in account-key order
    pre_balances: Vec<u64>,
    post_balances: Vec<u64>,
}
|
|
|
|
|
2020-09-23 22:10:29 -07:00
|
|
|
impl From<StoredConfirmedBlockTransactionStatusMeta> for TransactionStatusMeta {
|
2020-07-20 11:56:44 -07:00
|
|
|
fn from(value: StoredConfirmedBlockTransactionStatusMeta) -> Self {
|
|
|
|
let StoredConfirmedBlockTransactionStatusMeta {
|
|
|
|
err,
|
|
|
|
fee,
|
|
|
|
pre_balances,
|
|
|
|
post_balances,
|
|
|
|
} = value;
|
|
|
|
let status = match &err {
|
|
|
|
None => Ok(()),
|
|
|
|
Some(err) => Err(err.clone()),
|
|
|
|
};
|
|
|
|
Self {
|
|
|
|
status,
|
|
|
|
fee,
|
|
|
|
pre_balances,
|
|
|
|
post_balances,
|
2020-09-30 10:55:22 -07:00
|
|
|
inner_instructions: None,
|
2020-10-08 12:06:15 -07:00
|
|
|
log_messages: None,
|
2020-12-10 19:25:07 -08:00
|
|
|
pre_token_balances: None,
|
|
|
|
post_token_balances: None,
|
2021-05-26 14:43:15 -07:00
|
|
|
rewards: None,
|
2022-01-13 23:24:41 -08:00
|
|
|
loaded_addresses: LoadedAddresses::default(),
|
2020-07-20 11:56:44 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-23 22:10:29 -07:00
|
|
|
impl From<TransactionStatusMeta> for StoredConfirmedBlockTransactionStatusMeta {
    /// Compress full metadata down to the legacy stored form, discarding the
    /// fields the legacy format does not carry.
    fn from(value: TransactionStatusMeta) -> Self {
        Self {
            err: value.status.err(),
            fee: value.fee,
            pre_balances: value.pre_balances,
            post_balances: value.post_balances,
        }
    }
}
|
|
|
|
|
2020-09-30 16:57:06 -07:00
|
|
|
// Legacy rewards list stored inside a `StoredConfirmedBlock`
type StoredConfirmedBlockRewards = Vec<StoredConfirmedBlockReward>;

// Compressed form of `Reward`: only the fields that existed when this format
// was defined are persisted
#[derive(Serialize, Deserialize)]
struct StoredConfirmedBlockReward {
    pubkey: String,
    lamports: i64,
}
|
|
|
|
|
|
|
|
impl From<StoredConfirmedBlockReward> for Reward {
    /// Expand a legacy stored reward; fields the legacy format never recorded
    /// are filled with zero/None.
    fn from(value: StoredConfirmedBlockReward) -> Self {
        Self {
            pubkey: value.pubkey,
            lamports: value.lamports,
            post_balance: 0,
            reward_type: None,
            commission: None,
        }
    }
}
|
|
|
|
|
|
|
|
impl From<Reward> for StoredConfirmedBlockReward {
    /// Compress a full `Reward` down to the legacy stored form, dropping the
    /// newer fields.
    fn from(value: Reward) -> Self {
        Self {
            pubkey: value.pubkey,
            lamports: value.lamports,
        }
    }
}
|
|
|
|
|
2020-07-20 11:56:44 -07:00
|
|
|
// A serialized `TransactionInfo` is stored in the `tx` table
//
// Maps a transaction signature to the block that contains it and its
// position within that block
#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct TransactionInfo {
    slot: Slot, // The slot that contains the block with this transaction in it
    index: u32, // Where the transaction is located in the block
    err: Option<TransactionError>, // None if the transaction executed successfully
    memo: Option<String>, // Transaction memo
}
|
|
|
|
|
2021-09-21 08:46:46 -07:00
|
|
|
// Part of a serialized `TransactionInfo` which is stored in the `tx` table
//
// Comparable subset of `TransactionInfo` (drops `memo`) used when verifying
// previously uploaded rows
#[derive(PartialEq, Debug)]
struct UploadedTransaction {
    slot: Slot, // The slot that contains the block with this transaction in it
    index: u32, // Where the transaction is located in the block
    err: Option<TransactionError>, // None if the transaction executed successfully
}
|
|
|
|
|
|
|
|
impl From<TransactionInfo> for UploadedTransaction {
|
|
|
|
fn from(transaction_info: TransactionInfo) -> Self {
|
|
|
|
Self {
|
|
|
|
slot: transaction_info.slot,
|
|
|
|
index: transaction_info.index,
|
|
|
|
err: transaction_info.err,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-20 11:56:44 -07:00
|
|
|
impl From<TransactionInfo> for TransactionStatus {
|
|
|
|
fn from(transaction_info: TransactionInfo) -> Self {
|
|
|
|
let TransactionInfo { slot, err, .. } = transaction_info;
|
|
|
|
let status = match &err {
|
|
|
|
None => Ok(()),
|
|
|
|
Some(err) => Err(err.clone()),
|
|
|
|
};
|
|
|
|
Self {
|
|
|
|
slot,
|
|
|
|
confirmations: None,
|
|
|
|
status,
|
|
|
|
err,
|
2021-01-15 08:05:05 -08:00
|
|
|
confirmation_status: Some(TransactionConfirmationStatus::Finalized),
|
2020-07-20 11:56:44 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-20 22:10:35 -08:00
|
|
|
// Legacy (bincode) row format for the `tx-by-addr` table; newer rows use the
// protobuf `tx_by_addr::TransactionByAddr` format, but old rows must remain
// readable
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
struct LegacyTransactionByAddrInfo {
    pub signature: Signature, // The transaction signature
    pub err: Option<TransactionError>, // None if the transaction executed successfully
    pub index: u32, // Where the transaction is located in the block
    pub memo: Option<String>, // Transaction memo
}
|
|
|
|
|
|
|
|
impl From<LegacyTransactionByAddrInfo> for TransactionByAddrInfo {
    /// Upgrade a legacy row to the current in-memory form.
    fn from(legacy: LegacyTransactionByAddrInfo) -> Self {
        Self {
            signature: legacy.signature,
            err: legacy.err,
            index: legacy.index,
            memo: legacy.memo,
            // Legacy rows predate block-time tracking
            block_time: None,
        }
    }
}
|
|
|
|
|
2022-03-19 00:04:17 -07:00
|
|
|
/// Bigtable instance name used when the caller does not supply one.
pub const DEFAULT_INSTANCE_NAME: &str = "solana-ledger";
|
|
|
|
|
2022-03-19 00:03:51 -07:00
|
|
|
/// Connection settings for [`LedgerStorage`].
#[derive(Debug)]
pub struct LedgerStorageConfig {
    /// Passed through to the bigtable connection to select read-only access
    pub read_only: bool,
    /// Optional timeout passed through to the bigtable connection
    pub timeout: Option<std::time::Duration>,
    /// Optional credential file path passed through to the bigtable connection
    pub credential_path: Option<String>,
    /// Name of the bigtable instance to connect to
    pub instance_name: String,
}
|
|
|
|
|
|
|
|
impl Default for LedgerStorageConfig {
|
|
|
|
fn default() -> Self {
|
|
|
|
Self {
|
|
|
|
read_only: true,
|
|
|
|
timeout: None,
|
|
|
|
credential_path: None,
|
2022-03-19 00:04:17 -07:00
|
|
|
instance_name: DEFAULT_INSTANCE_NAME.to_string(),
|
2022-03-19 00:03:51 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-20 11:56:44 -07:00
|
|
|
/// Handle for reading and writing ledger data stored in bigtable.
#[derive(Clone)]
pub struct LedgerStorage {
    // Shared bigtable connection; per-request clients are created from it
    connection: bigtable::BigTableConnection,
}
|
|
|
|
|
|
|
|
impl LedgerStorage {
|
2022-01-05 22:36:03 -08:00
|
|
|
pub async fn new(
|
|
|
|
read_only: bool,
|
|
|
|
timeout: Option<std::time::Duration>,
|
|
|
|
credential_path: Option<String>,
|
|
|
|
) -> Result<Self> {
|
2022-03-19 00:03:51 -07:00
|
|
|
Self::new_with_config(LedgerStorageConfig {
|
|
|
|
read_only,
|
|
|
|
timeout,
|
|
|
|
credential_path,
|
2022-03-19 00:04:17 -07:00
|
|
|
..LedgerStorageConfig::default()
|
2022-03-19 00:03:51 -07:00
|
|
|
})
|
|
|
|
.await
|
|
|
|
}
|
|
|
|
|
|
|
|
pub async fn new_with_config(config: LedgerStorageConfig) -> Result<Self> {
|
|
|
|
let LedgerStorageConfig {
|
|
|
|
read_only,
|
|
|
|
timeout,
|
|
|
|
credential_path,
|
2022-03-19 00:04:17 -07:00
|
|
|
instance_name,
|
2022-03-19 00:03:51 -07:00
|
|
|
} = config;
|
2022-03-19 00:04:17 -07:00
|
|
|
let connection = bigtable::BigTableConnection::new(
|
|
|
|
instance_name.as_str(),
|
|
|
|
read_only,
|
|
|
|
timeout,
|
|
|
|
credential_path,
|
|
|
|
)
|
|
|
|
.await?;
|
2020-07-20 11:56:44 -07:00
|
|
|
Ok(Self { connection })
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Return the available slot that contains a block
|
|
|
|
pub async fn get_first_available_block(&self) -> Result<Option<Slot>> {
|
2021-11-22 15:22:46 -08:00
|
|
|
debug!("LedgerStorage::get_first_available_block request received");
|
|
|
|
inc_new_counter_debug!("storage-bigtable-query", 1);
|
2020-07-20 11:56:44 -07:00
|
|
|
let mut bigtable = self.connection.client();
|
2020-08-15 09:42:17 -07:00
|
|
|
let blocks = bigtable.get_row_keys("blocks", None, None, 1).await?;
|
2020-07-20 11:56:44 -07:00
|
|
|
if blocks.is_empty() {
|
|
|
|
return Ok(None);
|
|
|
|
}
|
|
|
|
Ok(key_to_slot(&blocks[0]))
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Fetch the next slots after the provided slot that contains a block
|
|
|
|
///
|
|
|
|
/// start_slot: slot to start the search from (inclusive)
|
2021-01-18 22:33:51 -08:00
|
|
|
/// limit: stop after this many slots have been found; if limit==0, all records in the table
|
|
|
|
/// after start_slot will be read
|
2020-07-20 11:56:44 -07:00
|
|
|
pub async fn get_confirmed_blocks(&self, start_slot: Slot, limit: usize) -> Result<Vec<Slot>> {
|
2021-11-22 15:22:46 -08:00
|
|
|
debug!(
|
|
|
|
"LedgerStorage::get_confirmed_blocks request received: {:?} {:?}",
|
|
|
|
start_slot, limit
|
|
|
|
);
|
|
|
|
inc_new_counter_debug!("storage-bigtable-query", 1);
|
2020-07-20 11:56:44 -07:00
|
|
|
let mut bigtable = self.connection.client();
|
|
|
|
let blocks = bigtable
|
2021-09-21 08:46:46 -07:00
|
|
|
.get_row_keys(
|
|
|
|
"blocks",
|
|
|
|
Some(slot_to_blocks_key(start_slot)),
|
|
|
|
None,
|
|
|
|
limit as i64,
|
|
|
|
)
|
2020-07-20 11:56:44 -07:00
|
|
|
.await?;
|
|
|
|
Ok(blocks.into_iter().filter_map(|s| key_to_slot(&s)).collect())
|
|
|
|
}
|
|
|
|
|
|
|
|
    /// Fetch the confirmed block from the desired slot
    pub async fn get_confirmed_block(&self, slot: Slot) -> Result<ConfirmedBlock> {
        debug!(
            "LedgerStorage::get_confirmed_block request received: {:?}",
            slot
        );
        inc_new_counter_debug!("storage-bigtable-query", 1);
        let mut bigtable = self.connection.client();
        // Cells may be stored either in the legacy bincode format
        // (`StoredConfirmedBlock`) or the newer protobuf format; fetch
        // whichever is present
        let block_cell_data = bigtable
            .get_protobuf_or_bincode_cell::<StoredConfirmedBlock, generated::ConfirmedBlock>(
                "blocks",
                slot_to_blocks_key(slot),
            )
            .await
            // Translate a missing row into the crate-level `BlockNotFound`
            .map_err(|err| match err {
                bigtable::Error::RowNotFound => Error::BlockNotFound(slot),
                _ => err.into(),
            })?;
        Ok(match block_cell_data {
            bigtable::CellData::Bincode(block) => block.into(),
            // Protobuf decode can fail on malformed data; surface that as a
            // corrupt-object error naming the offending row
            bigtable::CellData::Protobuf(block) => block.try_into().map_err(|_err| {
                bigtable::Error::ObjectCorrupt(format!("blocks/{}", slot_to_blocks_key(slot)))
            })?,
        })
    }
|
|
|
|
|
|
|
|
pub async fn get_signature_status(&self, signature: &Signature) -> Result<TransactionStatus> {
|
2021-11-22 15:22:46 -08:00
|
|
|
debug!(
|
|
|
|
"LedgerStorage::get_signature_status request received: {:?}",
|
|
|
|
signature
|
|
|
|
);
|
|
|
|
inc_new_counter_debug!("storage-bigtable-query", 1);
|
2020-07-20 11:56:44 -07:00
|
|
|
let mut bigtable = self.connection.client();
|
|
|
|
let transaction_info = bigtable
|
|
|
|
.get_bincode_cell::<TransactionInfo>("tx", signature.to_string())
|
2021-01-21 20:40:47 -08:00
|
|
|
.await
|
|
|
|
.map_err(|err| match err {
|
|
|
|
bigtable::Error::RowNotFound => Error::SignatureNotFound,
|
|
|
|
_ => err.into(),
|
|
|
|
})?;
|
2020-07-20 11:56:44 -07:00
|
|
|
Ok(transaction_info.into())
|
|
|
|
}
|
|
|
|
|
|
|
|
    /// Fetch a confirmed transaction
    ///
    /// Returns Ok(None) when the `tx` index row and the block contents
    /// disagree (corruption), after logging a warning.
    pub async fn get_confirmed_transaction(
        &self,
        signature: &Signature,
    ) -> Result<Option<ConfirmedTransactionWithStatusMeta>> {
        debug!(
            "LedgerStorage::get_confirmed_transaction request received: {:?}",
            signature
        );
        inc_new_counter_debug!("storage-bigtable-query", 1);
        let mut bigtable = self.connection.client();

        // Figure out which block the transaction is located in
        let TransactionInfo { slot, index, .. } = bigtable
            .get_bincode_cell("tx", signature.to_string())
            .await
            .map_err(|err| match err {
                bigtable::Error::RowNotFound => Error::SignatureNotFound,
                _ => err.into(),
            })?;

        // Load the block and return the transaction
        let block = self.get_confirmed_block(slot).await?;
        match block.transactions.into_iter().nth(index as usize) {
            None => {
                // The index row points past the end of the block
                // report this somewhere actionable?
                warn!("Transaction info for {} is corrupt", signature);
                Ok(None)
            }
            Some(tx_with_meta) => {
                // Cross-check: the transaction at `index` must actually carry
                // the requested signature
                if tx_with_meta.transaction_signature() != signature {
                    warn!(
                        "Transaction info or confirmed block for {} is corrupt",
                        signature
                    );
                    Ok(None)
                } else {
                    Ok(Some(ConfirmedTransactionWithStatusMeta {
                        slot,
                        tx_with_meta,
                        block_time: block.block_time,
                    }))
                }
            }
        }
    }
|
|
|
|
|
|
|
|
    /// Get confirmed signatures for the provided address, in descending ledger order
    ///
    /// address: address to search for
    /// before_signature: start with the first signature older than this one
    /// until_signature: end with the last signature more recent than this one
    /// limit: stop after this many signatures; if limit==0, all records in the table will be read
    pub async fn get_confirmed_signatures_for_address(
        &self,
        address: &Pubkey,
        before_signature: Option<&Signature>,
        until_signature: Option<&Signature>,
        limit: usize,
    ) -> Result<
        Vec<(
            ConfirmedTransactionStatusWithSignature,
            u32, /*slot index*/
        )>,
    > {
        debug!(
            "LedgerStorage::get_confirmed_signatures_for_address request received: {:?}",
            address
        );
        inc_new_counter_debug!("storage-bigtable-query", 1);
        let mut bigtable = self.connection.client();
        // tx-by-addr row keys are "<address>/<negated-slot-hex>"
        let address_prefix = format!("{}/", address);

        // Figure out where to start listing from based on `before_signature`
        let (first_slot, before_transaction_index) = match before_signature {
            None => (Slot::MAX, 0),
            Some(before_signature) => {
                let TransactionInfo { slot, index, .. } = bigtable
                    .get_bincode_cell("tx", before_signature.to_string())
                    .await?;

                (slot, index)
            }
        };

        // Figure out where to end listing from based on `until_signature`
        let (last_slot, until_transaction_index) = match until_signature {
            None => (0, u32::MAX),
            Some(until_signature) => {
                let TransactionInfo { slot, index, .. } = bigtable
                    .get_bincode_cell("tx", until_signature.to_string())
                    .await?;

                (slot, index)
            }
        };

        let mut infos = vec![];

        // Count the transactions recorded for `first_slot` so the row fetch
        // below can over-read enough rows to survive the index filtering;
        // a missing row counts as zero
        let starting_slot_tx_len = bigtable
            .get_protobuf_or_bincode_cell::<Vec<LegacyTransactionByAddrInfo>, tx_by_addr::TransactionByAddr>(
                "tx-by-addr",
                format!("{}{}", address_prefix, slot_to_tx_by_addr_key(first_slot)),
            )
            .await
            .map(|cell_data| {
                match cell_data {
                    bigtable::CellData::Bincode(tx_by_addr) => tx_by_addr.len(),
                    bigtable::CellData::Protobuf(tx_by_addr) => tx_by_addr.tx_by_addrs.len(),
                }
            })
            .unwrap_or(0);

        // Return the next tx-by-addr data of amount `limit` plus extra to account for the largest
        // number that might be filtered out
        let tx_by_addr_data = bigtable
            .get_row_data(
                "tx-by-addr",
                Some(format!(
                    "{}{}",
                    address_prefix,
                    slot_to_tx_by_addr_key(first_slot),
                )),
                Some(format!(
                    "{}{}",
                    address_prefix,
                    slot_to_tx_by_addr_key(last_slot),
                )),
                limit as i64 + starting_slot_tx_len as i64,
            )
            .await?;

        'outer: for (row_key, data) in tx_by_addr_data {
            // Row keys carry the bitwise-negated slot (see
            // `slot_to_tx_by_addr_key`); negate again to recover it
            let slot = !key_to_slot(&row_key[address_prefix.len()..]).ok_or_else(|| {
                bigtable::Error::ObjectCorrupt(format!(
                    "Failed to convert key to slot: tx-by-addr/{}",
                    row_key
                ))
            })?;

            // Rows may be stored in either the legacy bincode or the newer
            // protobuf format
            let deserialized_cell_data = bigtable::deserialize_protobuf_or_bincode_cell_data::<
                Vec<LegacyTransactionByAddrInfo>,
                tx_by_addr::TransactionByAddr,
            >(&data, "tx-by-addr", row_key.clone())?;

            let mut cell_data: Vec<TransactionByAddrInfo> = match deserialized_cell_data {
                bigtable::CellData::Bincode(tx_by_addr) => {
                    tx_by_addr.into_iter().map(|legacy| legacy.into()).collect()
                }
                bigtable::CellData::Protobuf(tx_by_addr) => {
                    tx_by_addr.try_into().map_err(|error| {
                        bigtable::Error::ObjectCorrupt(format!(
                            "Failed to deserialize: {}: tx-by-addr/{}",
                            error,
                            row_key.clone()
                        ))
                    })?
                }
            };

            // Entries within a row are in ascending index order; reverse to
            // produce the descending ledger order this method promises
            cell_data.reverse();
            for tx_by_addr_info in cell_data.into_iter() {
                // Filter out records before `before_transaction_index`
                if slot == first_slot && tx_by_addr_info.index >= before_transaction_index {
                    continue;
                }
                // Filter out records after `until_transaction_index`
                if slot == last_slot && tx_by_addr_info.index <= until_transaction_index {
                    continue;
                }
                infos.push((
                    ConfirmedTransactionStatusWithSignature {
                        signature: tx_by_addr_info.signature,
                        slot,
                        err: tx_by_addr_info.err,
                        memo: tx_by_addr_info.memo,
                        block_time: tx_by_addr_info.block_time,
                    },
                    tx_by_addr_info.index,
                ));
                // Respect limit
                if infos.len() >= limit {
                    break 'outer;
                }
            }
        }
        Ok(infos)
    }
|
|
|
|
|
|
|
|
    // Upload a new confirmed block and associated meta data.
    //
    // Writes three tables: `tx` (signature -> TransactionInfo),
    // `tx-by-addr` (address/slot -> per-address transaction list), and
    // finally `blocks` (slot -> block). Returns Ok(()) on success after
    // logging the byte count written.
    pub async fn upload_confirmed_block(
        &self,
        slot: Slot,
        confirmed_block: VersionedConfirmedBlock,
    ) -> Result<()> {
        let mut bytes_written = 0;

        // Per non-sysvar account referenced by the block: the rows destined
        // for the `tx-by-addr` table
        let mut by_addr: HashMap<&Pubkey, Vec<TransactionByAddrInfo>> = HashMap::new();

        let mut tx_cells = vec![];
        for (index, transaction_with_meta) in confirmed_block.transactions.iter().enumerate() {
            let VersionedTransactionWithStatusMeta { meta, transaction } = transaction_with_meta;
            let err = meta.status.clone().err();
            let index = index as u32;
            let signature = transaction.signatures[0];
            let memo = extract_and_fmt_memos(transaction_with_meta);

            for address in transaction_with_meta.account_keys().iter() {
                // Skip sysvar accounts to keep the tx-by-addr index useful
                if !is_sysvar_id(address) {
                    by_addr
                        .entry(address)
                        .or_default()
                        .push(TransactionByAddrInfo {
                            signature,
                            err: err.clone(),
                            index,
                            memo: memo.clone(),
                            block_time: confirmed_block.block_time,
                        });
                }
            }

            tx_cells.push((
                signature.to_string(),
                TransactionInfo {
                    slot,
                    index,
                    err,
                    memo,
                },
            ));
        }

        // Convert the per-address map into (row key, protobuf payload) pairs
        let tx_by_addr_cells: Vec<_> = by_addr
            .into_iter()
            .map(|(address, transaction_info_by_addr)| {
                (
                    format!("{}/{}", address, slot_to_tx_by_addr_key(slot)),
                    tx_by_addr::TransactionByAddr {
                        tx_by_addrs: transaction_info_by_addr
                            .into_iter()
                            .map(|by_addr| by_addr.into())
                            .collect(),
                    },
                )
            })
            .collect();

        if !tx_cells.is_empty() {
            bytes_written += self
                .connection
                .put_bincode_cells_with_retry::<TransactionInfo>("tx", &tx_cells)
                .await?;
        }

        if !tx_by_addr_cells.is_empty() {
            bytes_written += self
                .connection
                .put_protobuf_cells_with_retry::<tx_by_addr::TransactionByAddr>(
                    "tx-by-addr",
                    &tx_by_addr_cells,
                )
                .await?;
        }

        let num_transactions = confirmed_block.transactions.len();

        // Store the block itself last, after all other metadata about the block has been
        // successfully stored. This avoids partial uploaded blocks from becoming visible to
        // `get_confirmed_block()` and `get_confirmed_blocks()`
        let blocks_cells = [(slot_to_blocks_key(slot), confirmed_block.into())];
        bytes_written += self
            .connection
            .put_protobuf_cells_with_retry::<generated::ConfirmedBlock>("blocks", &blocks_cells)
            .await?;
        info!(
            "uploaded block for slot {}: {} transactions, {} bytes",
            slot, num_transactions, bytes_written
        );

        Ok(())
    }
|
2021-09-16 16:37:45 -07:00
|
|
|
|
|
|
|
// Delete a confirmed block and associated meta data.
|
|
|
|
pub async fn delete_confirmed_block(&self, slot: Slot, dry_run: bool) -> Result<()> {
|
|
|
|
let mut addresses: HashSet<&Pubkey> = HashSet::new();
|
2021-09-21 08:46:46 -07:00
|
|
|
let mut expected_tx_infos: HashMap<String, UploadedTransaction> = HashMap::new();
|
2021-09-16 16:37:45 -07:00
|
|
|
let confirmed_block = self.get_confirmed_block(slot).await?;
|
|
|
|
for (index, transaction_with_meta) in confirmed_block.transactions.iter().enumerate() {
|
2022-02-09 21:28:18 -08:00
|
|
|
match transaction_with_meta {
|
|
|
|
TransactionWithStatusMeta::MissingMetadata(transaction) => {
|
|
|
|
let signature = transaction.signatures[0];
|
|
|
|
let index = index as u32;
|
|
|
|
let err = None;
|
|
|
|
|
|
|
|
for address in transaction.message.account_keys.iter() {
|
|
|
|
if !is_sysvar_id(address) {
|
|
|
|
addresses.insert(address);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
expected_tx_infos.insert(
|
|
|
|
signature.to_string(),
|
|
|
|
UploadedTransaction { slot, index, err },
|
|
|
|
);
|
|
|
|
}
|
|
|
|
TransactionWithStatusMeta::Complete(tx_with_meta) => {
|
|
|
|
let VersionedTransactionWithStatusMeta { transaction, meta } = tx_with_meta;
|
|
|
|
let signature = transaction.signatures[0];
|
|
|
|
let index = index as u32;
|
|
|
|
let err = meta.status.clone().err();
|
|
|
|
|
|
|
|
for address in tx_with_meta.account_keys().iter() {
|
|
|
|
if !is_sysvar_id(address) {
|
|
|
|
addresses.insert(address);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
expected_tx_infos.insert(
|
|
|
|
signature.to_string(),
|
|
|
|
UploadedTransaction { slot, index, err },
|
|
|
|
);
|
2021-09-16 16:37:45 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
let address_slot_rows: Vec<_> = addresses
|
|
|
|
.into_iter()
|
2021-09-21 08:46:46 -07:00
|
|
|
.map(|address| format!("{}/{}", address, slot_to_tx_by_addr_key(slot)))
|
2021-09-16 16:37:45 -07:00
|
|
|
.collect();
|
|
|
|
|
|
|
|
let tx_deletion_rows = if !expected_tx_infos.is_empty() {
|
|
|
|
let signatures = expected_tx_infos
|
|
|
|
.iter()
|
|
|
|
.map(|(signature, _info)| signature)
|
|
|
|
.cloned()
|
|
|
|
.collect::<Vec<_>>();
|
2021-09-21 08:46:46 -07:00
|
|
|
let fetched_tx_infos: HashMap<String, std::result::Result<UploadedTransaction, _>> =
|
|
|
|
self.connection
|
|
|
|
.get_bincode_cells_with_retry::<TransactionInfo>("tx", &signatures)
|
|
|
|
.await?
|
|
|
|
.into_iter()
|
|
|
|
.map(|(signature, tx_info_res)| (signature, tx_info_res.map(Into::into)))
|
|
|
|
.collect::<HashMap<_, _>>();
|
2021-09-16 16:37:45 -07:00
|
|
|
|
|
|
|
let mut deletion_rows = Vec::with_capacity(expected_tx_infos.len());
|
|
|
|
for (signature, expected_tx_info) in expected_tx_infos {
|
|
|
|
match fetched_tx_infos.get(&signature) {
|
|
|
|
Some(Ok(fetched_tx_info)) if fetched_tx_info == &expected_tx_info => {
|
|
|
|
deletion_rows.push(signature);
|
|
|
|
}
|
2021-09-21 08:46:46 -07:00
|
|
|
Some(Ok(fetched_tx_info)) => {
|
2021-09-16 16:37:45 -07:00
|
|
|
warn!(
|
2021-09-21 08:46:46 -07:00
|
|
|
"skipped tx row {} because the bigtable entry ({:?}) did not match to {:?}",
|
|
|
|
signature,
|
|
|
|
fetched_tx_info,
|
|
|
|
&expected_tx_info,
|
2021-09-16 16:37:45 -07:00
|
|
|
);
|
|
|
|
}
|
|
|
|
Some(Err(err)) => {
|
|
|
|
warn!(
|
|
|
|
"skipped tx row {} because the bigtable entry was corrupted: {:?}",
|
|
|
|
signature, err
|
|
|
|
);
|
|
|
|
}
|
|
|
|
None => {
|
|
|
|
warn!("skipped tx row {} because it was not found", signature);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
deletion_rows
|
|
|
|
} else {
|
|
|
|
vec![]
|
|
|
|
};
|
|
|
|
|
|
|
|
if !dry_run {
|
|
|
|
if !address_slot_rows.is_empty() {
|
|
|
|
self.connection
|
|
|
|
.delete_rows_with_retry("tx-by-addr", &address_slot_rows)
|
|
|
|
.await?;
|
|
|
|
}
|
|
|
|
|
|
|
|
if !tx_deletion_rows.is_empty() {
|
|
|
|
self.connection
|
|
|
|
.delete_rows_with_retry("tx", &tx_deletion_rows)
|
|
|
|
.await?;
|
|
|
|
}
|
|
|
|
|
|
|
|
self.connection
|
2021-09-21 08:46:46 -07:00
|
|
|
.delete_rows_with_retry("blocks", &[slot_to_blocks_key(slot)])
|
2021-09-16 16:37:45 -07:00
|
|
|
.await?;
|
|
|
|
}
|
|
|
|
|
|
|
|
info!(
|
|
|
|
"{}deleted ledger data for slot {}: {} transaction rows, {} address slot rows",
|
|
|
|
if dry_run { "[dry run] " } else { "" },
|
|
|
|
slot,
|
|
|
|
tx_deletion_rows.len(),
|
|
|
|
address_slot_rows.len()
|
|
|
|
);
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
2020-07-20 11:56:44 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(test)]
mod test {
    use super::*;

    // `slot_to_key` renders a slot as 16 lowercase hex digits; check both
    // extremes of the u64 range.
    #[test]
    fn test_slot_to_key() {
        for &(slot, expected) in &[
            (0u64, "0000000000000000"),
            (u64::MAX, "ffffffffffffffff"),
        ] {
            assert_eq!(slot_to_key(slot), expected);
        }
    }
}
|