2019-10-11 12:30:52 -07:00
|
|
|
//! The `validator` module hosts all the validator microservices.
|
2018-07-02 15:24:40 -07:00
|
|
|
|
2021-10-09 15:57:17 -07:00
|
|
|
pub use solana_perf::report_target_features;
|
2021-08-13 05:12:40 -07:00
|
|
|
use {
|
|
|
|
crate::{
|
2022-03-15 13:14:49 -07:00
|
|
|
accounts_hash_verifier::AccountsHashVerifier,
|
2021-08-13 05:12:40 -07:00
|
|
|
broadcast_stage::BroadcastStageType,
|
|
|
|
cache_block_meta_service::{CacheBlockMetaSender, CacheBlockMetaService},
|
|
|
|
cluster_info_vote_listener::VoteTracker,
|
|
|
|
completed_data_sets_service::CompletedDataSetsService,
|
|
|
|
consensus::{reconcile_blockstore_roots_with_tower, Tower},
|
2022-03-15 14:00:06 -07:00
|
|
|
ledger_metric_report_service::LedgerMetricReportService,
|
2022-03-30 07:04:49 -07:00
|
|
|
poh_timing_report_service::PohTimingReportService,
|
2021-08-13 05:12:40 -07:00
|
|
|
rewards_recorder_service::{RewardsRecorderSender, RewardsRecorderService},
|
|
|
|
sample_performance_service::SamplePerformanceService,
|
|
|
|
serve_repair::ServeRepair,
|
|
|
|
serve_repair_service::ServeRepairService,
|
|
|
|
sigverify,
|
2021-08-17 11:01:59 -07:00
|
|
|
snapshot_packager_service::SnapshotPackagerService,
|
2021-12-17 15:21:05 -08:00
|
|
|
stats_reporter_service::StatsReporterService,
|
2021-10-15 15:11:11 -07:00
|
|
|
system_monitor_service::{verify_udp_stats_access, SystemMonitorService},
|
2021-08-13 05:12:40 -07:00
|
|
|
tower_storage::TowerStorage,
|
2022-01-10 09:29:48 -08:00
|
|
|
tpu::{Tpu, TpuSockets, DEFAULT_TPU_COALESCE_MS},
|
|
|
|
tvu::{Tvu, TvuConfig, TvuSockets},
|
2021-05-26 08:15:46 -07:00
|
|
|
},
|
2022-01-11 02:44:46 -08:00
|
|
|
crossbeam_channel::{bounded, unbounded, Receiver},
|
2021-08-13 05:12:40 -07:00
|
|
|
rand::{thread_rng, Rng},
|
|
|
|
solana_entry::poh::compute_hash_time_ns,
|
2022-03-14 18:18:46 -07:00
|
|
|
solana_geyser_plugin_manager::geyser_plugin_service::GeyserPluginService,
|
2021-08-13 05:12:40 -07:00
|
|
|
solana_gossip::{
|
|
|
|
cluster_info::{
|
|
|
|
ClusterInfo, Node, DEFAULT_CONTACT_DEBUG_INTERVAL_MILLIS,
|
|
|
|
DEFAULT_CONTACT_SAVE_INTERVAL_MILLIS,
|
|
|
|
},
|
|
|
|
contact_info::ContactInfo,
|
|
|
|
crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
|
|
|
|
gossip_service::GossipService,
|
|
|
|
},
|
|
|
|
solana_ledger::{
|
|
|
|
bank_forks_utils,
|
2022-03-15 20:53:46 -07:00
|
|
|
blockstore::{
|
|
|
|
Blockstore, BlockstoreError, BlockstoreSignals, CompletedSlotsReceiver, PurgeType,
|
|
|
|
},
|
2022-03-18 11:13:35 -07:00
|
|
|
blockstore_db::{BlockstoreOptions, BlockstoreRecoveryMode, LedgerColumnOptions},
|
2021-08-13 05:12:40 -07:00
|
|
|
blockstore_processor::{self, TransactionStatusSender},
|
|
|
|
leader_schedule::FixedSchedule,
|
|
|
|
leader_schedule_cache::LeaderScheduleCache,
|
|
|
|
},
|
|
|
|
solana_measure::measure::Measure,
|
2022-03-30 07:04:49 -07:00
|
|
|
solana_metrics::{datapoint_info, poh_timing_point::PohTimingSender},
|
2021-08-13 05:12:40 -07:00
|
|
|
solana_poh::{
|
2022-04-20 17:53:29 -07:00
|
|
|
poh_recorder::PohRecorder,
|
2021-08-13 05:12:40 -07:00
|
|
|
poh_service::{self, PohService},
|
|
|
|
},
|
|
|
|
solana_rpc::{
|
|
|
|
max_slots::MaxSlots,
|
|
|
|
optimistically_confirmed_bank_tracker::{
|
|
|
|
OptimisticallyConfirmedBank, OptimisticallyConfirmedBankTracker,
|
|
|
|
},
|
|
|
|
rpc::JsonRpcConfig,
|
|
|
|
rpc_completed_slots_service::RpcCompletedSlotsService,
|
|
|
|
rpc_pubsub_service::{PubSubConfig, PubSubService},
|
|
|
|
rpc_service::JsonRpcService,
|
|
|
|
rpc_subscriptions::RpcSubscriptions,
|
2021-11-23 09:55:53 -08:00
|
|
|
transaction_notifier_interface::TransactionNotifierLock,
|
2021-08-13 05:12:40 -07:00
|
|
|
transaction_status_service::TransactionStatusService,
|
|
|
|
},
|
|
|
|
solana_runtime::{
|
2022-03-15 13:14:49 -07:00
|
|
|
accounts_background_service::{
|
|
|
|
AbsRequestHandler, AbsRequestSender, AccountsBackgroundService, DroppedSlotsReceiver,
|
|
|
|
SnapshotRequestHandler,
|
|
|
|
},
|
2021-09-07 21:30:38 -07:00
|
|
|
accounts_db::{AccountShrinkThreshold, AccountsDbConfig},
|
|
|
|
accounts_index::AccountSecondaryIndexes,
|
2021-09-30 14:26:17 -07:00
|
|
|
accounts_update_notifier_interface::AccountsUpdateNotifier,
|
2021-08-13 05:12:40 -07:00
|
|
|
bank::Bank,
|
|
|
|
bank_forks::BankForks,
|
|
|
|
commitment::BlockCommitmentCache,
|
2021-10-12 06:51:33 -07:00
|
|
|
cost_model::CostModel,
|
2021-08-13 05:12:40 -07:00
|
|
|
hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE},
|
2022-04-11 17:28:10 -07:00
|
|
|
runtime_config::RuntimeConfig,
|
2021-08-13 05:12:40 -07:00
|
|
|
snapshot_archive_info::SnapshotArchiveInfoGetter,
|
|
|
|
snapshot_config::SnapshotConfig,
|
2021-10-08 13:14:56 -07:00
|
|
|
snapshot_hash::StartingSnapshotHashes,
|
2022-04-06 03:47:19 -07:00
|
|
|
snapshot_package::{PendingAccountsPackage, PendingSnapshotPackage},
|
2021-08-13 05:12:40 -07:00
|
|
|
snapshot_utils,
|
|
|
|
},
|
|
|
|
solana_sdk::{
|
|
|
|
clock::Slot,
|
|
|
|
epoch_schedule::MAX_LEADER_SCHEDULE_EPOCH_OFFSET,
|
|
|
|
exit::Exit,
|
|
|
|
genesis_config::GenesisConfig,
|
|
|
|
hash::Hash,
|
|
|
|
pubkey::Pubkey,
|
|
|
|
shred_version::compute_shred_version,
|
|
|
|
signature::{Keypair, Signer},
|
|
|
|
timing::timestamp,
|
|
|
|
},
|
2021-10-19 16:11:46 -07:00
|
|
|
solana_send_transaction_service::send_transaction_service,
|
2021-08-13 05:12:40 -07:00
|
|
|
solana_streamer::socket::SocketAddrSpace,
|
|
|
|
solana_vote_program::vote_state::VoteState,
|
|
|
|
std::{
|
|
|
|
collections::{HashMap, HashSet},
|
|
|
|
net::SocketAddr,
|
|
|
|
path::{Path, PathBuf},
|
|
|
|
sync::{
|
|
|
|
atomic::{AtomicBool, AtomicU64, Ordering},
|
|
|
|
Arc, Mutex, RwLock,
|
|
|
|
},
|
|
|
|
thread::{sleep, Builder, JoinHandle},
|
|
|
|
time::{Duration, Instant},
|
2021-05-18 23:54:28 -07:00
|
|
|
},
|
2019-11-04 10:03:39 -08:00
|
|
|
};
|
2018-07-02 11:20:35 -07:00
|
|
|
|
2020-09-18 14:35:20 -07:00
|
|
|
/// Upper bound on buffered completed data sets in the channel feeding
/// `CompletedDataSetsService`; senders block (or the channel rejects) beyond
/// this — NOTE(review): exact backpressure behavior depends on the channel
/// constructor at the use site, confirm there.
const MAX_COMPLETED_DATA_SETS_IN_CHANNEL: usize = 100_000;

/// Stake percentage threshold used by the `--wait-for-supermajority` startup
/// gate — presumably the fraction of stake that must be visible before boot
/// proceeds; confirm against the wait-for-supermajority check.
const WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT: u64 = 80;

/// maximum drop bank signal queue length
const MAX_DROP_BANK_SIGNAL_QUEUE_SIZE: usize = 10_000;
|
|
|
|
|
2019-05-23 22:05:16 -07:00
|
|
|
/// Complete configuration for a [`Validator`] instance.
///
/// Most fields map one-to-one to validator CLI flags; `Default` provides a
/// baseline with all optional services disabled, and
/// [`ValidatorConfig::default_for_test`] relaxes a few settings for tests.
pub struct ValidatorConfig {
    /// If set, stop processing at this slot (`--dev-halt-at-slot`).
    pub halt_at_slot: Option<Slot>,
    /// Abort startup if the genesis hash does not match this value.
    pub expected_genesis_hash: Option<Hash>,
    /// Abort if the bank hash at the wait-for-supermajority slot differs.
    pub expected_bank_hash: Option<Hash>,
    /// Abort if the locally computed shred version differs.
    pub expected_shred_version: Option<u16>,
    /// When true, all authorized voter keypairs are cleared and the node
    /// does not vote.
    pub voting_disabled: bool,
    /// Directories used for accounts storage; cleaned at startup.
    pub account_paths: Vec<PathBuf>,
    /// Optional separate directories for shrunken account storages.
    pub account_shrink_paths: Option<Vec<PathBuf>>,
    pub rpc_config: JsonRpcConfig,
    /// Geyser plugin config files; `Some` enables the GeyserPluginService.
    pub geyser_plugin_config_files: Option<Vec<PathBuf>>,
    pub rpc_addrs: Option<(SocketAddr, SocketAddr)>, // (JsonRpc, JsonRpcPubSub)
    pub pubsub_config: PubSubConfig,
    /// `Some` enables snapshot generation/packaging services.
    pub snapshot_config: Option<SnapshotConfig>,
    /// Ledger retention limit in shreds; `None` = unbounded.
    pub max_ledger_shreds: Option<u64>,
    pub broadcast_stage_type: BroadcastStageType,
    pub turbine_disabled: Option<Arc<AtomicBool>>,
    pub enforce_ulimit_nofile: bool,
    pub fixed_leader_schedule: Option<FixedSchedule>,
    /// If set, hold at this slot until a supermajority of stake is visible.
    pub wait_for_supermajority: Option<Slot>,
    pub new_hard_forks: Option<Vec<Slot>>,
    pub known_validators: Option<HashSet<Pubkey>>, // None = trust all
    pub repair_validators: Option<HashSet<Pubkey>>, // None = repair from all
    pub gossip_validators: Option<HashSet<Pubkey>>, // None = gossip with all
    pub halt_on_known_validators_accounts_hash_mismatch: bool,
    pub accounts_hash_fault_injection_slots: u64, // 0 = no fault injection
    pub no_rocksdb_compaction: bool,
    pub rocksdb_compaction_interval: Option<u64>,
    pub rocksdb_max_compaction_jitter: Option<u64>,
    /// How often (in slots) the accounts hash is computed and verified.
    pub accounts_hash_interval_slots: u64,
    pub max_genesis_archive_unpacked_size: u64,
    pub wal_recovery_mode: Option<BlockstoreRecoveryMode>,
    pub poh_verify: bool, // Perform PoH verification during blockstore processing at boot
    /// Refuse to start without a saved tower (`--require-tower`).
    pub require_tower: bool,
    pub tower_storage: Arc<dyn TowerStorage>,
    pub debug_keys: Option<Arc<HashSet<Pubkey>>>,
    pub contact_debug_interval: u64,
    pub contact_save_interval: u64,
    pub send_transaction_service_config: send_transaction_service::Config,
    pub no_poh_speed_test: bool,
    pub no_os_memory_stats_reporting: bool,
    pub no_os_network_stats_reporting: bool,
    pub poh_pinned_cpu_core: usize,
    pub poh_hashes_per_batch: u64,
    pub account_indexes: AccountSecondaryIndexes,
    pub accounts_db_caching_enabled: bool,
    pub accounts_db_config: Option<AccountsDbConfig>,
    /// If set, warp the bank to this slot at startup (test/dev feature).
    pub warp_slot: Option<Slot>,
    pub accounts_db_test_hash_calculation: bool,
    pub accounts_db_skip_shrink: bool,
    pub tpu_coalesce_ms: u64,
    /// Shared exit registry; callbacks registered here run on shutdown.
    pub validator_exit: Arc<RwLock<Exit>>,
    pub no_wait_for_vote_to_start_leader: bool,
    pub accounts_shrink_ratio: AccountShrinkThreshold,
    pub wait_to_vote_slot: Option<Slot>,
    pub ledger_column_options: LedgerColumnOptions,
    pub runtime_config: RuntimeConfig,
}
|
2019-06-19 00:13:19 -07:00
|
|
|
|
2019-05-23 22:05:16 -07:00
|
|
|
impl Default for ValidatorConfig {
|
2019-01-29 08:51:01 -08:00
|
|
|
fn default() -> Self {
|
|
|
|
Self {
|
2022-04-19 15:06:30 -07:00
|
|
|
halt_at_slot: None,
|
2019-11-08 20:56:57 -08:00
|
|
|
expected_genesis_hash: None,
|
2020-06-30 12:43:48 -07:00
|
|
|
expected_bank_hash: None,
|
2020-01-28 15:56:55 -08:00
|
|
|
expected_shred_version: None,
|
2019-01-30 16:16:55 -08:00
|
|
|
voting_disabled: false,
|
2020-03-31 17:21:19 -07:00
|
|
|
max_ledger_shreds: None,
|
2019-12-05 18:41:29 -08:00
|
|
|
account_paths: Vec::new(),
|
2020-12-21 21:33:37 -08:00
|
|
|
account_shrink_paths: None,
|
2019-03-03 22:01:09 -08:00
|
|
|
rpc_config: JsonRpcConfig::default(),
|
2022-03-14 18:18:46 -07:00
|
|
|
geyser_plugin_config_files: None,
|
2020-09-10 08:56:26 -07:00
|
|
|
rpc_addrs: None,
|
2020-10-01 12:36:58 -07:00
|
|
|
pubsub_config: PubSubConfig::default(),
|
2019-07-31 17:58:10 -07:00
|
|
|
snapshot_config: None,
|
2019-06-19 00:13:19 -07:00
|
|
|
broadcast_stage_type: BroadcastStageType::Standard,
|
2022-04-21 21:14:07 -07:00
|
|
|
turbine_disabled: None,
|
2020-12-16 17:56:38 -08:00
|
|
|
enforce_ulimit_nofile: true,
|
2019-12-03 16:31:59 -08:00
|
|
|
fixed_leader_schedule: None,
|
2020-03-02 10:47:58 -08:00
|
|
|
wait_for_supermajority: None,
|
2020-01-24 17:27:04 -08:00
|
|
|
new_hard_forks: None,
|
2021-11-12 10:57:55 -08:00
|
|
|
known_validators: None,
|
2020-08-21 00:35:11 -07:00
|
|
|
repair_validators: None,
|
2020-09-11 12:00:16 -07:00
|
|
|
gossip_validators: None,
|
2021-11-12 10:57:55 -08:00
|
|
|
halt_on_known_validators_accounts_hash_mismatch: false,
|
2020-03-16 08:37:31 -07:00
|
|
|
accounts_hash_fault_injection_slots: 0,
|
2020-03-23 08:42:32 -07:00
|
|
|
no_rocksdb_compaction: false,
|
2021-02-14 10:16:30 -08:00
|
|
|
rocksdb_compaction_interval: None,
|
|
|
|
rocksdb_max_compaction_jitter: None,
|
2020-04-16 15:12:20 -07:00
|
|
|
accounts_hash_interval_slots: std::u64::MAX,
|
2020-04-29 18:53:34 -07:00
|
|
|
max_genesis_archive_unpacked_size: MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
|
2020-07-06 12:43:45 -07:00
|
|
|
wal_recovery_mode: None,
|
2020-09-18 14:35:20 -07:00
|
|
|
poh_verify: true,
|
2020-09-18 22:03:54 -07:00
|
|
|
require_tower: false,
|
2021-08-10 20:16:18 -07:00
|
|
|
tower_storage: Arc::new(crate::tower_storage::NullTowerStorage::default()),
|
2020-09-23 18:46:42 -07:00
|
|
|
debug_keys: None,
|
2020-12-25 22:31:25 -08:00
|
|
|
contact_debug_interval: DEFAULT_CONTACT_DEBUG_INTERVAL_MILLIS,
|
|
|
|
contact_save_interval: DEFAULT_CONTACT_SAVE_INTERVAL_MILLIS,
|
2021-10-19 16:11:46 -07:00
|
|
|
send_transaction_service_config: send_transaction_service::Config::default(),
|
2020-12-29 09:35:57 -08:00
|
|
|
no_poh_speed_test: true,
|
2022-03-14 19:38:04 -07:00
|
|
|
no_os_memory_stats_reporting: true,
|
2021-11-16 10:26:03 -08:00
|
|
|
no_os_network_stats_reporting: true,
|
2020-12-29 11:09:47 -08:00
|
|
|
poh_pinned_cpu_core: poh_service::DEFAULT_PINNED_CPU_CORE,
|
2021-03-05 16:01:21 -08:00
|
|
|
poh_hashes_per_batch: poh_service::DEFAULT_HASHES_PER_BATCH,
|
2021-05-10 07:22:48 -07:00
|
|
|
account_indexes: AccountSecondaryIndexes::default(),
|
2021-01-11 17:00:23 -08:00
|
|
|
accounts_db_caching_enabled: false,
|
2021-01-21 18:34:51 -08:00
|
|
|
warp_slot: None,
|
2021-02-04 07:00:33 -08:00
|
|
|
accounts_db_test_hash_calculation: false,
|
2021-08-04 15:28:33 -07:00
|
|
|
accounts_db_skip_shrink: false,
|
2021-02-26 09:15:45 -08:00
|
|
|
tpu_coalesce_ms: DEFAULT_TPU_COALESCE_MS,
|
2021-06-03 20:06:13 -07:00
|
|
|
validator_exit: Arc::new(RwLock::new(Exit::default())),
|
2021-03-25 18:54:51 -07:00
|
|
|
no_wait_for_vote_to_start_leader: true,
|
2021-06-09 21:21:32 -07:00
|
|
|
accounts_shrink_ratio: AccountShrinkThreshold::default(),
|
2021-09-07 21:30:38 -07:00
|
|
|
accounts_db_config: None,
|
2022-02-15 12:19:34 -08:00
|
|
|
wait_to_vote_slot: None,
|
2022-03-18 11:13:35 -07:00
|
|
|
ledger_column_options: LedgerColumnOptions::default(),
|
2022-04-11 17:28:10 -07:00
|
|
|
runtime_config: RuntimeConfig::default(),
|
2019-01-29 08:51:01 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-28 18:00:18 -08:00
|
|
|
impl ValidatorConfig {
|
|
|
|
pub fn default_for_test() -> Self {
|
2022-02-02 14:46:16 -08:00
|
|
|
Self {
|
2022-04-12 13:06:37 -07:00
|
|
|
enforce_ulimit_nofile: false,
|
2022-02-02 14:46:16 -08:00
|
|
|
rpc_config: JsonRpcConfig::default_for_test(),
|
|
|
|
..Self::default()
|
|
|
|
}
|
2022-01-28 18:00:18 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-04 13:01:11 -08:00
|
|
|
/// `ValidatorStartProgress` contains status information that is surfaced to the node operator over
/// the admin RPC channel to help them to follow the general progress of node startup without
/// having to watch log messages.
///
/// Variants roughly follow startup order; see where each is assigned in
/// `Validator::new` for the exact transitions.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub enum ValidatorStartProgress {
    Initializing, // Catch all, default state
    SearchingForRpcService,
    DownloadingSnapshot { slot: Slot, rpc_addr: SocketAddr },
    CleaningBlockStore,
    CleaningAccounts,
    LoadingLedger,
    // Replaying the ledger: `slot` is the current replay position, `max_slot`
    // the target.
    ProcessingLedger { slot: Slot, max_slot: Slot },
    StartingServices,
    Halted, // Validator halted due to `--dev-halt-at-slot` argument
    WaitingForSupermajority,

    // `Running` is the terminal state once the validator fully starts and all services are
    // operational
    Running,
}
|
|
|
|
|
|
|
|
impl Default for ValidatorStartProgress {
|
|
|
|
fn default() -> Self {
|
|
|
|
Self::Initializing
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-03-15 20:53:46 -07:00
|
|
|
/// Handle to an optional background thread running
/// `Blockstore::scan_and_fix_roots`; `None` when the scan was not enabled by
/// the validator configuration (see `BlockstoreRootScan::new`).
struct BlockstoreRootScan {
    thread: Option<JoinHandle<Result<(), BlockstoreError>>>,
}
|
|
|
|
|
|
|
|
impl BlockstoreRootScan {
|
|
|
|
fn new(config: &ValidatorConfig, blockstore: &Arc<Blockstore>, exit: &Arc<AtomicBool>) -> Self {
|
|
|
|
let thread = if config.rpc_addrs.is_some()
|
|
|
|
&& config.rpc_config.enable_rpc_transaction_history
|
|
|
|
&& config.rpc_config.rpc_scan_and_fix_roots
|
|
|
|
{
|
|
|
|
let blockstore = blockstore.clone();
|
|
|
|
let exit = exit.clone();
|
|
|
|
Some(
|
|
|
|
Builder::new()
|
|
|
|
.name("blockstore-root-scan".to_string())
|
|
|
|
.spawn(move || blockstore.scan_and_fix_roots(&exit))
|
|
|
|
.unwrap(),
|
|
|
|
)
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
};
|
|
|
|
Self { thread }
|
|
|
|
}
|
|
|
|
|
|
|
|
fn join(self) {
|
|
|
|
if let Some(blockstore_root_scan) = self.thread {
|
|
|
|
if let Err(err) = blockstore_root_scan.join() {
|
|
|
|
warn!("blockstore_root_scan failed to join {:?}", err);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-14 20:14:48 -07:00
|
|
|
/// Optional services (and their sender handles) related to transaction
/// history — presumably active only when RPC transaction history is enabled;
/// all fields default to `None`/zero via `Default`.
#[derive(Default)]
struct TransactionHistoryServices {
    transaction_status_sender: Option<TransactionStatusSender>,
    transaction_status_service: Option<TransactionStatusService>,
    // Highest slot with complete transaction status — semantics inferred from
    // the name; confirm against TransactionStatusService.
    max_complete_transaction_status_slot: Arc<AtomicU64>,
    rewards_recorder_sender: Option<RewardsRecorderSender>,
    rewards_recorder_service: Option<RewardsRecorderService>,
    cache_block_meta_sender: Option<CacheBlockMetaSender>,
    cache_block_meta_service: Option<CacheBlockMetaService>,
}
|
|
|
|
|
2019-05-23 22:05:16 -07:00
|
|
|
/// A running validator node: owns every microservice spawned by
/// `Validator::new` so they can be joined/shut down together. Optional fields
/// correspond to services that are only started under certain configurations
/// (e.g. RPC, snapshots, geyser plugins).
pub struct Validator {
    validator_exit: Arc<RwLock<Exit>>,
    json_rpc_service: Option<JsonRpcService>,
    pubsub_service: Option<PubSubService>,
    rpc_completed_slots_service: JoinHandle<()>,
    optimistically_confirmed_bank_tracker: Option<OptimisticallyConfirmedBankTracker>,
    transaction_status_service: Option<TransactionStatusService>,
    rewards_recorder_service: Option<RewardsRecorderService>,
    cache_block_meta_service: Option<CacheBlockMetaService>,
    system_monitor_service: Option<SystemMonitorService>,
    sample_performance_service: Option<SamplePerformanceService>,
    poh_timing_report_service: PohTimingReportService,
    stats_reporter_service: StatsReporterService,
    gossip_service: GossipService,
    serve_repair_service: ServeRepairService,
    completed_data_sets_service: CompletedDataSetsService,
    snapshot_packager_service: Option<SnapshotPackagerService>,
    poh_recorder: Arc<Mutex<PohRecorder>>,
    poh_service: PohService,
    tpu: Tpu,
    tvu: Tvu,
    ip_echo_server: Option<solana_net_utils::IpEchoServer>,
    // The following are public so embedders (e.g. test frameworks) can reach
    // core node state directly.
    pub cluster_info: Arc<ClusterInfo>,
    pub bank_forks: Arc<RwLock<BankForks>>,
    pub blockstore: Arc<Blockstore>,
    geyser_plugin_service: Option<GeyserPluginService>,
    ledger_metric_report_service: LedgerMetricReportService,
    accounts_background_service: AccountsBackgroundService,
    accounts_hash_verifier: AccountsHashVerifier,
}
|
2018-07-02 11:20:35 -07:00
|
|
|
|
2020-12-07 08:43:03 -08:00
|
|
|
// in the distant future, get rid of ::new()/exit() and use Result properly...
pub fn abort() -> ! {
    // Under the test harness, panic instead of killing the whole process so
    // the failure is observable and attributable to a single test.
    #[cfg(test)]
    panic!("process::exit(1) is intercepted for friendly test failure...");

    #[cfg(not(test))]
    {
        // standard error is usually redirected to a log file, cry for help on standard output as
        // well
        println!("Validator process aborted. The validator log may contain further details");
        std::process::exit(1);
    }
}
|
|
|
|
|
2019-05-23 22:05:16 -07:00
|
|
|
impl Validator {
|
2021-07-23 08:25:03 -07:00
|
|
|
#[allow(clippy::too_many_arguments)]
|
2019-05-20 13:32:32 -07:00
|
|
|
pub fn new(
|
2019-01-29 18:12:32 -08:00
|
|
|
mut node: Node,
|
2021-06-17 13:51:06 -07:00
|
|
|
identity_keypair: Arc<Keypair>,
|
2019-07-30 15:53:41 -07:00
|
|
|
ledger_path: &Path,
|
2019-03-09 19:28:43 -08:00
|
|
|
vote_account: &Pubkey,
|
2021-04-11 20:38:30 -07:00
|
|
|
authorized_voter_keypairs: Arc<RwLock<Vec<Arc<Keypair>>>>,
|
2020-12-18 10:54:48 -08:00
|
|
|
cluster_entrypoints: Vec<ContactInfo>,
|
2019-05-23 22:05:16 -07:00
|
|
|
config: &ValidatorConfig,
|
2021-02-03 08:26:17 -08:00
|
|
|
should_check_duplicate_instance: bool,
|
2021-03-04 13:01:11 -08:00
|
|
|
start_progress: Arc<RwLock<ValidatorStartProgress>>,
|
2021-07-23 08:25:03 -07:00
|
|
|
socket_addr_space: SocketAddrSpace,
|
2022-04-30 20:52:38 -07:00
|
|
|
use_quic: bool,
|
2019-05-20 13:32:32 -07:00
|
|
|
) -> Self {
|
2020-09-18 14:35:20 -07:00
|
|
|
let id = identity_keypair.pubkey();
|
2019-08-08 15:38:23 -07:00
|
|
|
assert_eq!(id, node.info.id);
|
|
|
|
|
2020-03-13 11:41:18 -07:00
|
|
|
warn!("identity: {}", id);
|
|
|
|
warn!("vote account: {}", vote_account);
|
2020-03-31 08:23:42 -07:00
|
|
|
|
2022-03-18 12:24:42 -07:00
|
|
|
if !config.no_os_network_stats_reporting {
|
|
|
|
verify_udp_stats_access().unwrap_or_else(|err| {
|
|
|
|
error!("Failed to access UDP stats: {}. Bypass check with --no-os-network-stats-reporting.", err);
|
|
|
|
abort();
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
2021-09-30 14:26:17 -07:00
|
|
|
let mut bank_notification_senders = Vec::new();
|
|
|
|
|
2022-03-14 18:18:46 -07:00
|
|
|
let geyser_plugin_service =
|
|
|
|
if let Some(geyser_plugin_config_files) = &config.geyser_plugin_config_files {
|
2021-09-30 14:26:17 -07:00
|
|
|
let (confirmed_bank_sender, confirmed_bank_receiver) = unbounded();
|
|
|
|
bank_notification_senders.push(confirmed_bank_sender);
|
2022-03-14 18:18:46 -07:00
|
|
|
let result =
|
|
|
|
GeyserPluginService::new(confirmed_bank_receiver, geyser_plugin_config_files);
|
2021-09-30 14:26:17 -07:00
|
|
|
match result {
|
2022-03-14 18:18:46 -07:00
|
|
|
Ok(geyser_plugin_service) => Some(geyser_plugin_service),
|
2021-09-30 14:26:17 -07:00
|
|
|
Err(err) => {
|
2022-03-14 18:18:46 -07:00
|
|
|
error!("Failed to load the Geyser plugin: {:?}", err);
|
2021-09-30 14:26:17 -07:00
|
|
|
abort();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
};
|
|
|
|
|
2020-03-31 08:23:42 -07:00
|
|
|
if config.voting_disabled {
|
|
|
|
warn!("voting disabled");
|
2021-04-11 20:38:30 -07:00
|
|
|
authorized_voter_keypairs.write().unwrap().clear();
|
2020-03-31 08:23:42 -07:00
|
|
|
} else {
|
2021-04-11 20:38:30 -07:00
|
|
|
for authorized_voter_keypair in authorized_voter_keypairs.read().unwrap().iter() {
|
2020-03-31 08:23:42 -07:00
|
|
|
warn!("authorized voter: {}", authorized_voter_keypair.pubkey());
|
|
|
|
}
|
|
|
|
}
|
2021-04-11 20:38:30 -07:00
|
|
|
|
2020-12-18 10:54:48 -08:00
|
|
|
for cluster_entrypoint in &cluster_entrypoints {
|
|
|
|
info!("entrypoint: {:?}", cluster_entrypoint);
|
|
|
|
}
|
2019-09-14 12:32:57 -07:00
|
|
|
|
2020-06-16 23:03:26 -07:00
|
|
|
if solana_perf::perf_libs::api().is_some() {
|
|
|
|
info!("Initializing sigverify, this could take a while...");
|
|
|
|
} else {
|
|
|
|
info!("Initializing sigverify...");
|
|
|
|
}
|
2019-09-14 12:32:57 -07:00
|
|
|
sigverify::init();
|
|
|
|
info!("Done.");
|
2019-02-06 19:21:31 -08:00
|
|
|
|
2020-09-18 14:35:20 -07:00
|
|
|
if !ledger_path.is_dir() {
|
|
|
|
error!(
|
|
|
|
"ledger directory does not exist or is not accessible: {:?}",
|
|
|
|
ledger_path
|
|
|
|
);
|
2020-12-07 08:43:03 -08:00
|
|
|
abort();
|
2020-09-18 14:35:20 -07:00
|
|
|
}
|
|
|
|
|
2020-06-23 14:29:07 -07:00
|
|
|
if let Some(shred_version) = config.expected_shred_version {
|
|
|
|
if let Some(wait_for_supermajority_slot) = config.wait_for_supermajority {
|
2021-03-04 13:01:11 -08:00
|
|
|
*start_progress.write().unwrap() = ValidatorStartProgress::CleaningBlockStore;
|
2020-06-23 14:29:07 -07:00
|
|
|
backup_and_clear_blockstore(
|
|
|
|
ledger_path,
|
|
|
|
wait_for_supermajority_slot + 1,
|
|
|
|
shred_version,
|
|
|
|
);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-15 09:37:40 -07:00
|
|
|
info!("Cleaning accounts paths..");
|
2021-03-04 13:01:11 -08:00
|
|
|
*start_progress.write().unwrap() = ValidatorStartProgress::CleaningAccounts;
|
2020-07-15 09:37:40 -07:00
|
|
|
let mut start = Measure::start("clean_accounts_paths");
|
2020-07-07 09:41:45 -07:00
|
|
|
for accounts_path in &config.account_paths {
|
|
|
|
cleanup_accounts_path(accounts_path);
|
|
|
|
}
|
2020-12-21 21:33:37 -08:00
|
|
|
if let Some(ref shrink_paths) = config.account_shrink_paths {
|
|
|
|
for accounts_path in shrink_paths {
|
|
|
|
cleanup_accounts_path(accounts_path);
|
|
|
|
}
|
|
|
|
}
|
2020-07-15 09:37:40 -07:00
|
|
|
start.stop();
|
|
|
|
info!("done. {}", start);
|
2020-07-07 09:41:45 -07:00
|
|
|
|
2020-07-14 20:14:48 -07:00
|
|
|
let exit = Arc::new(AtomicBool::new(false));
|
2021-03-01 13:20:04 -08:00
|
|
|
{
|
|
|
|
let exit = exit.clone();
|
|
|
|
config
|
|
|
|
.validator_exit
|
|
|
|
.write()
|
|
|
|
.unwrap()
|
|
|
|
.register_exit(Box::new(move || exit.store(true, Ordering::Relaxed)));
|
|
|
|
}
|
2020-07-14 20:14:48 -07:00
|
|
|
|
2022-03-14 18:18:46 -07:00
|
|
|
let accounts_update_notifier = geyser_plugin_service
|
|
|
|
.as_ref()
|
|
|
|
.and_then(|geyser_plugin_service| geyser_plugin_service.get_accounts_update_notifier());
|
2021-11-23 09:55:53 -08:00
|
|
|
|
2022-03-14 18:18:46 -07:00
|
|
|
let transaction_notifier = geyser_plugin_service
|
|
|
|
.as_ref()
|
|
|
|
.and_then(|geyser_plugin_service| geyser_plugin_service.get_transaction_notifier());
|
2021-11-23 09:55:53 -08:00
|
|
|
|
2022-03-14 18:18:46 -07:00
|
|
|
let block_metadata_notifier = geyser_plugin_service
|
|
|
|
.as_ref()
|
|
|
|
.and_then(|geyser_plugin_service| geyser_plugin_service.get_block_metadata_notifier());
|
2021-12-29 15:12:01 -08:00
|
|
|
|
2021-11-17 17:11:38 -08:00
|
|
|
info!(
|
2022-03-14 18:18:46 -07:00
|
|
|
"Geyser plugin: accounts_update_notifier: {} transaction_notifier: {}",
|
2021-11-23 09:55:53 -08:00
|
|
|
accounts_update_notifier.is_some(),
|
|
|
|
transaction_notifier.is_some()
|
2021-11-17 17:11:38 -08:00
|
|
|
);
|
|
|
|
|
2021-11-22 12:37:17 -08:00
|
|
|
let system_monitor_service = Some(SystemMonitorService::new(
|
|
|
|
Arc::clone(&exit),
|
2022-03-14 19:38:04 -07:00
|
|
|
!config.no_os_memory_stats_reporting,
|
2021-11-22 12:37:17 -08:00
|
|
|
!config.no_os_network_stats_reporting,
|
|
|
|
));
|
|
|
|
|
2022-03-30 07:04:49 -07:00
|
|
|
let (poh_timing_point_sender, poh_timing_point_receiver) = unbounded();
|
|
|
|
let poh_timing_report_service =
|
|
|
|
PohTimingReportService::new(poh_timing_point_receiver, exit.clone());
|
|
|
|
|
2019-05-09 14:10:04 -07:00
|
|
|
let (
|
2020-02-20 18:53:26 -08:00
|
|
|
genesis_config,
|
2022-03-04 01:52:22 -08:00
|
|
|
bank_forks,
|
2020-01-13 13:13:52 -08:00
|
|
|
blockstore,
|
2019-05-09 14:10:04 -07:00
|
|
|
ledger_signal_receiver,
|
2021-06-02 17:20:00 -07:00
|
|
|
completed_slots_receiver,
|
2019-05-09 14:10:04 -07:00
|
|
|
leader_schedule_cache,
|
2021-10-08 13:14:56 -07:00
|
|
|
starting_snapshot_hashes,
|
2020-07-14 20:14:48 -07:00
|
|
|
TransactionHistoryServices {
|
|
|
|
transaction_status_sender,
|
|
|
|
transaction_status_service,
|
2021-03-26 15:47:35 -07:00
|
|
|
max_complete_transaction_status_slot,
|
2020-07-14 20:14:48 -07:00
|
|
|
rewards_recorder_sender,
|
|
|
|
rewards_recorder_service,
|
2021-05-26 21:16:16 -07:00
|
|
|
cache_block_meta_sender,
|
|
|
|
cache_block_meta_service,
|
2020-07-14 20:14:48 -07:00
|
|
|
},
|
2022-03-15 20:53:46 -07:00
|
|
|
blockstore_process_options,
|
|
|
|
blockstore_root_scan,
|
2022-04-05 11:02:33 -07:00
|
|
|
pruned_banks_receiver,
|
2022-03-15 20:53:46 -07:00
|
|
|
) = load_blockstore(
|
2020-09-18 14:35:20 -07:00
|
|
|
config,
|
|
|
|
ledger_path,
|
|
|
|
&exit,
|
2021-03-04 13:01:11 -08:00
|
|
|
&start_progress,
|
2021-11-17 17:11:38 -08:00
|
|
|
accounts_update_notifier,
|
2021-11-23 09:55:53 -08:00
|
|
|
transaction_notifier,
|
2022-03-30 07:04:49 -07:00
|
|
|
Some(poh_timing_point_sender.clone()),
|
2020-09-18 14:35:20 -07:00
|
|
|
);
|
2019-02-20 17:05:57 -08:00
|
|
|
|
2018-11-15 13:23:26 -08:00
|
|
|
node.info.wallclock = timestamp();
|
2020-02-20 18:53:26 -08:00
|
|
|
node.info.shred_version = compute_shred_version(
|
|
|
|
&genesis_config.hash(),
|
2022-03-18 12:43:20 -07:00
|
|
|
Some(
|
|
|
|
&bank_forks
|
|
|
|
.read()
|
|
|
|
.unwrap()
|
|
|
|
.working_bank()
|
|
|
|
.hard_forks()
|
|
|
|
.read()
|
|
|
|
.unwrap(),
|
|
|
|
),
|
2020-02-20 18:53:26 -08:00
|
|
|
);
|
2020-06-25 02:24:16 -07:00
|
|
|
|
2020-01-13 14:59:31 -08:00
|
|
|
Self::print_node_info(&node);
|
|
|
|
|
2020-01-28 15:56:55 -08:00
|
|
|
if let Some(expected_shred_version) = config.expected_shred_version {
|
|
|
|
if expected_shred_version != node.info.shred_version {
|
|
|
|
error!(
|
2020-06-30 12:43:48 -07:00
|
|
|
"shred version mismatch: expected {} found: {}",
|
|
|
|
expected_shred_version, node.info.shred_version,
|
2020-01-28 15:56:55 -08:00
|
|
|
);
|
2020-12-07 08:43:03 -08:00
|
|
|
abort();
|
2020-01-28 15:56:55 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-02-04 06:27:09 -08:00
|
|
|
let mut cluster_info = ClusterInfo::new(
|
|
|
|
node.info.clone(),
|
|
|
|
identity_keypair.clone(),
|
|
|
|
socket_addr_space,
|
|
|
|
);
|
2020-11-20 14:47:37 -08:00
|
|
|
cluster_info.set_contact_debug_interval(config.contact_debug_interval);
|
2020-12-25 22:31:25 -08:00
|
|
|
cluster_info.set_entrypoints(cluster_entrypoints);
|
|
|
|
cluster_info.restore_contact_info(ledger_path, config.contact_save_interval);
|
2020-11-20 14:47:37 -08:00
|
|
|
let cluster_info = Arc::new(cluster_info);
|
2022-03-15 13:14:49 -07:00
|
|
|
|
|
|
|
let (
|
|
|
|
accounts_background_service,
|
|
|
|
accounts_hash_verifier,
|
|
|
|
snapshot_packager_service,
|
|
|
|
accounts_background_request_sender,
|
|
|
|
) = {
|
2022-03-18 12:43:20 -07:00
|
|
|
let pending_accounts_package = PendingAccountsPackage::default();
|
2022-03-15 13:14:49 -07:00
|
|
|
let (
|
|
|
|
accounts_background_request_sender,
|
|
|
|
snapshot_request_handler,
|
|
|
|
pending_snapshot_package,
|
|
|
|
snapshot_packager_service,
|
|
|
|
) = if let Some(snapshot_config) = config.snapshot_config.clone() {
|
|
|
|
if !is_snapshot_config_valid(
|
|
|
|
snapshot_config.full_snapshot_archive_interval_slots,
|
|
|
|
snapshot_config.incremental_snapshot_archive_interval_slots,
|
|
|
|
config.accounts_hash_interval_slots,
|
|
|
|
) {
|
|
|
|
error!("Snapshot config is invalid");
|
|
|
|
}
|
|
|
|
|
|
|
|
let pending_snapshot_package = PendingSnapshotPackage::default();
|
|
|
|
|
|
|
|
// filler accounts make snapshots invalid for use
|
|
|
|
// so, do not publish that we have snapshots
|
|
|
|
let enable_gossip_push = config
|
|
|
|
.accounts_db_config
|
|
|
|
.as_ref()
|
|
|
|
.map(|config| config.filler_accounts_config.count == 0)
|
|
|
|
.unwrap_or(true);
|
|
|
|
|
|
|
|
let snapshot_packager_service = SnapshotPackagerService::new(
|
|
|
|
pending_snapshot_package.clone(),
|
|
|
|
starting_snapshot_hashes,
|
|
|
|
&exit,
|
|
|
|
&cluster_info,
|
|
|
|
snapshot_config.clone(),
|
|
|
|
enable_gossip_push,
|
|
|
|
);
|
|
|
|
|
|
|
|
let (snapshot_request_sender, snapshot_request_receiver) = unbounded();
|
|
|
|
(
|
|
|
|
AbsRequestSender::new(snapshot_request_sender),
|
|
|
|
Some(SnapshotRequestHandler {
|
|
|
|
snapshot_config,
|
|
|
|
snapshot_request_receiver,
|
|
|
|
pending_accounts_package: pending_accounts_package.clone(),
|
|
|
|
}),
|
|
|
|
Some(pending_snapshot_package),
|
|
|
|
Some(snapshot_packager_service),
|
|
|
|
)
|
|
|
|
} else {
|
|
|
|
(AbsRequestSender::default(), None, None, None)
|
|
|
|
};
|
|
|
|
|
|
|
|
let accounts_hash_verifier = AccountsHashVerifier::new(
|
|
|
|
Arc::clone(&pending_accounts_package),
|
|
|
|
pending_snapshot_package,
|
|
|
|
&exit,
|
|
|
|
&cluster_info,
|
|
|
|
config.known_validators.clone(),
|
|
|
|
config.halt_on_known_validators_accounts_hash_mismatch,
|
|
|
|
config.accounts_hash_fault_injection_slots,
|
|
|
|
config.snapshot_config.clone(),
|
|
|
|
);
|
|
|
|
|
2022-03-18 12:43:20 -07:00
|
|
|
let last_full_snapshot_slot = starting_snapshot_hashes.map(|x| x.full.hash.0);
|
2022-03-15 13:14:49 -07:00
|
|
|
let accounts_background_service = AccountsBackgroundService::new(
|
|
|
|
bank_forks.clone(),
|
|
|
|
&exit,
|
|
|
|
AbsRequestHandler {
|
|
|
|
snapshot_request_handler,
|
|
|
|
pruned_banks_receiver,
|
|
|
|
},
|
|
|
|
config.accounts_db_caching_enabled,
|
|
|
|
config.accounts_db_test_hash_calculation,
|
|
|
|
last_full_snapshot_slot,
|
|
|
|
);
|
|
|
|
|
|
|
|
(
|
|
|
|
accounts_background_service,
|
|
|
|
accounts_hash_verifier,
|
|
|
|
snapshot_packager_service,
|
|
|
|
accounts_background_request_sender,
|
|
|
|
)
|
|
|
|
};
|
|
|
|
|
2022-03-23 10:04:58 -07:00
|
|
|
let leader_schedule_cache = Arc::new(leader_schedule_cache);
|
|
|
|
let mut process_blockstore = ProcessBlockStore::new(
|
|
|
|
&id,
|
|
|
|
vote_account,
|
|
|
|
&start_progress,
|
2022-03-18 12:43:20 -07:00
|
|
|
&blockstore,
|
|
|
|
&bank_forks,
|
|
|
|
&leader_schedule_cache,
|
|
|
|
&blockstore_process_options,
|
|
|
|
transaction_status_sender.as_ref(),
|
2022-03-23 10:04:58 -07:00
|
|
|
cache_block_meta_sender.clone(),
|
2022-03-18 12:43:20 -07:00
|
|
|
blockstore_root_scan,
|
2022-03-23 10:04:58 -07:00
|
|
|
accounts_background_request_sender.clone(),
|
|
|
|
config,
|
2022-03-18 12:43:20 -07:00
|
|
|
);
|
|
|
|
|
2022-03-23 10:04:58 -07:00
|
|
|
maybe_warp_slot(
|
|
|
|
config,
|
|
|
|
&mut process_blockstore,
|
|
|
|
ledger_path,
|
|
|
|
&bank_forks,
|
|
|
|
&leader_schedule_cache,
|
|
|
|
);
|
2022-03-18 12:43:20 -07:00
|
|
|
|
|
|
|
*start_progress.write().unwrap() = ValidatorStartProgress::StartingServices;
|
|
|
|
|
|
|
|
let sample_performance_service =
|
|
|
|
if config.rpc_addrs.is_some() && config.rpc_config.enable_rpc_transaction_history {
|
|
|
|
Some(SamplePerformanceService::new(
|
|
|
|
&bank_forks,
|
|
|
|
&blockstore,
|
|
|
|
&exit,
|
|
|
|
))
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
};
|
|
|
|
|
2020-07-23 10:44:57 -07:00
|
|
|
let mut block_commitment_cache = BlockCommitmentCache::default();
|
2022-04-20 17:53:29 -07:00
|
|
|
block_commitment_cache.initialize_slots(
|
|
|
|
bank_forks.read().unwrap().working_bank().slot(),
|
|
|
|
bank_forks.read().unwrap().root(),
|
|
|
|
);
|
2020-07-23 10:44:57 -07:00
|
|
|
let block_commitment_cache = Arc::new(RwLock::new(block_commitment_cache));
|
2019-11-11 10:18:34 -08:00
|
|
|
|
2020-09-28 19:43:05 -07:00
|
|
|
let optimistically_confirmed_bank =
|
|
|
|
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
|
|
|
|
|
2021-09-17 12:40:14 -07:00
|
|
|
let rpc_subscriptions = Arc::new(RpcSubscriptions::new_with_config(
|
2020-05-06 23:23:06 -07:00
|
|
|
&exit,
|
2021-12-17 15:03:09 -08:00
|
|
|
max_complete_transaction_status_slot.clone(),
|
|
|
|
blockstore.clone(),
|
2020-05-06 23:23:06 -07:00
|
|
|
bank_forks.clone(),
|
|
|
|
block_commitment_cache.clone(),
|
2020-09-28 19:43:05 -07:00
|
|
|
optimistically_confirmed_bank.clone(),
|
2021-09-17 12:40:14 -07:00
|
|
|
&config.pubsub_config,
|
2020-05-06 23:23:06 -07:00
|
|
|
));
|
2020-01-30 09:17:01 -08:00
|
|
|
|
2021-02-23 13:06:33 -08:00
|
|
|
let max_slots = Arc::new(MaxSlots::default());
|
2020-09-01 22:06:06 -07:00
|
|
|
let (completed_data_sets_sender, completed_data_sets_receiver) =
|
|
|
|
bounded(MAX_COMPLETED_DATA_SETS_IN_CHANNEL);
|
|
|
|
let completed_data_sets_service = CompletedDataSetsService::new(
|
|
|
|
completed_data_sets_receiver,
|
|
|
|
blockstore.clone(),
|
2021-06-16 10:57:52 -07:00
|
|
|
rpc_subscriptions.clone(),
|
2020-09-01 22:06:06 -07:00
|
|
|
&exit,
|
2021-02-23 13:06:33 -08:00
|
|
|
max_slots.clone(),
|
2020-09-01 22:06:06 -07:00
|
|
|
);
|
|
|
|
|
2020-09-08 02:00:49 -07:00
|
|
|
let poh_config = Arc::new(genesis_config.poh_config.clone());
|
2022-04-20 17:53:29 -07:00
|
|
|
let (poh_recorder, entry_receiver, record_receiver) = {
|
|
|
|
let bank = &bank_forks.read().unwrap().working_bank();
|
|
|
|
PohRecorder::new_with_clear_signal(
|
|
|
|
bank.tick_height(),
|
|
|
|
bank.last_blockhash(),
|
|
|
|
bank.clone(),
|
|
|
|
None,
|
|
|
|
bank.ticks_per_slot(),
|
2021-03-23 07:10:04 -07:00
|
|
|
&id,
|
2022-04-20 17:53:29 -07:00
|
|
|
&blockstore,
|
|
|
|
blockstore.get_new_shred_signal(0),
|
|
|
|
&leader_schedule_cache,
|
|
|
|
&poh_config,
|
|
|
|
Some(poh_timing_point_sender),
|
|
|
|
exit.clone(),
|
|
|
|
)
|
|
|
|
};
|
2019-08-08 09:14:30 -07:00
|
|
|
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
|
|
|
|
|
2020-09-08 02:00:49 -07:00
|
|
|
let rpc_override_health_check = Arc::new(AtomicBool::new(false));
|
2021-02-26 21:42:09 -08:00
|
|
|
let (
|
|
|
|
json_rpc_service,
|
|
|
|
pubsub_service,
|
|
|
|
optimistically_confirmed_bank_tracker,
|
|
|
|
bank_notification_sender,
|
|
|
|
) = if let Some((rpc_addr, rpc_pubsub_addr)) = config.rpc_addrs {
|
2021-07-23 08:25:03 -07:00
|
|
|
if ContactInfo::is_valid_address(&node.info.rpc, &socket_addr_space) {
|
|
|
|
assert!(ContactInfo::is_valid_address(
|
|
|
|
&node.info.rpc_pubsub,
|
|
|
|
&socket_addr_space
|
|
|
|
));
|
2020-09-28 19:43:05 -07:00
|
|
|
} else {
|
2021-07-23 08:25:03 -07:00
|
|
|
assert!(!ContactInfo::is_valid_address(
|
|
|
|
&node.info.rpc_pubsub,
|
|
|
|
&socket_addr_space
|
|
|
|
));
|
2020-12-01 19:25:09 -08:00
|
|
|
}
|
2021-09-01 14:10:16 -07:00
|
|
|
|
2020-12-01 19:25:09 -08:00
|
|
|
let (bank_notification_sender, bank_notification_receiver) = unbounded();
|
2021-09-30 14:26:17 -07:00
|
|
|
let confirmed_bank_subscribers = if !bank_notification_senders.is_empty() {
|
|
|
|
Some(Arc::new(RwLock::new(bank_notification_senders)))
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
};
|
2020-12-01 19:25:09 -08:00
|
|
|
(
|
2021-02-26 21:42:09 -08:00
|
|
|
Some(JsonRpcService::new(
|
|
|
|
rpc_addr,
|
|
|
|
config.rpc_config.clone(),
|
|
|
|
config.snapshot_config.clone(),
|
|
|
|
bank_forks.clone(),
|
|
|
|
block_commitment_cache.clone(),
|
|
|
|
blockstore.clone(),
|
|
|
|
cluster_info.clone(),
|
|
|
|
Some(poh_recorder.clone()),
|
|
|
|
genesis_config.hash(),
|
|
|
|
ledger_path,
|
|
|
|
config.validator_exit.clone(),
|
2021-11-12 10:57:55 -08:00
|
|
|
config.known_validators.clone(),
|
2021-02-26 21:42:09 -08:00
|
|
|
rpc_override_health_check.clone(),
|
|
|
|
optimistically_confirmed_bank.clone(),
|
2021-10-19 16:11:46 -07:00
|
|
|
config.send_transaction_service_config.clone(),
|
2021-02-26 21:42:09 -08:00
|
|
|
max_slots.clone(),
|
2021-03-23 10:48:54 -07:00
|
|
|
leader_schedule_cache.clone(),
|
2021-03-26 15:47:35 -07:00
|
|
|
max_complete_transaction_status_slot,
|
2021-02-26 21:42:09 -08:00
|
|
|
)),
|
2022-01-28 18:00:18 -08:00
|
|
|
if !config.rpc_config.full_api {
|
2021-02-26 21:42:09 -08:00
|
|
|
None
|
|
|
|
} else {
|
2021-09-17 12:40:14 -07:00
|
|
|
let (trigger, pubsub_service) = PubSubService::new(
|
2020-12-01 19:25:09 -08:00
|
|
|
config.pubsub_config.clone(),
|
2021-06-16 10:57:52 -07:00
|
|
|
&rpc_subscriptions,
|
2020-12-01 19:25:09 -08:00
|
|
|
rpc_pubsub_addr,
|
2021-09-17 12:40:14 -07:00
|
|
|
);
|
|
|
|
config
|
|
|
|
.validator_exit
|
|
|
|
.write()
|
|
|
|
.unwrap()
|
|
|
|
.register_exit(Box::new(move || trigger.cancel()));
|
|
|
|
|
|
|
|
Some(pubsub_service)
|
2021-02-26 21:42:09 -08:00
|
|
|
},
|
|
|
|
Some(OptimisticallyConfirmedBankTracker::new(
|
|
|
|
bank_notification_receiver,
|
|
|
|
&exit,
|
|
|
|
bank_forks.clone(),
|
|
|
|
optimistically_confirmed_bank,
|
2021-06-16 10:57:52 -07:00
|
|
|
rpc_subscriptions.clone(),
|
2021-09-30 14:26:17 -07:00
|
|
|
confirmed_bank_subscribers,
|
2021-02-26 21:42:09 -08:00
|
|
|
)),
|
2020-12-01 19:25:09 -08:00
|
|
|
Some(bank_notification_sender),
|
|
|
|
)
|
|
|
|
} else {
|
2022-04-06 15:52:19 -07:00
|
|
|
(None, None, None, None)
|
2020-12-01 19:25:09 -08:00
|
|
|
};
|
2020-09-08 02:00:49 -07:00
|
|
|
|
2022-04-19 15:06:30 -07:00
|
|
|
if config.halt_at_slot.is_some() {
|
2021-01-26 11:23:07 -08:00
|
|
|
// Simulate a confirmed root to avoid RPC errors with CommitmentConfig::finalized() and
|
2020-10-19 14:16:13 -07:00
|
|
|
// to ensure RPC endpoints like getConfirmedBlock, which require a confirmed root, work
|
|
|
|
block_commitment_cache
|
|
|
|
.write()
|
|
|
|
.unwrap()
|
|
|
|
.set_highest_confirmed_root(bank_forks.read().unwrap().root());
|
|
|
|
|
|
|
|
// Park with the RPC service running, ready for inspection!
|
|
|
|
warn!("Validator halted");
|
2021-03-04 13:01:11 -08:00
|
|
|
*start_progress.write().unwrap() = ValidatorStartProgress::Halted;
|
2020-10-19 14:16:13 -07:00
|
|
|
std::thread::park();
|
|
|
|
}
|
2021-06-17 11:52:56 -07:00
|
|
|
let ip_echo_server = match node.sockets.ip_echo {
|
|
|
|
None => None,
|
|
|
|
Some(tcp_listener) => Some(solana_net_utils::ip_echo_server(
|
|
|
|
tcp_listener,
|
|
|
|
Some(node.info.shred_version),
|
|
|
|
)),
|
|
|
|
};
|
2021-12-17 15:21:05 -08:00
|
|
|
|
2022-01-11 02:44:46 -08:00
|
|
|
let (stats_reporter_sender, stats_reporter_receiver) = unbounded();
|
2022-01-03 11:46:02 -08:00
|
|
|
|
2021-12-17 15:21:05 -08:00
|
|
|
let stats_reporter_service = StatsReporterService::new(stats_reporter_receiver, &exit);
|
|
|
|
|
2018-12-06 12:52:47 -08:00
|
|
|
let gossip_service = GossipService::new(
|
2018-10-08 19:55:54 -07:00
|
|
|
&cluster_info,
|
2019-02-20 21:36:08 -08:00
|
|
|
Some(bank_forks.clone()),
|
2018-08-22 18:00:56 -07:00
|
|
|
node.sockets.gossip,
|
2020-09-11 12:00:16 -07:00
|
|
|
config.gossip_validators.clone(),
|
2021-02-03 08:26:17 -08:00
|
|
|
should_check_duplicate_instance,
|
2021-12-17 15:21:05 -08:00
|
|
|
Some(stats_reporter_sender.clone()),
|
2019-03-04 16:33:14 -08:00
|
|
|
&exit,
|
2018-09-03 02:23:43 -07:00
|
|
|
);
|
2020-01-31 14:23:51 -08:00
|
|
|
let serve_repair = Arc::new(RwLock::new(ServeRepair::new(cluster_info.clone())));
|
|
|
|
let serve_repair_service = ServeRepairService::new(
|
|
|
|
&serve_repair,
|
|
|
|
Some(blockstore.clone()),
|
|
|
|
node.sockets.serve_repair,
|
2021-07-23 08:25:03 -07:00
|
|
|
socket_addr_space,
|
2021-12-17 15:21:05 -08:00
|
|
|
stats_reporter_sender,
|
2020-01-31 14:23:51 -08:00
|
|
|
&exit,
|
|
|
|
);
|
|
|
|
|
2021-03-25 18:54:51 -07:00
|
|
|
let waited_for_supermajority = if let Ok(waited) = wait_for_supermajority(
|
2021-03-04 13:01:11 -08:00
|
|
|
config,
|
2022-03-23 10:04:58 -07:00
|
|
|
Some(&mut process_blockstore),
|
|
|
|
&bank_forks,
|
2021-03-04 13:01:11 -08:00
|
|
|
&cluster_info,
|
|
|
|
rpc_override_health_check,
|
|
|
|
&start_progress,
|
|
|
|
) {
|
2021-03-25 18:54:51 -07:00
|
|
|
waited
|
|
|
|
} else {
|
2020-12-07 08:43:03 -08:00
|
|
|
abort();
|
2021-03-25 18:54:51 -07:00
|
|
|
};
|
|
|
|
|
2022-03-15 14:00:06 -07:00
|
|
|
let ledger_metric_report_service =
|
|
|
|
LedgerMetricReportService::new(blockstore.clone(), &exit);
|
|
|
|
|
2021-03-25 18:54:51 -07:00
|
|
|
let wait_for_vote_to_start_leader =
|
|
|
|
!waited_for_supermajority && !config.no_wait_for_vote_to_start_leader;
|
2018-12-03 00:10:43 -08:00
|
|
|
|
2020-12-29 09:35:57 -08:00
|
|
|
let poh_service = PohService::new(
|
|
|
|
poh_recorder.clone(),
|
|
|
|
&poh_config,
|
|
|
|
&exit,
|
2022-04-20 17:53:29 -07:00
|
|
|
bank_forks.read().unwrap().root_bank().ticks_per_slot(),
|
2020-12-29 11:09:47 -08:00
|
|
|
config.poh_pinned_cpu_core,
|
2021-03-05 16:01:21 -08:00
|
|
|
config.poh_hashes_per_batch,
|
2021-03-23 07:10:04 -07:00
|
|
|
record_receiver,
|
2020-12-29 09:35:57 -08:00
|
|
|
);
|
2020-01-07 13:18:34 -08:00
|
|
|
assert_eq!(
|
2022-04-04 09:38:05 -07:00
|
|
|
blockstore.get_new_shred_signals_len(),
|
2020-01-07 13:18:34 -08:00
|
|
|
1,
|
|
|
|
"New shred signal for the TVU should be the same as the clear bank signal."
|
|
|
|
);
|
|
|
|
|
2022-02-04 11:01:59 -08:00
|
|
|
let vote_tracker = Arc::<VoteTracker>::default();
|
2021-07-07 21:44:51 -07:00
|
|
|
let mut cost_model = CostModel::default();
|
2022-03-08 10:54:59 -08:00
|
|
|
// initialize cost model with built-in instruction costs only
|
|
|
|
cost_model.initialize_cost_table(&[]);
|
2021-07-07 21:44:51 -07:00
|
|
|
let cost_model = Arc::new(RwLock::new(cost_model));
|
2021-06-09 15:10:59 -07:00
|
|
|
|
2020-03-19 23:35:01 -07:00
|
|
|
let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded();
|
2020-07-09 22:52:54 -07:00
|
|
|
let (verified_vote_sender, verified_vote_receiver) = unbounded();
|
2021-04-10 17:34:45 -07:00
|
|
|
let (gossip_verified_vote_hash_sender, gossip_verified_vote_hash_receiver) = unbounded();
|
2021-03-24 23:41:52 -07:00
|
|
|
let (cluster_confirmed_slot_sender, cluster_confirmed_slot_receiver) = unbounded();
|
2021-06-16 10:57:52 -07:00
|
|
|
|
2022-03-31 14:44:23 -07:00
|
|
|
let rpc_completed_slots_service = RpcCompletedSlotsService::spawn(
|
|
|
|
completed_slots_receiver,
|
|
|
|
rpc_subscriptions.clone(),
|
|
|
|
exit.clone(),
|
|
|
|
);
|
2021-06-16 10:57:52 -07:00
|
|
|
|
2021-09-07 13:43:43 -07:00
|
|
|
let (replay_vote_sender, replay_vote_receiver) = unbounded();
|
2019-02-11 17:56:52 -08:00
|
|
|
let tvu = Tvu::new(
|
2019-03-08 18:29:08 -08:00
|
|
|
vote_account,
|
2020-03-31 08:23:42 -07:00
|
|
|
authorized_voter_keypairs,
|
2019-02-21 11:19:45 -08:00
|
|
|
&bank_forks,
|
2019-01-26 00:28:08 -08:00
|
|
|
&cluster_info,
|
2022-01-10 09:29:48 -08:00
|
|
|
TvuSockets {
|
|
|
|
repair: node.sockets.repair,
|
|
|
|
retransmit: node.sockets.retransmit_sockets,
|
|
|
|
fetch: node.sockets.tvu,
|
|
|
|
forwards: node.sockets.tvu_forwards,
|
|
|
|
ancestor_hashes_requests: node.sockets.ancestor_hashes_requests,
|
2020-01-30 09:26:27 -08:00
|
|
|
},
|
2020-01-13 13:13:52 -08:00
|
|
|
blockstore.clone(),
|
2019-02-04 15:33:43 -08:00
|
|
|
ledger_signal_receiver,
|
2021-06-16 10:57:52 -07:00
|
|
|
&rpc_subscriptions,
|
2019-03-03 16:44:06 -08:00
|
|
|
&poh_recorder,
|
2022-03-23 10:04:58 -07:00
|
|
|
process_blockstore,
|
2021-07-20 22:25:13 -07:00
|
|
|
config.tower_storage.clone(),
|
2019-04-19 02:39:44 -07:00
|
|
|
&leader_schedule_cache,
|
2019-03-04 16:33:14 -08:00
|
|
|
&exit,
|
2019-11-04 15:44:27 -08:00
|
|
|
block_commitment_cache,
|
2022-04-21 21:14:07 -07:00
|
|
|
config.turbine_disabled.clone(),
|
2019-11-20 15:43:10 -08:00
|
|
|
transaction_status_sender.clone(),
|
2020-02-11 17:01:49 -08:00
|
|
|
rewards_recorder_sender,
|
2021-05-26 21:16:16 -07:00
|
|
|
cache_block_meta_sender,
|
2020-03-09 22:03:09 -07:00
|
|
|
vote_tracker.clone(),
|
2020-03-19 23:35:01 -07:00
|
|
|
retransmit_slots_sender,
|
2021-04-10 17:34:45 -07:00
|
|
|
gossip_verified_vote_hash_receiver,
|
2020-07-09 22:52:54 -07:00
|
|
|
verified_vote_receiver,
|
2020-08-07 11:21:35 -07:00
|
|
|
replay_vote_sender.clone(),
|
2020-09-01 22:06:06 -07:00
|
|
|
completed_data_sets_sender,
|
2020-09-28 19:43:05 -07:00
|
|
|
bank_notification_sender.clone(),
|
2021-03-24 23:41:52 -07:00
|
|
|
cluster_confirmed_slot_receiver,
|
2020-03-16 08:37:31 -07:00
|
|
|
TvuConfig {
|
2020-03-31 17:21:19 -07:00
|
|
|
max_ledger_shreds: config.max_ledger_shreds,
|
2020-03-16 08:37:31 -07:00
|
|
|
shred_version: node.info.shred_version,
|
2020-08-21 00:35:11 -07:00
|
|
|
repair_validators: config.repair_validators.clone(),
|
2021-02-14 10:16:30 -08:00
|
|
|
rocksdb_compaction_interval: config.rocksdb_compaction_interval,
|
|
|
|
rocksdb_max_compaction_jitter: config.rocksdb_compaction_interval,
|
2021-03-25 18:54:51 -07:00
|
|
|
wait_for_vote_to_start_leader,
|
2020-03-16 08:37:31 -07:00
|
|
|
},
|
2021-02-23 13:06:33 -08:00
|
|
|
&max_slots,
|
2021-06-09 15:10:59 -07:00
|
|
|
&cost_model,
|
2021-12-29 15:12:01 -08:00
|
|
|
block_metadata_notifier,
|
2022-02-15 12:19:34 -08:00
|
|
|
config.wait_to_vote_slot,
|
2022-03-15 13:14:49 -07:00
|
|
|
accounts_background_request_sender,
|
2022-04-30 20:52:38 -07:00
|
|
|
use_quic,
|
2019-03-03 16:44:06 -08:00
|
|
|
);
|
2019-04-19 14:18:19 -07:00
|
|
|
|
2019-03-03 16:44:06 -08:00
|
|
|
let tpu = Tpu::new(
|
|
|
|
&cluster_info,
|
|
|
|
&poh_recorder,
|
|
|
|
entry_receiver,
|
2020-03-19 23:35:01 -07:00
|
|
|
retransmit_slots_receiver,
|
2022-01-10 09:29:48 -08:00
|
|
|
TpuSockets {
|
|
|
|
transactions: node.sockets.tpu,
|
|
|
|
transaction_forwards: node.sockets.tpu_forwards,
|
|
|
|
vote: node.sockets.tpu_vote,
|
|
|
|
broadcast: node.sockets.broadcast,
|
2022-02-04 06:27:09 -08:00
|
|
|
transactions_quic: node.sockets.tpu_quic,
|
2022-01-10 09:29:48 -08:00
|
|
|
},
|
2021-06-16 10:57:52 -07:00
|
|
|
&rpc_subscriptions,
|
2019-11-20 15:43:10 -08:00
|
|
|
transaction_status_sender,
|
2020-01-13 13:13:52 -08:00
|
|
|
&blockstore,
|
2019-06-19 00:13:19 -07:00
|
|
|
&config.broadcast_stage_type,
|
2019-03-04 16:33:14 -08:00
|
|
|
&exit,
|
2020-01-13 14:59:31 -08:00
|
|
|
node.info.shred_version,
|
2020-03-09 22:03:09 -07:00
|
|
|
vote_tracker,
|
2022-02-14 10:27:11 -08:00
|
|
|
bank_forks.clone(),
|
2020-07-09 22:52:54 -07:00
|
|
|
verified_vote_sender,
|
2021-04-10 17:34:45 -07:00
|
|
|
gossip_verified_vote_hash_sender,
|
2020-08-07 11:21:35 -07:00
|
|
|
replay_vote_receiver,
|
|
|
|
replay_vote_sender,
|
2020-09-28 19:43:05 -07:00
|
|
|
bank_notification_sender,
|
2021-02-26 09:15:45 -08:00
|
|
|
config.tpu_coalesce_ms,
|
2021-03-24 23:41:52 -07:00
|
|
|
cluster_confirmed_slot_sender,
|
2021-06-09 15:10:59 -07:00
|
|
|
&cost_model,
|
2022-02-04 06:27:09 -08:00
|
|
|
&identity_keypair,
|
2019-01-26 00:28:08 -08:00
|
|
|
);
|
2018-12-12 12:38:00 -08:00
|
|
|
|
2019-07-30 13:18:33 -07:00
|
|
|
datapoint_info!("validator-new", ("id", id.to_string(), String));
|
2021-09-01 14:10:16 -07:00
|
|
|
|
2021-03-04 13:01:11 -08:00
|
|
|
*start_progress.write().unwrap() = ValidatorStartProgress::Running;
|
2019-02-21 11:37:48 -08:00
|
|
|
Self {
|
2021-12-17 15:21:05 -08:00
|
|
|
stats_reporter_service,
|
2018-12-06 12:52:47 -08:00
|
|
|
gossip_service,
|
2020-01-31 14:23:51 -08:00
|
|
|
serve_repair_service,
|
2021-02-26 21:42:09 -08:00
|
|
|
json_rpc_service,
|
|
|
|
pubsub_service,
|
2021-06-16 10:57:52 -07:00
|
|
|
rpc_completed_slots_service,
|
2021-02-26 21:42:09 -08:00
|
|
|
optimistically_confirmed_bank_tracker,
|
2019-11-20 15:43:10 -08:00
|
|
|
transaction_status_service,
|
2020-02-04 18:50:24 -08:00
|
|
|
rewards_recorder_service,
|
2021-05-26 21:16:16 -07:00
|
|
|
cache_block_meta_service,
|
2021-10-15 15:11:11 -07:00
|
|
|
system_monitor_service,
|
2020-09-22 12:26:32 -07:00
|
|
|
sample_performance_service,
|
2022-03-30 07:04:49 -07:00
|
|
|
poh_timing_report_service,
|
2020-03-05 22:52:31 -08:00
|
|
|
snapshot_packager_service,
|
2020-09-01 22:06:06 -07:00
|
|
|
completed_data_sets_service,
|
2019-03-04 19:02:03 -08:00
|
|
|
tpu,
|
|
|
|
tvu,
|
2019-02-26 10:48:18 -08:00
|
|
|
poh_service,
|
|
|
|
poh_recorder,
|
2019-05-03 11:01:35 -07:00
|
|
|
ip_echo_server,
|
2021-03-01 13:20:04 -08:00
|
|
|
validator_exit: config.validator_exit.clone(),
|
2021-06-17 13:51:06 -07:00
|
|
|
cluster_info,
|
2022-02-14 10:27:11 -08:00
|
|
|
bank_forks,
|
2022-04-19 15:06:30 -07:00
|
|
|
blockstore,
|
2022-03-14 18:18:46 -07:00
|
|
|
geyser_plugin_service,
|
2022-03-15 14:00:06 -07:00
|
|
|
ledger_metric_report_service,
|
2022-03-15 13:14:49 -07:00
|
|
|
accounts_background_service,
|
|
|
|
accounts_hash_verifier,
|
2019-02-01 18:09:38 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-01-28 20:10:38 -08:00
|
|
|
// Used for notifying many nodes in parallel to exit
|
2019-08-20 23:59:31 -07:00
|
|
|
pub fn exit(&mut self) {
|
2021-03-01 13:20:04 -08:00
|
|
|
self.validator_exit.write().unwrap().exit();
|
2022-04-04 09:38:05 -07:00
|
|
|
|
|
|
|
// drop all signals in blockstore
|
|
|
|
self.blockstore.drop_signal();
|
2018-07-16 22:22:29 -07:00
|
|
|
}
|
2018-09-13 14:00:17 -07:00
|
|
|
|
2020-12-10 17:28:52 -08:00
|
|
|
pub fn close(mut self) {
|
2018-07-17 08:18:42 -07:00
|
|
|
self.exit();
|
2020-12-10 17:28:52 -08:00
|
|
|
self.join();
|
2018-07-02 15:24:40 -07:00
|
|
|
}
|
2019-09-14 12:32:57 -07:00
|
|
|
|
|
|
|
/// Log the node's contact info and the local bind addresses of its key
/// sockets (gossip, broadcast, repair, retransmit) at info level.
fn print_node_info(node: &Node) {
    info!("{:?}", node.info);

    let sockets = &node.sockets;
    info!(
        "local gossip address: {}",
        sockets.gossip.local_addr().unwrap()
    );
    info!(
        "local broadcast address: {}",
        sockets.broadcast.first().unwrap().local_addr().unwrap()
    );
    info!(
        "local repair address: {}",
        sockets.repair.local_addr().unwrap()
    );
    info!(
        "local retransmit address: {}",
        sockets.retransmit_sockets[0].local_addr().unwrap()
    );
}
|
2019-11-13 10:12:09 -08:00
|
|
|
|
2020-12-10 17:28:52 -08:00
|
|
|
pub fn join(self) {
|
2022-02-14 10:27:11 -08:00
|
|
|
drop(self.bank_forks);
|
2021-06-17 13:51:06 -07:00
|
|
|
drop(self.cluster_info);
|
|
|
|
|
2020-11-30 22:18:41 -08:00
|
|
|
self.poh_service.join().expect("poh_service");
|
2019-11-13 10:12:09 -08:00
|
|
|
drop(self.poh_recorder);
|
2021-02-26 21:42:09 -08:00
|
|
|
|
|
|
|
if let Some(json_rpc_service) = self.json_rpc_service {
|
2020-11-30 22:18:41 -08:00
|
|
|
json_rpc_service.join().expect("rpc_service");
|
2021-02-26 21:42:09 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
if let Some(pubsub_service) = self.pubsub_service {
|
2020-11-30 22:18:41 -08:00
|
|
|
pubsub_service.join().expect("pubsub_service");
|
2021-02-26 21:42:09 -08:00
|
|
|
}
|
|
|
|
|
2021-06-16 10:57:52 -07:00
|
|
|
self.rpc_completed_slots_service
|
|
|
|
.join()
|
|
|
|
.expect("rpc_completed_slots_service");
|
|
|
|
|
2021-02-26 21:42:09 -08:00
|
|
|
if let Some(optimistically_confirmed_bank_tracker) =
|
|
|
|
self.optimistically_confirmed_bank_tracker
|
|
|
|
{
|
2020-11-30 22:18:41 -08:00
|
|
|
optimistically_confirmed_bank_tracker
|
|
|
|
.join()
|
|
|
|
.expect("optimistically_confirmed_bank_tracker");
|
2019-11-13 10:12:09 -08:00
|
|
|
}
|
2021-02-26 21:42:09 -08:00
|
|
|
|
2019-11-20 15:43:10 -08:00
|
|
|
if let Some(transaction_status_service) = self.transaction_status_service {
|
2020-11-30 22:18:41 -08:00
|
|
|
transaction_status_service
|
|
|
|
.join()
|
|
|
|
.expect("transaction_status_service");
|
2019-11-20 15:43:10 -08:00
|
|
|
}
|
2019-11-13 10:12:09 -08:00
|
|
|
|
2020-02-04 18:50:24 -08:00
|
|
|
if let Some(rewards_recorder_service) = self.rewards_recorder_service {
|
2020-11-30 22:18:41 -08:00
|
|
|
rewards_recorder_service
|
|
|
|
.join()
|
|
|
|
.expect("rewards_recorder_service");
|
2020-02-04 18:50:24 -08:00
|
|
|
}
|
|
|
|
|
2021-05-26 21:16:16 -07:00
|
|
|
if let Some(cache_block_meta_service) = self.cache_block_meta_service {
|
|
|
|
cache_block_meta_service
|
2020-11-30 22:18:41 -08:00
|
|
|
.join()
|
2021-05-26 21:16:16 -07:00
|
|
|
.expect("cache_block_meta_service");
|
2020-09-09 08:33:14 -07:00
|
|
|
}
|
|
|
|
|
2021-10-15 15:11:11 -07:00
|
|
|
if let Some(system_monitor_service) = self.system_monitor_service {
|
|
|
|
system_monitor_service
|
|
|
|
.join()
|
|
|
|
.expect("system_monitor_service");
|
|
|
|
}
|
|
|
|
|
2020-09-22 12:26:32 -07:00
|
|
|
if let Some(sample_performance_service) = self.sample_performance_service {
|
2020-11-30 22:18:41 -08:00
|
|
|
sample_performance_service
|
|
|
|
.join()
|
|
|
|
.expect("sample_performance_service");
|
2020-09-22 12:26:32 -07:00
|
|
|
}
|
|
|
|
|
2020-03-05 22:52:31 -08:00
|
|
|
if let Some(s) = self.snapshot_packager_service {
|
2020-11-30 22:18:41 -08:00
|
|
|
s.join().expect("snapshot_packager_service");
|
2020-03-05 22:52:31 -08:00
|
|
|
}
|
|
|
|
|
2020-11-30 22:18:41 -08:00
|
|
|
self.gossip_service.join().expect("gossip_service");
|
|
|
|
self.serve_repair_service
|
|
|
|
.join()
|
|
|
|
.expect("serve_repair_service");
|
2021-12-17 15:21:05 -08:00
|
|
|
self.stats_reporter_service
|
|
|
|
.join()
|
|
|
|
.expect("stats_reporter_service");
|
2022-03-15 13:14:49 -07:00
|
|
|
self.ledger_metric_report_service
|
|
|
|
.join()
|
|
|
|
.expect("ledger_metric_report_service");
|
|
|
|
self.accounts_background_service
|
|
|
|
.join()
|
|
|
|
.expect("accounts_background_service");
|
|
|
|
self.accounts_hash_verifier
|
|
|
|
.join()
|
|
|
|
.expect("accounts_hash_verifier");
|
2020-11-30 22:18:41 -08:00
|
|
|
self.tpu.join().expect("tpu");
|
|
|
|
self.tvu.join().expect("tvu");
|
|
|
|
self.completed_data_sets_service
|
|
|
|
.join()
|
|
|
|
.expect("completed_data_sets_service");
|
2021-05-24 08:28:44 -07:00
|
|
|
if let Some(ip_echo_server) = self.ip_echo_server {
|
|
|
|
ip_echo_server.shutdown_background();
|
|
|
|
}
|
2021-09-01 14:10:16 -07:00
|
|
|
|
2022-03-14 18:18:46 -07:00
|
|
|
if let Some(geyser_plugin_service) = self.geyser_plugin_service {
|
|
|
|
geyser_plugin_service.join().expect("geyser_plugin_service");
|
2021-09-30 14:26:17 -07:00
|
|
|
}
|
2022-03-30 07:04:49 -07:00
|
|
|
|
|
|
|
self.poh_timing_report_service
|
|
|
|
.join()
|
|
|
|
.expect("poh_timing_report_service");
|
2019-11-13 10:12:09 -08:00
|
|
|
}
|
2019-02-06 19:47:55 -08:00
|
|
|
}
|
2018-09-14 01:53:18 -07:00
|
|
|
|
2020-09-18 22:03:54 -07:00
|
|
|
fn active_vote_account_exists_in_bank(bank: &Arc<Bank>, vote_account: &Pubkey) -> bool {
|
|
|
|
if let Some(account) = &bank.get_account(vote_account) {
|
2021-03-09 13:06:07 -08:00
|
|
|
if let Some(vote_state) = VoteState::from(account) {
|
2020-09-18 22:03:54 -07:00
|
|
|
return !vote_state.votes.is_empty();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
false
|
|
|
|
}
|
|
|
|
|
2020-12-29 09:35:57 -08:00
|
|
|
fn check_poh_speed(genesis_config: &GenesisConfig, maybe_hash_samples: Option<u64>) {
|
|
|
|
if let Some(hashes_per_tick) = genesis_config.hashes_per_tick() {
|
|
|
|
let ticks_per_slot = genesis_config.ticks_per_slot();
|
|
|
|
let hashes_per_slot = hashes_per_tick * ticks_per_slot;
|
|
|
|
|
|
|
|
let hash_samples = maybe_hash_samples.unwrap_or(hashes_per_slot);
|
|
|
|
let hash_time_ns = compute_hash_time_ns(hash_samples);
|
|
|
|
|
|
|
|
let my_ns_per_slot = (hash_time_ns * hashes_per_slot) / hash_samples;
|
|
|
|
debug!("computed: ns_per_slot: {}", my_ns_per_slot);
|
|
|
|
let target_ns_per_slot = genesis_config.ns_per_slot() as u64;
|
|
|
|
debug!(
|
|
|
|
"cluster ns_per_hash: {}ns ns_per_slot: {}",
|
|
|
|
target_ns_per_slot / hashes_per_slot,
|
|
|
|
target_ns_per_slot
|
|
|
|
);
|
|
|
|
if my_ns_per_slot < target_ns_per_slot {
|
|
|
|
let extra_ns = target_ns_per_slot - my_ns_per_slot;
|
|
|
|
info!("PoH speed check: Will sleep {}ns per slot.", extra_ns);
|
|
|
|
} else {
|
|
|
|
error!(
|
2020-12-29 11:09:09 -08:00
|
|
|
"PoH is slower than cluster target tick rate! mine: {} cluster: {}. If you wish to continue, try --no-poh-speed-test",
|
2020-12-29 09:35:57 -08:00
|
|
|
my_ns_per_slot, target_ns_per_slot,
|
|
|
|
);
|
|
|
|
abort();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-18 22:03:54 -07:00
|
|
|
fn post_process_restored_tower(
|
|
|
|
restored_tower: crate::consensus::Result<Tower>,
|
|
|
|
validator_identity: &Pubkey,
|
|
|
|
vote_account: &Pubkey,
|
|
|
|
config: &ValidatorConfig,
|
|
|
|
bank_forks: &BankForks,
|
|
|
|
) -> Tower {
|
2020-11-12 06:29:04 -08:00
|
|
|
let mut should_require_tower = config.require_tower;
|
|
|
|
|
2020-09-18 22:03:54 -07:00
|
|
|
restored_tower
|
|
|
|
.and_then(|tower| {
|
|
|
|
let root_bank = bank_forks.root_bank();
|
|
|
|
let slot_history = root_bank.get_slot_history();
|
2020-11-12 06:29:04 -08:00
|
|
|
let tower = tower.adjust_lockouts_after_replay(root_bank.slot(), &slot_history);
|
|
|
|
|
|
|
|
if let Some(wait_slot_for_supermajority) = config.wait_for_supermajority {
|
|
|
|
if root_bank.slot() == wait_slot_for_supermajority {
|
|
|
|
// intentionally fail to restore tower; we're supposedly in a new hard fork; past
|
|
|
|
// out-of-chain vote state doesn't make sense at all
|
|
|
|
// what if --wait-for-supermajority again if the validator restarted?
|
|
|
|
let message = format!("Hardfork is detected; discarding tower restoration result: {:?}", tower);
|
|
|
|
datapoint_error!(
|
|
|
|
"tower_error",
|
|
|
|
(
|
|
|
|
"error",
|
|
|
|
message,
|
|
|
|
String
|
|
|
|
),
|
|
|
|
);
|
|
|
|
error!("{}", message);
|
|
|
|
|
|
|
|
// unconditionally relax tower requirement so that we can always restore tower
|
|
|
|
// from root bank.
|
|
|
|
should_require_tower = false;
|
|
|
|
return Err(crate::consensus::TowerError::HardFork(wait_slot_for_supermajority));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-21 18:34:51 -08:00
|
|
|
if let Some(warp_slot) = config.warp_slot {
|
|
|
|
// unconditionally relax tower requirement so that we can always restore tower
|
|
|
|
// from root bank after the warp
|
|
|
|
should_require_tower = false;
|
|
|
|
return Err(crate::consensus::TowerError::HardFork(warp_slot));
|
|
|
|
}
|
|
|
|
|
2020-11-12 06:29:04 -08:00
|
|
|
tower
|
2020-09-18 22:03:54 -07:00
|
|
|
})
|
|
|
|
.unwrap_or_else(|err| {
|
|
|
|
let voting_has_been_active =
|
2021-06-18 06:34:46 -07:00
|
|
|
active_vote_account_exists_in_bank(&bank_forks.working_bank(), vote_account);
|
2020-10-15 02:30:33 -07:00
|
|
|
if !err.is_file_missing() {
|
2020-09-18 22:03:54 -07:00
|
|
|
datapoint_error!(
|
|
|
|
"tower_error",
|
|
|
|
(
|
|
|
|
"error",
|
|
|
|
format!("Unable to restore tower: {}", err),
|
|
|
|
String
|
|
|
|
),
|
|
|
|
);
|
|
|
|
}
|
2020-11-12 06:29:04 -08:00
|
|
|
if should_require_tower && voting_has_been_active {
|
2020-09-18 22:03:54 -07:00
|
|
|
error!("Requested mandatory tower restore failed: {}", err);
|
|
|
|
error!(
|
|
|
|
"And there is an existing vote_account containing actual votes. \
|
2020-10-20 18:26:20 -07:00
|
|
|
Aborting due to possible conflicting duplicate votes",
|
2020-09-18 22:03:54 -07:00
|
|
|
);
|
2020-12-07 08:43:03 -08:00
|
|
|
abort();
|
2020-09-18 22:03:54 -07:00
|
|
|
}
|
2020-10-15 02:30:33 -07:00
|
|
|
if err.is_file_missing() && !voting_has_been_active {
|
2020-09-18 22:03:54 -07:00
|
|
|
// Currently, don't protect against spoofed snapshots with no tower at all
|
|
|
|
info!(
|
|
|
|
"Ignoring expected failed tower restore because this is the initial \
|
|
|
|
validator start with the vote account..."
|
|
|
|
);
|
|
|
|
} else {
|
|
|
|
error!(
|
|
|
|
"Rebuilding a new tower from the latest vote account due to failed tower restore: {}",
|
|
|
|
err
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
Tower::new_from_bankforks(
|
2021-06-18 06:34:46 -07:00
|
|
|
bank_forks,
|
|
|
|
validator_identity,
|
|
|
|
vote_account,
|
2020-09-18 22:03:54 -07:00
|
|
|
)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2022-03-07 12:04:36 -08:00
|
|
|
#[allow(clippy::type_complexity)]
/// Opens the ledger at `ledger_path` and builds everything replay needs:
/// the genesis config, the blockstore, `BankForks` restored from the latest
/// snapshot (if any), the leader schedule cache, and the optional
/// transaction-history services.
///
/// Ordering in this function matters: the blockstore is mutated
/// (`set_no_compaction`, `shred_timing_point_sender`) before being wrapped in
/// an `Arc`, and the bank-drop callback must be installed while `BankForks`
/// still holds exactly one (root) bank.
///
/// Aborts the process on genesis-hash mismatch or if the ledger database
/// cannot be opened.
fn load_blockstore(
    config: &ValidatorConfig,
    ledger_path: &Path,
    exit: &Arc<AtomicBool>,
    start_progress: &Arc<RwLock<ValidatorStartProgress>>,
    accounts_update_notifier: Option<AccountsUpdateNotifier>,
    transaction_notifier: Option<TransactionNotifierLock>,
    poh_timing_point_sender: Option<PohTimingSender>,
) -> (
    GenesisConfig,
    Arc<RwLock<BankForks>>,
    Arc<Blockstore>,
    Receiver<bool>,
    CompletedSlotsReceiver,
    LeaderScheduleCache,
    Option<StartingSnapshotHashes>,
    TransactionHistoryServices,
    blockstore_processor::ProcessOptions,
    BlockstoreRootScan,
    DroppedSlotsReceiver,
) {
    info!("loading ledger from {:?}...", ledger_path);
    *start_progress.write().unwrap() = ValidatorStartProgress::LoadingLedger;
    let genesis_config = open_genesis_config(ledger_path, config.max_genesis_archive_unpacked_size);

    // This needs to be limited otherwise the state in the VoteAccount data
    // grows too large
    let leader_schedule_slot_offset = genesis_config.epoch_schedule.leader_schedule_slot_offset;
    let slots_per_epoch = genesis_config.epoch_schedule.slots_per_epoch;
    // Ceiling division: number of epochs the leader schedule looks ahead.
    let leader_epoch_offset = (leader_schedule_slot_offset + slots_per_epoch - 1) / slots_per_epoch;
    assert!(leader_epoch_offset <= MAX_LEADER_SCHEDULE_EPOCH_OFFSET);

    let genesis_hash = genesis_config.hash();
    info!("genesis hash: {}", genesis_hash);

    // Refuse to start against the wrong cluster's ledger.
    if let Some(expected_genesis_hash) = config.expected_genesis_hash {
        if genesis_hash != expected_genesis_hash {
            error!("genesis hash mismatch: expected {}", expected_genesis_hash);
            error!("Delete the ledger directory to continue: {:?}", ledger_path);
            abort();
        }
    }

    if !config.no_poh_speed_test {
        check_poh_speed(&genesis_config, None);
    }

    let BlockstoreSignals {
        mut blockstore,
        ledger_signal_receiver,
        completed_slots_receiver,
        ..
    } = Blockstore::open_with_signal(
        ledger_path,
        BlockstoreOptions {
            recovery_mode: config.wal_recovery_mode.clone(),
            column_options: config.ledger_column_options.clone(),
            enforce_ulimit_nofile: config.enforce_ulimit_nofile,
            ..BlockstoreOptions::default()
        },
    )
    .expect("Failed to open ledger database");
    // Mutations must happen before the blockstore is shared via Arc below.
    blockstore.set_no_compaction(config.no_rocksdb_compaction);
    blockstore.shred_timing_point_sender = poh_timing_point_sender;

    let blockstore = Arc::new(blockstore);
    let blockstore_root_scan = BlockstoreRootScan::new(config, &blockstore, exit);
    // If the operator did not request a halt slot, replay up to the highest
    // slot already present in the ledger.
    let halt_at_slot = config.halt_at_slot.or_else(|| highest_slot(&blockstore));

    let process_options = blockstore_processor::ProcessOptions {
        poh_verify: config.poh_verify,
        halt_at_slot,
        new_hard_forks: config.new_hard_forks.clone(),
        debug_keys: config.debug_keys.clone(),
        account_indexes: config.account_indexes.clone(),
        accounts_db_caching_enabled: config.accounts_db_caching_enabled,
        accounts_db_config: config.accounts_db_config.clone(),
        shrink_ratio: config.accounts_shrink_ratio,
        accounts_db_test_hash_calculation: config.accounts_db_test_hash_calculation,
        accounts_db_skip_shrink: config.accounts_db_skip_shrink,
        runtime_config: config.runtime_config.clone(),
        ..blockstore_processor::ProcessOptions::default()
    };

    let enable_rpc_transaction_history =
        config.rpc_addrs.is_some() && config.rpc_config.enable_rpc_transaction_history;
    // A registered transaction notifier plugin also requires the history services.
    let is_plugin_transaction_history_required = transaction_notifier.as_ref().is_some();
    let transaction_history_services =
        if enable_rpc_transaction_history || is_plugin_transaction_history_required {
            initialize_rpc_transaction_history_services(
                blockstore.clone(),
                exit,
                enable_rpc_transaction_history,
                config.rpc_config.enable_extended_tx_metadata_storage,
                transaction_notifier,
            )
        } else {
            TransactionHistoryServices::default()
        };

    let (bank_forks, mut leader_schedule_cache, starting_snapshot_hashes) =
        bank_forks_utils::load_bank_forks(
            &genesis_config,
            &blockstore,
            config.account_paths.clone(),
            config.account_shrink_paths.clone(),
            config.snapshot_config.as_ref(),
            &process_options,
            transaction_history_services
                .cache_block_meta_sender
                .as_ref(),
            accounts_update_notifier,
        );

    // Before replay starts, set the callbacks in each of the banks in BankForks so that
    // all dropped banks come through the `pruned_banks_receiver` channel. This way all bank
    // drop behavior can be safely synchronized with any other ongoing accounts activity like
    // cache flush, clean, shrink, as long as the same thread performing those activities also
    // is processing the dropped banks from the `pruned_banks_receiver` channel.

    // There should only be one bank, the root bank in BankForks. Thus all banks added to
    // BankForks from now on will be descended from the root bank and thus will inherit
    // the bank drop callback.
    assert_eq!(bank_forks.read().unwrap().banks().len(), 1);
    let (pruned_banks_sender, pruned_banks_receiver) = bounded(MAX_DROP_BANK_SIGNAL_QUEUE_SIZE);
    {
        let root_bank = bank_forks.read().unwrap().root_bank();
        root_bank.set_callback(Some(Box::new(
            root_bank
                .rc
                .accounts
                .accounts_db
                .create_drop_bank_callback(pruned_banks_sender),
        )));
    }

    // Log any hard forks recorded in the working bank for operator visibility.
    {
        let hard_forks: Vec<_> = bank_forks
            .read()
            .unwrap()
            .working_bank()
            .hard_forks()
            .read()
            .unwrap()
            .iter()
            .copied()
            .collect();
        if !hard_forks.is_empty() {
            info!("Hard forks: {:?}", hard_forks);
        }
    }

    leader_schedule_cache.set_fixed_leader_schedule(config.fixed_leader_schedule.clone());
    {
        let mut bank_forks = bank_forks.write().unwrap();
        bank_forks.set_snapshot_config(config.snapshot_config.clone());
        bank_forks.set_accounts_hash_interval_slots(config.accounts_hash_interval_slots);
        if let Some(ref shrink_paths) = config.account_shrink_paths {
            bank_forks
                .working_bank()
                .set_shrink_paths(shrink_paths.clone());
        }
    }

    (
        genesis_config,
        bank_forks,
        blockstore,
        ledger_signal_receiver,
        completed_slots_receiver,
        leader_schedule_cache,
        starting_snapshot_hashes,
        transaction_history_services,
        process_options,
        blockstore_root_scan,
        pruned_banks_receiver,
    )
}
|
|
|
|
|
2022-04-19 16:40:22 -07:00
|
|
|
fn highest_slot(blockstore: &Blockstore) -> Option<Slot> {
|
|
|
|
let mut start = Measure::start("Blockstore search for highest slot");
|
|
|
|
let highest_slot = blockstore
|
|
|
|
.slot_meta_iterator(0)
|
|
|
|
.map(|metas| {
|
|
|
|
let slots: Vec<_> = metas.map(|(slot, _)| slot).collect();
|
|
|
|
if slots.is_empty() {
|
|
|
|
println!("Ledger is empty");
|
|
|
|
None
|
|
|
|
} else {
|
|
|
|
let first = slots.first().unwrap();
|
|
|
|
Some(*slots.last().unwrap_or(first))
|
|
|
|
}
|
|
|
|
})
|
|
|
|
.unwrap_or_else(|err| {
|
|
|
|
warn!("Failed to ledger slot meta: {}", err);
|
|
|
|
None
|
|
|
|
});
|
|
|
|
start.stop();
|
|
|
|
info!("{}. Found slot {:?}", start, highest_slot);
|
|
|
|
highest_slot
|
|
|
|
}
|
|
|
|
|
2022-03-23 10:04:58 -07:00
|
|
|
/// Deferred blockstore replay. Bundles every reference needed to replay the
/// blockstore from the root bank and restore the validator's tower; the work
/// itself runs in `process()` (see the `impl`), which caches its result in
/// `tower` so replay happens at most once.
struct ProcessBlockStore<'a> {
    // Validator identity pubkey; used to locate the persisted tower.
    id: &'a Pubkey,
    // Vote account pubkey; consulted when post-processing the restored tower.
    vote_account: &'a Pubkey,
    // Shared startup-progress indicator updated while the ledger is replayed.
    start_progress: &'a Arc<RwLock<ValidatorStartProgress>>,
    blockstore: &'a Blockstore,
    bank_forks: &'a Arc<RwLock<BankForks>>,
    leader_schedule_cache: &'a LeaderScheduleCache,
    process_options: &'a blockstore_processor::ProcessOptions,
    transaction_status_sender: Option<&'a TransactionStatusSender>,
    cache_block_meta_sender: Option<CacheBlockMetaSender>,
    // Taken (set to None) after replay so the scan is joined exactly once.
    blockstore_root_scan: Option<BlockstoreRootScan>,
    accounts_background_request_sender: AbsRequestSender,
    config: &'a ValidatorConfig,
    // Populated by `process()`; `Some` once replay + tower restore completed.
    tower: Option<Tower>,
}
|
|
|
|
|
|
|
|
impl<'a> ProcessBlockStore<'a> {
|
|
|
|
#[allow(clippy::too_many_arguments)]
|
|
|
|
fn new(
|
|
|
|
id: &'a Pubkey,
|
|
|
|
vote_account: &'a Pubkey,
|
|
|
|
start_progress: &'a Arc<RwLock<ValidatorStartProgress>>,
|
|
|
|
blockstore: &'a Blockstore,
|
|
|
|
bank_forks: &'a Arc<RwLock<BankForks>>,
|
|
|
|
leader_schedule_cache: &'a LeaderScheduleCache,
|
|
|
|
process_options: &'a blockstore_processor::ProcessOptions,
|
|
|
|
transaction_status_sender: Option<&'a TransactionStatusSender>,
|
|
|
|
cache_block_meta_sender: Option<CacheBlockMetaSender>,
|
|
|
|
blockstore_root_scan: BlockstoreRootScan,
|
|
|
|
accounts_background_request_sender: AbsRequestSender,
|
|
|
|
config: &'a ValidatorConfig,
|
|
|
|
) -> Self {
|
|
|
|
Self {
|
|
|
|
id,
|
|
|
|
vote_account,
|
|
|
|
start_progress,
|
|
|
|
blockstore,
|
|
|
|
bank_forks,
|
|
|
|
leader_schedule_cache,
|
|
|
|
process_options,
|
|
|
|
transaction_status_sender,
|
|
|
|
cache_block_meta_sender,
|
|
|
|
blockstore_root_scan: Some(blockstore_root_scan),
|
|
|
|
accounts_background_request_sender,
|
|
|
|
config,
|
|
|
|
tower: None,
|
|
|
|
}
|
2022-04-19 16:40:22 -07:00
|
|
|
}
|
2022-03-15 20:53:46 -07:00
|
|
|
|
2022-03-23 10:04:58 -07:00
|
|
|
fn process(&mut self) {
|
|
|
|
if self.tower.is_none() {
|
|
|
|
let previous_start_process = *self.start_progress.read().unwrap();
|
|
|
|
*self.start_progress.write().unwrap() = ValidatorStartProgress::LoadingLedger;
|
|
|
|
|
|
|
|
/*
|
|
|
|
#[allow(clippy::too_many_arguments)]
|
|
|
|
fn process_blockstore(
|
|
|
|
blockstore: &Blockstore,
|
|
|
|
bank_forks: &Arc<RwLock<BankForks>>,
|
|
|
|
leader_schedule_cache: &LeaderScheduleCache,
|
|
|
|
process_options: &blockstore_processor::ProcessOptions,
|
|
|
|
transaction_status_sender: Option<&TransactionStatusSender>,
|
|
|
|
cache_block_meta_sender: Option<&CacheBlockMetaSender>,
|
|
|
|
blockstore_root_scan: BlockstoreRootScan,
|
|
|
|
accounts_background_request_sender: &AbsRequestSender,
|
|
|
|
start_progress: &Arc<RwLock<ValidatorStartProgress>>,
|
|
|
|
) {
|
|
|
|
*/
|
|
|
|
let exit = Arc::new(AtomicBool::new(false));
|
|
|
|
if let Some(max_slot) = highest_slot(self.blockstore) {
|
|
|
|
let bank_forks = self.bank_forks.clone();
|
|
|
|
let exit = exit.clone();
|
|
|
|
let start_progress = self.start_progress.clone();
|
|
|
|
|
|
|
|
let _ = std::thread::spawn(move || {
|
|
|
|
while !exit.load(Ordering::Relaxed) {
|
|
|
|
let slot = bank_forks.read().unwrap().working_bank().slot();
|
|
|
|
*start_progress.write().unwrap() =
|
|
|
|
ValidatorStartProgress::ProcessingLedger { slot, max_slot };
|
|
|
|
sleep(Duration::from_secs(2));
|
|
|
|
}
|
|
|
|
});
|
|
|
|
}
|
|
|
|
blockstore_processor::process_blockstore_from_root(
|
|
|
|
self.blockstore,
|
|
|
|
self.bank_forks,
|
|
|
|
self.leader_schedule_cache,
|
|
|
|
self.process_options,
|
|
|
|
self.transaction_status_sender,
|
|
|
|
self.cache_block_meta_sender.as_ref(),
|
|
|
|
&self.accounts_background_request_sender,
|
|
|
|
)
|
|
|
|
.unwrap_or_else(|err| {
|
|
|
|
error!("Failed to load ledger: {:?}", err);
|
|
|
|
abort()
|
|
|
|
});
|
|
|
|
|
|
|
|
exit.store(true, Ordering::Relaxed);
|
|
|
|
|
|
|
|
if let Some(blockstore_root_scan) = self.blockstore_root_scan.take() {
|
|
|
|
blockstore_root_scan.join();
|
|
|
|
}
|
|
|
|
|
|
|
|
self.tower = Some({
|
|
|
|
let restored_tower = Tower::restore(self.config.tower_storage.as_ref(), self.id);
|
|
|
|
if let Ok(tower) = &restored_tower {
|
|
|
|
reconcile_blockstore_roots_with_tower(tower, self.blockstore).unwrap_or_else(
|
|
|
|
|err| {
|
|
|
|
error!("Failed to reconcile blockstore with tower: {:?}", err);
|
|
|
|
abort()
|
|
|
|
},
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
post_process_restored_tower(
|
|
|
|
restored_tower,
|
|
|
|
self.id,
|
|
|
|
self.vote_account,
|
|
|
|
self.config,
|
|
|
|
&self.bank_forks.read().unwrap(),
|
|
|
|
)
|
|
|
|
});
|
2022-04-19 16:40:22 -07:00
|
|
|
|
2022-03-23 10:04:58 -07:00
|
|
|
*self.start_progress.write().unwrap() = previous_start_process;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl<'a> From<ProcessBlockStore<'a>> for Tower {
    /// Consumes the loader, running blockstore replay if it has not already
    /// run, and yields the restored tower.
    fn from(mut loader: ProcessBlockStore<'a>) -> Self {
        loader.process();
        let tower = loader.tower;
        tower.expect("valid tower")
    }
}
|
|
|
|
|
2022-03-14 11:12:03 -07:00
|
|
|
fn maybe_warp_slot(
|
|
|
|
config: &ValidatorConfig,
|
2022-03-23 10:04:58 -07:00
|
|
|
process_blockstore: &mut ProcessBlockStore,
|
2022-03-14 11:12:03 -07:00
|
|
|
ledger_path: &Path,
|
2022-03-04 01:52:22 -08:00
|
|
|
bank_forks: &RwLock<BankForks>,
|
2022-03-14 11:12:03 -07:00
|
|
|
leader_schedule_cache: &LeaderScheduleCache,
|
|
|
|
) {
|
2021-01-21 18:34:51 -08:00
|
|
|
if let Some(warp_slot) = config.warp_slot {
|
|
|
|
let snapshot_config = config.snapshot_config.as_ref().unwrap_or_else(|| {
|
|
|
|
error!("warp slot requires a snapshot config");
|
|
|
|
abort();
|
|
|
|
});
|
|
|
|
|
2022-03-23 10:04:58 -07:00
|
|
|
process_blockstore.process();
|
|
|
|
|
2022-03-04 01:52:22 -08:00
|
|
|
let mut bank_forks = bank_forks.write().unwrap();
|
|
|
|
|
2021-01-21 18:34:51 -08:00
|
|
|
let working_bank = bank_forks.working_bank();
|
|
|
|
|
|
|
|
if warp_slot <= working_bank.slot() {
|
|
|
|
error!(
|
|
|
|
"warp slot ({}) cannot be less than the working bank slot ({})",
|
|
|
|
warp_slot,
|
|
|
|
working_bank.slot()
|
|
|
|
);
|
|
|
|
abort();
|
|
|
|
}
|
|
|
|
info!("warping to slot {}", warp_slot);
|
|
|
|
|
2022-03-04 01:52:22 -08:00
|
|
|
let root_bank = bank_forks.root_bank();
|
2021-01-21 18:34:51 -08:00
|
|
|
bank_forks.insert(Bank::warp_from_parent(
|
2022-03-04 01:52:22 -08:00
|
|
|
&root_bank,
|
2021-01-21 18:34:51 -08:00
|
|
|
&Pubkey::default(),
|
|
|
|
warp_slot,
|
|
|
|
));
|
|
|
|
bank_forks.set_root(
|
|
|
|
warp_slot,
|
2021-02-18 23:42:09 -08:00
|
|
|
&solana_runtime::accounts_background_service::AbsRequestSender::default(),
|
2021-01-21 18:34:51 -08:00
|
|
|
Some(warp_slot),
|
|
|
|
);
|
|
|
|
leader_schedule_cache.set_root(&bank_forks.root_bank());
|
|
|
|
|
2021-08-06 18:16:06 -07:00
|
|
|
let full_snapshot_archive_info = snapshot_utils::bank_to_full_snapshot_archive(
|
2021-01-21 18:34:51 -08:00
|
|
|
ledger_path,
|
|
|
|
&bank_forks.root_bank(),
|
|
|
|
None,
|
2022-05-10 13:37:41 -07:00
|
|
|
&snapshot_config.full_snapshot_archives_dir,
|
|
|
|
&snapshot_config.incremental_snapshot_archives_dir,
|
2021-01-21 18:34:51 -08:00
|
|
|
snapshot_config.archive_format,
|
2021-09-03 09:28:10 -07:00
|
|
|
snapshot_config.maximum_full_snapshot_archives_to_retain,
|
2021-09-06 16:01:56 -07:00
|
|
|
snapshot_config.maximum_incremental_snapshot_archives_to_retain,
|
2021-01-21 18:34:51 -08:00
|
|
|
)
|
|
|
|
.unwrap_or_else(|err| {
|
|
|
|
error!("Unable to create snapshot: {}", err);
|
|
|
|
abort();
|
|
|
|
});
|
2021-08-06 18:16:06 -07:00
|
|
|
info!(
|
|
|
|
"created snapshot: {}",
|
|
|
|
full_snapshot_archive_info.path().display()
|
|
|
|
);
|
2021-01-21 18:34:51 -08:00
|
|
|
}
|
2019-02-16 19:58:07 -08:00
|
|
|
}
|
|
|
|
|
2020-11-12 14:01:13 -08:00
|
|
|
fn blockstore_contains_bad_shred_version(
|
|
|
|
blockstore: &Blockstore,
|
|
|
|
start_slot: Slot,
|
|
|
|
shred_version: u16,
|
|
|
|
) -> bool {
|
|
|
|
let now = Instant::now();
|
2020-06-23 14:29:07 -07:00
|
|
|
// Search for shreds with incompatible version in blockstore
|
2020-06-22 20:27:25 -07:00
|
|
|
if let Ok(slot_meta_iterator) = blockstore.slot_meta_iterator(start_slot) {
|
2020-11-12 14:01:13 -08:00
|
|
|
info!("Searching for incorrect shreds..");
|
2020-06-23 14:29:07 -07:00
|
|
|
for (slot, _meta) in slot_meta_iterator {
|
|
|
|
if let Ok(shreds) = blockstore.get_data_shreds_for_slot(slot, 0) {
|
|
|
|
for shred in &shreds {
|
|
|
|
if shred.version() != shred_version {
|
2020-11-12 14:01:13 -08:00
|
|
|
return true;
|
2020-06-23 14:29:07 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-11-12 14:01:13 -08:00
|
|
|
if now.elapsed().as_secs() > 60 {
|
|
|
|
info!("Didn't find incorrect shreds after 60 seconds, aborting");
|
|
|
|
return false;
|
|
|
|
}
|
2020-06-23 14:29:07 -07:00
|
|
|
}
|
|
|
|
}
|
2020-11-12 14:01:13 -08:00
|
|
|
false
|
|
|
|
}
|
|
|
|
|
|
|
|
/// If the ledger holds shreds with a version other than `shred_version`
/// (from `start_slot` onward), copies those slots into a fresh backup
/// blockstore under the ledger directory, then purges and compacts them from
/// the live blockstore.
///
/// Opens its own `Blockstore` handle, so it must run before any other handle
/// holds the ledger. Backup failures are best-effort (insert errors ignored);
/// the purge still proceeds.
fn backup_and_clear_blockstore(ledger_path: &Path, start_slot: Slot, shred_version: u16) {
    let blockstore = Blockstore::open(ledger_path).unwrap();
    let do_copy_and_clear =
        blockstore_contains_bad_shred_version(&blockstore, start_slot, shred_version);

    // If found, then copy shreds to another db and clear from start_slot
    if do_copy_and_clear {
        // Random suffix avoids clobbering a previous backup directory.
        let folder_name = format!("backup_rocksdb_{}", thread_rng().gen_range(0, 99999));
        let backup_blockstore = Blockstore::open(&ledger_path.join(folder_name));
        let mut last_print = Instant::now();
        let mut copied = 0;
        let mut last_slot = None;
        let slot_meta_iterator = blockstore.slot_meta_iterator(start_slot).unwrap();
        for (slot, _meta) in slot_meta_iterator {
            if let Ok(shreds) = blockstore.get_data_shreds_for_slot(slot, 0) {
                if let Ok(ref backup_blockstore) = backup_blockstore {
                    copied += shreds.len();
                    // Best-effort backup; an insert failure is not fatal.
                    let _ = backup_blockstore.insert_shreds(shreds, None, true);
                }
            }
            // Progress log at most every ~3 seconds.
            if last_print.elapsed().as_millis() > 3000 {
                info!(
                    "Copying shreds from slot {} copied {} so far.",
                    start_slot, copied
                );
                last_print = Instant::now();
            }
            last_slot = Some(slot);
        }

        // `do_copy_and_clear` implies at least one slot meta existed above, so
        // the iterator yielded at least one slot here as well.
        let end_slot = last_slot.unwrap();
        info!("Purging slots {} to {}", start_slot, end_slot);
        blockstore.purge_from_next_slots(start_slot, end_slot);
        blockstore.purge_slots(start_slot, end_slot, PurgeType::Exact);
        info!("Purging done, compacting db..");
        if let Err(e) = blockstore.compact_storage(start_slot, end_slot) {
            warn!(
                "Error from compacting storage from {} to {}: {:?}",
                start_slot, end_slot, e
            );
        }
        info!("done");
    }
    // Release the blockstore handle before the validator opens its own.
    drop(blockstore);
}
|
|
|
|
|
2020-07-14 20:14:48 -07:00
|
|
|
fn initialize_rpc_transaction_history_services(
|
|
|
|
blockstore: Arc<Blockstore>,
|
|
|
|
exit: &Arc<AtomicBool>,
|
2021-11-23 09:55:53 -08:00
|
|
|
enable_rpc_transaction_history: bool,
|
2022-03-22 15:17:05 -07:00
|
|
|
enable_extended_tx_metadata_storage: bool,
|
2021-11-23 09:55:53 -08:00
|
|
|
transaction_notifier: Option<TransactionNotifierLock>,
|
2020-07-14 20:14:48 -07:00
|
|
|
) -> TransactionHistoryServices {
|
2021-03-26 15:47:35 -07:00
|
|
|
let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root()));
|
2020-07-14 20:14:48 -07:00
|
|
|
let (transaction_status_sender, transaction_status_receiver) = unbounded();
|
2021-02-01 13:00:51 -08:00
|
|
|
let transaction_status_sender = Some(TransactionStatusSender {
|
|
|
|
sender: transaction_status_sender,
|
|
|
|
});
|
2020-07-14 20:14:48 -07:00
|
|
|
let transaction_status_service = Some(TransactionStatusService::new(
|
|
|
|
transaction_status_receiver,
|
2021-03-26 15:47:35 -07:00
|
|
|
max_complete_transaction_status_slot.clone(),
|
2021-11-23 09:55:53 -08:00
|
|
|
enable_rpc_transaction_history,
|
|
|
|
transaction_notifier.clone(),
|
2020-07-14 20:14:48 -07:00
|
|
|
blockstore.clone(),
|
2022-03-22 15:17:05 -07:00
|
|
|
enable_extended_tx_metadata_storage,
|
2020-07-14 20:14:48 -07:00
|
|
|
exit,
|
|
|
|
));
|
|
|
|
|
|
|
|
let (rewards_recorder_sender, rewards_receiver) = unbounded();
|
|
|
|
let rewards_recorder_sender = Some(rewards_recorder_sender);
|
|
|
|
let rewards_recorder_service = Some(RewardsRecorderService::new(
|
|
|
|
rewards_receiver,
|
2020-09-09 08:33:14 -07:00
|
|
|
blockstore.clone(),
|
|
|
|
exit,
|
|
|
|
));
|
|
|
|
|
2021-05-26 21:16:16 -07:00
|
|
|
let (cache_block_meta_sender, cache_block_meta_receiver) = unbounded();
|
|
|
|
let cache_block_meta_sender = Some(cache_block_meta_sender);
|
|
|
|
let cache_block_meta_service = Some(CacheBlockMetaService::new(
|
|
|
|
cache_block_meta_receiver,
|
2020-07-14 20:14:48 -07:00
|
|
|
blockstore,
|
|
|
|
exit,
|
|
|
|
));
|
|
|
|
TransactionHistoryServices {
|
|
|
|
transaction_status_sender,
|
|
|
|
transaction_status_service,
|
2021-03-26 15:47:35 -07:00
|
|
|
max_complete_transaction_status_slot,
|
2020-07-14 20:14:48 -07:00
|
|
|
rewards_recorder_sender,
|
|
|
|
rewards_recorder_service,
|
2021-05-26 21:16:16 -07:00
|
|
|
cache_block_meta_sender,
|
|
|
|
cache_block_meta_service,
|
2020-07-14 20:14:48 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-25 18:54:51 -07:00
|
|
|
/// Unrecoverable conditions detected during startup (see
/// `wait_for_supermajority`); the caller is expected to exit on these.
#[derive(Debug, PartialEq)]
enum ValidatorError {
    // The working bank's hash did not match the configured expected bank hash.
    BadExpectedBankHash,
    // The local ledger has not reached the configured wait-for-supermajority
    // slot; a snapshot fetch is required.
    NotEnoughLedgerData,
}
|
|
|
|
|
|
|
|
// Return if the validator waited on other nodes to start. In this case
|
|
|
|
// it should not wait for one of its votes to land to produce blocks
|
|
|
|
// because if the whole network is waiting, then it will stall.
|
|
|
|
//
|
|
|
|
// Error indicates that a bad hash was encountered or another condition
|
|
|
|
// that is unrecoverable and the validator should exit.
|
2020-05-28 12:22:19 -07:00
|
|
|
/// Optionally blocks startup until a supermajority of stake is visible in
/// gossip at `config.wait_for_supermajority`'s slot.
///
/// Returns `Ok(false)` when no wait was configured (or the wait slot is
/// already behind the working bank), `Ok(true)` after the wait completed,
/// and `Err` for unrecoverable conditions (bank-hash mismatch, insufficient
/// ledger data).
fn wait_for_supermajority(
    config: &ValidatorConfig,
    process_blockstore: Option<&mut ProcessBlockStore>,
    bank_forks: &RwLock<BankForks>,
    cluster_info: &ClusterInfo,
    rpc_override_health_check: Arc<AtomicBool>,
    start_progress: &Arc<RwLock<ValidatorStartProgress>>,
) -> Result<bool, ValidatorError> {
    match config.wait_for_supermajority {
        None => Ok(false),
        Some(wait_for_supermajority) => {
            // Replay must finish so the working bank reflects the ledger tip.
            if let Some(process_blockstore) = process_blockstore {
                process_blockstore.process();
            }

            let bank = bank_forks.read().unwrap().working_bank();
            match wait_for_supermajority.cmp(&bank.slot()) {
                // Wait slot already passed: nothing to wait for.
                std::cmp::Ordering::Less => return Ok(false),
                std::cmp::Ordering::Greater => {
                    error!(
                        "Ledger does not have enough data to wait for supermajority, \
                             please enable snapshot fetch. Has {} needs {}",
                        bank.slot(),
                        wait_for_supermajority
                    );
                    return Err(ValidatorError::NotEnoughLedgerData);
                }
                _ => {}
            }

            if let Some(expected_bank_hash) = config.expected_bank_hash {
                if bank.hash() != expected_bank_hash {
                    error!(
                        "Bank hash({}) does not match expected value: {}",
                        bank.hash(),
                        expected_bank_hash
                    );
                    return Err(ValidatorError::BadExpectedBankHash);
                }
            }

            *start_progress.write().unwrap() = ValidatorStartProgress::WaitingForSupermajority;
            // Poll gossip about once per second; log every 10th iteration.
            for i in 1.. {
                if i % 10 == 1 {
                    info!(
                        "Waiting for {}% of activated stake at slot {} to be in gossip...",
                        WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT,
                        bank.slot()
                    );
                }

                let gossip_stake_percent =
                    get_stake_percent_in_gossip(&bank, cluster_info, i % 10 == 0);

                if gossip_stake_percent >= WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT {
                    info!(
                        "Supermajority reached, {}% active stake detected, starting up now.",
                        gossip_stake_percent,
                    );
                    break;
                }
                // The normal RPC health checks don't apply as the node is waiting, so feign health to
                // prevent load balancers from removing the node from their list of candidates during a
                // manual restart.
                rpc_override_health_check.store(true, Ordering::Relaxed);
                sleep(Duration::new(1, 0));
            }
            rpc_override_health_check.store(false, Ordering::Relaxed);
            Ok(true)
        }
    }
}
|
|
|
|
|
2020-01-07 13:18:34 -08:00
|
|
|
// Get the activated stake percentage (based on the provided bank) that is visible in gossip
|
2020-04-21 12:54:45 -07:00
|
|
|
// Get the activated stake percentage (based on the provided bank) that is visible in gossip
//
// Classifies each staked vote account's node as online (recent gossip contact
// info with a matching shred version), wrong-shred-version, or offline, and
// returns the online percentage (truncated to an integer). When `log` is
// true, per-category breakdowns are logged.
fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: bool) -> u64 {
    let mut online_stake = 0;
    let mut wrong_shred_stake = 0;
    let mut wrong_shred_nodes = vec![];
    let mut offline_stake = 0;
    let mut offline_nodes = vec![];

    let mut total_activated_stake = 0;
    let now = timestamp();
    // Nodes contact infos are saved to disk and restored on validator startup.
    // Staked nodes entries will not expire until an epoch after. So it
    // is necessary here to filter for recent entries to establish liveness.
    let peers: HashMap<_, _> = cluster_info
        .all_tvu_peers()
        .into_iter()
        .filter(|node| {
            let age = now.saturating_sub(node.wallclock);
            // Contact infos are refreshed twice during this period.
            age < CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS
        })
        .map(|node| (node.id, node))
        .collect();
    let my_shred_version = cluster_info.my_shred_version();
    let my_id = cluster_info.id();

    for (activated_stake, vote_account) in bank.vote_accounts().values() {
        let activated_stake = *activated_stake;
        total_activated_stake += activated_stake;

        // Zero-stake accounts cannot contribute to any category.
        if activated_stake == 0 {
            continue;
        }
        let vote_state_node_pubkey = vote_account
            .vote_state()
            .as_ref()
            .map(|vote_state| vote_state.node_pubkey)
            .unwrap_or_default();

        if let Some(peer) = peers.get(&vote_state_node_pubkey) {
            if peer.shred_version == my_shred_version {
                trace!(
                    "observed {} in gossip, (activated_stake={})",
                    vote_state_node_pubkey,
                    activated_stake
                );
                online_stake += activated_stake;
            } else {
                wrong_shred_stake += activated_stake;
                wrong_shred_nodes.push((activated_stake, vote_state_node_pubkey));
            }
        } else if vote_state_node_pubkey == my_id {
            online_stake += activated_stake; // This node is online
        } else {
            offline_stake += activated_stake;
            offline_nodes.push((activated_stake, vote_state_node_pubkey));
        }
    }

    let online_stake_percentage = (online_stake as f64 / total_activated_stake as f64) * 100.;
    if log {
        info!(
            "{:.3}% of active stake visible in gossip",
            online_stake_percentage
        );

        if !wrong_shred_nodes.is_empty() {
            info!(
                "{:.3}% of active stake has the wrong shred version in gossip",
                (wrong_shred_stake as f64 / total_activated_stake as f64) * 100.,
            );
            for (stake, identity) in wrong_shred_nodes {
                info!(
                    " {:.3}% - {}",
                    (stake as f64 / total_activated_stake as f64) * 100.,
                    identity
                );
            }
        }

        if !offline_nodes.is_empty() {
            info!(
                "{:.3}% of active stake is not visible in gossip",
                (offline_stake as f64 / total_activated_stake as f64) * 100.
            );
            for (stake, identity) in offline_nodes {
                info!(
                    " {:.3}% - {}",
                    (stake as f64 / total_activated_stake as f64) * 100.,
                    identity
                );
            }
        }
    }

    online_stake_percentage as u64
}
|
|
|
|
|
2020-07-07 09:41:45 -07:00
|
|
|
// Cleanup anything that looks like an accounts append-vec
|
|
|
|
fn cleanup_accounts_path(account_path: &std::path::Path) {
|
2021-02-22 04:24:09 -08:00
|
|
|
if let Err(e) = std::fs::remove_dir_all(account_path) {
|
2020-07-15 09:37:40 -07:00
|
|
|
warn!(
|
2021-02-22 04:24:09 -08:00
|
|
|
"encountered error removing accounts path: {:?}: {}",
|
|
|
|
account_path, e
|
2020-07-15 09:37:40 -07:00
|
|
|
);
|
2020-07-07 09:41:45 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-09-07 07:31:54 -07:00
|
|
|
pub fn is_snapshot_config_valid(
|
2021-09-10 13:59:26 -07:00
|
|
|
full_snapshot_interval_slots: Slot,
|
|
|
|
incremental_snapshot_interval_slots: Slot,
|
|
|
|
accounts_hash_interval_slots: Slot,
|
2020-12-08 23:18:27 -08:00
|
|
|
) -> bool {
|
2021-09-10 13:59:26 -07:00
|
|
|
// if full snapshot interval is MAX, that means snapshots are turned off, so yes, valid
|
|
|
|
if full_snapshot_interval_slots == Slot::MAX {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
let is_incremental_config_valid = if incremental_snapshot_interval_slots == Slot::MAX {
|
|
|
|
true
|
|
|
|
} else {
|
|
|
|
incremental_snapshot_interval_slots >= accounts_hash_interval_slots
|
|
|
|
&& incremental_snapshot_interval_slots % accounts_hash_interval_slots == 0
|
|
|
|
&& full_snapshot_interval_slots > incremental_snapshot_interval_slots
|
|
|
|
};
|
|
|
|
|
|
|
|
full_snapshot_interval_slots >= accounts_hash_interval_slots
|
|
|
|
&& full_snapshot_interval_slots % accounts_hash_interval_slots == 0
|
|
|
|
&& is_incremental_config_valid
|
2020-12-08 23:18:27 -08:00
|
|
|
}
|
|
|
|
|
2018-07-02 15:24:40 -07:00
|
|
|
#[cfg(test)]
mod tests {
    //! Unit tests for validator startup/shutdown, blockstore backup-and-clear,
    //! supermajority waiting, snapshot interval validation, and PoH speed checks.
    use {
        super::*,
        crossbeam_channel::{bounded, RecvTimeoutError},
        solana_ledger::{create_new_tmp_ledger, genesis_utils::create_genesis_config_with_leader},
        solana_sdk::{genesis_config::create_genesis_config, poh_config::PohConfig},
        std::{fs::remove_dir_all, thread, time::Duration},
    };

    // Starts a single validator against a temporary ledger, verifies it reaches
    // the `Running` start-progress state, then closes it with a 30s watchdog.
    #[test]
    fn validator_exit() {
        solana_logger::setup();
        let leader_keypair = Keypair::new();
        let leader_node = Node::new_localhost_with_pubkey(&leader_keypair.pubkey());

        let validator_keypair = Keypair::new();
        let validator_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey());
        let genesis_config =
            create_genesis_config_with_leader(10_000, &leader_keypair.pubkey(), 1000)
                .genesis_config;
        let (validator_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);

        let voting_keypair = Arc::new(Keypair::new());
        let config = ValidatorConfig {
            rpc_addrs: Some((validator_node.info.rpc, validator_node.info.rpc_pubsub)),
            ..ValidatorConfig::default_for_test()
        };
        let start_progress = Arc::new(RwLock::new(ValidatorStartProgress::default()));
        let validator = Validator::new(
            validator_node,
            Arc::new(validator_keypair),
            &validator_ledger_path,
            &voting_keypair.pubkey(),
            Arc::new(RwLock::new(vec![voting_keypair.clone()])),
            vec![leader_node.info],
            &config,
            true, // should_check_duplicate_instance
            start_progress.clone(),
            SocketAddrSpace::Unspecified,
            false, // use_quic
        );
        // Validator::new is expected to leave the shared progress marker at Running.
        assert_eq!(
            *start_progress.read().unwrap(),
            ValidatorStartProgress::Running
        );

        // spawn a new thread to wait for validator close
        let (sender, receiver) = bounded(0);
        let _ = thread::spawn(move || {
            validator.close();
            sender.send(()).unwrap();
        });

        // exit can deadlock. put an upper-bound on how long we wait for it
        let timeout = Duration::from_secs(30);
        if let Err(RecvTimeoutError::Timeout) = receiver.recv_timeout(timeout) {
            panic!("timeout for closing validator");
        }

        remove_dir_all(validator_ledger_path).unwrap();
    }

    // Fills a temporary blockstore with test shreds for slots 1..10, runs
    // backup_and_clear_blockstore from slot 5, and verifies slots < 5 are kept
    // while slots >= 5 are emptied.
    #[test]
    fn test_backup_and_clear_blockstore() {
        use std::time::Instant;
        solana_logger::setup();
        use {
            solana_entry::entry,
            solana_ledger::{blockstore, get_tmp_ledger_path},
        };
        let blockstore_path = get_tmp_ledger_path!();
        {
            let blockstore = Blockstore::open(&blockstore_path).unwrap();

            let entries = entry::create_ticks(1, 0, Hash::default());

            info!("creating shreds");
            let mut last_print = Instant::now();
            // Insert shreds for slots 1..10, each chained to its parent (i - 1).
            for i in 1..10 {
                let shreds = blockstore::entries_to_test_shreds(&entries, i, i - 1, true, 1);
                blockstore.insert_shreds(shreds, None, true).unwrap();
                // Progress logging only if insertion is unusually slow (>5s).
                if last_print.elapsed().as_millis() > 5000 {
                    info!("inserted {}", i);
                    last_print = Instant::now();
                }
            }
            drop(blockstore);

            // this purges and compacts all slots greater than or equal to 5
            // NOTE(review): the trailing `2` is presumably the expected shred
            // version — confirm against backup_and_clear_blockstore's signature.
            backup_and_clear_blockstore(&blockstore_path, 5, 2);

            let blockstore = Blockstore::open(&blockstore_path).unwrap();
            // assert that slots less than 5 aren't affected
            assert!(blockstore.meta(4).unwrap().unwrap().next_slots.is_empty());
            // Slots >= 5 must have had their data shreds cleared.
            for i in 5..10 {
                assert!(blockstore
                    .get_data_shreds_for_slot(i, 0)
                    .unwrap()
                    .is_empty());
            }
        }
    }

    // Starts two validators, signals exit on both, then joins them from a
    // helper thread with a 30s watchdog to catch shutdown deadlocks.
    #[test]
    fn validator_parallel_exit() {
        let leader_keypair = Keypair::new();
        let leader_node = Node::new_localhost_with_pubkey(&leader_keypair.pubkey());

        let mut ledger_paths = vec![];
        let mut validators: Vec<Validator> = (0..2)
            .map(|_| {
                let validator_keypair = Keypair::new();
                let validator_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey());
                let genesis_config =
                    create_genesis_config_with_leader(10_000, &leader_keypair.pubkey(), 1000)
                        .genesis_config;
                let (validator_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
                // Remember each ledger path so it can be removed at the end.
                ledger_paths.push(validator_ledger_path.clone());
                let vote_account_keypair = Keypair::new();
                let config = ValidatorConfig {
                    rpc_addrs: Some((validator_node.info.rpc, validator_node.info.rpc_pubsub)),
                    ..ValidatorConfig::default_for_test()
                };
                Validator::new(
                    validator_node,
                    Arc::new(validator_keypair),
                    &validator_ledger_path,
                    &vote_account_keypair.pubkey(),
                    Arc::new(RwLock::new(vec![Arc::new(vote_account_keypair)])),
                    vec![leader_node.info.clone()],
                    &config,
                    true, // should_check_duplicate_instance
                    Arc::new(RwLock::new(ValidatorStartProgress::default())),
                    SocketAddrSpace::Unspecified,
                    false, // use_quic
                )
            })
            .collect();

        // Each validator can exit in parallel to speed many sequential calls to `join()`
        validators.iter_mut().for_each(|v| v.exit());

        // spawn a new thread to wait for the join of the validators
        let (sender, receiver) = bounded(0);
        let _ = thread::spawn(move || {
            validators.into_iter().for_each(|validator| {
                validator.join();
            });
            sender.send(()).unwrap();
        });

        // timeout of 30s for shutting down the validators
        let timeout = Duration::from_secs(30);
        if let Err(RecvTimeoutError::Timeout) = receiver.recv_timeout(timeout) {
            panic!("timeout for shutting down validators",);
        }

        for path in ledger_paths {
            remove_dir_all(path).unwrap();
        }
    }

    // Exercises wait_for_supermajority across four configurations: disabled,
    // wait slot ahead of the bank, wait slot behind the bank, and a matching
    // slot with a mismatched expected bank hash.
    #[test]
    fn test_wait_for_supermajority() {
        solana_logger::setup();
        use solana_sdk::hash::hash;
        let node_keypair = Arc::new(Keypair::new());
        let cluster_info = ClusterInfo::new(
            ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
            node_keypair,
            SocketAddrSpace::Unspecified,
        );

        let (genesis_config, _mint_keypair) = create_genesis_config(1);
        let bank_forks = RwLock::new(BankForks::new(Bank::new_for_tests(&genesis_config)));
        let mut config = ValidatorConfig::default_for_test();
        let rpc_override_health_check = Arc::new(AtomicBool::new(false));
        let start_progress = Arc::new(RwLock::new(ValidatorStartProgress::default()));

        // No wait_for_supermajority configured: returns Ok(false) (no wait).
        assert!(!wait_for_supermajority(
            &config,
            None,
            &bank_forks,
            &cluster_info,
            rpc_override_health_check.clone(),
            &start_progress,
        )
        .unwrap());

        // bank=0, wait=1, should fail
        config.wait_for_supermajority = Some(1);
        assert_eq!(
            wait_for_supermajority(
                &config,
                None,
                &bank_forks,
                &cluster_info,
                rpc_override_health_check.clone(),
                &start_progress,
            ),
            Err(ValidatorError::NotEnoughLedgerData)
        );

        // bank=1, wait=0, should pass, bank is past the wait slot
        let bank_forks = RwLock::new(BankForks::new(Bank::new_from_parent(
            &bank_forks.read().unwrap().root_bank(),
            &Pubkey::default(),
            1,
        )));
        config.wait_for_supermajority = Some(0);
        assert!(!wait_for_supermajority(
            &config,
            None,
            &bank_forks,
            &cluster_info,
            rpc_override_health_check.clone(),
            &start_progress,
        )
        .unwrap());

        // bank=1, wait=1, equal, but bad hash provided
        config.wait_for_supermajority = Some(1);
        config.expected_bank_hash = Some(hash(&[1]));
        assert_eq!(
            wait_for_supermajority(
                &config,
                None,
                &bank_forks,
                &cluster_info,
                rpc_override_health_check,
                &start_progress,
            ),
            Err(ValidatorError::BadExpectedBankHash)
        );
    }

    // Positive and negative cases for is_snapshot_config_valid: valid multiples,
    // disabled (Slot::MAX) intervals, then a battery of invalid combinations
    // (zero, non-multiples, incremental >= full).
    #[test]
    fn test_interval_check() {
        assert!(is_snapshot_config_valid(300, 200, 100));

        let default_accounts_hash_interval =
            snapshot_utils::DEFAULT_INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS;
        assert!(is_snapshot_config_valid(
            snapshot_utils::DEFAULT_FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS,
            snapshot_utils::DEFAULT_INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS,
            default_accounts_hash_interval,
        ));

        assert!(is_snapshot_config_valid(
            Slot::MAX,
            snapshot_utils::DEFAULT_INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS,
            default_accounts_hash_interval
        ));
        assert!(is_snapshot_config_valid(
            snapshot_utils::DEFAULT_FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS,
            Slot::MAX,
            default_accounts_hash_interval
        ));
        assert!(is_snapshot_config_valid(
            snapshot_utils::DEFAULT_INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS,
            Slot::MAX,
            default_accounts_hash_interval
        ));

        assert!(!is_snapshot_config_valid(0, 100, 100));
        assert!(!is_snapshot_config_valid(100, 0, 100));
        assert!(!is_snapshot_config_valid(42, 100, 100));
        assert!(!is_snapshot_config_valid(100, 42, 100));
        assert!(!is_snapshot_config_valid(100, 100, 100));
        assert!(!is_snapshot_config_valid(100, 200, 100));
        assert!(!is_snapshot_config_valid(444, 200, 100));
        assert!(!is_snapshot_config_valid(400, 222, 100));
    }

    // check_poh_speed must panic when the configured hash rate (100x the
    // default hashes per tick) cannot be met within the target tick duration.
    #[test]
    #[should_panic]
    fn test_poh_speed() {
        solana_logger::setup();
        let poh_config = PohConfig {
            target_tick_duration: Duration::from_millis(solana_sdk::clock::MS_PER_TICK),
            // make PoH rate really fast to cause the panic condition
            hashes_per_tick: Some(100 * solana_sdk::clock::DEFAULT_HASHES_PER_TICK),
            ..PohConfig::default()
        };
        let genesis_config = GenesisConfig {
            poh_config,
            ..GenesisConfig::default()
        };
        check_poh_speed(&genesis_config, Some(10_000));
    }

    // With hashes_per_tick unset, check_poh_speed must complete without panicking.
    #[test]
    fn test_poh_speed_no_hashes_per_tick() {
        let poh_config = PohConfig {
            target_tick_duration: Duration::from_millis(solana_sdk::clock::MS_PER_TICK),
            hashes_per_tick: None,
            ..PohConfig::default()
        };
        let genesis_config = GenesisConfig {
            poh_config,
            ..GenesisConfig::default()
        };
        check_poh_speed(&genesis_config, Some(10_000));
    }
}
|