refactor core to create repair module (#32303)

Jeff Biseda authored 2023-07-05 12:20:46 -07:00, committed by GitHub
parent c2e45773af
commit bad5197cb0
25 changed files with 123 additions and 104 deletions
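The hunks below move the repair-related modules from the crate root into a new core/src/repair/ module, so in-crate paths gain a repair:: segment. A minimal before/after sketch, illustrative only, using two items that appear in the hunks below:

// Before this commit: repair modules lived at the crate root.
use crate::{repair_service::RepairService, serve_repair::ShredRepairType};

// After this commit: the same items are reached through the new repair module.
use crate::repair::{repair_service::RepairService, serve_repair::ShredRepairType};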

@@ -10,41 +10,29 @@
pub mod accounts_hash_verifier;
pub mod admin_rpc_post_init;
pub mod ancestor_hashes_service;
pub mod banking_stage;
pub mod banking_trace;
pub mod cache_block_meta_service;
pub mod cluster_info_vote_listener;
pub mod cluster_slot_state_verifier;
pub mod cluster_slots_service;
pub mod commitment_service;
pub mod completed_data_sets_service;
pub mod consensus;
pub mod cost_update_service;
pub mod drop_bank_service;
pub mod duplicate_repair_status;
pub mod fetch_stage;
pub mod gen_keys;
pub mod ledger_cleanup_service;
pub mod ledger_metric_report_service;
pub mod next_leader;
pub mod optimistic_confirmation_verifier;
pub mod outstanding_requests;
pub mod packet_threshold;
pub mod poh_timing_report_service;
pub mod poh_timing_reporter;
pub mod repair_generic_traversal;
pub mod repair_response;
pub mod repair_service;
pub mod repair_weight;
pub mod repair_weighted_traversal;
pub mod repair;
pub mod replay_stage;
pub mod request_response;
mod result;
pub mod rewards_recorder_service;
pub mod sample_performance_service;
pub mod serve_repair;
pub mod serve_repair_service;
mod shred_fetch_stage;
pub mod sigverify;
pub mod sigverify_stage;

@@ -1,16 +1,18 @@
use {
crate::{
cluster_slots_service::cluster_slots::ClusterSlots,
duplicate_repair_status::{
AncestorRequestDecision, AncestorRequestStatus, AncestorRequestType,
repair::{
duplicate_repair_status::{
AncestorRequestDecision, AncestorRequestStatus, AncestorRequestType,
},
outstanding_requests::OutstandingRequests,
packet_threshold::DynamicPacketToProcessThreshold,
repair_service::{AncestorDuplicateSlotsSender, RepairInfo, RepairStatsGroup},
serve_repair::{
AncestorHashesRepairType, AncestorHashesResponse, RepairProtocol, ServeRepair,
},
},
outstanding_requests::OutstandingRequests,
packet_threshold::DynamicPacketToProcessThreshold,
repair_service::{AncestorDuplicateSlotsSender, RepairInfo, RepairStatsGroup},
replay_stage::DUPLICATE_THRESHOLD,
serve_repair::{
AncestorHashesRepairType, AncestorHashesResponse, RepairProtocol, ServeRepair,
},
},
bincode::serialize,
crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender},
@@ -865,13 +867,15 @@ mod test {
use {
super::*,
crate::{
cluster_slot_state_verifier::{DuplicateSlotsToRepair, PurgeRepairSlotCounter},
duplicate_repair_status::DuplicateAncestorDecision,
repair::{
cluster_slot_state_verifier::{DuplicateSlotsToRepair, PurgeRepairSlotCounter},
duplicate_repair_status::DuplicateAncestorDecision,
serve_repair::MAX_ANCESTOR_RESPONSES,
},
replay_stage::{
tests::{replay_blockstore_components, ReplayBlockstoreComponents},
ReplayStage,
},
serve_repair::MAX_ANCESTOR_RESPONSES,
vote_simulator::VoteSimulator,
},
solana_gossip::{

@@ -1,9 +1,11 @@
use {
crate::{
ancestor_hashes_service::{AncestorHashesReplayUpdate, AncestorHashesReplayUpdateSender},
consensus::{
fork_choice::ForkChoice, heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
},
repair::ancestor_hashes_service::{
AncestorHashesReplayUpdate, AncestorHashesReplayUpdateSender,
},
},
solana_ledger::blockstore::Blockstore,
solana_sdk::{clock::Slot, hash::Hash},

core/src/repair/mod.rs (new file, 14 lines)
@@ -0,0 +1,14 @@
pub mod ancestor_hashes_service;
pub mod cluster_slot_state_verifier;
pub mod duplicate_repair_status;
pub mod outstanding_requests;
pub mod packet_threshold;
pub mod repair_generic_traversal;
pub mod repair_response;
pub mod repair_service;
pub mod repair_weight;
pub mod repair_weighted_traversal;
pub mod request_response;
pub mod result;
pub mod serve_repair;
pub mod serve_repair_service;
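Callers outside the core crate see the same shift; a minimal sketch mirroring the solana-dos import change near the end of this diff:

// Old public path, removed by this refactor:
// use solana_core::serve_repair::ServeRepair;

// New public path through the repair module:
use solana_core::repair::serve_repair::ServeRepair;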

@@ -1,5 +1,5 @@
use {
crate::request_response::RequestResponse,
crate::repair::request_response::RequestResponse,
lru::LruCache,
rand::{thread_rng, Rng},
solana_ledger::shred::Nonce,
@@ -86,7 +86,7 @@ pub struct RequestStatus<T> {
pub(crate) mod tests {
use {
super::*,
crate::serve_repair::ShredRepairType,
crate::repair::serve_repair::ShredRepairType,
solana_ledger::shred::{Shred, ShredFlags},
solana_sdk::timing::timestamp,
};

@@ -1,8 +1,7 @@
use {
crate::{
consensus::{heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice, tree_diff::TreeDiff},
repair_service::RepairService,
serve_repair::ShredRepairType,
repair::{repair_service::RepairService, serve_repair::ShredRepairType},
},
solana_ledger::{blockstore::Blockstore, blockstore_meta::SlotMeta},
solana_sdk::{clock::Slot, hash::Hash},
@@ -205,7 +204,7 @@ pub fn get_closest_completion(
pub mod test {
use {
super::*,
crate::repair_service::sleep_shred_deferment_period,
crate::repair::repair_service::sleep_shred_deferment_period,
solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path},
solana_sdk::hash::Hash,
trees::{tr, Tree, TreeWalk},

@@ -2,18 +2,20 @@
//! regularly finds missing shreds in the ledger and sends repair requests for those shreds
#[cfg(test)]
use {
crate::duplicate_repair_status::DuplicateSlotRepairStatus,
crate::repair::duplicate_repair_status::DuplicateSlotRepairStatus,
solana_sdk::clock::DEFAULT_MS_PER_SLOT,
};
use {
crate::{
ancestor_hashes_service::{AncestorHashesReplayUpdateReceiver, AncestorHashesService},
cluster_info_vote_listener::VerifiedVoteReceiver,
cluster_slots_service::cluster_slots::ClusterSlots,
duplicate_repair_status::AncestorDuplicateSlotsToRepair,
outstanding_requests::OutstandingRequests,
repair_weight::RepairWeight,
serve_repair::{ServeRepair, ShredRepairType, REPAIR_PEERS_CACHE_CAPACITY},
repair::{
ancestor_hashes_service::{AncestorHashesReplayUpdateReceiver, AncestorHashesService},
duplicate_repair_status::AncestorDuplicateSlotsToRepair,
outstanding_requests::OutstandingRequests,
repair_weight::RepairWeight,
serve_repair::{ServeRepair, ShredRepairType, REPAIR_PEERS_CACHE_CAPACITY},
},
},
crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender},
lru::LruCache,

@@ -1,11 +1,13 @@
use {
crate::{
consensus::{heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice, tree_diff::TreeDiff},
repair_generic_traversal::{get_closest_completion, get_unknown_last_index},
repair_service::{BestRepairsStats, RepairTiming},
repair_weighted_traversal,
repair::{
repair_generic_traversal::{get_closest_completion, get_unknown_last_index},
repair_service::{BestRepairsStats, RepairTiming},
repair_weighted_traversal,
serve_repair::ShredRepairType,
},
replay_stage::DUPLICATE_THRESHOLD,
serve_repair::ShredRepairType,
},
solana_ledger::{
ancestor_iterator::AncestorIterator, blockstore::Blockstore, blockstore_meta::SlotMeta,

@@ -1,8 +1,7 @@
use {
crate::{
consensus::{heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice, tree_diff::TreeDiff},
repair_service::RepairService,
serve_repair::ShredRepairType,
repair::{repair_service::RepairService, serve_repair::ShredRepairType},
},
solana_ledger::{blockstore::Blockstore, blockstore_meta::SlotMeta},
solana_sdk::{clock::Slot, hash::Hash},
@@ -137,7 +136,7 @@ pub fn get_best_repair_shreds(
pub mod test {
use {
super::*,
crate::repair_service::sleep_shred_deferment_period,
crate::repair::repair_service::sleep_shred_deferment_period,
solana_ledger::{
get_tmp_ledger_path,
shred::{Shred, ShredFlags},

core/src/repair/result.rs (new file, 36 lines)
@@ -0,0 +1,36 @@
use {
solana_gossip::{cluster_info::ClusterInfoError, contact_info},
thiserror::Error,
};
#[derive(Error, Debug)]
pub enum RepairVerifyError {
#[error("IdMismatch")]
IdMismatch,
#[error("Malformed")]
Malformed,
#[error("SelfRepair")]
SelfRepair,
#[error("SigVerify")]
SigVerify,
#[error("TimeSkew")]
TimeSkew,
#[error("Unsigned")]
Unsigned,
}
#[derive(Debug, Error)]
pub enum Error {
#[error(transparent)]
ClusterInfo(#[from] ClusterInfoError),
#[error(transparent)]
InvalidContactInfo(#[from] contact_info::Error),
#[error(transparent)]
Serialize(#[from] std::boxed::Box<bincode::ErrorKind>),
#[error(transparent)]
WeightedIndex(#[from] rand::distributions::weighted::WeightedError),
#[error(transparent)]
RepairVerify(#[from] RepairVerifyError),
}
pub type Result<T> = std::result::Result<T, Error>;
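The new repair::result module carries only the repair-specific error variants that are dropped from the crate-level result module later in this diff, and it becomes the home of RepairVerifyError (previously defined in serve_repair). A minimal usage sketch; the helper name check_repair_signature is hypothetical and not part of this commit:

use crate::repair::result::{RepairVerifyError, Result};

// Hypothetical helper: thiserror's #[from] on Error::RepairVerify generates a
// From<RepairVerifyError> impl, so `.into()` (or `?`) converts the inner error
// into repair::result::Error automatically.
fn check_repair_signature(verified: bool) -> Result<()> {
    if !verified {
        return Err(RepairVerifyError::SigVerify.into());
    }
    Ok(())
}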

@@ -1,11 +1,13 @@
use {
crate::{
cluster_slots_service::cluster_slots::ClusterSlots,
duplicate_repair_status::get_ancestor_hash_repair_sample_size,
repair_response,
repair_service::{OutstandingShredRepairs, RepairStats, REPAIR_MS},
request_response::RequestResponse,
result::{Error, Result},
repair::{
duplicate_repair_status::get_ancestor_hash_repair_sample_size,
repair_response,
repair_service::{OutstandingShredRepairs, RepairStats, REPAIR_MS},
request_response::RequestResponse,
result::{Error, RepairVerifyError, Result},
},
},
bincode::serialize,
crossbeam_channel::RecvTimeoutError,
@@ -55,7 +57,6 @@ use {
thread::{Builder, JoinHandle},
time::{Duration, Instant},
},
thiserror::Error,
};
/// the number of slots to respond with when responding to `Orphan` requests
@@ -87,22 +88,6 @@ const SIGNED_REPAIR_TIME_WINDOW: Duration = Duration::from_secs(60 * 10); // 10
#[cfg(test)]
static_assertions::const_assert_eq!(MAX_ANCESTOR_RESPONSES, 30);
#[derive(Error, Debug)]
pub enum RepairVerifyError {
#[error("IdMismatch")]
IdMismatch,
#[error("Malformed")]
Malformed,
#[error("SelfRepair")]
SelfRepair,
#[error("SigVerify")]
SigVerify,
#[error("TimeSkew")]
TimeSkew,
#[error("Unsigned")]
Unsigned,
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub enum ShredRepairType {
/// Requesting `MAX_ORPHAN_REPAIR_RESPONSES ` parent shreds
@@ -229,7 +214,7 @@ pub(crate) type Ping = ping_pong::Ping<[u8; REPAIR_PING_TOKEN_SIZE]>;
/// Window protocol messages
#[derive(Debug, AbiEnumVisitor, AbiExample, Deserialize, Serialize, strum_macros::Display)]
#[frozen_abi(digest = "6VyBwHjkAMXAN97fdhQgFv6VdPEnfJo9LdUAd2SFtwF3")]
#[frozen_abi(digest = "HXKJuZAK4LsweUTRbsxEcG9jHA9JR9s8MYmmjx2Nb5X1")]
pub enum RepairProtocol {
LegacyWindowIndex(LegacyContactInfo, Slot, u64),
LegacyHighestWindowIndex(LegacyContactInfo, Slot, u64),
@@ -1371,7 +1356,7 @@ impl ServeRepair {
mod tests {
use {
super::*,
crate::repair_response,
crate::repair::repair_response,
solana_gossip::{contact_info::ContactInfo, socketaddr, socketaddr_any},
solana_ledger::{
blockstore::make_many_slot_entries,

@@ -1,5 +1,5 @@
use {
crate::serve_repair::ServeRepair,
crate::repair::serve_repair::ServeRepair,
crossbeam_channel::{unbounded, Sender},
solana_ledger::blockstore::Blockstore,
solana_perf::recycler::Recycler,

@@ -2,13 +2,11 @@
use {
crate::{
ancestor_hashes_service::AncestorHashesReplayUpdateSender,
banking_trace::BankingTracer,
cache_block_meta_service::CacheBlockMetaSender,
cluster_info_vote_listener::{
GossipDuplicateConfirmedSlotsReceiver, GossipVerifiedVoteHashReceiver, VoteTracker,
},
cluster_slot_state_verifier::*,
cluster_slots_service::{cluster_slots::ClusterSlots, ClusterSlotsUpdateSender},
commitment_service::{AggregateCommitmentService, CommitmentAggregationData},
consensus::{
@@ -21,9 +19,13 @@ use {
SWITCH_FORK_THRESHOLD,
},
cost_update_service::CostUpdate,
duplicate_repair_status::AncestorDuplicateSlotsToRepair,
repair_service::{
AncestorDuplicateSlotsReceiver, DumpedSlotsSender, PopularPrunedForksReceiver,
repair::{
ancestor_hashes_service::AncestorHashesReplayUpdateSender,
cluster_slot_state_verifier::*,
duplicate_repair_status::AncestorDuplicateSlotsToRepair,
repair_service::{
AncestorDuplicateSlotsReceiver, DumpedSlotsSender, PopularPrunedForksReceiver,
},
},
rewards_recorder_service::{RewardsMessage, RewardsRecorderSender},
unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes,

@@ -1,23 +1,14 @@
//! The `result` module exposes a Result type that propagates one of many different Error types.
use {
crate::serve_repair::RepairVerifyError,
solana_gossip::{cluster_info, contact_info, gossip_error::GossipError},
solana_ledger::blockstore,
thiserror::Error,
};
use {solana_gossip::gossip_error::GossipError, solana_ledger::blockstore, thiserror::Error};
#[derive(Debug, Error)]
pub enum Error {
#[error(transparent)]
Blockstore(#[from] blockstore::BlockstoreError),
#[error(transparent)]
ClusterInfo(#[from] cluster_info::ClusterInfoError),
#[error(transparent)]
Gossip(#[from] GossipError),
#[error(transparent)]
InvalidContactInfo(#[from] contact_info::Error),
#[error(transparent)]
Io(#[from] std::io::Error),
#[error("ReadyTimeout")]
ReadyTimeout,
@@ -29,12 +20,6 @@ pub enum Error {
Send,
#[error("TrySend")]
TrySend,
#[error(transparent)]
Serialize(#[from] std::boxed::Box<bincode::ErrorKind>),
#[error(transparent)]
WeightedIndex(#[from] rand::distributions::weighted::WeightedError),
#[error(transparent)]
RepairVerify(#[from] RepairVerifyError),
}
pub type Result<T> = std::result::Result<T, Error>;

@@ -1,7 +1,7 @@
//! The `shred_fetch_stage` pulls shreds from UDP sockets and sends it to a channel.
use {
crate::serve_repair::ServeRepair,
crate::repair::serve_repair::ServeRepair,
crossbeam_channel::{unbounded, Sender},
solana_gossip::{cluster_info::ClusterInfo, contact_info::Protocol},
solana_ledger::shred::{should_discard_shred, ShredFetchStats},

@@ -15,7 +15,7 @@ use {
cost_update_service::CostUpdateService,
drop_bank_service::DropBankService,
ledger_cleanup_service::LedgerCleanupService,
repair_service::RepairInfo,
repair::repair_service::RepairInfo,
replay_stage::{ReplayStage, ReplayStageConfig},
rewards_recorder_service::RewardsRecorderSender,
shred_fetch_stage::ShredFetchStage,

@@ -16,10 +16,9 @@ use {
},
ledger_metric_report_service::LedgerMetricReportService,
poh_timing_report_service::PohTimingReportService,
repair::{serve_repair::ServeRepair, serve_repair_service::ServeRepairService},
rewards_recorder_service::{RewardsRecorderSender, RewardsRecorderService},
sample_performance_service::SamplePerformanceService,
serve_repair::ServeRepair,
serve_repair_service::ServeRepairService,
sigverify,
snapshot_packager_service::SnapshotPackagerService,
stats_reporter_service::StatsReporterService,

@@ -1,9 +1,6 @@
use {
crate::{
cluster_info_vote_listener::VoteTracker,
cluster_slot_state_verifier::{
DuplicateSlotsTracker, EpochSlotsFrozenSlots, GossipDuplicateConfirmedSlots,
},
cluster_slots_service::cluster_slots::ClusterSlots,
consensus::{
fork_choice::SelectVoteAndResetForkResult,
@@ -12,6 +9,9 @@ use {
progress_map::{ForkProgress, ProgressMap},
Tower,
},
repair::cluster_slot_state_verifier::{
DuplicateSlotsTracker, EpochSlotsFrozenSlots, GossipDuplicateConfirmedSlots,
},
replay_stage::{HeaviestForkFailures, ReplayStage},
unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes,
},

@@ -3,13 +3,15 @@
//!
use {
crate::{
ancestor_hashes_service::AncestorHashesReplayUpdateReceiver,
cluster_info_vote_listener::VerifiedVoteReceiver,
completed_data_sets_service::CompletedDataSetsSender,
repair_response,
repair_service::{
DumpedSlotsReceiver, OutstandingShredRepairs, PopularPrunedForksSender, RepairInfo,
RepairService,
repair::{
ancestor_hashes_service::AncestorHashesReplayUpdateReceiver,
repair_response,
repair_service::{
DumpedSlotsReceiver, OutstandingShredRepairs, PopularPrunedForksSender, RepairInfo,
RepairService,
},
},
result::{Error, Result},
},
@@ -482,6 +484,7 @@ impl WindowService {
mod test {
use {
super::*,
crate::repair::serve_repair::ShredRepairType,
solana_entry::entry::{create_ticks, Entry},
solana_gossip::contact_info::ContactInfo,
solana_ledger::{
@@ -576,7 +579,6 @@ mod test {
#[test]
fn test_prune_shreds() {
use crate::serve_repair::ShredRepairType;
solana_logger::setup();
let shred = Shred::new_from_parity_shard(
5, // slot

@@ -46,7 +46,7 @@ use {
rand::{thread_rng, Rng},
solana_bench_tps::{bench::generate_and_fund_keypairs, bench_tps_client::BenchTpsClient},
solana_client::{connection_cache::ConnectionCache, tpu_connection::TpuConnection},
solana_core::serve_repair::{RepairProtocol, RepairRequestHeader, ServeRepair},
solana_core::repair::serve_repair::{RepairProtocol, RepairRequestHeader, ServeRepair},
solana_dos::cli::*,
solana_gossip::{
contact_info::Protocol,

@@ -4520,7 +4520,7 @@ fn test_slot_hash_expiry() {
#[serial]
fn test_duplicate_with_pruned_ancestor() {
solana_logger::setup_with("info,solana_metrics=off");
solana_core::duplicate_repair_status::set_ancestor_hash_repair_sample_size_for_tests_only(3);
solana_core::repair::duplicate_repair_status::set_ancestor_hash_repair_sample_size_for_tests_only(3);
let majority_leader_stake = 10_000_000 * DEFAULT_NODE_STAKE;
let minority_leader_stake = 2_000_000 * DEFAULT_NODE_STAKE;