refactor core to create repair module (#32303)

commit bad5197cb0 (parent c2e45773af)
Author: Jeff Biseda (committed by GitHub)
Date: 2023-07-05 12:20:46 -07:00
25 changed files with 123 additions and 104 deletions


@@ -10,41 +10,29 @@
 pub mod accounts_hash_verifier;
 pub mod admin_rpc_post_init;
-pub mod ancestor_hashes_service;
 pub mod banking_stage;
 pub mod banking_trace;
 pub mod cache_block_meta_service;
 pub mod cluster_info_vote_listener;
-pub mod cluster_slot_state_verifier;
 pub mod cluster_slots_service;
 pub mod commitment_service;
 pub mod completed_data_sets_service;
 pub mod consensus;
 pub mod cost_update_service;
 pub mod drop_bank_service;
-pub mod duplicate_repair_status;
 pub mod fetch_stage;
 pub mod gen_keys;
 pub mod ledger_cleanup_service;
 pub mod ledger_metric_report_service;
 pub mod next_leader;
 pub mod optimistic_confirmation_verifier;
-pub mod outstanding_requests;
-pub mod packet_threshold;
 pub mod poh_timing_report_service;
 pub mod poh_timing_reporter;
-pub mod repair_generic_traversal;
-pub mod repair_response;
-pub mod repair_service;
-pub mod repair_weight;
-pub mod repair_weighted_traversal;
+pub mod repair;
 pub mod replay_stage;
-pub mod request_response;
 mod result;
 pub mod rewards_recorder_service;
 pub mod sample_performance_service;
-pub mod serve_repair;
-pub mod serve_repair_service;
 mod shred_fetch_stage;
 pub mod sigverify;
 pub mod sigverify_stage;
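Taken together with the new core/src/repair/mod.rs further down, this hunk collapses thirteen top-level modules into the single `pub mod repair;`. For downstream code the change is a mechanical path rewrite, as the solana_dos hunk near the end of this diff shows; a minimal sketch of the caller-side effect, assuming a crate that depends on solana_core:

    // Pre-#32303 this import was:
    //     use solana_core::serve_repair::{RepairProtocol, RepairRequestHeader, ServeRepair};
    // Post-#32303 the same items resolve through the new `repair` parent
    // module; nothing is renamed, only re-parented.
    use solana_core::repair::serve_repair::{RepairProtocol, RepairRequestHeader, ServeRepair};

    fn main() {
        // Purely illustrative: confirm the types exist at their new paths.
        println!("{}", std::any::type_name::<ServeRepair>());
        println!("{}", std::any::type_name::<RepairProtocol>());
        println!("{}", std::any::type_name::<RepairRequestHeader>());
    }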


@@ -1,16 +1,18 @@
 use {
     crate::{
         cluster_slots_service::cluster_slots::ClusterSlots,
-        duplicate_repair_status::{
-            AncestorRequestDecision, AncestorRequestStatus, AncestorRequestType,
+        repair::{
+            duplicate_repair_status::{
+                AncestorRequestDecision, AncestorRequestStatus, AncestorRequestType,
+            },
+            outstanding_requests::OutstandingRequests,
+            packet_threshold::DynamicPacketToProcessThreshold,
+            repair_service::{AncestorDuplicateSlotsSender, RepairInfo, RepairStatsGroup},
+            serve_repair::{
+                AncestorHashesRepairType, AncestorHashesResponse, RepairProtocol, ServeRepair,
+            },
         },
-        outstanding_requests::OutstandingRequests,
-        packet_threshold::DynamicPacketToProcessThreshold,
-        repair_service::{AncestorDuplicateSlotsSender, RepairInfo, RepairStatsGroup},
         replay_stage::DUPLICATE_THRESHOLD,
-        serve_repair::{
-            AncestorHashesRepairType, AncestorHashesResponse, RepairProtocol, ServeRepair,
-        },
     },
     bincode::serialize,
     crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender},
@@ -865,13 +867,15 @@ mod test {
     use {
         super::*,
         crate::{
-            cluster_slot_state_verifier::{DuplicateSlotsToRepair, PurgeRepairSlotCounter},
-            duplicate_repair_status::DuplicateAncestorDecision,
+            repair::{
+                cluster_slot_state_verifier::{DuplicateSlotsToRepair, PurgeRepairSlotCounter},
+                duplicate_repair_status::DuplicateAncestorDecision,
+                serve_repair::MAX_ANCESTOR_RESPONSES,
+            },
             replay_stage::{
                 tests::{replay_blockstore_components, ReplayBlockstoreComponents},
                 ReplayStage,
             },
-            serve_repair::MAX_ANCESTOR_RESPONSES,
             vote_simulator::VoteSimulator,
         },
         solana_gossip::{


@@ -1,9 +1,11 @@
 use {
     crate::{
-        ancestor_hashes_service::{AncestorHashesReplayUpdate, AncestorHashesReplayUpdateSender},
         consensus::{
             fork_choice::ForkChoice, heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
         },
+        repair::ancestor_hashes_service::{
+            AncestorHashesReplayUpdate, AncestorHashesReplayUpdateSender,
+        },
     },
     solana_ledger::blockstore::Blockstore,
     solana_sdk::{clock::Slot, hash::Hash},

core/src/repair/mod.rs (new file, 14 lines)

@@ -0,0 +1,14 @@
+pub mod ancestor_hashes_service;
+pub mod cluster_slot_state_verifier;
+pub mod duplicate_repair_status;
+pub mod outstanding_requests;
+pub mod packet_threshold;
+pub mod repair_generic_traversal;
+pub mod repair_response;
+pub mod repair_service;
+pub mod repair_weight;
+pub mod repair_weighted_traversal;
+pub mod request_response;
+pub mod result;
+pub mod serve_repair;
+pub mod serve_repair_service;
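Every hunk that follows rewrites intra-crate imports to go through this new parent module (`crate::repair::…` instead of `crate::…`). A self-contained toy sketch of the pattern, with placeholder items standing in for the real ones:

    // Toy layout (not the real crate): `repair` is declared at the crate
    // root, and its children address each other through that root, which is
    // exactly the `crate::repair::serve_repair::...` shape in the hunks below.
    mod repair {
        pub mod serve_repair {
            #[derive(Debug)]
            pub enum ShredRepairType {
                Shred(u64, u64),
            }
        }
        pub mod repair_weight {
            // A sibling module imports through the crate root.
            pub use crate::repair::serve_repair::ShredRepairType;
        }
    }

    fn main() {
        let request = repair::repair_weight::ShredRepairType::Shred(42, 0);
        println!("{request:?}");
    }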


@@ -1,5 +1,5 @@
 use {
-    crate::request_response::RequestResponse,
+    crate::repair::request_response::RequestResponse,
     lru::LruCache,
     rand::{thread_rng, Rng},
     solana_ledger::shred::Nonce,
@@ -86,7 +86,7 @@ pub struct RequestStatus<T> {
 pub(crate) mod tests {
     use {
         super::*,
-        crate::serve_repair::ShredRepairType,
+        crate::repair::serve_repair::ShredRepairType,
         solana_ledger::shred::{Shred, ShredFlags},
         solana_sdk::timing::timestamp,
     };


@@ -1,8 +1,7 @@
 use {
     crate::{
         consensus::{heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice, tree_diff::TreeDiff},
-        repair_service::RepairService,
-        serve_repair::ShredRepairType,
+        repair::{repair_service::RepairService, serve_repair::ShredRepairType},
     },
     solana_ledger::{blockstore::Blockstore, blockstore_meta::SlotMeta},
     solana_sdk::{clock::Slot, hash::Hash},
@@ -205,7 +204,7 @@ pub fn get_closest_completion(
 pub mod test {
     use {
         super::*,
-        crate::repair_service::sleep_shred_deferment_period,
+        crate::repair::repair_service::sleep_shred_deferment_period,
         solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path},
         solana_sdk::hash::Hash,
         trees::{tr, Tree, TreeWalk},


@@ -2,18 +2,20 @@
 //! regularly finds missing shreds in the ledger and sends repair requests for those shreds
 #[cfg(test)]
 use {
-    crate::duplicate_repair_status::DuplicateSlotRepairStatus,
+    crate::repair::duplicate_repair_status::DuplicateSlotRepairStatus,
     solana_sdk::clock::DEFAULT_MS_PER_SLOT,
 };
 use {
     crate::{
-        ancestor_hashes_service::{AncestorHashesReplayUpdateReceiver, AncestorHashesService},
         cluster_info_vote_listener::VerifiedVoteReceiver,
         cluster_slots_service::cluster_slots::ClusterSlots,
-        duplicate_repair_status::AncestorDuplicateSlotsToRepair,
-        outstanding_requests::OutstandingRequests,
-        repair_weight::RepairWeight,
-        serve_repair::{ServeRepair, ShredRepairType, REPAIR_PEERS_CACHE_CAPACITY},
+        repair::{
+            ancestor_hashes_service::{AncestorHashesReplayUpdateReceiver, AncestorHashesService},
+            duplicate_repair_status::AncestorDuplicateSlotsToRepair,
+            outstanding_requests::OutstandingRequests,
+            repair_weight::RepairWeight,
+            serve_repair::{ServeRepair, ShredRepairType, REPAIR_PEERS_CACHE_CAPACITY},
+        },
     },
     crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender},
     lru::LruCache,


@@ -1,11 +1,13 @@
 use {
     crate::{
         consensus::{heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice, tree_diff::TreeDiff},
-        repair_generic_traversal::{get_closest_completion, get_unknown_last_index},
-        repair_service::{BestRepairsStats, RepairTiming},
-        repair_weighted_traversal,
+        repair::{
+            repair_generic_traversal::{get_closest_completion, get_unknown_last_index},
+            repair_service::{BestRepairsStats, RepairTiming},
+            repair_weighted_traversal,
+            serve_repair::ShredRepairType,
+        },
         replay_stage::DUPLICATE_THRESHOLD,
-        serve_repair::ShredRepairType,
     },
     solana_ledger::{
         ancestor_iterator::AncestorIterator, blockstore::Blockstore, blockstore_meta::SlotMeta,


@@ -1,8 +1,7 @@
 use {
     crate::{
         consensus::{heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice, tree_diff::TreeDiff},
-        repair_service::RepairService,
-        serve_repair::ShredRepairType,
+        repair::{repair_service::RepairService, serve_repair::ShredRepairType},
     },
     solana_ledger::{blockstore::Blockstore, blockstore_meta::SlotMeta},
     solana_sdk::{clock::Slot, hash::Hash},
@@ -137,7 +136,7 @@ pub fn get_best_repair_shreds(
 pub mod test {
     use {
         super::*,
-        crate::repair_service::sleep_shred_deferment_period,
+        crate::repair::repair_service::sleep_shred_deferment_period,
         solana_ledger::{
             get_tmp_ledger_path,
             shred::{Shred, ShredFlags},

core/src/repair/result.rs (new file, 36 lines)

@@ -0,0 +1,36 @@
+use {
+    solana_gossip::{cluster_info::ClusterInfoError, contact_info},
+    thiserror::Error,
+};
+
+#[derive(Error, Debug)]
+pub enum RepairVerifyError {
+    #[error("IdMismatch")]
+    IdMismatch,
+    #[error("Malformed")]
+    Malformed,
+    #[error("SelfRepair")]
+    SelfRepair,
+    #[error("SigVerify")]
+    SigVerify,
+    #[error("TimeSkew")]
+    TimeSkew,
+    #[error("Unsigned")]
+    Unsigned,
+}
+
+#[derive(Debug, Error)]
+pub enum Error {
+    #[error(transparent)]
+    ClusterInfo(#[from] ClusterInfoError),
+    #[error(transparent)]
+    InvalidContactInfo(#[from] contact_info::Error),
+    #[error(transparent)]
+    Serialize(#[from] std::boxed::Box<bincode::ErrorKind>),
+    #[error(transparent)]
+    WeightedIndex(#[from] rand::distributions::weighted::WeightedError),
+    #[error(transparent)]
+    RepairVerify(#[from] RepairVerifyError),
+}
+
+pub type Result<T> = std::result::Result<T, Error>;
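Both enums derive thiserror's `Error`, and each `#[from]` attribute generates a `From` impl, so `?` and `.into()` convert library failures into this module's `Error` without explicit match arms; `RepairVerifyError` nests the same way through `Error::RepairVerify`. A minimal sketch of a hypothetical caller (the helper functions are illustrative, not part of this commit):

    // Hypothetical caller of the new core/src/repair/result.rs types.
    use solana_core::repair::result::{Error, RepairVerifyError, Result};

    // bincode::serialize returns Result<Vec<u8>, Box<bincode::ErrorKind>>,
    // so `?` routes the failure through the generated From impl into
    // Error::Serialize.
    fn serialize_request(slot: u64) -> Result<Vec<u8>> {
        Ok(bincode::serialize(&slot)?)
    }

    fn check_signature(signed: bool) -> Result<()> {
        if !signed {
            // RepairVerifyError likewise converts via Error::RepairVerify.
            return Err(RepairVerifyError::Unsigned.into());
        }
        Ok(())
    }

    fn main() {
        assert!(serialize_request(42).is_ok());
        assert!(matches!(check_signature(false), Err(Error::RepairVerify(_))));
    }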


@@ -1,11 +1,13 @@
 use {
     crate::{
         cluster_slots_service::cluster_slots::ClusterSlots,
-        duplicate_repair_status::get_ancestor_hash_repair_sample_size,
-        repair_response,
-        repair_service::{OutstandingShredRepairs, RepairStats, REPAIR_MS},
-        request_response::RequestResponse,
-        result::{Error, Result},
+        repair::{
+            duplicate_repair_status::get_ancestor_hash_repair_sample_size,
+            repair_response,
+            repair_service::{OutstandingShredRepairs, RepairStats, REPAIR_MS},
+            request_response::RequestResponse,
+            result::{Error, RepairVerifyError, Result},
+        },
     },
     bincode::serialize,
     crossbeam_channel::RecvTimeoutError,
@@ -55,7 +57,6 @@ use {
         thread::{Builder, JoinHandle},
         time::{Duration, Instant},
     },
-    thiserror::Error,
 };
 
 /// the number of slots to respond with when responding to `Orphan` requests
@@ -87,22 +88,6 @@ const SIGNED_REPAIR_TIME_WINDOW: Duration = Duration::from_secs(60 * 10); // 10
 #[cfg(test)]
 static_assertions::const_assert_eq!(MAX_ANCESTOR_RESPONSES, 30);
 
-#[derive(Error, Debug)]
-pub enum RepairVerifyError {
-    #[error("IdMismatch")]
-    IdMismatch,
-    #[error("Malformed")]
-    Malformed,
-    #[error("SelfRepair")]
-    SelfRepair,
-    #[error("SigVerify")]
-    SigVerify,
-    #[error("TimeSkew")]
-    TimeSkew,
-    #[error("Unsigned")]
-    Unsigned,
-}
-
 #[derive(Serialize, Deserialize, Debug, Clone, Copy, Hash, PartialEq, Eq)]
 pub enum ShredRepairType {
     /// Requesting `MAX_ORPHAN_REPAIR_RESPONSES ` parent shreds
@@ -229,7 +214,7 @@ pub(crate) type Ping = ping_pong::Ping<[u8; REPAIR_PING_TOKEN_SIZE]>;
 /// Window protocol messages
 #[derive(Debug, AbiEnumVisitor, AbiExample, Deserialize, Serialize, strum_macros::Display)]
-#[frozen_abi(digest = "6VyBwHjkAMXAN97fdhQgFv6VdPEnfJo9LdUAd2SFtwF3")]
+#[frozen_abi(digest = "HXKJuZAK4LsweUTRbsxEcG9jHA9JR9s8MYmmjx2Nb5X1")]
 pub enum RepairProtocol {
     LegacyWindowIndex(LegacyContactInfo, Slot, u64),
     LegacyHighestWindowIndex(LegacyContactInfo, Slot, u64),
@@ -1371,7 +1356,7 @@ impl ServeRepair {
 mod tests {
     use {
         super::*,
-        crate::repair_response,
+        crate::repair::repair_response,
         solana_gossip::{contact_info::ContactInfo, socketaddr, socketaddr_any},
         solana_ledger::{
             blockstore::make_many_slot_entries,


@@ -1,5 +1,5 @@
 use {
-    crate::serve_repair::ServeRepair,
+    crate::repair::serve_repair::ServeRepair,
     crossbeam_channel::{unbounded, Sender},
     solana_ledger::blockstore::Blockstore,
     solana_perf::recycler::Recycler,


@@ -2,13 +2,11 @@
 use {
     crate::{
-        ancestor_hashes_service::AncestorHashesReplayUpdateSender,
         banking_trace::BankingTracer,
         cache_block_meta_service::CacheBlockMetaSender,
         cluster_info_vote_listener::{
             GossipDuplicateConfirmedSlotsReceiver, GossipVerifiedVoteHashReceiver, VoteTracker,
         },
-        cluster_slot_state_verifier::*,
         cluster_slots_service::{cluster_slots::ClusterSlots, ClusterSlotsUpdateSender},
         commitment_service::{AggregateCommitmentService, CommitmentAggregationData},
         consensus::{
@@ -21,9 +19,13 @@ use {
             SWITCH_FORK_THRESHOLD,
         },
         cost_update_service::CostUpdate,
-        duplicate_repair_status::AncestorDuplicateSlotsToRepair,
-        repair_service::{
-            AncestorDuplicateSlotsReceiver, DumpedSlotsSender, PopularPrunedForksReceiver,
+        repair::{
+            ancestor_hashes_service::AncestorHashesReplayUpdateSender,
+            cluster_slot_state_verifier::*,
+            duplicate_repair_status::AncestorDuplicateSlotsToRepair,
+            repair_service::{
+                AncestorDuplicateSlotsReceiver, DumpedSlotsSender, PopularPrunedForksReceiver,
+            },
         },
         rewards_recorder_service::{RewardsMessage, RewardsRecorderSender},
         unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes,


@@ -1,23 +1,14 @@
 //! The `result` module exposes a Result type that propagates one of many different Error types.
-use {
-    crate::serve_repair::RepairVerifyError,
-    solana_gossip::{cluster_info, contact_info, gossip_error::GossipError},
-    solana_ledger::blockstore,
-    thiserror::Error,
-};
+use {solana_gossip::gossip_error::GossipError, solana_ledger::blockstore, thiserror::Error};
 
 #[derive(Debug, Error)]
 pub enum Error {
     #[error(transparent)]
     Blockstore(#[from] blockstore::BlockstoreError),
     #[error(transparent)]
-    ClusterInfo(#[from] cluster_info::ClusterInfoError),
-    #[error(transparent)]
     Gossip(#[from] GossipError),
     #[error(transparent)]
-    InvalidContactInfo(#[from] contact_info::Error),
-    #[error(transparent)]
     Io(#[from] std::io::Error),
     #[error("ReadyTimeout")]
     ReadyTimeout,
@@ -29,12 +20,6 @@ pub enum Error {
     Send,
     #[error("TrySend")]
     TrySend,
-    #[error(transparent)]
-    Serialize(#[from] std::boxed::Box<bincode::ErrorKind>),
-    #[error(transparent)]
-    WeightedIndex(#[from] rand::distributions::weighted::WeightedError),
-    #[error(transparent)]
-    RepairVerify(#[from] RepairVerifyError),
 }
 
 pub type Result<T> = std::result::Result<T, Error>;


@@ -1,7 +1,7 @@
 //! The `shred_fetch_stage` pulls shreds from UDP sockets and sends it to a channel.
 use {
-    crate::serve_repair::ServeRepair,
+    crate::repair::serve_repair::ServeRepair,
     crossbeam_channel::{unbounded, Sender},
     solana_gossip::{cluster_info::ClusterInfo, contact_info::Protocol},
     solana_ledger::shred::{should_discard_shred, ShredFetchStats},


@@ -15,7 +15,7 @@ use {
         cost_update_service::CostUpdateService,
         drop_bank_service::DropBankService,
         ledger_cleanup_service::LedgerCleanupService,
-        repair_service::RepairInfo,
+        repair::repair_service::RepairInfo,
         replay_stage::{ReplayStage, ReplayStageConfig},
         rewards_recorder_service::RewardsRecorderSender,
         shred_fetch_stage::ShredFetchStage,


@@ -16,10 +16,9 @@ use {
         },
         ledger_metric_report_service::LedgerMetricReportService,
         poh_timing_report_service::PohTimingReportService,
+        repair::{serve_repair::ServeRepair, serve_repair_service::ServeRepairService},
         rewards_recorder_service::{RewardsRecorderSender, RewardsRecorderService},
         sample_performance_service::SamplePerformanceService,
-        serve_repair::ServeRepair,
-        serve_repair_service::ServeRepairService,
         sigverify,
         snapshot_packager_service::SnapshotPackagerService,
         stats_reporter_service::StatsReporterService,


@@ -1,9 +1,6 @@
 use {
     crate::{
         cluster_info_vote_listener::VoteTracker,
-        cluster_slot_state_verifier::{
-            DuplicateSlotsTracker, EpochSlotsFrozenSlots, GossipDuplicateConfirmedSlots,
-        },
         cluster_slots_service::cluster_slots::ClusterSlots,
         consensus::{
             fork_choice::SelectVoteAndResetForkResult,
@@ -12,6 +9,9 @@ use {
             progress_map::{ForkProgress, ProgressMap},
             Tower,
         },
+        repair::cluster_slot_state_verifier::{
+            DuplicateSlotsTracker, EpochSlotsFrozenSlots, GossipDuplicateConfirmedSlots,
+        },
         replay_stage::{HeaviestForkFailures, ReplayStage},
         unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes,
     },


@@ -3,13 +3,15 @@
 //!
 use {
     crate::{
-        ancestor_hashes_service::AncestorHashesReplayUpdateReceiver,
         cluster_info_vote_listener::VerifiedVoteReceiver,
         completed_data_sets_service::CompletedDataSetsSender,
-        repair_response,
-        repair_service::{
-            DumpedSlotsReceiver, OutstandingShredRepairs, PopularPrunedForksSender, RepairInfo,
-            RepairService,
+        repair::{
+            ancestor_hashes_service::AncestorHashesReplayUpdateReceiver,
+            repair_response,
+            repair_service::{
+                DumpedSlotsReceiver, OutstandingShredRepairs, PopularPrunedForksSender, RepairInfo,
+                RepairService,
+            },
         },
         result::{Error, Result},
     },
@@ -482,6 +484,7 @@ impl WindowService {
 mod test {
     use {
         super::*,
+        crate::repair::serve_repair::ShredRepairType,
         solana_entry::entry::{create_ticks, Entry},
         solana_gossip::contact_info::ContactInfo,
         solana_ledger::{
@@ -576,7 +579,6 @@ mod test {
     #[test]
     fn test_prune_shreds() {
-        use crate::serve_repair::ShredRepairType;
         solana_logger::setup();
         let shred = Shred::new_from_parity_shard(
             5, // slot


@@ -46,7 +46,7 @@ use {
     rand::{thread_rng, Rng},
     solana_bench_tps::{bench::generate_and_fund_keypairs, bench_tps_client::BenchTpsClient},
     solana_client::{connection_cache::ConnectionCache, tpu_connection::TpuConnection},
-    solana_core::serve_repair::{RepairProtocol, RepairRequestHeader, ServeRepair},
+    solana_core::repair::serve_repair::{RepairProtocol, RepairRequestHeader, ServeRepair},
     solana_dos::cli::*,
     solana_gossip::{
         contact_info::Protocol,


@@ -4520,7 +4520,7 @@ fn test_slot_hash_expiry() {
 #[serial]
 fn test_duplicate_with_pruned_ancestor() {
     solana_logger::setup_with("info,solana_metrics=off");
-    solana_core::duplicate_repair_status::set_ancestor_hash_repair_sample_size_for_tests_only(3);
+    solana_core::repair::duplicate_repair_status::set_ancestor_hash_repair_sample_size_for_tests_only(3);
     let majority_leader_stake = 10_000_000 * DEFAULT_NODE_STAKE;
     let minority_leader_stake = 2_000_000 * DEFAULT_NODE_STAKE;