Standardize thread names

Tenets:
1. Limit thread names to 15 characters
2. Prefix all Solana-controlled threads with "sol"
3. Use camel case; it is more character-dense than snake or kebab case
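
As an illustration of the tenets, here is a minimal sketch in the style this commit applies throughout; the names "solExampleSvc" and "solExample" are hypothetical, used only for the example. The 15-character cap matches the Linux pthread name limit, where a thread name holds at most 16 bytes including the trailing NUL.

    use std::thread::Builder;

    // Tenets 1-3: "sol" prefix, camel case, 15 characters or fewer.
    // "solExampleSvc" is a hypothetical service name (13 characters).
    fn spawn_example_service() -> std::io::Result<std::thread::JoinHandle<()>> {
        Builder::new()
            .name("solExampleSvc".to_string())
            .spawn(|| {
                // service loop would run here
            })
    }

    // Pool workers get a zero-padded index so every name still fits
    // within 15 characters, e.g. "solExample00", "solExample01", ...
    fn build_example_pool() -> rayon::ThreadPool {
        rayon::ThreadPoolBuilder::new()
            .thread_name(|i| format!("solExample{:02}", i))
            .build()
            .unwrap()
    }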
Michael Vines 2022-08-17 08:40:23 -07:00
parent c17f15a34f
commit 3f4731b37f
72 changed files with 261 additions and 242 deletions

View File

@ -105,7 +105,7 @@ impl BanksServer {
}
let server_bank_forks = bank_forks.clone();
Builder::new()
.name("solana-bank-forks-client".to_string())
.name("solBankForksCli".to_string())
.spawn(move || Self::run(server_bank_forks, transaction_receiver))
.unwrap();
Self::new(

View File

@ -88,7 +88,7 @@ impl RpcBanksService {
let connection_cache = connection_cache.clone();
let exit = exit.clone();
let thread_hdl = Builder::new()
.name("solana-rpc-banks".to_string())
.name("solRpcBanksSvc".to_string())
.spawn(move || {
Self::run(
listen_addr,

View File

@ -178,7 +178,7 @@ impl RpcClient {
)),
runtime: Some(
tokio::runtime::Builder::new_current_thread()
.thread_name("rpc-client")
.thread_name("solRpcClient")
.enable_io()
.enable_time()
.build()

View File

@ -91,7 +91,7 @@ impl TransactionExecutor {
let exit = exit.clone();
let cleared = cleared.clone();
Builder::new()
.name("sig_clear".to_string())
.name("solSigClear".to_string())
.spawn(move || {
let client = RpcClient::new_socket_with_commitment(
entrypoint_addr,

View File

@ -50,7 +50,7 @@ impl AccountsHashVerifier {
let exit = exit.clone();
let cluster_info = cluster_info.clone();
let t_accounts_hash_verifier = Builder::new()
.name("solana-hash-accounts".to_string())
.name("solAcctHashVer".to_string())
.spawn(move || {
let mut hashes = vec![];
loop {

View File

@ -215,7 +215,7 @@ impl AncestorHashesService {
ancestor_socket: Arc<UdpSocket>,
) -> JoinHandle<()> {
Builder::new()
.name("solana-ancestor-hashes-responses-service".to_string())
.name("solAncHashesSvc".to_string())
.spawn(move || {
let mut last_stats_report = Instant::now();
let mut stats = AncestorHashesResponsesStats::default();
@ -538,7 +538,7 @@ impl AncestorHashesService {
// to MAX_ANCESTOR_HASHES_SLOT_REQUESTS_PER_SECOND/second
let mut request_throttle = vec![];
Builder::new()
.name("solana-manage-ancestor-requests".to_string())
.name("solManAncReqs".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;

View File

@ -466,7 +466,7 @@ impl BankingStage {
let connection_cache = connection_cache.clone();
let bank_forks = bank_forks.clone();
Builder::new()
.name(format!("solana-banking-stage-tx-{}", i))
.name(format!("solBanknStgTx{:02}", i))
.spawn(move || {
Self::process_loop(
&verified_receiver,

View File

@ -255,7 +255,7 @@ impl BroadcastStage {
let blockstore = blockstore.clone();
let cluster_info = cluster_info.clone();
Builder::new()
.name("solana-broadcaster".to_string())
.name("solBroadcast".to_string())
.spawn(move || {
let _finalizer = Finalizer::new(exit);
Self::run(
@ -277,7 +277,7 @@ impl BroadcastStage {
let cluster_info = cluster_info.clone();
let bank_forks = bank_forks.clone();
let t = Builder::new()
.name("solana-broadcaster-transmit".to_string())
.name("solBroadcastTx".to_string())
.spawn(move || loop {
let res =
bs_transmit.transmit(&socket_receiver, &cluster_info, &sock, &bank_forks);
@ -295,7 +295,7 @@ impl BroadcastStage {
let mut bs_record = broadcast_stage_run.clone();
let btree = blockstore.clone();
let t = Builder::new()
.name("solana-broadcaster-record".to_string())
.name("solBroadcastRec".to_string())
.spawn(move || loop {
let res = bs_record.record(&blockstore_receiver, &btree);
let res = Self::handle_error(res, "solana-broadcaster-record");
@ -308,7 +308,7 @@ impl BroadcastStage {
}
let retransmit_thread = Builder::new()
.name("solana-broadcaster-retransmit".to_string())
.name("solBroadcastRtx".to_string())
.spawn(move || loop {
if let Some(res) = Self::handle_error(
Self::check_retransmit_signals(

View File

@ -31,7 +31,7 @@ impl CacheBlockMetaService {
) -> Self {
let exit = exit.clone();
let thread_hdl = Builder::new()
.name("solana-cache-block-time".to_string())
.name("solCacheBlkTime".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;

View File

@ -252,7 +252,7 @@ impl ClusterInfoVoteListener {
let exit = exit.clone();
let bank_forks = bank_forks.clone();
Builder::new()
.name("solana-cluster_info_vote_listener".to_string())
.name("solCiVoteLstnr".to_string())
.spawn(move || {
let _ = Self::recv_loop(
exit,
@ -266,7 +266,7 @@ impl ClusterInfoVoteListener {
};
let exit_ = exit.clone();
let bank_send_thread = Builder::new()
.name("solana-cluster_info_bank_send".to_string())
.name("solCiBankSend".to_string())
.spawn(move || {
let _ = Self::bank_send_loop(
exit_,
@ -278,7 +278,7 @@ impl ClusterInfoVoteListener {
.unwrap();
let send_thread = Builder::new()
.name("solana-cluster_info_process_votes".to_string())
.name("solCiProcVotes".to_string())
.spawn(move || {
let _ = Self::process_votes_loop(
exit,

View File

@ -48,7 +48,7 @@ impl ClusterSlotsService {
Self::initialize_lowest_slot(&blockstore, &cluster_info);
Self::initialize_epoch_slots(&bank_forks, &cluster_info);
let t_cluster_slots_service = Builder::new()
.name("solana-cluster-slots-service".to_string())
.name("solClusterSlots".to_string())
.spawn(move || {
Self::run(
blockstore,

View File

@ -69,7 +69,7 @@ impl AggregateCommitmentService {
sender,
Self {
t_commitment: Builder::new()
.name("solana-aggregate-stake-lockouts".to_string())
.name("solAggCommitSvc".to_string())
.spawn(move || loop {
if exit_.load(Ordering::Relaxed) {
break;

View File

@ -31,7 +31,7 @@ impl CompletedDataSetsService {
) -> Self {
let exit = exit.clone();
let thread_hdl = Builder::new()
.name("completed-data-set-service".to_string())
.name("solComplDataSet".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;

View File

@ -75,7 +75,7 @@ impl CostUpdateService {
cost_update_receiver: CostUpdateReceiver,
) -> Self {
let thread_hdl = Builder::new()
.name("solana-cost-update-service".to_string())
.name("solCostUpdtSvc".to_string())
.spawn(move || {
Self::service_loop(blockstore, cost_model, cost_update_receiver);
})

View File

@ -15,7 +15,7 @@ pub struct DropBankService {
impl DropBankService {
pub fn new(bank_receiver: Receiver<Vec<Arc<Bank>>>) -> Self {
let thread_hdl = Builder::new()
.name("sol-drop-b-service".to_string())
.name("solDropBankSrvc".to_string())
.spawn(move || {
for banks in bank_receiver.iter() {
let len = banks.len();

View File

@ -208,7 +208,7 @@ impl FetchStage {
let poh_recorder = poh_recorder.clone();
let fwd_thread_hdl = Builder::new()
.name("solana-fetch-stage-fwd-rcvr".to_string())
.name("solFetchStgFwRx".to_string())
.spawn(move || loop {
if let Err(e) =
Self::handle_forwarded_packets(&forward_receiver, &sender, &poh_recorder)
@ -226,7 +226,7 @@ impl FetchStage {
let exit = exit.clone();
let metrics_thread_hdl = Builder::new()
.name("solana-fetch-stage-metrics".to_string())
.name("solFetchStgMetr".to_string())
.spawn(move || loop {
sleep(Duration::from_secs(1));

View File

@ -84,7 +84,7 @@ impl FindPacketSenderStakeStage {
) -> Self {
let mut stats = FindPacketSenderStakeStats::default();
let thread_hdl = Builder::new()
.name("find-packet-sender-stake".to_string())
.name("solPktStake".to_string())
.spawn(move || loop {
match streamer::recv_packet_batches(&packet_receiver) {
Ok((mut batches, num_packets, recv_duration)) => {

View File

@ -76,7 +76,7 @@ impl LedgerCleanupService {
let blockstore_compact = blockstore.clone();
let t_cleanup = Builder::new()
.name("sol-led-cleanup".to_string())
.name("solLedgerClean".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;
@ -98,7 +98,7 @@ impl LedgerCleanupService {
.unwrap();
let t_compact = Builder::new()
.name("sol-led-compact".to_string())
.name("solLedgerComp".to_string())
.spawn(move || loop {
if exit_compact.load(Ordering::Relaxed) {
break;
@ -238,7 +238,7 @@ impl LedgerCleanupService {
let purge_complete1 = purge_complete.clone();
let last_compact_slot1 = last_compact_slot.clone();
let _t_purge = Builder::new()
.name("solana-ledger-purge".to_string())
.name("solLedgerPurge".to_string())
.spawn(move || {
let mut slot_update_time = Measure::start("slot_update");
*blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot;

View File

@ -26,7 +26,7 @@ impl LedgerMetricReportService {
pub fn new(blockstore: Arc<Blockstore>, exit: &Arc<AtomicBool>) -> Self {
let exit_signal = exit.clone();
let t_cf_metric = Builder::new()
.name("metric_report_rocksdb_cf_metrics".to_string())
.name("solRocksCfMtrcs".to_string())
.spawn(move || loop {
if exit_signal.load(Ordering::Relaxed) {
break;

View File

@ -28,7 +28,7 @@ impl PohTimingReportService {
let exit_signal = exit.clone();
let mut poh_timing_reporter = PohTimingReporter::default();
let t_poh_timing = Builder::new()
.name("poh_timing_report".to_string())
.name("solPohTimingRpt".to_string())
.spawn(move || loop {
if exit_signal.load(Ordering::Relaxed) {
break;

View File

@ -72,7 +72,7 @@ impl QosService {
let metrics_clone = Arc::clone(&metrics);
let reporting_thread = Some(
Builder::new()
.name("solana-qos-service-metrics-repoting".to_string())
.name("solQosSvcMetr".to_string())
.spawn(move || {
Self::reporting_loop(running_flag_clone, metrics_clone, report_receiver);
})

View File

@ -212,7 +212,7 @@ impl RepairService {
let exit = exit.clone();
let repair_info = repair_info.clone();
Builder::new()
.name("solana-repair-service".to_string())
.name("solRepairSvc".to_string())
.spawn(move || {
Self::run(
&blockstore,

View File

@ -97,7 +97,7 @@ const MAX_CONCURRENT_FORKS_TO_REPLAY: usize = 4;
lazy_static! {
static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new()
.num_threads(MAX_CONCURRENT_FORKS_TO_REPLAY)
.thread_name(|ix| format!("replay_{}", ix))
.thread_name(|ix| format!("solReplay{:02}", ix))
.build()
.unwrap();
}
@ -436,7 +436,7 @@ impl ReplayStage {
#[allow(clippy::cognitive_complexity)]
let t_replay = Builder::new()
.name("solana-replay-stage".to_string())
.name("solReplayStage".to_string())
.spawn(move || {
let verify_recyclers = VerifyRecyclers::default();
let _exit = Finalizer::new(exit.clone());

View File

@ -364,11 +364,11 @@ pub fn retransmitter(
let num_threads = get_thread_count().min(8).max(sockets.len());
let thread_pool = ThreadPoolBuilder::new()
.num_threads(num_threads)
.thread_name(|i| format!("retransmit-{}", i))
.thread_name(|i| format!("solRetransmit{:02}", i))
.build()
.unwrap();
Builder::new()
.name("solana-retransmitter".to_string())
.name("solRetransmittr".to_string())
.spawn(move || loop {
match retransmit(
&thread_pool,

View File

@ -30,7 +30,7 @@ impl RewardsRecorderService {
) -> Self {
let exit = exit.clone();
let thread_hdl = Builder::new()
.name("solana-rewards-writer".to_string())
.name("solRewardsWritr".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;

View File

@ -567,7 +567,7 @@ impl ServeRepair {
let recycler = PacketBatchRecycler::default();
Builder::new()
.name("solana-repair-listen".to_string())
.name("solRepairListen".to_string())
.spawn(move || {
let mut last_print = Instant::now();
let mut stats = ServeRepairStats::default();

View File

@ -46,7 +46,7 @@ impl ServeRepairService {
);
let (response_sender, response_receiver) = unbounded();
let t_responder = streamer::responder(
"serve-repairs",
"Repair",
serve_repair_socket,
response_receiver,
socket_addr_space,

View File

@ -135,7 +135,7 @@ impl ShredFetchStage {
})
.collect();
let modifier_hdl = Builder::new()
.name("solana-tvu-fetch-stage-packet-modifier".to_string())
.name("solTvuFetchPMod".to_string())
.spawn(move || {
let repair_context = repair_context
.as_ref()

View File

@ -37,7 +37,7 @@ pub(crate) fn spawn_shred_sigverify(
let recycler_cache = RecyclerCache::warmed();
let mut stats = ShredSigVerifyStats::new(Instant::now());
Builder::new()
.name("shred-verifier".to_string())
.name("solShredVerifr".to_string())
.spawn(move || loop {
match run_shred_sigverify(
&self_pubkey,

View File

@ -412,7 +412,7 @@ impl SigVerifyStage {
const MAX_DEDUPER_AGE: Duration = Duration::from_secs(2);
const MAX_DEDUPER_ITEMS: u32 = 1_000_000;
Builder::new()
.name("solana-verifier".to_string())
.name("solSigVerifier".to_string())
.spawn(move || {
let mut deduper = Deduper::new(MAX_DEDUPER_ITEMS, MAX_DEDUPER_AGE);
loop {

View File

@ -49,7 +49,7 @@ impl SnapshotPackagerService {
);
let t_snapshot_packager = Builder::new()
.name("snapshot-packager".to_string())
.name("solSnapshotPkgr".to_string())
.spawn(move || {
renice_this_thread(snapshot_config.packager_thread_niceness_adj).unwrap();
let mut snapshot_gossip_manager = if enable_gossip_push {

View File

@ -30,7 +30,7 @@ impl StakedNodesUpdaterService {
shared_staked_nodes_overrides: Arc<RwLock<HashMap<Pubkey, u64>>>,
) -> Self {
let thread_hdl = Builder::new()
.name("sol-sn-updater".to_string())
.name("solStakedNodeUd".to_string())
.spawn(move || {
let mut last_stakes = Instant::now();
while !exit.load(Ordering::Relaxed) {

View File

@ -22,7 +22,7 @@ impl StatsReporterService {
) -> Self {
let exit = exit.clone();
let thread_hdl = Builder::new()
.name("solana-stats-reporter".to_owned())
.name("solStatsReport".to_owned())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;

View File

@ -363,7 +363,7 @@ impl SystemMonitorService {
) -> Self {
info!("Starting SystemMonitorService");
let thread_hdl = Builder::new()
.name("system-monitor".to_string())
.name("solSystemMonitr".to_string())
.spawn(move || {
Self::run(
exit,

View File

@ -140,7 +140,7 @@ impl Tpu {
packet_receiver,
find_packet_sender_stake_sender,
staked_nodes.clone(),
"tpu-find-packet-sender-stake",
"Tpu",
);
let (vote_find_packet_sender_stake_sender, vote_find_packet_sender_stake_receiver) =
@ -150,7 +150,7 @@ impl Tpu {
vote_packet_receiver,
vote_find_packet_sender_stake_sender,
staked_nodes.clone(),
"tpu-vote-find-packet-sender-stake",
"Vote",
);
let (verified_sender, verified_receiver) = unbounded();

View File

@ -302,7 +302,7 @@ impl BlockstoreRootScan {
let exit = exit.clone();
Some(
Builder::new()
.name("blockstore-root-scan".to_string())
.name("solBStoreRtScan".to_string())
.spawn(move || blockstore.scan_and_fix_roots(&exit))
.unwrap(),
)
@ -1588,34 +1588,23 @@ impl<'a> ProcessBlockStore<'a> {
let previous_start_process = *self.start_progress.read().unwrap();
*self.start_progress.write().unwrap() = ValidatorStartProgress::LoadingLedger;
/*
#[allow(clippy::too_many_arguments)]
fn process_blockstore(
blockstore: &Blockstore,
bank_forks: &Arc<RwLock<BankForks>>,
leader_schedule_cache: &LeaderScheduleCache,
process_options: &blockstore_processor::ProcessOptions,
transaction_status_sender: Option<&TransactionStatusSender>,
cache_block_meta_sender: Option<&CacheBlockMetaSender>,
blockstore_root_scan: BlockstoreRootScan,
accounts_background_request_sender: &AbsRequestSender,
start_progress: &Arc<RwLock<ValidatorStartProgress>>,
) {
*/
let exit = Arc::new(AtomicBool::new(false));
if let Some(max_slot) = highest_slot(self.blockstore) {
let bank_forks = self.bank_forks.clone();
let exit = exit.clone();
let start_progress = self.start_progress.clone();
let _ = std::thread::spawn(move || {
while !exit.load(Ordering::Relaxed) {
let slot = bank_forks.read().unwrap().working_bank().slot();
*start_progress.write().unwrap() =
ValidatorStartProgress::ProcessingLedger { slot, max_slot };
sleep(Duration::from_secs(2));
}
});
let _ = Builder::new()
.name("solRptLdgrStat".to_string())
.spawn(move || {
while !exit.load(Ordering::Relaxed) {
let slot = bank_forks.read().unwrap().working_bank().slot();
*start_progress.write().unwrap() =
ValidatorStartProgress::ProcessingLedger { slot, max_slot };
sleep(Duration::from_secs(2));
}
})
.unwrap();
}
if let Err(e) = blockstore_processor::process_blockstore_from_root(
self.blockstore,

View File

@ -46,7 +46,7 @@ impl VotingService {
bank_forks: Arc<RwLock<BankForks>>,
) -> Self {
let thread_hdl = Builder::new()
.name("sol-vote-service".to_string())
.name("solVoteService".to_string())
.spawn(move || {
for vote_op in vote_receiver.iter() {
let rooted_bank = bank_forks.read().unwrap().root_bank().clone();

View File

@ -32,7 +32,7 @@ impl WarmQuicCacheService {
exit: Arc<AtomicBool>,
) -> Self {
let thread_hdl = Builder::new()
.name("sol-warm-quic-service".to_string())
.name("solWarmQuicSvc".to_string())
.spawn(move || {
let slot_jitter = thread_rng().gen_range(-CACHE_JITTER_SLOT, CACHE_JITTER_SLOT);
let mut maybe_last_leader = None;

View File

@ -375,7 +375,7 @@ impl WindowService {
inc_new_counter_error!("solana-check-duplicate-error", 1, 1);
};
Builder::new()
.name("solana-check-duplicate".to_string())
.name("solWinCheckDup".to_string())
.spawn(move || {
while !exit.load(Ordering::Relaxed) {
if let Err(e) = run_check_duplicate(
@ -408,11 +408,11 @@ impl WindowService {
};
let thread_pool = rayon::ThreadPoolBuilder::new()
.num_threads(get_thread_count().min(8))
.thread_name(|i| format!("window-insert-{}", i))
.thread_name(|i| format!("solWinInsert{:02}", i))
.build()
.unwrap();
Builder::new()
.name("solana-window-insert".to_string())
.name("solWinInsert".to_string())
.spawn(move || {
let handle_duplicate = |shred| {
let _ = check_duplicate_sender.send(shred);

View File

@ -46,7 +46,7 @@ use {
lazy_static! {
static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new()
.num_threads(get_max_thread_count())
.thread_name(|ix| format!("entry_{}", ix))
.thread_name(|ix| format!("solEntry{:02}", ix))
.build()
.unwrap();
}
@ -525,21 +525,24 @@ pub fn start_verify_transactions(
let tx_offset_recycler = verify_recyclers.tx_offset_recycler;
let out_recycler = verify_recyclers.out_recycler;
let num_packets = entry_txs.len();
let gpu_verify_thread = thread::spawn(move || {
let mut verify_time = Measure::start("sigverify");
sigverify::ed25519_verify(
&mut packet_batches,
&tx_offset_recycler,
&out_recycler,
false,
num_packets,
);
let verified = packet_batches
.iter()
.all(|batch| batch.iter().all(|p| !p.meta.discard()));
verify_time.stop();
(verified, verify_time.as_us())
});
let gpu_verify_thread = thread::Builder::new()
.name("solGpuSigVerify".into())
.spawn(move || {
let mut verify_time = Measure::start("sigverify");
sigverify::ed25519_verify(
&mut packet_batches,
&tx_offset_recycler,
&out_recycler,
false,
num_packets,
);
let verified = packet_batches
.iter()
.all(|batch| batch.iter().all(|p| !p.meta.discard()));
verify_time.stop();
(verified, verify_time.as_us())
})
.unwrap();
Ok(EntrySigVerificationState {
verification_status: EntryVerificationStatus::Pending,
entries: Some(entries),
@ -770,25 +773,28 @@ impl EntrySlice for [Entry] {
let hashes = Arc::new(Mutex::new(hashes_pinned));
let hashes_clone = hashes.clone();
let gpu_verify_thread = thread::spawn(move || {
let mut hashes = hashes_clone.lock().unwrap();
let gpu_wait = Instant::now();
let res;
unsafe {
res = (api.poh_verify_many)(
hashes.as_mut_ptr() as *mut u8,
num_hashes_vec.as_ptr(),
length,
1,
);
}
assert!(res == 0, "GPU PoH verify many failed");
inc_new_counter_info!(
"entry_verify-gpu_thread",
timing::duration_as_us(&gpu_wait.elapsed()) as usize
);
timing::duration_as_us(&gpu_wait.elapsed())
});
let gpu_verify_thread = thread::Builder::new()
.name("solGpuPohVerify".into())
.spawn(move || {
let mut hashes = hashes_clone.lock().unwrap();
let gpu_wait = Instant::now();
let res;
unsafe {
res = (api.poh_verify_many)(
hashes.as_mut_ptr() as *mut u8,
num_hashes_vec.as_ptr(),
length,
1,
);
}
assert!(res == 0, "GPU PoH verify many failed");
inc_new_counter_info!(
"entry_verify-gpu_thread",
timing::duration_as_us(&gpu_wait.elapsed()) as usize
);
timing::duration_as_us(&gpu_wait.elapsed())
})
.unwrap();
let verifications = PAR_THREAD_POOL.install(|| {
self.into_par_iter()

View File

@ -48,7 +48,7 @@ impl SlotStatusObserver {
slot_status_notifier: SlotStatusNotifier,
) -> JoinHandle<()> {
Builder::new()
.name("bank_notification_receiver".to_string())
.name("solBankNotif".to_string())
.spawn(move || {
while !exit.load(Ordering::Relaxed) {
if let Ok(slot) = bank_notification_receiver.recv() {

View File

@ -1682,11 +1682,11 @@ impl ClusterInfo {
) -> JoinHandle<()> {
let thread_pool = ThreadPoolBuilder::new()
.num_threads(std::cmp::min(get_thread_count(), 8))
.thread_name(|i| format!("ClusterInfo::gossip-{}", i))
.thread_name(|i| format!("solRunGossip{:02}", i))
.build()
.unwrap();
Builder::new()
.name("solana-gossip".to_string())
.name("solGossip".to_string())
.spawn(move || {
let mut last_push = timestamp();
let mut last_contact_info_trace = timestamp();
@ -2560,7 +2560,7 @@ impl ClusterInfo {
) -> JoinHandle<()> {
let thread_pool = ThreadPoolBuilder::new()
.num_threads(get_thread_count().min(8))
.thread_name(|i| format!("gossip-consume-{}", i))
.thread_name(|i| format!("solGossipCons{:02}", i))
.build()
.unwrap();
let run_consume = move || {
@ -2576,7 +2576,7 @@ impl ClusterInfo {
}
}
};
let thread_name = String::from("gossip-consume");
let thread_name = String::from("solGossipConsum");
Builder::new().name(thread_name).spawn(run_consume).unwrap()
}
@ -2592,11 +2592,11 @@ impl ClusterInfo {
let recycler = PacketBatchRecycler::default();
let thread_pool = ThreadPoolBuilder::new()
.num_threads(get_thread_count().min(8))
.thread_name(|i| format!("sol-gossip-work-{}", i))
.thread_name(|i| format!("solGossipWork{:02}", i))
.build()
.unwrap();
Builder::new()
.name("solana-listen".to_string())
.name("solGossipListen".to_string())
.spawn(move || {
while !exit.load(Ordering::Relaxed) {
if let Err(err) = self.run_listen(

View File

@ -80,7 +80,7 @@ impl GossipService {
exit.clone(),
);
let t_responder = streamer::responder(
"gossip",
"Gossip",
gossip_socket,
response_receiver,
socket_addr_space,

View File

@ -608,7 +608,7 @@ fn network_run_pull(
fn build_gossip_thread_pool() -> ThreadPool {
ThreadPoolBuilder::new()
.num_threads(get_thread_count().min(2))
.thread_name(|i| format!("crds_gossip_test_{}", i))
.thread_name(|i| format!("gossipTest{:02}", i))
.build()
.unwrap()
}

View File

@ -164,35 +164,37 @@ pub async fn upload_confirmed_blocks(
let sender = sender.clone();
let slot_receiver = slot_receiver.clone();
let exit = exit.clone();
std::thread::spawn(move || {
let start = Instant::now();
let mut num_blocks_read = 0;
while let Ok(slot) = slot_receiver.recv() {
if exit.load(Ordering::Relaxed) {
break;
}
let _ = match blockstore.get_rooted_block(slot, true) {
Ok(confirmed_block) => {
num_blocks_read += 1;
sender.send((slot, Some(confirmed_block)))
}
Err(err) => {
warn!(
"Failed to get load confirmed block from slot {}: {:?}",
slot, err
);
sender.send((slot, None))
}
};
}
BlockstoreLoadStats {
num_blocks_read,
elapsed: start.elapsed(),
}
})
std::thread::Builder::new()
.name("solBigTGetBlk".into())
.spawn(move || {
let start = Instant::now();
let mut num_blocks_read = 0;
while let Ok(slot) = slot_receiver.recv() {
if exit.load(Ordering::Relaxed) {
break;
}
let _ = match blockstore.get_rooted_block(slot, true) {
Ok(confirmed_block) => {
num_blocks_read += 1;
sender.send((slot, Some(confirmed_block)))
}
Err(err) => {
warn!(
"Failed to get load confirmed block from slot {}: {:?}",
slot, err
);
sender.send((slot, None))
}
};
}
BlockstoreLoadStats {
num_blocks_read,
elapsed: start.elapsed(),
}
})
.unwrap()
})
.collect(),
receiver,

View File

@ -50,7 +50,7 @@ impl BigTableUploadService {
) -> Self {
info!("Starting BigTable upload service");
let thread = Builder::new()
.name("bigtable-upload".to_string())
.name("solBigTUpload".to_string())
.spawn(move || {
Self::run(
runtime,

View File

@ -90,12 +90,12 @@ pub use {
lazy_static! {
static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new()
.num_threads(get_max_thread_count())
.thread_name(|ix| format!("blockstore_{}", ix))
.thread_name(|ix| format!("solBstore{:02}", ix))
.build()
.unwrap();
static ref PAR_THREAD_POOL_ALL_CPUS: ThreadPool = rayon::ThreadPoolBuilder::new()
.num_threads(num_cpus::get())
.thread_name(|ix| format!("blockstore_{}", ix))
.thread_name(|ix| format!("solBstoreAll{:02}", ix))
.build()
.unwrap();
}

View File

@ -105,7 +105,7 @@ struct ReplayEntry {
lazy_static! {
static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new()
.num_threads(get_max_thread_count())
.thread_name(|ix| format!("blockstore_processor_{}", ix))
.thread_name(|ix| format!("solBstoreProc{:02}", ix))
.build()
.unwrap();
}

View File

@ -19,7 +19,7 @@ use {
lazy_static! {
static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new()
.num_threads(get_thread_count())
.thread_name(|ix| format!("shredder_{}", ix))
.thread_name(|ix| format!("solShredder{:02}", ix))
.build()
.unwrap();
}

View File

@ -26,7 +26,7 @@ const SIGN_SHRED_GPU_MIN: usize = 256;
lazy_static! {
static ref SIGVERIFY_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new()
.num_threads(get_thread_count())
.thread_name(|ix| format!("sigverify_shreds_{}", ix))
.thread_name(|ix| format!("solSvrfyShred{:02}", ix))
.build()
.unwrap();
}

View File

@ -173,7 +173,11 @@ impl MetricsAgent {
max_points_per_sec: usize,
) -> Self {
let (sender, receiver) = unbounded::<MetricsCommand>();
thread::spawn(move || Self::run(&receiver, &writer, write_frequency, max_points_per_sec));
thread::Builder::new()
.name("solMetricsAgent".into())
.spawn(move || Self::run(&receiver, &writer, write_frequency, max_points_per_sec))
.unwrap();
Self { sender }
}

View File

@ -144,15 +144,18 @@ fn do_verify_reachable_ports(
for (port, tcp_listener) in tcp_listeners {
let (sender, receiver) = unbounded();
let listening_addr = tcp_listener.local_addr().unwrap();
let thread_handle = std::thread::spawn(move || {
debug!("Waiting for incoming connection on tcp/{}", port);
match tcp_listener.incoming().next() {
Some(_) => sender
.send(())
.unwrap_or_else(|err| warn!("send failure: {}", err)),
None => warn!("tcp incoming failed"),
}
});
let thread_handle = std::thread::Builder::new()
.name(format!("solVrfyTcp{:05}", port))
.spawn(move || {
debug!("Waiting for incoming connection on tcp/{}", port);
match tcp_listener.incoming().next() {
Some(_) => sender
.send(())
.unwrap_or_else(|err| warn!("send failure: {}", err)),
None => warn!("tcp incoming failed"),
}
})
.unwrap();
match receiver.recv_timeout(timeout) {
Ok(_) => {
info!("tcp/{} is reachable", port);
@ -222,33 +225,37 @@ fn do_verify_reachable_ports(
let port = udp_socket.local_addr().unwrap().port();
let udp_socket = udp_socket.try_clone().expect("Unable to clone udp socket");
let reachable_ports = reachable_ports.clone();
std::thread::spawn(move || {
let start = Instant::now();
let original_read_timeout = udp_socket.read_timeout().unwrap();
udp_socket
.set_read_timeout(Some(Duration::from_millis(250)))
.unwrap();
loop {
if reachable_ports.read().unwrap().contains(&port)
|| Instant::now().duration_since(start) >= timeout
{
break;
}
let recv_result = udp_socket.recv(&mut [0; 1]);
debug!(
"Waited for incoming datagram on udp/{}: {:?}",
port, recv_result
);
if recv_result.is_ok() {
reachable_ports.write().unwrap().insert(port);
break;
}
}
udp_socket.set_read_timeout(original_read_timeout).unwrap();
})
std::thread::Builder::new()
.name(format!("solVrfyUdp{:05}", port))
.spawn(move || {
let start = Instant::now();
let original_read_timeout = udp_socket.read_timeout().unwrap();
udp_socket
.set_read_timeout(Some(Duration::from_millis(250)))
.unwrap();
loop {
if reachable_ports.read().unwrap().contains(&port)
|| Instant::now().duration_since(start) >= timeout
{
break;
}
let recv_result = udp_socket.recv(&mut [0; 1]);
debug!(
"Waited for incoming datagram on udp/{}: {:?}",
port, recv_result
);
if recv_result.is_ok() {
reachable_ports.write().unwrap().insert(port);
break;
}
}
udp_socket.set_read_timeout(original_read_timeout).unwrap();
})
.unwrap()
})
.collect();

View File

@ -45,7 +45,7 @@ pub const VERIFY_MIN_PACKETS_PER_THREAD: usize = 128;
lazy_static! {
static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new()
.num_threads(get_thread_count())
.thread_name(|ix| format!("sigverify_{}", ix))
.thread_name(|ix| format!("solSigVerify{:02}", ix))
.build()
.unwrap();
}

View File

@ -106,7 +106,7 @@ impl PohService {
let poh_exit_ = poh_exit.clone();
let poh_config = poh_config.clone();
let tick_producer = Builder::new()
.name("solana-poh-service-tick_producer".to_string())
.name("solPohTickProd".to_string())
.spawn(move || {
solana_sys_tuner::request_realtime_poh();
if poh_config.hashes_per_tick.is_none() {
@ -452,7 +452,7 @@ mod tests {
let exit = exit.clone();
Builder::new()
.name("solana-poh-service-entry_producer".to_string())
.name("solPohEntryProd".to_string())
.spawn(move || {
let now = Instant::now();
let mut total_us = 0;

View File

@ -71,7 +71,7 @@ impl OptimisticallyConfirmedBankTracker {
let mut last_notified_confirmed_slot: Slot = 0;
let mut highest_confirmed_slot: Slot = 0;
let thread_hdl = Builder::new()
.name("solana-optimistic-bank-tracker".to_string())
.name("solOpConfBnkTrk".to_string())
.spawn(move || loop {
if exit_.load(Ordering::Relaxed) {
break;

View File

@ -24,7 +24,7 @@ impl RpcCompletedSlotsService {
exit: Arc<AtomicBool>,
) -> JoinHandle<()> {
Builder::new()
.name("solana-rpc-completed-slots-service".to_string())
.name("solRpcComplSlot".to_string())
.spawn(move || loop {
// received exit signal, shutdown the service
if exit.load(Ordering::Relaxed) {

View File

@ -85,7 +85,7 @@ impl PubSubService {
let (trigger, tripwire) = Tripwire::new();
let thread_hdl = Builder::new()
.name("solana-pubsub".to_string())
.name("solRpcPubSub".to_string())
.spawn(move || {
let runtime = tokio::runtime::Builder::new_multi_thread()
.worker_threads(pubsub_config.worker_threads)
@ -416,6 +416,6 @@ mod tests {
let (_trigger, pubsub_service) =
PubSubService::new(PubSubConfig::default(), &subscriptions, pubsub_addr);
let thread = pubsub_service.thread_hdl.thread();
assert_eq!(thread.name().unwrap(), "solana-pubsub");
assert_eq!(thread.name().unwrap(), "solRpcPubSub");
}
}

View File

@ -385,7 +385,7 @@ impl JsonRpcService {
tokio::runtime::Builder::new_multi_thread()
.worker_threads(rpc_threads)
.on_thread_start(move || renice_this_thread(rpc_niceness_adj).unwrap())
.thread_name("sol-rpc-el")
.thread_name("solRpcEl")
.enable_all()
.build()
.expect("Runtime"),
@ -483,7 +483,7 @@ impl JsonRpcService {
let (close_handle_sender, close_handle_receiver) = unbounded();
let thread_hdl = Builder::new()
.name("solana-jsonrpc".to_string())
.name("solJsonRpcSvc".to_string())
.spawn(move || {
renice_this_thread(rpc_niceness_adj).unwrap();
@ -648,7 +648,7 @@ mod tests {
)
.expect("assume successful JsonRpcService start");
let thread = rpc_service.thread_hdl.thread();
assert_eq!(thread.name().unwrap(), "solana-jsonrpc");
assert_eq!(thread.name().unwrap(), "solJsonRpcSvc");
assert_eq!(
10_000,

View File

@ -632,11 +632,11 @@ impl RpcSubscriptions {
} else {
Some(
Builder::new()
.name("solana-rpc-notifications".to_string())
.name("solRpcNotifier".to_string())
.spawn(move || {
let pool = rayon::ThreadPoolBuilder::new()
.num_threads(notification_threads)
.thread_name(|i| format!("sol-sub-notif-{}", i))
.thread_name(|i| format!("solRpcNotify{:02}", i))
.build()
.unwrap();
pool.install(|| {

View File

@ -37,7 +37,7 @@ impl TransactionStatusService {
) -> Self {
let exit = exit.clone();
let thread_hdl = Builder::new()
.name("solana-transaction-status-writer".to_string())
.name("solTxStatusWrtr".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;

View File

@ -461,7 +461,7 @@ impl AccountsBackgroundService {
let mut total_remove_slots_time = 0;
let mut last_expiration_check_time = Instant::now();
let t_background = Builder::new()
.name("solana-bg-accounts".to_string())
.name("solBgAccounts".to_string())
.spawn(move || {
let mut stats = StatsManager::new();
let mut last_snapshot_end_time = None;

View File

@ -1707,7 +1707,7 @@ pub fn make_min_priority_thread_pool() -> ThreadPool {
// Use lower thread count to reduce priority.
let num_threads = quarter_thread_count();
rayon::ThreadPoolBuilder::new()
.thread_name(|i| format!("solana-cleanup-accounts-{}", i))
.thread_name(|i| format!("solAccountsLo{:02}", i))
.num_threads(num_threads)
.build()
.unwrap()
@ -1946,7 +1946,7 @@ impl AccountsDb {
file_size: DEFAULT_FILE_SIZE,
thread_pool: rayon::ThreadPoolBuilder::new()
.num_threads(num_threads)
.thread_name(|i| format!("solana-db-accounts-{}", i))
.thread_name(|i| format!("solAccounts{:02}", i))
.stack_size(ACCOUNTS_STACK_SIZE)
.build()
.unwrap(),
@ -2299,7 +2299,7 @@ impl AccountsDb {
fn start_background_hasher(&mut self) {
let (sender, receiver) = unbounded();
Builder::new()
.name("solana-db-store-hasher-accounts".to_string())
.name("solDbStoreHashr".to_string())
.spawn(move || {
Self::background_hasher(receiver);
})

View File

@ -72,7 +72,7 @@ impl BgThreads {
// note that using rayon here causes us to exhaust # rayon threads and many tests running in parallel deadlock
Builder::new()
.name("solana-idx-flusher".to_string())
.name(format!("solIdxFlusher{:02}", idx))
.spawn(move || {
storage_.background(exit_, in_mem_, can_advance_age);
})

View File

@ -312,7 +312,7 @@ impl BankClient {
let thread_bank = bank.clone();
let bank = bank.clone();
Builder::new()
.name("solana-bank-client".to_string())
.name("solBankClient".to_string())
.spawn(move || Self::run(&thread_bank, transaction_receiver))
.unwrap();
Self {

View File

@ -791,7 +791,7 @@ where
let accounts_db = Arc::new(accounts_db);
let accounts_db_clone = accounts_db.clone();
let handle = Builder::new()
.name("notify_account_restore_from_snapshot".to_string())
.name("solNfyAccRestor".to_string())
.spawn(move || {
accounts_db_clone.notify_account_restore_from_snapshot();
})

View File

@ -75,7 +75,7 @@ impl SharedBuffer {
let bg_reader_data = instance.bg_reader_data.clone();
let handle = Builder::new()
.name("solana-compressed_file_reader".to_string())
.name("solCompFileRead".to_string())
.spawn(move || {
// importantly, this thread does NOT hold a refcount on the arc of 'instance'
bg_reader_data.read_entire_file_in_bg(reader, total_buffer_budget, chunk_size);

View File

@ -117,7 +117,7 @@ pub(crate) mod tests {
let verify_ = Arc::clone(verify);
verify.start(|| {
Builder::new()
.name("solana-bg-hash-verifier".to_string())
.name("solBgHashVerfy".to_string())
.spawn(move || {
// should have been marked not complete before thread started
assert!(!verify_.check_complete());

View File

@ -410,7 +410,7 @@ impl SendTransactionService {
config
);
Builder::new()
.name("send-tx-receive".to_string())
.name("solStxReceive".to_string())
.spawn(move || loop {
let recv_timeout_ms = config.batch_send_rate_ms;
let stats = &stats_report.stats;
@ -510,7 +510,7 @@ impl SendTransactionService {
config
);
Builder::new()
.name("send-tx-retry".to_string())
.name("solStxRetry".to_string())
.spawn(move || loop {
let retry_interval_ms = config.retry_rate_ms;
let stats = &stats_report.stats;

View File

@ -323,11 +323,14 @@ pub fn spawn_server(
stats,
)
}?;
let handle = thread::spawn(move || {
if let Err(e) = runtime.block_on(task) {
warn!("error from runtime.block_on: {:?}", e);
}
});
let handle = thread::Builder::new()
.name("solQuicServer".into())
.spawn(move || {
if let Err(e) = runtime.block_on(task) {
warn!("error from runtime.block_on: {:?}", e);
}
})
.unwrap();
Ok(handle)
}

View File

@ -168,7 +168,7 @@ pub fn receiver(
let res = socket.set_read_timeout(Some(Duration::new(1, 0)));
assert!(res.is_ok(), "streamer::receiver set_read_timeout error");
Builder::new()
.name("solana-receiver".to_string())
.name("solReceiver".to_string())
.spawn(move || {
let _ = recv_loop(
&socket,
@ -372,7 +372,7 @@ pub fn responder(
stats_reporter_sender: Option<Sender<Box<dyn FnOnce() + Send>>>,
) -> JoinHandle<()> {
Builder::new()
.name(format!("solana-responder-{}", name))
.name(format!("solRspndr{}", name))
.spawn(move || {
let mut errors = 0;
let mut last_error = None;
@ -477,7 +477,7 @@ mod test {
let t_responder = {
let (s_responder, r_responder) = unbounded();
let t_responder = responder(
"streamer_send_test",
"SendTest",
Arc::new(send),
r_responder,
SocketAddrSpace::Unspecified,

View File

@ -192,22 +192,25 @@ impl AdminRpc for AdminRpcImpl {
fn exit(&self, meta: Self::Metadata) -> Result<()> {
debug!("exit admin rpc request received");
thread::spawn(move || {
// Delay exit signal until this RPC request completes, otherwise the caller of `exit` might
// receive a confusing error as the validator shuts down before a response is sent back.
thread::sleep(Duration::from_millis(100));
warn!("validator exit requested");
meta.validator_exit.write().unwrap().exit();
// TODO: Debug why Exit doesn't always cause the validator to fully exit
// (rocksdb background processing or some other stuck thread perhaps?).
//
// If the process is still alive after five seconds, exit harder
thread::sleep(Duration::from_secs(5));
warn!("validator exit timeout");
std::process::exit(0);
});
thread::Builder::new()
.name("solProcessExit".into())
.spawn(move || {
// Delay exit signal until this RPC request completes, otherwise the caller of `exit` might
// receive a confusing error as the validator shuts down before a response is sent back.
thread::sleep(Duration::from_millis(100));
warn!("validator exit requested");
meta.validator_exit.write().unwrap().exit();
// TODO: Debug why Exit doesn't always cause the validator to fully exit
// (rocksdb background processing or some other stuck thread perhaps?).
//
// If the process is still alive after five seconds, exit harder
thread::sleep(Duration::from_secs(5));
warn!("validator exit timeout");
std::process::exit(0);
})
.unwrap();
Ok(())
}
@ -375,14 +378,14 @@ pub fn run(ledger_path: &Path, metadata: AdminRpcRequestMetadata) {
let admin_rpc_path = admin_rpc_path(ledger_path);
let event_loop = tokio::runtime::Builder::new_multi_thread()
.thread_name("sol-adminrpc-el")
.thread_name("solAdminRpcEl")
.worker_threads(3) // Three still seems like a lot, and better than the default of available core count
.enable_all()
.build()
.unwrap();
Builder::new()
.name("solana-adminrpc".to_string())
.name("solAdminRpc".to_string())
.spawn(move || {
let mut io = MetaIoHandler::default();
io.extend_with(AdminRpcImpl.to_delegate());

View File

@ -66,15 +66,20 @@ pub fn redirect_stderr_to_file(logfile: Option<String>) -> Option<JoinHandle<()>
solana_logger::setup_with_default(filter);
redirect_stderr(&logfile);
Some(std::thread::spawn(move || {
for signal in signals.forever() {
info!(
"received SIGUSR1 ({}), reopening log file: {:?}",
signal, logfile
);
redirect_stderr(&logfile);
}
}))
Some(
std::thread::Builder::new()
.name("solSigUsr1".into())
.spawn(move || {
for signal in signals.forever() {
info!(
"received SIGUSR1 ({}), reopening log file: {:?}",
signal, logfile
);
redirect_stderr(&logfile);
}
})
.unwrap(),
)
}
#[cfg(not(unix))]
{