solana/core/src/tpu.rs

//! The `tpu` module implements the Transaction Processing Unit, a
//! multi-stage transaction processing pipeline in software.
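//!
//! The stage wiring below is summarized here for orientation: `FetchStage`
//! and `ClusterInfoVoteListener` feed raw packets into `SigVerifyStage`,
//! whose verified output is consumed by `BankingStage`; the entries recorded
//! against the working bank are then sent to the cluster by `BroadcastStage`.
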
use crate::banking_stage::BankingStage;
use crate::blocktree::Blocktree;
use crate::broadcast_stage::BroadcastStage;
use crate::cluster_info::ClusterInfo;
use crate::cluster_info_vote_listener::ClusterInfoVoteListener;
use crate::fetch_stage::FetchStage;
use crate::poh_recorder::{PohRecorder, WorkingBankEntries};
use crate::service::Service;
use crate::sigverify_stage::SigVerifyStage;
use solana_sdk::pubkey::Pubkey;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver};
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
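
/// Bundle of the stage threads that make up the TPU pipeline; owning them
/// together lets the TPU signal exit and join them as a unit.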
pub struct LeaderServices {
    fetch_stage: FetchStage,
    sigverify_stage: SigVerifyStage,
    banking_stage: BankingStage,
    cluster_info_vote_listener: ClusterInfoVoteListener,
    broadcast_stage: BroadcastStage,
}

impl LeaderServices {
    fn new(
        fetch_stage: FetchStage,
        sigverify_stage: SigVerifyStage,
        banking_stage: BankingStage,
        cluster_info_vote_listener: ClusterInfoVoteListener,
        broadcast_stage: BroadcastStage,
    ) -> Self {
        LeaderServices {
            fetch_stage,
            sigverify_stage,
            banking_stage,
            cluster_info_vote_listener,
            broadcast_stage,
        }
    }
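
    /// Signal the pipeline to shut down. Only the fetch stage is closed
    /// explicitly; the remaining stages are expected to wind down as the
    /// shared exit flag is observed and their input channels disconnect.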
    pub fn exit(&self) {
        self.fetch_stage.close();
    }

    fn join(self) -> thread::Result<()> {
        let mut results = vec![];
        results.push(self.fetch_stage.join());
        results.push(self.sigverify_stage.join());
        results.push(self.cluster_info_vote_listener.join());
        results.push(self.banking_stage.join());
        let broadcast_result = self.broadcast_stage.join();
        for result in results {
            result?;
        }
        let _ = broadcast_result?;
        Ok(())
    }

    pub fn close(self) -> thread::Result<()> {
        self.exit();
        self.join()
    }
}
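
/// The Transaction Processing Unit: the leader-side pipeline that ingests
/// transactions, verifies their signatures, records them against the working
/// bank, and broadcasts the resulting entries to the cluster.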
pub struct Tpu {
    leader_services: LeaderServices,
    exit: Arc<AtomicBool>,
    pub id: Pubkey,
}

impl Tpu {
    pub fn new(
        id: Pubkey,
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        poh_recorder: &Arc<Mutex<PohRecorder>>,
        entry_receiver: Receiver<WorkingBankEntries>,
        transactions_sockets: Vec<UdpSocket>,
        broadcast_socket: UdpSocket,
        sigverify_disabled: bool,
        blocktree: &Arc<Blocktree>,
        exit: &Arc<AtomicBool>,
    ) -> Self {
        cluster_info.write().unwrap().set_leader(id);
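
        // Wire the stages together: packets from the fetch stage and gossiped
        // votes are funneled through `packet_sender` into signature verification.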
        let (packet_sender, packet_receiver) = channel();
        let fetch_stage =
            FetchStage::new_with_sender(transactions_sockets, exit.clone(), &packet_sender.clone());
        let cluster_info_vote_listener =
            ClusterInfoVoteListener::new(exit.clone(), cluster_info.clone(), packet_sender);

        let (sigverify_stage, verified_receiver) =
            SigVerifyStage::new(packet_receiver, sigverify_disabled);

        let banking_stage = BankingStage::new(&cluster_info, poh_recorder, verified_receiver);

        let broadcast_stage = BroadcastStage::new(
            broadcast_socket,
            cluster_info.clone(),
            entry_receiver,
            exit.clone(),
            blocktree,
        );

        let leader_services = LeaderServices::new(
            fetch_stage,
            sigverify_stage,
            banking_stage,
            cluster_info_vote_listener,
            broadcast_stage,
        );
        Self {
            leader_services,
            exit: exit.clone(),
            id,
        }
    }

    pub fn exit(&self) {
        self.exit.store(true, Ordering::Relaxed);
    }

    pub fn is_exited(&self) -> bool {
        self.exit.load(Ordering::Relaxed)
    }

    pub fn close(self) -> thread::Result<()> {
        self.exit();
        self.join()
    }
}

impl Service for Tpu {
    type JoinReturnType = ();

    fn join(self) -> thread::Result<()> {
        self.leader_services.join()
    }
}