// solana/src/tpu.rs

//! The `tpu` module implements the Transaction Processing Unit, a
//! multi-stage transaction processing pipeline in software.
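//!
//! The TPU runs in one of two modes. As the leader (`TpuMode::Leader`),
//! packets flow through a four-stage pipeline: `FetchStage` pulls
//! transaction packets off the UDP sockets, `SigVerifyStage` discards
//! packets whose signatures fail verification, `BankingStage` applies the
//! surviving transactions to the `Bank`, and `BroadcastService` transmits
//! the resulting entries to the cluster. When another node is the leader
//! (`TpuMode::Forwarder`), a lone `TpuForwarder` relays incoming
//! transactions to that leader instead.
//!
//! A construction sketch, assuming the caller has already bound the
//! sockets and built the `Bank` and `ClusterInfo` (hypothetical values,
//! not compiled as a doctest):
//!
//! ```ignore
//! let (to_validator_sender, _to_validator_receiver) = channel();
//! let (blob_sender, _blob_receiver) = channel();
//! let tpu = Tpu::new(
//!     &bank,
//!     PohServiceConfig::default(),
//!     transactions_sockets,
//!     broadcast_socket,
//!     cluster_info,
//!     false,                // sigverify_disabled: run signature verification
//!     max_tick_height,
//!     0,                    // blob_index
//!     &last_entry_id,
//!     leader_id,
//!     &to_validator_sender,
//!     &blob_sender,
//!     true,                 // start in leader mode
//! );
//! ```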
use crate::bank::Bank;
use crate::banking_stage::BankingStage;
use crate::broadcast_service::BroadcastService;
use crate::cluster_info::ClusterInfo;
use crate::cluster_info_vote_listener::ClusterInfoVoteListener;
use crate::fetch_stage::FetchStage;
use crate::poh_service::PohServiceConfig;
use crate::service::Service;
use crate::sigverify_stage::SigVerifyStage;
use crate::streamer::BlobSender;
use crate::tpu_forwarder::TpuForwarder;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::{Arc, RwLock};
use std::thread;

/// Tick height at which the TPU should initiate a leader rotation.
pub type TpuReturnType = u64;
pub type TpuRotationSender = Sender<TpuReturnType>;
pub type TpuRotationReceiver = Receiver<TpuReturnType>;
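
/// Which set of services the TPU is running: leader mode produces and
/// broadcasts entries; forwarder mode relays transactions to the leader.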
pub enum TpuMode {
Leader(LeaderServices),
Forwarder(ForwarderServices),
}
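
/// Handles to the services that run while this node is the leader.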
pub struct LeaderServices {
fetch_stage: FetchStage,
sigverify_stage: SigVerifyStage,
banking_stage: BankingStage,
cluster_info_vote_listener: ClusterInfoVoteListener,
broadcast_service: BroadcastService,
}
impl LeaderServices {
fn new(
fetch_stage: FetchStage,
sigverify_stage: SigVerifyStage,
banking_stage: BankingStage,
cluster_info_vote_listener: ClusterInfoVoteListener,
broadcast_service: BroadcastService,
) -> Self {
LeaderServices {
fetch_stage,
sigverify_stage,
banking_stage,
cluster_info_vote_listener,
broadcast_service,
}
}
}
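
/// Handle to the service that runs while another node is the leader.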
pub struct ForwarderServices {
tpu_forwarder: TpuForwarder,
}
impl ForwarderServices {
fn new(tpu_forwarder: TpuForwarder) -> Self {
ForwarderServices { tpu_forwarder }
}
}
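
/// The Transaction Processing Unit: owns the current mode's services and
/// the shared exit signal used to shut them down.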
pub struct Tpu {
tpu_mode: Option<TpuMode>,
exit: Arc<AtomicBool>,
}
impl Tpu {
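    /// Create a TPU, starting in leader or forwarder mode per `is_leader`.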
#[allow(clippy::too_many_arguments)]
pub fn new(
bank: &Arc<Bank>,
tick_duration: PohServiceConfig,
transactions_sockets: Vec<UdpSocket>,
broadcast_socket: UdpSocket,
cluster_info: Arc<RwLock<ClusterInfo>>,
sigverify_disabled: bool,
max_tick_height: u64,
blob_index: u64,
last_entry_id: &Hash,
leader_id: Pubkey,
to_validator_sender: &TpuRotationSender,
blob_sender: &BlobSender,
is_leader: bool,
) -> Self {
let mut tpu = Self {
tpu_mode: None,
exit: Arc::new(AtomicBool::new(false)),
};
if is_leader {
tpu.switch_to_leader(
bank,
tick_duration,
transactions_sockets,
broadcast_socket,
cluster_info,
sigverify_disabled,
max_tick_height,
blob_index,
last_entry_id,
leader_id,
to_validator_sender,
blob_sender,
);
} else {
tpu.switch_to_forwarder(transactions_sockets, cluster_info);
}
tpu
}
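
    /// Signal the current mode's services, if any, to shut down.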
fn tpu_mode_close(&self) {
match &self.tpu_mode {
Some(TpuMode::Leader(svcs)) => {
svcs.fetch_stage.close();
}
Some(TpuMode::Forwarder(svcs)) => {
svcs.tpu_forwarder.close();
}
None => (),
}
}
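
    /// Close out the current mode and reconfigure as a forwarder, relaying
    /// incoming transactions to the current leader.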
pub fn switch_to_forwarder(
&mut self,
transactions_sockets: Vec<UdpSocket>,
cluster_info: Arc<RwLock<ClusterInfo>>,
) {
self.tpu_mode_close();
let tpu_forwarder = TpuForwarder::new(transactions_sockets, cluster_info);
self.tpu_mode = Some(TpuMode::Forwarder(ForwarderServices::new(tpu_forwarder)));
}
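
    /// Close out the current mode and reconfigure as the leader, wiring up
    /// the fetch -> sigverify -> banking -> broadcast pipeline.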
#[allow(clippy::too_many_arguments)]
pub fn switch_to_leader(
&mut self,
bank: &Arc<Bank>,
tick_duration: PohServiceConfig,
transactions_sockets: Vec<UdpSocket>,
broadcast_socket: UdpSocket,
cluster_info: Arc<RwLock<ClusterInfo>>,
sigverify_disabled: bool,
max_tick_height: u64,
blob_index: u64,
last_entry_id: &Hash,
leader_id: Pubkey,
to_validator_sender: &TpuRotationSender,
blob_sender: &BlobSender,
) {
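        // Shut down whatever was running before and start from a fresh exit
        // signal; the old one may already have been tripped.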
self.tpu_mode_close();
self.exit = Arc::new(AtomicBool::new(false));
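        // Both the fetch stage and the vote listener feed packets into this
        // channel, which drains into the sigverify stage.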
let (packet_sender, packet_receiver) = channel();
        let fetch_stage = FetchStage::new_with_sender(
            transactions_sockets,
            self.exit.clone(),
            &packet_sender,
        );
let cluster_info_vote_listener =
ClusterInfoVoteListener::new(self.exit.clone(), cluster_info.clone(), packet_sender);
let (sigverify_stage, verified_receiver) =
SigVerifyStage::new(packet_receiver, sigverify_disabled);
        let (banking_stage, entry_receiver) = BankingStage::new(
            bank,
            verified_receiver,
            tick_duration,
            last_entry_id,
            max_tick_height,
            leader_id,
            to_validator_sender,
        );
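        // Entries coming out of the banking stage are broadcast to the rest
        // of the cluster.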
let broadcast_service = BroadcastService::new(
bank.clone(),
broadcast_socket,
cluster_info,
blob_index,
bank.leader_scheduler.clone(),
entry_receiver,
max_tick_height,
self.exit.clone(),
blob_sender,
);
let svcs = LeaderServices::new(
fetch_stage,
sigverify_stage,
banking_stage,
cluster_info_vote_listener,
broadcast_service,
);
self.tpu_mode = Some(TpuMode::Leader(svcs));
}
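
    /// Returns true if the TPU is currently configured as the leader.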
pub fn is_leader(&self) -> bool {
match self.tpu_mode {
Some(TpuMode::Leader(_)) => true,
_ => false,
}
}
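
    /// Raise the exit signal; services observe it and wind down on their own.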
pub fn exit(&self) {
self.exit.store(true, Ordering::Relaxed);
}
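
    /// Returns true once the exit signal has been raised.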
pub fn is_exited(&self) -> bool {
self.exit.load(Ordering::Relaxed)
}
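
    /// Shut down the current mode's services and block until their threads
    /// have exited.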
pub fn close(self) -> thread::Result<()> {
self.tpu_mode_close();
self.join()
}
}
impl Service for Tpu {
type JoinReturnType = ();
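
    // Join every service thread of the current mode before returning.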
fn join(self) -> thread::Result<()> {
match self.tpu_mode {
Some(TpuMode::Leader(svcs)) => {
svcs.broadcast_service.join()?;
svcs.fetch_stage.join()?;
svcs.sigverify_stage.join()?;
svcs.cluster_info_vote_listener.join()?;
svcs.banking_stage.join()?;
}
Some(TpuMode::Forwarder(svcs)) => {
svcs.tpu_forwarder.join()?;
}
None => (),
}
Ok(())
}
}