solana/core/src/tvu.rs

//! The `tvu` module implements the Transaction Validation Unit, a multi-stage transaction
//! validation pipeline in software.
use crate::blockstream_service::BlockstreamService;
use crate::cluster_info::ClusterInfo;
use crate::commitment::BlockCommitmentCache;
use crate::ledger_cleanup_service::LedgerCleanupService;
use crate::partition_cfg::PartitionCfg;
use crate::poh_recorder::PohRecorder;
use crate::replay_stage::ReplayStage;
use crate::retransmit_stage::RetransmitStage;
use crate::rpc_subscriptions::RpcSubscriptions;
use crate::shred_fetch_stage::ShredFetchStage;
use crate::sigverify_shreds::ShredSigVerifier;
use crate::sigverify_stage::{DisabledSigVerifier, SigVerifyStage};
use crate::snapshot_packager_service::SnapshotPackagerService;
use crate::storage_stage::{StorageStage, StorageState};
use crossbeam_channel::unbounded;
use solana_ledger::bank_forks::BankForks;
use solana_ledger::blocktree::{Blocktree, CompletedSlotsReceiver};
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::net::UdpSocket;
use std::path::PathBuf;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::{channel, Receiver};
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
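
/// Handles to the stages of the TVU pipeline; the optional services are `None` when not enabled.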
pub struct Tvu {
    fetch_stage: ShredFetchStage,
    sigverify_stage: SigVerifyStage,
    retransmit_stage: RetransmitStage,
    replay_stage: ReplayStage,
    blockstream_service: Option<BlockstreamService>,
    ledger_cleanup_service: Option<LedgerCleanupService>,
    storage_stage: StorageStage,
    snapshot_packager_service: Option<SnapshotPackagerService>,
}
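
/// UDP sockets the validator hands to the TVU: shred fetch, repair, retransmit, and
/// TVU-forwards ports.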
pub struct Sockets {
    pub fetch: Vec<UdpSocket>,
    pub repair: UdpSocket,
    pub retransmit: Vec<UdpSocket>,
    pub forwards: Vec<UdpSocket>,
}

impl Tvu {
    /// This service receives messages from a leader in the network and processes the transactions
    /// on the bank state.
    /// # Arguments
    /// * `cluster_info` - The cluster_info state.
    /// * `sockets` - fetch, repair, and retransmit sockets
    /// * `blocktree` - the ledger itself
    #[allow(clippy::new_ret_no_self, clippy::too_many_arguments)]
    pub fn new<T>(
        vote_account: &Pubkey,
        voting_keypair: Option<&Arc<T>>,
        storage_keypair: &Arc<Keypair>,
        bank_forks: &Arc<RwLock<BankForks>>,
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        sockets: Sockets,
        blocktree: Arc<Blocktree>,
        storage_state: &StorageState,
        blockstream_unix_socket: Option<&PathBuf>,
        max_ledger_slots: Option<u64>,
        ledger_signal_receiver: Receiver<bool>,
        subscriptions: &Arc<RpcSubscriptions>,
        poh_recorder: &Arc<Mutex<PohRecorder>>,
        leader_schedule_cache: &Arc<LeaderScheduleCache>,
        exit: &Arc<AtomicBool>,
        completed_slots_receiver: CompletedSlotsReceiver,
        block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
        sigverify_disabled: bool,
        cfg: Option<PartitionCfg>,
        shred_version: u16,
    ) -> Self
    where
        T: 'static + KeypairUtil + Sync + Send,
    {
        let keypair: Arc<Keypair> = cluster_info
            .read()
            .expect("Unable to read from cluster_info during Tvu creation")
            .keypair
            .clone();

        let Sockets {
            repair: repair_socket,
            fetch: fetch_sockets,
            retransmit: retransmit_sockets,
            forwards: tvu_forward_sockets,
        } = sockets;

        let (fetch_sender, fetch_receiver) = channel();

        let repair_socket = Arc::new(repair_socket);
        let fetch_sockets: Vec<Arc<UdpSocket>> = fetch_sockets.into_iter().map(Arc::new).collect();
        let forward_sockets: Vec<Arc<UdpSocket>> =
            tvu_forward_sockets.into_iter().map(Arc::new).collect();
        let fetch_stage = ShredFetchStage::new(
            fetch_sockets,
            forward_sockets,
            repair_socket.clone(),
            &fetch_sender,
            &exit,
        );
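
        // Shreds from the fetch stage are signature-verified before they reach the retransmit
        // and replay stages; verification can be bypassed via `sigverify_disabled`.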
        let (verified_sender, verified_receiver) = unbounded();
        let sigverify_stage = if !sigverify_disabled {
            SigVerifyStage::new(
                fetch_receiver,
                verified_sender.clone(),
                ShredSigVerifier::new(bank_forks.clone(), leader_schedule_cache.clone()),
            )
        } else {
            SigVerifyStage::new(
                fetch_receiver,
                verified_sender.clone(),
                DisabledSigVerifier::default(),
            )
        };
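
        // The retransmit stage forwards verified shreds to downstream peers, inserts them into
        // the blocktree, and performs repair using `repair_socket`.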
        let retransmit_stage = RetransmitStage::new(
            bank_forks.clone(),
            leader_schedule_cache,
            blocktree.clone(),
            &cluster_info,
            Arc::new(retransmit_sockets),
            repair_socket,
            verified_receiver,
            &exit,
            completed_slots_receiver,
            *bank_forks.read().unwrap().working_bank().epoch_schedule(),
            cfg,
            shred_version,
        );
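
        // Channels the replay stage uses to notify the optional blockstream and ledger-cleanup
        // services of slots it has completed; the snapshot packager is only started when a
        // snapshot config is present.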
        let (blockstream_slot_sender, blockstream_slot_receiver) = channel();
        let (ledger_cleanup_slot_sender, ledger_cleanup_slot_receiver) = channel();

        let (snapshot_packager_service, snapshot_package_sender) = {
            let snapshot_config = { bank_forks.read().unwrap().snapshot_config().clone() };
            if snapshot_config.is_some() {
                // Start a snapshot packaging service
                let (sender, receiver) = channel();
                let snapshot_packager_service = SnapshotPackagerService::new(receiver, exit);
                (Some(snapshot_packager_service), Some(sender))
            } else {
                (None, None)
            }
        };
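
        // The replay stage replays ledger entries against the bank forks, votes with the supplied
        // keypair, and publishes newly rooted banks on `root_bank_receiver`.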
        let (replay_stage, root_bank_receiver) = ReplayStage::new(
            &keypair.pubkey(),
            vote_account,
            voting_keypair,
            blocktree.clone(),
            &bank_forks,
            cluster_info.clone(),
            &exit,
            ledger_signal_receiver,
            subscriptions,
            poh_recorder,
            leader_schedule_cache,
            vec![blockstream_slot_sender, ledger_cleanup_slot_sender],
            snapshot_package_sender,
            block_commitment_cache,
        );
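
        // Entry streaming is optional: the blockstream service only runs when a unix socket path
        // is supplied.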
        let blockstream_service = if let Some(blockstream_unix_socket) = blockstream_unix_socket {
            let blockstream_service = BlockstreamService::new(
                blockstream_slot_receiver,
                blocktree.clone(),
                blockstream_unix_socket,
                &exit,
            );
            Some(blockstream_service)
        } else {
            None
        };

        let ledger_cleanup_service = max_ledger_slots.map(|max_ledger_slots| {
            LedgerCleanupService::new(
                ledger_cleanup_slot_receiver,
                blocktree.clone(),
                max_ledger_slots,
                &exit,
            )
        });
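
        // The storage stage consumes the newly rooted banks produced by the replay stage.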
        let storage_stage = StorageStage::new(
            storage_state,
            root_bank_receiver,
            Some(blocktree),
            &keypair,
            storage_keypair,
            &exit,
            &bank_forks,
            &cluster_info,
        );

        Tvu {
            fetch_stage,
            sigverify_stage,
            retransmit_stage,
            replay_stage,
            blockstream_service,
            ledger_cleanup_service,
            storage_stage,
            snapshot_packager_service,
        }
    }
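
    /// Blocks until every stage thread has exited; optional services are joined only if they
    /// were started.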
    pub fn join(self) -> thread::Result<()> {
        self.retransmit_stage.join()?;
        self.fetch_stage.join()?;
        self.sigverify_stage.join()?;
        self.storage_stage.join()?;
        if let Some(blockstream_service) = self.blockstream_service {
            blockstream_service.join()?;
        }
        if let Some(ledger_cleanup_service) = self.ledger_cleanup_service {
            ledger_cleanup_service.join()?;
        }
        self.replay_stage.join()?;
        if let Some(s) = self.snapshot_packager_service {
            s.join()?;
        }
        Ok(())
    }
}

#[cfg(test)]
pub mod tests {
    use super::*;
    use crate::banking_stage::create_test_recorder;
    use crate::cluster_info::{ClusterInfo, Node};
    use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
    use solana_ledger::create_new_tmp_ledger;
    use solana_runtime::bank::Bank;
    use std::sync::atomic::Ordering;
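
    // Spins up a full TVU against a temporary ledger and verifies that every stage exits cleanly
    // once the `exit` flag is set.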
    #[test]
    fn test_tvu_exit() {
        solana_logger::setup();
        let leader = Node::new_localhost();
        let target1_keypair = Keypair::new();
        let target1 = Node::new_localhost_with_pubkey(&target1_keypair.pubkey());

        let starting_balance = 10_000;
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(starting_balance);

        let bank_forks = BankForks::new(0, Bank::new(&genesis_config));

        // start cluster_info1
        let mut cluster_info1 = ClusterInfo::new_with_invalid_keypair(target1.info.clone());
        cluster_info1.insert_info(leader.info.clone());
        let cref1 = Arc::new(RwLock::new(cluster_info1));

        let (blocktree_path, _) = create_new_tmp_ledger!(&genesis_config);
        let (blocktree, l_receiver, completed_slots_receiver) =
            Blocktree::open_with_signal(&blocktree_path)
                .expect("Expected to successfully open ledger");
        let blocktree = Arc::new(blocktree);
        let bank = bank_forks.working_bank();
        let (exit, poh_recorder, poh_service, _entry_receiver) =
            create_test_recorder(&bank, &blocktree, None);
        let voting_keypair = Keypair::new();
        let storage_keypair = Arc::new(Keypair::new());
        let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
        let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
        let tvu = Tvu::new(
            &voting_keypair.pubkey(),
            Some(&Arc::new(voting_keypair)),
            &storage_keypair,
            &Arc::new(RwLock::new(bank_forks)),
            &cref1,
            {
                Sockets {
                    repair: target1.sockets.repair,
                    retransmit: target1.sockets.retransmit_sockets,
                    fetch: target1.sockets.tvu,
                    forwards: target1.sockets.tvu_forwards,
                }
            },
            blocktree,
            &StorageState::default(),
            None,
            None,
            l_receiver,
            &Arc::new(RpcSubscriptions::default()),
            &poh_recorder,
            &leader_schedule_cache,
            &exit,
            completed_slots_receiver,
            block_commitment_cache,
            false,
            None,
            0,
        );
        exit.store(true, Ordering::Relaxed);
        tvu.join().unwrap();
        poh_service.join().unwrap();
    }
}