moves turbine-disabled check to shred-fetch-stage (#30799)
If turbine_disabled is true, the commit discards turbine packets earlier in the pipeline, so that they won't interfere with the deduper and so that the packets can get through once turbine is enabled again. This is a prerequisite of https://github.com/solana-labs/solana/pull/30786, needed for local-cluster tests to pass.
This commit is contained in:
parent c6e7aaf96c
commit e66edeb180
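The gist of the change, as a minimal self-contained sketch (the `Packet` type and discard helper below are simplified stand-ins for the real `solana_perf` types, not the actual API):

```rust
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};

// Simplified stand-ins for the real packet type and discard helper.
struct Packet {
    discard: bool,
}

fn should_discard_packet(_packet: &Packet) -> bool {
    // The real helper checks slot bounds, shred version, and the
    // deduper (recording the packet in the deduper as a side effect).
    false
}

fn modify_packets(batch: &mut [Packet], turbine_disabled: &Arc<AtomicBool>) {
    // Load the flag once per batch; Relaxed suffices because the flag
    // only gates best-effort packet handling.
    let turbine_disabled = turbine_disabled.load(Ordering::Relaxed);
    for packet in batch.iter_mut() {
        // Short-circuit: while turbine is disabled, packets are discarded
        // before should_discard_packet (and hence the deduper) sees them.
        if turbine_disabled || should_discard_packet(packet) {
            packet.discard = true;
        }
    }
}

fn main() {
    let turbine_disabled = Arc::new(AtomicBool::new(true));
    let mut batch = [Packet { discard: false }];
    modify_packets(&mut batch, &turbine_disabled);
    assert!(batch[0].discard); // dropped without touching the deduper
}
```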
--- a/core/src/shred_fetch_stage.rs
+++ b/core/src/shred_fetch_stage.rs
@@ -17,7 +17,10 @@ use {
     solana_streamer::streamer::{self, PacketBatchReceiver, StreamerReceiveStats},
     std::{
         net::UdpSocket,
-        sync::{atomic::AtomicBool, Arc, RwLock},
+        sync::{
+            atomic::{AtomicBool, Ordering},
+            Arc, RwLock,
+        },
         thread::{self, Builder, JoinHandle},
         time::{Duration, Instant},
     },
@@ -41,6 +44,7 @@ impl ShredFetchStage {
         name: &'static str,
         flags: PacketFlags,
         repair_context: Option<(&UdpSocket, &ClusterInfo)>,
+        turbine_disabled: Arc<AtomicBool>,
     ) {
         const STATS_SUBMIT_CADENCE: Duration = Duration::from_secs(1);
         let mut rng = rand::thread_rng();
@@ -95,16 +99,19 @@
             let max_slot = last_slot + 2 * slots_per_epoch;
             let should_drop_merkle_shreds =
                 |shred_slot| should_drop_merkle_shreds(shred_slot, &root_bank);
+            let turbine_disabled = turbine_disabled.load(Ordering::Relaxed);
             for packet in packet_batch.iter_mut() {
-                if should_discard_packet(
-                    packet,
-                    last_root,
-                    max_slot,
-                    shred_version,
-                    &deduper,
-                    should_drop_merkle_shreds,
-                    &mut stats,
-                ) {
+                if turbine_disabled
+                    || should_discard_packet(
+                        packet,
+                        last_root,
+                        max_slot,
+                        shred_version,
+                        &deduper,
+                        should_drop_merkle_shreds,
+                        &mut stats,
+                    )
+                {
                     packet.meta_mut().set_discard(true);
                 } else {
                     packet.meta_mut().flags.insert(flags);
@@ -117,6 +124,7 @@ impl ShredFetchStage {
         }
     }
 
+    #[allow(clippy::too_many_arguments)]
     fn packet_modifier(
         sockets: Vec<Arc<UdpSocket>>,
         exit: &Arc<AtomicBool>,
@@ -127,6 +135,7 @@ impl ShredFetchStage {
         name: &'static str,
         flags: PacketFlags,
         repair_context: Option<(Arc<UdpSocket>, Arc<ClusterInfo>)>,
+        turbine_disabled: Arc<AtomicBool>,
     ) -> (Vec<JoinHandle<()>>, JoinHandle<()>) {
         let (packet_sender, packet_receiver) = unbounded();
         let streamers = sockets
@@ -158,6 +167,7 @@ impl ShredFetchStage {
                     name,
                     flags,
                     repair_context,
+                    turbine_disabled,
                 )
             })
             .unwrap();
@@ -172,6 +182,7 @@ impl ShredFetchStage {
         shred_version: u16,
         bank_forks: Arc<RwLock<BankForks>>,
         cluster_info: Arc<ClusterInfo>,
+        turbine_disabled: Arc<AtomicBool>,
         exit: &Arc<AtomicBool>,
     ) -> Self {
         let recycler = PacketBatchRecycler::warmed(100, 1024);
@@ -186,6 +197,7 @@ impl ShredFetchStage {
             "shred_fetch",
             PacketFlags::empty(),
             None, // repair_context
+            turbine_disabled.clone(),
         );
 
         let (tvu_forwards_threads, fwd_thread_hdl) = Self::packet_modifier(
@@ -198,6 +210,7 @@ impl ShredFetchStage {
             "shred_fetch_tvu_forwards",
             PacketFlags::FORWARDED,
             None, // repair_context
+            turbine_disabled.clone(),
         );
 
         let (repair_receiver, repair_handler) = Self::packet_modifier(
@@ -210,6 +223,7 @@ impl ShredFetchStage {
             "shred_fetch_repair",
             PacketFlags::REPAIR,
             Some((repair_socket, cluster_info)),
+            turbine_disabled,
         );
 
         tvu_threads.extend(tvu_forwards_threads.into_iter());
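The ordering matters because the discard check records each packet it inspects in the deduper. Under the old placement (removed below in the sigverify stage), disabled-turbine shreds still passed through the deduper before being dropped, so identical shreds retransmitted after turbine was re-enabled would be filtered as duplicates. A toy demonstration of that effect, with a `HashSet` standing in for the real deduper (an assumption for illustration, not the actual data structure):

```rust
use std::collections::HashSet;

// A HashSet stands in for the real deduper.
struct Deduper(HashSet<Vec<u8>>);

impl Deduper {
    // Returns true (i.e. discard) if the shred was seen before,
    // and records the shred either way.
    fn dedup(&mut self, shred: &[u8]) -> bool {
        !self.0.insert(shred.to_vec())
    }
}

fn main() {
    let shred = b"shred-payload".to_vec();

    // Old placement: the shred reaches the deduper even while turbine is
    // disabled and is only dropped later, in the sigverify stage.
    let mut deduper = Deduper(HashSet::new());
    let _ = deduper.dedup(&shred); // recorded while turbine is disabled
    // Turbine re-enabled: the retransmitted shred now looks like a
    // duplicate and is wrongly discarded.
    assert!(deduper.dedup(&shred));

    // New placement: while disabled, the shred is discarded before the
    // deduper runs, so the retransmitted copy gets through on re-enable.
    let mut deduper = Deduper(HashSet::new());
    let turbine_disabled = true;
    if !turbine_disabled {
        let _ = deduper.dedup(&shred);
    }
    assert!(!deduper.dedup(&shred)); // first sighting: kept
}
```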
--- a/core/src/sigverify_shreds.rs
+++ b/core/src/sigverify_shreds.rs
@@ -11,10 +11,7 @@ use {
     solana_sdk::{clock::Slot, pubkey::Pubkey},
     std::{
         collections::HashMap,
-        sync::{
-            atomic::{AtomicBool, Ordering},
-            Arc, RwLock,
-        },
+        sync::{Arc, RwLock},
         thread::{Builder, JoinHandle},
         time::{Duration, Instant},
     },
@@ -34,7 +31,6 @@ pub(crate) fn spawn_shred_sigverify(
     shred_fetch_receiver: Receiver<PacketBatch>,
     retransmit_sender: Sender<Vec</*shred:*/ Vec<u8>>>,
     verified_sender: Sender<Vec<PacketBatch>>,
-    turbine_disabled: Arc<AtomicBool>,
 ) -> JoinHandle<()> {
     let recycler_cache = RecyclerCache::warmed();
     let mut stats = ShredSigVerifyStats::new(Instant::now());
@@ -56,7 +52,6 @@ pub(crate) fn spawn_shred_sigverify(
             &shred_fetch_receiver,
             &retransmit_sender,
             &verified_sender,
-            &turbine_disabled,
             &mut stats,
         ) {
             Ok(()) => (),
@@ -83,7 +78,6 @@ fn run_shred_sigverify(
     shred_fetch_receiver: &Receiver<PacketBatch>,
     retransmit_sender: &Sender<Vec</*shred:*/ Vec<u8>>>,
     verified_sender: &Sender<Vec<PacketBatch>>,
-    turbine_disabled: &AtomicBool,
     stats: &mut ShredSigVerifyStats,
 ) -> Result<(), Error> {
     const RECV_TIMEOUT: Duration = Duration::from_secs(1);
@@ -113,10 +107,8 @@ fn run_shred_sigverify(
         .map(<[u8]>::to_vec)
         .collect();
     stats.num_retransmit_shreds += shreds.len();
-    if !turbine_disabled.load(Ordering::Relaxed) {
-        retransmit_sender.send(shreds)?;
-        verified_sender.send(packets)?;
-    }
+    retransmit_sender.send(shreds)?;
+    verified_sender.send(packets)?;
     stats.elapsed_micros += now.elapsed().as_micros() as u64;
     Ok(())
 }
--- a/core/src/tvu.rs
+++ b/core/src/tvu.rs
@@ -161,6 +161,7 @@ impl Tvu {
             tvu_config.shred_version,
             bank_forks.clone(),
             cluster_info.clone(),
+            turbine_disabled,
             exit,
         );
 
@@ -173,7 +174,6 @@ impl Tvu {
             fetch_receiver,
             retransmit_sender.clone(),
             verified_sender,
-            turbine_disabled,
         );
 
         let retransmit_stage = RetransmitStage::new(