removes Packet Meta.sender_stake and find_packet_sender_stake_stage (#31077)

Packet Meta.sender_stake has been unused since
https://github.com/solana-labs/solana/pull/26512
removed sender_stake from banking-stage buffer prioritization.
behzad nouri 2023-04-06 21:33:43 +00:00 committed by GitHub
parent 5f413db04f
commit 4d0abebe0e
9 changed files with 15 additions and 257 deletions
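
For context, a minimal sketch of the simplified wiring, using a stand-in PacketBatch type rather than the real solana_perf one (assumption for illustration only): with the stage removed, the fetch-stage channel is handed straight to SigVerifyStage, so it carries PacketBatch instead of Vec<PacketBatch>.

    use crossbeam_channel::unbounded;

    // Stand-in for solana_perf::packet::PacketBatch; a Vec of packet buffers
    // is enough here to show the channel shapes.
    type PacketBatch = Vec<Vec<u8>>;

    fn main() {
        // Before: fetch stage -> PacketBatch channel -> FindPacketSenderStakeStage
        // (stamps meta.sender_stake from an IpAddr -> stake map and re-batches
        // into Vec<PacketBatch>) -> SigVerifyStage.
        // After: the receiver below goes to SigVerifyStage::new directly.
        let (packet_sender, packet_receiver) = unbounded::<PacketBatch>();

        // The fetch stage, conceptually, sends one batch at a time...
        packet_sender.send(vec![vec![0u8; 8]]).unwrap();

        // ...and sigverify receives a single PacketBatch per recv, with no
        // outer Vec to flatten and no sender_stake to apply.
        let batch = packet_receiver.recv().unwrap();
        assert_eq!(batch.len(), 1);
    }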

core/benches/sigverify_stage.rs

@@ -177,7 +177,7 @@ fn bench_sigverify_stage(bencher: &mut Bencher, use_same_tx: bool) {
             batch
                 .iter_mut()
                 .for_each(|packet| packet.meta_mut().flags |= PacketFlags::TRACER_PACKET);
-            packet_s.send(vec![batch]).unwrap();
+            packet_s.send(batch).unwrap();
         }
         let mut received = 0;
         let mut total_tracer_packets_received_in_sigverify_stage = 0;

core/benches/unprocessed_packet_batches.rs

@@ -4,7 +4,6 @@
 extern crate test;

 use {
-    rand::distributions::{Distribution, Uniform},
     solana_core::{
         forward_packet_batches_by_accounts::ForwardPacketBatchesByAccounts,
         unprocessed_packet_batches::*,
@@ -30,16 +29,14 @@ fn build_packet_batch(
 ) -> (PacketBatch, Vec<usize>) {
     let packet_batch = PacketBatch::new(
         (0..packet_per_batch_count)
-            .map(|sender_stake| {
+            .map(|_| {
                 let tx = system_transaction::transfer(
                     &Keypair::new(),
                     &solana_sdk::pubkey::new_rand(),
                     1,
                     recent_blockhash.unwrap_or_else(Hash::new_unique),
                 );
-                let mut packet = Packet::from_data(None, tx).unwrap();
-                packet.meta_mut().sender_stake = sender_stake as u64;
-                packet
+                Packet::from_data(None, tx).unwrap()
             })
             .collect(),
     );
@@ -52,9 +49,6 @@ fn build_randomized_packet_batch(
     packet_per_batch_count: usize,
     recent_blockhash: Option<Hash>,
 ) -> (PacketBatch, Vec<usize>) {
-    let mut rng = rand::thread_rng();
-    let distribution = Uniform::from(0..200_000);
     let packet_batch = PacketBatch::new(
         (0..packet_per_batch_count)
             .map(|_| {
@@ -64,10 +58,7 @@ fn build_randomized_packet_batch(
                     1,
                     recent_blockhash.unwrap_or_else(Hash::new_unique),
                 );
-                let mut packet = Packet::from_data(None, tx).unwrap();
-                let sender_stake = distribution.sample(&mut rng);
-                packet.meta_mut().sender_stake = sender_stake as u64;
-                packet
+                Packet::from_data(None, tx).unwrap()
             })
             .collect(),
     );
@@ -119,11 +110,7 @@ fn bench_packet_clone(bencher: &mut Bencher) {
         let mut timer = Measure::start("insert_batch");
         packet_batch.iter().for_each(|packet| {
-            let mut packet = packet.clone();
-            packet.meta_mut().sender_stake *= 2;
-            if packet.meta().sender_stake > 2 {
-                outer_packet = packet;
-            }
+            outer_packet = packet.clone();
         });
         timer.stop();

core/src/find_packet_sender_stake_stage.rs

@@ -1,163 +0,0 @@
use {
    crossbeam_channel::{Receiver, RecvTimeoutError, Sender},
    solana_measure::measure::Measure,
    solana_perf::packet::PacketBatch,
    solana_sdk::timing::timestamp,
    solana_streamer::streamer::{self, StakedNodes, StreamerError},
    std::{
        collections::HashMap,
        net::IpAddr,
        sync::{Arc, RwLock},
        thread::{self, Builder, JoinHandle},
    },
};

// Try to target 50ms, rough timings from a testnet validator
//
// 50ms/(200ns/packet) = 250k packets
const MAX_FINDPACKETSENDERSTAKE_BATCH: usize = 250_000;

pub type FindPacketSenderStakeSender = Sender<Vec<PacketBatch>>;
pub type FindPacketSenderStakeReceiver = Receiver<Vec<PacketBatch>>;

#[derive(Debug, Default)]
struct FindPacketSenderStakeStats {
    last_print: u64,
    refresh_ip_to_stake_time: u64,
    apply_sender_stakes_time: u64,
    send_batches_time: u64,
    receive_batches_time: u64,
    total_batches: u64,
    total_packets: u64,
    total_discard_random: usize,
    total_discard_random_time_us: usize,
}

impl FindPacketSenderStakeStats {
    fn report(&mut self, name: &'static str) {
        let now = timestamp();
        let elapsed_ms = now - self.last_print;
        if elapsed_ms > 2000 {
            datapoint_info!(
                name,
                (
                    "refresh_ip_to_stake_time_us",
                    self.refresh_ip_to_stake_time as i64,
                    i64
                ),
                (
                    "apply_sender_stakes_time_us",
                    self.apply_sender_stakes_time as i64,
                    i64
                ),
                ("send_batches_time_us", self.send_batches_time as i64, i64),
                (
                    "receive_batches_time_ns",
                    self.receive_batches_time as i64,
                    i64
                ),
                ("total_batches", self.total_batches as i64, i64),
                ("total_packets", self.total_packets as i64, i64),
                ("total_discard_random", self.total_discard_random, i64),
                (
                    "total_discard_random_time_us",
                    self.total_discard_random_time_us,
                    i64
                ),
            );
            *self = FindPacketSenderStakeStats::default();
            self.last_print = now;
        }
    }
}

pub struct FindPacketSenderStakeStage {
    thread_hdl: JoinHandle<()>,
}

impl FindPacketSenderStakeStage {
    pub fn new(
        packet_receiver: streamer::PacketBatchReceiver,
        sender: FindPacketSenderStakeSender,
        staked_nodes: Arc<RwLock<StakedNodes>>,
        name: &'static str,
    ) -> Self {
        let mut stats = FindPacketSenderStakeStats::default();
        let thread_hdl = Builder::new()
            .name("solPktStake".to_string())
            .spawn(move || loop {
                match streamer::recv_packet_batches(&packet_receiver) {
                    Ok((mut batches, num_packets, recv_duration)) => {
                        let num_batches = batches.len();
                        let mut discard_random_time =
                            Measure::start("findpacketsenderstake_discard_random_time");
                        let non_discarded_packets = solana_perf::discard::discard_batches_randomly(
                            &mut batches,
                            MAX_FINDPACKETSENDERSTAKE_BATCH,
                            num_packets,
                        );
                        let num_discarded_randomly =
                            num_packets.saturating_sub(non_discarded_packets);
                        discard_random_time.stop();
                        let mut apply_sender_stakes_time =
                            Measure::start("apply_sender_stakes_time");
                        let mut apply_stake = || {
                            let ip_to_stake = staked_nodes.read().unwrap();
                            Self::apply_sender_stakes(&mut batches, &ip_to_stake.ip_stake_map);
                        };
                        apply_stake();
                        apply_sender_stakes_time.stop();
                        let mut send_batches_time = Measure::start("send_batches_time");
                        if let Err(e) = sender.send(batches) {
                            info!("Sender error: {:?}", e);
                        }
                        send_batches_time.stop();
                        stats.apply_sender_stakes_time = stats
                            .apply_sender_stakes_time
                            .saturating_add(apply_sender_stakes_time.as_us());
                        stats.send_batches_time = stats
                            .send_batches_time
                            .saturating_add(send_batches_time.as_us());
                        stats.receive_batches_time = stats
                            .receive_batches_time
                            .saturating_add(recv_duration.as_nanos() as u64);
                        stats.total_batches =
                            stats.total_batches.saturating_add(num_batches as u64);
                        stats.total_packets =
                            stats.total_packets.saturating_add(num_packets as u64);
                        stats.total_discard_random_time_us += discard_random_time.as_us() as usize;
                        stats.total_discard_random += num_discarded_randomly;
                    }
                    Err(e) => match e {
                        StreamerError::RecvTimeout(RecvTimeoutError::Disconnected) => break,
                        StreamerError::RecvTimeout(RecvTimeoutError::Timeout) => (),
                        _ => error!("error: {:?}", e),
                    },
                }
                stats.report(name);
            })
            .unwrap();
        Self { thread_hdl }
    }

    fn apply_sender_stakes(batches: &mut [PacketBatch], ip_to_stake: &HashMap<IpAddr, u64>) {
        batches
            .iter_mut()
            .flat_map(|batch| batch.iter_mut())
            .for_each(|packet| {
                packet.meta_mut().sender_stake = ip_to_stake
                    .get(&packet.meta().addr)
                    .copied()
                    .unwrap_or_default();
            });
    }

    pub fn join(self) -> thread::Result<()> {
        self.thread_hdl.join()
    }
}

core/src/lib.rs

@@ -27,7 +27,6 @@ pub mod cost_update_service;
 pub mod drop_bank_service;
 pub mod duplicate_repair_status;
 pub mod fetch_stage;
-pub mod find_packet_sender_stake_stage;
 pub mod fork_choice;
 pub mod forward_packet_batches_by_accounts;
 pub mod gen_keys;

core/src/sigverify_stage.rs

@@ -6,9 +6,9 @@
 //! if perf-libs are available

 use {
-    crate::{find_packet_sender_stake_stage::FindPacketSenderStakeReceiver, sigverify},
+    crate::sigverify,
     core::time::Duration,
-    crossbeam_channel::{RecvTimeoutError, SendError},
+    crossbeam_channel::{Receiver, RecvTimeoutError, SendError},
     itertools::Itertools,
     solana_measure::measure::Measure,
     solana_perf::{
@@ -236,7 +236,7 @@ impl SigVerifier for DisabledSigVerifier {
 impl SigVerifyStage {
     pub fn new<T: SigVerifier + 'static + Send>(
-        packet_receiver: FindPacketSenderStakeReceiver,
+        packet_receiver: Receiver<PacketBatch>,
         verifier: T,
         name: &'static str,
     ) -> Self {
@@ -292,11 +292,11 @@ impl SigVerifyStage {
     fn verifier<const K: usize, T: SigVerifier>(
         deduper: &Deduper<K, [u8]>,
-        recvr: &FindPacketSenderStakeReceiver,
+        recvr: &Receiver<PacketBatch>,
         verifier: &mut T,
         stats: &mut SigVerifierStats,
     ) -> Result<(), T::SendType> {
-        let (mut batches, num_packets, recv_duration) = streamer::recv_vec_packet_batches(recvr)?;
+        let (mut batches, num_packets, recv_duration) = streamer::recv_packet_batches(recvr)?;

         let batches_len = batches.len();
         debug!(
@@ -405,7 +405,7 @@ impl SigVerifyStage {
     }

     fn verifier_service<T: SigVerifier + 'static + Send>(
-        packet_receiver: FindPacketSenderStakeReceiver,
+        packet_receiver: Receiver<PacketBatch>,
         mut verifier: T,
         name: &'static str,
     ) -> JoinHandle<()> {
@@ -450,7 +450,7 @@ impl SigVerifyStage {
     }

     fn verifier_services<T: SigVerifier + 'static + Send>(
-        packet_receiver: FindPacketSenderStakeReceiver,
+        packet_receiver: Receiver<PacketBatch>,
         verifier: T,
         name: &'static str,
     ) -> JoinHandle<()> {
@@ -574,7 +574,7 @@ mod tests {
                 .iter_mut()
                 .for_each(|packet| packet.meta_mut().flags |= PacketFlags::TRACER_PACKET);
             assert_eq!(batch.len(), packets_per_batch);
-            packet_s.send(vec![batch]).unwrap();
+            packet_s.send(batch).unwrap();
         }
         let mut received = 0;
         let mut total_tracer_packets_received_in_sigverify_stage = 0;

core/src/tpu.rs

@@ -12,7 +12,6 @@ use {
         GossipVerifiedVoteHashSender, VerifiedVoteSender, VoteTracker,
     },
     fetch_stage::FetchStage,
-    find_packet_sender_stake_stage::FindPacketSenderStakeStage,
     sigverify::TransactionSigVerifier,
     sigverify_stage::SigVerifyStage,
     staked_nodes_updater_service::StakedNodesUpdaterService,
@@ -67,8 +66,6 @@ pub struct Tpu {
     broadcast_stage: BroadcastStage,
     tpu_quic_t: thread::JoinHandle<()>,
     tpu_forwards_quic_t: thread::JoinHandle<()>,
-    find_packet_sender_stake_stage: FindPacketSenderStakeStage,
-    vote_find_packet_sender_stake_stage: FindPacketSenderStakeStage,
     staked_nodes_updater_service: StakedNodesUpdaterService,
     tracer_thread_hdl: TracerThread,
 }
@@ -141,25 +138,6 @@ impl Tpu {
             shared_staked_nodes_overrides,
         );

-        let (find_packet_sender_stake_sender, find_packet_sender_stake_receiver) = unbounded();
-
-        let find_packet_sender_stake_stage = FindPacketSenderStakeStage::new(
-            packet_receiver,
-            find_packet_sender_stake_sender,
-            staked_nodes.clone(),
-            "Tpu",
-        );
-
-        let (vote_find_packet_sender_stake_sender, vote_find_packet_sender_stake_receiver) =
-            unbounded();
-
-        let vote_find_packet_sender_stake_stage = FindPacketSenderStakeStage::new(
-            vote_packet_receiver,
-            vote_find_packet_sender_stake_sender,
-            staked_nodes.clone(),
-            "Vote",
-        );
-
         let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote();

         let stats = Arc::new(StreamStats::default());
@@ -205,18 +183,14 @@ impl Tpu {
         let sigverify_stage = {
             let verifier = TransactionSigVerifier::new(non_vote_sender);
-            SigVerifyStage::new(find_packet_sender_stake_receiver, verifier, "tpu-verifier")
+            SigVerifyStage::new(packet_receiver, verifier, "tpu-verifier")
         };

         let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote();

         let vote_sigverify_stage = {
             let verifier = TransactionSigVerifier::new_reject_non_vote(tpu_vote_sender);
-            SigVerifyStage::new(
-                vote_find_packet_sender_stake_receiver,
-                verifier,
-                "tpu-vote-verifier",
-            )
+            SigVerifyStage::new(vote_packet_receiver, verifier, "tpu-vote-verifier")
         };

         let (gossip_vote_sender, gossip_vote_receiver) =
@@ -271,8 +245,6 @@ impl Tpu {
             broadcast_stage,
             tpu_quic_t,
             tpu_forwards_quic_t,
-            find_packet_sender_stake_stage,
-            vote_find_packet_sender_stake_stage,
             staked_nodes_updater_service,
             tracer_thread_hdl,
         }
@@ -285,8 +257,6 @@ impl Tpu {
             self.vote_sigverify_stage.join(),
             self.cluster_info_vote_listener.join(),
             self.banking_stage.join(),
-            self.find_packet_sender_stake_stage.join(),
-            self.vote_find_packet_sender_stake_stage.join(),
             self.staked_nodes_updater_service.join(),
             self.tpu_quic_t.join(),
             self.tpu_forwards_quic_t.join(),

sdk/src/packet.rs

@@ -39,7 +39,6 @@ pub struct Meta {
     pub addr: IpAddr,
     pub port: u16,
     pub flags: PacketFlags,
-    pub sender_stake: u64,
 }

 // serde_as is used as a work around because array isn't supported by serde
@@ -243,7 +242,6 @@ impl Default for Meta {
             addr: IpAddr::V4(Ipv4Addr::UNSPECIFIED),
             port: 0,
             flags: PacketFlags::empty(),
-            sender_stake: 0,
         }
     }
 }

streamer/src/nonblocking/quic.rs

@@ -351,7 +351,6 @@ fn handle_and_cache_new_connection(
             connection_table,
             stream_exit,
             params.stats.clone(),
-            params.stake,
             peer_type,
             wait_for_chunk_timeout,
         ));
@@ -689,7 +688,6 @@ async fn handle_connection(
     connection_table: Arc<Mutex<ConnectionTable>>,
     stream_exit: Arc<AtomicBool>,
     stats: Arc<StreamStats>,
-    stake: u64,
     peer_type: ConnectionPeerType,
     wait_for_chunk_timeout: Duration,
 ) {
@@ -736,7 +734,6 @@ async fn handle_connection(
                         &remote_addr,
                         &packet_sender,
                         stats.clone(),
-                        stake,
                         peer_type,
                     )
                     .await
@@ -788,7 +785,6 @@ async fn handle_chunk(
     remote_addr: &SocketAddr,
     packet_sender: &AsyncSender<PacketAccumulator>,
     stats: Arc<StreamStats>,
-    stake: u64,
     peer_type: ConnectionPeerType,
 ) -> bool {
     match chunk {
@@ -817,7 +813,6 @@ async fn handle_chunk(
                 if packet_accum.is_none() {
                     let mut meta = Meta::default();
                     meta.set_socket_addr(remote_addr);
-                    meta.sender_stake = stake;
                     *packet_accum = Some(PacketAccumulator {
                         meta,
                         chunks: Vec::new(),

streamer/src/streamer.rs

@@ -313,34 +313,6 @@ fn recv_send(
     Ok(())
 }

-pub fn recv_vec_packet_batches(
-    recvr: &Receiver<Vec<PacketBatch>>,
-) -> Result<(Vec<PacketBatch>, usize, Duration)> {
-    let timer = Duration::new(1, 0);
-    let mut packet_batches = recvr.recv_timeout(timer)?;
-    let recv_start = Instant::now();
-    trace!("got packets");
-    let mut num_packets = packet_batches
-        .iter()
-        .map(|packets| packets.len())
-        .sum::<usize>();
-    while let Ok(packet_batch) = recvr.try_recv() {
-        trace!("got more packets");
-        num_packets += packet_batch
-            .iter()
-            .map(|packets| packets.len())
-            .sum::<usize>();
-        packet_batches.extend(packet_batch);
-    }
-    let recv_duration = recv_start.elapsed();
-    trace!(
-        "packet batches len: {}, num packets: {}",
-        packet_batches.len(),
-        num_packets
-    );
-    Ok((packet_batches, num_packets, recv_duration))
-}
-
 pub fn recv_packet_batches(
     recvr: &PacketBatchReceiver,
 ) -> Result<(Vec<PacketBatch>, usize, Duration)> {