sdk: Make Packet::meta private, use accessor functions (#29092)

sdk: Make packet meta private
Jon Cinque, 2022-12-06 12:54:49 +01:00 (committed by GitHub)
parent c106c7e349
commit b1340d77a2
39 changed files with 270 additions and 249 deletions
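
The sdk-side change itself is not shown in this excerpt; based on the call sites below, the commit presumably replaces direct access to the now-private meta field with an accessor pair. A simplified, self-contained sketch of that pattern (field list, buffer size, and doc comments are assumptions, not the real solana_sdk definition):

    // Simplified model of the pattern applied to solana_sdk::packet::Packet;
    // the real struct has more fields, derives, and serde attributes.
    #[derive(Default)]
    pub struct Meta {
        pub size: usize,
        // ... flags, addr, port, sender_stake, etc. in the real sdk
    }

    pub struct Packet {
        buffer: [u8; 1232], // PACKET_DATA_SIZE in the real sdk
        meta: Meta,         // now private: callers go through the accessors
    }

    impl Packet {
        /// Read-only view of the packet metadata.
        pub fn meta(&self) -> &Meta {
            &self.meta
        }

        /// Mutable view of the packet metadata.
        pub fn meta_mut(&mut self) -> &mut Meta {
            &mut self.meta
        }
    }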


@ -25,8 +25,8 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
let mut packet_batch = PacketBatch::with_capacity(batch_size); let mut packet_batch = PacketBatch::with_capacity(batch_size);
packet_batch.resize(batch_size, Packet::default()); packet_batch.resize(batch_size, Packet::default());
for w in packet_batch.iter_mut() { for w in packet_batch.iter_mut() {
w.meta.size = PACKET_DATA_SIZE; w.meta_mut().size = PACKET_DATA_SIZE;
w.meta.set_socket_addr(addr); w.meta_mut().set_socket_addr(addr);
} }
let packet_batch = Arc::new(packet_batch); let packet_batch = Arc::new(packet_batch);
spawn(move || loop { spawn(move || loop {
@ -35,8 +35,8 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
} }
let mut num = 0; let mut num = 0;
for p in packet_batch.iter() { for p in packet_batch.iter() {
let a = p.meta.socket_addr(); let a = p.meta().socket_addr();
assert!(p.meta.size <= PACKET_DATA_SIZE); assert!(p.meta().size <= PACKET_DATA_SIZE);
let data = p.data(..).unwrap_or_default(); let data = p.data(..).unwrap_or_default();
send.send_to(data, a).unwrap(); send.send_to(data, a).unwrap();
num += 1; num += 1;


@ -255,7 +255,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
let mut packet_batches = to_packet_batches(&vote_txs, PACKETS_PER_BATCH); let mut packet_batches = to_packet_batches(&vote_txs, PACKETS_PER_BATCH);
for batch in packet_batches.iter_mut() { for batch in packet_batches.iter_mut() {
for packet in batch.iter_mut() { for packet in batch.iter_mut() {
packet.meta.set_simple_vote(true); packet.meta_mut().set_simple_vote(true);
} }
} }
packet_batches packet_batches


@ -52,7 +52,7 @@ fn run_bench_packet_discard(num_ips: usize, bencher: &mut Bencher) {
total += batch.len(); total += batch.len();
for p in batch.iter_mut() { for p in batch.iter_mut() {
let ip_index = thread_rng().gen_range(0, ips.len()); let ip_index = thread_rng().gen_range(0, ips.len());
p.meta.addr = ips[ip_index]; p.meta_mut().addr = ips[ip_index];
} }
} }
info!("total packets: {}", total); info!("total packets: {}", total);
@ -62,10 +62,10 @@ fn run_bench_packet_discard(num_ips: usize, bencher: &mut Bencher) {
let mut num_packets = 0; let mut num_packets = 0;
for batch in batches.iter_mut() { for batch in batches.iter_mut() {
for p in batch.iter_mut() { for p in batch.iter_mut() {
if !p.meta.discard() { if !p.meta().discard() {
num_packets += 1; num_packets += 1;
} }
p.meta.set_discard(false); p.meta_mut().set_discard(false);
} }
} }
assert_eq!(num_packets, 10_000); assert_eq!(num_packets, 10_000);
@ -97,7 +97,7 @@ fn bench_packet_discard_mixed_senders(bencher: &mut Bencher) {
for batch in batches.iter_mut() { for batch in batches.iter_mut() {
for packet in batch.iter_mut() { for packet in batch.iter_mut() {
// One spam address, ~1000 unique addresses. // One spam address, ~1000 unique addresses.
packet.meta.addr = if rng.gen_ratio(1, 30) { packet.meta_mut().addr = if rng.gen_ratio(1, 30) {
new_rand_addr(&mut rng) new_rand_addr(&mut rng)
} else { } else {
spam_addr spam_addr
@ -109,10 +109,10 @@ fn bench_packet_discard_mixed_senders(bencher: &mut Bencher) {
let mut num_packets = 0; let mut num_packets = 0;
for batch in batches.iter_mut() { for batch in batches.iter_mut() {
for packet in batch.iter_mut() { for packet in batch.iter_mut() {
if !packet.meta.discard() { if !packet.meta().discard() {
num_packets += 1; num_packets += 1;
} }
packet.meta.set_discard(false); packet.meta_mut().set_discard(false);
} }
} }
assert_eq!(num_packets, 10_000); assert_eq!(num_packets, 10_000);
@ -215,7 +215,7 @@ fn prepare_batches(discard_factor: i32) -> (Vec<PacketBatch>, usize) {
batch.iter_mut().for_each(|p| { batch.iter_mut().for_each(|p| {
let throw = die.sample(&mut rng); let throw = die.sample(&mut rng);
if throw < discard_factor { if throw < discard_factor {
p.meta.set_discard(true); p.meta_mut().set_discard(true);
c += 1; c += 1;
} }
}) })


@ -38,7 +38,7 @@ fn build_packet_batch(
recent_blockhash.unwrap_or_else(Hash::new_unique), recent_blockhash.unwrap_or_else(Hash::new_unique),
); );
let mut packet = Packet::from_data(None, tx).unwrap(); let mut packet = Packet::from_data(None, tx).unwrap();
packet.meta.sender_stake = sender_stake as u64; packet.meta_mut().sender_stake = sender_stake as u64;
packet packet
}) })
.collect(), .collect(),
@ -66,7 +66,7 @@ fn build_randomized_packet_batch(
); );
let mut packet = Packet::from_data(None, tx).unwrap(); let mut packet = Packet::from_data(None, tx).unwrap();
let sender_stake = distribution.sample(&mut rng); let sender_stake = distribution.sample(&mut rng);
packet.meta.sender_stake = sender_stake as u64; packet.meta_mut().sender_stake = sender_stake as u64;
packet packet
}) })
.collect(), .collect(),
@ -120,8 +120,8 @@ fn bench_packet_clone(bencher: &mut Bencher) {
let mut timer = Measure::start("insert_batch"); let mut timer = Measure::start("insert_batch");
packet_batch.iter().for_each(|packet| { packet_batch.iter().for_each(|packet| {
let mut packet = packet.clone(); let mut packet = packet.clone();
packet.meta.sender_stake *= 2; packet.meta_mut().sender_stake *= 2;
if packet.meta.sender_stake > 2 { if packet.meta().sender_stake > 2 {
outer_packet = packet; outer_packet = packet;
} }
}); });


@ -343,7 +343,7 @@ impl AncestorHashesService {
keypair: &Keypair, keypair: &Keypair,
ancestor_socket: &UdpSocket, ancestor_socket: &UdpSocket,
) -> Option<(Slot, DuplicateAncestorDecision)> { ) -> Option<(Slot, DuplicateAncestorDecision)> {
let from_addr = packet.meta.socket_addr(); let from_addr = packet.meta().socket_addr();
let packet_data = match packet.data(..) { let packet_data = match packet.data(..) {
Some(data) => data, Some(data) => data,
None => { None => {
@ -1205,7 +1205,9 @@ mod test {
.recv_timeout(Duration::from_millis(10_000)) .recv_timeout(Duration::from_millis(10_000))
.unwrap(); .unwrap();
let packet = &mut response_packet[0]; let packet = &mut response_packet[0];
packet.meta.set_socket_addr(&responder_info.serve_repair); packet.meta_mut().set_socket_addr(&responder_info.serve_repair);
let decision = AncestorHashesService::verify_and_process_ancestor_response( let decision = AncestorHashesService::verify_and_process_ancestor_response(
packet, packet,
&ancestor_hashes_request_statuses, &ancestor_hashes_request_statuses,
@ -1465,7 +1467,7 @@ mod test {
// Create invalid packet with fewer bytes than the size of the nonce // Create invalid packet with fewer bytes than the size of the nonce
let mut packet = Packet::default(); let mut packet = Packet::default();
packet.meta.size = 0; packet.meta_mut().size = 0;
assert!(AncestorHashesService::verify_and_process_ancestor_response( assert!(AncestorHashesService::verify_and_process_ancestor_response(
&packet, &packet,
@ -1573,7 +1575,9 @@ mod test {
.recv_timeout(Duration::from_millis(10_000)) .recv_timeout(Duration::from_millis(10_000))
.unwrap(); .unwrap();
let packet = &mut response_packet[0]; let packet = &mut response_packet[0];
packet.meta.set_socket_addr(&responder_info.serve_repair); packet.meta_mut().set_socket_addr(&responder_info.serve_repair);
let decision = AncestorHashesService::verify_and_process_ancestor_response( let decision = AncestorHashesService::verify_and_process_ancestor_response(
packet, packet,
&ancestor_hashes_request_statuses, &ancestor_hashes_request_statuses,


@ -556,7 +556,7 @@ impl BankingStage {
let packet_vec: Vec<_> = forwardable_packets let packet_vec: Vec<_> = forwardable_packets
.filter_map(|p| { .filter_map(|p| {
if !p.meta.forwarded() && data_budget.take(p.meta.size) { if !p.meta().forwarded() && data_budget.take(p.meta().size) {
Some(p.data(..)?.to_vec()) Some(p.data(..)?.to_vec())
} else { } else {
None None
@ -2122,7 +2122,7 @@ mod tests {
with_vers.iter_mut().for_each(|(b, v)| { with_vers.iter_mut().for_each(|(b, v)| {
b.iter_mut() b.iter_mut()
.zip(v) .zip(v)
.for_each(|(p, f)| p.meta.set_discard(*f == 0)) .for_each(|(p, f)| p.meta_mut().set_discard(*f == 0))
}); });
with_vers.into_iter().map(|(b, _)| b).collect() with_vers.into_iter().map(|(b, _)| b).collect()
} }
@ -3925,7 +3925,7 @@ mod tests {
let forwarded_packet = { let forwarded_packet = {
let transaction = system_transaction::transfer(&keypair, &pubkey, 1, fwd_block_hash); let transaction = system_transaction::transfer(&keypair, &pubkey, 1, fwd_block_hash);
let mut packet = Packet::from_data(None, transaction).unwrap(); let mut packet = Packet::from_data(None, transaction).unwrap();
packet.meta.flags |= PacketFlags::FORWARDED; packet.meta_mut().flags |= PacketFlags::FORWARDED;
DeserializedPacket::new(packet).unwrap() DeserializedPacket::new(packet).unwrap()
}; };
@ -4005,7 +4005,7 @@ mod tests {
let num_received = recv_mmsg(recv_socket, &mut packets[..]).unwrap_or_default(); let num_received = recv_mmsg(recv_socket, &mut packets[..]).unwrap_or_default();
assert_eq!(num_received, expected_ids.len(), "{}", name); assert_eq!(num_received, expected_ids.len(), "{}", name);
for (i, expected_id) in expected_ids.iter().enumerate() { for (i, expected_id) in expected_ids.iter().enumerate() {
assert_eq!(packets[i].meta.size, 215); assert_eq!(packets[i].meta().size, 215);
let recv_transaction: VersionedTransaction = let recv_transaction: VersionedTransaction =
packets[i].deserialize_slice(..).unwrap(); packets[i].deserialize_slice(..).unwrap();
assert_eq!( assert_eq!(


@ -347,7 +347,7 @@ impl ClusterInfoVoteListener {
.filter(|(_, packet_batch)| { .filter(|(_, packet_batch)| {
// to_packet_batches() above splits into 1 packet long batches // to_packet_batches() above splits into 1 packet long batches
assert_eq!(packet_batch.len(), 1); assert_eq!(packet_batch.len(), 1);
!packet_batch[0].meta.discard() !packet_batch[0].meta().discard()
}) })
.filter_map(|(tx, packet_batch)| { .filter_map(|(tx, packet_batch)| {
let (vote_account_key, vote, ..) = vote_parser::parse_vote_transaction(&tx)?; let (vote_account_key, vote, ..) = vote_parser::parse_vote_transaction(&tx)?;


@ -102,7 +102,7 @@ impl FetchStage {
poh_recorder: &Arc<RwLock<PohRecorder>>, poh_recorder: &Arc<RwLock<PohRecorder>>,
) -> Result<()> { ) -> Result<()> {
let mark_forwarded = |packet: &mut Packet| { let mark_forwarded = |packet: &mut Packet| {
packet.meta.flags |= PacketFlags::FORWARDED; packet.meta_mut().flags |= PacketFlags::FORWARDED;
}; };
let mut packet_batch = recvr.recv()?; let mut packet_batch = recvr.recv()?;


@ -150,8 +150,8 @@ impl FindPacketSenderStakeStage {
.iter_mut() .iter_mut()
.flat_map(|batch| batch.iter_mut()) .flat_map(|batch| batch.iter_mut())
.for_each(|packet| { .for_each(|packet| {
packet.meta.sender_stake = ip_to_stake packet.meta_mut().sender_stake = ip_to_stake
.get(&packet.meta.addr) .get(&packet.meta().addr)
.copied() .copied()
.unwrap_or_default(); .unwrap_or_default();
}); });
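
The stake-tagging hunk above both reads and writes packet metadata; a minimal standalone sketch of the same update under the new API, assuming a HashMap<IpAddr, u64> stake table and the solana-sdk Packet type (the helper name is hypothetical):

    use std::{collections::HashMap, net::IpAddr};

    use solana_sdk::packet::Packet;

    // Read the sender address through meta(), then write the stake through
    // meta_mut(); the immutable borrow ends before the mutable one starts.
    fn apply_sender_stakes(packets: &mut [Packet], ip_to_stake: &HashMap<IpAddr, u64>) {
        for packet in packets.iter_mut() {
            let stake = ip_to_stake
                .get(&packet.meta().addr)
                .copied()
                .unwrap_or_default();
            packet.meta_mut().sender_stake = stake;
        }
    }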


@ -54,7 +54,7 @@ impl ImmutableDeserializedPacket {
let sanitized_transaction = SanitizedVersionedTransaction::try_from(versioned_transaction)?; let sanitized_transaction = SanitizedVersionedTransaction::try_from(versioned_transaction)?;
let message_bytes = packet_message(&packet)?; let message_bytes = packet_message(&packet)?;
let message_hash = Message::hash_raw_message(message_bytes); let message_hash = Message::hash_raw_message(message_bytes);
let is_simple_vote = packet.meta.is_simple_vote_tx(); let is_simple_vote = packet.meta().is_simple_vote_tx();
// drop transaction if prioritization fails. // drop transaction if prioritization fails.
let mut priority_details = priority_details let mut priority_details = priority_details


@ -34,7 +34,7 @@ pub struct LatestValidatorVotePacket {
impl LatestValidatorVotePacket { impl LatestValidatorVotePacket {
pub fn new(packet: Packet, vote_source: VoteSource) -> Result<Self, DeserializedPacketError> { pub fn new(packet: Packet, vote_source: VoteSource) -> Result<Self, DeserializedPacketError> {
if !packet.meta.is_simple_vote_tx() { if !packet.meta().is_simple_vote_tx() {
return Err(DeserializedPacketError::VoteTransactionError); return Err(DeserializedPacketError::VoteTransactionError);
} }
@ -347,7 +347,10 @@ mod tests {
None, None,
); );
let mut packet = Packet::from_data(None, vote_tx).unwrap(); let mut packet = Packet::from_data(None, vote_tx).unwrap();
packet.meta.flags.set(PacketFlags::SIMPLE_VOTE_TX, true); packet.meta_mut().flags.set(PacketFlags::SIMPLE_VOTE_TX, true);
LatestValidatorVotePacket::new(packet, vote_source).unwrap() LatestValidatorVotePacket::new(packet, vote_source).unwrap()
} }
@ -380,7 +383,7 @@ mod tests {
), ),
) )
.unwrap(); .unwrap();
vote.meta.flags.set(PacketFlags::SIMPLE_VOTE_TX, true); vote.meta_mut().flags.set(PacketFlags::SIMPLE_VOTE_TX, true);
let mut vote_switch = Packet::from_data( let mut vote_switch = Packet::from_data(
None, None,
new_vote_transaction( new_vote_transaction(
@ -395,7 +398,7 @@ mod tests {
) )
.unwrap(); .unwrap();
vote_switch vote_switch
.meta .meta_mut()
.flags .flags
.set(PacketFlags::SIMPLE_VOTE_TX, true); .set(PacketFlags::SIMPLE_VOTE_TX, true);
let mut vote_state_update = Packet::from_data( let mut vote_state_update = Packet::from_data(
@ -411,7 +414,7 @@ mod tests {
) )
.unwrap(); .unwrap();
vote_state_update vote_state_update
.meta .meta_mut()
.flags .flags
.set(PacketFlags::SIMPLE_VOTE_TX, true); .set(PacketFlags::SIMPLE_VOTE_TX, true);
let mut vote_state_update_switch = Packet::from_data( let mut vote_state_update_switch = Packet::from_data(
@ -427,7 +430,7 @@ mod tests {
) )
.unwrap(); .unwrap();
vote_state_update_switch vote_state_update_switch
.meta .meta_mut()
.flags .flags
.set(PacketFlags::SIMPLE_VOTE_TX, true); .set(PacketFlags::SIMPLE_VOTE_TX, true);
let random_transaction = Packet::from_data( let random_transaction = Packet::from_data(


@ -122,7 +122,7 @@ impl PacketDeserializer {
packet_batch packet_batch
.iter() .iter()
.enumerate() .enumerate()
.filter(|(_, pkt)| !pkt.meta.discard()) .filter(|(_, pkt)| !pkt.meta().discard())
.map(|(index, _)| index) .map(|(index, _)| index)
.collect() .collect()
} }
@ -179,7 +179,7 @@ mod tests {
let transactions = vec![random_transfer(), random_transfer()]; let transactions = vec![random_transfer(), random_transfer()];
let mut packet_batches = to_packet_batches(&transactions, 1); let mut packet_batches = to_packet_batches(&transactions, 1);
assert_eq!(packet_batches.len(), 2); assert_eq!(packet_batches.len(), 2);
packet_batches[0][0].meta.set_discard(true); packet_batches[0][0].meta_mut().set_discard(true);
let results = PacketDeserializer::deserialize_and_collect_packets(&packet_batches, None); let results = PacketDeserializer::deserialize_and_collect_packets(&packet_batches, None);
assert_eq!(results.deserialized_packets.len(), 1); assert_eq!(results.deserialized_packets.len(), 1);


@ -32,8 +32,8 @@ pub fn repair_response_packet_from_bytes(
if size > packet.buffer_mut().len() { if size > packet.buffer_mut().len() {
return None; return None;
} }
packet.meta.size = size; packet.meta_mut().size = size;
packet.meta.set_socket_addr(dest); packet.meta_mut().set_socket_addr(dest);
packet.buffer_mut()[..bytes.len()].copy_from_slice(&bytes); packet.buffer_mut()[..bytes.len()].copy_from_slice(&bytes);
let mut wr = io::Cursor::new(&mut packet.buffer_mut()[bytes.len()..]); let mut wr = io::Cursor::new(&mut packet.buffer_mut()[bytes.len()..]);
bincode::serialize_into(&mut wr, &nonce).expect("Buffer not large enough to fit nonce"); bincode::serialize_into(&mut wr, &nonce).expect("Buffer not large enough to fit nonce");
@ -90,7 +90,7 @@ mod test {
nonce, nonce,
) )
.unwrap(); .unwrap();
packet.meta.flags |= PacketFlags::REPAIR; packet.meta_mut().flags |= PacketFlags::REPAIR;
let leader_slots = [(slot, keypair.pubkey().to_bytes())] let leader_slots = [(slot, keypair.pubkey().to_bytes())]
.iter() .iter()


@ -485,7 +485,7 @@ impl ServeRepair {
} }
}; };
let from_addr = packet.meta.socket_addr(); let from_addr = packet.meta().socket_addr();
if !ContactInfo::is_valid_address(&from_addr, &socket_addr_space) { if !ContactInfo::is_valid_address(&from_addr, &socket_addr_space) {
stats.err_malformed += 1; stats.err_malformed += 1;
continue; continue;
@ -807,7 +807,7 @@ impl ServeRepair {
Some(rsp) => rsp, Some(rsp) => rsp,
}; };
let num_response_packets = rsp.len(); let num_response_packets = rsp.len();
let num_response_bytes = rsp.iter().map(|p| p.meta.size).sum(); let num_response_bytes = rsp.iter().map(|p| p.meta().size).sum();
if data_budget.take(num_response_bytes) && response_sender.send(rsp).is_ok() { if data_budget.take(num_response_bytes) && response_sender.send(rsp).is_ok() {
stats.total_response_packets += num_response_packets; stats.total_response_packets += num_response_packets;
match stake > 0 { match stake > 0 {
@ -984,7 +984,7 @@ impl ServeRepair {
) { ) {
let mut pending_pongs = Vec::default(); let mut pending_pongs = Vec::default();
for packet in packet_batch.iter_mut() { for packet in packet_batch.iter_mut() {
if packet.meta.size != REPAIR_RESPONSE_SERIALIZED_PING_BYTES { if packet.meta().size != REPAIR_RESPONSE_SERIALIZED_PING_BYTES {
continue; continue;
} }
if let Ok(RepairResponse::Ping(ping)) = packet.deserialize_slice(..) { if let Ok(RepairResponse::Ping(ping)) = packet.deserialize_slice(..) {
@ -998,12 +998,12 @@ impl ServeRepair {
stats.ping_err_verify_count += 1; stats.ping_err_verify_count += 1;
continue; continue;
} }
packet.meta.set_discard(true); packet.meta_mut().set_discard(true);
stats.ping_count += 1; stats.ping_count += 1;
if let Ok(pong) = Pong::new(&ping, keypair) { if let Ok(pong) = Pong::new(&ping, keypair) {
let pong = RepairProtocol::Pong(pong); let pong = RepairProtocol::Pong(pong);
if let Ok(pong_bytes) = serialize(&pong) { if let Ok(pong_bytes) = serialize(&pong) {
let from_addr = packet.meta.socket_addr(); let from_addr = packet.meta().socket_addr();
pending_pongs.push((pong_bytes, from_addr)); pending_pongs.push((pong_bytes, from_addr));
} }
} }
@ -1210,7 +1210,7 @@ mod tests {
let ping = Ping::new_rand(&mut rng, &keypair).unwrap(); let ping = Ping::new_rand(&mut rng, &keypair).unwrap();
let ping = RepairResponse::Ping(ping); let ping = RepairResponse::Ping(ping);
let pkt = Packet::from_data(None, ping).unwrap(); let pkt = Packet::from_data(None, ping).unwrap();
assert_eq!(pkt.meta.size, REPAIR_RESPONSE_SERIALIZED_PING_BYTES); assert_eq!(pkt.meta().size, REPAIR_RESPONSE_SERIALIZED_PING_BYTES);
} }
#[test] #[test]
@ -1230,7 +1230,7 @@ mod tests {
shred.sign(&keypair); shred.sign(&keypair);
let mut pkt = Packet::default(); let mut pkt = Packet::default();
shred.copy_to_packet(&mut pkt); shred.copy_to_packet(&mut pkt);
pkt.meta.size = REPAIR_RESPONSE_SERIALIZED_PING_BYTES; pkt.meta_mut().size = REPAIR_RESPONSE_SERIALIZED_PING_BYTES;
let res = pkt.deserialize_slice::<RepairResponse, _>(..); let res = pkt.deserialize_slice::<RepairResponse, _>(..);
if let Ok(RepairResponse::Ping(ping)) = res { if let Ok(RepairResponse::Ping(ping)) = res {
assert!(!ping.verify()); assert!(!ping.verify());
@ -1870,7 +1870,7 @@ mod tests {
fn test_run_ancestor_hashes() { fn test_run_ancestor_hashes() {
fn deserialize_ancestor_hashes_response(packet: &Packet) -> AncestorHashesResponse { fn deserialize_ancestor_hashes_response(packet: &Packet) -> AncestorHashesResponse {
packet packet
.deserialize_slice(..packet.meta.size - SIZE_OF_NONCE) .deserialize_slice(..packet.meta().size - SIZE_OF_NONCE)
.unwrap() .unwrap()
} }


@ -95,9 +95,9 @@ impl ShredFetchStage {
&mut shreds_received, &mut shreds_received,
&mut stats, &mut stats,
) { ) {
packet.meta.set_discard(true); packet.meta_mut().set_discard(true);
} else { } else {
packet.meta.flags.insert(flags); packet.meta_mut().flags.insert(flags);
} }
} }
stats.maybe_submit(name, STATS_SUBMIT_CADENCE); stats.maybe_submit(name, STATS_SUBMIT_CADENCE);


@ -94,7 +94,7 @@ impl SigVerifier for TransactionSigVerifier {
is_dup: bool, is_dup: bool,
) { ) {
sigverify::check_for_tracer_packet(packet); sigverify::check_for_tracer_packet(packet);
if packet.meta.is_tracer_packet() { if packet.meta().is_tracer_packet() {
if removed_before_sigverify_stage { if removed_before_sigverify_stage {
self.tracer_packet_stats self.tracer_packet_stats
.total_removed_before_sigverify_stage += 1; .total_removed_before_sigverify_stage += 1;
@ -110,14 +110,14 @@ impl SigVerifier for TransactionSigVerifier {
#[inline(always)] #[inline(always)]
fn process_excess_packet(&mut self, packet: &Packet) { fn process_excess_packet(&mut self, packet: &Packet) {
if packet.meta.is_tracer_packet() { if packet.meta().is_tracer_packet() {
self.tracer_packet_stats.total_excess_tracer_packets += 1; self.tracer_packet_stats.total_excess_tracer_packets += 1;
} }
} }
#[inline(always)] #[inline(always)]
fn process_passed_sigverify_packet(&mut self, packet: &Packet) { fn process_passed_sigverify_packet(&mut self, packet: &Packet) {
if packet.meta.is_tracer_packet() { if packet.meta().is_tracer_packet() {
self.tracer_packet_stats self.tracer_packet_stats
.total_tracker_packets_passed_sigverify += 1; .total_tracker_packets_passed_sigverify += 1;
} }


@ -92,7 +92,7 @@ fn run_shred_sigverify(
let shreds: Vec<_> = packets let shreds: Vec<_> = packets
.iter() .iter()
.flat_map(PacketBatch::iter) .flat_map(PacketBatch::iter)
.filter(|packet| !packet.meta.discard() && !packet.meta.repair()) .filter(|packet| !packet.meta().discard() && !packet.meta().repair())
.filter_map(shred::layout::get_shred) .filter_map(shred::layout::get_shred)
.map(<[u8]>::to_vec) .map(<[u8]>::to_vec)
.collect(); .collect();
@ -137,13 +137,13 @@ fn get_slot_leaders(
let mut leaders = HashMap::<Slot, Option<Pubkey>>::new(); let mut leaders = HashMap::<Slot, Option<Pubkey>>::new();
for batch in batches { for batch in batches {
for packet in batch.iter_mut() { for packet in batch.iter_mut() {
if packet.meta.discard() { if packet.meta().discard() {
continue; continue;
} }
let shred = shred::layout::get_shred(packet); let shred = shred::layout::get_shred(packet);
let slot = match shred.and_then(shred::layout::get_slot) { let slot = match shred.and_then(shred::layout::get_slot) {
None => { None => {
packet.meta.set_discard(true); packet.meta_mut().set_discard(true);
continue; continue;
} }
Some(slot) => slot, Some(slot) => slot,
@ -154,7 +154,7 @@ fn get_slot_leaders(
(&leader != self_pubkey).then_some(leader) (&leader != self_pubkey).then_some(leader)
}); });
if leader.is_none() { if leader.is_none() {
packet.meta.set_discard(true); packet.meta_mut().set_discard(true);
} }
} }
} }
@ -165,7 +165,7 @@ fn count_discards(packets: &[PacketBatch]) -> usize {
packets packets
.iter() .iter()
.flat_map(PacketBatch::iter) .flat_map(PacketBatch::iter)
.filter(|packet| packet.meta.discard()) .filter(|packet| packet.meta().discard())
.count() .count()
} }
@ -265,7 +265,7 @@ mod tests {
); );
shred.sign(&leader_keypair); shred.sign(&leader_keypair);
batches[0][0].buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload()); batches[0][0].buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload());
batches[0][0].meta.size = shred.payload().len(); batches[0][0].meta_mut().size = shred.payload().len();
let mut shred = Shred::new_from_data( let mut shred = Shred::new_from_data(
0, 0,
@ -280,7 +280,7 @@ mod tests {
let wrong_keypair = Keypair::new(); let wrong_keypair = Keypair::new();
shred.sign(&wrong_keypair); shred.sign(&wrong_keypair);
batches[0][1].buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload()); batches[0][1].buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload());
batches[0][1].meta.size = shred.payload().len(); batches[0][1].meta_mut().size = shred.payload().len();
verify_packets( verify_packets(
&Pubkey::new_unique(), // self_pubkey &Pubkey::new_unique(), // self_pubkey
@ -289,7 +289,7 @@ mod tests {
&RecyclerCache::warmed(), &RecyclerCache::warmed(),
&mut batches, &mut batches,
); );
assert!(!batches[0][0].meta.discard()); assert!(!batches[0][0].meta().discard());
assert!(batches[0][1].meta.discard()); assert!(batches[0][1].meta().discard());
} }
} }


@ -253,8 +253,8 @@ impl SigVerifyStage {
.iter_mut() .iter_mut()
.rev() .rev()
.flat_map(|batch| batch.iter_mut().rev()) .flat_map(|batch| batch.iter_mut().rev())
.filter(|packet| !packet.meta.discard()) .filter(|packet| !packet.meta().discard())
.map(|packet| (packet.meta.addr, packet)) .map(|packet| (packet.meta().addr, packet))
.into_group_map(); .into_group_map();
// Allocate max_packets evenly across addresses. // Allocate max_packets evenly across addresses.
while max_packets > 0 && !addrs.is_empty() { while max_packets > 0 && !addrs.is_empty() {
@ -269,7 +269,7 @@ impl SigVerifyStage {
// Discard excess packets from each address. // Discard excess packets from each address.
for packet in addrs.into_values().flatten() { for packet in addrs.into_values().flatten() {
process_excess_packet(packet); process_excess_packet(packet);
packet.meta.set_discard(true); packet.meta_mut().set_discard(true);
} }
} }
@ -473,7 +473,7 @@ mod tests {
packet_batches packet_batches
.iter() .iter()
.flatten() .flatten()
.filter(|p| !p.meta.discard()) .filter(|p| !p.meta().discard())
.count() .count()
} }
@ -483,18 +483,18 @@ mod tests {
let batch_size = 10; let batch_size = 10;
let mut batch = PacketBatch::with_capacity(batch_size); let mut batch = PacketBatch::with_capacity(batch_size);
let mut tracer_packet = Packet::default(); let mut tracer_packet = Packet::default();
tracer_packet.meta.flags |= PacketFlags::TRACER_PACKET; tracer_packet.meta_mut().flags |= PacketFlags::TRACER_PACKET;
batch.resize(batch_size, tracer_packet); batch.resize(batch_size, tracer_packet);
batch[3].meta.addr = std::net::IpAddr::from([1u16; 8]); batch[3].meta_mut().addr = std::net::IpAddr::from([1u16; 8]);
batch[3].meta.set_discard(true); batch[3].meta_mut().set_discard(true);
let num_discarded_before_filter = 1; let num_discarded_before_filter = 1;
batch[4].meta.addr = std::net::IpAddr::from([2u16; 8]); batch[4].meta_mut().addr = std::net::IpAddr::from([2u16; 8]);
let total_num_packets = batch.len(); let total_num_packets = batch.len();
let mut batches = vec![batch]; let mut batches = vec![batch];
let max = 3; let max = 3;
let mut total_tracer_packets_discarded = 0; let mut total_tracer_packets_discarded = 0;
SigVerifyStage::discard_excess_packets(&mut batches, max, |packet| { SigVerifyStage::discard_excess_packets(&mut batches, max, |packet| {
if packet.meta.is_tracer_packet() { if packet.meta().is_tracer_packet() {
total_tracer_packets_discarded += 1; total_tracer_packets_discarded += 1;
} }
}); });
@ -508,9 +508,9 @@ mod tests {
total_discarded - num_discarded_before_filter total_discarded - num_discarded_before_filter
); );
assert_eq!(total_non_discard, max); assert_eq!(total_non_discard, max);
assert!(!batches[0][0].meta.discard()); assert!(!batches[0][0].meta().discard());
assert!(batches[0][3].meta.discard()); assert!(batches[0][3].meta().discard());
assert!(!batches[0][4].meta.discard()); assert!(!batches[0][4].meta().discard());
} }
fn gen_batches( fn gen_batches(
@ -556,7 +556,7 @@ mod tests {
sent_len += batch.len(); sent_len += batch.len();
batch batch
.iter_mut() .iter_mut()
.for_each(|packet| packet.meta.flags |= PacketFlags::TRACER_PACKET); .for_each(|packet| packet.meta_mut().flags |= PacketFlags::TRACER_PACKET);
assert_eq!(batch.len(), packets_per_batch); assert_eq!(batch.len(), packets_per_batch);
packet_s.send(vec![batch]).unwrap(); packet_s.send(vec![batch]).unwrap();
} }
@ -637,7 +637,7 @@ mod tests {
batches.iter_mut().for_each(|batch| { batches.iter_mut().for_each(|batch| {
batch.iter_mut().for_each(|p| { batch.iter_mut().for_each(|p| {
if ((index + 1) as f64 / num_packets as f64) < MAX_DISCARDED_PACKET_RATE { if ((index + 1) as f64 / num_packets as f64) < MAX_DISCARDED_PACKET_RATE {
p.meta.set_discard(true); p.meta_mut().set_discard(true);
} }
index += 1; index += 1;
}) })
@ -647,7 +647,7 @@ mod tests {
assert_eq!(SigVerifyStage::maybe_shrink_batches(&mut batches).1, 0); assert_eq!(SigVerifyStage::maybe_shrink_batches(&mut batches).1, 0);
// discard one more to exceed shrink threshold // discard one more to exceed shrink threshold
batches.last_mut().unwrap()[0].meta.set_discard(true); batches.last_mut().unwrap()[0].meta_mut().set_discard(true);
let expected_num_shrunk_batches = let expected_num_shrunk_batches =
1.max((num_generated_batches as f64 * MAX_DISCARDED_PACKET_RATE) as usize); 1.max((num_generated_batches as f64 * MAX_DISCARDED_PACKET_RATE) as usize);


@ -126,7 +126,7 @@ impl UnprocessedPacketBatches {
if dropped_packet if dropped_packet
.immutable_section() .immutable_section()
.original_packet() .original_packet()
.meta .meta()
.is_tracer_packet() .is_tracer_packet()
{ {
num_dropped_tracer_packets += 1; num_dropped_tracer_packets += 1;
@ -478,7 +478,7 @@ mod tests {
packet_vector.push(Packet::from_data(None, tx).unwrap()); packet_vector.push(Packet::from_data(None, tx).unwrap());
} }
for index in vote_indexes.iter() { for index in vote_indexes.iter() {
packet_vector[*index].meta.flags |= PacketFlags::SIMPLE_VOTE_TX; packet_vector[*index].meta_mut().flags |= PacketFlags::SIMPLE_VOTE_TX;
} }
packet_vector packet_vector


@ -941,7 +941,7 @@ impl ThreadLocalUnprocessedPackets {
.filter_map(|immutable_deserialized_packet| { .filter_map(|immutable_deserialized_packet| {
let is_tracer_packet = immutable_deserialized_packet let is_tracer_packet = immutable_deserialized_packet
.original_packet() .original_packet()
.meta .meta()
.is_tracer_packet(); .is_tracer_packet();
if is_tracer_packet { if is_tracer_packet {
saturating_add_assign!(*total_tracer_packets_in_buffer, 1); saturating_add_assign!(*total_tracer_packets_in_buffer, 1);
@ -1060,8 +1060,8 @@ mod tests {
.enumerate() .enumerate()
.map(|(packets_id, transaction)| { .map(|(packets_id, transaction)| {
let mut p = Packet::from_data(None, transaction).unwrap(); let mut p = Packet::from_data(None, transaction).unwrap();
p.meta.port = packets_id as u16; p.meta_mut().port = packets_id as u16;
p.meta.set_tracer(true); p.meta_mut().set_tracer(true);
DeserializedPacket::new(p).unwrap() DeserializedPacket::new(p).unwrap()
}) })
.collect_vec(); .collect_vec();
@ -1099,7 +1099,7 @@ mod tests {
batch batch
.get_forwardable_packets() .get_forwardable_packets()
.into_iter() .into_iter()
.map(|p| p.meta.port) .map(|p| p.meta().port)
}) })
.collect(); .collect();
forwarded_ports.sort_unstable(); forwarded_ports.sort_unstable();
@ -1196,7 +1196,7 @@ mod tests {
None, None,
), ),
)?; )?;
vote.meta.flags.set(PacketFlags::SIMPLE_VOTE_TX, true); vote.meta_mut().flags.set(PacketFlags::SIMPLE_VOTE_TX, true);
let big_transfer = Packet::from_data( let big_transfer = Packet::from_data(
None, None,
system_transaction::transfer(&keypair, &pubkey, 1000000, Hash::new_unique()), system_transaction::transfer(&keypair, &pubkey, 1000000, Hash::new_unique()),
@ -1269,8 +1269,8 @@ mod tests {
.enumerate() .enumerate()
.map(|(packets_id, transaction)| { .map(|(packets_id, transaction)| {
let mut p = Packet::from_data(None, transaction).unwrap(); let mut p = Packet::from_data(None, transaction).unwrap();
p.meta.port = packets_id as u16; p.meta_mut().port = packets_id as u16;
p.meta.set_tracer(true); p.meta_mut().set_tracer(true);
DeserializedPacket::new(p).unwrap() DeserializedPacket::new(p).unwrap()
}) })
.collect_vec(); .collect_vec();


@ -233,14 +233,14 @@ where
ws_metrics.shred_receiver_elapsed_us += shred_receiver_elapsed.as_us(); ws_metrics.shred_receiver_elapsed_us += shred_receiver_elapsed.as_us();
ws_metrics.run_insert_count += 1; ws_metrics.run_insert_count += 1;
let handle_packet = |packet: &Packet| { let handle_packet = |packet: &Packet| {
if packet.meta.discard() { if packet.meta().discard() {
return None; return None;
} }
let shred = shred::layout::get_shred(packet)?; let shred = shred::layout::get_shred(packet)?;
let shred = Shred::new_from_serialized_shred(shred.to_vec()).ok()?; let shred = Shred::new_from_serialized_shred(shred.to_vec()).ok()?;
if packet.meta.repair() { if packet.meta().repair() {
let repair_info = RepairMeta { let repair_info = RepairMeta {
_from_addr: packet.meta.socket_addr(), _from_addr: packet.meta().socket_addr(),
// If can't parse the nonce, dump the packet. // If can't parse the nonce, dump the packet.
nonce: repair_response::nonce(packet)?, nonce: repair_response::nonce(packet)?,
}; };
@ -261,7 +261,7 @@ where
ws_metrics.num_repairs += repair_infos.iter().filter(|r| r.is_some()).count(); ws_metrics.num_repairs += repair_infos.iter().filter(|r| r.is_some()).count();
ws_metrics.num_shreds_received += shreds.len(); ws_metrics.num_shreds_received += shreds.len();
for packet in packets.iter().flat_map(PacketBatch::iter) { for packet in packets.iter().flat_map(PacketBatch::iter) {
let addr = packet.meta.socket_addr(); let addr = packet.meta().socket_addr();
*ws_metrics.addrs.entry(addr).or_default() += 1; *ws_metrics.addrs.entry(addr).or_default() += 1;
} }


@ -511,7 +511,7 @@ pub fn start_verify_transactions(
.map(|tx| tx.to_versioned_transaction()); .map(|tx| tx.to_versioned_transaction());
let res = packet_batch.par_iter_mut().zip(entry_tx_iter).all(|pair| { let res = packet_batch.par_iter_mut().zip(entry_tx_iter).all(|pair| {
pair.0.meta = Meta::default(); *pair.0.meta_mut() = Meta::default();
Packet::populate_packet(pair.0, None, &pair.1).is_ok() Packet::populate_packet(pair.0, None, &pair.1).is_ok()
}); });
if res { if res {
@ -538,7 +538,7 @@ pub fn start_verify_transactions(
); );
let verified = packet_batches let verified = packet_batches
.iter() .iter()
.all(|batch| batch.iter().all(|p| !p.meta.discard())); .all(|batch| batch.iter().all(|p| !p.meta().discard()));
verify_time.stop(); verify_time.stop();
(verified, verify_time.as_us()) (verified, verify_time.as_us())
}) })


@ -1947,7 +1947,7 @@ impl ClusterInfo {
} }
check check
}; };
// Because pull-responses are sent back to packet.meta.socket_addr() of // Because pull-responses are sent back to packet.meta().socket_addr() of
// incoming pull-requests, pings are also sent to request.from_addr (as // incoming pull-requests, pings are also sent to request.from_addr (as
// opposed to caller.gossip address). // opposed to caller.gossip address).
move |request| { move |request| {
@ -2041,8 +2041,8 @@ impl ClusterInfo {
match Packet::from_data(Some(addr), response) { match Packet::from_data(Some(addr), response) {
Err(err) => error!("failed to write pull-response packet: {:?}", err), Err(err) => error!("failed to write pull-response packet: {:?}", err),
Ok(packet) => { Ok(packet) => {
if self.outbound_budget.take(packet.meta.size) { if self.outbound_budget.take(packet.meta().size) {
total_bytes += packet.meta.size; total_bytes += packet.meta().size;
packet_batch.push(packet); packet_batch.push(packet);
sent += 1; sent += 1;
} else { } else {
@ -2520,7 +2520,7 @@ impl ClusterInfo {
let protocol: Protocol = packet.deserialize_slice(..).ok()?; let protocol: Protocol = packet.deserialize_slice(..).ok()?;
protocol.sanitize().ok()?; protocol.sanitize().ok()?;
let protocol = protocol.par_verify(&self.stats)?; let protocol = protocol.par_verify(&self.stats)?;
Some((packet.meta.socket_addr(), protocol)) Some((packet.meta().socket_addr(), protocol))
}; };
let packets: Vec<_> = { let packets: Vec<_> = {
let _st = ScopedTimer::from(&self.stats.verify_gossip_packets_time); let _st = ScopedTimer::from(&self.stats.verify_gossip_packets_time);
@ -3412,7 +3412,7 @@ RPC Enabled Nodes: 1"#;
remote_nodes.into_iter(), remote_nodes.into_iter(),
pongs.into_iter() pongs.into_iter()
) { ) {
assert_eq!(packet.meta.socket_addr(), socket); assert_eq!(packet.meta().socket_addr(), socket);
let bytes = serialize(&pong).unwrap(); let bytes = serialize(&pong).unwrap();
match packet.deserialize_slice(..).unwrap() { match packet.deserialize_slice(..).unwrap() {
Protocol::PongMessage(pong) => assert_eq!(serialize(&pong).unwrap(), bytes), Protocol::PongMessage(pong) => assert_eq!(serialize(&pong).unwrap(), bytes),


@ -255,7 +255,7 @@ pub fn cluster_info_retransmit() {
} }
assert!(done); assert!(done);
let mut p = Packet::default(); let mut p = Packet::default();
p.meta.size = 10; p.meta_mut().size = 10;
let peers = c1.tvu_peers(); let peers = c1.tvu_peers();
let retransmit_peers: Vec<_> = peers.iter().collect(); let retransmit_peers: Vec<_> = peers.iter().collect();
retransmit_to( retransmit_to(


@ -330,7 +330,7 @@ impl Shred {
let payload = self.payload(); let payload = self.payload();
let size = payload.len(); let size = payload.len();
packet.buffer_mut()[..size].copy_from_slice(&payload[..]); packet.buffer_mut()[..size].copy_from_slice(&payload[..]);
packet.meta.size = size; packet.meta_mut().size = size;
} }
// TODO: Should this sanitize output? // TODO: Should this sanitize output?
@ -542,7 +542,7 @@ pub mod layout {
fn get_shred_size(packet: &Packet) -> Option<usize> { fn get_shred_size(packet: &Packet) -> Option<usize> {
let size = packet.data(..)?.len(); let size = packet.data(..)?.len();
if packet.meta.repair() { if packet.meta().repair() {
size.checked_sub(SIZE_OF_NONCE) size.checked_sub(SIZE_OF_NONCE)
} else { } else {
Some(size) Some(size)
@ -1066,7 +1066,7 @@ mod tests {
)); ));
assert_eq!(stats, ShredFetchStats::default()); assert_eq!(stats, ShredFetchStats::default());
packet.meta.size = OFFSET_OF_SHRED_VARIANT; packet.meta_mut().size = OFFSET_OF_SHRED_VARIANT;
assert!(should_discard_shred( assert!(should_discard_shred(
&packet, &packet,
root, root,
@ -1076,7 +1076,7 @@ mod tests {
)); ));
assert_eq!(stats.index_overrun, 1); assert_eq!(stats.index_overrun, 1);
packet.meta.size = OFFSET_OF_SHRED_INDEX; packet.meta_mut().size = OFFSET_OF_SHRED_INDEX;
assert!(should_discard_shred( assert!(should_discard_shred(
&packet, &packet,
root, root,
@ -1086,7 +1086,7 @@ mod tests {
)); ));
assert_eq!(stats.index_overrun, 2); assert_eq!(stats.index_overrun, 2);
packet.meta.size = OFFSET_OF_SHRED_INDEX + 1; packet.meta_mut().size = OFFSET_OF_SHRED_INDEX + 1;
assert!(should_discard_shred( assert!(should_discard_shred(
&packet, &packet,
root, root,
@ -1096,7 +1096,7 @@ mod tests {
)); ));
assert_eq!(stats.index_overrun, 3); assert_eq!(stats.index_overrun, 3);
packet.meta.size = OFFSET_OF_SHRED_INDEX + SIZE_OF_SHRED_INDEX - 1; packet.meta_mut().size = OFFSET_OF_SHRED_INDEX + SIZE_OF_SHRED_INDEX - 1;
assert!(should_discard_shred( assert!(should_discard_shred(
&packet, &packet,
root, root,
@ -1106,7 +1106,7 @@ mod tests {
)); ));
assert_eq!(stats.index_overrun, 4); assert_eq!(stats.index_overrun, 4);
packet.meta.size = OFFSET_OF_SHRED_INDEX + SIZE_OF_SHRED_INDEX + 2; packet.meta_mut().size = OFFSET_OF_SHRED_INDEX + SIZE_OF_SHRED_INDEX + 2;
assert!(should_discard_shred( assert!(should_discard_shred(
&packet, &packet,
root, root,
@ -1419,7 +1419,7 @@ mod tests {
}); });
let mut packet = Packet::default(); let mut packet = Packet::default();
packet.buffer_mut()[..payload.len()].copy_from_slice(&payload); packet.buffer_mut()[..payload.len()].copy_from_slice(&payload);
packet.meta.size = payload.len(); packet.meta_mut().size = payload.len();
assert_eq!(shred.bytes_to_store(), payload); assert_eq!(shred.bytes_to_store(), payload);
assert_eq!(shred, Shred::new_from_serialized_shred(payload).unwrap()); assert_eq!(shred, Shred::new_from_serialized_shred(payload).unwrap());
verify_shred_layout(&shred, &packet); verify_shred_layout(&shred, &packet);
@ -1452,7 +1452,7 @@ mod tests {
let payload = bs58_decode(PAYLOAD); let payload = bs58_decode(PAYLOAD);
let mut packet = Packet::default(); let mut packet = Packet::default();
packet.buffer_mut()[..payload.len()].copy_from_slice(&payload); packet.buffer_mut()[..payload.len()].copy_from_slice(&payload);
packet.meta.size = payload.len(); packet.meta_mut().size = payload.len();
assert_eq!(shred.bytes_to_store(), payload); assert_eq!(shred.bytes_to_store(), payload);
assert_eq!(shred, Shred::new_from_serialized_shred(payload).unwrap()); assert_eq!(shred, Shred::new_from_serialized_shred(payload).unwrap());
verify_shred_layout(&shred, &packet); verify_shred_layout(&shred, &packet);
@ -1492,7 +1492,7 @@ mod tests {
}); });
let mut packet = Packet::default(); let mut packet = Packet::default();
packet.buffer_mut()[..payload.len()].copy_from_slice(&payload); packet.buffer_mut()[..payload.len()].copy_from_slice(&payload);
packet.meta.size = payload.len(); packet.meta_mut().size = payload.len();
assert_eq!(shred.bytes_to_store(), payload); assert_eq!(shred.bytes_to_store(), payload);
assert_eq!(shred, Shred::new_from_serialized_shred(payload).unwrap()); assert_eq!(shred, Shred::new_from_serialized_shred(payload).unwrap());
verify_shred_layout(&shred, &packet); verify_shred_layout(&shred, &packet);


@ -35,7 +35,7 @@ pub fn verify_shred_cpu(
packet: &Packet, packet: &Packet,
slot_leaders: &HashMap<Slot, /*pubkey:*/ [u8; 32]>, slot_leaders: &HashMap<Slot, /*pubkey:*/ [u8; 32]>,
) -> bool { ) -> bool {
if packet.meta.discard() { if packet.meta().discard() {
return false; return false;
} }
let shred = match shred::layout::get_shred(packet) { let shred = match shred::layout::get_shred(packet) {
@ -101,7 +101,7 @@ where
.into_par_iter() .into_par_iter()
.flat_map_iter(|batch| { .flat_map_iter(|batch| {
batch.iter().map(|packet| { batch.iter().map(|packet| {
if packet.meta.discard() { if packet.meta().discard() {
return Slot::MAX; return Slot::MAX;
} }
let shred = shred::layout::get_shred(packet); let shred = shred::layout::get_shred(packet);
@ -278,7 +278,7 @@ fn sign_shred_cpu(keypair: &Keypair, packet: &mut Packet) {
.and_then(shred::layout::get_signed_message_range) .and_then(shred::layout::get_signed_message_range)
.unwrap(); .unwrap();
assert!( assert!(
packet.meta.size >= sig.end, packet.meta().size >= sig.end,
"packet is not large enough for a signature" "packet is not large enough for a signature"
); );
let signature = keypair.sign_message(packet.data(msg).unwrap()); let signature = keypair.sign_message(packet.data(msg).unwrap());
@ -445,7 +445,7 @@ mod tests {
shred.sign(&keypair); shred.sign(&keypair);
trace!("signature {}", shred.signature()); trace!("signature {}", shred.signature());
packet.buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload()); packet.buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload());
packet.meta.size = shred.payload().len(); packet.meta_mut().size = shred.payload().len();
let leader_slots = [(slot, keypair.pubkey().to_bytes())] let leader_slots = [(slot, keypair.pubkey().to_bytes())]
.iter() .iter()
@ -486,7 +486,7 @@ mod tests {
shred.sign(&keypair); shred.sign(&keypair);
batches[0].resize(1, Packet::default()); batches[0].resize(1, Packet::default());
batches[0][0].buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload()); batches[0][0].buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload());
batches[0][0].meta.size = shred.payload().len(); batches[0][0].meta_mut().size = shred.payload().len();
let leader_slots = [(slot, keypair.pubkey().to_bytes())] let leader_slots = [(slot, keypair.pubkey().to_bytes())]
.iter() .iter()
@ -511,7 +511,7 @@ mod tests {
.iter() .iter()
.cloned() .cloned()
.collect(); .collect();
batches[0][0].meta.size = 0; batches[0][0].meta_mut().size = 0;
let rv = verify_shreds_cpu(&batches, &leader_slots); let rv = verify_shreds_cpu(&batches, &leader_slots);
assert_eq!(rv, vec![vec![0]]); assert_eq!(rv, vec![vec![0]]);
} }
@ -540,7 +540,7 @@ mod tests {
shred.sign(&keypair); shred.sign(&keypair);
batches[0].resize(1, Packet::default()); batches[0].resize(1, Packet::default());
batches[0][0].buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload()); batches[0][0].buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload());
batches[0][0].meta.size = shred.payload().len(); batches[0][0].meta_mut().size = shred.payload().len();
let leader_slots = [ let leader_slots = [
(std::u64::MAX, Pubkey::default().to_bytes()), (std::u64::MAX, Pubkey::default().to_bytes()),
@ -567,7 +567,7 @@ mod tests {
let rv = verify_shreds_gpu(&batches, &leader_slots, &recycler_cache); let rv = verify_shreds_gpu(&batches, &leader_slots, &recycler_cache);
assert_eq!(rv, vec![vec![0]]); assert_eq!(rv, vec![vec![0]]);
batches[0][0].meta.size = 0; batches[0][0].meta_mut().size = 0;
let leader_slots = [ let leader_slots = [
(std::u64::MAX, Pubkey::default().to_bytes()), (std::u64::MAX, Pubkey::default().to_bytes()),
(slot, keypair.pubkey().to_bytes()), (slot, keypair.pubkey().to_bytes()),
@ -651,7 +651,7 @@ mod tests {
); );
batches[0].resize(1, Packet::default()); batches[0].resize(1, Packet::default());
batches[0][0].buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload()); batches[0][0].buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload());
batches[0][0].meta.size = shred.payload().len(); batches[0][0].meta_mut().size = shred.payload().len();
let pubkeys = [ let pubkeys = [
(slot, keypair.pubkey().to_bytes()), (slot, keypair.pubkey().to_bytes()),


@ -30,7 +30,7 @@ fn do_bench_dedup_packets(bencher: &mut Bencher, mut batches: Vec<PacketBatch>)
deduper.reset(); deduper.reset();
batches batches
.iter_mut() .iter_mut()
.for_each(|b| b.iter_mut().for_each(|p| p.meta.set_discard(false))); .for_each(|b| b.iter_mut().for_each(|p| p.meta_mut().set_discard(false)));
}); });
} }


@ -27,7 +27,7 @@ fn do_bench_shrink_packets(bencher: &mut Bencher, mut batches: Vec<PacketBatch>)
sigverify::shrink_batches(&mut batches); sigverify::shrink_batches(&mut batches);
batches.iter_mut().for_each(|b| { batches.iter_mut().for_each(|b| {
b.iter_mut() b.iter_mut()
.for_each(|p| p.meta.set_discard(thread_rng().gen())) .for_each(|p| p.meta_mut().set_discard(thread_rng().gen()))
}); });
}); });
} }
@ -75,7 +75,7 @@ fn bench_shrink_count_packets(bencher: &mut Bencher) {
); );
batches.iter_mut().for_each(|b| { batches.iter_mut().for_each(|b| {
b.iter_mut() b.iter_mut()
.for_each(|p| p.meta.set_discard(thread_rng().gen())) .for_each(|p| p.meta_mut().set_discard(thread_rng().gen()))
}); });
bencher.iter(|| { bencher.iter(|| {


@ -155,7 +155,7 @@ fn bench_sigverify_uneven(bencher: &mut Bencher) {
}; };
Packet::populate_packet(packet, None, &tx).expect("serialize request"); Packet::populate_packet(packet, None, &tx).expect("serialize request");
if thread_rng().gen_ratio((num_packets - NUM) as u32, num_packets as u32) { if thread_rng().gen_ratio((num_packets - NUM) as u32, num_packets as u32) {
packet.meta.set_discard(true); packet.meta_mut().set_discard(true);
} else { } else {
num_valid += 1; num_valid += 1;
} }


@ -122,7 +122,7 @@ impl PacketBatch {
pub fn set_addr(&mut self, addr: &SocketAddr) { pub fn set_addr(&mut self, addr: &SocketAddr) {
for p in self.iter_mut() { for p in self.iter_mut() {
p.meta.set_socket_addr(addr); p.meta_mut().set_socket_addr(addr);
} }
} }


@ -121,7 +121,7 @@ pub fn init() {
#[must_use] #[must_use]
fn verify_packet(packet: &mut Packet, reject_non_vote: bool) -> bool { fn verify_packet(packet: &mut Packet, reject_non_vote: bool) -> bool {
// If this packet was already marked as discard, drop it // If this packet was already marked as discard, drop it
if packet.meta.discard() { if packet.meta().discard() {
return false; return false;
} }
@ -134,7 +134,7 @@ fn verify_packet(packet: &mut Packet, reject_non_vote: bool) -> bool {
return false; return false;
} }
if packet.meta.size <= msg_start { if packet.meta().size <= msg_start {
return false; return false;
} }
@ -179,7 +179,7 @@ pub fn count_valid_packets(
batch batch
.iter() .iter()
.filter(|p| { .filter(|p| {
let should_keep = !p.meta.discard(); let should_keep = !p.meta().discard();
if should_keep { if should_keep {
process_valid_packet(p); process_valid_packet(p);
} }
@ -193,7 +193,7 @@ pub fn count_valid_packets(
pub fn count_discarded_packets(batches: &[PacketBatch]) -> usize { pub fn count_discarded_packets(batches: &[PacketBatch]) -> usize {
batches batches
.iter() .iter()
.map(|batch| batch.iter().filter(|p| p.meta.discard()).count()) .map(|batch| batch.iter().filter(|p| p.meta().discard()).count())
.sum() .sum()
} }
@ -205,7 +205,7 @@ fn do_get_packet_offsets(
// should have at least 1 signature and sig lengths // should have at least 1 signature and sig lengths
let _ = 1usize let _ = 1usize
.checked_add(size_of::<Signature>()) .checked_add(size_of::<Signature>())
.filter(|v| *v <= packet.meta.size) .filter(|v| *v <= packet.meta().size)
.ok_or(PacketError::InvalidLen)?; .ok_or(PacketError::InvalidLen)?;
// read the length of Transaction.signatures (serialized with short_vec) // read the length of Transaction.signatures (serialized with short_vec)
@ -223,7 +223,7 @@ fn do_get_packet_offsets(
// Determine the start of the message header by checking the message prefix bit. // Determine the start of the message header by checking the message prefix bit.
let msg_header_offset = { let msg_header_offset = {
// Packet should have data for prefix bit // Packet should have data for prefix bit
if msg_start_offset >= packet.meta.size { if msg_start_offset >= packet.meta().size {
return Err(PacketError::InvalidSignatureLen); return Err(PacketError::InvalidSignatureLen);
} }
@ -258,7 +258,7 @@ fn do_get_packet_offsets(
// Packet should have data at least for MessageHeader and 1 byte for Message.account_keys.len // Packet should have data at least for MessageHeader and 1 byte for Message.account_keys.len
let _ = msg_header_offset_plus_one let _ = msg_header_offset_plus_one
.checked_add(MESSAGE_HEADER_LENGTH) .checked_add(MESSAGE_HEADER_LENGTH)
.filter(|v| *v <= packet.meta.size) .filter(|v| *v <= packet.meta().size)
.ok_or(PacketError::InvalidSignatureLen)?; .ok_or(PacketError::InvalidSignatureLen)?;
// read MessageHeader.num_required_signatures (serialized with u8) // read MessageHeader.num_required_signatures (serialized with u8)
@ -298,7 +298,7 @@ fn do_get_packet_offsets(
let _ = pubkey_len let _ = pubkey_len
.checked_mul(size_of::<Pubkey>()) .checked_mul(size_of::<Pubkey>())
.and_then(|v| v.checked_add(pubkey_start)) .and_then(|v| v.checked_add(pubkey_start))
.filter(|v| *v <= packet.meta.size) .filter(|v| *v <= packet.meta().size)
.ok_or(PacketError::InvalidPubkeyLen)?; .ok_or(PacketError::InvalidPubkeyLen)?;
if pubkey_len < sig_len_untrusted { if pubkey_len < sig_len_untrusted {
@ -333,7 +333,7 @@ pub fn check_for_tracer_packet(packet: &mut Packet) -> bool {
// Check for tracer pubkey // Check for tracer pubkey
match packet.data(first_pubkey_start..first_pubkey_end) { match packet.data(first_pubkey_start..first_pubkey_end) {
Some(pubkey) if pubkey == TRACER_KEY.as_ref() => { Some(pubkey) if pubkey == TRACER_KEY.as_ref() => {
packet.meta.set_tracer(true); packet.meta_mut().set_tracer(true);
true true
} }
_ => false, _ => false,
@ -348,7 +348,7 @@ fn get_packet_offsets(
let unsanitized_packet_offsets = do_get_packet_offsets(packet, current_offset); let unsanitized_packet_offsets = do_get_packet_offsets(packet, current_offset);
if let Ok(offsets) = unsanitized_packet_offsets { if let Ok(offsets) = unsanitized_packet_offsets {
check_for_simple_vote_transaction(packet, &offsets, current_offset).ok(); check_for_simple_vote_transaction(packet, &offsets, current_offset).ok();
if !reject_non_vote || packet.meta.is_simple_vote_tx() { if !reject_non_vote || packet.meta().is_simple_vote_tx() {
return offsets; return offsets;
} }
} }
@ -380,7 +380,7 @@ fn check_for_simple_vote_transaction(
// Packet should have at least 1 more byte for instructions.len // Packet should have at least 1 more byte for instructions.len
let _ = instructions_len_offset let _ = instructions_len_offset
.checked_add(1usize) .checked_add(1usize)
.filter(|v| *v <= packet.meta.size) .filter(|v| *v <= packet.meta().size)
.ok_or(PacketError::InvalidLen)?; .ok_or(PacketError::InvalidLen)?;
let (instruction_len, instruction_len_size) = packet let (instruction_len, instruction_len_size) = packet
@ -399,7 +399,7 @@ fn check_for_simple_vote_transaction(
// Packet should have at least 1 more byte for one instructions_program_id // Packet should have at least 1 more byte for one instructions_program_id
let _ = instruction_start let _ = instruction_start
.checked_add(1usize) .checked_add(1usize)
.filter(|v| *v <= packet.meta.size) .filter(|v| *v <= packet.meta().size)
.ok_or(PacketError::InvalidLen)?; .ok_or(PacketError::InvalidLen)?;
let instruction_program_id_index: usize = usize::from( let instruction_program_id_index: usize = usize::from(
@ -425,7 +425,7 @@ fn check_for_simple_vote_transaction(
.ok_or(PacketError::InvalidLen)? .ok_or(PacketError::InvalidLen)?
== solana_sdk::vote::program::id().as_ref() == solana_sdk::vote::program::id().as_ref()
{ {
packet.meta.flags |= PacketFlags::SIMPLE_VOTE_TX; packet.meta_mut().flags |= PacketFlags::SIMPLE_VOTE_TX;
} }
Ok(()) Ok(())
} }
@ -458,7 +458,7 @@ pub fn generate_offsets(
let mut pubkey_offset = packet_offsets.pubkey_start; let mut pubkey_offset = packet_offsets.pubkey_start;
let mut sig_offset = packet_offsets.sig_start; let mut sig_offset = packet_offsets.sig_start;
let msg_size = current_offset.saturating_add(packet.meta.size) as u32; let msg_size = current_offset.saturating_add(packet.meta().size) as u32;
for _ in 0..packet_offsets.sig_len { for _ in 0..packet_offsets.sig_len {
signature_offsets.push(sig_offset); signature_offsets.push(sig_offset);
sig_offset = sig_offset.saturating_add(size_of::<Signature>() as u32); sig_offset = sig_offset.saturating_add(size_of::<Signature>() as u32);
@ -536,7 +536,7 @@ impl Deduper {
// Deduplicates packets and returns 1 if packet is to be discarded. Else, 0. // Deduplicates packets and returns 1 if packet is to be discarded. Else, 0.
fn dedup_packet(&self, packet: &mut Packet) -> u64 { fn dedup_packet(&self, packet: &mut Packet) -> u64 {
// If this packet was already marked as discard, drop it // If this packet was already marked as discard, drop it
if packet.meta.discard() { if packet.meta().discard() {
return 1; return 1;
} }
let (hash, pos) = self.compute_hash(packet); let (hash, pos) = self.compute_hash(packet);
@ -548,7 +548,7 @@ impl Deduper {
self.filter[pos].store(hash, Ordering::Relaxed); self.filter[pos].store(hash, Ordering::Relaxed);
} }
if hash == prev & hash { if hash == prev & hash {
packet.meta.set_discard(true); packet.meta_mut().set_discard(true);
return 1; return 1;
} }
0 0
@ -562,7 +562,7 @@ impl Deduper {
let mut num_removed: u64 = 0; let mut num_removed: u64 = 0;
batches.iter_mut().for_each(|batch| { batches.iter_mut().for_each(|batch| {
batch.iter_mut().for_each(|p| { batch.iter_mut().for_each(|p| {
let removed_before_sigverify = p.meta.discard(); let removed_before_sigverify = p.meta().discard();
let is_duplicate = self.dedup_packet(p); let is_duplicate = self.dedup_packet(p);
if is_duplicate == 1 { if is_duplicate == 1 {
saturating_add_assign!(num_removed, 1); saturating_add_assign!(num_removed, 1);
@ -581,17 +581,17 @@ pub fn shrink_batches(batches: &mut Vec<PacketBatch>) {
let mut last_valid_batch = 0; let mut last_valid_batch = 0;
for batch_ix in 0..batches.len() { for batch_ix in 0..batches.len() {
for packet_ix in 0..batches[batch_ix].len() { for packet_ix in 0..batches[batch_ix].len() {
if batches[batch_ix][packet_ix].meta.discard() { if batches[batch_ix][packet_ix].meta().discard() {
continue; continue;
} }
last_valid_batch = batch_ix.saturating_add(1); last_valid_batch = batch_ix.saturating_add(1);
let mut found_spot = false; let mut found_spot = false;
while valid_batch_ix < batch_ix && !found_spot { while valid_batch_ix < batch_ix && !found_spot {
while valid_packet_ix < batches[valid_batch_ix].len() { while valid_packet_ix < batches[valid_batch_ix].len() {
if batches[valid_batch_ix][valid_packet_ix].meta.discard() { if batches[valid_batch_ix][valid_packet_ix].meta().discard() {
batches[valid_batch_ix][valid_packet_ix] = batches[valid_batch_ix][valid_packet_ix] =
batches[batch_ix][packet_ix].clone(); batches[batch_ix][packet_ix].clone();
batches[batch_ix][packet_ix].meta.set_discard(true); batches[batch_ix][packet_ix].meta_mut().set_discard(true);
last_valid_batch = valid_batch_ix.saturating_add(1); last_valid_batch = valid_batch_ix.saturating_add(1);
found_spot = true; found_spot = true;
break; break;
@ -617,8 +617,8 @@ pub fn ed25519_verify_cpu(batches: &mut [PacketBatch], reject_non_vote: bool, pa
// When using single thread, skip rayon overhead. // When using single thread, skip rayon overhead.
batches.iter_mut().for_each(|batch| { batches.iter_mut().for_each(|batch| {
batch.iter_mut().for_each(|packet| { batch.iter_mut().for_each(|packet| {
if !packet.meta.discard() && !verify_packet(packet, reject_non_vote) { if !packet.meta().discard() && !verify_packet(packet, reject_non_vote) {
packet.meta.set_discard(true); packet.meta_mut().set_discard(true);
} }
}) })
}); });
@ -633,8 +633,8 @@ pub fn ed25519_verify_cpu(batches: &mut [PacketBatch], reject_non_vote: bool, pa
.into_par_iter() .into_par_iter()
.with_min_len(packets_per_thread) .with_min_len(packets_per_thread)
.for_each(|packet: &mut Packet| { .for_each(|packet: &mut Packet| {
if !packet.meta.discard() && !verify_packet(packet, reject_non_vote) { if !packet.meta().discard() && !verify_packet(packet, reject_non_vote) {
packet.meta.set_discard(true); packet.meta_mut().set_discard(true);
} }
}) })
}); });
@ -643,8 +643,8 @@ pub fn ed25519_verify_cpu(batches: &mut [PacketBatch], reject_non_vote: bool, pa
PAR_THREAD_POOL.install(|| { PAR_THREAD_POOL.install(|| {
batches.into_par_iter().for_each(|batch: &mut PacketBatch| { batches.into_par_iter().for_each(|batch: &mut PacketBatch| {
batch.par_iter_mut().for_each(|packet: &mut Packet| { batch.par_iter_mut().for_each(|packet: &mut Packet| {
if !packet.meta.discard() && !verify_packet(packet, reject_non_vote) { if !packet.meta().discard() && !verify_packet(packet, reject_non_vote) {
packet.meta.set_discard(true); packet.meta_mut().set_discard(true);
} }
}) })
}); });
@ -656,9 +656,11 @@ pub fn ed25519_verify_cpu(batches: &mut [PacketBatch], reject_non_vote: bool, pa
pub fn ed25519_verify_disabled(batches: &mut [PacketBatch]) { pub fn ed25519_verify_disabled(batches: &mut [PacketBatch]) {
let packet_count = count_packets_in_batches(batches); let packet_count = count_packets_in_batches(batches);
debug!("disabled ECDSA for {}", packet_count); debug!("disabled ECDSA for {}", packet_count);
batches batches.into_par_iter().for_each(|batch| {
.into_par_iter() batch
.for_each(|batch| batch.par_iter_mut().for_each(|p| p.meta.set_discard(false))); .par_iter_mut()
.for_each(|p| p.meta_mut().set_discard(false))
});
inc_new_counter_debug!("ed25519_verify_disabled", packet_count); inc_new_counter_debug!("ed25519_verify_disabled", packet_count);
} }
@ -716,8 +718,8 @@ pub fn get_checked_scalar(scalar: &[u8; 32]) -> Result<[u8; 32], PacketError> {
pub fn mark_disabled(batches: &mut [PacketBatch], r: &[Vec<u8>]) { pub fn mark_disabled(batches: &mut [PacketBatch], r: &[Vec<u8>]) {
for (batch, v) in batches.iter_mut().zip(r) { for (batch, v) in batches.iter_mut().zip(r) {
for (pkt, f) in batch.iter_mut().zip(v) { for (pkt, f) in batch.iter_mut().zip(v) {
if !pkt.meta.discard() { if !pkt.meta().discard() {
pkt.meta.set_discard(*f == 0); pkt.meta_mut().set_discard(*f == 0);
} }
} }
} }
@ -840,10 +842,10 @@ mod tests {
batch.resize(batch_size, Packet::default()); batch.resize(batch_size, Packet::default());
let mut batches: Vec<PacketBatch> = vec![batch]; let mut batches: Vec<PacketBatch> = vec![batch];
mark_disabled(&mut batches, &[vec![0]]); mark_disabled(&mut batches, &[vec![0]]);
assert!(batches[0][0].meta.discard()); assert!(batches[0][0].meta().discard());
batches[0][0].meta.set_discard(false); batches[0][0].meta_mut().set_discard(false);
mark_disabled(&mut batches, &[vec![1]]); mark_disabled(&mut batches, &[vec![1]]);
assert!(!batches[0][0].meta.discard()); assert!(!batches[0][0].meta().discard());
} }
#[test] #[test]
@ -921,7 +923,7 @@ mod tests {
packet.buffer_mut()[0] = 0xff; packet.buffer_mut()[0] = 0xff;
packet.buffer_mut()[1] = 0xff; packet.buffer_mut()[1] = 0xff;
packet.meta.size = 2; packet.meta_mut().size = 2;
let res = sigverify::do_get_packet_offsets(&packet, 0); let res = sigverify::do_get_packet_offsets(&packet, 0);
assert_eq!(res, Err(PacketError::InvalidLen)); assert_eq!(res, Err(PacketError::InvalidLen));
@ -943,10 +945,10 @@ mod tests {
assert!(!verify_packet(&mut packet, false)); assert!(!verify_packet(&mut packet, false));
packet.meta.set_discard(false); packet.meta_mut().set_discard(false);
let mut batches = generate_packet_batches(&packet, 1, 1); let mut batches = generate_packet_batches(&packet, 1, 1);
ed25519_verify(&mut batches); ed25519_verify(&mut batches);
assert!(batches[0][0].meta.discard()); assert!(batches[0][0].meta().discard());
} }
#[test] #[test]
@ -977,10 +979,10 @@ mod tests {
assert!(!verify_packet(&mut packet, false)); assert!(!verify_packet(&mut packet, false));
packet.meta.set_discard(false); packet.meta_mut().set_discard(false);
let mut batches = generate_packet_batches(&packet, 1, 1); let mut batches = generate_packet_batches(&packet, 1, 1);
ed25519_verify(&mut batches); ed25519_verify(&mut batches);
assert!(batches[0][0].meta.discard()); assert!(batches[0][0].meta().discard());
} }
#[test] #[test]
@ -1069,8 +1071,8 @@ mod tests {
let msg_start = legacy_offsets.msg_start as usize; let msg_start = legacy_offsets.msg_start as usize;
let msg_bytes = packet.data(msg_start..).unwrap().to_vec(); let msg_bytes = packet.data(msg_start..).unwrap().to_vec();
packet.buffer_mut()[msg_start] = MESSAGE_VERSION_PREFIX; packet.buffer_mut()[msg_start] = MESSAGE_VERSION_PREFIX;
packet.meta.size += 1; packet.meta_mut().size += 1;
let msg_end = packet.meta.size; let msg_end = packet.meta().size;
packet.buffer_mut()[msg_start + 1..msg_end].copy_from_slice(&msg_bytes); packet.buffer_mut()[msg_start + 1..msg_end].copy_from_slice(&msg_bytes);
let offsets = sigverify::do_get_packet_offsets(&packet, 0).unwrap(); let offsets = sigverify::do_get_packet_offsets(&packet, 0).unwrap();
@ -1202,7 +1204,7 @@ mod tests {
assert!(batches assert!(batches
.iter() .iter()
.flat_map(|batch| batch.iter()) .flat_map(|batch| batch.iter())
.all(|p| p.meta.discard() == should_discard)); .all(|p| p.meta().discard() == should_discard));
} }
fn ed25519_verify(batches: &mut [PacketBatch]) { fn ed25519_verify(batches: &mut [PacketBatch]) {
@ -1226,7 +1228,7 @@ mod tests {
assert!(batches assert!(batches
.iter() .iter()
.flat_map(|batch| batch.iter()) .flat_map(|batch| batch.iter())
.all(|p| p.meta.discard())); .all(|p| p.meta().discard()));
} }
#[test] #[test]
@ -1292,9 +1294,9 @@ mod tests {
.zip(ref_vec.into_iter().flatten()) .zip(ref_vec.into_iter().flatten())
.all(|(p, discard)| { .all(|(p, discard)| {
if discard == 0 { if discard == 0 {
p.meta.discard() p.meta().discard()
} else { } else {
!p.meta.discard() !p.meta().discard()
} }
})); }));
} }
@ -1316,7 +1318,7 @@ mod tests {
for _ in 0..num_modifications { for _ in 0..num_modifications {
let batch = thread_rng().gen_range(0, batches.len()); let batch = thread_rng().gen_range(0, batches.len());
let packet = thread_rng().gen_range(0, batches[batch].len()); let packet = thread_rng().gen_range(0, batches[batch].len());
let offset = thread_rng().gen_range(0, batches[batch][packet].meta.size); let offset = thread_rng().gen_range(0, batches[batch][packet].meta().size);
let add = thread_rng().gen_range(0, 255); let add = thread_rng().gen_range(0, 255);
batches[batch][packet].buffer_mut()[offset] = batches[batch][packet] batches[batch][packet].buffer_mut()[offset] = batches[batch][packet]
.data(offset) .data(offset)
@ -1326,7 +1328,7 @@ mod tests {
let batch_to_disable = thread_rng().gen_range(0, batches.len()); let batch_to_disable = thread_rng().gen_range(0, batches.len());
for p in batches[batch_to_disable].iter_mut() { for p in batches[batch_to_disable].iter_mut() {
p.meta.set_discard(true); p.meta_mut().set_discard(true);
} }
// verify from GPU verification pipeline (when GPU verification is enabled) are // verify from GPU verification pipeline (when GPU verification is enabled) are
@ -1439,7 +1441,7 @@ mod tests {
let mut packet = Packet::from_data(None, tx).unwrap(); let mut packet = Packet::from_data(None, tx).unwrap();
let packet_offsets = do_get_packet_offsets(&packet, 0).unwrap(); let packet_offsets = do_get_packet_offsets(&packet, 0).unwrap();
check_for_simple_vote_transaction(&mut packet, &packet_offsets, 0).ok(); check_for_simple_vote_transaction(&mut packet, &packet_offsets, 0).ok();
assert!(!packet.meta.is_simple_vote_tx()); assert!(!packet.meta().is_simple_vote_tx());
} }
// single vote tx is // single vote tx is
@ -1449,7 +1451,7 @@ mod tests {
let mut packet = Packet::from_data(None, tx).unwrap(); let mut packet = Packet::from_data(None, tx).unwrap();
let packet_offsets = do_get_packet_offsets(&packet, 0).unwrap(); let packet_offsets = do_get_packet_offsets(&packet, 0).unwrap();
check_for_simple_vote_transaction(&mut packet, &packet_offsets, 0).ok(); check_for_simple_vote_transaction(&mut packet, &packet_offsets, 0).ok();
assert!(packet.meta.is_simple_vote_tx()); assert!(packet.meta().is_simple_vote_tx());
} }
// multiple mixed tx is not // multiple mixed tx is not
@ -1470,7 +1472,7 @@ mod tests {
let mut packet = Packet::from_data(None, tx).unwrap(); let mut packet = Packet::from_data(None, tx).unwrap();
let packet_offsets = do_get_packet_offsets(&packet, 0).unwrap(); let packet_offsets = do_get_packet_offsets(&packet, 0).unwrap();
check_for_simple_vote_transaction(&mut packet, &packet_offsets, 0).ok(); check_for_simple_vote_transaction(&mut packet, &packet_offsets, 0).ok();
assert!(!packet.meta.is_simple_vote_tx()); assert!(!packet.meta().is_simple_vote_tx());
} }
} }
@ -1488,9 +1490,9 @@ mod tests {
let packet_offsets = do_get_packet_offsets(packet, current_offset).unwrap(); let packet_offsets = do_get_packet_offsets(packet, current_offset).unwrap();
check_for_simple_vote_transaction(packet, &packet_offsets, current_offset).ok(); check_for_simple_vote_transaction(packet, &packet_offsets, current_offset).ok();
if index == 1 { if index == 1 {
assert!(packet.meta.is_simple_vote_tx()); assert!(packet.meta().is_simple_vote_tx());
} else { } else {
assert!(!packet.meta.is_simple_vote_tx()); assert!(!packet.meta().is_simple_vote_tx());
} }
current_offset = current_offset.saturating_add(size_of::<Packet>()); current_offset = current_offset.saturating_add(size_of::<Packet>());
@ -1572,13 +1574,13 @@ mod tests {
); );
batches.iter_mut().for_each(|b| { batches.iter_mut().for_each(|b| {
b.iter_mut() b.iter_mut()
.for_each(|p| p.meta.set_discard(thread_rng().gen())) .for_each(|p| p.meta_mut().set_discard(thread_rng().gen()))
}); });
// find all the non-discarded packets // find all the non-discarded packets
let mut start = vec![]; let mut start = vec![];
batches.iter_mut().for_each(|b| { batches.iter_mut().for_each(|b| {
b.iter_mut() b.iter_mut()
.filter(|p| !p.meta.discard()) .filter(|p| !p.meta().discard())
.for_each(|p| start.push(p.clone())) .for_each(|p| start.push(p.clone()))
}); });
start.sort_by(|a, b| a.data(..).cmp(&b.data(..))); start.sort_by(|a, b| a.data(..).cmp(&b.data(..)));
@ -1590,7 +1592,7 @@ mod tests {
let mut end = vec![]; let mut end = vec![];
batches.iter_mut().for_each(|b| { batches.iter_mut().for_each(|b| {
b.iter_mut() b.iter_mut()
.filter(|p| !p.meta.discard()) .filter(|p| !p.meta().discard())
.for_each(|p| end.push(p.clone())) .for_each(|p| end.push(p.clone()))
}); });
end.sort_by(|a, b| a.data(..).cmp(&b.data(..))); end.sort_by(|a, b| a.data(..).cmp(&b.data(..)));
@ -1762,13 +1764,13 @@ mod tests {
batches.iter_mut().enumerate().for_each(|(i, b)| { batches.iter_mut().enumerate().for_each(|(i, b)| {
b.iter_mut() b.iter_mut()
.enumerate() .enumerate()
.for_each(|(j, p)| p.meta.set_discard(set_discard(i, j))) .for_each(|(j, p)| p.meta_mut().set_discard(set_discard(i, j)))
}); });
assert_eq!(count_valid_packets(&batches, |_| ()), *expect_valid_packets); assert_eq!(count_valid_packets(&batches, |_| ()), *expect_valid_packets);
debug!("show valid packets for case {}", i); debug!("show valid packets for case {}", i);
batches.iter_mut().enumerate().for_each(|(i, b)| { batches.iter_mut().enumerate().for_each(|(i, b)| {
b.iter_mut().enumerate().for_each(|(j, p)| { b.iter_mut().enumerate().for_each(|(j, p)| {
if !p.meta.discard() { if !p.meta().discard() {
trace!("{} {}", i, j) trace!("{} {}", i, j)
} }
}) })
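The shrink_batches loop above compacts surviving packets into the discarded slots of earlier batches so that trailing, fully discarded batches can be truncated. The hunk cuts off mid-function, so what follows is only a simplified sketch of the same compaction idea, with Option<T> standing in for a Packet and None standing in for a set meta().discard() flag; the helper name and the final truncation step are illustrative, not the crate's code.

// Sketch only: compact `Some` items leftward; `None` models a discarded packet.
fn shrink_sketch<T>(batches: &mut Vec<Vec<Option<T>>>) {
    let mut valid_batch = 0;
    let mut valid_slot = 0;
    let mut last_valid_batch = 0;
    for batch_ix in 0..batches.len() {
        for packet_ix in 0..batches[batch_ix].len() {
            if batches[batch_ix][packet_ix].is_none() {
                continue;
            }
            last_valid_batch = batch_ix + 1;
            let mut found_spot = false;
            while valid_batch < batch_ix && !found_spot {
                while valid_slot < batches[valid_batch].len() {
                    if batches[valid_batch][valid_slot].is_none() {
                        // Move the packet into the free slot; the original slot
                        // becomes "discarded", like clone() + set_discard(true).
                        batches[valid_batch][valid_slot] = batches[batch_ix][packet_ix].take();
                        last_valid_batch = valid_batch + 1;
                        found_spot = true;
                        break;
                    }
                    valid_slot += 1;
                }
                if !found_spot {
                    valid_batch += 1;
                    valid_slot = 0;
                }
            }
        }
    }
    batches.truncate(last_valid_batch);
}

fn main() {
    let mut batches = vec![vec![None, Some(1)], vec![Some(2), None]];
    shrink_sketch(&mut batches);
    assert_eq!(batches, vec![vec![Some(2), Some(1)]]);
}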

View File

@ -36,7 +36,7 @@ mod tests {
} }
for batch in all_packets { for batch in all_packets {
for p in &batch { for p in &batch {
assert_eq!(p.meta.size, num_bytes); assert_eq!(p.meta().size, num_bytes);
} }
} }
assert_eq!(total_packets, num_expected_packets); assert_eq!(total_packets, num_expected_packets);

View File

@ -44,7 +44,7 @@ pub struct Packet {
// Bytes past Packet.meta.size are not valid to read from. // Bytes past Packet.meta.size are not valid to read from.
// Use Packet.data(index) to read from the buffer. // Use Packet.data(index) to read from the buffer.
buffer: [u8; PACKET_DATA_SIZE], buffer: [u8; PACKET_DATA_SIZE],
pub meta: Meta, meta: Meta,
} }
impl Packet { impl Packet {
@ -81,6 +81,16 @@ impl Packet {
&mut self.buffer[..] &mut self.buffer[..]
} }
#[inline]
pub fn meta(&self) -> &Meta {
&self.meta
}
#[inline]
pub fn meta_mut(&mut self) -> &mut Meta {
&mut self.meta
}
pub fn from_data<T: Serialize>(dest: Option<&SocketAddr>, data: T) -> Result<Self> { pub fn from_data<T: Serialize>(dest: Option<&SocketAddr>, data: T) -> Result<Self> {
let mut packet = Packet::default(); let mut packet = Packet::default();
Self::populate_packet(&mut packet, dest, &data)?; Self::populate_packet(&mut packet, dest, &data)?;
@ -140,7 +150,7 @@ impl Default for Packet {
impl PartialEq for Packet { impl PartialEq for Packet {
fn eq(&self, other: &Packet) -> bool { fn eq(&self, other: &Packet) -> bool {
self.meta == other.meta && self.data(..) == other.data(..) self.meta() == other.meta() && self.data(..) == other.data(..)
} }
} }
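With the field private, reads go through meta() and writes through meta_mut(), and whole-meta resets become *packet.meta_mut() = Meta::default(). A minimal sketch of the migration pattern for downstream code, assuming only the solana_sdk::packet items shown in this diff (the helper names are illustrative):

use solana_sdk::packet::{Meta, Packet, PACKET_DATA_SIZE};

fn fill(packet: &mut Packet, payload: &[u8]) {
    assert!(payload.len() <= PACKET_DATA_SIZE);
    packet.buffer_mut()[..payload.len()].copy_from_slice(payload);
    // Old: packet.meta.size = payload.len();
    packet.meta_mut().size = payload.len();
}

fn is_usable(packet: &Packet) -> bool {
    // Old: !packet.meta.discard() && packet.meta.size <= PACKET_DATA_SIZE
    !packet.meta().discard() && packet.meta().size <= PACKET_DATA_SIZE
}

fn reset(packet: &mut Packet) {
    // Old: packet.meta = Meta::default();
    *packet.meta_mut() = Meta::default();
}

One practical effect is that a call site which previously mutated meta while holding only &Packet now fails to compile, which matches the encapsulation goal of making the field private.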

View File

@ -653,8 +653,8 @@ fn handle_chunk(
if maybe_batch.is_none() { if maybe_batch.is_none() {
let mut batch = PacketBatch::with_capacity(1); let mut batch = PacketBatch::with_capacity(1);
let mut packet = Packet::default(); let mut packet = Packet::default();
packet.meta.set_socket_addr(remote_addr); packet.meta_mut().set_socket_addr(remote_addr);
packet.meta.sender_stake = stake; packet.meta_mut().sender_stake = stake;
batch.push(packet); batch.push(packet);
*maybe_batch = Some(batch); *maybe_batch = Some(batch);
stats stats
@ -670,7 +670,7 @@ fn handle_chunk(
}; };
batch[0].buffer_mut()[chunk.offset as usize..end_of_chunk] batch[0].buffer_mut()[chunk.offset as usize..end_of_chunk]
.copy_from_slice(&chunk.bytes); .copy_from_slice(&chunk.bytes);
batch[0].meta.size = std::cmp::max(batch[0].meta.size, end_of_chunk); batch[0].meta_mut().size = std::cmp::max(batch[0].meta().size, end_of_chunk);
stats.total_chunks_received.fetch_add(1, Ordering::Relaxed); stats.total_chunks_received.fetch_add(1, Ordering::Relaxed);
match peer_type { match peer_type {
ConnectionPeerType::Staked => { ConnectionPeerType::Staked => {
@ -689,7 +689,7 @@ fn handle_chunk(
trace!("chunk is none"); trace!("chunk is none");
// done receiving chunks // done receiving chunks
if let Some(batch) = maybe_batch.take() { if let Some(batch) = maybe_batch.take() {
let len = batch[0].meta.size; let len = batch[0].meta().size;
if let Err(e) = packet_sender.send(batch) { if let Err(e) = packet_sender.send(batch) {
stats stats
.total_packet_batch_send_err .total_packet_batch_send_err
@ -1116,7 +1116,7 @@ pub mod test {
} }
for batch in all_packets { for batch in all_packets {
for p in batch.iter() { for p in batch.iter() {
assert_eq!(p.meta.size, 1); assert_eq!(p.meta().size, 1);
} }
} }
assert_eq!(total_packets, num_expected_packets); assert_eq!(total_packets, num_expected_packets);
@ -1152,7 +1152,7 @@ pub mod test {
} }
for batch in all_packets { for batch in all_packets {
for p in batch.iter() { for p in batch.iter() {
assert_eq!(p.meta.size, num_bytes); assert_eq!(p.meta().size, num_bytes);
} }
} }
assert_eq!(total_packets, num_expected_packets); assert_eq!(total_packets, num_expected_packets);
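The chunk-size update above is worth a second look: in an assignment the value operand is evaluated before the assignee place, so the shared borrow taken by meta() on the right ends before meta_mut() takes its exclusive borrow on the left, and the statement compiles as written. A small standalone illustration; extend_to is a hypothetical helper, not part of the crate:

use solana_sdk::packet::Packet;

// Grow the recorded payload size to cover a newly received chunk.
fn extend_to(packet: &mut Packet, end_of_chunk: usize) {
    // Right-hand side (meta() read) is evaluated first, then the
    // left-hand side place (meta_mut()); the borrows never overlap.
    packet.meta_mut().size = std::cmp::max(packet.meta().size, end_of_chunk);
}

// Equivalent form with the ordering spelled out via a temporary.
fn extend_to_explicit(packet: &mut Packet, end_of_chunk: usize) {
    let new_size = packet.meta().size.max(end_of_chunk);
    packet.meta_mut().size = new_size;
}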

View File

@ -15,12 +15,12 @@ pub async fn recv_mmsg(
socket: &UdpSocket, socket: &UdpSocket,
packets: &mut [Packet], packets: &mut [Packet],
) -> io::Result</*num packets:*/ usize> { ) -> io::Result</*num packets:*/ usize> {
debug_assert!(packets.iter().all(|pkt| pkt.meta == Meta::default())); debug_assert!(packets.iter().all(|pkt| pkt.meta() == &Meta::default()));
let count = cmp::min(NUM_RCVMMSGS, packets.len()); let count = cmp::min(NUM_RCVMMSGS, packets.len());
socket.readable().await?; socket.readable().await?;
let mut i = 0; let mut i = 0;
for p in packets.iter_mut().take(count) { for p in packets.iter_mut().take(count) {
p.meta.size = 0; p.meta_mut().size = 0;
match socket.try_recv_from(p.buffer_mut()) { match socket.try_recv_from(p.buffer_mut()) {
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
break; break;
@ -29,8 +29,8 @@ pub async fn recv_mmsg(
return Err(e); return Err(e);
} }
Ok((nrecv, from)) => { Ok((nrecv, from)) => {
p.meta.size = nrecv; p.meta_mut().size = nrecv;
p.meta.set_socket_addr(&from); p.meta_mut().set_socket_addr(&from);
} }
} }
i += 1; i += 1;
@ -84,8 +84,8 @@ mod tests {
let recv = recv_mmsg_exact(&reader, &mut packets[..]).await.unwrap(); let recv = recv_mmsg_exact(&reader, &mut packets[..]).await.unwrap();
assert_eq!(sent, recv); assert_eq!(sent, recv);
for packet in packets.iter().take(recv) { for packet in packets.iter().take(recv) {
assert_eq!(packet.meta.size, PACKET_DATA_SIZE); assert_eq!(packet.meta().size, PACKET_DATA_SIZE);
assert_eq!(packet.meta.socket_addr(), saddr); assert_eq!(packet.meta().socket_addr(), saddr);
} }
} }
@ -110,19 +110,19 @@ mod tests {
let recv = recv_mmsg_exact(&reader, &mut packets[..]).await.unwrap(); let recv = recv_mmsg_exact(&reader, &mut packets[..]).await.unwrap();
assert_eq!(TEST_NUM_MSGS, recv); assert_eq!(TEST_NUM_MSGS, recv);
for packet in packets.iter().take(recv) { for packet in packets.iter().take(recv) {
assert_eq!(packet.meta.size, PACKET_DATA_SIZE); assert_eq!(packet.meta().size, PACKET_DATA_SIZE);
assert_eq!(packet.meta.socket_addr(), saddr); assert_eq!(packet.meta().socket_addr(), saddr);
} }
let mut packets = vec![Packet::default(); sent - TEST_NUM_MSGS]; let mut packets = vec![Packet::default(); sent - TEST_NUM_MSGS];
packets packets
.iter_mut() .iter_mut()
.for_each(|pkt| pkt.meta = Meta::default()); .for_each(|pkt| *pkt.meta_mut() = Meta::default());
let recv = recv_mmsg_exact(&reader, &mut packets[..]).await.unwrap(); let recv = recv_mmsg_exact(&reader, &mut packets[..]).await.unwrap();
assert_eq!(sent - TEST_NUM_MSGS, recv); assert_eq!(sent - TEST_NUM_MSGS, recv);
for packet in packets.iter().take(recv) { for packet in packets.iter().take(recv) {
assert_eq!(packet.meta.size, PACKET_DATA_SIZE); assert_eq!(packet.meta().size, PACKET_DATA_SIZE);
assert_eq!(packet.meta.socket_addr(), saddr); assert_eq!(packet.meta().socket_addr(), saddr);
} }
} }
@ -153,13 +153,13 @@ mod tests {
let recv = recv_mmsg_exact(&reader, &mut packets[..]).await.unwrap(); let recv = recv_mmsg_exact(&reader, &mut packets[..]).await.unwrap();
assert_eq!(TEST_NUM_MSGS, recv); assert_eq!(TEST_NUM_MSGS, recv);
for packet in packets.iter().take(recv) { for packet in packets.iter().take(recv) {
assert_eq!(packet.meta.size, PACKET_DATA_SIZE); assert_eq!(packet.meta().size, PACKET_DATA_SIZE);
assert_eq!(packet.meta.socket_addr(), saddr); assert_eq!(packet.meta().socket_addr(), saddr);
} }
packets packets
.iter_mut() .iter_mut()
.for_each(|pkt| pkt.meta = Meta::default()); .for_each(|pkt| *pkt.meta_mut() = Meta::default());
let _recv = recv_mmsg(&reader, &mut packets[..]).await; let _recv = recv_mmsg(&reader, &mut packets[..]).await;
assert!(start.elapsed().as_secs() < 5); assert!(start.elapsed().as_secs() < 5);
} }
@ -192,22 +192,22 @@ mod tests {
let recv = recv_mmsg(&reader, &mut packets[..]).await.unwrap(); let recv = recv_mmsg(&reader, &mut packets[..]).await.unwrap();
assert_eq!(TEST_NUM_MSGS, recv); assert_eq!(TEST_NUM_MSGS, recv);
for packet in packets.iter().take(sent1) { for packet in packets.iter().take(sent1) {
assert_eq!(packet.meta.size, PACKET_DATA_SIZE); assert_eq!(packet.meta().size, PACKET_DATA_SIZE);
assert_eq!(packet.meta.socket_addr(), saddr1); assert_eq!(packet.meta().socket_addr(), saddr1);
} }
for packet in packets.iter().skip(sent1).take(recv - sent1) { for packet in packets.iter().skip(sent1).take(recv - sent1) {
assert_eq!(packet.meta.size, PACKET_DATA_SIZE); assert_eq!(packet.meta().size, PACKET_DATA_SIZE);
assert_eq!(packet.meta.socket_addr(), saddr2); assert_eq!(packet.meta().socket_addr(), saddr2);
} }
packets packets
.iter_mut() .iter_mut()
.for_each(|pkt| pkt.meta = Meta::default()); .for_each(|pkt| *pkt.meta_mut() = Meta::default());
let recv = recv_mmsg(&reader, &mut packets[..]).await.unwrap(); let recv = recv_mmsg(&reader, &mut packets[..]).await.unwrap();
assert_eq!(sent1 + sent2 - TEST_NUM_MSGS, recv); assert_eq!(sent1 + sent2 - TEST_NUM_MSGS, recv);
for packet in packets.iter().take(recv) { for packet in packets.iter().take(recv) {
assert_eq!(packet.meta.size, PACKET_DATA_SIZE); assert_eq!(packet.meta().size, PACKET_DATA_SIZE);
assert_eq!(packet.meta.socket_addr(), saddr2); assert_eq!(packet.meta().socket_addr(), saddr2);
} }
} }
} }
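Both recv_mmsg variants debug_assert that every packet's meta equals Meta::default() on entry, so callers reusing a buffer must reset it between calls, and only meta().size bytes of each received packet are meaningful afterwards. A short sketch of that caller-side contract, assuming the solana_sdk::packet types used throughout this diff (the helper names are illustrative):

use solana_sdk::packet::{Meta, Packet};

// Reset previously used packets so they satisfy recv_mmsg's
// debug_assert that every meta equals Meta::default().
fn reset_for_reuse(packets: &mut [Packet]) {
    packets
        .iter_mut()
        .for_each(|pkt| *pkt.meta_mut() = Meta::default());
}

// After a receive, only the first `received` packets are filled in,
// and only meta().size bytes of each carry valid payload.
fn dump(packets: &[Packet], received: usize) {
    for pkt in packets.iter().take(received) {
        let from = pkt.meta().socket_addr();
        let len = pkt.meta().size;
        println!("{len} bytes from {from}");
    }
}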

View File

@ -65,7 +65,7 @@ pub fn send_to(
socket_addr_space: &SocketAddrSpace, socket_addr_space: &SocketAddrSpace,
) -> Result<()> { ) -> Result<()> {
for p in batch.iter() { for p in batch.iter() {
let addr = p.meta.socket_addr(); let addr = p.meta().socket_addr();
if socket_addr_space.check(&addr) { if socket_addr_space.check(&addr) {
if let Some(data) = p.data(..) { if let Some(data) = p.data(..) {
socket.send_to(data, addr)?; socket.send_to(data, addr)?;
@ -93,7 +93,7 @@ mod tests {
let packets = vec![Packet::default()]; let packets = vec![Packet::default()];
let mut packet_batch = PacketBatch::new(packets); let mut packet_batch = PacketBatch::new(packets);
packet_batch.set_addr(&send_addr); packet_batch.set_addr(&send_addr);
assert_eq!(packet_batch[0].meta.socket_addr(), send_addr); assert_eq!(packet_batch[0].meta().socket_addr(), send_addr);
} }
#[test] #[test]
@ -109,19 +109,21 @@ mod tests {
batch.resize(packet_batch_size, Packet::default()); batch.resize(packet_batch_size, Packet::default());
for m in batch.iter_mut() { for m in batch.iter_mut() {
m.meta.set_socket_addr(&addr); m.meta_mut().set_socket_addr(&addr);
m.meta.size = PACKET_DATA_SIZE; m.meta_mut().size = PACKET_DATA_SIZE;
} }
send_to(&batch, &send_socket, &SocketAddrSpace::Unspecified).unwrap(); send_to(&batch, &send_socket, &SocketAddrSpace::Unspecified).unwrap();
batch.iter_mut().for_each(|pkt| pkt.meta = Meta::default()); batch
.iter_mut()
.for_each(|pkt| *pkt.meta_mut() = Meta::default());
let recvd = recv_from(&mut batch, &recv_socket, 1).unwrap(); let recvd = recv_from(&mut batch, &recv_socket, 1).unwrap();
assert_eq!(recvd, batch.len()); assert_eq!(recvd, batch.len());
for m in batch.iter() { for m in batch.iter() {
assert_eq!(m.meta.size, PACKET_DATA_SIZE); assert_eq!(m.meta().size, PACKET_DATA_SIZE);
assert_eq!(m.meta.socket_addr(), saddr); assert_eq!(m.meta().socket_addr(), saddr);
} }
} }
@ -136,10 +138,10 @@ mod tests {
let mut p1 = Packet::default(); let mut p1 = Packet::default();
let mut p2 = Packet::default(); let mut p2 = Packet::default();
p1.meta.size = 1; p1.meta_mut().size = 1;
p1.buffer_mut()[0] = 0; p1.buffer_mut()[0] = 0;
p2.meta.size = 1; p2.meta_mut().size = 1;
p2.buffer_mut()[0] = 0; p2.buffer_mut()[0] = 0;
assert!(p1 == p2); assert!(p1 == p2);
@ -164,8 +166,8 @@ mod tests {
let mut batch = PacketBatch::with_capacity(batch_size); let mut batch = PacketBatch::with_capacity(batch_size);
batch.resize(batch_size, Packet::default()); batch.resize(batch_size, Packet::default());
for p in batch.iter_mut() { for p in batch.iter_mut() {
p.meta.set_socket_addr(&addr); p.meta_mut().set_socket_addr(&addr);
p.meta.size = 1; p.meta_mut().size = 1;
} }
send_to(&batch, &send_socket, &SocketAddrSpace::Unspecified).unwrap(); send_to(&batch, &send_socket, &SocketAddrSpace::Unspecified).unwrap();
} }

View File

@ -17,11 +17,11 @@ use {
#[cfg(not(target_os = "linux"))] #[cfg(not(target_os = "linux"))]
pub fn recv_mmsg(socket: &UdpSocket, packets: &mut [Packet]) -> io::Result</*num packets:*/ usize> { pub fn recv_mmsg(socket: &UdpSocket, packets: &mut [Packet]) -> io::Result</*num packets:*/ usize> {
debug_assert!(packets.iter().all(|pkt| pkt.meta == Meta::default())); debug_assert!(packets.iter().all(|pkt| pkt.meta() == &Meta::default()));
let mut i = 0; let mut i = 0;
let count = cmp::min(NUM_RCVMMSGS, packets.len()); let count = cmp::min(NUM_RCVMMSGS, packets.len());
for p in packets.iter_mut().take(count) { for p in packets.iter_mut().take(count) {
p.meta.size = 0; p.meta_mut().size = 0;
match socket.recv_from(p.buffer_mut()) { match socket.recv_from(p.buffer_mut()) {
Err(_) if i > 0 => { Err(_) if i > 0 => {
break; break;
@ -30,8 +30,8 @@ pub fn recv_mmsg(socket: &UdpSocket, packets: &mut [Packet]) -> io::Result</*num
return Err(e); return Err(e);
} }
Ok((nrecv, from)) => { Ok((nrecv, from)) => {
p.meta.size = nrecv; p.meta_mut().size = nrecv;
p.meta.set_socket_addr(&from); p.meta_mut().set_socket_addr(&from);
if i == 0 { if i == 0 {
socket.set_nonblocking(true)?; socket.set_nonblocking(true)?;
} }
@ -71,7 +71,7 @@ fn cast_socket_addr(addr: &sockaddr_storage, hdr: &mmsghdr) -> Option<InetAddr>
#[allow(clippy::uninit_assumed_init)] #[allow(clippy::uninit_assumed_init)]
pub fn recv_mmsg(sock: &UdpSocket, packets: &mut [Packet]) -> io::Result</*num packets:*/ usize> { pub fn recv_mmsg(sock: &UdpSocket, packets: &mut [Packet]) -> io::Result</*num packets:*/ usize> {
// Assert that there are no leftovers in packets. // Assert that there are no leftovers in packets.
debug_assert!(packets.iter().all(|pkt| pkt.meta == Meta::default())); debug_assert!(packets.iter().all(|pkt| pkt.meta() == &Meta::default()));
const SOCKADDR_STORAGE_SIZE: usize = mem::size_of::<sockaddr_storage>(); const SOCKADDR_STORAGE_SIZE: usize = mem::size_of::<sockaddr_storage>();
let mut hdrs: [mmsghdr; NUM_RCVMMSGS] = unsafe { mem::zeroed() }; let mut hdrs: [mmsghdr; NUM_RCVMMSGS] = unsafe { mem::zeroed() };
@ -107,9 +107,9 @@ pub fn recv_mmsg(sock: &UdpSocket, packets: &mut [Packet]) -> io::Result</*num p
usize::try_from(nrecv).unwrap() usize::try_from(nrecv).unwrap()
}; };
for (addr, hdr, pkt) in izip!(addrs, hdrs, packets.iter_mut()).take(nrecv) { for (addr, hdr, pkt) in izip!(addrs, hdrs, packets.iter_mut()).take(nrecv) {
pkt.meta.size = hdr.msg_len as usize; pkt.meta_mut().size = hdr.msg_len as usize;
if let Some(addr) = cast_socket_addr(&addr, &hdr) { if let Some(addr) = cast_socket_addr(&addr, &hdr) {
pkt.meta.set_socket_addr(&addr.to_std()); pkt.meta_mut().set_socket_addr(&addr.to_std());
} }
} }
Ok(nrecv) Ok(nrecv)
@ -149,8 +149,8 @@ mod tests {
let recv = recv_mmsg(&reader, &mut packets[..]).unwrap(); let recv = recv_mmsg(&reader, &mut packets[..]).unwrap();
assert_eq!(sent, recv); assert_eq!(sent, recv);
for packet in packets.iter().take(recv) { for packet in packets.iter().take(recv) {
assert_eq!(packet.meta.size, PACKET_DATA_SIZE); assert_eq!(packet.meta().size, PACKET_DATA_SIZE);
assert_eq!(packet.meta.socket_addr(), saddr); assert_eq!(packet.meta().socket_addr(), saddr);
} }
}; };
@ -175,18 +175,18 @@ mod tests {
let recv = recv_mmsg(&reader, &mut packets[..]).unwrap(); let recv = recv_mmsg(&reader, &mut packets[..]).unwrap();
assert_eq!(TEST_NUM_MSGS, recv); assert_eq!(TEST_NUM_MSGS, recv);
for packet in packets.iter().take(recv) { for packet in packets.iter().take(recv) {
assert_eq!(packet.meta.size, PACKET_DATA_SIZE); assert_eq!(packet.meta().size, PACKET_DATA_SIZE);
assert_eq!(packet.meta.socket_addr(), saddr); assert_eq!(packet.meta().socket_addr(), saddr);
} }
packets packets
.iter_mut() .iter_mut()
.for_each(|pkt| pkt.meta = Meta::default()); .for_each(|pkt| *pkt.meta_mut() = Meta::default());
let recv = recv_mmsg(&reader, &mut packets[..]).unwrap(); let recv = recv_mmsg(&reader, &mut packets[..]).unwrap();
assert_eq!(sent - TEST_NUM_MSGS, recv); assert_eq!(sent - TEST_NUM_MSGS, recv);
for packet in packets.iter().take(recv) { for packet in packets.iter().take(recv) {
assert_eq!(packet.meta.size, PACKET_DATA_SIZE); assert_eq!(packet.meta().size, PACKET_DATA_SIZE);
assert_eq!(packet.meta.socket_addr(), saddr); assert_eq!(packet.meta().socket_addr(), saddr);
} }
}; };
@ -217,14 +217,14 @@ mod tests {
let recv = recv_mmsg(&reader, &mut packets[..]).unwrap(); let recv = recv_mmsg(&reader, &mut packets[..]).unwrap();
assert_eq!(TEST_NUM_MSGS, recv); assert_eq!(TEST_NUM_MSGS, recv);
for packet in packets.iter().take(recv) { for packet in packets.iter().take(recv) {
assert_eq!(packet.meta.size, PACKET_DATA_SIZE); assert_eq!(packet.meta().size, PACKET_DATA_SIZE);
assert_eq!(packet.meta.socket_addr(), saddr); assert_eq!(packet.meta().socket_addr(), saddr);
} }
reader.set_nonblocking(true).unwrap(); reader.set_nonblocking(true).unwrap();
packets packets
.iter_mut() .iter_mut()
.for_each(|pkt| pkt.meta = Meta::default()); .for_each(|pkt| *pkt.meta_mut() = Meta::default());
let _recv = recv_mmsg(&reader, &mut packets[..]); let _recv = recv_mmsg(&reader, &mut packets[..]);
assert!(start.elapsed().as_secs() < 5); assert!(start.elapsed().as_secs() < 5);
} }
@ -257,22 +257,22 @@ mod tests {
let recv = recv_mmsg(&reader, &mut packets[..]).unwrap(); let recv = recv_mmsg(&reader, &mut packets[..]).unwrap();
assert_eq!(TEST_NUM_MSGS, recv); assert_eq!(TEST_NUM_MSGS, recv);
for packet in packets.iter().take(sent1) { for packet in packets.iter().take(sent1) {
assert_eq!(packet.meta.size, PACKET_DATA_SIZE); assert_eq!(packet.meta().size, PACKET_DATA_SIZE);
assert_eq!(packet.meta.socket_addr(), saddr1); assert_eq!(packet.meta().socket_addr(), saddr1);
} }
for packet in packets.iter().skip(sent1).take(recv - sent1) { for packet in packets.iter().skip(sent1).take(recv - sent1) {
assert_eq!(packet.meta.size, PACKET_DATA_SIZE); assert_eq!(packet.meta().size, PACKET_DATA_SIZE);
assert_eq!(packet.meta.socket_addr(), saddr2); assert_eq!(packet.meta().socket_addr(), saddr2);
} }
packets packets
.iter_mut() .iter_mut()
.for_each(|pkt| pkt.meta = Meta::default()); .for_each(|pkt| *pkt.meta_mut() = Meta::default());
let recv = recv_mmsg(&reader, &mut packets[..]).unwrap(); let recv = recv_mmsg(&reader, &mut packets[..]).unwrap();
assert_eq!(sent1 + sent2 - TEST_NUM_MSGS, recv); assert_eq!(sent1 + sent2 - TEST_NUM_MSGS, recv);
for packet in packets.iter().take(recv) { for packet in packets.iter().take(recv) {
assert_eq!(packet.meta.size, PACKET_DATA_SIZE); assert_eq!(packet.meta().size, PACKET_DATA_SIZE);
assert_eq!(packet.meta.socket_addr(), saddr2); assert_eq!(packet.meta().socket_addr(), saddr2);
} }
} }
} }

View File

@ -287,7 +287,7 @@ impl StreamerSendStats {
} }
fn record(&mut self, pkt: &Packet) { fn record(&mut self, pkt: &Packet) {
let ent = self.host_map.entry(pkt.meta.addr).or_default(); let ent = self.host_map.entry(pkt.meta().addr).or_default();
ent.count += 1; ent.count += 1;
ent.bytes += pkt.data(..).map(<[u8]>::len).unwrap_or_default() as u64; ent.bytes += pkt.data(..).map(<[u8]>::len).unwrap_or_default() as u64;
} }
@ -305,7 +305,7 @@ fn recv_send(
packet_batch.iter().for_each(|p| stats.record(p)); packet_batch.iter().for_each(|p| stats.record(p));
} }
let packets = packet_batch.iter().filter_map(|pkt| { let packets = packet_batch.iter().filter_map(|pkt| {
let addr = pkt.meta.socket_addr(); let addr = pkt.meta().socket_addr();
let data = pkt.data(..)?; let data = pkt.data(..)?;
socket_addr_space.check(&addr).then_some((data, addr)) socket_addr_space.check(&addr).then_some((data, addr))
}); });
@ -488,8 +488,8 @@ mod test {
let mut p = Packet::default(); let mut p = Packet::default();
{ {
p.buffer_mut()[0] = i as u8; p.buffer_mut()[0] = i as u8;
p.meta.size = PACKET_DATA_SIZE; p.meta_mut().size = PACKET_DATA_SIZE;
p.meta.set_socket_addr(&addr); p.meta_mut().set_socket_addr(&addr);
} }
packet_batch.push(p); packet_batch.push(p);
} }
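The record hook above keys its per-host counters on meta().addr and counts a packet whose data(..) returns None (for example, one already marked as discarded) as zero bytes. A standalone sketch of that accounting, with HostStats standing in for the streamer's internal entry type (illustrative, not the crate's):

use {
    solana_sdk::packet::Packet,
    std::{collections::HashMap, net::IpAddr},
};

// Per-host packet and byte counters, keyed by the sender IP in meta().addr.
#[derive(Default)]
struct HostStats {
    count: u64,
    bytes: u64,
}

fn record(host_map: &mut HashMap<IpAddr, HostStats>, pkt: &Packet) {
    let ent = host_map.entry(pkt.meta().addr).or_default();
    ent.count += 1;
    // data(..) returns None for a discarded packet; count it as zero bytes.
    ent.bytes += pkt.data(..).map(<[u8]>::len).unwrap_or_default() as u64;
}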

View File

@ -50,7 +50,7 @@ pub fn test_recv_mmsg_batch_size() {
} }
packets packets
.iter_mut() .iter_mut()
.for_each(|pkt| pkt.meta = Meta::default()); .for_each(|pkt| *pkt.meta_mut() = Meta::default());
} }
elapsed_in_small_batch += now.elapsed().as_nanos(); elapsed_in_small_batch += now.elapsed().as_nanos();
assert_eq!(TEST_BATCH_SIZE, recv); assert_eq!(TEST_BATCH_SIZE, recv);