//! `window_service` handles the data plane incoming shreds, storing them in
//! blocktree and retransmitting where required
//!
use crate::blocktree::{self, Blocktree};
use crate::cluster_info::ClusterInfo;
use crate::leader_schedule_cache::LeaderScheduleCache;
use crate::repair_service::{RepairService, RepairStrategy};
use crate::result::{Error, Result};
use crate::service::Service;
use crate::shred::Shred;
use crate::streamer::{PacketReceiver, PacketSender};
use rayon::iter::{IndexedParallelIterator, IntoParallelRefMutIterator, ParallelIterator};
use rayon::ThreadPool;
use solana_metrics::{inc_new_counter_debug, inc_new_counter_error};
use solana_rayon_threadlimit::get_thread_count;
use solana_runtime::bank::Bank;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::duration_as_ms;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::RecvTimeoutError;
use std::sync::{Arc, RwLock};
use std::thread::{self, Builder, JoinHandle};
use std::time::{Duration, Instant};

pub const NUM_THREADS: u32 = 10;

/// Drop shreds that are from myself or not from the correct leader for the
/// shred's slot
pub fn should_retransmit_and_persist(
    shred: &Shred,
    shred_buf: &[u8],
    bank: Option<Arc<Bank>>,
    leader_schedule_cache: &Arc<LeaderScheduleCache>,
    my_pubkey: &Pubkey,
    root: u64,
) -> bool {
    let slot_leader_pubkey = match bank {
        None => leader_schedule_cache.slot_leader_at(shred.slot(), None),
        Some(bank) => leader_schedule_cache.slot_leader_at(shred.slot(), Some(&bank)),
    };
    if let Some(leader_id) = slot_leader_pubkey {
        if leader_id == *my_pubkey {
            inc_new_counter_debug!("streamer-recv_window-circular_transmission", 1);
            false
        } else if !blocktree::verify_shred_slots(shred.slot(), shred.parent(), root) {
            inc_new_counter_debug!("streamer-recv_window-outdated_transmission", 1);
            false
        } else if !shred.fast_verify(&shred_buf, &leader_id) {
            inc_new_counter_debug!("streamer-recv_window-invalid_signature", 1);
            false
        } else {
            true
        }
    } else {
        inc_new_counter_debug!("streamer-recv_window-unknown_leader", 1);
        false
    }
}

fn recv_window<F>(
    blocktree: &Arc<Blocktree>,
    my_pubkey: &Pubkey,
    r: &PacketReceiver,
    retransmit: &PacketSender,
    shred_filter: F,
    thread_pool: &ThreadPool,
    leader_schedule_cache: &Arc<LeaderScheduleCache>,
) -> Result<()>
where
    F: Fn(&Shred, &[u8], u64) -> bool,
    F: Sync,
{
    let timer = Duration::from_millis(200);
    let mut packets = r.recv_timeout(timer)?;
    while let Ok(mut more_packets) = r.try_recv() {
        packets.packets.append(&mut more_packets.packets)
    }
    let now = Instant::now();
    inc_new_counter_debug!("streamer-recv_window-recv", packets.packets.len());
    let last_root = blocktree.last_root();
    let (shreds, packets_ix): (Vec<_>, Vec<_>) = thread_pool.install(|| {
        packets
            .packets
            .par_iter_mut()
            .enumerate()
            .filter_map(|(i, packet)| {
                if let Ok(s) = bincode::deserialize(&packet.data) {
                    let shred: Shred = s;
                    if shred_filter(&shred, &packet.data, last_root) {
                        packet.meta.slot = shred.slot();
                        packet.meta.seed = shred.seed();
                        Some((shred, i))
                    } else {
                        None
                    }
                } else {
                    None
                }
            })
            .unzip()
    });

    // To avoid lookups into the `packets_ix` vec, this block manually tracks
    // where we are in that vec, and since `packets.packets.retain` and the
    // `packets_ix` vec are both in order, we should be able to automatically
    // drop any packets in the index gaps.
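    // Illustrative walk-through (not from the original source): with five
    // packets and `packets_ix == [0, 2, 3]`, the closure below keeps packets
    // 0, 2, and 3 and drops 1 and 4; `retain_ix` only advances on a match,
    // saturating at the last valid index so it never goes out of bounds.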
    let mut retain_ix = 0;
    let mut i = 0;
    packets.packets.retain(|_| {
        let retain = if !packets_ix.is_empty() && i == packets_ix[retain_ix] {
            retain_ix = (packets_ix.len() - 1).min(retain_ix + 1);
            true
        } else {
            false
        };
        i += 1;
        retain
    });

    trace!("{:?} shreds from packets", shreds.len());
    trace!(
        "{} num shreds received: {}",
        my_pubkey,
        packets.packets.len()
    );

    if !packets.packets.is_empty() {
        // Ignore the send error, as the retransmit is optional (e.g. replicators don't retransmit)
        let _ = retransmit.send(packets);
    }

    blocktree.insert_shreds(shreds, Some(leader_schedule_cache))?;

    trace!(
        "Elapsed processing time in recv_window(): {}",
        duration_as_ms(&now.elapsed())
    );

    Ok(())
}

// Implement a destructor for the window_service thread to signal it exited
// even on panics
struct Finalizer {
    exit_sender: Arc<AtomicBool>,
}

impl Finalizer {
    fn new(exit_sender: Arc<AtomicBool>) -> Self {
        Finalizer { exit_sender }
    }
}

// Implement a destructor for Finalizer.
impl Drop for Finalizer {
    fn drop(&mut self) {
        self.exit_sender.store(true, Ordering::Relaxed);
    }
}
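// Usage sketch (illustrative; mirrors the spawn below): binding a guard at the
// top of a thread closure sets the exit flag on every exit path, including
// panics, because `Drop` runs during unwinding:
//
//     let _exit = Finalizer::new(exit.clone());
//     // ... thread body; `exit` flips to `true` when the closure ends.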
pub struct WindowService {
    t_window: JoinHandle<()>,
    repair_service: RepairService,
}

impl WindowService {
    #[allow(clippy::too_many_arguments)]
    pub fn new<F>(
        blocktree: Arc<Blocktree>,
        cluster_info: Arc<RwLock<ClusterInfo>>,
        r: PacketReceiver,
        retransmit: PacketSender,
        repair_socket: Arc<UdpSocket>,
        exit: &Arc<AtomicBool>,
        repair_strategy: RepairStrategy,
        leader_schedule_cache: &Arc<LeaderScheduleCache>,
        shred_filter: F,
    ) -> WindowService
    where
        F: 'static
            + Fn(&Pubkey, &Shred, &[u8], Option<Arc<Bank>>, u64) -> bool
            + std::marker::Send
            + std::marker::Sync,
    {
        let bank_forks = match repair_strategy {
            RepairStrategy::RepairRange(_) => None,
            RepairStrategy::RepairAll { ref bank_forks, .. } => Some(bank_forks.clone()),
        };

        let repair_service = RepairService::new(
            blocktree.clone(),
            exit.clone(),
            repair_socket,
            cluster_info.clone(),
            repair_strategy,
        );
        let exit = exit.clone();
        let shred_filter = Arc::new(shred_filter);
        let leader_schedule_cache = leader_schedule_cache.clone();
        let t_window = Builder::new()
            .name("solana-window".to_string())
            // TODO: Mark: Why is it overflowing
            .stack_size(8 * 1024 * 1024)
            .spawn(move || {
                let _exit = Finalizer::new(exit.clone());
                let id = cluster_info.read().unwrap().id();
                trace!("{}: RECV_WINDOW started", id);
                let mut now = Instant::now();
                let thread_pool = rayon::ThreadPoolBuilder::new()
                    .num_threads(get_thread_count())
                    .build()
                    .unwrap();
                loop {
                    if exit.load(Ordering::Relaxed) {
                        break;
                    }
                    if let Err(e) = recv_window(
                        &blocktree,
                        &id,
                        &r,
                        &retransmit,
                        |shred, shred_buf, last_root| {
                            shred_filter(
                                &id,
                                shred,
                                shred_buf,
                                bank_forks
                                    .as_ref()
                                    .map(|bank_forks| bank_forks.read().unwrap().working_bank()),
                                last_root,
                            )
                        },
                        &thread_pool,
                        &leader_schedule_cache,
                    ) {
                        match e {
                            Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                            Error::RecvTimeoutError(RecvTimeoutError::Timeout) => {
                                if now.elapsed() > Duration::from_secs(30) {
                                    warn!("Window does not seem to be receiving data. Ensure port configuration is correct...");
                                    now = Instant::now();
                                }
                            }
                            _ => {
                                inc_new_counter_error!("streamer-window-error", 1, 1);
                                error!("window error: {:?}", e);
                            }
                        }
                    } else {
                        now = Instant::now();
                    }
                }
            })
            .unwrap();

        WindowService {
            t_window,
            repair_service,
        }
    }
}

impl Service for WindowService {
    type JoinReturnType = ();

    fn join(self) -> thread::Result<()> {
        self.t_window.join()?;
        self.repair_service.join()
    }
}
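// Illustrative wiring (an assumption, not from this file): a caller would
// typically supply `should_retransmit_and_persist` as the `shred_filter`,
// capturing its own leader schedule cache in the closure, e.g.
//
//     let cache = leader_schedule_cache.clone();
//     let window_service = WindowService::new(
//         blocktree, cluster_info, receiver, retransmit_sender, repair_socket,
//         &exit, repair_strategy, &leader_schedule_cache,
//         move |id, shred, shred_buf, bank, last_root| {
//             should_retransmit_and_persist(shred, shred_buf, bank, &cache, id, last_root)
//         },
//     );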
#[cfg(test)]
mod test {
    use super::*;
    use crate::{
        blocktree::tests::make_many_slot_entries,
        blocktree::{get_tmp_ledger_path, Blocktree},
        cluster_info::ClusterInfo,
        contact_info::ContactInfo,
        entry::{create_ticks, Entry},
        genesis_utils::create_genesis_block_with_leader,
        packet::{Packet, Packets},
        repair_service::RepairSlotRange,
        service::Service,
        shred::Shredder,
    };
    use rand::{seq::SliceRandom, thread_rng};
    use solana_runtime::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH;
    use solana_sdk::{
        hash::Hash,
        signature::{Keypair, KeypairUtil},
    };
    use std::{
        net::UdpSocket,
        sync::atomic::{AtomicBool, Ordering},
        sync::mpsc::{channel, Receiver},
        sync::{Arc, RwLock},
        thread::sleep,
        time::Duration,
    };

    fn local_entries_to_shred(
        entries: Vec<Entry>,
        slot: u64,
        parent: u64,
        keypair: &Arc<Keypair>,
    ) -> Vec<Shred> {
        let mut shredder =
            Shredder::new(slot, parent, 0.0, keypair, 0).expect("Failed to create entry shredder");
        bincode::serialize_into(&mut shredder, &entries)
            .expect("Expect to write all entries to shreds");
        shredder.finalize_slot();
        shredder.shred_tuples.into_iter().map(|(s, _)| s).collect()
    }

    #[test]
    fn test_process_shred() {
        let blocktree_path = get_tmp_ledger_path!();
        let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap());
        let num_entries = 10;
        let original_entries = create_ticks(num_entries, Hash::default());
        let mut shreds =
            local_entries_to_shred(original_entries.clone(), 0, 0, &Arc::new(Keypair::new()));
        shreds.reverse();
        blocktree
            .insert_shreds(shreds, None)
            .expect("Expect successful processing of shred");

        assert_eq!(
            blocktree.get_slot_entries(0, 0, None).unwrap(),
            original_entries
        );

        drop(blocktree);
        Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
    }

    #[test]
    fn test_should_retransmit_and_persist() {
        let me_id = Pubkey::new_rand();
        let leader_keypair = Arc::new(Keypair::new());
        let leader_pubkey = leader_keypair.pubkey();
        let bank = Arc::new(Bank::new(
            &create_genesis_block_with_leader(100, &leader_pubkey, 10).genesis_block,
        ));
        let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));

        let mut shreds = local_entries_to_shred(vec![Entry::default()], 0, 0, &leader_keypair);
        let shred_bufs: Vec<_> = shreds
            .iter()
            .map(|s| bincode::serialize(s).unwrap())
            .collect();

        // with a Bank for slot 0, shred continues
        assert_eq!(
            should_retransmit_and_persist(
                &shreds[0],
                &shred_bufs[0],
                Some(bank.clone()),
                &cache,
                &me_id,
                0,
            ),
            true
        );

        // set the shred to have come from the wrong leader
        /*
        assert_eq!(
            should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id),
            false
        );
        */

        // with a Bank and no idea who leader is, shred gets thrown out
        shreds[0].set_slot(MINIMUM_SLOTS_PER_EPOCH as u64 * 3);
        assert_eq!(
            should_retransmit_and_persist(
                &shreds[0],
                &shred_bufs[0],
                Some(bank.clone()),
                &cache,
                &me_id,
                0
            ),
            false
        );

        // with a shred where shred.slot() == root, shred gets thrown out
        let slot = MINIMUM_SLOTS_PER_EPOCH as u64 * 3;
        let shreds =
            local_entries_to_shred(vec![Entry::default()], slot, slot - 1, &leader_keypair);
        assert_eq!(
            should_retransmit_and_persist(
                &shreds[0],
                &shred_bufs[0],
                Some(bank.clone()),
                &cache,
                &me_id,
                slot
            ),
            false
        );

        // with a shred where shred.parent() < root, shred gets thrown out
        let slot = MINIMUM_SLOTS_PER_EPOCH as u64 * 3;
        let shreds =
            local_entries_to_shred(vec![Entry::default()], slot + 1, slot - 1, &leader_keypair);
        assert_eq!(
            should_retransmit_and_persist(
                &shreds[0],
                &shred_bufs[0],
                Some(bank.clone()),
                &cache,
                &me_id,
                slot
            ),
            false
        );

        // if the shred came back from me, it doesn't continue, whether or not I have a bank
        /*
        assert_eq!(
            should_retransmit_and_persist(&shreds[0], None, &cache, &me_id),
            false
        );
        */
    }

    fn make_test_window(
        packet_receiver: Receiver<Packets>,
        exit: Arc<AtomicBool>,
    ) -> WindowService {
        let blocktree_path = get_tmp_ledger_path!();
        let (blocktree, _, _) = Blocktree::open_with_signal(&blocktree_path)
            .expect("Expected to be able to open database ledger");

        let blocktree = Arc::new(blocktree);
        let (retransmit_sender, _retransmit_receiver) = channel();
        let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
            ContactInfo::new_localhost(&Pubkey::default(), 0),
        )));
        let repair_sock = Arc::new(UdpSocket::bind(socketaddr_any!()).unwrap());
        let window = WindowService::new(
            blocktree,
            cluster_info,
            packet_receiver,
            retransmit_sender,
            repair_sock,
            &exit,
            RepairStrategy::RepairRange(RepairSlotRange { start: 0, end: 0 }),
            &Arc::new(LeaderScheduleCache::default()),
            |_, _, _, _, _| true,
        );
        window
    }

    #[test]
    fn test_recv_window() {
        let (packet_sender, packet_receiver) = channel();
        let exit = Arc::new(AtomicBool::new(false));
        let window = make_test_window(packet_receiver, exit.clone());
        // send 5 slots worth of data to the window
        let (shreds, _) = make_many_slot_entries(0, 5, 10);
        let packets: Vec<_> = shreds
            .into_iter()
            .map(|s| {
                let mut p = Packet::default();
                p.data
                    .copy_from_slice(&bincode::serialize(&s).unwrap());
                p
            })
            .collect();
        let mut packets = Packets::new(packets);
        packet_sender.send(packets.clone()).unwrap();
        sleep(Duration::from_millis(500));

        // add some empty packets to the data set. These should fail to deserialize
        packets.packets.append(&mut vec![Packet::default(); 10]);
        packets.packets.shuffle(&mut thread_rng());
        packet_sender.send(packets.clone()).unwrap();
        sleep(Duration::from_millis(500));

        // send 1 empty packet that cannot deserialize into a shred
        packet_sender
            .send(Packets::new(vec![Packet::default(); 1]))
            .unwrap();
        sleep(Duration::from_millis(500));

        exit.store(true, Ordering::Relaxed);
        window.join().unwrap();
    }
}