//! `seed` subcommand - runs a dns seeder
use std::{
|
|
|
|
future::Future,
|
|
|
|
pin::Pin,
|
2019-11-07 13:12:18 -08:00
|
|
|
sync::{Arc, Mutex},
|
2019-11-05 16:40:18 -08:00
|
|
|
task::{Context, Poll},
|
|
|
|
};
|
|
|
|
|
2019-12-13 14:25:14 -08:00
|
|
|
use abscissa_core::{Command, Options, Runnable};
|
|
|
|
use futures::{channel::oneshot, prelude::*};
|
2019-11-07 13:12:18 -08:00
|
|
|
use tower::{buffer::Buffer, Service, ServiceExt};
|
2019-11-13 14:03:12 -08:00
|
|
|
|
2019-11-07 13:12:18 -08:00
|
|
|
use zebra_network::{AddressBook, BoxedStdError, Request, Response};
|
|
|
|
|
2019-12-20 11:20:04 -08:00
|
|
|
use crate::{
|
|
|
|
error::{Error, ErrorKind},
|
|
|
|
prelude::*,
|
|
|
|
};
|
2019-10-24 13:28:42 -07:00
|
|
|
|
2019-11-11 15:08:59 -08:00
|
|
|
/// Whether our `SeedService` is poll_ready or not.
|
|
|
|
#[derive(Debug)]
|
|
|
|
enum SeederState {
|
2019-11-11 17:41:46 -08:00
|
|
|
/// Waiting for the address book to be shared with us via the oneshot channel.
|
2019-11-11 15:08:59 -08:00
|
|
|
AwaitingAddressBook(oneshot::Receiver<Arc<Mutex<AddressBook>>>),
|
2019-11-11 17:41:46 -08:00
|
|
|
/// Address book received, ready to service requests.
|
2019-11-11 15:08:59 -08:00
|
|
|
Ready(Arc<Mutex<AddressBook>>),
|
2019-11-07 13:12:18 -08:00
|
|
|
}
|
|
|
|
|
2019-11-11 15:08:59 -08:00
|
|
|
#[derive(Debug)]
|
|
|
|
struct SeedService {
|
|
|
|
state: SeederState,
|
2019-11-07 13:12:18 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
impl Service<Request> for SeedService {
|
|
|
|
type Response = Response;
|
|
|
|
type Error = BoxedStdError;
|
|
|
|
type Future =
|
|
|
|
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
|
|
|
|
2019-11-13 15:02:06 -08:00
|
|
|
#[instrument(skip(self, _cx))]
|
2019-11-11 15:08:59 -08:00
|
|
|
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
2019-11-12 12:00:56 -08:00
|
|
|
match self.state {
|
|
|
|
SeederState::Ready(_) => return Poll::Ready(Ok(())),
|
|
|
|
SeederState::AwaitingAddressBook(ref mut rx) => match rx.try_recv() {
|
|
|
|
Err(e) => {
|
2019-11-13 15:02:06 -08:00
|
|
|
error!("oneshot sender dropped, failing service: {:?}", e);
|
2019-11-12 12:00:56 -08:00
|
|
|
return Poll::Ready(Err(e.into()));
|
|
|
|
}
|
|
|
|
Ok(None) => {
|
2019-11-13 15:02:06 -08:00
|
|
|
trace!("awaiting address book, service is unready");
|
2019-11-12 12:00:56 -08:00
|
|
|
return Poll::Pending;
|
|
|
|
}
|
2019-11-11 15:08:59 -08:00
|
|
|
Ok(Some(address_book)) => {
|
2019-11-13 15:02:06 -08:00
|
|
|
debug!("received address_book via oneshot, service becomes ready");
|
2019-11-12 12:00:56 -08:00
|
|
|
self.state = SeederState::Ready(address_book);
|
|
|
|
return Poll::Ready(Ok(()));
|
2019-11-11 15:08:59 -08:00
|
|
|
}
|
|
|
|
},
|
2019-11-12 12:00:56 -08:00
|
|
|
}
|
2019-11-07 13:12:18 -08:00
|
|
|
}
|
|
|
|
|
2019-11-13 15:02:06 -08:00
|
|
|
// Note: the generated span applies only to this function, not
|
|
|
|
// to the future, but this is OK because the current implementation
|
|
|
|
// is not actually async.
|
|
|
|
#[instrument]
|
2019-11-07 13:12:18 -08:00
|
|
|
fn call(&mut self, req: Request) -> Self::Future {
|
2019-11-12 13:36:56 -08:00
|
|
|
let address_book = if let SeederState::Ready(address_book) = &self.state {
|
|
|
|
address_book
|
|
|
|
} else {
|
|
|
|
panic!("SeedService::call without SeedService::poll_ready");
|
|
|
|
};
|
|
|
|
|
|
|
|
let response = match req {
|
|
|
|
Request::GetPeers => {
|
2019-11-13 14:03:12 -08:00
|
|
|
// Collect a list of known peers from the address book
|
|
|
|
// and sanitize their timestamps.
|
|
|
|
let mut peers = address_book
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.peers()
|
|
|
|
.map(|addr| addr.sanitize())
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
// The peers are still ordered by recency, so shuffle them.
|
|
|
|
use rand::seq::SliceRandom;
|
|
|
|
peers.shuffle(&mut rand::thread_rng());
|
|
|
|
// Finally, truncate the list so that we do not trivially
|
|
|
|
// reveal our entire peer set.
|
|
|
|
peers.truncate(50);
|
2019-11-13 15:02:06 -08:00
|
|
|
debug!(peers.len = peers.len());
|
2019-11-13 14:03:12 -08:00
|
|
|
Ok(Response::Peers(peers))
|
|
|
|
}
|
|
|
|
_ => {
|
|
|
|
debug!("ignoring request");
|
2019-11-13 15:02:06 -08:00
|
|
|
Ok(Response::Ok)
|
2019-11-11 15:08:59 -08:00
|
|
|
}
|
2019-11-07 13:12:18 -08:00
|
|
|
};
|
|
|
|
return Box::pin(futures::future::ready(response));
|
|
|
|
}
|
|
|
|
}
|
2019-10-24 13:28:42 -07:00
|
|
|
|
|
|
|
/// `seed` subcommand
|
|
|
|
///
|
|
|
|
/// A DNS seeder command to spider and collect as many valid peer
|
|
|
|
/// addresses as we can.
|
2019-11-12 13:27:35 -08:00
|
|
|
// This is not a unit-like struct because it makes Command and Options sad.
|
2019-11-11 15:08:59 -08:00
|
|
|
#[derive(Command, Debug, Default, Options)]
|
2019-11-12 13:27:35 -08:00
|
|
|
pub struct SeedCmd {}
|
2019-10-24 13:28:42 -07:00
|
|
|
|
|
|
|
impl Runnable for SeedCmd {
|
|
|
|
/// Start the application.
|
|
|
|
fn run(&self) {
|
|
|
|
use crate::components::tokio::TokioComponent;
|
|
|
|
|
2019-12-13 14:25:14 -08:00
|
|
|
let _ = app_writer()
|
|
|
|
.state_mut()
|
2019-10-24 13:28:42 -07:00
|
|
|
.components
|
2019-12-13 14:25:14 -08:00
|
|
|
.get_downcast_mut::<TokioComponent>()
|
2019-10-24 13:28:42 -07:00
|
|
|
.expect("TokioComponent should be available")
|
|
|
|
.rt
|
2019-11-12 13:46:06 -08:00
|
|
|
.block_on(self.seed());
|
2019-10-24 13:28:42 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl SeedCmd {
|
2019-12-20 11:20:04 -08:00
|
|
|
async fn seed(&self) -> Result<(), Error> {
|
2019-10-24 13:28:42 -07:00
|
|
|
info!("begin tower-based peer handling test stub");
|
|
|
|
|
2019-11-11 15:08:59 -08:00
|
|
|
let (addressbook_tx, addressbook_rx) = oneshot::channel();
|
|
|
|
let seed_service = SeedService {
|
|
|
|
state: SeederState::AwaitingAddressBook(addressbook_rx),
|
|
|
|
};
|
|
|
|
let node = Buffer::new(seed_service, 1);
|
2019-10-24 13:28:42 -07:00
|
|
|
|
|
|
|
let config = app_config().network.clone();
|
|
|
|
|
2019-11-11 15:08:59 -08:00
|
|
|
let (mut peer_set, address_book) = zebra_network::init(config, node).await;
|
2019-11-07 13:12:18 -08:00
|
|
|
|
2019-11-11 15:08:59 -08:00
|
|
|
let _ = addressbook_tx.send(address_book);
|
2019-10-24 13:28:42 -07:00
|
|
|
|
|
|
|
info!("waiting for peer_set ready");
|
2019-12-20 11:20:04 -08:00
|
|
|
peer_set
|
|
|
|
.ready()
|
|
|
|
.await
|
|
|
|
.map_err(|e| Error::from(ErrorKind::Io.context(e)))?;
|
2019-10-24 13:28:42 -07:00
|
|
|
|
|
|
|
info!("peer_set became ready");
|
|
|
|
|
2019-11-11 15:08:59 -08:00
|
|
|
#[cfg(dos)]
|
2019-11-07 13:12:18 -08:00
|
|
|
use std::time::Duration;
|
|
|
|
|
2019-11-11 15:08:59 -08:00
|
|
|
#[cfg(dos)]
|
2019-11-07 13:12:18 -08:00
|
|
|
// Fire GetPeers requests at ourselves, for testing.
|
|
|
|
tokio::spawn(async move {
|
2019-12-13 14:25:14 -08:00
|
|
|
let mut interval_stream = tokio::time::interval(Duration::from_secs(1));
|
2019-11-07 13:12:18 -08:00
|
|
|
|
|
|
|
loop {
|
|
|
|
interval_stream.next().await;
|
|
|
|
|
|
|
|
let _ = seed_service.call(Request::GetPeers);
|
|
|
|
}
|
|
|
|
});
|
|
|
|
|
2019-12-13 14:25:14 -08:00
|
|
|
let eternity = future::pending::<()>();
|
2019-11-05 16:40:18 -08:00
|
|
|
eternity.await;
|
2019-10-24 13:28:42 -07:00
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
}
|