Fix all remaining clippy warnings

Fixes #586
Greg Fitzgerald, 2018-07-11 21:10:25 -06:00 (committed by Greg Fitzgerald)
parent 73ae3c3301
commit 30f0c25b65
18 changed files with 200 additions and 171 deletions

.clippy.toml (new file)

@@ -0,0 +1 @@
+too-many-arguments-threshold = 9
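The one-line .clippy.toml raises clippy's too_many_arguments threshold (7 by default at the time) so that nine-parameter functions such as the reworked recv_window further down pass the lint repo-wide, instead of needing a per-function allow. A minimal sketch of what the raised threshold permits; the function and its names are illustrative:

    // Hypothetical nine-parameter function: under clippy's default
    // threshold this trips `too_many_arguments`; with the setting
    // above, signatures up to nine parameters lint clean.
    fn nine_args(a: u8, b: u8, c: u8, d: u8, e: u8, f: u8, g: u8, h: u8, i: u8) -> u32 {
        [a, b, c, d, e, f, g, h, i].iter().map(|&x| u32::from(x)).sum()
    }

    fn main() {
        println!("{}", nine_args(1, 2, 3, 4, 5, 6, 7, 8, 9)); // prints 45
    }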


@@ -144,12 +144,8 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
         let verified_setup_len = verified_setup.len();
         verified_sender.send(verified_setup).unwrap();
-        BankingStage::process_packets(
-            bank.clone(),
-            &verified_receiver,
-            &signal_sender,
-            &packet_recycler,
-        ).unwrap();
+        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
+            .unwrap();
         check_txs(verified_setup_len, &signal_receiver, num_src_accounts);
@@ -163,12 +159,8 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
         let verified_len = verified.len();
         verified_sender.send(verified).unwrap();
-        BankingStage::process_packets(
-            bank.clone(),
-            &verified_receiver,
-            &signal_sender,
-            &packet_recycler,
-        ).unwrap();
+        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
+            .unwrap();
         check_txs(verified_len, &signal_receiver, tx);
     });
@@ -210,12 +202,8 @@ fn bench_banking_stage_single_from(bencher: &mut Bencher) {
             .collect();
         let verified_len = verified.len();
         verified_sender.send(verified).unwrap();
-        BankingStage::process_packets(
-            bank.clone(),
-            &verified_receiver,
-            &signal_sender,
-            &packet_recycler,
-        ).unwrap();
+        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
+            .unwrap();
         check_txs(verified_len, &signal_receiver, tx);
     });


@@ -40,7 +40,7 @@ impl BankingStage {
             .name("solana-banking-stage".to_string())
             .spawn(move || loop {
                 if let Err(e) = Self::process_packets(
-                    bank.clone(),
+                    &bank.clone(),
                     &verified_receiver,
                     &signal_sender,
                     &packet_recycler,
@@ -72,7 +72,7 @@ impl BankingStage {
     /// Process the incoming packets and send output `Signal` messages to `signal_sender`.
     /// Discard packets via `packet_recycler`.
     pub fn process_packets(
-        bank: Arc<Bank>,
+        bank: &Arc<Bank>,
         verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
         signal_sender: &Sender<Signal>,
         packet_recycler: &PacketRecycler,
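This signature change is the pattern repeated throughout the commit: clippy's needless_pass_by_value lint fires when an Arc is taken by value, because every caller is then forced to clone a handle the callee may not keep. Borrowing &Arc<Bank> moves any clone inside the function, where it happens only if a handle really must be retained. A minimal sketch of the pattern, with a toy Bank standing in for the real type:

    use std::sync::Arc;

    struct Bank;

    // Borrow the Arc; clone only where ownership is actually needed.
    fn process(bank: &Arc<Bank>) {
        let _retained = Arc::clone(bank);
    }

    fn main() {
        let bank = Arc::new(Bank);
        process(&bank); // no clone at the call site
    }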


@@ -399,7 +399,7 @@ fn converge(
     let window = default_window();
     let gossip_send_socket = udp_random_bind(8000, 10000, 5).unwrap();
     let ncp = Ncp::new(
-        spy_ref.clone(),
+        &spy_ref.clone(),
         window.clone(),
         spy_gossip,
         gossip_send_socket,


@@ -18,14 +18,14 @@ impl BlobFetchStage {
     pub fn new(
         socket: UdpSocket,
         exit: Arc<AtomicBool>,
-        blob_recycler: BlobRecycler,
+        blob_recycler: &BlobRecycler,
     ) -> (Self, BlobReceiver) {
         Self::new_multi_socket(vec![socket], exit, blob_recycler)
     }
     pub fn new_multi_socket(
         sockets: Vec<UdpSocket>,
         exit: Arc<AtomicBool>,
-        blob_recycler: BlobRecycler,
+        blob_recycler: &BlobRecycler,
     ) -> (Self, BlobReceiver) {
         let (blob_sender, blob_receiver) = channel();
         let thread_hdls: Vec<_> = sockets
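BlobFetchStage::new is just new_multi_socket with a one-element vec; the multi-socket form fans several sockets into a single channel by giving each reader thread a clone of the same Sender. A toy sketch of that fan-in shape, assuming plain byte vectors in place of UdpSockets:

    use std::sync::mpsc::{channel, Receiver};
    use std::thread::{self, JoinHandle};

    // One reader thread per source; all threads share one Sender,
    // so the consumer sees a single merged stream.
    fn fan_in(sources: Vec<Vec<u8>>) -> (Vec<JoinHandle<()>>, Receiver<u8>) {
        let (tx, rx) = channel();
        let handles = sources
            .into_iter()
            .map(|src| {
                let tx = tx.clone();
                thread::spawn(move || {
                    for byte in src {
                        tx.send(byte).unwrap();
                    }
                })
            })
            .collect();
        (handles, rx)
    }

    fn main() {
        let (handles, rx) = fan_in(vec![vec![1, 2], vec![3]]);
        for h in handles {
            h.join().unwrap();
        }
        let mut merged: Vec<u8> = rx.try_iter().collect();
        merged.sort();
        println!("{:?}", merged); // [1, 2, 3]
    }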


@@ -17,7 +17,7 @@ use transaction::Transaction;
 pub const TIME_SLICE: u64 = 60;
 pub const REQUEST_CAP: u64 = 1_000_000;
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
 pub enum DroneRequest {
     GetAirdrop {
         airdrop_request_amount: u64,
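Deriving Clone and Copy here (and on Request below) makes these plain-data enums freely passable by value, which sidesteps clippy complaints about moves and trivial clones. A sketch of the idea on a hypothetical enum, not the real DroneRequest:

    // For enums whose fields are all Copy, deriving Copy makes
    // pass-by-value free and silences lints such as `clone_on_copy`.
    #[derive(Debug, Clone, Copy)]
    enum Command {
        Airdrop { tokens: u64 },
        Ping,
    }

    fn handle(cmd: Command) -> u64 {
        match cmd {
            Command::Airdrop { tokens } => tokens,
            Command::Ping => 0,
        }
    }

    fn main() {
        let cmd = Command::Airdrop { tokens: 50 };
        println!("{} {}", handle(cmd), handle(cmd)); // cmd is Copy, so it is reusable
    }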


@@ -18,14 +18,14 @@ impl FetchStage {
     pub fn new(
         socket: UdpSocket,
         exit: Arc<AtomicBool>,
-        packet_recycler: PacketRecycler,
+        packet_recycler: &PacketRecycler,
     ) -> (Self, PacketReceiver) {
         Self::new_multi_socket(vec![socket], exit, packet_recycler)
     }
     pub fn new_multi_socket(
         sockets: Vec<UdpSocket>,
         exit: Arc<AtomicBool>,
-        packet_recycler: PacketRecycler,
+        packet_recycler: &PacketRecycler,
     ) -> (Self, PacketReceiver) {
         let (packet_sender, packet_receiver) = channel();
         let thread_hdls: Vec<_> = sockets


@@ -112,7 +112,7 @@ impl FullNode {
             entry_height,
             Some(ledger_tail),
             node,
-            network_entry_point,
+            &network_entry_point,
             exit.clone(),
         );
         info!(
@@ -208,7 +208,7 @@ impl FullNode {
         let bank = Arc::new(bank);
         let mut thread_hdls = vec![];
         let rpu = Rpu::new(
-            bank.clone(),
+            &bank.clone(),
             node.sockets.requests,
             node.sockets.respond,
             exit.clone(),
@@ -229,7 +229,7 @@ impl FullNode {
         thread_hdls.extend(tpu.thread_hdls());
         let window = FullNode::new_window(ledger_tail, entry_height, &crdt, &blob_recycler);
         let ncp = Ncp::new(
-            crdt.clone(),
+            &crdt.clone(),
             window.clone(),
             node.sockets.gossip,
             node.sockets.gossip_send,
@@ -285,13 +285,13 @@ impl FullNode {
         entry_height: u64,
         ledger_tail: Option<Vec<Entry>>,
         node: TestNode,
-        entry_point: NodeInfo,
+        entry_point: &NodeInfo,
         exit: Arc<AtomicBool>,
     ) -> Self {
         let bank = Arc::new(bank);
         let mut thread_hdls = vec![];
         let rpu = Rpu::new(
-            bank.clone(),
+            &bank.clone(),
             node.sockets.requests,
             node.sockets.respond,
             exit.clone(),
@@ -308,7 +308,7 @@ impl FullNode {
         let window = FullNode::new_window(ledger_tail, entry_height, &crdt, &blob_recycler);
         let ncp = Ncp::new(
-            crdt.clone(),
+            &crdt.clone(),
             window.clone(),
             node.sockets.gossip,
             node.sockets.gossip_send,
@@ -367,7 +367,7 @@ mod tests {
         let bank = Bank::new(&alice);
         let exit = Arc::new(AtomicBool::new(false));
         let entry = tn.data.clone();
-        let v = FullNode::new_validator(kp, bank, 0, None, tn, entry, exit);
+        let v = FullNode::new_validator(kp, bank, 0, None, tn, &entry, exit);
         v.close().unwrap();
     }
 }


@@ -81,13 +81,13 @@ impl Default for MetricsAgent {
 impl MetricsAgent {
     fn new(writer: Arc<MetricsWriter + Send + Sync>, write_frequency: Duration) -> Self {
         let (sender, receiver) = channel::<MetricsCommand>();
-        thread::spawn(move || Self::run(receiver, writer, write_frequency));
+        thread::spawn(move || Self::run(&receiver, &writer, write_frequency));
         MetricsAgent { sender }
     }
     fn run(
-        receiver: Receiver<MetricsCommand>,
-        writer: Arc<MetricsWriter>,
+        receiver: &Receiver<MetricsCommand>,
+        writer: &Arc<MetricsWriter + Send + Sync>,
         write_frequency: Duration,
     ) {
         trace!("run: enter");
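MetricsAgent::run now borrows the receiver and writer; the closure handed to thread::spawn still owns both for the thread's lifetime and merely lends them out, so needless_pass_by_value is satisfied without changing who owns what. A self-contained sketch of that shape, with a simplified stand-in for the MetricsWriter trait:

    use std::sync::mpsc::{channel, Receiver};
    use std::sync::Arc;
    use std::thread;

    trait Writer: Send + Sync {
        fn write(&self, point: &str);
    }

    struct Stdout;
    impl Writer for Stdout {
        fn write(&self, point: &str) {
            println!("{}", point);
        }
    }

    // The worker borrows its inputs; the spawning closure owns them,
    // so the borrows remain valid for the thread's lifetime.
    fn run(receiver: &Receiver<String>, writer: &Arc<dyn Writer>) {
        while let Ok(point) = receiver.recv() {
            writer.write(&point);
        }
    }

    fn main() {
        let (sender, receiver) = channel::<String>();
        let writer: Arc<dyn Writer> = Arc::new(Stdout);
        let handle = thread::spawn(move || run(&receiver, &writer));
        sender.send("cpu=0.5".to_string()).unwrap();
        drop(sender); // closing the channel lets `run` return
        handle.join().unwrap();
    }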


@@ -18,7 +18,7 @@ pub struct Ncp {
 impl Ncp {
     pub fn new(
-        crdt: Arc<RwLock<Crdt>>,
+        crdt: &Arc<RwLock<Crdt>>,
         window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
         gossip_listen_socket: UdpSocket,
         gossip_send_socket: UdpSocket,
@@ -93,7 +93,7 @@ mod tests {
         let c = Arc::new(RwLock::new(crdt));
         let w = Arc::new(RwLock::new(vec![]));
         let d = Ncp::new(
-            c.clone(),
+            &c.clone(),
             w,
             tn.sockets.gossip,
             tn.sockets.gossip_send,


@@ -4,7 +4,7 @@ use hash::Hash;
 use signature::{PublicKey, Signature};
 #[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
-#[derive(Serialize, Deserialize, Debug, Clone)]
+#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
 pub enum Request {
     GetBalance { key: PublicKey },
     GetLastId,
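The cfg_attr(feature = "cargo-clippy", ...) wrapper above (and on ed25519_verify below) is the 2018-era idiom for clippy attributes: cargo clippy sets the cargo-clippy feature, so the allow takes effect there, while a plain cargo build, which would not recognize clippy's lint names, never sees it. Sketched on a hypothetical function:

    // Only takes effect under `cargo clippy`; invisible to plain builds.
    #[cfg_attr(feature = "cargo-clippy", allow(ptr_arg))]
    fn total_len(v: &Vec<String>) -> usize {
        v.iter().map(String::len).sum()
    }

    fn main() {
        println!("{}", total_len(&vec!["a".to_string(), "bc".to_string()]));
    }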


@@ -41,7 +41,7 @@ pub struct Rpu {
 impl Rpu {
     pub fn new(
-        bank: Arc<Bank>,
+        bank: &Arc<Bank>,
         requests_socket: UdpSocket,
         respond_socket: UdpSocket,
         exit: Arc<AtomicBool>,


@@ -66,6 +66,7 @@ fn batch_size(batches: &[SharedPackets]) -> usize {
         .sum()
 }
+#[cfg_attr(feature = "cargo-clippy", allow(ptr_arg))]
 #[cfg(not(feature = "cuda"))]
 pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
     use rayon::prelude::*;
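ptr_arg is the lint that asks for &[T] in place of &Vec<T>, since a slice accepts more callers and the extra level of Vec buys nothing; the allow added here plausibly keeps this signature in step with the cuda-feature variant rather than changing both. What the lint normally wants, with illustrative names:

    // The `ptr_arg`-clean form: accept a slice, which &Vec coerces to.
    fn batch_len(batches: &[Vec<u8>]) -> usize {
        batches.iter().map(Vec::len).sum()
    }

    fn main() {
        let batches = vec![vec![1u8, 2, 3], vec![4u8]];
        println!("{}", batch_len(&batches)); // &Vec<Vec<u8>> coerces to &[Vec<u8>]
    }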


@@ -249,40 +249,18 @@ fn repair_window(
     Ok(())
 }
-fn recv_window(
+fn retransmit_all_leader_blocks(
+    maybe_leader: Option<NodeInfo>,
+    dq: &mut SharedBlobs,
     debug_id: u64,
-    locked_window: &Window,
-    crdt: &Arc<RwLock<Crdt>>,
     recycler: &BlobRecycler,
     consumed: &mut u64,
     received: &mut u64,
-    r: &BlobReceiver,
-    s: &BlobSender,
     retransmit: &BlobSender,
 ) -> Result<()> {
-    let timer = Duration::from_millis(200);
-    let mut dq = r.recv_timeout(timer)?;
-    let maybe_leader: Option<NodeInfo> = crdt.read()
-        .expect("'crdt' read lock in fn recv_window")
-        .leader_data()
-        .cloned();
-    while let Ok(mut nq) = r.try_recv() {
-        dq.append(&mut nq)
-    }
-    static mut COUNTER_RECV: Counter = create_counter!("streamer-recv_window-recv", LOG_RATE);
-    inc_counter!(COUNTER_RECV, dq.len());
-    debug!(
-        "{:x}: RECV_WINDOW {} {}: got packets {}",
-        debug_id,
-        *consumed,
-        *received,
-        dq.len(),
-    );
-    {
-        //retransmit all leader blocks
     let mut retransmit_queue = VecDeque::new();
     if let Some(leader) = maybe_leader {
-        for b in &dq {
+        for b in dq {
             let p = b.read().expect("'b' read lock in fn recv_window");
             //TODO this check isn't safe against adverserial packets
             //we need to maintain a sequence window
@@ -328,32 +306,19 @@ fn recv_window(
         inc_counter!(COUNTER_RETRANSMIT, retransmit_queue.len());
         retransmit.send(retransmit_queue)?;
     }
-    }
-    //send a contiguous set of blocks
-    let mut consume_queue = VecDeque::new();
-    while let Some(b) = dq.pop_front() {
-        let (pix, meta_size) = {
-            let p = b.write().expect("'b' write lock in fn recv_window");
-            (p.get_index()?, p.meta.size)
-        };
-        if pix > *received {
-            *received = pix;
-        }
-        // Got a blob which has already been consumed, skip it
-        // probably from a repair window request
-        if pix < *consumed {
-            debug!(
-                "{:x}: received: {} but older than consumed: {} skipping..",
-                debug_id, pix, *consumed
-            );
-            continue;
-        }
-        let w = (pix % WINDOW_SIZE) as usize;
-        //TODO, after the block are authenticated
-        //if we get different blocks at the same index
-        //that is a network failure/attack
-        trace!("window w: {} size: {}", w, meta_size);
-        {
+    Ok(())
+}
+fn process_blob(
+    b: SharedBlob,
+    pix: u64,
+    w: usize,
+    consume_queue: &mut SharedBlobs,
+    locked_window: &Window,
+    debug_id: u64,
+    recycler: &BlobRecycler,
+    consumed: &mut u64,
+) {
     let mut window = locked_window.write().unwrap();
     // Search the window for old blobs in the window
@@ -410,9 +375,7 @@ fn recv_window(
             let coding_end = block_start + erasure::NUM_CODED as u64;
             // We've received all this block's data blobs, go and null out the window now
             for j in block_start..*consumed {
-                if let Some(b) =
-                    mem::replace(&mut window[(j % WINDOW_SIZE) as usize], None)
-                {
+                if let Some(b) = mem::replace(&mut window[(j % WINDOW_SIZE) as usize], None) {
                     recycler.recycle(b);
                 }
             }
@@ -428,7 +391,83 @@ fn recv_window(
             }
         }
     }
 }
+fn recv_window(
+    debug_id: u64,
+    locked_window: &Window,
+    crdt: &Arc<RwLock<Crdt>>,
+    recycler: &BlobRecycler,
+    consumed: &mut u64,
+    received: &mut u64,
+    r: &BlobReceiver,
+    s: &BlobSender,
+    retransmit: &BlobSender,
+) -> Result<()> {
+    let timer = Duration::from_millis(200);
+    let mut dq = r.recv_timeout(timer)?;
+    let maybe_leader: Option<NodeInfo> = crdt.read()
+        .expect("'crdt' read lock in fn recv_window")
+        .leader_data()
+        .cloned();
+    while let Ok(mut nq) = r.try_recv() {
+        dq.append(&mut nq)
+    }
+    static mut COUNTER_RECV: Counter = create_counter!("streamer-recv_window-recv", LOG_RATE);
+    inc_counter!(COUNTER_RECV, dq.len());
+    debug!(
+        "{:x}: RECV_WINDOW {} {}: got packets {}",
+        debug_id,
+        *consumed,
+        *received,
+        dq.len(),
+    );
+    retransmit_all_leader_blocks(
+        maybe_leader,
+        &mut dq,
+        debug_id,
+        recycler,
+        consumed,
+        received,
+        retransmit,
+    )?;
+    //send a contiguous set of blocks
+    let mut consume_queue = VecDeque::new();
+    while let Some(b) = dq.pop_front() {
+        let (pix, meta_size) = {
+            let p = b.write().expect("'b' write lock in fn recv_window");
+            (p.get_index()?, p.meta.size)
+        };
+        if pix > *received {
+            *received = pix;
+        }
+        // Got a blob which has already been consumed, skip it
+        // probably from a repair window request
+        if pix < *consumed {
+            debug!(
+                "{:x}: received: {} but older than consumed: {} skipping..",
+                debug_id, pix, *consumed
+            );
+            continue;
+        }
+        let w = (pix % WINDOW_SIZE) as usize;
+        //TODO, after the block are authenticated
+        //if we get different blocks at the same index
+        //that is a network failure/attack
+        trace!("window w: {} size: {}", w, meta_size);
+        process_blob(
+            b,
+            pix,
+            w,
+            &mut consume_queue,
+            locked_window,
+            debug_id,
+            recycler,
+            consumed,
+        );
+    }
     print_window(debug_id, locked_window, *consumed);
     trace!("sending consume_queue.len: {}", consume_queue.len());
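The recv_window split into the two new helpers is the usual answer to clippy's complexity lints (cyclomatic_complexity at the time, since renamed): hoist each self-contained phase into a named function so the parent reads as a pipeline. A toy sketch of the extraction shape only, not the real blob-window logic:

    // Each phase gets a name and a narrow argument list; the parent
    // function becomes a short, readable pipeline.
    fn drop_stale(queue: &mut Vec<u64>, consumed: u64) {
        queue.retain(|&ix| ix >= consumed);
    }

    fn advance(queue: &[u64], consumed: &mut u64) {
        for &ix in queue {
            if ix == *consumed {
                *consumed += 1;
            }
        }
    }

    fn recv_like(queue: &mut Vec<u64>, consumed: &mut u64) {
        drop_stale(queue, *consumed);
        queue.sort_unstable();
        advance(queue, consumed);
    }

    fn main() {
        let mut q = vec![2, 0, 1];
        let mut consumed = 0;
        recv_like(&mut q, &mut consumed);
        println!("consumed up to {}", consumed); // prints 3
    }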


@@ -63,7 +63,7 @@ impl Tpu {
         let packet_recycler = PacketRecycler::default();
         let (fetch_stage, packet_receiver) =
-            FetchStage::new(transactions_socket, exit, packet_recycler.clone());
+            FetchStage::new(transactions_socket, exit, &packet_recycler.clone());
         let (sigverify_stage, verified_receiver) = SigVerifyStage::new(packet_receiver);


@@ -83,7 +83,7 @@ impl Tvu {
         let (fetch_stage, blob_fetch_receiver) = BlobFetchStage::new_multi_socket(
             vec![replicate_socket, repair_socket],
             exit,
-            blob_recycler.clone(),
+            &blob_recycler.clone(),
         );
         //TODO
         //the packets coming out of blob_receiver need to be sent to the GPU and verified
@@ -161,7 +161,7 @@ pub mod tests {
     ) -> Result<(Ncp, Window)> {
         let window = streamer::default_window();
         let send_sock = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
-        let ncp = Ncp::new(crdt, window.clone(), listen, send_sock, exit)?;
+        let ncp = Ncp::new(&crdt, window.clone(), listen, send_sock, exit)?;
         Ok((ncp, window))
     }
     /// Test that message sent from leader to target1 and replicated to target2


@@ -21,7 +21,7 @@ fn test_node(exit: Arc<AtomicBool>) -> (Arc<RwLock<Crdt>>, Ncp, UdpSocket) {
     let c = Arc::new(RwLock::new(crdt));
     let w = Arc::new(RwLock::new(vec![]));
     let d = Ncp::new(
-        c.clone(),
+        &c.clone(),
        w,
        tn.sockets.gossip,
        tn.sockets.gossip_send,


@@ -35,7 +35,7 @@ fn converge(leader: &NodeInfo, num_nodes: usize) -> Vec<NodeInfo> {
     let spy_ref = Arc::new(RwLock::new(spy_crdt));
     let spy_window = default_window();
     let ncp = Ncp::new(
-        spy_ref.clone(),
+        &spy_ref.clone(),
        spy_window,
        spy.sockets.gossip,
        spy.sockets.gossip_send,