From 0a83b17cdd458438fc7be1c62534676c104264b7 Mon Sep 17 00:00:00 2001
From: Greg Fitzgerald
Date: Fri, 7 Dec 2018 20:01:28 -0700
Subject: [PATCH] Upgrade to Rust 1.31.0 (#2052)

* Upgrade to Rust 1.31.0
* Upgrade nightly
* Fix all clippy warnings
* Revert relaxed version check and update
---
 CONTRIBUTING.md                              |  2 +-
 benches/bank.rs                              |  3 +-
 benches/banking_stage.rs                     | 12 ++--
 ci/buildkite.yml                             |  6 +-
 ci/docker-rust/Dockerfile                    |  6 +-
 ci/version-check.sh                          |  4 +-
 drone/src/bin/drone.rs                       | 12 ++--
 drone/src/drone.rs                           |  6 +-
 metrics/src/metrics.rs                       | 12 ++--
 programs/native/budget/src/budget_program.rs |  3 +-
 programs/native/erc20/src/token_program.rs   |  5 +-
 programs/native/lua_loader/src/lib.rs        |  9 ++-
 sdk/src/hash.rs                              |  4 +-
 sdk/src/system_transaction.rs                |  3 +-
 sdk/src/transaction.rs                       |  3 +-
 src/bank.rs                                  | 70 ++++++++++----------
 src/banking_stage.rs                         | 15 +++--
 src/bin/bench-streamer.rs                    |  3 +-
 src/bin/bench-tps.rs                         | 32 ++++++---
 src/bin/fullnode-config.rs                   | 12 ++--
 src/bin/fullnode.rs                          | 18 +++--
 src/bin/genesis.rs                           | 12 ++--
 src/bin/keygen.rs                            |  3 +-
 src/bin/replicator.rs                        |  9 ++-
 src/bin/upload-perf.rs                       |  3 +-
 src/blob_fetch_stage.rs                      |  3 +-
 src/bloom.rs                                 |  4 +-
 src/broadcast_service.rs                     |  8 ++-
 src/chacha_cuda.rs                           | 26 ++++----
 src/cluster_info.rs                          | 49 +++++++++-----
 src/compute_leader_finality_service.rs       | 15 +++--
 src/counter.rs                               |  3 +-
 src/crds.rs                                  |  3 +-
 src/crds_gossip.rs                           | 30 ++++++---
 src/crds_gossip_pull.rs                      |  9 ++-
 src/crds_gossip_push.rs                      |  3 +-
 src/db_window.rs                             |  6 +-
 src/erasure.rs                               | 51 +++++++-------
 src/fetch_stage.rs                           |  3 +-
 src/fullnode.rs                              | 14 ++--
 src/leader_scheduler.rs                      | 33 ++++++---
 src/ledger.rs                                | 11 +--
 src/ledger_write_stage.rs                    |  6 +-
 src/poh_service.rs                           | 10 +--
 src/replicate_stage.rs                       |  6 +-
 src/replicator.rs                            |  3 +-
 src/result.rs                                |  3 +-
 src/retransmit_stage.rs                      |  6 +-
 src/rpc.rs                                   |  6 +-
 src/rpc_pubsub.rs                            |  7 +-
 src/rpc_request.rs                           |  3 +-
 src/runtime.rs                               |  9 ++-
 src/sigverify.rs                             | 12 ++--
 src/sigverify_stage.rs                       |  6 +-
 src/storage_stage.rs                         |  3 +-
 src/store_ledger_stage.rs                    |  3 +-
 src/streamer.rs                              | 10 +--
 src/thin_client.rs                           |  9 ++-
 src/tpu.rs                                   |  3 +-
 src/tpu_forwarder.rs                         | 10 +--
 src/tvu.rs                                   |  2 +-
 src/wallet.rs                                | 60 +++++++++++------
 src/window.rs                                |  9 ++-
 src/window_service.rs                        | 16 +++--
 tests/data_replicator.rs                     |  3 +-
 tests/multinode.rs                           | 30 ++++++---
 tests/programs.rs                            |  3 +-
 tests/replicator.rs                          |  3 +-
 vote-signer/src/bin/main.rs                  |  3 +-
 vote-signer/src/rpc.rs                       |  3 +-
 70 files changed, 487 insertions(+), 298 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 83bb01a853..344c0cc399 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -17,7 +17,7 @@ Rust coding conventions
 * All Rust code is linted with Clippy. If you'd prefer to ignore its advice, do so explicitly:
 
     ```rust
-    #[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
+    #[allow(clippy::too_many_arguments)]
     ```
 
 Note: Clippy defaults can be overridden in the top-level file `.clippy.toml`.

diff --git a/benches/bank.rs b/benches/bank.rs
index 6a70d888e9..1583aee6fd 100644
--- a/benches/bank.rs
+++ b/benches/bank.rs
@@ -40,7 +40,8 @@ fn bench_process_transaction(bencher: &mut Bencher) {
             // Finally, return the transaction to the benchmark.
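
Two mechanical rewrites account for most of the hunks in this patch: Rust 1.31 stabilized "tool lints", so clippy allows gated behind `#[cfg_attr(feature = "cargo-clippy", ...)]` become plain `#[allow(clippy::...)]` attributes, and the rustfmt shipped with 1.31 moves the trailing call of a wrapped method chain onto its own line. A minimal sketch of both patterns (toy code, not from this repo), assuming nothing beyond the standard library:

```rust
// Old form, gated behind a cargo feature (pre-1.31):
//     #[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
// New tool-lint form, stabilized in Rust 1.31:
#[allow(clippy::too_many_arguments)]
fn sum8(a: u64, b: u64, c: u64, d: u64, e: u64, f: u64, g: u64, h: u64) -> u64 {
    a + b + c + d + e + f + g + h
}

fn main() {
    // rustfmt in Rust 1.31 breaks the trailing call of a wrapped method
    // chain onto its own line, which is why so many hunks below are just
    // `}).collect();` turning into `})` followed by `.collect();`.
    let squares: Vec<u64> = (1..=4)
        .map(|x| x * x)
        .collect();
    println!("{} {:?}", sum8(1, 2, 3, 4, 5, 6, 7, 8), squares);
}
```
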
tx - }).collect(); + }) + .collect(); let mut id = bank.last_id(); diff --git a/benches/banking_stage.rs b/benches/banking_stage.rs index db8b22787f..1699545120 100644 --- a/benches/banking_stage.rs +++ b/benches/banking_stage.rs @@ -68,7 +68,8 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) { new.account_keys[1] = Pubkey::new(&to[0..32]); new.signatures = vec![Signature::new(&sig[0..64])]; new - }).collect(); + }) + .collect(); // fund all the accounts transactions.iter().for_each(|tx| { let fund = Transaction::system_move( @@ -98,7 +99,8 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) { .map(|x| { let len = x.read().unwrap().packets.len(); (x, iter::repeat(1).take(len).collect()) - }).collect(); + }) + .collect(); let (_stage, signal_receiver) = BankingStage::new( &bank, verified_receiver, @@ -170,7 +172,8 @@ fn bench_banking_stage_multi_programs(bencher: &mut Bencher) { assert_eq!(new.instructions.len(), progs); new.signatures = vec![Signature::new(&sig[0..64])]; new - }).collect(); + }) + .collect(); transactions.iter().for_each(|tx| { let fund = Transaction::system_move( &mint.keypair(), @@ -198,7 +201,8 @@ fn bench_banking_stage_multi_programs(bencher: &mut Bencher) { .map(|x| { let len = x.read().unwrap().packets.len(); (x, iter::repeat(1).take(len).collect()) - }).collect(); + }) + .collect(); let (_stage, signal_receiver) = BankingStage::new( &bank, verified_receiver, diff --git a/ci/buildkite.yml b/ci/buildkite.yml index 55bdc2cdb0..d1f1ccacc8 100644 --- a/ci/buildkite.yml +++ b/ci/buildkite.yml @@ -2,7 +2,7 @@ steps: - command: "ci/shellcheck.sh" name: "shellcheck [public]" timeout_in_minutes: 20 - - command: "ci/docker-run.sh solanalabs/rust:1.30.1 ci/test-checks.sh" + - command: "ci/docker-run.sh solanalabs/rust:1.31.0 ci/test-checks.sh" name: "checks [public]" env: CARGO_TARGET_CACHE_NAME: "checks" @@ -20,12 +20,12 @@ steps: env: CARGO_TARGET_CACHE_NAME: "nightly" timeout_in_minutes: 30 - - command: "ci/docker-run.sh solanalabs/rust:1.30.1 ci/test-stable.sh" + - command: "ci/docker-run.sh solanalabs/rust:1.31.0 ci/test-stable.sh" name: "stable [public]" env: CARGO_TARGET_CACHE_NAME: "stable" timeout_in_minutes: 30 - - command: "ci/docker-run.sh solanalabs/rust-nightly:2018-11-12 ci/test-nightly.sh" + - command: "ci/docker-run.sh solanalabs/rust-nightly:2018-12-05 ci/test-nightly.sh" name: "nightly [public]" env: CARGO_TARGET_CACHE_NAME: "nightly" diff --git a/ci/docker-rust/Dockerfile b/ci/docker-rust/Dockerfile index 9e911b179e..a97ef5b84f 100644 --- a/ci/docker-rust/Dockerfile +++ b/ci/docker-rust/Dockerfile @@ -1,6 +1,6 @@ # Note: when the rust version is changed also modify # ci/buildkite.yml to pick up the new image tag -FROM rust:1.30.1 +FROM rust:1.31.0 RUN set -x && \ apt update && \ @@ -20,8 +20,8 @@ RUN set -x && \ rsync \ sudo \ && \ - rustup component add rustfmt-preview && \ - rustup component add clippy-preview && \ + rustup component add rustfmt && \ + rustup component add clippy && \ rm -rf /var/lib/apt/lists/* && \ rustc --version && \ cargo --version diff --git a/ci/version-check.sh b/ci/version-check.sh index 9bcae250cb..9c9ef80ce3 100755 --- a/ci/version-check.sh +++ b/ci/version-check.sh @@ -24,8 +24,8 @@ nightly) require cargo 1.32.[0-9]+-nightly ;; stable) - require rustc 1.3[01].[0-9]+ - require cargo 1.3[01].[0-9]+ + require rustc 1.31.[0-9]+ + require cargo 1.31.[0-9]+ ;; *) echo Error: unknown argument: "$1" diff --git a/drone/src/bin/drone.rs b/drone/src/bin/drone.rs index a61153bfa6..a946a90f9d 100644 --- 
a/drone/src/bin/drone.rs +++ b/drone/src/bin/drone.rs @@ -40,19 +40,22 @@ fn main() -> Result<(), Box> { .takes_value(true) .required(true) .help("File from which to read the mint's keypair"), - ).arg( + ) + .arg( Arg::with_name("slice") .long("slice") .value_name("SECS") .takes_value(true) .help("Time slice over which to limit requests to drone"), - ).arg( + ) + .arg( Arg::with_name("cap") .long("cap") .value_name("NUM") .takes_value(true) .help("Request limit for time slice"), - ).get_matches(); + ) + .get_matches(); let mint_keypair = read_keypair(matches.value_of("keypair").unwrap()).expect("failed to read client keypair"); @@ -139,7 +142,8 @@ fn main() -> Result<(), Box> { io::ErrorKind::Other, format!("Drone response: {:?}", err), )) - })).then(|_| Ok(())); + })) + .then(|_| Ok(())); tokio::spawn(server) }); tokio::run(done); diff --git a/drone/src/drone.rs b/drone/src/drone.rs index c104ce3645..7968ccdb09 100644 --- a/drone/src/drone.rs +++ b/drone/src/drone.rs @@ -131,7 +131,8 @@ impl Drone { .add_field( "request_current", influxdb::Value::Integer(self.request_current as i64), - ).to_owned(), + ) + .to_owned(), ); info!("Requesting airdrop of {} to {:?}", tokens, to); @@ -283,7 +284,8 @@ pub fn run_local_drone(mint_keypair: Keypair, sender: Sender) { io::ErrorKind::Other, format!("Drone response: {:?}", err), )) - })).then(|_| Ok(())); + })) + .then(|_| Ok(())); tokio::spawn(server) }); tokio::run(done); diff --git a/metrics/src/metrics.rs b/metrics/src/metrics.rs index f8c15d175f..6425da17ac 100644 --- a/metrics/src/metrics.rs +++ b/metrics/src/metrics.rs @@ -236,13 +236,15 @@ pub fn set_panic_hook(program: &'static str) { // TODO: use ono.message() when it becomes stable ono.to_string(), ), - ).add_field( + ) + .add_field( "location", influxdb::Value::String(match ono.location() { Some(location) => location.to_string(), None => "?".to_string(), }), - ).add_field("host_id", influxdb::Value::Integer(*HOST_ID)) + ) + .add_field("host_id", influxdb::Value::Integer(*HOST_ID)) .to_owned(), ); // Flush metrics immediately in case the process exits immediately @@ -359,10 +361,12 @@ mod test { .add_field( "random_bool", influxdb::Value::Boolean(random::() < 128), - ).add_field( + ) + .add_field( "random_int", influxdb::Value::Integer(random::() as i64), - ).to_owned(); + ) + .to_owned(); agent.submit(point); } diff --git a/programs/native/budget/src/budget_program.rs b/programs/native/budget/src/budget_program.rs index eb9b5d5949..a0e78f392b 100644 --- a/programs/native/budget/src/budget_program.rs +++ b/programs/native/budget/src/budget_program.rs @@ -254,7 +254,8 @@ mod test { let index = index as usize; let key = &tx.account_keys[index]; (key, index < tx.signatures.len()) - }).zip(program_accounts.iter_mut()) + }) + .zip(program_accounts.iter_mut()) .map(|((key, is_signer), account)| KeyedAccount::new(key, is_signer, account)) .collect(); diff --git a/programs/native/erc20/src/token_program.rs b/programs/native/erc20/src/token_program.rs index b077199993..1d54dd63aa 100644 --- a/programs/native/erc20/src/token_program.rs +++ b/programs/native/erc20/src/token_program.rs @@ -106,7 +106,7 @@ impl Default for TokenProgram { } impl TokenProgram { - #[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))] + #[allow(clippy::needless_pass_by_value)] fn map_to_invalid_args(err: std::boxed::Box) -> Error { warn!("invalid argument: {:?}", err); Error::InvalidArgument @@ -424,7 +424,8 @@ impl TokenProgram { } else { TokenProgram::Invalid } - }).collect(); + }) + .collect(); for 
program_account in &input_program_accounts { info!("input_program_account: userdata={:?}", program_account); diff --git a/programs/native/lua_loader/src/lib.rs b/programs/native/lua_loader/src/lib.rs index d034115f59..71653baad2 100644 --- a/programs/native/lua_loader/src/lib.rs +++ b/programs/native/lua_loader/src/lib.rs @@ -169,7 +169,8 @@ mod tests { local tokens, _ = string.unpack("I", data) accounts[1].tokens = accounts[1].tokens - tokens accounts[2].tokens = accounts[2].tokens + tokens - "#.as_bytes() + "# + .as_bytes() .to_vec(); let alice_pubkey = Pubkey::default(); @@ -214,7 +215,8 @@ mod tests { let userdata = r#" local serialize = load(accounts[2].userdata)().serialize accounts[3].userdata = serialize({a=1, b=2, c=3}, nil, "s") - "#.as_bytes() + "# + .as_bytes() .to_vec(); let owner = Pubkey::default(); let program_account = Account { @@ -295,7 +297,8 @@ mod tests { let data = format!( r#"{{m=2, n={{"{}","{}","{}"}}, tokens=100}}"#, carol_pubkey, dan_pubkey, erin_pubkey - ).as_bytes() + ) + .as_bytes() .to_vec(); process(&owner, &mut keyed_accounts, &data, 0).unwrap(); diff --git a/sdk/src/hash.rs b/sdk/src/hash.rs index b0e8386460..4c75538474 100644 --- a/sdk/src/hash.rs +++ b/sdk/src/hash.rs @@ -15,10 +15,10 @@ pub struct Hasher { } impl Hasher { - pub fn hash(&mut self, val: &[u8]) -> () { + pub fn hash(&mut self, val: &[u8]) { self.hasher.input(val); } - pub fn hashv(&mut self, vals: &[&[u8]]) -> () { + pub fn hashv(&mut self, vals: &[&[u8]]) { for val in vals { self.hash(val); } diff --git a/sdk/src/system_transaction.rs b/sdk/src/system_transaction.rs index 7fd00a6dc4..e092c8b915 100644 --- a/sdk/src/system_transaction.rs +++ b/sdk/src/system_transaction.rs @@ -107,7 +107,8 @@ impl SystemTransaction for Transaction { .map(|(i, (_, amount))| { let spend = SystemInstruction::Move { tokens: *amount }; Instruction::new(0, &spend, vec![0, i as u8 + 1]) - }).collect(); + }) + .collect(); let to_keys: Vec<_> = moves.iter().map(|(to_key, _)| *to_key).collect(); Transaction::new_with_instructions( diff --git a/sdk/src/transaction.rs b/sdk/src/transaction.rs index cb4e174ac2..b950ec9abf 100644 --- a/sdk/src/transaction.rs +++ b/sdk/src/transaction.rs @@ -293,7 +293,8 @@ mod tests { 14, 229, 239, 119, 93, 5, 218, 161, 35, 3, 33, 0, 36, 100, 158, 252, 33, 161, 97, 185, 62, 89, 99, 195, 250, 249, 187, 189, 171, 118, 241, 90, 248, 14, 68, 219, 231, 62, 157, 5, 142, 27, 210, 117, - ])).expect("fu"); + ])) + .unwrap(); let to = Pubkey::new(&[ 1, 1, 1, 4, 5, 6, 7, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 7, 6, 5, 4, 1, 1, 1, diff --git a/src/bank.rs b/src/bank.rs index 1da3006aab..8dac7c4f0c 100644 --- a/src/bank.rs +++ b/src/bank.rs @@ -775,7 +775,8 @@ impl Bank { self.load_account(tx, &accounts, &mut last_ids, max_age, error_counters) } (_, Err(e)) => Err(e), - }).collect() + }) + .collect() } fn load_executable_accounts(&self, mut program_id: Pubkey) -> Result> { @@ -815,7 +816,8 @@ impl Bank { .map(|ix| { let program_id = tx.program_ids[ix.program_ids_index as usize]; self.load_executable_accounts(program_id) - }).collect() + }) + .collect() } pub fn store_accounts( @@ -889,7 +891,8 @@ impl Bank { debug!("process transaction failed {:?}", e); None } - }).collect(); + }) + .collect(); // unlock all the accounts with errors which are filtered by the above `filter_map` if !processed_transactions.is_empty() { let hash = Transaction::hash(&processed_transactions); @@ -933,7 +936,8 @@ impl Bank { }, ) } - }).collect(); + }) + .collect(); let execution_elapsed = now.elapsed(); 
let now = Instant::now(); self.store_accounts(txs, &executed, &loaded_accounts); @@ -1052,7 +1056,8 @@ impl Bank { ); self.unlock_accounts(&e.transactions, &results); Self::first_err(&results) - }).collect(); + }) + .collect(); Self::first_err(&results) } @@ -1169,14 +1174,16 @@ impl Bank { Some(tokens) } else { None - }.expect("invalid ledger, needs to start with mint deposit"); + } + .expect("invalid ledger, needs to start with mint deposit"); instruction = deserialize(tx.userdata(1)).unwrap(); let leader_payment = if let SystemInstruction::Move { tokens } = instruction { Some(tokens) } else { None - }.expect("invalid ledger, bootstrap leader payment expected"); + } + .expect("invalid ledger, bootstrap leader payment expected"); assert!(leader_payment <= mint_deposit); assert!(leader_payment > 0); @@ -1737,7 +1744,8 @@ mod tests { let last_id = hash(&serialize(&i).unwrap()); // Unique hash bank.register_tick(&last_id); last_id - }).collect(); + }) + .collect(); assert_eq!(bank.count_valid_ids(&[]).len(), 0); assert_eq!(bank.count_valid_ids(&[mint.last_id()]).len(), 0); for (i, id) in bank.count_valid_ids(&ids).iter().enumerate() { @@ -1979,12 +1987,11 @@ mod tests { let sink = subscriber.assign_id(sub_id.clone()).unwrap(); bank.add_account_subscription(bank_sub_id, alice.pubkey(), sink); - assert!( - bank.account_subscriptions - .write() - .unwrap() - .contains_key(&alice.pubkey()) - ); + assert!(bank + .account_subscriptions + .write() + .unwrap() + .contains_key(&alice.pubkey())); let account = bank.get_account(&alice.pubkey()).unwrap(); bank.check_account_subscriptions(&alice.pubkey(), &account); @@ -1996,13 +2003,11 @@ mod tests { } bank.remove_account_subscription(&bank_sub_id, &alice.pubkey()); - assert!( - !bank - .account_subscriptions - .write() - .unwrap() - .contains_key(&alice.pubkey()) - ); + assert!(!bank + .account_subscriptions + .write() + .unwrap() + .contains_key(&alice.pubkey())); } #[test] fn test_bank_signature_subscribe() { @@ -2021,12 +2026,11 @@ mod tests { let sink = subscriber.assign_id(sub_id.clone()).unwrap(); bank.add_signature_subscription(bank_sub_id, signature, sink); - assert!( - bank.signature_subscriptions - .write() - .unwrap() - .contains_key(&signature) - ); + assert!(bank + .signature_subscriptions + .write() + .unwrap() + .contains_key(&signature)); bank.check_signature_subscriptions(&signature, RpcSignatureStatus::Confirmed); let string = transport_receiver.poll(); @@ -2037,13 +2041,11 @@ mod tests { } bank.remove_signature_subscription(&bank_sub_id, &signature); - assert!( - !bank - .signature_subscriptions - .write() - .unwrap() - .contains_key(&signature) - ); + assert!(!bank + .signature_subscriptions + .write() + .unwrap() + .contains_key(&signature)); } #[test] fn test_first_err() { diff --git a/src/banking_stage.rs b/src/banking_stage.rs index 88ff342395..94c91f9ff0 100644 --- a/src/banking_stage.rs +++ b/src/banking_stage.rs @@ -44,6 +44,7 @@ pub struct BankingStage { impl BankingStage { /// Create the stage using `bank`. Exit when `verified_receiver` is dropped. 
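
The `#[allow(clippy::new_ret_no_self)]` attributes added just below and throughout this patch all cover the same shape: stage constructors in this codebase hand back the stage together with the receiving end of an output channel, and the newly stabilized `new_ret_no_self` lint fires because `new` does not return `Self`. A hypothetical minimal stage (illustrative names, not this repo's types) showing the shape being allowed:

```rust
use std::sync::mpsc::{channel, Receiver, Sender};

pub struct Stage {
    sender: Sender<u64>,
}

impl Stage {
    // `new` returns the stage *and* the receiver for its output, so its
    // return type is a tuple rather than `Self`, which trips the lint.
    #[allow(clippy::new_ret_no_self)]
    pub fn new() -> (Self, Receiver<u64>) {
        let (sender, receiver) = channel();
        (Stage { sender }, receiver)
    }
}

fn main() {
    let (stage, rx) = Stage::new();
    stage.sender.send(42).unwrap();
    assert_eq!(rx.recv().unwrap(), 42);
}
```
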
+ #[allow(clippy::new_ret_no_self)] pub fn new( bank: &Arc, verified_receiver: Receiver, @@ -105,11 +106,13 @@ impl BankingStage { }; thread_banking_exit.store(true, Ordering::Relaxed); return_result - }).unwrap() - }).collect(); + }) + .unwrap() + }) + .collect(); ( - BankingStage { + Self { bank_thread_hdls, poh_service, compute_finality_service, @@ -127,7 +130,8 @@ impl BankingStage { deserialize(&x.data[0..x.meta.size]) .map(|req| (req, x.meta.addr())) .ok() - }).collect() + }) + .collect() } fn process_transactions( @@ -190,7 +194,8 @@ impl BankingStage { None } } - }).collect(); + }) + .collect(); debug!("verified transactions {}", transactions.len()); Self::process_transactions(bank, &transactions, poh)?; new_tx_count += transactions.len(); diff --git a/src/bin/bench-streamer.rs b/src/bin/bench-streamer.rs index 149cc97d50..e1d127dec1 100644 --- a/src/bin/bench-streamer.rs +++ b/src/bin/bench-streamer.rs @@ -62,7 +62,8 @@ fn main() -> Result<()> { .value_name("NUM") .takes_value(true) .help("Use NUM receive sockets"), - ).get_matches(); + ) + .get_matches(); if let Some(n) = matches.value_of("num-recv-sockets") { num_sockets = max(num_sockets, n.to_string().parse().expect("integer")); diff --git a/src/bin/bench-tps.rs b/src/bin/bench-tps.rs index 62cc3bd971..c8fcb83358 100644 --- a/src/bin/bench-tps.rs +++ b/src/bin/bench-tps.rs @@ -136,7 +136,8 @@ fn send_barrier_transaction(barrier_client: &mut ThinClient, last_id: &mut Hash, .add_tag( "op", influxdb::Value::String("send_barrier_transaction".to_string()), - ).add_field("poll_count", influxdb::Value::Integer(poll_count)) + ) + .add_field("poll_count", influxdb::Value::Integer(poll_count)) .add_field("duration", influxdb::Value::Integer(duration_ms as i64)) .to_owned(), ); @@ -147,7 +148,8 @@ fn send_barrier_transaction(barrier_client: &mut ThinClient, last_id: &mut Hash, &id.pubkey(), &Duration::from_millis(100), &Duration::from_secs(10), - ).expect("Failed to get balance"); + ) + .expect("Failed to get balance"); if balance != 1 { panic!("Expected an account balance of 1 (balance: {}", balance); } @@ -201,7 +203,8 @@ fn generate_txs( Transaction::system_new(id, keypair.pubkey(), 1, last_id), timestamp(), ) - }).collect(); + }) + .collect(); let duration = signing_start.elapsed(); let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos()); @@ -220,7 +223,8 @@ fn generate_txs( .add_field( "duration", influxdb::Value::Integer(duration_as_ms(&duration) as i64), - ).to_owned(), + ) + .to_owned(), ); let sz = transactions.len() / threads; @@ -276,7 +280,8 @@ fn do_tx_transfers( .add_field( "duration", influxdb::Value::Integer(duration_as_ms(&transfer_start.elapsed()) as i64), - ).add_field("count", influxdb::Value::Integer(tx_len as i64)) + ) + .add_field("count", influxdb::Value::Integer(tx_len as i64)) .to_owned(), ); } @@ -339,7 +344,7 @@ fn fund_keys(client: &mut ThinClient, source: &Keypair, dests: &[Keypair], token let mut tries = 0; // this set of transactions just initializes us for bookkeeping - #[cfg_attr(feature = "cargo-clippy", allow(clone_double_ref))] // sigh + #[allow(clippy::clone_double_ref)] // sigh let mut to_fund_txs: Vec<_> = chunk .par_iter() .map(|(k, m)| { @@ -347,7 +352,8 @@ fn fund_keys(client: &mut ThinClient, source: &Keypair, dests: &[Keypair], token k.clone(), Transaction::system_move_many(k, &m, Default::default(), 0), ) - }).collect(); + }) + .collect(); let amount = chunk[0].1[0].1; @@ -731,8 +737,10 @@ fn main() { .name("solana-client-sample".to_string()) .spawn(move || { 
sample_tx_count(&exit_signal, &maxes, first_tx_count, &v, sample_period); - }).unwrap() - }).collect(); + }) + .unwrap() + }) + .collect(); let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new())); @@ -756,8 +764,10 @@ fn main() { &shared_tx_active_thread_count, &total_tx_sent_count, ); - }).unwrap() - }).collect(); + }) + .unwrap() + }) + .collect(); // generate and send transactions for the specified duration let start = Instant::now(); diff --git a/src/bin/fullnode-config.rs b/src/bin/fullnode-config.rs index a1d5aa9bf2..3433094b0f 100644 --- a/src/bin/fullnode-config.rs +++ b/src/bin/fullnode-config.rs @@ -27,27 +27,31 @@ fn main() { .long("local") .takes_value(false) .help("Detect network address from local machine configuration"), - ).arg( + ) + .arg( Arg::with_name("keypair") .short("k") .long("keypair") .value_name("PATH") .takes_value(true) .help("/path/to/id.json"), - ).arg( + ) + .arg( Arg::with_name("public") .short("p") .long("public") .takes_value(false) .help("Detect public network address using public servers"), - ).arg( + ) + .arg( Arg::with_name("bind") .short("b") .long("bind") .value_name("PORT") .takes_value(true) .help("Bind to port or address"), - ).get_matches(); + ) + .get_matches(); let bind_addr: SocketAddr = { let mut bind_addr = parse_port_or_addr(matches.value_of("bind"), FULLNODE_PORT_RANGE.0); diff --git a/src/bin/fullnode.rs b/src/bin/fullnode.rs index f586ece803..00d440dd48 100644 --- a/src/bin/fullnode.rs +++ b/src/bin/fullnode.rs @@ -37,25 +37,29 @@ fn main() { .short("v") .long("nosigverify") .help("Run without signature verification"), - ).arg( + ) + .arg( Arg::with_name("no-leader-rotation") .long("no-leader-rotation") .help("Disable leader rotation"), - ).arg( + ) + .arg( Arg::with_name("identity") .short("i") .long("identity") .value_name("PATH") .takes_value(true) .help("Run with the identity found in FILE"), - ).arg( + ) + .arg( Arg::with_name("network") .short("n") .long("network") .value_name("HOST:PORT") .takes_value(true) .help("Rendezvous with the network at this gossip entry point"), - ).arg( + ) + .arg( Arg::with_name("ledger") .short("l") .long("ledger") @@ -63,13 +67,15 @@ fn main() { .takes_value(true) .required(true) .help("Use DIR as persistent ledger location"), - ).arg( + ) + .arg( Arg::with_name("rpc") .long("rpc") .value_name("PORT") .takes_value(true) .help("Custom RPC port for this node"), - ).get_matches(); + ) + .get_matches(); let nosigverify = matches.is_present("nosigverify"); let use_only_bootstrap_leader = matches.is_present("no-leader-rotation"); diff --git a/src/bin/genesis.rs b/src/bin/genesis.rs index c58b199438..8da12c3de0 100644 --- a/src/bin/genesis.rs +++ b/src/bin/genesis.rs @@ -34,7 +34,8 @@ fn main() -> Result<(), Box> { .takes_value(true) .required(true) .help("Number of tokens to create in the mint"), - ).arg( + ) + .arg( Arg::with_name("mint") .short("m") .long("mint") @@ -42,7 +43,8 @@ fn main() -> Result<(), Box> { .takes_value(true) .required(true) .help("Path to file containing keys of the mint"), - ).arg( + ) + .arg( Arg::with_name("bootstrap-leader-keypair") .short("b") .long("bootstrap-leader-keypair") @@ -50,7 +52,8 @@ fn main() -> Result<(), Box> { .takes_value(true) .required(true) .help("Path to file containing the bootstrap leader's keypair"), - ).arg( + ) + .arg( Arg::with_name("ledger") .short("l") .long("ledger") @@ -58,7 +61,8 @@ fn main() -> Result<(), Box> { .takes_value(true) .required(true) .help("Use directory as persistent ledger location"), - ).get_matches(); + ) + 
.get_matches(); // Load the bootstreap leader keypair // TODO: Only the public key is really needed, genesis should not have access to the leader's diff --git a/src/bin/keygen.rs b/src/bin/keygen.rs index 10a49ca48b..a914a6a38e 100644 --- a/src/bin/keygen.rs +++ b/src/bin/keygen.rs @@ -19,7 +19,8 @@ fn main() -> Result<(), Box> { .value_name("PATH") .takes_value(true) .help("Path to generated file"), - ).get_matches(); + ) + .get_matches(); let mut path = dirs::home_dir().expect("home directory"); let outfile = if matches.is_present("outfile") { diff --git a/src/bin/replicator.rs b/src/bin/replicator.rs index 6527cf6fa8..fd28ad9366 100644 --- a/src/bin/replicator.rs +++ b/src/bin/replicator.rs @@ -29,14 +29,16 @@ fn main() { .value_name("PATH") .takes_value(true) .help("Run with the identity found in FILE"), - ).arg( + ) + .arg( Arg::with_name("network") .short("n") .long("network") .value_name("HOST:PORT") .takes_value(true) .help("Rendezvous with the network at this gossip entry point"), - ).arg( + ) + .arg( Arg::with_name("ledger") .short("l") .long("ledger") @@ -44,7 +46,8 @@ fn main() { .takes_value(true) .required(true) .help("use DIR as persistent ledger location"), - ).get_matches(); + ) + .get_matches(); let ledger_path = matches.value_of("ledger"); diff --git a/src/bin/upload-perf.rs b/src/bin/upload-perf.rs index d925fdef3c..7548a9c611 100644 --- a/src/bin/upload-perf.rs +++ b/src/bin/upload-perf.rs @@ -78,7 +78,8 @@ fn main() { .add_field( "commit", influxdb::Value::String(git_commit_hash.trim().to_string()), - ).to_owned(), + ) + .to_owned(), ); } let last_median = get_last_metrics(&"median".to_string(), &db, &name, &branch) diff --git a/src/blob_fetch_stage.rs b/src/blob_fetch_stage.rs index 54badfa503..c60f8a11d9 100644 --- a/src/blob_fetch_stage.rs +++ b/src/blob_fetch_stage.rs @@ -14,6 +14,7 @@ pub struct BlobFetchStage { } impl BlobFetchStage { + #[allow(clippy::new_ret_no_self)] pub fn new(socket: Arc, exit: Arc) -> (Self, BlobReceiver) { Self::new_multi_socket(vec![socket], exit) } @@ -27,7 +28,7 @@ impl BlobFetchStage { .map(|socket| streamer::blob_receiver(socket, exit.clone(), sender.clone())) .collect(); - (BlobFetchStage { exit, thread_hdls }, receiver) + (Self { exit, thread_hdls }, receiver) } pub fn close(&self) { diff --git a/src/bloom.rs b/src/bloom.rs index 2f74a2f3e0..fed8a16548 100644 --- a/src/bloom.rs +++ b/src/bloom.rs @@ -24,8 +24,8 @@ impl Bloom { /// https://hur.st/bloomfilter/ pub fn random(num: usize, false_rate: f64, max_bits: usize) -> Self { let min_num_bits = ((num as f64 * false_rate.log(2f64)) - / (1f64 / 2f64.powf(2f64.log(2f64))).log(2f64)).ceil() - as usize; + / (1f64 / 2f64.powf(2f64.log(2f64))).log(2f64)) + .ceil() as usize; let num_bits = cmp::max(1, cmp::min(min_num_bits, max_bits)); let num_keys = ((num_bits as f64 / num as f64) * 2f64.log(2f64)).round() as usize; let keys: Vec = (0..num_keys).map(|_| rand::thread_rng().gen()).collect(); diff --git a/src/broadcast_service.rs b/src/broadcast_service.rs index ab17c92d2f..b01a81c67a 100644 --- a/src/broadcast_service.rs +++ b/src/broadcast_service.rs @@ -29,7 +29,7 @@ pub enum BroadcastServiceReturnType { ChannelDisconnected, } -#[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))] +#[allow(clippy::too_many_arguments)] fn broadcast( max_tick_height: Option, tick_height: &mut u64, @@ -167,7 +167,8 @@ fn broadcast( .add_field( "transmit-index", influxdb::Value::Integer(transmit_index.data as i64), - ).to_owned(), + ) + .to_owned(), ); Ok(()) @@ -284,7 +285,8 @@ impl 
BroadcastService { max_tick_height, tick_height, ) - }).unwrap(); + }) + .unwrap(); Self { thread_hdl } } diff --git a/src/chacha_cuda.rs b/src/chacha_cuda.rs index b9dfb31042..bf4e078aa9 100644 --- a/src/chacha_cuda.rs +++ b/src/chacha_cuda.rs @@ -131,13 +131,12 @@ mod tests { ); let mut cpu_iv = ivecs.clone(); - assert!( - chacha_cbc_encrypt_file( - &Path::new(&ledger_path).join(LEDGER_DATA_FILE), - out_path, - &mut cpu_iv, - ).is_ok() - ); + assert!(chacha_cbc_encrypt_file( + &Path::new(&ledger_path).join(LEDGER_DATA_FILE), + out_path, + &mut cpu_iv, + ) + .is_ok()); let ref_hash = sample_file(&out_path, &samples).unwrap(); @@ -175,13 +174,12 @@ mod tests { ); ivec[0] = i; ivecs.extend(ivec.clone().iter()); - assert!( - chacha_cbc_encrypt_file( - &Path::new(&ledger_path).join(LEDGER_DATA_FILE), - out_path, - &mut ivec, - ).is_ok() - ); + assert!(chacha_cbc_encrypt_file( + &Path::new(&ledger_path).join(LEDGER_DATA_FILE), + out_path, + &mut ivec, + ) + .is_ok()); ref_hashes.push(sample_file(&out_path, &samples).unwrap()); info!( diff --git a/src/cluster_info.rs b/src/cluster_info.rs index 30fab4d2ff..faa2f73a14 100644 --- a/src/cluster_info.rs +++ b/src/cluster_info.rs @@ -112,7 +112,7 @@ impl Signable for PruneData { // TODO These messages should go through the gpu pipeline for spam filtering #[derive(Serialize, Deserialize, Debug)] -#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))] +#[allow(clippy::large_enum_variant)] enum Protocol { /// Gossip protocol messages PullRequest(Bloom, CrdsValue), @@ -204,7 +204,8 @@ impl ClusterInfo { node.tpu.to_string(), node.rpc.to_string() ) - }).collect(); + }) + .collect(); format!( " NodeInfo.contact_info | Node identifier\n\ @@ -216,7 +217,7 @@ impl ClusterInfo { ) } - pub fn set_leader(&mut self, key: Pubkey) -> () { + pub fn set_leader(&mut self, key: Pubkey) { let prev = self.leader_id(); let self_id = self.gossip.id; let now = timestamp(); @@ -373,7 +374,8 @@ impl ClusterInfo { //TODO profile this, may need multiple sockets for par_iter assert!(rblob.meta.size <= BLOB_SIZE); s.send_to(&rblob.data[..rblob.meta.size], &v.tvu) - }).collect(); + }) + .collect(); for e in errs { if let Err(e) = &e { inc_new_counter_info!("cluster_info-retransmit-send_to_error", 1, 1); @@ -429,9 +431,11 @@ impl ClusterInfo { ids_and_tvus ); e - }).collect(); + }) + .collect(); send_errs_for_blob - }).collect() + }) + .collect() } fn create_broadcast_orders<'a>( @@ -531,12 +535,14 @@ impl ClusterInfo { .lookup(&peer_label) .and_then(|v| v.contact_info()) .map(|peer_info| (peer, filter, peer_info.gossip, self_info)) - }).collect(); + }) + .collect(); pr.into_iter() .map(|(peer, filter, gossip, self_info)| { self.gossip.mark_pull_request_creation_time(peer, now); (gossip, Protocol::PullRequest(filter, self_info)) - }).collect() + }) + .collect() } fn new_push_requests(&mut self) -> Vec<(SocketAddr, Protocol)> { let self_id = self.gossip.id; @@ -550,7 +556,8 @@ impl ClusterInfo { .lookup(&peer_label) .and_then(|v| v.contact_info()) .map(|p| p.gossip) - }).map(|peer| (peer, Protocol::PushMessage(self_id, msgs.clone()))) + }) + .map(|peer| (peer, Protocol::PushMessage(self_id, msgs.clone()))) .collect() } @@ -597,7 +604,8 @@ impl ClusterInfo { .and_then(|x| { let leader_label = CrdsValueLabel::ContactInfo(x); self.gossip.crds.lookup(&leader_label) - }).and_then(|x| x.contact_info()) + }) + .and_then(|x| x.contact_info()) } /// randomly pick a node and ask them for updates asynchronously @@ -629,7 +637,8 @@ impl ClusterInfo { 
sleep(Duration::from_millis(time_left)); } } - }).unwrap() + }) + .unwrap() } fn run_window_request( from: &NodeInfo, @@ -722,7 +731,8 @@ impl ClusterInfo { .into_iter() .flat_map(|request| { ClusterInfo::handle_protocol(obj, &blob.meta.addr(), request, window, ledger_window) - }).collect() + }) + .collect() } fn handle_pull_request( me: &Arc>, @@ -811,7 +821,8 @@ impl ClusterInfo { prune_msg.sign(&me.read().unwrap().keypair); let rsp = Protocol::PruneMessage(self_id, prune_msg); to_blob(rsp, ci.gossip).ok() - }).into_iter() + }) + .into_iter() .collect(); let mut blobs: Vec<_> = pushes .into_iter() @@ -977,7 +988,8 @@ impl ClusterInfo { me.gossip.crds.table.len() ); } - }).unwrap() + }) + .unwrap() } pub fn spy_node() -> (NodeInfo, UdpSocket) { @@ -1282,8 +1294,10 @@ mod tests { &vec![ Entry::new_tick(&zero, 0, &zero), Entry::new_tick(&one, 0, &one), - ].to_vec(), - ).unwrap(); + ] + .to_vec(), + ) + .unwrap(); path } @@ -1340,7 +1354,8 @@ mod tests { &me, leader_id, 0, - )[0].clone(); + )[0] + .clone(); let blob = shared_blob.read().unwrap(); // Test we copied the blob assert_eq!(blob.meta.size, blob_size); diff --git a/src/compute_leader_finality_service.rs b/src/compute_leader_finality_service.rs index d4047b4f48..172eea7ec4 100644 --- a/src/compute_leader_finality_service.rs +++ b/src/compute_leader_finality_service.rs @@ -60,7 +60,8 @@ impl ComputeLeaderFinalityService { } None - }).collect() + }) + .collect() }; let super_majority_stake = (2 * total_stake) / 3; @@ -77,7 +78,8 @@ impl ComputeLeaderFinalityService { .add_field( "duration_ms", influxdb::Value::Integer((now - last_valid_validator_timestamp) as i64), - ).to_owned(), + ) + .to_owned(), ); } @@ -115,7 +117,8 @@ impl ComputeLeaderFinalityService { Self::compute_finality(&bank, &mut last_valid_validator_timestamp); sleep(Duration::from_millis(COMPUTE_FINALITY_MS)); } - }).unwrap(); + }) + .unwrap(); (ComputeLeaderFinalityService { compute_finality_thread, @@ -162,7 +165,8 @@ pub mod tests { // sleep to get a different timestamp in the bank sleep(Duration::from_millis(1)); last_id - }).collect(); + }) + .collect(); // Create a total of 10 vote accounts, each will have a balance of 1 (after giving 1 to // their vote account), for a total staking pool of 10 tokens. 
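
Another batch of small hunks in this patch drops explicit unit returns, which clippy flags as `unused_unit`: signatures such as `pub fn set_leader(&mut self, key: Pubkey) -> ()` lose the `-> ()`, and spawned closures lose a trailing bare `()`. A toy sketch of both fixes (hypothetical names, not repo code):

```rust
pub struct Node {
    leader: u64,
}

impl Node {
    // Before: pub fn set_leader(&mut self, key: u64) -> () { ... }
    // The explicit unit return type is redundant and is simply omitted.
    pub fn set_leader(&mut self, key: u64) {
        self.leader = key;
    }
}

fn main() {
    let handle = std::thread::spawn(|| {
        // Before: a bare `()` expression followed this call; it, too,
        // is redundant, since the block already evaluates to unit.
        println!("worker done");
    });
    handle.join().unwrap();

    let mut node = Node { leader: 0 };
    node.set_leader(7);
    assert_eq!(node.leader, 7);
}
```
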
@@ -186,7 +190,8 @@ pub mod tests { bank.process_transaction(&vote_tx).unwrap(); } vote_account - }).collect(); + }) + .collect(); // There isn't 2/3 consensus, so the bank's finality value should be the default let mut last_finality_time = 0; diff --git a/src/counter.rs b/src/counter.rs index fa6145ff00..000b6b62d9 100644 --- a/src/counter.rs +++ b/src/counter.rs @@ -88,7 +88,8 @@ impl Counter { .add_field( "count", influxdb::Value::Integer(counts as i64 - lastlog as i64), - ).to_owned(), + ) + .to_owned(), ); } } diff --git a/src/crds.rs b/src/crds.rs index f61920c6d7..9fe00ca523 100644 --- a/src/crds.rs +++ b/src/crds.rs @@ -150,7 +150,8 @@ impl Crds { } else { None } - }).cloned() + }) + .cloned() .collect() } diff --git a/src/crds_gossip.rs b/src/crds_gossip.rs index 40a69700a2..222e4d48fc 100644 --- a/src/crds_gossip.rs +++ b/src/crds_gossip.rs @@ -44,7 +44,8 @@ impl CrdsGossip { .map(|val| { self.push .process_push_message(&mut self.crds, val.clone(), now) - }).collect(); + }) + .collect(); results .into_iter() .zip(values) @@ -58,7 +59,8 @@ impl CrdsGossip { } else { None } - }).collect() + }) + .collect() } pub fn new_push_messages(&mut self, now: u64) -> (Pubkey, Vec, Vec) { @@ -182,7 +184,8 @@ mod test { node.crds.insert(entry.clone(), 0).unwrap(); node.set_self(id); (new.label().pubkey(), Arc::new(Mutex::new(node))) - }).collect(); + }) + .collect(); let mut node = CrdsGossip::default(); let id = entry.label().pubkey(); node.crds.insert(entry.clone(), 0).unwrap(); @@ -207,7 +210,8 @@ mod test { origin.crds.insert(new.clone(), 0).unwrap(); node.set_self(id); (new.label().pubkey(), Arc::new(Mutex::new(node))) - }).collect(); + }) + .collect(); network.insert(id, Arc::new(Mutex::new(origin))); network } @@ -222,7 +226,8 @@ mod test { node.crds.insert(new.clone(), 0).unwrap(); node.set_self(id); (new.label().pubkey(), Arc::new(Mutex::new(node))) - }).collect(); + }) + .collect(); let keys: Vec = network.keys().cloned().collect(); for k in 0..keys.len() { let start_info = { @@ -320,7 +325,8 @@ mod test { .map(|node| { node.lock().unwrap().purge(now); node.lock().unwrap().new_push_messages(now) - }).collect(); + }) + .collect(); let transfered: Vec<_> = requests .par_iter() .map(|(from, peers, msgs)| { @@ -345,11 +351,13 @@ mod test { let now = timestamp(); node.process_prune_msg(*to, destination, &rsps, now, now) .unwrap() - }).unwrap(); + }) + .unwrap(); delivered += rsps.is_empty() as usize; } (bytes, delivered, num_msgs, prunes) - }).collect(); + }) + .collect(); for (b, d, m, p) in transfered { bytes += b; delivered += d; @@ -415,7 +423,8 @@ mod test { node.lock() .unwrap() .process_pull_request(caller_info, request, now) - }).unwrap(); + }) + .unwrap(); bytes += serialized_size(&rsp).unwrap() as usize; msgs += rsp.len(); network.get(&from).map(|node| { @@ -425,7 +434,8 @@ mod test { overhead += node.lock().unwrap().process_pull_response(from, rsp, now); }); (bytes, msgs, overhead) - }).collect(); + }) + .collect(); for (b, m, o) in transfered { bytes += b; msgs += m; diff --git a/src/crds_gossip_pull.rs b/src/crds_gossip_pull.rs index 31debf5d0d..a6133ecbb2 100644 --- a/src/crds_gossip_pull.rs +++ b/src/crds_gossip_pull.rs @@ -60,14 +60,16 @@ impl CrdsGossipPull { .filter_map(|v| v.value.contact_info()) .filter(|v| { v.id != self_id && !v.gossip.ip().is_unspecified() && !v.gossip.ip().is_multicast() - }).map(|item| { + }) + .map(|item| { let req_time: u64 = *self.pull_request_time.get(&item.id).unwrap_or(&0); let weight = cmp::max( 1, cmp::min(u64::from(u16::max_value()) - 1, 
(now - req_time) / 1024) as u32, ); (weight, item) - }).collect(); + }) + .collect(); if options.is_empty() { return Err(CrdsGossipError::NoPeers); } @@ -179,7 +181,8 @@ impl CrdsGossipPull { .map(|val| (val.value_hash, val.local_timestamp)); crds.remove(label); rv - }).collect(); + }) + .collect(); self.purged_values.append(&mut purged); } /// Purge values from the `self.purged_values` queue that are older then purge_timeout diff --git a/src/crds_gossip_push.rs b/src/crds_gossip_push.rs index dfc9c565eb..1f6af42b59 100644 --- a/src/crds_gossip_push.rs +++ b/src/crds_gossip_push.rs @@ -221,7 +221,8 @@ impl CrdsGossipPush { } else { Some(k) } - }).cloned() + }) + .cloned() .collect(); for k in old_msgs { self.push_messages.remove(&k); diff --git a/src/db_window.rs b/src/db_window.rs index c19343d249..90057b3766 100644 --- a/src/db_window.rs +++ b/src/db_window.rs @@ -238,7 +238,8 @@ pub fn retransmit_all_leader_blocks( .add_field( "count", influxdb::Value::Integer(retransmit_queue.len() as i64), - ).to_owned(), + ) + .to_owned(), ); if !retransmit_queue.is_empty() { @@ -641,7 +642,8 @@ mod test { let begin = k * gap + 1; let end = (k + 1) * gap; (begin..end) - }).collect(); + }) + .collect(); assert_eq!( find_missing_data_indexes( slot, diff --git a/src/erasure.rs b/src/erasure.rs index 79af21a946..852dfa827f 100644 --- a/src/erasure.rs +++ b/src/erasure.rs @@ -324,7 +324,8 @@ pub fn generate_coding( .map(|(i, l)| { trace!("{} i: {} data: {}", id, i, l.data[0]); &l.data[..max_data_size] - }).collect(); + }) + .collect(); let mut coding_locks: Vec<_> = coding_blobs.iter().map(|b| b.write().unwrap()).collect(); @@ -334,7 +335,8 @@ pub fn generate_coding( .map(|(i, l)| { trace!("{} i: {} coding: {}", id, i, l.data[0],); &mut l.data_mut()[..max_data_size] - }).collect(); + }) + .collect(); generate_coding_blocks(coding_ptrs.as_mut_slice(), &data_ptrs)?; debug!( @@ -575,10 +577,11 @@ pub mod test { coding_blocks.iter_mut().map(|x| x.as_mut_slice()).collect(); let v_slices: Vec<_> = vs.iter().map(|x| x.as_slice()).collect(); - assert!( - generate_coding_blocks(coding_blocks_slices.as_mut_slice(), v_slices.as_slice(),) - .is_ok() - ); + assert!(generate_coding_blocks( + coding_blocks_slices.as_mut_slice(), + v_slices.as_slice(), + ) + .is_ok()); } trace!("coding blocks:"); for b in &coding_blocks { @@ -594,13 +597,12 @@ pub mod test { coding_blocks.iter_mut().map(|x| x.as_mut_slice()).collect(); let mut v_slices: Vec<_> = vs.iter_mut().map(|x| x.as_mut_slice()).collect(); - assert!( - decode_blocks( - v_slices.as_mut_slice(), - coding_blocks_slices.as_mut_slice(), - erasures.as_slice(), - ).is_ok() - ); + assert!(decode_blocks( + v_slices.as_mut_slice(), + coding_blocks_slices.as_mut_slice(), + erasures.as_slice(), + ) + .is_ok()); } trace!("vs:"); @@ -635,7 +637,8 @@ pub mod test { slot_height, data_l.index().unwrap(), &data_l.data[..data_l.data_size().unwrap() as usize], - ).expect("Expected successful put into data column of ledger"); + ) + .expect("Expected successful put into data column of ledger"); } else { db_ledger .write_shared_blobs(slot_height, vec![data].into_iter()) @@ -661,7 +664,8 @@ pub mod test { slot_height, index, &coding_lock.data[..data_size as usize + BLOB_HEADER_SIZE], - ).unwrap(); + ) + .unwrap(); } } @@ -692,15 +696,14 @@ pub mod test { // Generate the coding blocks let mut index = (NUM_DATA + 2) as u64; - assert!( - generate_coding( - &Pubkey::default(), - &mut window, - offset as u64, - num_blobs, - &mut index - ).is_ok() - ); + assert!(generate_coding( + 
&Pubkey::default(), + &mut window, + offset as u64, + num_blobs, + &mut index + ) + .is_ok()); assert_eq!(index, (NUM_DATA - NUM_CODING) as u64); // put junk in the tails, simulates re-used blobs diff --git a/src/fetch_stage.rs b/src/fetch_stage.rs index 34d6dc3f07..422624403c 100644 --- a/src/fetch_stage.rs +++ b/src/fetch_stage.rs @@ -14,6 +14,7 @@ pub struct FetchStage { } impl FetchStage { + #[allow(clippy::new_ret_no_self)] pub fn new(sockets: Vec, exit: Arc) -> (Self, PacketReceiver) { let tx_sockets = sockets.into_iter().map(Arc::new).collect(); Self::new_multi_socket(tx_sockets, exit) @@ -28,7 +29,7 @@ impl FetchStage { .map(|socket| streamer::receiver(socket, exit.clone(), sender.clone(), "fetch-stage")) .collect(); - (FetchStage { exit, thread_hdls }, receiver) + (Self { exit, thread_hdls }, receiver) } pub fn close(&self) { diff --git a/src/fullnode.rs b/src/fullnode.rs index 03c50bcf4d..904aea1c5f 100644 --- a/src/fullnode.rs +++ b/src/fullnode.rs @@ -51,7 +51,7 @@ impl LeaderServices { self.tpu.is_exited() } - pub fn exit(&self) -> () { + pub fn exit(&self) { self.tpu.exit(); } } @@ -63,7 +63,7 @@ pub struct ValidatorServices { impl ValidatorServices { fn new(tvu: Tvu, tpu_forwarder: TpuForwarder) -> Self { - ValidatorServices { tvu, tpu_forwarder } + Self { tvu, tpu_forwarder } } pub fn join(self) -> Result> { @@ -76,7 +76,7 @@ impl ValidatorServices { self.tvu.is_exited() } - pub fn exit(&self) -> () { + pub fn exit(&self) { self.tvu.exit() } } @@ -201,7 +201,7 @@ impl Fullnode { } /// Create a fullnode instance acting as a leader or validator. - #[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))] + #[allow(clippy::too_many_arguments)] pub fn new_with_bank( keypair: Arc, vote_account_keypair: Arc, @@ -733,7 +733,8 @@ mod tests { false, None, ) - }).collect(); + }) + .collect(); //each validator can exit in parallel to speed many sequential calls to `join` vals.iter().for_each(|v| v.exit()); @@ -1034,7 +1035,8 @@ mod tests { ledger_initial_len, last_id, &tvu_address, - ).into_iter() + ) + .into_iter() .rev() .collect(); s_responder.send(msgs).expect("send"); diff --git a/src/leader_scheduler.rs b/src/leader_scheduler.rs index 4bedb277d9..f0629b3e06 100644 --- a/src/leader_scheduler.rs +++ b/src/leader_scheduler.rs @@ -328,12 +328,14 @@ impl LeaderScheduler { .filter(|vote| { vote.tick_height > lower_bound && vote.tick_height <= upper_bound - }).map(|_| vote_state.node_id); + }) + .map(|_| vote_state.node_id); } } None - }).collect() + }) + .collect() } } @@ -415,7 +417,8 @@ impl LeaderScheduler { } else { None } - }).collect(); + }) + .collect(); active_accounts.sort_by( |(pk1, t1), (pk2, t2)| { @@ -583,7 +586,8 @@ mod tests { &mint.keypair(), new_pubkey, last_id, - ).unwrap(); + ) + .unwrap(); // Create a vote account let new_vote_account = create_vote_account( @@ -591,7 +595,8 @@ mod tests { &bank, num_vote_account_tokens as u64, mint.last_id(), - ).unwrap(); + ) + .unwrap(); // Vote to make the validator part of the active set for the entire test // (we made the active_window_length large enough at the beginning of the test) push_vote(&new_vote_account, &bank, 1, mint.last_id()); @@ -780,7 +785,8 @@ mod tests { &mint.keypair(), new_pubkey, last_id, - ).unwrap(); + ) + .unwrap(); } let validators_pk: Vec = validators.iter().map(Keypair::pubkey).collect(); @@ -806,7 +812,8 @@ mod tests { &validators[i], new_pubkey, last_id, - ).unwrap(); + ) + .unwrap(); } let all_validators: Vec = validators @@ -995,7 +1002,8 @@ mod tests { &mint.keypair(), new_pubkey, last_id, 
- ).unwrap(); + ) + .unwrap(); // Create a vote account let new_vote_account = create_vote_account( @@ -1003,7 +1011,8 @@ mod tests { &bank, num_vote_account_tokens as u64, mint.last_id(), - ).unwrap(); + ) + .unwrap(); // Vote at height i * active_window_length for validator i push_vote( @@ -1226,7 +1235,8 @@ mod tests { &mint.keypair(), bootstrap_leader_id, last_id, - ).unwrap(); + ) + .unwrap(); // Create a vote account let new_vote_account = create_vote_account( @@ -1234,7 +1244,8 @@ mod tests { &bank, vote_account_tokens, mint.last_id(), - ).unwrap(); + ) + .unwrap(); // Add leader to the active set push_vote( diff --git a/src/ledger.rs b/src/ledger.rs index be8787f465..25f50c3a51 100644 --- a/src/ledger.rs +++ b/src/ledger.rs @@ -75,7 +75,7 @@ const LEDGER_INDEX_FILE: &str = "index"; // use a CONST because there's a cast, and we don't want "sizeof:: as u64"... const SIZEOF_U64: u64 = size_of::() as u64; -#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))] +#[allow(clippy::needless_pass_by_value)] fn err_bincode_to_io(e: Box) -> io::Error { io::Error::new(io::ErrorKind::Other, e.to_string()) } @@ -493,7 +493,8 @@ impl Block for [Entry] { .transactions .iter() .flat_map(VoteTransaction::get_votes) - }).collect() + }) + .collect() } } @@ -674,7 +675,8 @@ pub fn make_tiny_test_entries(num: usize) -> Vec { one, )], ) - }).collect() + }) + .collect() } pub fn make_large_test_entries(num_entries: usize) -> Vec { @@ -792,7 +794,8 @@ mod tests { num_hashes: 0, id: Hash::default(), transactions: vec![], - }).unwrap() as usize; + }) + .unwrap() as usize; assert!(tx_small_size < tx_large_size); assert!(tx_large_size < PACKET_DATA_SIZE); diff --git a/src/ledger_write_stage.rs b/src/ledger_write_stage.rs index e41f75f8a6..884c7886ce 100644 --- a/src/ledger_write_stage.rs +++ b/src/ledger_write_stage.rs @@ -54,6 +54,7 @@ impl LedgerWriteStage { Ok(()) } + #[allow(clippy::new_ret_no_self)] pub fn new(ledger_path: Option<&str>, entry_receiver: EntryReceiver) -> (Self, EntryReceiver) { let mut ledger_writer = ledger_path.map(|p| LedgerWriter::open(p, false).unwrap()); @@ -77,9 +78,10 @@ impl LedgerWriteStage { } } }; - }).unwrap(); + }) + .unwrap(); - (LedgerWriteStage { write_thread }, entry_forwarder) + (Self { write_thread }, entry_forwarder) } } diff --git a/src/poh_service.rs b/src/poh_service.rs index 6c9a3520e0..3bbecbeb2d 100644 --- a/src/poh_service.rs +++ b/src/poh_service.rs @@ -33,7 +33,7 @@ pub struct PohService { } impl PohService { - pub fn exit(&self) -> () { + pub fn exit(&self) { self.poh_exit.store(true, Ordering::Relaxed); } @@ -56,9 +56,10 @@ impl PohService { let return_value = Self::tick_producer(&mut poh_recorder_, config, &poh_exit_); poh_exit_.store(true, Ordering::Relaxed); return_value - }).unwrap(); + }) + .unwrap(); - PohService { + Self { tick_producer, poh_exit, } @@ -134,7 +135,8 @@ mod tests { break Ok(()); } } - }).unwrap() + }) + .unwrap() }; const HASHES_PER_TICK: u64 = 2; diff --git a/src/replicate_stage.rs b/src/replicate_stage.rs index 7478babe07..8b8c95549e 100644 --- a/src/replicate_stage.rs +++ b/src/replicate_stage.rs @@ -150,6 +150,7 @@ impl ReplicateStage { Ok(()) } + #[allow(clippy::new_ret_no_self)] pub fn new( keypair: Arc, vote_account_keypair: Arc, @@ -219,10 +220,11 @@ impl ReplicateStage { } None - }).unwrap(); + }) + .unwrap(); ( - ReplicateStage { + Self { t_responder, t_replicate, }, diff --git a/src/replicator.rs b/src/replicator.rs index 15c070797d..59a22e6468 100644 --- a/src/replicator.rs +++ b/src/replicator.rs @@ -83,6 
+83,7 @@ pub fn sample_file(in_path: &Path, sample_offsets: &[u64]) -> io::Result { } impl Replicator { + #[allow(clippy::new_ret_no_self)] pub fn new( ledger_path: Option<&str>, node: Node, @@ -246,7 +247,7 @@ impl Replicator { Err(e) => info!("Error occurred while sampling: {:?}", e), } - Ok(Replicator { + Ok(Self { gossip_service, fetch_stage, store_ledger_stage, diff --git a/src/result.rs b/src/result.rs index 7d17f114e5..5ae3f1cf62 100644 --- a/src/result.rs +++ b/src/result.rs @@ -182,6 +182,7 @@ mod tests { io::sink(), "{:?}", Error::from(io::Error::new(io::ErrorKind::NotFound, "hi")) - ).unwrap(); + ) + .unwrap(); } } diff --git a/src/retransmit_stage.rs b/src/retransmit_stage.rs index 5f5aacd7f2..f0e811ad23 100644 --- a/src/retransmit_stage.rs +++ b/src/retransmit_stage.rs @@ -72,7 +72,8 @@ fn retransmitter( } } trace!("exiting retransmitter"); - }).unwrap() + }) + .unwrap() } pub struct RetransmitStage { @@ -80,6 +81,7 @@ pub struct RetransmitStage { } impl RetransmitStage { + #[allow(clippy::new_ret_no_self)] pub fn new( db_ledger: Arc>, cluster_info: &Arc>, @@ -111,7 +113,7 @@ impl RetransmitStage { ); let thread_hdls = vec![t_retransmit, t_window]; - (RetransmitStage { thread_hdls }, entry_receiver) + (Self { thread_hdls }, entry_receiver) } } diff --git a/src/rpc.rs b/src/rpc.rs index 248ac6691b..6cf0eb7718 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -67,10 +67,9 @@ impl JsonRpcService { sleep(Duration::from_millis(100)); } server.unwrap().close(); - () }) .unwrap(); - JsonRpcService { thread_hdl, exit } + Self { thread_hdl, exit } } pub fn exit(&self) { @@ -462,7 +461,8 @@ mod tests { arc_bank .process_transaction(&tx) .expect("process transaction"); - }).join() + }) + .join() .unwrap(); assert_eq!(request_processor.get_transaction_count().unwrap(), 1); } diff --git a/src/rpc_pubsub.rs b/src/rpc_pubsub.rs index a82c771d66..ebcb26d9fe 100644 --- a/src/rpc_pubsub.rs +++ b/src/rpc_pubsub.rs @@ -67,7 +67,6 @@ impl PubSubService { sleep(Duration::from_millis(100)); } server.unwrap().close(); - () }) .unwrap(); PubSubService { thread_hdl, exit } @@ -146,7 +145,8 @@ impl RpcSolPubSub for RpcSolPubSubImpl { code: ErrorCode::InvalidParams, message: "Invalid Request: Invalid pubkey provided".into(), data: None, - }).unwrap(); + }) + .unwrap(); return; } let pubkey = Pubkey::new(&pubkey_vec); @@ -194,7 +194,8 @@ impl RpcSolPubSub for RpcSolPubSubImpl { code: ErrorCode::InvalidParams, message: "Invalid Request: Invalid signature provided".into(), data: None, - }).unwrap(); + }) + .unwrap(); return; } let signature = Signature::new(&signature_vec); diff --git a/src/rpc_request.rs b/src/rpc_request.rs index 42b2e9cc7e..4f1584a364 100644 --- a/src/rpc_request.rs +++ b/src/rpc_request.rs @@ -208,7 +208,8 @@ mod tests { .threads(1) .cors(DomainsValidation::AllowOnly(vec![ AccessControlAllowOrigin::Any, - ])).start_http(&rpc_addr) + ])) + .start_http(&rpc_addr) .expect("Unable to start RPC server"); sender.send(*server.address()).unwrap(); server.wait(); diff --git a/src/runtime.rs b/src/runtime.rs index 8f8b31e8a4..e31d6ab1a1 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -32,7 +32,8 @@ fn process_instruction( let index = index as usize; let key = &tx.account_keys[index]; (key, index < tx.signatures.len()) - }).zip(program_accounts.iter_mut()) + }) + .zip(program_accounts.iter_mut()) .map(|((key, is_signer), account)| KeyedAccount::new(key, is_signer, account)) .collect(); keyed_accounts.append(&mut keyed_accounts2); @@ -128,7 +129,8 @@ where // lifetime of this unsafe is only within 
the scope of the closure // there is no way to reorder them without breaking borrow checker rules unsafe { &mut *ptr } - }).collect(); + }) + .collect(); func(&mut subset) } @@ -150,7 +152,8 @@ pub fn execute_transaction( executable_accounts, program_accounts, tick_height, - ).map_err(|err| RuntimeError::ProgramError(instruction_index as u8, err))?; + ) + .map_err(|err| RuntimeError::ProgramError(instruction_index as u8, err))?; Ok(()) })?; } diff --git a/src/sigverify.rs b/src/sigverify.rs index 447a065024..0278005975 100644 --- a/src/sigverify.rs +++ b/src/sigverify.rs @@ -97,7 +97,8 @@ fn verify_packet(packet: &Packet) -> u8 { untrusted::Input::from(&packet.data[pubkey_start..pubkey_end]), untrusted::Input::from(&packet.data[msg_start..msg_end]), untrusted::Input::from(&packet.data[sig_start..sig_end]), - ).is_err() + ) + .is_err() { return 0; } @@ -195,7 +196,8 @@ pub fn ed25519_verify_cpu(batches: &[SharedPackets]) -> Vec> { .par_iter() .map(verify_packet) .collect() - }).collect(); + }) + .collect(); inc_new_counter_info!("ed25519_verify_cpu", count); rv } @@ -213,7 +215,8 @@ pub fn ed25519_verify_disabled(batches: &[SharedPackets]) -> Vec> { .par_iter() .map(verify_packet_disabled) .collect() - }).collect(); + }) + .collect(); inc_new_counter_info!("ed25519_verify_disabled", count); rv } @@ -435,7 +438,8 @@ mod tests { } assert_eq!(packets.read().unwrap().packets.len(), num_packets_per_batch); packets - }).collect(); + }) + .collect(); assert_eq!(batches.len(), num_batches); batches diff --git a/src/sigverify_stage.rs b/src/sigverify_stage.rs index b2c28fe193..b4ad28e51d 100644 --- a/src/sigverify_stage.rs +++ b/src/sigverify_stage.rs @@ -29,6 +29,7 @@ pub struct SigVerifyStage { } impl SigVerifyStage { + #[allow(clippy::new_ret_no_self)] pub fn new( packet_receiver: Receiver, sigverify_disabled: bool, @@ -37,7 +38,7 @@ impl SigVerifyStage { let (verified_sender, verified_receiver) = channel(); let thread_hdls = Self::verifier_services(packet_receiver, verified_sender, sigverify_disabled); - (SigVerifyStage { thread_hdls }, verified_receiver) + (Self { thread_hdls }, verified_receiver) } fn verify_batch(batch: Vec, sigverify_disabled: bool) -> VerifiedPackets { @@ -106,7 +107,8 @@ impl SigVerifyStage { .add_field( "total_time_ms", influxdb::Value::Integer(total_time_ms as i64), - ).to_owned(), + ) + .to_owned(), ); Ok(()) diff --git a/src/storage_stage.rs b/src/storage_stage.rs index 5a708e7256..b6631c6789 100644 --- a/src/storage_stage.rs +++ b/src/storage_stage.rs @@ -128,7 +128,8 @@ impl StorageStage { break; } } - }).unwrap(); + }) + .unwrap(); StorageStage { t_storage_mining_verifier, diff --git a/src/store_ledger_stage.rs b/src/store_ledger_stage.rs index 4c618e0cf2..02f4063ad6 100644 --- a/src/store_ledger_stage.rs +++ b/src/store_ledger_stage.rs @@ -52,7 +52,8 @@ impl StoreLedgerStage { _ => error!("{:?}", e), } } - }).unwrap(); + }) + .unwrap(); let thread_hdls = vec![t_store_requests]; diff --git a/src/streamer.rs b/src/streamer.rs index 74b0e1d8ad..47afe0a55b 100644 --- a/src/streamer.rs +++ b/src/streamer.rs @@ -59,8 +59,8 @@ pub fn receiver( .name("solana-receiver".to_string()) .spawn(move || { let _ = recv_loop(&sock, &exit, &packet_sender, sender_tag); - () - }).unwrap() + }) + .unwrap() } fn recv_send(sock: &UdpSocket, r: &BlobReceiver) -> Result<()> { @@ -101,7 +101,8 @@ pub fn responder(name: &'static str, sock: Arc, r: BlobReceiver) -> J _ => warn!("{} responder error: {:?}", name, e), } } - }).unwrap() + }) + .unwrap() } //TODO, we would need to stick block 
authentication before we create the @@ -128,7 +129,8 @@ pub fn blob_receiver(sock: Arc, exit: Arc, s: BlobSender) break; } let _ = recv_blobs(&sock, &s); - }).unwrap() + }) + .unwrap() } #[cfg(test)] diff --git a/src/thin_client.rs b/src/thin_client.rs index 568eef29f0..741cafb81b 100644 --- a/src/thin_client.rs +++ b/src/thin_client.rs @@ -141,7 +141,8 @@ impl ThinClient { .add_field( "duration_ms", influxdb::Value::Integer(timing::duration_as_ms(&now.elapsed()) as i64), - ).to_owned(), + ) + .to_owned(), ); result } @@ -254,7 +255,8 @@ impl ThinClient { .add_field( "duration_ms", influxdb::Value::Integer(timing::duration_as_ms(elapsed) as i64), - ).to_owned(), + ) + .to_owned(), ); } @@ -329,7 +331,8 @@ impl ThinClient { .add_field( "duration_ms", influxdb::Value::Integer(timing::duration_as_ms(&now.elapsed()) as i64), - ).to_owned(), + ) + .to_owned(), ); self.signature_status } diff --git a/src/tpu.rs b/src/tpu.rs index 9678c6ef11..a4dc7cfe14 100644 --- a/src/tpu.rs +++ b/src/tpu.rs @@ -29,6 +29,7 @@ pub struct Tpu { } impl Tpu { + #[allow(clippy::new_ret_no_self)] pub fn new( bank: &Arc, tick_duration: Config, @@ -56,7 +57,7 @@ impl Tpu { let (ledger_write_stage, entry_forwarder) = LedgerWriteStage::new(Some(ledger_path), entry_receiver); - let tpu = Tpu { + let tpu = Self { fetch_stage, sigverify_stage, banking_stage, diff --git a/src/tpu_forwarder.rs b/src/tpu_forwarder.rs index 8a110645d3..37a2dc04f7 100644 --- a/src/tpu_forwarder.rs +++ b/src/tpu_forwarder.rs @@ -70,14 +70,15 @@ impl TpuForwarder { sender.clone(), "tpu-forwarder", ) - }).collect(); + }) + .collect(); let thread_hdl = Builder::new() .name("solana-tpu_forwarder".to_string()) .spawn(move || { let _ignored = Self::forward(&receiver, &cluster_info); - () - }).unwrap(); + }) + .unwrap(); thread_hdls.push(thread_hdl); @@ -123,7 +124,8 @@ mod tests { s, ContactInfo::new_with_socketaddr(&socketaddr!([127, 0, 0, 1], port)), ) - }).collect(); + }) + .collect(); let mut cluster_info = ClusterInfo::new(nodes[0].1.clone()); diff --git a/src/tvu.rs b/src/tvu.rs index 2193f00526..5f1726c596 100644 --- a/src/tvu.rs +++ b/src/tvu.rs @@ -54,7 +54,7 @@ impl Tvu { /// * `repair_socket` - my repair socket /// * `retransmit_socket` - my retransmit socket /// * `ledger_path` - path to the ledger file - #[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))] + #[allow(clippy::too_many_arguments)] pub fn new( keypair: Arc, vote_account_keypair: Arc, diff --git a/src/wallet.rs b/src/wallet.rs index 5fbc907bf4..f8140b2a1f 100644 --- a/src/wallet.rs +++ b/src/wallet.rs @@ -417,12 +417,14 @@ pub fn process_command(config: &WalletConfig) -> Result Result { let transaction_count = RpcRequest::GetTransactionCount @@ -542,7 +545,8 @@ pub fn process_command(config: &WalletConfig) -> Result Result bool { @@ -226,7 +227,8 @@ impl WindowUtil for Window { } else { " " } - }).collect(); + }) + .collect(); let buf: Vec<_> = self .iter() @@ -242,7 +244,8 @@ impl WindowUtil for Window { // data.is_none() "c" } - }).collect(); + }) + .collect(); format!( "\n{}: WINDOW ({}): {}\n{}: WINDOW ({}): {}", id, diff --git a/src/window_service.rs b/src/window_service.rs index aa62624d75..89bd052b1f 100644 --- a/src/window_service.rs +++ b/src/window_service.rs @@ -50,7 +50,7 @@ fn repair_backoff(last: &mut u64, times: &mut usize, consumed: u64) -> bool { thread_rng().gen_range(0, *times as u64) == 0 } -#[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))] +#[allow(clippy::too_many_arguments)] fn recv_window( db_ledger: &mut DbLedger, id: &Pubkey, 
@@ -119,7 +119,7 @@ fn recv_window( Ok(()) } -#[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))] +#[allow(clippy::too_many_arguments)] pub fn window_service( db_ledger: Arc>, cluster_info: Arc>, @@ -221,7 +221,8 @@ pub fn window_service( } } } - }).unwrap() + }) + .unwrap() } #[cfg(test)] @@ -305,7 +306,8 @@ mod test { 0, Hash::default(), &gossip_address, - ).into_iter() + ) + .into_iter() .rev() .collect();; s_responder.send(msgs).expect("send"); @@ -425,7 +427,8 @@ mod test { let rv = repair_backoff(&mut last, &mut times, 1) as usize; assert_eq!(times, x + 2); rv - }).sum(); + }) + .sum(); assert_eq!(times, 128); assert_eq!(last, 1); repair_backoff(&mut last, &mut times, 1); @@ -434,7 +437,8 @@ mod test { assert_eq!(times, 2); assert_eq!(last, 2); total - }).sum(); + }) + .sum(); let avg = res / num_tests; assert!(avg >= 3); assert!(avg <= 5); diff --git a/tests/data_replicator.rs b/tests/data_replicator.rs index 794358b7fd..4fab4219f2 100644 --- a/tests/data_replicator.rs +++ b/tests/data_replicator.rs @@ -184,7 +184,8 @@ pub fn cluster_info_retransmit() -> result::Result<()> { s.set_read_timeout(Some(Duration::new(1, 0))).unwrap(); let res = s.recv_from(&mut b.data); res.is_err() //true if failed to receive the retransmit packet - }).collect(); + }) + .collect(); //true if failed receive the retransmit packet, r2, and r3 should succeed //r1 was the sender, so it should fail to receive the packet assert_eq!(res, [true, false, false]); diff --git a/tests/multinode.rs b/tests/multinode.rs index 6f5a57a1ff..e86c6cfb49 100644 --- a/tests/multinode.rs +++ b/tests/multinode.rs @@ -652,14 +652,16 @@ fn test_multi_node_dynamic_network() { &alice_arc.read().unwrap(), &bob_pubkey, Some(500), - ).unwrap(); + ) + .unwrap(); assert_eq!(leader_balance, 500); let leader_balance = retry_send_tx_and_retry_get_balance( &leader_data, &alice_arc.read().unwrap(), &bob_pubkey, Some(1000), - ).unwrap(); + ) + .unwrap(); assert_eq!(leader_balance, 1000); let t1: Vec<_> = (0..num_nodes) @@ -670,8 +672,10 @@ fn test_multi_node_dynamic_network() { .spawn(move || { info!("Spawned thread {}", n); Keypair::new() - }).unwrap() - }).collect(); + }) + .unwrap() + }) + .collect(); info!("Waiting for keypairs to be created"); let keypairs: Vec<_> = t1.into_iter().map(|t| t.join().unwrap()).collect(); @@ -710,8 +714,10 @@ fn test_multi_node_dynamic_network() { None, ); (rd, val) - }).unwrap() - }).collect(); + }) + .unwrap() + }) + .collect(); let mut validators: Vec<_> = t2.into_iter().map(|t| t.join().unwrap()).collect(); @@ -731,7 +737,8 @@ fn test_multi_node_dynamic_network() { &alice_arc.read().unwrap().keypair(), bob_pubkey, &last_id, - ).unwrap(); + ) + .unwrap(); expected_balance += 500; @@ -1101,7 +1108,8 @@ fn run_node( }; } sleep(Duration::new(1, 0)); - }).unwrap() + }) + .unwrap() } #[test] @@ -1487,7 +1495,8 @@ fn test_full_leader_validator_network() { .filter_map(|(i, x)| match empty_iterators.get(&i) { None => Some(x), _ => None, - }).collect(); + }) + .collect(); if node_entries.len() == 0 { break; @@ -1545,7 +1554,8 @@ fn test_broadcast_last_tick() { Arc::new(node.sockets.replicate.pop().unwrap()), blob_receiver_exit.clone(), ) - }).collect(); + }) + .collect(); // Create fullnode, should take 20 seconds to reach end of bootstrap period let bootstrap_height = (NUM_TICKS_PER_SECOND * 20) as u64; diff --git a/tests/programs.rs b/tests/programs.rs index ac19f84470..7993851d08 100644 --- a/tests/programs.rs +++ b/tests/programs.rs @@ -213,7 +213,8 @@ fn test_program_lua_move_funds() { local 
tokens, _ = string.unpack("I", data) accounts[1].tokens = accounts[1].tokens - tokens accounts[2].tokens = accounts[2].tokens + tokens - "#.as_bytes() + "# + .as_bytes() .to_vec(); let program = Program::new(&loader, &userdata); let from = Keypair::new(); diff --git a/tests/replicator.rs b/tests/replicator.rs index 1994539c39..beed577de9 100644 --- a/tests/replicator.rs +++ b/tests/replicator.rs @@ -69,7 +69,8 @@ fn test_replicator_startup() { replicator_node, &leader_info, &replicator_keypair, - ).unwrap(); + ) + .unwrap(); let mut num_entries = 0; for _ in 0..60 { diff --git a/vote-signer/src/bin/main.rs b/vote-signer/src/bin/main.rs index 83fc03a35b..5896e38152 100644 --- a/vote-signer/src/bin/main.rs +++ b/vote-signer/src/bin/main.rs @@ -23,7 +23,8 @@ fn main() -> Result<(), Box> { .value_name("NUM") .takes_value(true) .help("JSON RPC listener port"), - ).get_matches(); + ) + .get_matches(); let port = if let Some(p) = matches.value_of("port") { p.to_string() diff --git a/vote-signer/src/rpc.rs b/vote-signer/src/rpc.rs index cecd6486a7..d89731e819 100644 --- a/vote-signer/src/rpc.rs +++ b/vote-signer/src/rpc.rs @@ -46,10 +46,9 @@ impl VoteSignerRpcService { sleep(Duration::from_millis(100)); } server.unwrap().close(); - () }) .unwrap(); - VoteSignerRpcService { thread_hdl, exit } + Self { thread_hdl, exit } } pub fn exit(&self) {