Upgrade to Rust 1.31.0 (#2052)

* Upgrade to Rust 1.31.0
* Upgrade nightly
* Fix all clippy warnings
* Revert relaxed version check and update
Greg Fitzgerald 2018-12-07 20:01:28 -07:00 committed by GitHub
parent 2bad6584f6
commit 0a83b17cdd
70 changed files with 487 additions and 298 deletions
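Almost all of the churn below is mechanical, coming from two toolchain changes: the 1.31 rustfmt breaks a chained call onto its own line when it follows a multi-line closure, and Clippy's "tool lints" are stable, so `cfg_attr(feature = "cargo-clippy", ...)` shims become plain `#[allow(clippy::...)]` attributes. A minimal, hypothetical sketch of the rustfmt change (not code from this repo):

```rust
fn doubled() -> Vec<u64> {
    // Rust 1.30 rustfmt kept the chained call on the closing-brace line:
    //     }).collect()
    // Rust 1.31 rustfmt breaks the chain after a multi-line closure:
    (0..4u64)
        .map(|x| {
            // multi-line closure body forces the chain break
            x * 2
        })
        .collect()
}
```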

View File

@ -17,7 +17,7 @@ Rust coding conventions
* All Rust code is linted with Clippy. If you'd prefer to ignore its advice, do so explicitly:
```rust
#[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
#[allow(clippy::too_many_arguments)]
```
Note: Clippy defaults can be overridden in the top-level file `.clippy.toml`.
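With tool lints stable, an allow can be applied at any scope. A hedged sketch with hypothetical items (thresholds such as `too-many-arguments-threshold` can alternatively be raised in `.clippy.toml`):

```rust
// Crate-wide opt-out; use sparingly.
#![allow(clippy::module_name_repetitions)]

// Item-scoped opt-out keeps the exception next to the code it excuses.
// Eight parameters exceed Clippy's default threshold of seven.
#[allow(clippy::too_many_arguments)]
fn configure(a: u8, b: u8, c: u8, d: u8, e: u8, f: u8, g: u8, h: u8) {
    let _ = (a, b, c, d, e, f, g, h);
}
```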

View File

@ -40,7 +40,8 @@ fn bench_process_transaction(bencher: &mut Bencher) {
// Finally, return the transaction to the benchmark.
tx
}).collect();
})
.collect();
let mut id = bank.last_id();

View File

@ -68,7 +68,8 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
new.account_keys[1] = Pubkey::new(&to[0..32]);
new.signatures = vec![Signature::new(&sig[0..64])];
new
}).collect();
})
.collect();
// fund all the accounts
transactions.iter().for_each(|tx| {
let fund = Transaction::system_move(
@ -98,7 +99,8 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
.map(|x| {
let len = x.read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
}).collect();
})
.collect();
let (_stage, signal_receiver) = BankingStage::new(
&bank,
verified_receiver,
@ -170,7 +172,8 @@ fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
assert_eq!(new.instructions.len(), progs);
new.signatures = vec![Signature::new(&sig[0..64])];
new
}).collect();
})
.collect();
transactions.iter().for_each(|tx| {
let fund = Transaction::system_move(
&mint.keypair(),
@ -198,7 +201,8 @@ fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
.map(|x| {
let len = x.read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
}).collect();
})
.collect();
let (_stage, signal_receiver) = BankingStage::new(
&bank,
verified_receiver,

View File

@ -2,7 +2,7 @@ steps:
- command: "ci/shellcheck.sh"
name: "shellcheck [public]"
timeout_in_minutes: 20
- command: "ci/docker-run.sh solanalabs/rust:1.30.1 ci/test-checks.sh"
- command: "ci/docker-run.sh solanalabs/rust:1.31.0 ci/test-checks.sh"
name: "checks [public]"
env:
CARGO_TARGET_CACHE_NAME: "checks"
@ -20,12 +20,12 @@ steps:
env:
CARGO_TARGET_CACHE_NAME: "nightly"
timeout_in_minutes: 30
- command: "ci/docker-run.sh solanalabs/rust:1.30.1 ci/test-stable.sh"
- command: "ci/docker-run.sh solanalabs/rust:1.31.0 ci/test-stable.sh"
name: "stable [public]"
env:
CARGO_TARGET_CACHE_NAME: "stable"
timeout_in_minutes: 30
- command: "ci/docker-run.sh solanalabs/rust-nightly:2018-11-12 ci/test-nightly.sh"
- command: "ci/docker-run.sh solanalabs/rust-nightly:2018-12-05 ci/test-nightly.sh"
name: "nightly [public]"
env:
CARGO_TARGET_CACHE_NAME: "nightly"

View File

@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/buildkite.yml to pick up the new image tag
FROM rust:1.30.1
FROM rust:1.31.0
RUN set -x && \
apt update && \
@ -20,8 +20,8 @@ RUN set -x && \
rsync \
sudo \
&& \
rustup component add rustfmt-preview && \
rustup component add clippy-preview && \
rustup component add rustfmt && \
rustup component add clippy && \
rm -rf /var/lib/apt/lists/* && \
rustc --version && \
cargo --version

View File

@ -24,8 +24,8 @@ nightly)
require cargo 1.32.[0-9]+-nightly
;;
stable)
require rustc 1.3[01].[0-9]+
require cargo 1.3[01].[0-9]+
require rustc 1.31.[0-9]+
require cargo 1.31.[0-9]+
;;
*)
echo Error: unknown argument: "$1"

View File

@ -40,19 +40,22 @@ fn main() -> Result<(), Box<error::Error>> {
.takes_value(true)
.required(true)
.help("File from which to read the mint's keypair"),
).arg(
)
.arg(
Arg::with_name("slice")
.long("slice")
.value_name("SECS")
.takes_value(true)
.help("Time slice over which to limit requests to drone"),
).arg(
)
.arg(
Arg::with_name("cap")
.long("cap")
.value_name("NUM")
.takes_value(true)
.help("Request limit for time slice"),
).get_matches();
)
.get_matches();
let mint_keypair =
read_keypair(matches.value_of("keypair").unwrap()).expect("failed to read client keypair");
@ -139,7 +142,8 @@ fn main() -> Result<(), Box<error::Error>> {
io::ErrorKind::Other,
format!("Drone response: {:?}", err),
))
})).then(|_| Ok(()));
}))
.then(|_| Ok(()));
tokio::spawn(server)
});
tokio::run(done);

View File

@ -131,7 +131,8 @@ impl Drone {
.add_field(
"request_current",
influxdb::Value::Integer(self.request_current as i64),
).to_owned(),
)
.to_owned(),
);
info!("Requesting airdrop of {} to {:?}", tokens, to);
@ -283,7 +284,8 @@ pub fn run_local_drone(mint_keypair: Keypair, sender: Sender<SocketAddr>) {
io::ErrorKind::Other,
format!("Drone response: {:?}", err),
))
})).then(|_| Ok(()));
}))
.then(|_| Ok(()));
tokio::spawn(server)
});
tokio::run(done);

View File

@ -236,13 +236,15 @@ pub fn set_panic_hook(program: &'static str) {
// TODO: use ono.message() when it becomes stable
ono.to_string(),
),
).add_field(
)
.add_field(
"location",
influxdb::Value::String(match ono.location() {
Some(location) => location.to_string(),
None => "?".to_string(),
}),
).add_field("host_id", influxdb::Value::Integer(*HOST_ID))
)
.add_field("host_id", influxdb::Value::Integer(*HOST_ID))
.to_owned(),
);
// Flush metrics immediately in case the process exits immediately
@ -359,10 +361,12 @@ mod test {
.add_field(
"random_bool",
influxdb::Value::Boolean(random::<u8>() < 128),
).add_field(
)
.add_field(
"random_int",
influxdb::Value::Integer(random::<u8>() as i64),
).to_owned();
)
.to_owned();
agent.submit(point);
}

View File

@ -254,7 +254,8 @@ mod test {
let index = index as usize;
let key = &tx.account_keys[index];
(key, index < tx.signatures.len())
}).zip(program_accounts.iter_mut())
})
.zip(program_accounts.iter_mut())
.map(|((key, is_signer), account)| KeyedAccount::new(key, is_signer, account))
.collect();

View File

@ -106,7 +106,7 @@ impl Default for TokenProgram {
}
impl TokenProgram {
#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))]
#[allow(clippy::needless_pass_by_value)]
fn map_to_invalid_args(err: std::boxed::Box<bincode::ErrorKind>) -> Error {
warn!("invalid argument: {:?}", err);
Error::InvalidArgument
@ -424,7 +424,8 @@ impl TokenProgram {
} else {
TokenProgram::Invalid
}
}).collect();
})
.collect();
for program_account in &input_program_accounts {
info!("input_program_account: userdata={:?}", program_account);

View File

@ -169,7 +169,8 @@ mod tests {
local tokens, _ = string.unpack("I", data)
accounts[1].tokens = accounts[1].tokens - tokens
accounts[2].tokens = accounts[2].tokens + tokens
"#.as_bytes()
"#
.as_bytes()
.to_vec();
let alice_pubkey = Pubkey::default();
@ -214,7 +215,8 @@ mod tests {
let userdata = r#"
local serialize = load(accounts[2].userdata)().serialize
accounts[3].userdata = serialize({a=1, b=2, c=3}, nil, "s")
"#.as_bytes()
"#
.as_bytes()
.to_vec();
let owner = Pubkey::default();
let program_account = Account {
@ -295,7 +297,8 @@ mod tests {
let data = format!(
r#"{{m=2, n={{"{}","{}","{}"}}, tokens=100}}"#,
carol_pubkey, dan_pubkey, erin_pubkey
).as_bytes()
)
.as_bytes()
.to_vec();
process(&owner, &mut keyed_accounts, &data, 0).unwrap();

View File

@ -15,10 +15,10 @@ pub struct Hasher {
}
impl Hasher {
pub fn hash(&mut self, val: &[u8]) -> () {
pub fn hash(&mut self, val: &[u8]) {
self.hasher.input(val);
}
pub fn hashv(&mut self, vals: &[&[u8]]) -> () {
pub fn hashv(&mut self, vals: &[&[u8]]) {
for val in vals {
self.hash(val);
}

View File

@ -107,7 +107,8 @@ impl SystemTransaction for Transaction {
.map(|(i, (_, amount))| {
let spend = SystemInstruction::Move { tokens: *amount };
Instruction::new(0, &spend, vec![0, i as u8 + 1])
}).collect();
})
.collect();
let to_keys: Vec<_> = moves.iter().map(|(to_key, _)| *to_key).collect();
Transaction::new_with_instructions(

View File

@ -293,7 +293,8 @@ mod tests {
14, 229, 239, 119, 93, 5, 218, 161, 35, 3, 33, 0, 36, 100, 158, 252, 33, 161, 97, 185,
62, 89, 99, 195, 250, 249, 187, 189, 171, 118, 241, 90, 248, 14, 68, 219, 231, 62, 157,
5, 142, 27, 210, 117,
])).expect("fu");
]))
.unwrap();
let to = Pubkey::new(&[
1, 1, 1, 4, 5, 6, 7, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 7, 6, 5, 4,
1, 1, 1,

View File

@ -775,7 +775,8 @@ impl Bank {
self.load_account(tx, &accounts, &mut last_ids, max_age, error_counters)
}
(_, Err(e)) => Err(e),
}).collect()
})
.collect()
}
fn load_executable_accounts(&self, mut program_id: Pubkey) -> Result<Vec<(Pubkey, Account)>> {
@ -815,7 +816,8 @@ impl Bank {
.map(|ix| {
let program_id = tx.program_ids[ix.program_ids_index as usize];
self.load_executable_accounts(program_id)
}).collect()
})
.collect()
}
pub fn store_accounts(
@ -889,7 +891,8 @@ impl Bank {
debug!("process transaction failed {:?}", e);
None
}
}).collect();
})
.collect();
// unlock all the accounts with errors which are filtered by the above `filter_map`
if !processed_transactions.is_empty() {
let hash = Transaction::hash(&processed_transactions);
@ -933,7 +936,8 @@ impl Bank {
},
)
}
}).collect();
})
.collect();
let execution_elapsed = now.elapsed();
let now = Instant::now();
self.store_accounts(txs, &executed, &loaded_accounts);
@ -1052,7 +1056,8 @@ impl Bank {
);
self.unlock_accounts(&e.transactions, &results);
Self::first_err(&results)
}).collect();
})
.collect();
Self::first_err(&results)
}
@ -1169,14 +1174,16 @@ impl Bank {
Some(tokens)
} else {
None
}.expect("invalid ledger, needs to start with mint deposit");
}
.expect("invalid ledger, needs to start with mint deposit");
instruction = deserialize(tx.userdata(1)).unwrap();
let leader_payment = if let SystemInstruction::Move { tokens } = instruction {
Some(tokens)
} else {
None
}.expect("invalid ledger, bootstrap leader payment expected");
}
.expect("invalid ledger, bootstrap leader payment expected");
assert!(leader_payment <= mint_deposit);
assert!(leader_payment > 0);
@ -1737,7 +1744,8 @@ mod tests {
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
bank.register_tick(&last_id);
last_id
}).collect();
})
.collect();
assert_eq!(bank.count_valid_ids(&[]).len(), 0);
assert_eq!(bank.count_valid_ids(&[mint.last_id()]).len(), 0);
for (i, id) in bank.count_valid_ids(&ids).iter().enumerate() {
@ -1979,12 +1987,11 @@ mod tests {
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
bank.add_account_subscription(bank_sub_id, alice.pubkey(), sink);
assert!(
bank.account_subscriptions
.write()
.unwrap()
.contains_key(&alice.pubkey())
);
assert!(bank
.account_subscriptions
.write()
.unwrap()
.contains_key(&alice.pubkey()));
let account = bank.get_account(&alice.pubkey()).unwrap();
bank.check_account_subscriptions(&alice.pubkey(), &account);
@ -1996,13 +2003,11 @@ mod tests {
}
bank.remove_account_subscription(&bank_sub_id, &alice.pubkey());
assert!(
!bank
.account_subscriptions
.write()
.unwrap()
.contains_key(&alice.pubkey())
);
assert!(!bank
.account_subscriptions
.write()
.unwrap()
.contains_key(&alice.pubkey()));
}
#[test]
fn test_bank_signature_subscribe() {
@ -2021,12 +2026,11 @@ mod tests {
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
bank.add_signature_subscription(bank_sub_id, signature, sink);
assert!(
bank.signature_subscriptions
.write()
.unwrap()
.contains_key(&signature)
);
assert!(bank
.signature_subscriptions
.write()
.unwrap()
.contains_key(&signature));
bank.check_signature_subscriptions(&signature, RpcSignatureStatus::Confirmed);
let string = transport_receiver.poll();
@ -2037,13 +2041,11 @@ mod tests {
}
bank.remove_signature_subscription(&bank_sub_id, &signature);
assert!(
!bank
.signature_subscriptions
.write()
.unwrap()
.contains_key(&signature)
);
assert!(!bank
.signature_subscriptions
.write()
.unwrap()
.contains_key(&signature));
}
#[test]
fn test_first_err() {

View File

@ -44,6 +44,7 @@ pub struct BankingStage {
impl BankingStage {
/// Create the stage using `bank`. Exit when `verified_receiver` is dropped.
#[allow(clippy::new_ret_no_self)]
pub fn new(
bank: &Arc<Bank>,
verified_receiver: Receiver<VerifiedPackets>,
@ -105,11 +106,13 @@ impl BankingStage {
};
thread_banking_exit.store(true, Ordering::Relaxed);
return_result
}).unwrap()
}).collect();
})
.unwrap()
})
.collect();
(
BankingStage {
Self {
bank_thread_hdls,
poh_service,
compute_finality_service,
@ -127,7 +130,8 @@ impl BankingStage {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
}).collect()
})
.collect()
}
fn process_transactions(
@ -190,7 +194,8 @@ impl BankingStage {
None
}
}
}).collect();
})
.collect();
debug!("verified transactions {}", transactions.len());
Self::process_transactions(bank, &transactions, poh)?;
new_tx_count += transactions.len();
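Several constructors in this diff gain `#[allow(clippy::new_ret_no_self)]` because they return a `(Self, Receiver)` pair rather than `Self` alone, which this Clippy version flags. A hypothetical minimal reproduction:

```rust
use std::sync::mpsc::{channel, Receiver};

struct Stage;

impl Stage {
    // Clippy's new_ret_no_self fires because `new` does not return plain
    // `Self`; the allow documents that the tuple shape is intentional.
    #[allow(clippy::new_ret_no_self)]
    fn new() -> (Self, Receiver<u64>) {
        let (_sender, receiver) = channel();
        (Stage, receiver)
    }
}
```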

View File

@ -62,7 +62,8 @@ fn main() -> Result<()> {
.value_name("NUM")
.takes_value(true)
.help("Use NUM receive sockets"),
).get_matches();
)
.get_matches();
if let Some(n) = matches.value_of("num-recv-sockets") {
num_sockets = max(num_sockets, n.to_string().parse().expect("integer"));

View File

@ -136,7 +136,8 @@ fn send_barrier_transaction(barrier_client: &mut ThinClient, last_id: &mut Hash,
.add_tag(
"op",
influxdb::Value::String("send_barrier_transaction".to_string()),
).add_field("poll_count", influxdb::Value::Integer(poll_count))
)
.add_field("poll_count", influxdb::Value::Integer(poll_count))
.add_field("duration", influxdb::Value::Integer(duration_ms as i64))
.to_owned(),
);
@ -147,7 +148,8 @@ fn send_barrier_transaction(barrier_client: &mut ThinClient, last_id: &mut Hash,
&id.pubkey(),
&Duration::from_millis(100),
&Duration::from_secs(10),
).expect("Failed to get balance");
)
.expect("Failed to get balance");
if balance != 1 {
panic!("Expected an account balance of 1 (balance: {}", balance);
}
@ -201,7 +203,8 @@ fn generate_txs(
Transaction::system_new(id, keypair.pubkey(), 1, last_id),
timestamp(),
)
}).collect();
})
.collect();
let duration = signing_start.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
@ -220,7 +223,8 @@ fn generate_txs(
.add_field(
"duration",
influxdb::Value::Integer(duration_as_ms(&duration) as i64),
).to_owned(),
)
.to_owned(),
);
let sz = transactions.len() / threads;
@ -276,7 +280,8 @@ fn do_tx_transfers(
.add_field(
"duration",
influxdb::Value::Integer(duration_as_ms(&transfer_start.elapsed()) as i64),
).add_field("count", influxdb::Value::Integer(tx_len as i64))
)
.add_field("count", influxdb::Value::Integer(tx_len as i64))
.to_owned(),
);
}
@ -339,7 +344,7 @@ fn fund_keys(client: &mut ThinClient, source: &Keypair, dests: &[Keypair], token
let mut tries = 0;
// this set of transactions just initializes us for bookkeeping
#[cfg_attr(feature = "cargo-clippy", allow(clone_double_ref))] // sigh
#[allow(clippy::clone_double_ref)] // sigh
let mut to_fund_txs: Vec<_> = chunk
.par_iter()
.map(|(k, m)| {
@ -347,7 +352,8 @@ fn fund_keys(client: &mut ThinClient, source: &Keypair, dests: &[Keypair], token
k.clone(),
Transaction::system_move_many(k, &m, Default::default(), 0),
)
}).collect();
})
.collect();
let amount = chunk[0].1[0].1;
@ -731,8 +737,10 @@ fn main() {
.name("solana-client-sample".to_string())
.spawn(move || {
sample_tx_count(&exit_signal, &maxes, first_tx_count, &v, sample_period);
}).unwrap()
}).collect();
})
.unwrap()
})
.collect();
let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));
@ -756,8 +764,10 @@ fn main() {
&shared_tx_active_thread_count,
&total_tx_sent_count,
);
}).unwrap()
}).collect();
})
.unwrap()
})
.collect();
// generate and send transactions for the specified duration
let start = Instant::now();

View File

@ -27,27 +27,31 @@ fn main() {
.long("local")
.takes_value(false)
.help("Detect network address from local machine configuration"),
).arg(
)
.arg(
Arg::with_name("keypair")
.short("k")
.long("keypair")
.value_name("PATH")
.takes_value(true)
.help("/path/to/id.json"),
).arg(
)
.arg(
Arg::with_name("public")
.short("p")
.long("public")
.takes_value(false)
.help("Detect public network address using public servers"),
).arg(
)
.arg(
Arg::with_name("bind")
.short("b")
.long("bind")
.value_name("PORT")
.takes_value(true)
.help("Bind to port or address"),
).get_matches();
)
.get_matches();
let bind_addr: SocketAddr = {
let mut bind_addr = parse_port_or_addr(matches.value_of("bind"), FULLNODE_PORT_RANGE.0);

View File

@ -37,25 +37,29 @@ fn main() {
.short("v")
.long("nosigverify")
.help("Run without signature verification"),
).arg(
)
.arg(
Arg::with_name("no-leader-rotation")
.long("no-leader-rotation")
.help("Disable leader rotation"),
).arg(
)
.arg(
Arg::with_name("identity")
.short("i")
.long("identity")
.value_name("PATH")
.takes_value(true)
.help("Run with the identity found in FILE"),
).arg(
)
.arg(
Arg::with_name("network")
.short("n")
.long("network")
.value_name("HOST:PORT")
.takes_value(true)
.help("Rendezvous with the network at this gossip entry point"),
).arg(
)
.arg(
Arg::with_name("ledger")
.short("l")
.long("ledger")
@ -63,13 +67,15 @@ fn main() {
.takes_value(true)
.required(true)
.help("Use DIR as persistent ledger location"),
).arg(
)
.arg(
Arg::with_name("rpc")
.long("rpc")
.value_name("PORT")
.takes_value(true)
.help("Custom RPC port for this node"),
).get_matches();
)
.get_matches();
let nosigverify = matches.is_present("nosigverify");
let use_only_bootstrap_leader = matches.is_present("no-leader-rotation");

View File

@ -34,7 +34,8 @@ fn main() -> Result<(), Box<error::Error>> {
.takes_value(true)
.required(true)
.help("Number of tokens to create in the mint"),
).arg(
)
.arg(
Arg::with_name("mint")
.short("m")
.long("mint")
@ -42,7 +43,8 @@ fn main() -> Result<(), Box<error::Error>> {
.takes_value(true)
.required(true)
.help("Path to file containing keys of the mint"),
).arg(
)
.arg(
Arg::with_name("bootstrap-leader-keypair")
.short("b")
.long("bootstrap-leader-keypair")
@ -50,7 +52,8 @@ fn main() -> Result<(), Box<error::Error>> {
.takes_value(true)
.required(true)
.help("Path to file containing the bootstrap leader's keypair"),
).arg(
)
.arg(
Arg::with_name("ledger")
.short("l")
.long("ledger")
@ -58,7 +61,8 @@ fn main() -> Result<(), Box<error::Error>> {
.takes_value(true)
.required(true)
.help("Use directory as persistent ledger location"),
).get_matches();
)
.get_matches();
// Load the bootstrap leader keypair
// TODO: Only the public key is really needed, genesis should not have access to the leader's

View File

@ -19,7 +19,8 @@ fn main() -> Result<(), Box<error::Error>> {
.value_name("PATH")
.takes_value(true)
.help("Path to generated file"),
).get_matches();
)
.get_matches();
let mut path = dirs::home_dir().expect("home directory");
let outfile = if matches.is_present("outfile") {

View File

@ -29,14 +29,16 @@ fn main() {
.value_name("PATH")
.takes_value(true)
.help("Run with the identity found in FILE"),
).arg(
)
.arg(
Arg::with_name("network")
.short("n")
.long("network")
.value_name("HOST:PORT")
.takes_value(true)
.help("Rendezvous with the network at this gossip entry point"),
).arg(
)
.arg(
Arg::with_name("ledger")
.short("l")
.long("ledger")
@ -44,7 +46,8 @@ fn main() {
.takes_value(true)
.required(true)
.help("use DIR as persistent ledger location"),
).get_matches();
)
.get_matches();
let ledger_path = matches.value_of("ledger");

View File

@ -78,7 +78,8 @@ fn main() {
.add_field(
"commit",
influxdb::Value::String(git_commit_hash.trim().to_string()),
).to_owned(),
)
.to_owned(),
);
}
let last_median = get_last_metrics(&"median".to_string(), &db, &name, &branch)

View File

@ -14,6 +14,7 @@ pub struct BlobFetchStage {
}
impl BlobFetchStage {
#[allow(clippy::new_ret_no_self)]
pub fn new(socket: Arc<UdpSocket>, exit: Arc<AtomicBool>) -> (Self, BlobReceiver) {
Self::new_multi_socket(vec![socket], exit)
}
@ -27,7 +28,7 @@ impl BlobFetchStage {
.map(|socket| streamer::blob_receiver(socket, exit.clone(), sender.clone()))
.collect();
(BlobFetchStage { exit, thread_hdls }, receiver)
(Self { exit, thread_hdls }, receiver)
}
pub fn close(&self) {

View File

@ -24,8 +24,8 @@ impl<T: BloomHashIndex> Bloom<T> {
/// https://hur.st/bloomfilter/
pub fn random(num: usize, false_rate: f64, max_bits: usize) -> Self {
let min_num_bits = ((num as f64 * false_rate.log(2f64))
/ (1f64 / 2f64.powf(2f64.log(2f64))).log(2f64)).ceil()
as usize;
/ (1f64 / 2f64.powf(2f64.log(2f64))).log(2f64))
.ceil() as usize;
let num_bits = cmp::max(1, cmp::min(min_num_bits, max_bits));
let num_keys = ((num_bits as f64 / num as f64) * 2f64.log(2f64)).round() as usize;
let keys: Vec<u64> = (0..num_keys).map(|_| rand::thread_rng().gen()).collect();
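For reference, the sizing rule behind `Bloom::random` (per the calculator linked in the comment above): for an expected item count n and target false-positive rate p, the standard optima are

```latex
m = \left\lceil \frac{-\,n \ln p}{(\ln 2)^2} \right\rceil \text{ bits}, \qquad
k = \operatorname{round}\!\left(\frac{m}{n} \ln 2\right) \text{ hash functions}.
```

The code then clamps the bit count to `[1, max_bits]` and derives `num_keys` from the clamped value, trading a higher false-positive rate for bounded memory when `max_bits` binds.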

View File

@ -29,7 +29,7 @@ pub enum BroadcastServiceReturnType {
ChannelDisconnected,
}
#[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
#[allow(clippy::too_many_arguments)]
fn broadcast(
max_tick_height: Option<u64>,
tick_height: &mut u64,
@ -167,7 +167,8 @@ fn broadcast(
.add_field(
"transmit-index",
influxdb::Value::Integer(transmit_index.data as i64),
).to_owned(),
)
.to_owned(),
);
Ok(())
@ -284,7 +285,8 @@ impl BroadcastService {
max_tick_height,
tick_height,
)
}).unwrap();
})
.unwrap();
Self { thread_hdl }
}

View File

@ -131,13 +131,12 @@ mod tests {
);
let mut cpu_iv = ivecs.clone();
assert!(
chacha_cbc_encrypt_file(
&Path::new(&ledger_path).join(LEDGER_DATA_FILE),
out_path,
&mut cpu_iv,
).is_ok()
);
assert!(chacha_cbc_encrypt_file(
&Path::new(&ledger_path).join(LEDGER_DATA_FILE),
out_path,
&mut cpu_iv,
)
.is_ok());
let ref_hash = sample_file(&out_path, &samples).unwrap();
@ -175,13 +174,12 @@ mod tests {
);
ivec[0] = i;
ivecs.extend(ivec.clone().iter());
assert!(
chacha_cbc_encrypt_file(
&Path::new(&ledger_path).join(LEDGER_DATA_FILE),
out_path,
&mut ivec,
).is_ok()
);
assert!(chacha_cbc_encrypt_file(
&Path::new(&ledger_path).join(LEDGER_DATA_FILE),
out_path,
&mut ivec,
)
.is_ok());
ref_hashes.push(sample_file(&out_path, &samples).unwrap());
info!(

View File

@ -112,7 +112,7 @@ impl Signable for PruneData {
// TODO These messages should go through the gpu pipeline for spam filtering
#[derive(Serialize, Deserialize, Debug)]
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
#[allow(clippy::large_enum_variant)]
enum Protocol {
/// Gossip protocol messages
PullRequest(Bloom<Hash>, CrdsValue),
@ -204,7 +204,8 @@ impl ClusterInfo {
node.tpu.to_string(),
node.rpc.to_string()
)
}).collect();
})
.collect();
format!(
" NodeInfo.contact_info | Node identifier\n\
@ -216,7 +217,7 @@ impl ClusterInfo {
)
}
pub fn set_leader(&mut self, key: Pubkey) -> () {
pub fn set_leader(&mut self, key: Pubkey) {
let prev = self.leader_id();
let self_id = self.gossip.id;
let now = timestamp();
@ -373,7 +374,8 @@ impl ClusterInfo {
//TODO profile this, may need multiple sockets for par_iter
assert!(rblob.meta.size <= BLOB_SIZE);
s.send_to(&rblob.data[..rblob.meta.size], &v.tvu)
}).collect();
})
.collect();
for e in errs {
if let Err(e) = &e {
inc_new_counter_info!("cluster_info-retransmit-send_to_error", 1, 1);
@ -429,9 +431,11 @@ impl ClusterInfo {
ids_and_tvus
);
e
}).collect();
})
.collect();
send_errs_for_blob
}).collect()
})
.collect()
}
fn create_broadcast_orders<'a>(
@ -531,12 +535,14 @@ impl ClusterInfo {
.lookup(&peer_label)
.and_then(|v| v.contact_info())
.map(|peer_info| (peer, filter, peer_info.gossip, self_info))
}).collect();
})
.collect();
pr.into_iter()
.map(|(peer, filter, gossip, self_info)| {
self.gossip.mark_pull_request_creation_time(peer, now);
(gossip, Protocol::PullRequest(filter, self_info))
}).collect()
})
.collect()
}
fn new_push_requests(&mut self) -> Vec<(SocketAddr, Protocol)> {
let self_id = self.gossip.id;
@ -550,7 +556,8 @@ impl ClusterInfo {
.lookup(&peer_label)
.and_then(|v| v.contact_info())
.map(|p| p.gossip)
}).map(|peer| (peer, Protocol::PushMessage(self_id, msgs.clone())))
})
.map(|peer| (peer, Protocol::PushMessage(self_id, msgs.clone())))
.collect()
}
@ -597,7 +604,8 @@ impl ClusterInfo {
.and_then(|x| {
let leader_label = CrdsValueLabel::ContactInfo(x);
self.gossip.crds.lookup(&leader_label)
}).and_then(|x| x.contact_info())
})
.and_then(|x| x.contact_info())
}
/// randomly pick a node and ask them for updates asynchronously
@ -629,7 +637,8 @@ impl ClusterInfo {
sleep(Duration::from_millis(time_left));
}
}
}).unwrap()
})
.unwrap()
}
fn run_window_request(
from: &NodeInfo,
@ -722,7 +731,8 @@ impl ClusterInfo {
.into_iter()
.flat_map(|request| {
ClusterInfo::handle_protocol(obj, &blob.meta.addr(), request, window, ledger_window)
}).collect()
})
.collect()
}
fn handle_pull_request(
me: &Arc<RwLock<Self>>,
@ -811,7 +821,8 @@ impl ClusterInfo {
prune_msg.sign(&me.read().unwrap().keypair);
let rsp = Protocol::PruneMessage(self_id, prune_msg);
to_blob(rsp, ci.gossip).ok()
}).into_iter()
})
.into_iter()
.collect();
let mut blobs: Vec<_> = pushes
.into_iter()
@ -977,7 +988,8 @@ impl ClusterInfo {
me.gossip.crds.table.len()
);
}
}).unwrap()
})
.unwrap()
}
pub fn spy_node() -> (NodeInfo, UdpSocket) {
@ -1282,8 +1294,10 @@ mod tests {
&vec![
Entry::new_tick(&zero, 0, &zero),
Entry::new_tick(&one, 0, &one),
].to_vec(),
).unwrap();
]
.to_vec(),
)
.unwrap();
path
}
@ -1340,7 +1354,8 @@ mod tests {
&me,
leader_id,
0,
)[0].clone();
)[0]
.clone();
let blob = shared_blob.read().unwrap();
// Test we copied the blob
assert_eq!(blob.meta.size, blob_size);

View File

@ -60,7 +60,8 @@ impl ComputeLeaderFinalityService {
}
None
}).collect()
})
.collect()
};
let super_majority_stake = (2 * total_stake) / 3;
@ -77,7 +78,8 @@ impl ComputeLeaderFinalityService {
.add_field(
"duration_ms",
influxdb::Value::Integer((now - last_valid_validator_timestamp) as i64),
).to_owned(),
)
.to_owned(),
);
}
@ -115,7 +117,8 @@ impl ComputeLeaderFinalityService {
Self::compute_finality(&bank, &mut last_valid_validator_timestamp);
sleep(Duration::from_millis(COMPUTE_FINALITY_MS));
}
}).unwrap();
})
.unwrap();
(ComputeLeaderFinalityService {
compute_finality_thread,
@ -162,7 +165,8 @@ pub mod tests {
// sleep to get a different timestamp in the bank
sleep(Duration::from_millis(1));
last_id
}).collect();
})
.collect();
// Create a total of 10 vote accounts, each will have a balance of 1 (after giving 1 to
// their vote account), for a total staking pool of 10 tokens.
@ -186,7 +190,8 @@ pub mod tests {
bank.process_transaction(&vote_tx).unwrap();
}
vote_account
}).collect();
})
.collect();
// There isn't 2/3 consensus, so the bank's finality value should be the default
let mut last_finality_time = 0;

View File

@ -88,7 +88,8 @@ impl Counter {
.add_field(
"count",
influxdb::Value::Integer(counts as i64 - lastlog as i64),
).to_owned(),
)
.to_owned(),
);
}
}

View File

@ -150,7 +150,8 @@ impl Crds {
} else {
None
}
}).cloned()
})
.cloned()
.collect()
}

View File

@ -44,7 +44,8 @@ impl CrdsGossip {
.map(|val| {
self.push
.process_push_message(&mut self.crds, val.clone(), now)
}).collect();
})
.collect();
results
.into_iter()
.zip(values)
@ -58,7 +59,8 @@ impl CrdsGossip {
} else {
None
}
}).collect()
})
.collect()
}
pub fn new_push_messages(&mut self, now: u64) -> (Pubkey, Vec<Pubkey>, Vec<CrdsValue>) {
@ -182,7 +184,8 @@ mod test {
node.crds.insert(entry.clone(), 0).unwrap();
node.set_self(id);
(new.label().pubkey(), Arc::new(Mutex::new(node)))
}).collect();
})
.collect();
let mut node = CrdsGossip::default();
let id = entry.label().pubkey();
node.crds.insert(entry.clone(), 0).unwrap();
@ -207,7 +210,8 @@ mod test {
origin.crds.insert(new.clone(), 0).unwrap();
node.set_self(id);
(new.label().pubkey(), Arc::new(Mutex::new(node)))
}).collect();
})
.collect();
network.insert(id, Arc::new(Mutex::new(origin)));
network
}
@ -222,7 +226,8 @@ mod test {
node.crds.insert(new.clone(), 0).unwrap();
node.set_self(id);
(new.label().pubkey(), Arc::new(Mutex::new(node)))
}).collect();
})
.collect();
let keys: Vec<Pubkey> = network.keys().cloned().collect();
for k in 0..keys.len() {
let start_info = {
@ -320,7 +325,8 @@ mod test {
.map(|node| {
node.lock().unwrap().purge(now);
node.lock().unwrap().new_push_messages(now)
}).collect();
})
.collect();
let transfered: Vec<_> = requests
.par_iter()
.map(|(from, peers, msgs)| {
@ -345,11 +351,13 @@ mod test {
let now = timestamp();
node.process_prune_msg(*to, destination, &rsps, now, now)
.unwrap()
}).unwrap();
})
.unwrap();
delivered += rsps.is_empty() as usize;
}
(bytes, delivered, num_msgs, prunes)
}).collect();
})
.collect();
for (b, d, m, p) in transfered {
bytes += b;
delivered += d;
@ -415,7 +423,8 @@ mod test {
node.lock()
.unwrap()
.process_pull_request(caller_info, request, now)
}).unwrap();
})
.unwrap();
bytes += serialized_size(&rsp).unwrap() as usize;
msgs += rsp.len();
network.get(&from).map(|node| {
@ -425,7 +434,8 @@ mod test {
overhead += node.lock().unwrap().process_pull_response(from, rsp, now);
});
(bytes, msgs, overhead)
}).collect();
})
.collect();
for (b, m, o) in transfered {
bytes += b;
msgs += m;

View File

@ -60,14 +60,16 @@ impl CrdsGossipPull {
.filter_map(|v| v.value.contact_info())
.filter(|v| {
v.id != self_id && !v.gossip.ip().is_unspecified() && !v.gossip.ip().is_multicast()
}).map(|item| {
})
.map(|item| {
let req_time: u64 = *self.pull_request_time.get(&item.id).unwrap_or(&0);
let weight = cmp::max(
1,
cmp::min(u64::from(u16::max_value()) - 1, (now - req_time) / 1024) as u32,
);
(weight, item)
}).collect();
})
.collect();
if options.is_empty() {
return Err(CrdsGossipError::NoPeers);
}
@ -179,7 +181,8 @@ impl CrdsGossipPull {
.map(|val| (val.value_hash, val.local_timestamp));
crds.remove(label);
rv
}).collect();
})
.collect();
self.purged_values.append(&mut purged);
}
/// Purge values from the `self.purged_values` queue that are older then purge_timeout

View File

@ -221,7 +221,8 @@ impl CrdsGossipPush {
} else {
Some(k)
}
}).cloned()
})
.cloned()
.collect();
for k in old_msgs {
self.push_messages.remove(&k);

View File

@ -238,7 +238,8 @@ pub fn retransmit_all_leader_blocks(
.add_field(
"count",
influxdb::Value::Integer(retransmit_queue.len() as i64),
).to_owned(),
)
.to_owned(),
);
if !retransmit_queue.is_empty() {
@ -641,7 +642,8 @@ mod test {
let begin = k * gap + 1;
let end = (k + 1) * gap;
(begin..end)
}).collect();
})
.collect();
assert_eq!(
find_missing_data_indexes(
slot,

View File

@ -324,7 +324,8 @@ pub fn generate_coding(
.map(|(i, l)| {
trace!("{} i: {} data: {}", id, i, l.data[0]);
&l.data[..max_data_size]
}).collect();
})
.collect();
let mut coding_locks: Vec<_> = coding_blobs.iter().map(|b| b.write().unwrap()).collect();
@ -334,7 +335,8 @@ pub fn generate_coding(
.map(|(i, l)| {
trace!("{} i: {} coding: {}", id, i, l.data[0],);
&mut l.data_mut()[..max_data_size]
}).collect();
})
.collect();
generate_coding_blocks(coding_ptrs.as_mut_slice(), &data_ptrs)?;
debug!(
@ -575,10 +577,11 @@ pub mod test {
coding_blocks.iter_mut().map(|x| x.as_mut_slice()).collect();
let v_slices: Vec<_> = vs.iter().map(|x| x.as_slice()).collect();
assert!(
generate_coding_blocks(coding_blocks_slices.as_mut_slice(), v_slices.as_slice(),)
.is_ok()
);
assert!(generate_coding_blocks(
coding_blocks_slices.as_mut_slice(),
v_slices.as_slice(),
)
.is_ok());
}
trace!("coding blocks:");
for b in &coding_blocks {
@ -594,13 +597,12 @@ pub mod test {
coding_blocks.iter_mut().map(|x| x.as_mut_slice()).collect();
let mut v_slices: Vec<_> = vs.iter_mut().map(|x| x.as_mut_slice()).collect();
assert!(
decode_blocks(
v_slices.as_mut_slice(),
coding_blocks_slices.as_mut_slice(),
erasures.as_slice(),
).is_ok()
);
assert!(decode_blocks(
v_slices.as_mut_slice(),
coding_blocks_slices.as_mut_slice(),
erasures.as_slice(),
)
.is_ok());
}
trace!("vs:");
@ -635,7 +637,8 @@ pub mod test {
slot_height,
data_l.index().unwrap(),
&data_l.data[..data_l.data_size().unwrap() as usize],
).expect("Expected successful put into data column of ledger");
)
.expect("Expected successful put into data column of ledger");
} else {
db_ledger
.write_shared_blobs(slot_height, vec![data].into_iter())
@ -661,7 +664,8 @@ pub mod test {
slot_height,
index,
&coding_lock.data[..data_size as usize + BLOB_HEADER_SIZE],
).unwrap();
)
.unwrap();
}
}
@ -692,15 +696,14 @@ pub mod test {
// Generate the coding blocks
let mut index = (NUM_DATA + 2) as u64;
assert!(
generate_coding(
&Pubkey::default(),
&mut window,
offset as u64,
num_blobs,
&mut index
).is_ok()
);
assert!(generate_coding(
&Pubkey::default(),
&mut window,
offset as u64,
num_blobs,
&mut index
)
.is_ok());
assert_eq!(index, (NUM_DATA - NUM_CODING) as u64);
// put junk in the tails, simulates re-used blobs

View File

@ -14,6 +14,7 @@ pub struct FetchStage {
}
impl FetchStage {
#[allow(clippy::new_ret_no_self)]
pub fn new(sockets: Vec<UdpSocket>, exit: Arc<AtomicBool>) -> (Self, PacketReceiver) {
let tx_sockets = sockets.into_iter().map(Arc::new).collect();
Self::new_multi_socket(tx_sockets, exit)
@ -28,7 +29,7 @@ impl FetchStage {
.map(|socket| streamer::receiver(socket, exit.clone(), sender.clone(), "fetch-stage"))
.collect();
(FetchStage { exit, thread_hdls }, receiver)
(Self { exit, thread_hdls }, receiver)
}
pub fn close(&self) {

View File

@ -51,7 +51,7 @@ impl LeaderServices {
self.tpu.is_exited()
}
pub fn exit(&self) -> () {
pub fn exit(&self) {
self.tpu.exit();
}
}
@ -63,7 +63,7 @@ pub struct ValidatorServices {
impl ValidatorServices {
fn new(tvu: Tvu, tpu_forwarder: TpuForwarder) -> Self {
ValidatorServices { tvu, tpu_forwarder }
Self { tvu, tpu_forwarder }
}
pub fn join(self) -> Result<Option<TvuReturnType>> {
@ -76,7 +76,7 @@ impl ValidatorServices {
self.tvu.is_exited()
}
pub fn exit(&self) -> () {
pub fn exit(&self) {
self.tvu.exit()
}
}
@ -201,7 +201,7 @@ impl Fullnode {
}
/// Create a fullnode instance acting as a leader or validator.
#[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
#[allow(clippy::too_many_arguments)]
pub fn new_with_bank(
keypair: Arc<Keypair>,
vote_account_keypair: Arc<Keypair>,
@ -733,7 +733,8 @@ mod tests {
false,
None,
)
}).collect();
})
.collect();
//each validator can exit in parallel to speed many sequential calls to `join`
vals.iter().for_each(|v| v.exit());
@ -1034,7 +1035,8 @@ mod tests {
ledger_initial_len,
last_id,
&tvu_address,
).into_iter()
)
.into_iter()
.rev()
.collect();
s_responder.send(msgs).expect("send");

View File

@ -328,12 +328,14 @@ impl LeaderScheduler {
.filter(|vote| {
vote.tick_height > lower_bound
&& vote.tick_height <= upper_bound
}).map(|_| vote_state.node_id);
})
.map(|_| vote_state.node_id);
}
}
None
}).collect()
})
.collect()
}
}
@ -415,7 +417,8 @@ impl LeaderScheduler {
} else {
None
}
}).collect();
})
.collect();
active_accounts.sort_by(
|(pk1, t1), (pk2, t2)| {
@ -583,7 +586,8 @@ mod tests {
&mint.keypair(),
new_pubkey,
last_id,
).unwrap();
)
.unwrap();
// Create a vote account
let new_vote_account = create_vote_account(
@ -591,7 +595,8 @@ mod tests {
&bank,
num_vote_account_tokens as u64,
mint.last_id(),
).unwrap();
)
.unwrap();
// Vote to make the validator part of the active set for the entire test
// (we made the active_window_length large enough at the beginning of the test)
push_vote(&new_vote_account, &bank, 1, mint.last_id());
@ -780,7 +785,8 @@ mod tests {
&mint.keypair(),
new_pubkey,
last_id,
).unwrap();
)
.unwrap();
}
let validators_pk: Vec<Pubkey> = validators.iter().map(Keypair::pubkey).collect();
@ -806,7 +812,8 @@ mod tests {
&validators[i],
new_pubkey,
last_id,
).unwrap();
)
.unwrap();
}
let all_validators: Vec<Pubkey> = validators
@ -995,7 +1002,8 @@ mod tests {
&mint.keypair(),
new_pubkey,
last_id,
).unwrap();
)
.unwrap();
// Create a vote account
let new_vote_account = create_vote_account(
@ -1003,7 +1011,8 @@ mod tests {
&bank,
num_vote_account_tokens as u64,
mint.last_id(),
).unwrap();
)
.unwrap();
// Vote at height i * active_window_length for validator i
push_vote(
@ -1226,7 +1235,8 @@ mod tests {
&mint.keypair(),
bootstrap_leader_id,
last_id,
).unwrap();
)
.unwrap();
// Create a vote account
let new_vote_account = create_vote_account(
@ -1234,7 +1244,8 @@ mod tests {
&bank,
vote_account_tokens,
mint.last_id(),
).unwrap();
)
.unwrap();
// Add leader to the active set
push_vote(

View File

@ -75,7 +75,7 @@ const LEDGER_INDEX_FILE: &str = "index";
// use a CONST because there's a cast, and we don't want "sizeof::<u64> as u64"...
const SIZEOF_U64: u64 = size_of::<u64>() as u64;
#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))]
#[allow(clippy::needless_pass_by_value)]
fn err_bincode_to_io(e: Box<bincode::ErrorKind>) -> io::Error {
io::Error::new(io::ErrorKind::Other, e.to_string())
}
@ -493,7 +493,8 @@ impl Block for [Entry] {
.transactions
.iter()
.flat_map(VoteTransaction::get_votes)
}).collect()
})
.collect()
}
}
@ -674,7 +675,8 @@ pub fn make_tiny_test_entries(num: usize) -> Vec<Entry> {
one,
)],
)
}).collect()
})
.collect()
}
pub fn make_large_test_entries(num_entries: usize) -> Vec<Entry> {
@ -792,7 +794,8 @@ mod tests {
num_hashes: 0,
id: Hash::default(),
transactions: vec![],
}).unwrap() as usize;
})
.unwrap() as usize;
assert!(tx_small_size < tx_large_size);
assert!(tx_large_size < PACKET_DATA_SIZE);

View File

@ -54,6 +54,7 @@ impl LedgerWriteStage {
Ok(())
}
#[allow(clippy::new_ret_no_self)]
pub fn new(ledger_path: Option<&str>, entry_receiver: EntryReceiver) -> (Self, EntryReceiver) {
let mut ledger_writer = ledger_path.map(|p| LedgerWriter::open(p, false).unwrap());
@ -77,9 +78,10 @@ impl LedgerWriteStage {
}
}
};
}).unwrap();
})
.unwrap();
(LedgerWriteStage { write_thread }, entry_forwarder)
(Self { write_thread }, entry_forwarder)
}
}

View File

@ -33,7 +33,7 @@ pub struct PohService {
}
impl PohService {
pub fn exit(&self) -> () {
pub fn exit(&self) {
self.poh_exit.store(true, Ordering::Relaxed);
}
@ -56,9 +56,10 @@ impl PohService {
let return_value = Self::tick_producer(&mut poh_recorder_, config, &poh_exit_);
poh_exit_.store(true, Ordering::Relaxed);
return_value
}).unwrap();
})
.unwrap();
PohService {
Self {
tick_producer,
poh_exit,
}
@ -134,7 +135,8 @@ mod tests {
break Ok(());
}
}
}).unwrap()
})
.unwrap()
};
const HASHES_PER_TICK: u64 = 2;

View File

@ -150,6 +150,7 @@ impl ReplicateStage {
Ok(())
}
#[allow(clippy::new_ret_no_self)]
pub fn new(
keypair: Arc<Keypair>,
vote_account_keypair: Arc<Keypair>,
@ -219,10 +220,11 @@ impl ReplicateStage {
}
None
}).unwrap();
})
.unwrap();
(
ReplicateStage {
Self {
t_responder,
t_replicate,
},

View File

@ -83,6 +83,7 @@ pub fn sample_file(in_path: &Path, sample_offsets: &[u64]) -> io::Result<Hash> {
}
impl Replicator {
#[allow(clippy::new_ret_no_self)]
pub fn new(
ledger_path: Option<&str>,
node: Node,
@ -246,7 +247,7 @@ impl Replicator {
Err(e) => info!("Error occurred while sampling: {:?}", e),
}
Ok(Replicator {
Ok(Self {
gossip_service,
fetch_stage,
store_ledger_stage,

View File

@ -182,6 +182,7 @@ mod tests {
io::sink(),
"{:?}",
Error::from(io::Error::new(io::ErrorKind::NotFound, "hi"))
).unwrap();
)
.unwrap();
}
}

View File

@ -72,7 +72,8 @@ fn retransmitter(
}
}
trace!("exiting retransmitter");
}).unwrap()
})
.unwrap()
}
pub struct RetransmitStage {
@ -80,6 +81,7 @@ pub struct RetransmitStage {
}
impl RetransmitStage {
#[allow(clippy::new_ret_no_self)]
pub fn new(
db_ledger: Arc<RwLock<DbLedger>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
@ -111,7 +113,7 @@ impl RetransmitStage {
);
let thread_hdls = vec![t_retransmit, t_window];
(RetransmitStage { thread_hdls }, entry_receiver)
(Self { thread_hdls }, entry_receiver)
}
}

View File

@ -67,10 +67,9 @@ impl JsonRpcService {
sleep(Duration::from_millis(100));
}
server.unwrap().close();
()
})
.unwrap();
JsonRpcService { thread_hdl, exit }
Self { thread_hdl, exit }
}
pub fn exit(&self) {
@ -462,7 +461,8 @@ mod tests {
arc_bank
.process_transaction(&tx)
.expect("process transaction");
}).join()
})
.join()
.unwrap();
assert_eq!(request_processor.get_transaction_count().unwrap(), 1);
}

View File

@ -67,7 +67,6 @@ impl PubSubService {
sleep(Duration::from_millis(100));
}
server.unwrap().close();
()
})
.unwrap();
PubSubService { thread_hdl, exit }
@ -146,7 +145,8 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
code: ErrorCode::InvalidParams,
message: "Invalid Request: Invalid pubkey provided".into(),
data: None,
}).unwrap();
})
.unwrap();
return;
}
let pubkey = Pubkey::new(&pubkey_vec);
@ -194,7 +194,8 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
code: ErrorCode::InvalidParams,
message: "Invalid Request: Invalid signature provided".into(),
data: None,
}).unwrap();
})
.unwrap();
return;
}
let signature = Signature::new(&signature_vec);

View File

@ -208,7 +208,8 @@ mod tests {
.threads(1)
.cors(DomainsValidation::AllowOnly(vec![
AccessControlAllowOrigin::Any,
])).start_http(&rpc_addr)
]))
.start_http(&rpc_addr)
.expect("Unable to start RPC server");
sender.send(*server.address()).unwrap();
server.wait();

View File

@ -32,7 +32,8 @@ fn process_instruction(
let index = index as usize;
let key = &tx.account_keys[index];
(key, index < tx.signatures.len())
}).zip(program_accounts.iter_mut())
})
.zip(program_accounts.iter_mut())
.map(|((key, is_signer), account)| KeyedAccount::new(key, is_signer, account))
.collect();
keyed_accounts.append(&mut keyed_accounts2);
@ -128,7 +129,8 @@ where
// lifetime of this unsafe is only within the scope of the closure
// there is no way to reorder them without breaking borrow checker rules
unsafe { &mut *ptr }
}).collect();
})
.collect();
func(&mut subset)
}
@ -150,7 +152,8 @@ pub fn execute_transaction(
executable_accounts,
program_accounts,
tick_height,
).map_err(|err| RuntimeError::ProgramError(instruction_index as u8, err))?;
)
.map_err(|err| RuntimeError::ProgramError(instruction_index as u8, err))?;
Ok(())
})?;
}

View File

@ -97,7 +97,8 @@ fn verify_packet(packet: &Packet) -> u8 {
untrusted::Input::from(&packet.data[pubkey_start..pubkey_end]),
untrusted::Input::from(&packet.data[msg_start..msg_end]),
untrusted::Input::from(&packet.data[sig_start..sig_end]),
).is_err()
)
.is_err()
{
return 0;
}
@ -195,7 +196,8 @@ pub fn ed25519_verify_cpu(batches: &[SharedPackets]) -> Vec<Vec<u8>> {
.par_iter()
.map(verify_packet)
.collect()
}).collect();
})
.collect();
inc_new_counter_info!("ed25519_verify_cpu", count);
rv
}
@ -213,7 +215,8 @@ pub fn ed25519_verify_disabled(batches: &[SharedPackets]) -> Vec<Vec<u8>> {
.par_iter()
.map(verify_packet_disabled)
.collect()
}).collect();
})
.collect();
inc_new_counter_info!("ed25519_verify_disabled", count);
rv
}
@ -435,7 +438,8 @@ mod tests {
}
assert_eq!(packets.read().unwrap().packets.len(), num_packets_per_batch);
packets
}).collect();
})
.collect();
assert_eq!(batches.len(), num_batches);
batches

View File

@ -29,6 +29,7 @@ pub struct SigVerifyStage {
}
impl SigVerifyStage {
#[allow(clippy::new_ret_no_self)]
pub fn new(
packet_receiver: Receiver<SharedPackets>,
sigverify_disabled: bool,
@ -37,7 +38,7 @@ impl SigVerifyStage {
let (verified_sender, verified_receiver) = channel();
let thread_hdls =
Self::verifier_services(packet_receiver, verified_sender, sigverify_disabled);
(SigVerifyStage { thread_hdls }, verified_receiver)
(Self { thread_hdls }, verified_receiver)
}
fn verify_batch(batch: Vec<SharedPackets>, sigverify_disabled: bool) -> VerifiedPackets {
@ -106,7 +107,8 @@ impl SigVerifyStage {
.add_field(
"total_time_ms",
influxdb::Value::Integer(total_time_ms as i64),
).to_owned(),
)
.to_owned(),
);
Ok(())

View File

@ -128,7 +128,8 @@ impl StorageStage {
break;
}
}
}).unwrap();
})
.unwrap();
StorageStage {
t_storage_mining_verifier,

View File

@ -52,7 +52,8 @@ impl StoreLedgerStage {
_ => error!("{:?}", e),
}
}
}).unwrap();
})
.unwrap();
let thread_hdls = vec![t_store_requests];

View File

@ -59,8 +59,8 @@ pub fn receiver(
.name("solana-receiver".to_string())
.spawn(move || {
let _ = recv_loop(&sock, &exit, &packet_sender, sender_tag);
()
}).unwrap()
})
.unwrap()
}
fn recv_send(sock: &UdpSocket, r: &BlobReceiver) -> Result<()> {
@ -101,7 +101,8 @@ pub fn responder(name: &'static str, sock: Arc<UdpSocket>, r: BlobReceiver) -> J
_ => warn!("{} responder error: {:?}", name, e),
}
}
}).unwrap()
})
.unwrap()
}
//TODO, we would need to stick block authentication before we create the
@ -128,7 +129,8 @@ pub fn blob_receiver(sock: Arc<UdpSocket>, exit: Arc<AtomicBool>, s: BlobSender)
break;
}
let _ = recv_blobs(&sock, &s);
}).unwrap()
})
.unwrap()
}
#[cfg(test)]

View File

@ -141,7 +141,8 @@ impl ThinClient {
.add_field(
"duration_ms",
influxdb::Value::Integer(timing::duration_as_ms(&now.elapsed()) as i64),
).to_owned(),
)
.to_owned(),
);
result
}
@ -254,7 +255,8 @@ impl ThinClient {
.add_field(
"duration_ms",
influxdb::Value::Integer(timing::duration_as_ms(elapsed) as i64),
).to_owned(),
)
.to_owned(),
);
}
@ -329,7 +331,8 @@ impl ThinClient {
.add_field(
"duration_ms",
influxdb::Value::Integer(timing::duration_as_ms(&now.elapsed()) as i64),
).to_owned(),
)
.to_owned(),
);
self.signature_status
}

View File

@ -29,6 +29,7 @@ pub struct Tpu {
}
impl Tpu {
#[allow(clippy::new_ret_no_self)]
pub fn new(
bank: &Arc<Bank>,
tick_duration: Config,
@ -56,7 +57,7 @@ impl Tpu {
let (ledger_write_stage, entry_forwarder) =
LedgerWriteStage::new(Some(ledger_path), entry_receiver);
let tpu = Tpu {
let tpu = Self {
fetch_stage,
sigverify_stage,
banking_stage,

View File

@ -70,14 +70,15 @@ impl TpuForwarder {
sender.clone(),
"tpu-forwarder",
)
}).collect();
})
.collect();
let thread_hdl = Builder::new()
.name("solana-tpu_forwarder".to_string())
.spawn(move || {
let _ignored = Self::forward(&receiver, &cluster_info);
()
}).unwrap();
})
.unwrap();
thread_hdls.push(thread_hdl);
@ -123,7 +124,8 @@ mod tests {
s,
ContactInfo::new_with_socketaddr(&socketaddr!([127, 0, 0, 1], port)),
)
}).collect();
})
.collect();
let mut cluster_info = ClusterInfo::new(nodes[0].1.clone());

View File

@ -54,7 +54,7 @@ impl Tvu {
/// * `repair_socket` - my repair socket
/// * `retransmit_socket` - my retransmit socket
/// * `ledger_path` - path to the ledger file
#[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
#[allow(clippy::too_many_arguments)]
pub fn new(
keypair: Arc<Keypair>,
vote_account_keypair: Arc<Keypair>,

View File

@ -417,12 +417,14 @@ pub fn process_command(config: &WalletConfig) -> Result<String, Box<error::Error
let program_userdata = elf::File::open_path(program_location)
.map_err(|_| {
WalletError::DynamicProgramError("Could not parse program file".to_string())
})?.get_section(PLATFORM_SECTION_C)
})?
.get_section(PLATFORM_SECTION_C)
.ok_or_else(|| {
WalletError::DynamicProgramError(
"Could not find entrypoint in program file".to_string(),
)
})?.data
})?
.data
.clone();
let tx = Transaction::system_create(
@ -470,7 +472,8 @@ pub fn process_command(config: &WalletConfig) -> Result<String, Box<error::Error
Ok(json!({
"programId": format!("{}", program.pubkey()),
}).to_string())
})
.to_string())
}
WalletCommand::GetTransactionCount => {
let transaction_count = RpcRequest::GetTransactionCount
@ -542,7 +545,8 @@ pub fn process_command(config: &WalletConfig) -> Result<String, Box<error::Error
Ok(json!({
"signature": signature_str,
"processId": format!("{}", contract_state.pubkey()),
}).to_string())
})
.to_string())
} else if timestamp == None {
let last_id = get_last_id(&rpc_client)?;
@ -597,7 +601,8 @@ pub fn process_command(config: &WalletConfig) -> Result<String, Box<error::Error
Ok(json!({
"signature": signature_str,
"processId": format!("{}", contract_state.pubkey()),
}).to_string())
})
.to_string())
} else {
Ok("Combo transactions not yet handled".to_string())
}
@ -793,7 +798,8 @@ mod tests {
.required(true)
.help("The number of tokens to request"),
),
).subcommand(SubCommand::with_name("balance").about("Get your balance"))
)
.subcommand(SubCommand::with_name("balance").about("Get your balance"))
.subcommand(
SubCommand::with_name("cancel")
.about("Cancel a transfer")
@ -805,7 +811,8 @@ mod tests {
.required(true)
.help("The process id of the transfer to cancel"),
),
).subcommand(
)
.subcommand(
SubCommand::with_name("confirm")
.about("Confirm transaction by signature")
.arg(
@ -816,7 +823,8 @@ mod tests {
.required(true)
.help("The transaction signature to confirm"),
),
).subcommand(
)
.subcommand(
SubCommand::with_name("deploy")
.about("Deploy a program")
.arg(
@ -827,10 +835,12 @@ mod tests {
.required(true)
.help("/path/to/program.o"),
), // TODO: Add "loader" argument; current default is bpf_loader
).subcommand(
)
.subcommand(
SubCommand::with_name("get-transaction-count")
.about("Get current transaction count"),
).subcommand(
)
.subcommand(
SubCommand::with_name("pay")
.about("Send a payment")
.arg(
@ -840,27 +850,31 @@ mod tests {
.takes_value(true)
.required(true)
.help("The pubkey of recipient"),
).arg(
)
.arg(
Arg::with_name("tokens")
.index(2)
.value_name("NUM")
.takes_value(true)
.required(true)
.help("The number of tokens to send"),
).arg(
)
.arg(
Arg::with_name("timestamp")
.long("after")
.value_name("DATETIME")
.takes_value(true)
.help("A timestamp after which transaction will execute"),
).arg(
)
.arg(
Arg::with_name("timestamp-pubkey")
.long("require-timestamp-from")
.value_name("PUBKEY")
.takes_value(true)
.requires("timestamp")
.help("Require timestamp from this third party"),
).arg(
)
.arg(
Arg::with_name("witness")
.long("require-signature-from")
.value_name("PUBKEY")
@ -868,12 +882,14 @@ mod tests {
.multiple(true)
.use_delimiter(true)
.help("Any third party signatures required to unlock the tokens"),
).arg(
)
.arg(
Arg::with_name("cancelable")
.long("cancelable")
.takes_value(false),
),
).subcommand(
)
.subcommand(
SubCommand::with_name("send-signature")
.about("Send a signature to authorize a transfer")
.arg(
@ -883,7 +899,8 @@ mod tests {
.takes_value(true)
.required(true)
.help("The pubkey of recipient"),
).arg(
)
.arg(
Arg::with_name("process-id")
.index(2)
.value_name("PROCESS_ID")
@ -891,7 +908,8 @@ mod tests {
.required(true)
.help("The process id of the transfer to authorize"),
),
).subcommand(
)
.subcommand(
SubCommand::with_name("send-timestamp")
.about("Send a timestamp to unlock a transfer")
.arg(
@ -901,14 +919,16 @@ mod tests {
.takes_value(true)
.required(true)
.help("The pubkey of recipient"),
).arg(
)
.arg(
Arg::with_name("process-id")
.index(2)
.value_name("PROCESS_ID")
.takes_value(true)
.required(true)
.help("The process id of the transfer to unlock"),
).arg(
)
.arg(
Arg::with_name("datetime")
.long("date")
.value_name("DATETIME")

View File

@ -90,7 +90,8 @@ impl WindowUtil for Window {
}
self[i].clear_data();
Some(pix)
}).collect()
})
.collect()
}
fn blob_idx_in_window(&self, id: &Pubkey, pix: u64, consumed: u64, received: &mut u64) -> bool {
@ -226,7 +227,8 @@ impl WindowUtil for Window {
} else {
" "
}
}).collect();
})
.collect();
let buf: Vec<_> = self
.iter()
@ -242,7 +244,8 @@ impl WindowUtil for Window {
// data.is_none()
"c"
}
}).collect();
})
.collect();
format!(
"\n{}: WINDOW ({}): {}\n{}: WINDOW ({}): {}",
id,

View File

@ -50,7 +50,7 @@ fn repair_backoff(last: &mut u64, times: &mut usize, consumed: u64) -> bool {
thread_rng().gen_range(0, *times as u64) == 0
}
#[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
#[allow(clippy::too_many_arguments)]
fn recv_window(
db_ledger: &mut DbLedger,
id: &Pubkey,
@ -119,7 +119,7 @@ fn recv_window(
Ok(())
}
#[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
#[allow(clippy::too_many_arguments)]
pub fn window_service(
db_ledger: Arc<RwLock<DbLedger>>,
cluster_info: Arc<RwLock<ClusterInfo>>,
@ -221,7 +221,8 @@ pub fn window_service(
}
}
}
}).unwrap()
})
.unwrap()
}
#[cfg(test)]
@ -305,7 +306,8 @@ mod test {
0,
Hash::default(),
&gossip_address,
).into_iter()
)
.into_iter()
.rev()
.collect();
s_responder.send(msgs).expect("send");
@ -425,7 +427,8 @@ mod test {
let rv = repair_backoff(&mut last, &mut times, 1) as usize;
assert_eq!(times, x + 2);
rv
}).sum();
})
.sum();
assert_eq!(times, 128);
assert_eq!(last, 1);
repair_backoff(&mut last, &mut times, 1);
@ -434,7 +437,8 @@ mod test {
assert_eq!(times, 2);
assert_eq!(last, 2);
total
}).sum();
})
.sum();
let avg = res / num_tests;
assert!(avg >= 3);
assert!(avg <= 5);

View File

@ -184,7 +184,8 @@ pub fn cluster_info_retransmit() -> result::Result<()> {
s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
let res = s.recv_from(&mut b.data);
res.is_err() //true if failed to receive the retransmit packet
}).collect();
})
.collect();
//true if failed receive the retransmit packet, r2, and r3 should succeed
//r1 was the sender, so it should fail to receive the packet
assert_eq!(res, [true, false, false]);

View File

@ -652,14 +652,16 @@ fn test_multi_node_dynamic_network() {
&alice_arc.read().unwrap(),
&bob_pubkey,
Some(500),
).unwrap();
)
.unwrap();
assert_eq!(leader_balance, 500);
let leader_balance = retry_send_tx_and_retry_get_balance(
&leader_data,
&alice_arc.read().unwrap(),
&bob_pubkey,
Some(1000),
).unwrap();
)
.unwrap();
assert_eq!(leader_balance, 1000);
let t1: Vec<_> = (0..num_nodes)
@ -670,8 +672,10 @@ fn test_multi_node_dynamic_network() {
.spawn(move || {
info!("Spawned thread {}", n);
Keypair::new()
}).unwrap()
}).collect();
})
.unwrap()
})
.collect();
info!("Waiting for keypairs to be created");
let keypairs: Vec<_> = t1.into_iter().map(|t| t.join().unwrap()).collect();
@ -710,8 +714,10 @@ fn test_multi_node_dynamic_network() {
None,
);
(rd, val)
}).unwrap()
}).collect();
})
.unwrap()
})
.collect();
let mut validators: Vec<_> = t2.into_iter().map(|t| t.join().unwrap()).collect();
@ -731,7 +737,8 @@ fn test_multi_node_dynamic_network() {
&alice_arc.read().unwrap().keypair(),
bob_pubkey,
&last_id,
).unwrap();
)
.unwrap();
expected_balance += 500;
@ -1101,7 +1108,8 @@ fn run_node(
};
}
sleep(Duration::new(1, 0));
}).unwrap()
})
.unwrap()
}
#[test]
@ -1487,7 +1495,8 @@ fn test_full_leader_validator_network() {
.filter_map(|(i, x)| match empty_iterators.get(&i) {
None => Some(x),
_ => None,
}).collect();
})
.collect();
if node_entries.len() == 0 {
break;
@ -1545,7 +1554,8 @@ fn test_broadcast_last_tick() {
Arc::new(node.sockets.replicate.pop().unwrap()),
blob_receiver_exit.clone(),
)
}).collect();
})
.collect();
// Create fullnode, should take 20 seconds to reach end of bootstrap period
let bootstrap_height = (NUM_TICKS_PER_SECOND * 20) as u64;

View File

@ -213,7 +213,8 @@ fn test_program_lua_move_funds() {
local tokens, _ = string.unpack("I", data)
accounts[1].tokens = accounts[1].tokens - tokens
accounts[2].tokens = accounts[2].tokens + tokens
"#.as_bytes()
"#
.as_bytes()
.to_vec();
let program = Program::new(&loader, &userdata);
let from = Keypair::new();

View File

@ -69,7 +69,8 @@ fn test_replicator_startup() {
replicator_node,
&leader_info,
&replicator_keypair,
).unwrap();
)
.unwrap();
let mut num_entries = 0;
for _ in 0..60 {

View File

@ -23,7 +23,8 @@ fn main() -> Result<(), Box<error::Error>> {
.value_name("NUM")
.takes_value(true)
.help("JSON RPC listener port"),
).get_matches();
)
.get_matches();
let port = if let Some(p) = matches.value_of("port") {
p.to_string()

View File

@ -46,10 +46,9 @@ impl VoteSignerRpcService {
sleep(Duration::from_millis(100));
}
server.unwrap().close();
()
})
.unwrap();
VoteSignerRpcService { thread_hdl, exit }
Self { thread_hdl, exit }
}
pub fn exit(&self) {