Bump rand to 0.8, rand_chacha to 0.3, getrandom to 0.2 (#32871)

* sdk: Add concurrent support for rand 0.7 and 0.8

* Update rand, rand_chacha, and getrandom versions

* Run command to replace `gen_range`

Run `git grep -l gen_range | xargs sed -i'' -e 's/gen_range(\(\S*\), /gen_range(\1../'`

* sdk: Fix users of older `gen_range`

* Replace `hash::new_rand` with `hash::new_with_thread_rng`

Run:
```
git grep -l hash::new_rand | xargs sed -i'' -e 's/hash::new_rand([^)]*/hash::new_with_thread_rng(/'
```

* perf: Use `Keypair::new()` instead of `generate`

* Use older rand version in zk-token-sdk

* program-runtime: Inline random key generation

* bloom: Fix clippy warnings in tests

* streamer: Scope rng usage correctly

* perf: Fix clippy warning

* accounts-db: Map to char to generate a random string

* Remove `from_secret_key_bytes`, it's just `keypair_from_seed`

* ledger: Generate keypairs by hand

* ed25519-tests: Use new rand

* runtime: Use new rand in all tests

* gossip: Clean up clippy and inline keypair generators

* core: Inline keypair generation for tests

* Push sbf lockfile change

* sdk: Sort dependencies correctly

* Remove `hash::new_with_thread_rng`, use `Hash::new_unique()`

* Use Keypair::new where chacha isn't used

* sdk: Fix build by marking rand 0.7 optional

* Hardcode secret key length, add static assertion

* Unify `getrandom` crate usage to fix linking errors

* bloom: Fix tests that require a random hash

* Remove some dependencies, try to unify others

* Remove unnecessary uses of rand and rand_core

* Update lockfiles

* Add back some dependencies to reduce rebuilds

* Increase max rebuilds from 14 to 15

* frozen-abi: Remove `getrandom`

* Bump rebuilds to 17

* Remove getrandom from zk-token-proof
This commit is contained in:
Jon Cinque 2023-08-21 19:11:21 +02:00 committed by GitHub
parent 524274d8b5
commit 0fe902ced7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
97 changed files with 478 additions and 430 deletions

99
Cargo.lock generated
View File

@ -69,7 +69,7 @@ version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
dependencies = [
"getrandom 0.2.8",
"getrandom 0.2.10",
"once_cell",
"version_check",
]
@ -81,7 +81,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f"
dependencies = [
"cfg-if 1.0.0",
"getrandom 0.2.8",
"getrandom 0.2.10",
"once_cell",
"version_check",
]
@ -518,7 +518,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1"
dependencies = [
"futures-core",
"getrandom 0.2.8",
"getrandom 0.2.10",
"instant",
"pin-project-lite",
"rand 0.8.5",
@ -2193,9 +2193,9 @@ dependencies = [
[[package]]
name = "getrandom"
version = "0.2.8"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427"
dependencies = [
"cfg-if 1.0.0",
"js-sys",
@ -4262,7 +4262,7 @@ version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom 0.2.8",
"getrandom 0.2.10",
]
[[package]]
@ -4369,7 +4369,7 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64"
dependencies = [
"getrandom 0.2.8",
"getrandom 0.2.10",
"redox_syscall 0.2.10",
]
@ -5175,7 +5175,7 @@ version = "1.17.0"
dependencies = [
"clap 2.33.3",
"log",
"rand 0.7.3",
"rand 0.8.5",
"rayon",
"solana-account-decoder",
"solana-accounts-db",
@ -5237,8 +5237,8 @@ dependencies = [
"ouroboros",
"percentage",
"qualifier_attr",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rand 0.8.5",
"rand_chacha 0.3.1",
"rayon",
"regex",
"rustc_version 0.4.0",
@ -5315,7 +5315,7 @@ dependencies = [
"clap 3.2.23",
"crossbeam-channel",
"log",
"rand 0.7.3",
"rand 0.8.5",
"rayon",
"solana-client",
"solana-core",
@ -5394,7 +5394,7 @@ dependencies = [
"clap 2.33.3",
"crossbeam-channel",
"log",
"rand 0.7.3",
"rand 0.8.5",
"rayon",
"serde_json",
"serde_yaml 0.9.25",
@ -5435,7 +5435,7 @@ dependencies = [
"bv",
"fnv",
"log",
"rand 0.7.3",
"rand 0.8.5",
"rayon",
"rustc_version 0.4.0",
"serde",
@ -5454,7 +5454,7 @@ dependencies = [
"libsecp256k1",
"log",
"memoffset 0.9.0",
"rand 0.7.3",
"rand 0.8.5",
"solana-measure",
"solana-program-runtime",
"solana-sdk",
@ -5484,7 +5484,7 @@ dependencies = [
"memmap2",
"modular-bitfield",
"num_enum 0.6.1",
"rand 0.7.3",
"rand 0.8.5",
"rayon",
"solana-logger",
"solana-measure",
@ -5681,8 +5681,6 @@ dependencies = [
"indicatif",
"log",
"quinn",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rayon",
"solana-connection-cache",
"solana-measure",
@ -5761,8 +5759,8 @@ dependencies = [
"indexmap 2.0.0",
"indicatif",
"log",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rand 0.8.5",
"rand_chacha 0.3.1",
"rayon",
"rcgen",
"solana-logger",
@ -5797,8 +5795,8 @@ dependencies = [
"min-max-heap",
"num_enum 0.6.1",
"quinn",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rand 0.8.5",
"rand_chacha 0.3.1",
"raptorq",
"rayon",
"rolling-file",
@ -5887,7 +5885,7 @@ dependencies = [
"crossbeam-channel",
"itertools",
"log",
"rand 0.7.3",
"rand 0.8.5",
"serde",
"solana-bench-tps",
"solana-client",
@ -5926,7 +5924,7 @@ version = "1.17.0"
dependencies = [
"assert_matches",
"ed25519-dalek",
"rand 0.7.3",
"rand 0.8.5",
"solana-program-test",
"solana-sdk",
]
@ -5941,7 +5939,7 @@ dependencies = [
"lazy_static",
"log",
"matches",
"rand 0.7.3",
"rand 0.8.5",
"rayon",
"serde",
"solana-logger",
@ -5988,13 +5986,11 @@ dependencies = [
"cc",
"either",
"generic-array 0.14.7",
"getrandom 0.1.16",
"im",
"lazy_static",
"log",
"memmap2",
"once_cell",
"rand_core 0.6.4",
"rustc_version 0.4.0",
"serde",
"serde_bytes",
@ -6104,8 +6100,8 @@ dependencies = [
"matches",
"num-traits",
"num_cpus",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rand 0.8.5",
"rand_chacha 0.3.1",
"rayon",
"rustc_version 0.4.0",
"serde",
@ -6212,8 +6208,8 @@ dependencies = [
"num_cpus",
"num_enum 0.6.1",
"prost 0.11.9",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rand 0.8.5",
"rand_chacha 0.3.1",
"rayon",
"reed-solomon-erasure",
"rocksdb",
@ -6310,7 +6306,7 @@ version = "1.17.0"
dependencies = [
"bincode",
"log",
"rand 0.7.3",
"rand 0.8.5",
"solana-measure",
"solana-program-runtime",
"solana-sdk",
@ -6327,7 +6323,7 @@ dependencies = [
"gag",
"itertools",
"log",
"rand 0.7.3",
"rand 0.8.5",
"rayon",
"serial_test",
"solana-accounts-db",
@ -6420,7 +6416,7 @@ dependencies = [
"gethostname",
"lazy_static",
"log",
"rand 0.7.3",
"rand 0.8.5",
"reqwest",
"serial_test",
"solana-sdk",
@ -6432,7 +6428,7 @@ name = "solana-net-shaper"
version = "1.17.0"
dependencies = [
"clap 3.2.23",
"rand 0.7.3",
"rand 0.8.5",
"serde",
"serde_json",
"solana-logger",
@ -6447,7 +6443,7 @@ dependencies = [
"crossbeam-channel",
"log",
"nix",
"rand 0.7.3",
"rand 0.8.5",
"serde",
"serde_derive",
"socket2 0.5.3",
@ -6484,8 +6480,8 @@ dependencies = [
"log",
"matches",
"nix",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rand 0.8.5",
"rand_chacha 0.3.1",
"rayon",
"serde",
"solana-logger",
@ -6505,7 +6501,7 @@ dependencies = [
"crossbeam-channel",
"log",
"matches",
"rand 0.7.3",
"rand 0.8.5",
"solana-entry",
"solana-ledger",
"solana-logger",
@ -6523,7 +6519,6 @@ version = "1.17.0"
dependencies = [
"clap 3.2.23",
"log",
"rand 0.7.3",
"rayon",
"solana-entry",
"solana-logger",
@ -6557,7 +6552,7 @@ dependencies = [
"console_error_panic_hook",
"console_log",
"curve25519-dalek",
"getrandom 0.2.8",
"getrandom 0.2.10",
"itertools",
"js-sys",
"lazy_static",
@ -6569,8 +6564,7 @@ dependencies = [
"num-derive",
"num-traits",
"parking_lot 0.12.1",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rand 0.8.5",
"rustc_version 0.4.0",
"rustversion",
"serde",
@ -6605,7 +6599,7 @@ dependencies = [
"num-derive",
"num-traits",
"percentage",
"rand 0.7.3",
"rand 0.8.5",
"rustc_version 0.4.0",
"serde",
"solana-frozen-abi",
@ -6912,8 +6906,8 @@ dependencies = [
"once_cell",
"ouroboros",
"percentage",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rand 0.8.5",
"rand_chacha 0.3.1",
"rayon",
"regex",
"rustc_version 0.4.0",
@ -6990,7 +6984,7 @@ dependencies = [
"pbkdf2 0.11.0",
"qstring",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rand 0.8.5",
"rustc_version 0.4.0",
"rustversion",
"serde",
@ -7150,7 +7144,7 @@ dependencies = [
"pkcs8",
"quinn",
"quinn-proto",
"rand 0.7.3",
"rand 0.8.5",
"rcgen",
"rustls 0.21.6",
"solana-logger",
@ -7262,8 +7256,6 @@ dependencies = [
"indexmap 2.0.0",
"indicatif",
"log",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rayon",
"solana-connection-cache",
"solana-measure",
@ -7283,7 +7275,7 @@ dependencies = [
"bincode",
"clap 2.33.3",
"log",
"rand 0.7.3",
"rand 0.8.5",
"rayon",
"solana-clap-utils",
"solana-cli",
@ -7340,8 +7332,8 @@ dependencies = [
"lru",
"matches",
"quinn",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rand 0.8.5",
"rand_chacha 0.3.1",
"rayon",
"rcgen",
"rustls 0.21.6",
@ -7407,7 +7399,7 @@ dependencies = [
"libloading",
"log",
"num_cpus",
"rand 0.7.3",
"rand 0.8.5",
"rayon",
"serde",
"serde_json",
@ -7530,7 +7522,6 @@ dependencies = [
"bytemuck",
"criterion",
"curve25519-dalek",
"getrandom 0.1.16",
"num-derive",
"num-traits",
"solana-program-runtime",

View File

@ -197,7 +197,7 @@ futures-util = "0.3.28"
gag = "1.0.0"
generic-array = { version = "0.14.7", default-features = false }
gethostname = "0.2.3"
getrandom = "0.1.14"
getrandom = "0.2.10"
goauth = "0.13.1"
hex = "0.4.3"
hidapi = { version = "2.4.1", default-features = false }
@ -263,8 +263,8 @@ qualifier_attr = { version = "0.2.2", default-features = false }
quinn = "0.10.2"
quinn-proto = "0.10.2"
quote = "1.0"
rand = "0.7.0"
rand_chacha = "0.2.2"
rand = "0.8.5"
rand_chacha = "0.3.1"
rand_core = "0.6.4"
raptorq = "1.7.0"
rayon = "1.7.0"

View File

@ -136,7 +136,7 @@ fn make_create_message(
maybe_space: Option<u64>,
mint: Option<Pubkey>,
) -> Message {
let space = maybe_space.unwrap_or_else(|| thread_rng().gen_range(0, 1000));
let space = maybe_space.unwrap_or_else(|| thread_rng().gen_range(0..1000));
let instructions: Vec<_> = (0..num_instructions)
.flat_map(|_| {

View File

@ -4500,7 +4500,7 @@ impl AccountsDb {
} else {
false
};
if is_candidate || (can_randomly_shrink && thread_rng().gen_range(0, 10000) == 0) {
if is_candidate || (can_randomly_shrink && thread_rng().gen_range(0..10000) == 0) {
// we are a candidate for shrink, so either append us to the previous append vec
// or recreate us as a new append vec and eliminate the dead accounts
info!(
@ -5712,7 +5712,7 @@ impl AccountsDb {
self.stats
.create_store_count
.fetch_add(1, Ordering::Relaxed);
let path_index = thread_rng().gen_range(0, paths.len());
let path_index = thread_rng().gen_range(0..paths.len());
let store = Arc::new(self.new_storage_entry(
slot,
Path::new(&paths[path_index]),
@ -9697,7 +9697,7 @@ impl AccountsDb {
pub fn check_accounts(&self, pubkeys: &[Pubkey], slot: Slot, num: usize, count: usize) {
let ancestors = vec![(slot, 0)].into_iter().collect();
for _ in 0..num {
let idx = thread_rng().gen_range(0, num);
let idx = thread_rng().gen_range(0..num);
let account = self.load_without_fixed_root(&ancestors, &pubkeys[idx]);
let account1 = Some((
AccountSharedData::new(
@ -9904,7 +9904,7 @@ pub mod test_utils {
// accounts cache!
pub fn update_accounts_bench(accounts: &Accounts, pubkeys: &[Pubkey], slot: u64) {
for pubkey in pubkeys {
let amount = thread_rng().gen_range(0, 10);
let amount = thread_rng().gen_range(0..10);
let account = AccountSharedData::new(amount, 0, AccountSharedData::default().owner());
accounts.store_slow_uncached(slot, pubkey, &account);
}
@ -11405,7 +11405,7 @@ pub mod tests {
let mut pubkeys: Vec<Pubkey> = vec![];
db.create_account(&mut pubkeys, 0, 100, 0, 0);
for _ in 1..100 {
let idx = thread_rng().gen_range(0, 99);
let idx = thread_rng().gen_range(0..99);
let ancestors = vec![(0, 0)].into_iter().collect();
let account = db
.load_without_fixed_root(&ancestors, &pubkeys[idx])
@ -11421,7 +11421,7 @@ pub mod tests {
// check that all the accounts appear with a new root
for _ in 1..100 {
let idx = thread_rng().gen_range(0, 99);
let idx = thread_rng().gen_range(0..99);
let ancestors = vec![(0, 0)].into_iter().collect();
let account0 = db
.load_without_fixed_root(&ancestors, &pubkeys[idx])
@ -11556,7 +11556,7 @@ pub mod tests {
fn update_accounts(accounts: &AccountsDb, pubkeys: &[Pubkey], slot: Slot, range: usize) {
for _ in 1..1000 {
let idx = thread_rng().gen_range(0, range);
let idx = thread_rng().gen_range(0..range);
let ancestors = vec![(slot, 0)].into_iter().collect();
if let Some((mut account, _)) =
accounts.load_without_fixed_root(&ancestors, &pubkeys[idx])
@ -12393,7 +12393,7 @@ pub mod tests {
let mut account = AccountSharedData::new(1, 0, &pubkey);
let mut i = 0;
loop {
let account_bal = thread_rng().gen_range(1, 99);
let account_bal = thread_rng().gen_range(1..99);
account.set_lamports(account_bal);
db.store_for_tests(slot, &[(&pubkey, &account)]);
@ -15194,7 +15194,7 @@ pub mod tests {
// Ordering::Relaxed is ok because of no data dependencies; the modified field is
// completely free-standing cfg(test) control-flow knob.
db.load_limit
.store(thread_rng().gen_range(0, 10) as u64, Ordering::Relaxed);
.store(thread_rng().gen_range(0..10) as u64, Ordering::Relaxed);
// Load should never be unable to find this key
let loaded_account = db

View File

@ -1619,7 +1619,7 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> {
let expected_items_per_bin = item_len * 2 / bins;
// offset bin 0 in the 'binned' array by a random amount.
// This results in calls to insert_new_entry_if_missing_with_lock from different threads starting at different bins.
let random_offset = thread_rng().gen_range(0, bins);
let random_offset = thread_rng().gen_range(0..bins);
let use_disk = self.storage.storage.disk.is_some();
let mut binned = (0..bins)
.map(|mut pubkey_bin| {

View File

@ -91,7 +91,7 @@ impl AncientSlotInfos {
let should_shrink = if capacity > 0 {
let alive_ratio = alive_bytes * 100 / capacity;
alive_ratio < 90
|| if can_randomly_shrink && thread_rng().gen_range(0, 10000) == 0 {
|| if can_randomly_shrink && thread_rng().gen_range(0..10000) == 0 {
was_randomly_shrunk = true;
true
} else {
@ -1202,7 +1202,7 @@ pub mod tests {
let mut data_size = 450;
// random # of extra accounts here
let total_accounts_per_storage =
thread_rng().gen_range(0, total_accounts_per_storage);
thread_rng().gen_range(0..total_accounts_per_storage);
let _pubkeys_and_accounts = storages
.iter()
.map(|storage| {

View File

@ -1067,7 +1067,7 @@ pub mod tests {
let now = Instant::now();
for _ in 0..size {
let sample = thread_rng().gen_range(0, indexes.len());
let sample = thread_rng().gen_range(0..indexes.len());
let account = create_test_account(sample);
assert_eq!(av.get_account_test(indexes[sample]).unwrap(), account);
}

View File

@ -25,6 +25,7 @@ pub fn get_append_vec_path(path: &str) -> TempFile {
let out_dir = get_append_vec_dir();
let rand_string: String = rand::thread_rng()
.sample_iter(&Alphanumeric)
.map(char::from)
.take(30)
.collect();
let dir = format!("{out_dir}/{rand_string}");

View File

@ -497,7 +497,7 @@ pub mod tests {
}
CalculateHashIntermediate::new(
solana_sdk::hash::new_rand(&mut rng),
solana_sdk::hash::Hash::new_unique(),
ct as u64,
pk,
)

View File

@ -381,7 +381,7 @@ where
};
if let ["accounts", file] = parts {
// Randomly distribute the accounts files about the available `account_paths`,
let path_index = thread_rng().gen_range(0, account_paths.len());
let path_index = thread_rng().gen_range(0..account_paths.len());
match account_paths
.get(path_index)
.map(|path_buf| path_buf.as_path())

View File

@ -179,7 +179,7 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> InMemAccountsIndex<T,
// Spread out the scanning across all ages within the window.
// This causes us to scan 1/N of the bins each 'Age'
remaining_ages_to_skip_flushing: AtomicU8::new(
thread_rng().gen_range(0, num_ages_to_distribute_flushes),
thread_rng().gen_range(0..num_ages_to_distribute_flushes),
),
num_ages_to_distribute_flushes,
}
@ -932,7 +932,7 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> InMemAccountsIndex<T,
// random eviction
const N: usize = 1000;
// 1/N chance of eviction
thread_rng().gen_range(0, N) == 0
thread_rng().gen_range(0..N) == 0
}
/// assumes 1 entry in the slot list. Ignores overhead of the HashMap and such

View File

@ -291,7 +291,7 @@ mod tests {
const MAX_CACHE_SIZE: usize = 17 * (CACHE_ENTRY_SIZE + DATA_SIZE);
let mut rng = ChaChaRng::from_seed(SEED);
let cache = ReadOnlyAccountsCache::new(MAX_CACHE_SIZE);
let slots: Vec<Slot> = repeat_with(|| rng.gen_range(0, 1000)).take(5).collect();
let slots: Vec<Slot> = repeat_with(|| rng.gen_range(0..1000)).take(5).collect();
let pubkeys: Vec<Pubkey> = repeat_with(|| {
let mut arr = [0u8; 32];
rng.fill(&mut arr[..]);

View File

@ -98,7 +98,7 @@ impl StakeReward {
stake_pubkey: Pubkey::new_unique(),
stake_reward_info: RewardInfo {
reward_type: RewardType::Staking,
lamports: rng.gen_range(1, 200),
lamports: rng.gen_range(1..200),
post_balance: 0, /* unused atm */
commission: None, /* unused atm */
},

View File

@ -127,7 +127,7 @@ mod tests {
.iter()
.map(|address| AccountIndexWriterEntry {
address,
block_offset: rng.gen_range(128, 2048),
block_offset: rng.gen_range(128..2048),
intra_block_offset: 0,
})
.collect();

View File

@ -103,7 +103,7 @@ fn bench_sigs_hashmap(bencher: &mut Bencher) {
#[bench]
fn bench_add_hash(bencher: &mut Bencher) {
let mut rng = rand::thread_rng();
let hash_values: Vec<_> = std::iter::repeat_with(|| solana_sdk::hash::new_rand(&mut rng))
let hash_values: Vec<_> = std::iter::repeat_with(Hash::new_unique)
.take(1200)
.collect();
let mut fail = 0;
@ -112,7 +112,7 @@ fn bench_add_hash(bencher: &mut Bencher) {
for hash_value in &hash_values {
bloom.add(hash_value);
}
let index = rng.gen_range(0, hash_values.len());
let index = rng.gen_range(0..hash_values.len());
if !bloom.contains(&hash_values[index]) {
fail += 1;
}
@ -123,7 +123,7 @@ fn bench_add_hash(bencher: &mut Bencher) {
#[bench]
fn bench_add_hash_atomic(bencher: &mut Bencher) {
let mut rng = rand::thread_rng();
let hash_values: Vec<_> = std::iter::repeat_with(|| solana_sdk::hash::new_rand(&mut rng))
let hash_values: Vec<_> = std::iter::repeat_with(Hash::new_unique)
.take(1200)
.collect();
let mut fail = 0;
@ -136,7 +136,7 @@ fn bench_add_hash_atomic(bencher: &mut Bencher) {
for hash_value in &hash_values {
bloom.add(hash_value);
}
let index = rng.gen_range(0, hash_values.len());
let index = rng.gen_range(0..hash_values.len());
if !bloom.contains(&hash_values[index]) {
fail += 1;
}

View File

@ -308,10 +308,16 @@ mod test {
);
}
fn generate_random_hash() -> Hash {
let mut rng = rand::thread_rng();
let mut hash = [0u8; solana_sdk::hash::HASH_BYTES];
rng.fill(&mut hash);
Hash::new_from_array(hash)
}
#[test]
fn test_atomic_bloom() {
let mut rng = rand::thread_rng();
let hash_values: Vec<_> = std::iter::repeat_with(|| solana_sdk::hash::new_rand(&mut rng))
let hash_values: Vec<_> = std::iter::repeat_with(generate_random_hash)
.take(1200)
.collect();
let bloom: AtomicBloom<_> = Bloom::<Hash>::random(1287, 0.1, 7424).into();
@ -328,7 +334,7 @@ mod test {
for hash_value in hash_values {
assert!(bloom.contains(&hash_value));
}
let false_positive = std::iter::repeat_with(|| solana_sdk::hash::new_rand(&mut rng))
let false_positive = std::iter::repeat_with(generate_random_hash)
.take(10_000)
.filter(|hash_value| bloom.contains(hash_value))
.count();
@ -340,7 +346,7 @@ mod test {
let mut rng = rand::thread_rng();
let keys: Vec<_> = std::iter::repeat_with(|| rng.gen()).take(5).collect();
let mut bloom = Bloom::<Hash>::new(9731, keys.clone());
let hash_values: Vec<_> = std::iter::repeat_with(|| solana_sdk::hash::new_rand(&mut rng))
let hash_values: Vec<_> = std::iter::repeat_with(generate_random_hash)
.take(1000)
.collect();
for hash_value in &hash_values {
@ -375,10 +381,9 @@ mod test {
assert!(bloom.contains(hash_value));
}
// Round trip, inserting new hash values.
let more_hash_values: Vec<_> =
std::iter::repeat_with(|| solana_sdk::hash::new_rand(&mut rng))
.take(1000)
.collect();
let more_hash_values: Vec<_> = std::iter::repeat_with(generate_random_hash)
.take(1000)
.collect();
let bloom: AtomicBloom<_> = bloom.into();
assert_eq!(bloom.num_bits, 9731);
assert_eq!(bloom.bits.len(), (9731 + 63) / 64);
@ -391,7 +396,7 @@ mod test {
for hash_value in &more_hash_values {
assert!(bloom.contains(hash_value));
}
let false_positive = std::iter::repeat_with(|| solana_sdk::hash::new_rand(&mut rng))
let false_positive = std::iter::repeat_with(generate_random_hash)
.take(10_000)
.filter(|hash_value| bloom.contains(hash_value))
.count();
@ -410,7 +415,7 @@ mod test {
for hash_value in &more_hash_values {
assert!(bloom.contains(hash_value));
}
let false_positive = std::iter::repeat_with(|| solana_sdk::hash::new_rand(&mut rng))
let false_positive = std::iter::repeat_with(generate_random_hash)
.take(10_000)
.filter(|hash_value| bloom.contains(hash_value))
.count();

View File

@ -483,7 +483,7 @@ impl<'b, T: Clone + Copy + 'static> Bucket<T> {
let best_bucket = &mut self.data[best_fit_bucket as usize];
let cap_power = best_bucket.contents.capacity_pow2();
let cap = best_bucket.capacity();
let pos = thread_rng().gen_range(0, cap);
let pos = thread_rng().gen_range(0..cap);
let mut success = false;
// max search is increased here by a lot for this search. The idea is that we just have to find an empty bucket somewhere.
// We don't mind waiting on a new write (by searching longer). Writing is done in the background only.

View File

@ -367,11 +367,11 @@ mod tests {
let all_keys = Mutex::new(vec![]);
let gen_rand_value = || {
let count = thread_rng().gen_range(0, max_slot_list_len);
let count = thread_rng().gen_range(0..max_slot_list_len);
let v = (0..count)
.map(|x| (x as usize, x as usize /*thread_rng().gen::<usize>()*/))
.collect::<Vec<_>>();
let range = thread_rng().gen_range(0, 100);
let range = thread_rng().gen_range(0..100);
// pick ref counts that are useful and common
let rc = if range < 50 {
1
@ -380,7 +380,7 @@ mod tests {
} else if range < 70 {
2
} else {
thread_rng().gen_range(0, MAX_LEGAL_REFCOUNT)
thread_rng().gen_range(0..MAX_LEGAL_REFCOUNT)
};
(v, rc)
@ -392,7 +392,7 @@ mod tests {
return None;
}
let len = keys.len();
Some(keys.remove(thread_rng().gen_range(0, len)))
Some(keys.remove(thread_rng().gen_range(0..len)))
};
let return_key = |key| {
let mut keys = all_keys.lock().unwrap();
@ -445,11 +445,11 @@ mod tests {
// verify consistency between hashmap and all bucket maps
for i in 0..10000 {
initial = initial.saturating_sub(1);
if initial > 0 || thread_rng().gen_range(0, 5) == 0 {
if initial > 0 || thread_rng().gen_range(0..5) == 0 {
// insert
let mut to_add = 1;
if initial > 1 && use_batch_insert {
to_add = thread_rng().gen_range(1, (initial / 4).max(2));
to_add = thread_rng().gen_range(1..(initial / 4).max(2));
initial -= to_add;
}
@ -481,12 +481,12 @@ mod tests {
hash_map.write().unwrap().insert(k, v);
return_key(k);
});
let insert = thread_rng().gen_range(0, 2) == 0;
let insert = thread_rng().gen_range(0..2) == 0;
maps.iter().for_each(|map| {
// batch insert can only work for the map with only 1 bucket so that we can batch add to a single bucket
let batch_insert_now = map.buckets.len() == 1
&& use_batch_insert
&& thread_rng().gen_range(0, 2) == 0;
&& thread_rng().gen_range(0..2) == 0;
if batch_insert_now {
// batch insert into the map with 1 bucket 50% of the time
let mut batch_additions = additions
@ -495,12 +495,12 @@ mod tests {
.map(|(k, mut v)| (k, v.0.pop().unwrap()))
.collect::<Vec<_>>();
let mut duplicates = 0;
if batch_additions.len() > 1 && thread_rng().gen_range(0, 2) == 0 {
if batch_additions.len() > 1 && thread_rng().gen_range(0..2) == 0 {
// insert a duplicate sometimes
let item_to_duplicate =
thread_rng().gen_range(0, batch_additions.len());
thread_rng().gen_range(0..batch_additions.len());
let where_to_insert_duplicate =
thread_rng().gen_range(0, batch_additions.len());
thread_rng().gen_range(0..batch_additions.len());
batch_additions.insert(
where_to_insert_duplicate,
batch_additions[item_to_duplicate],
@ -541,13 +541,13 @@ mod tests {
// if we are using batch insert, it is illegal to update, delete, or addref/unref an account until all batch inserts are complete
continue;
}
if thread_rng().gen_range(0, 10) == 0 {
if thread_rng().gen_range(0..10) == 0 {
// update
if let Some(k) = get_key() {
let hm = hash_map.read().unwrap();
let (v, rc) = gen_rand_value();
let v_old = hm.get(&k);
let insert = thread_rng().gen_range(0, 2) == 0;
let insert = thread_rng().gen_range(0..2) == 0;
maps.iter().for_each(|map| {
if insert {
map.insert(&k, (&v, rc))
@ -563,7 +563,7 @@ mod tests {
return_key(k);
}
}
if thread_rng().gen_range(0, 20) == 0 {
if thread_rng().gen_range(0..20) == 0 {
// delete
if let Some(k) = get_key() {
let mut hm = hash_map.write().unwrap();
@ -573,10 +573,10 @@ mod tests {
});
}
}
if thread_rng().gen_range(0, 10) == 0 {
if thread_rng().gen_range(0..10) == 0 {
// add/unref
if let Some(k) = get_key() {
let mut inc = thread_rng().gen_range(0, 2) == 0;
let mut inc = thread_rng().gen_range(0..2) == 0;
let mut hm = hash_map.write().unwrap();
let (v, mut rc) = hm.get(&k).map(|(v, rc)| (v.to_vec(), *rc)).unwrap();
if !inc && rc == 0 {

View File

@ -337,9 +337,9 @@ impl<O: BucketOccupied> BucketStorage<O> {
/// allocate a new memory mapped file of size `bytes` on one of `drives`
fn new_map(drives: &[PathBuf], bytes: u64, stats: &BucketStats) -> (MmapMut, PathBuf) {
let mut measure_new_file = Measure::start("measure_new_file");
let r = thread_rng().gen_range(0, drives.len());
let r = thread_rng().gen_range(0..drives.len());
let drive = &drives[r];
let pos = format!("{}", thread_rng().gen_range(0, u128::MAX),);
let pos = format!("{}", thread_rng().gen_range(0..u128::MAX),);
let file = drive.join(pos);
let mut data = OpenOptions::new()
.read(true)

View File

@ -105,7 +105,7 @@ test-stable-sbf)
# latest mainnet-beta release version.
solana_program_count=$(grep -c 'solana-program v' cargo.log)
rm -f cargo.log
if ((solana_program_count > 14)); then
if ((solana_program_count > 17)); then
echo "Regression of build redundancy ${solana_program_count}."
echo "Review dependency features that trigger redundant rebuilds of solana-program."
exit 1

View File

@ -19,7 +19,6 @@ indexmap = { workspace = true }
indicatif = { workspace = true }
log = { workspace = true }
quinn = { workspace = true }
rand = { workspace = true }
rayon = { workspace = true }
solana-connection-cache = { workspace = true }
solana-measure = { workspace = true }
@ -39,7 +38,6 @@ tokio = { workspace = true, features = ["full"] }
[dev-dependencies]
crossbeam-channel = { workspace = true }
rand_chacha = { workspace = true }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -115,7 +115,7 @@ where
let existing_index = map.get_index_of(addr);
while map.len() >= MAX_CONNECTIONS {
let mut rng = thread_rng();
let n = rng.gen_range(0, MAX_CONNECTIONS);
let n = rng.gen_range(0..MAX_CONNECTIONS);
if let Some(index) = existing_index {
if n == index {
continue;
@ -316,7 +316,7 @@ pub trait ConnectionPool {
/// This randomly picks a connection in the pool.
fn borrow_connection(&self) -> Arc<Self::BaseClientConnection> {
let mut rng = thread_rng();
let n = rng.gen_range(0, self.num_connections());
let n = rng.gen_range(0..self.num_connections());
self.get(n).expect("index is within num_connections")
}
/// Check if we need to create a new connection. If the count of the connections
@ -532,10 +532,10 @@ mod tests {
}
fn get_addr(rng: &mut ChaChaRng) -> SocketAddr {
let a = rng.gen_range(1, 255);
let b = rng.gen_range(1, 255);
let c = rng.gen_range(1, 255);
let d = rng.gen_range(1, 255);
let a = rng.gen_range(1..255);
let b = rng.gen_range(1..255);
let c = rng.gen_range(1..255);
let d = rng.gen_range(1..255);
let addr_str = format!("{a}.{b}.{c}.{d}:80");

View File

@ -52,7 +52,7 @@ fn run_bench_packet_discard(num_ips: usize, bencher: &mut Bencher) {
for batch in batches.iter_mut() {
total += batch.len();
for p in batch.iter_mut() {
let ip_index = thread_rng().gen_range(0, ips.len());
let ip_index = thread_rng().gen_range(0..ips.len());
p.meta_mut().addr = ips[ip_index];
}
}

View File

@ -697,7 +697,7 @@ mod tests {
let vote = from_slots(
vec![(i, 1)],
VoteSource::Gossip,
&keypairs[rng.gen_range(0, 10)],
&keypairs[rng.gen_range(0..10)],
None,
);
latest_unprocessed_votes.update_latest_vote(vote);
@ -712,7 +712,7 @@ mod tests {
let vote = from_slots(
vec![(i, 1)],
VoteSource::Tpu,
&keypairs_tpu[rng.gen_range(0, 10)],
&keypairs_tpu[rng.gen_range(0..10)],
None,
);
latest_unprocessed_votes_tpu.update_latest_vote(vote);

View File

@ -4,7 +4,7 @@ use {
rand::{Rng, SeedableRng},
rand_chacha::ChaChaRng,
rayon::prelude::*,
solana_sdk::signature::Keypair,
solana_sdk::{signature::Keypair, signer::keypair::keypair_from_seed},
};
pub struct GenKeys {
@ -28,13 +28,19 @@ impl GenKeys {
}
pub fn gen_keypair(&mut self) -> Keypair {
Keypair::generate(&mut self.generator)
let mut seed = [0u8; Keypair::SECRET_KEY_LENGTH];
self.generator.fill(&mut seed[..]);
keypair_from_seed(&seed).unwrap()
}
pub fn gen_n_keypairs(&mut self, n: u64) -> Vec<Keypair> {
self.gen_n_seeds(n)
.into_par_iter()
.map(|seed| Keypair::generate(&mut ChaChaRng::from_seed(seed)))
.map(|seed| {
let mut keypair_seed = [0u8; Keypair::SECRET_KEY_LENGTH];
ChaChaRng::from_seed(seed).fill(&mut keypair_seed[..]);
keypair_from_seed(&keypair_seed).unwrap()
})
.collect()
}
}

View File

@ -19,7 +19,7 @@ where
// the given timestamp to be made
pub fn add_request(&mut self, request: T, now: u64) -> Nonce {
let num_expected_responses = request.num_expected_responses();
let nonce = thread_rng().gen_range(0, Nonce::MAX);
let nonce = thread_rng().gen_range(0..Nonce::MAX);
self.requests.put(
nonce,
RequestStatus {

View File

@ -38,7 +38,7 @@ impl WarmQuicCacheService {
let thread_hdl = Builder::new()
.name("solWarmQuicSvc".to_string())
.spawn(move || {
let slot_jitter = thread_rng().gen_range(-CACHE_JITTER_SLOT, CACHE_JITTER_SLOT);
let slot_jitter = thread_rng().gen_range(-CACHE_JITTER_SLOT..CACHE_JITTER_SLOT);
let mut maybe_last_leader = None;
while !exit.load(Ordering::Relaxed) {
let leader_pubkey = poh_recorder

View File

@ -514,7 +514,7 @@ fn test_with_partitions(
for tower in towers.iter_mut() {
let mut fork = tower.last_fork().clone();
if fork.id == 0 {
fork.id = thread_rng().gen_range(1, 1 + num_partitions);
fork.id = thread_rng().gen_range(1..1 + num_partitions);
fork_tree.insert(fork.id, fork.clone());
}
let vote = Vote::new(fork, time);
@ -526,7 +526,7 @@ fn test_with_partitions(
assert_eq!(tower.votes.len(), warmup);
assert_eq!(tower.first_vote().unwrap().lockout, 1 << warmup);
assert!(tower.first_vote().unwrap().lock_height() >= 1 << warmup);
tower.parasite = parasite_rate > thread_rng().gen_range(0.0, 1.0);
tower.parasite = parasite_rate > thread_rng().gen_range(0.0..1.0);
}
let converge_map = calc_fork_map(&towers, &fork_tree);
assert_ne!(calc_tip_converged(&towers, &converge_map), len);
@ -548,7 +548,7 @@ fn test_with_partitions(
})
});
for tower in towers.iter_mut() {
if thread_rng().gen_range(0f64, 1.0f64) < fail_rate {
if thread_rng().gen_range(0f64..1.0f64) < fail_rate {
continue;
}
tower.submit_vote(vote.clone(), &fork_tree, &converge_map, &scores);

View File

@ -911,7 +911,7 @@ pub fn create_ticks(num_ticks: u64, hashes_per_tick: u64, mut hash: Hash) -> Vec
pub fn create_random_ticks(num_ticks: u64, max_hashes_per_tick: u64, mut hash: Hash) -> Vec<Entry> {
repeat_with(|| {
let hashes_per_tick = thread_rng().gen_range(1, max_hashes_per_tick);
let hashes_per_tick = thread_rng().gen_range(1..max_hashes_per_tick);
next_entry_mut(&mut hash, hashes_per_tick, vec![])
})
.take(num_ticks as usize)
@ -1340,7 +1340,7 @@ mod tests {
solana_logger::setup();
for _ in 0..100 {
let mut time = Measure::start("ticks");
let num_ticks = thread_rng().gen_range(1, 100);
let num_ticks = thread_rng().gen_range(1..100);
info!("create {} ticks:", num_ticks);
let mut entries = create_random_ticks(num_ticks, 100, Hash::default());
time.stop();
@ -1348,7 +1348,7 @@ mod tests {
let mut modified = false;
if thread_rng().gen_ratio(1, 2) {
modified = true;
let modify_idx = thread_rng().gen_range(0, num_ticks) as usize;
let modify_idx = thread_rng().gen_range(0..num_ticks) as usize;
entries[modify_idx].hash = hash(&[1, 2, 3]);
}

View File

@ -30,11 +30,9 @@ block-buffer = { workspace = true }
byteorder = { workspace = true, features = ["i128"] }
either = { workspace = true, features = ["use_std"] }
generic-array = { workspace = true, features = ["serde", "more_lengths"] }
getrandom = { workspace = true, features = ["dummy"] }
im = { workspace = true, features = ["rayon", "serde"] }
memmap2 = { workspace = true }
once_cell = { workspace = true, features = ["alloc", "race"] }
rand_core = { workspace = true, features = ["std"] }
subtle = { workspace = true }
[target.'cfg(any(unix, windows))'.dependencies]

View File

@ -21,7 +21,7 @@ fn bench_find_old_labels(bencher: &mut Bencher) {
let mut rng = thread_rng();
let mut crds = Crds::default();
let now = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS + CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 1000;
std::iter::repeat_with(|| (CrdsValue::new_rand(&mut rng, None), rng.gen_range(0, now)))
std::iter::repeat_with(|| (CrdsValue::new_rand(&mut rng, None), rng.gen_range(0..now)))
.take(50_000)
.for_each(|(v, ts)| assert!(crds.insert(v, ts, GossipRoute::LocalMessage).is_ok()));
let stakes = HashMap::from([(Pubkey::new_unique(), 1u64)]);

View File

@ -11,15 +11,14 @@ use {
crds_gossip_pull::{CrdsFilter, CrdsGossipPull},
crds_value::CrdsValue,
},
solana_sdk::hash,
solana_sdk::hash::Hash,
std::sync::RwLock,
test::Bencher,
};
#[bench]
fn bench_hash_as_u64(bencher: &mut Bencher) {
let mut rng = thread_rng();
let hashes: Vec<_> = std::iter::repeat_with(|| hash::new_rand(&mut rng))
let hashes: Vec<_> = std::iter::repeat_with(Hash::new_unique)
.take(1000)
.collect();
bencher.iter(|| {

View File

@ -11,7 +11,7 @@ use {
};
fn make_weights<R: Rng>(rng: &mut R) -> Vec<u64> {
repeat_with(|| rng.gen_range(1, 100)).take(1000).collect()
repeat_with(|| rng.gen_range(1..100)).take(1000).collect()
}
#[bench]

View File

@ -198,7 +198,7 @@ impl PruneData {
#[cfg(test)]
fn new_rand<R: Rng>(rng: &mut R, self_keypair: &Keypair, num_nodes: Option<usize>) -> Self {
let wallclock = crds_value::new_rand_timestamp(rng);
let num_nodes = num_nodes.unwrap_or_else(|| rng.gen_range(0, MAX_PRUNE_DATA_NODES + 1));
let num_nodes = num_nodes.unwrap_or_else(|| rng.gen_range(0..MAX_PRUNE_DATA_NODES + 1));
let prunes = std::iter::repeat_with(Pubkey::new_unique)
.take(num_nodes)
.collect();
@ -3511,7 +3511,7 @@ mod tests {
let keypair = Keypair::new();
let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0);
let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap();
let next_shred_index = rng.gen_range(0, 32_000);
let next_shred_index = rng.gen_range(0..32_000);
let shred = new_rand_shred(&mut rng, next_shred_index, &shredder, &leader);
let other_payload = {
let other_shred = new_rand_shred(&mut rng, next_shred_index, &shredder, &leader);
@ -3835,7 +3835,6 @@ mod tests {
#[test]
fn test_push_vote() {
let mut rng = rand::thread_rng();
let keypair = Arc::new(Keypair::new());
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let cluster_info =
@ -3849,7 +3848,7 @@ mod tests {
// add a vote
let vote = Vote::new(
vec![1, 3, 7], // slots
solana_sdk::hash::new_rand(&mut rng),
Hash::new_unique(),
);
let ix = vote_instruction::vote(
&Pubkey::new_unique(), // vote_pubkey
@ -3878,8 +3877,8 @@ mod tests {
assert_eq!(votes, vec![]);
}
fn new_vote_transaction<R: Rng>(rng: &mut R, slots: Vec<Slot>) -> Transaction {
let vote = Vote::new(slots, solana_sdk::hash::new_rand(rng));
fn new_vote_transaction(slots: Vec<Slot>) -> Transaction {
let vote = Vote::new(slots, Hash::new_unique());
let ix = vote_instruction::vote(
&Pubkey::new_unique(), // vote_pubkey
&Pubkey::new_unique(), // authorized_voter_pubkey
@ -3907,7 +3906,6 @@ mod tests {
}
vote_slots.into_iter().collect()
};
let mut rng = rand::thread_rng();
let keypair = Arc::new(Keypair::new());
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let cluster_info = ClusterInfo::new(contact_info, keypair, SocketAddrSpace::Unspecified);
@ -3915,7 +3913,7 @@ mod tests {
for k in 0..MAX_LOCKOUT_HISTORY {
let slot = k as Slot;
tower.push(slot);
let vote = new_vote_transaction(&mut rng, vec![slot]);
let vote = new_vote_transaction(vec![slot]);
cluster_info.push_vote(&tower, vote);
}
let vote_slots = get_vote_slots(&cluster_info);
@ -3927,7 +3925,7 @@ mod tests {
let slot = MAX_LOCKOUT_HISTORY as Slot;
tower.push(slot);
tower.remove(23);
let vote = new_vote_transaction(&mut rng, vec![slot]);
let vote = new_vote_transaction(vec![slot]);
// New versioned-crds-value should have wallclock later than existing
// entries, otherwise might not get inserted into the table.
sleep(Duration::from_millis(5));
@ -3944,7 +3942,7 @@ mod tests {
tower.push(slot);
tower.remove(17);
tower.remove(5);
let vote = new_vote_transaction(&mut rng, vec![slot]);
let vote = new_vote_transaction(vec![slot]);
cluster_info.push_vote(&tower, vote);
let vote_slots = get_vote_slots(&cluster_info);
assert_eq!(vote_slots.len(), MAX_LOCKOUT_HISTORY);
@ -4349,7 +4347,7 @@ mod tests {
);
//random should be hard to compress
let mut rng = rand::thread_rng();
let range: Vec<Slot> = repeat_with(|| rng.gen_range(1, 32))
let range: Vec<Slot> = repeat_with(|| rng.gen_range(1..32))
.scan(0, |slot, step| {
*slot += step;
Some(*slot)

View File

@ -611,7 +611,7 @@ mod tests {
fn new_rand_port<R: Rng>(rng: &mut R) -> u16 {
let port = rng.gen::<u16>();
let bits = u16::BITS - port.leading_zeros();
let shift = rng.gen_range(0u32, bits + 1u32);
let shift = rng.gen_range(0u32..bits + 1u32);
port.checked_shr(shift).unwrap_or_default()
}
@ -679,8 +679,8 @@ mod tests {
.iter()
.map(|&key| SocketEntry {
key,
index: rng.gen_range(0u8, addrs.len() as u8),
offset: rng.gen_range(0u16, u16::MAX / 64),
index: rng.gen_range(0u8..addrs.len() as u8),
offset: rng.gen_range(0u16..u16::MAX / 64),
})
.collect();
assert_matches!(
@ -693,8 +693,8 @@ mod tests {
.iter()
.map(|&key| SocketEntry {
key,
index: rng.gen_range(0u8, addrs.len() as u8),
offset: rng.gen_range(0u16, u16::MAX / 256),
index: rng.gen_range(0u8..addrs.len() as u8),
offset: rng.gen_range(0u16..u16::MAX / 256),
})
.collect();
assert_matches!(sanitize_entries(&addrs, &sockets), Ok(()));
@ -721,7 +721,7 @@ mod tests {
for _ in 0..1 << 14 {
let addr = addrs.choose(&mut rng).unwrap();
let socket = SocketAddr::new(*addr, new_rand_port(&mut rng));
let key = rng.gen_range(KEYS.start, KEYS.end);
let key = rng.gen_range(KEYS.start..KEYS.end);
if sanitize_socket(&socket).is_ok() {
sockets.insert(key, socket);
assert_matches!(node.set_socket(key, socket), Ok(()));

View File

@ -1091,7 +1091,7 @@ mod tests {
let mut rng = thread_rng();
let mut num_inserts = 0;
for _ in 0..4096 {
let keypair = &keypairs[rng.gen_range(0, keypairs.len())];
let keypair = &keypairs[rng.gen_range(0..keypairs.len())];
let value = CrdsValue::new_rand(&mut rng, Some(keypair));
let local_timestamp = new_rand_timestamp(&mut rng);
if let Ok(()) = crds.insert(value, local_timestamp, GossipRoute::LocalMessage) {
@ -1108,7 +1108,7 @@ mod tests {
check_crds_shards(&crds);
// Remove values one by one and assert that shards stay valid.
while !crds.table.is_empty() {
let index = rng.gen_range(0, crds.table.len());
let index = rng.gen_range(0..crds.table.len());
let key = crds.table.get_index(index).unwrap().0.clone();
crds.remove(&key, /*now=*/ 0);
check_crds_shards(&crds);
@ -1125,9 +1125,9 @@ mod tests {
) {
let size = crds.table.len();
let since = if size == 0 || rng.gen() {
rng.gen_range(0, crds.cursor.0 + 1)
rng.gen_range(0..crds.cursor.0 + 1)
} else {
crds.table[rng.gen_range(0, size)].ordinal
crds.table[rng.gen_range(0..size)].ordinal
};
let num_epoch_slots = crds
.table
@ -1245,7 +1245,7 @@ mod tests {
let mut crds = Crds::default();
let mut num_inserts = 0;
for k in 0..4096 {
let keypair = &keypairs[rng.gen_range(0, keypairs.len())];
let keypair = &keypairs[rng.gen_range(0..keypairs.len())];
let value = CrdsValue::new_rand(&mut rng, Some(keypair));
let local_timestamp = new_rand_timestamp(&mut rng);
if let Ok(()) = crds.insert(value, local_timestamp, GossipRoute::LocalMessage) {
@ -1268,7 +1268,7 @@ mod tests {
assert!(num_epoch_slots > 100, "num epoch slots: {num_epoch_slots}");
// Remove values one by one and assert that nodes indices stay valid.
while !crds.table.is_empty() {
let index = rng.gen_range(0, crds.table.len());
let index = rng.gen_range(0..crds.table.len());
let key = crds.table.get_index(index).unwrap().0.clone();
crds.remove(&key, /*now=*/ 0);
if crds.table.len() % 16 == 0 {
@ -1295,7 +1295,7 @@ mod tests {
let keypairs: Vec<_> = repeat_with(Keypair::new).take(128).collect();
let mut crds = Crds::default();
for k in 0..4096 {
let keypair = &keypairs[rng.gen_range(0, keypairs.len())];
let keypair = &keypairs[rng.gen_range(0..keypairs.len())];
let value = CrdsValue::new_rand(&mut rng, Some(keypair));
let local_timestamp = new_rand_timestamp(&mut rng);
let _ = crds.insert(value, local_timestamp, GossipRoute::LocalMessage);
@ -1307,7 +1307,7 @@ mod tests {
assert!(crds.records.len() <= keypairs.len());
// Remove values one by one and assert that records stay valid.
while !crds.table.is_empty() {
let index = rng.gen_range(0, crds.table.len());
let index = rng.gen_range(0..crds.table.len());
let key = crds.table.get_index(index).unwrap().0.clone();
crds.remove(&key, /*now=*/ 0);
if crds.table.len() % 64 == 0 {
@ -1393,11 +1393,11 @@ mod tests {
let keypairs: Vec<_> = repeat_with(Keypair::new).take(64).collect();
let stakes = keypairs
.iter()
.map(|k| (k.pubkey(), rng.gen_range(0, 1000)))
.map(|k| (k.pubkey(), rng.gen_range(0..1000)))
.collect();
let mut crds = Crds::default();
for _ in 0..2048 {
let keypair = &keypairs[rng.gen_range(0, keypairs.len())];
let keypair = &keypairs[rng.gen_range(0..keypairs.len())];
let value = CrdsValue::new_rand(&mut rng, Some(keypair));
let local_timestamp = new_rand_timestamp(&mut rng);
let _ = crds.insert(value, local_timestamp, GossipRoute::LocalMessage);

View File

@ -89,7 +89,7 @@ impl CrdsFilter {
let max_items = Self::max_items(max_bits, FALSE_RATE, KEYS);
let mask_bits = Self::mask_bits(num_items as f64, max_items);
let filter = Bloom::random(max_items as usize, FALSE_RATE, max_bits as usize);
let seed: u64 = rand::thread_rng().gen_range(0, 2u64.pow(mask_bits));
let seed: u64 = rand::thread_rng().gen_range(0..2u64.pow(mask_bits));
let mask = Self::compute_mask(seed, mask_bits);
CrdsFilter {
filter,
@ -272,7 +272,7 @@ impl CrdsGossipPull {
let mut filters = self.build_crds_filters(thread_pool, crds, bloom_size);
if filters.len() > MAX_NUM_PULL_REQUESTS {
for i in 0..MAX_NUM_PULL_REQUESTS {
let j = rng.gen_range(i, filters.len());
let j = rng.gen_range(i..filters.len());
filters.swap(i, j);
}
filters.truncate(MAX_NUM_PULL_REQUESTS);
@ -456,7 +456,7 @@ impl CrdsGossipPull {
stats: &GossipStats,
) -> Vec<Vec<CrdsValue>> {
let msg_timeout = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
let jitter = rand::thread_rng().gen_range(0, msg_timeout / 4);
let jitter = rand::thread_rng().gen_range(0..msg_timeout / 4);
//skip filters from callers that are too old
let caller_wallclock_window =
now.saturating_sub(msg_timeout)..now.saturating_add(msg_timeout);
@ -620,13 +620,14 @@ pub(crate) mod tests {
crds_value::{CrdsData, Vote},
},
itertools::Itertools,
rand::{seq::SliceRandom, thread_rng, SeedableRng},
rand::{seq::SliceRandom, SeedableRng},
rand_chacha::ChaChaRng,
rayon::ThreadPoolBuilder,
solana_perf::test_tx::new_test_vote_tx,
solana_sdk::{
hash::{hash, HASH_BYTES},
packet::PACKET_DATA_SIZE,
signer::keypair::keypair_from_seed,
timing::timestamp,
},
std::time::Instant,
@ -653,9 +654,8 @@ pub(crate) mod tests {
}
out
}
let mut rng = thread_rng();
for _ in 0..100 {
let hash = solana_sdk::hash::new_rand(&mut rng);
let hash = Hash::new_unique();
assert_eq!(CrdsFilter::hash_as_u64(&hash), hash_as_u64_bitops(&hash));
}
}
@ -665,21 +665,17 @@ pub(crate) mod tests {
let filter = CrdsFilter::default();
let mask = CrdsFilter::compute_mask(0, filter.mask_bits);
assert_eq!(filter.mask, mask);
let mut rng = thread_rng();
for _ in 0..10 {
let hash = solana_sdk::hash::new_rand(&mut rng);
let hash = Hash::new_unique();
assert!(filter.test_mask(&hash));
}
}
#[test]
fn test_crds_filter_set_add() {
let mut rng = thread_rng();
let crds_filter_set =
CrdsFilterSet::new(/*num_items=*/ 9672788, /*max_bytes=*/ 8196);
let hash_values: Vec<_> = repeat_with(|| solana_sdk::hash::new_rand(&mut rng))
.take(1024)
.collect();
let hash_values: Vec<_> = repeat_with(Hash::new_unique).take(1024).collect();
for hash_value in &hash_values {
crds_filter_set.add(*hash_value);
}
@ -727,9 +723,13 @@ pub(crate) mod tests {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let crds_gossip_pull = CrdsGossipPull::default();
let mut crds = Crds::default();
let keypairs: Vec<_> = repeat_with(|| Keypair::generate(&mut rng))
.take(10_000)
.collect();
let keypairs: Vec<_> = repeat_with(|| {
let mut seed = [0u8; Keypair::SECRET_KEY_LENGTH];
rng.fill(&mut seed[..]);
keypair_from_seed(&seed).unwrap()
})
.take(10_000)
.collect();
let mut num_inserts = 0;
for _ in 0..40_000 {
let keypair = keypairs.choose(&mut rng).unwrap();

View File

@ -194,7 +194,7 @@ mod test {
shards.check(&values);
// Remove some of the values.
for _ in 0..512 {
let index = rng.gen_range(0, values.len());
let index = rng.gen_range(0..values.len());
let value = values.swap_remove(index);
assert!(shards.remove(index, &value));
if index < values.len() {
@ -223,7 +223,7 @@ mod test {
}
// Remove everything.
while !values.is_empty() {
let index = rng.gen_range(0, values.len());
let index = rng.gen_range(0..values.len());
let value = values.swap_remove(index);
assert!(shards.remove(index, &value));
if index < values.len() {

View File

@ -139,13 +139,13 @@ impl Sanitize for CrdsData {
/// Random timestamp for tests and benchmarks.
pub(crate) fn new_rand_timestamp<R: Rng>(rng: &mut R) -> u64 {
const DELAY: u64 = 10 * 60 * 1000; // 10 minutes
timestamp() - DELAY + rng.gen_range(0, 2 * DELAY)
timestamp() - DELAY + rng.gen_range(0..2 * DELAY)
}
impl CrdsData {
/// New random CrdsData for tests and benchmarks.
fn new_rand<R: Rng>(rng: &mut R, pubkey: Option<Pubkey>) -> CrdsData {
let kind = rng.gen_range(0, 7);
let kind = rng.gen_range(0..7);
// TODO: Implement other kinds of CrdsData here.
// TODO: Assign ranges to each arm proportional to their frequency in
// the mainnet crds table.
@ -156,9 +156,9 @@ impl CrdsData {
2 => CrdsData::LegacySnapshotHashes(LegacySnapshotHashes::new_rand(rng, pubkey)),
3 => CrdsData::AccountsHashes(AccountsHashes::new_rand(rng, pubkey)),
4 => CrdsData::Version(Version::new_rand(rng, pubkey)),
5 => CrdsData::Vote(rng.gen_range(0, MAX_VOTES), Vote::new_rand(rng, pubkey)),
5 => CrdsData::Vote(rng.gen_range(0..MAX_VOTES), Vote::new_rand(rng, pubkey)),
_ => CrdsData::EpochSlots(
rng.gen_range(0, MAX_EPOCH_SLOTS),
rng.gen_range(0..MAX_EPOCH_SLOTS),
EpochSlots::new_rand(rng, pubkey),
),
}
@ -195,10 +195,10 @@ impl AccountsHashes {
/// New random AccountsHashes for tests and benchmarks.
pub(crate) fn new_rand<R: Rng>(rng: &mut R, pubkey: Option<Pubkey>) -> Self {
let num_hashes = rng.gen_range(0, MAX_LEGACY_SNAPSHOT_HASHES) + 1;
let num_hashes = rng.gen_range(0..MAX_LEGACY_SNAPSHOT_HASHES) + 1;
let hashes = std::iter::repeat_with(|| {
let slot = 47825632 + rng.gen_range(0, 512);
let hash = solana_sdk::hash::new_rand(rng);
let slot = 47825632 + rng.gen_range(0..512);
let hash = Hash::new_unique();
(slot, hash)
})
.take(num_hashes)
@ -801,7 +801,7 @@ mod test {
let mut rng = rand::thread_rng();
let vote = vote_state::Vote::new(
vec![1, 3, 7], // slots
solana_sdk::hash::new_rand(&mut rng),
Hash::new_unique(),
);
let ix = vote_instruction::vote(
&Pubkey::new_unique(), // vote_pubkey
@ -878,7 +878,7 @@ mod test {
let mut rng = ChaChaRng::from_seed(seed);
let keys: Vec<_> = repeat_with(Keypair::new).take(16).collect();
let values: Vec<_> = repeat_with(|| {
let index = rng.gen_range(0, keys.len());
let index = rng.gen_range(0..keys.len());
CrdsValue::new_rand(&mut rng, Some(&keys[index]))
})
.take(1 << 12)

View File

@ -262,7 +262,7 @@ pub(crate) mod tests {
solana_entry::entry::Entry,
solana_ledger::shred::{ProcessShredsStats, ReedSolomonCache, Shredder},
solana_sdk::{
hash,
hash::Hash,
signature::{Keypair, Signer},
system_transaction,
},
@ -302,12 +302,12 @@ pub(crate) mod tests {
&Keypair::new(), // from
&Pubkey::new_unique(), // to
rng.gen(), // lamports
hash::new_rand(rng), // recent blockhash
Hash::new_unique(), // recent blockhash
);
Entry::new(
&hash::new_rand(rng), // prev_hash
1, // num_hashes,
vec![tx], // transactions
&Hash::new_unique(), // prev_hash
1, // num_hashes,
vec![tx], // transactions
)
})
.take(5)
@ -331,7 +331,7 @@ pub(crate) mod tests {
let leader = Arc::new(Keypair::new());
let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0);
let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap();
let next_shred_index = rng.gen_range(0, 32_000);
let next_shred_index = rng.gen_range(0..32_000);
let shred1 = new_rand_shred(&mut rng, next_shred_index, &shredder, &leader);
let shred2 = new_rand_shred(&mut rng, next_shred_index, &shredder, &leader);
let leader_schedule = |s| {

View File

@ -326,8 +326,8 @@ impl EpochSlots {
let now = crds_value::new_rand_timestamp(rng);
let pubkey = pubkey.unwrap_or_else(solana_sdk::pubkey::new_rand);
let mut epoch_slots = Self::new(pubkey, now);
let num_slots = rng.gen_range(0, 20);
let slots: Vec<_> = std::iter::repeat_with(|| 47825632 + rng.gen_range(0, 512))
let num_slots = rng.gen_range(0..20);
let slots: Vec<_> = std::iter::repeat_with(|| 47825632 + rng.gen_range(0..512))
.take(num_slots)
.collect();
epoch_slots.add(&slots);
@ -486,7 +486,7 @@ mod tests {
}
fn make_rand_slots<R: Rng>(rng: &mut R) -> impl Iterator<Item = Slot> + '_ {
repeat_with(|| rng.gen_range(1, 5)).scan(0, |slot, step| {
repeat_with(|| rng.gen_range(1..5)).scan(0, |slot, step| {
*slot += step;
Some(*slot)
})

View File

@ -203,7 +203,7 @@ pub fn get_client(
.iter()
.filter_map(|node| node.valid_client_facing_addr(protocol, socket_addr_space))
.collect();
let select = thread_rng().gen_range(0, nodes.len());
let select = thread_rng().gen_range(0..nodes.len());
let (rpc, tpu) = nodes[select];
ThinClient::new(rpc, tpu, connection_cache)
}

View File

@ -159,10 +159,10 @@ impl LegacyContactInfo {
/// New random LegacyContactInfo for tests and simulations.
pub fn new_rand<R: rand::Rng>(rng: &mut R, pubkey: Option<Pubkey>) -> Self {
let delay = 10 * 60 * 1000; // 10 minutes
let now = timestamp() - delay + rng.gen_range(0, 2 * delay);
let now = timestamp() - delay + rng.gen_range(0..2 * delay);
let pubkey = pubkey.unwrap_or_else(solana_sdk::pubkey::new_rand);
let mut node = LegacyContactInfo::new_localhost(&pubkey, now);
node.gossip.set_port(rng.gen_range(1024, u16::MAX));
node.gossip.set_port(rng.gen_range(1024..u16::MAX));
node
}

View File

@ -1,7 +1,7 @@
use {
bincode::{serialize, Error},
lru::LruCache,
rand::{AsByteSliceMut, CryptoRng, Rng},
rand::{CryptoRng, Fill, Rng},
serde::Serialize,
solana_sdk::{
hash::{self, Hash},
@ -64,7 +64,7 @@ impl<T: Serialize> Ping<T> {
impl<T> Ping<T>
where
T: Serialize + AsByteSliceMut + Default,
T: Serialize + Fill + Default,
{
pub fn new_rand<R>(rng: &mut R, keypair: &Keypair) -> Result<Self, Error>
where
@ -296,8 +296,8 @@ mod tests {
.take(8)
.collect();
let remote_nodes: Vec<(&Keypair, SocketAddr)> = repeat_with(|| {
let keypair = &keypairs[rng.gen_range(0, keypairs.len())];
let socket = sockets[rng.gen_range(0, sockets.len())];
let keypair = &keypairs[rng.gen_range(0..keypairs.len())];
let socket = sockets[rng.gen_range(0..sockets.len())];
(keypair, socket)
})
.take(128)

View File

@ -207,9 +207,9 @@ mod tests {
let mut rng = ChaChaRng::from_seed([189u8; 32]);
let pubkey = Pubkey::new_unique();
let nodes: Vec<_> = repeat_with(Pubkey::new_unique).take(20).collect();
let stakes = repeat_with(|| rng.gen_range(1, MAX_STAKE));
let stakes = repeat_with(|| rng.gen_range(1..MAX_STAKE));
let mut stakes: HashMap<_, _> = nodes.iter().copied().zip(stakes).collect();
stakes.insert(pubkey, rng.gen_range(1, MAX_STAKE));
stakes.insert(pubkey, rng.gen_range(1..MAX_STAKE));
let mut active_set = PushActiveSet::default();
assert!(active_set.0.iter().all(|entry| entry.0.is_empty()));
active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes);
@ -262,7 +262,7 @@ mod tests {
const NUM_BLOOM_FILTER_ITEMS: usize = 100;
let mut rng = ChaChaRng::from_seed([147u8; 32]);
let nodes: Vec<_> = repeat_with(Pubkey::new_unique).take(20).collect();
let weights: Vec<_> = repeat_with(|| rng.gen_range(1, 1000)).take(20).collect();
let weights: Vec<_> = repeat_with(|| rng.gen_range(1..1000)).take(20).collect();
let mut entry = PushActiveSetEntry::default();
entry.rotate(
&mut rng,

View File

@ -198,7 +198,7 @@ mod tests {
.map(|(i, _)| i)
.collect();
while high != 0 {
let sample = rng.gen_range(0, high);
let sample = rng.gen_range(0..high);
let index = weights
.iter()
.scan(0, |acc, &w| {
@ -342,7 +342,7 @@ mod tests {
#[test]
fn test_weighted_shuffle_match_slow() {
let mut rng = rand::thread_rng();
let weights: Vec<u64> = repeat_with(|| rng.gen_range(0, 1000)).take(997).collect();
let weights: Vec<u64> = repeat_with(|| rng.gen_range(0..1000)).take(997).collect();
for _ in 0..10 {
let mut seed = [0u8; 32];
rng.fill(&mut seed[..]);

View File

@ -93,7 +93,7 @@ fn bench_read_sequential(bench: &mut Bencher) {
let mut rng = rand::thread_rng();
bench.iter(move || {
// Generate random starting point in the range [0, total_shreds - 1], read num_reads shreds sequentially
let start_index = rng.gen_range(0, num_small_shreds + num_large_shreds);
let start_index = rng.gen_range(0..num_small_shreds + num_large_shreds);
for i in start_index..start_index + num_reads {
let _ = blockstore.get_data_shred(slot, i % total_shreds);
}
@ -122,7 +122,7 @@ fn bench_read_random(bench: &mut Bencher) {
// simulating random reads
let mut rng = rand::thread_rng();
let indexes: Vec<usize> = (0..num_reads)
.map(|_| rng.gen_range(0, total_shreds) as usize)
.map(|_| rng.gen_range(0..total_shreds) as usize)
.collect();
bench.iter(move || {
for i in indexes.iter() {

View File

@ -3753,7 +3753,7 @@ pub mod tests {
}
i += 1;
let slot = bank.slot() + thread_rng().gen_range(1, 3);
let slot = bank.slot() + thread_rng().gen_range(1..3);
bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot));
}
}

View File

@ -200,7 +200,7 @@ mod tests {
const NUM_SLOTS: usize = 97;
let mut rng = rand::thread_rng();
let pubkeys: Vec<_> = repeat_with(Pubkey::new_unique).take(4).collect();
let schedule: Vec<_> = repeat_with(|| pubkeys[rng.gen_range(0, 3)])
let schedule: Vec<_> = repeat_with(|| pubkeys[rng.gen_range(0..3)])
.take(19)
.collect();
let schedule = LeaderSchedule::new_from_schedule(schedule);

View File

@ -1042,7 +1042,7 @@ mod tests {
matches::assert_matches,
rand::Rng,
rand_chacha::{rand_core::SeedableRng, ChaChaRng},
solana_sdk::{shred_version, signature::Signer},
solana_sdk::{shred_version, signature::Signer, signer::keypair::keypair_from_seed},
};
const SIZE_OF_SHRED_INDEX: usize = 4;
@ -1525,7 +1525,10 @@ mod tests {
};
let mut data = [0u8; legacy::ShredData::CAPACITY];
rng.fill(&mut data[..]);
let keypair = Keypair::generate(&mut rng);
let mut seed = [0u8; Keypair::SECRET_KEY_LENGTH];
rng.fill(&mut seed[..]);
let keypair = keypair_from_seed(&seed).unwrap();
let mut shred = Shred::new_from_data(
141939602, // slot
28685, // index
@ -1562,7 +1565,9 @@ mod tests {
let seed = <[u8; 32]>::try_from(bs58_decode(SEED)).unwrap();
ChaChaRng::from_seed(seed)
};
let keypair = Keypair::generate(&mut rng);
let mut seed = [0u8; Keypair::SECRET_KEY_LENGTH];
rng.fill(&mut seed[..]);
let keypair = keypair_from_seed(&seed).unwrap();
let mut shred = Shred::new_from_data(
142076266, // slot
21443, // index
@ -1598,7 +1603,9 @@ mod tests {
};
let mut parity_shard = vec![0u8; legacy::SIZE_OF_ERASURE_ENCODED_SLICE];
rng.fill(&mut parity_shard[..]);
let keypair = Keypair::generate(&mut rng);
let mut seed = [0u8; Keypair::SECRET_KEY_LENGTH];
rng.fill(&mut seed[..]);
let keypair = keypair_from_seed(&seed).unwrap();
let mut shred = Shred::new_from_parity_shard(
141945197, // slot
23418, // index

View File

@ -1173,7 +1173,7 @@ mod test {
num_coding_shreds: usize,
reed_solomon_cache: &ReedSolomonCache,
) {
let keypair = Keypair::generate(rng);
let keypair = Keypair::new();
let num_shreds = num_data_shreds + num_coding_shreds;
let proof_size = get_proof_size(num_shreds);
let capacity = ShredData::capacity(proof_size).unwrap();
@ -1186,7 +1186,7 @@ mod test {
fec_set_index: 1835,
};
let data_header = {
let reference_tick = rng.gen_range(0, 0x40);
let reference_tick = rng.gen_range(0..0x40);
DataShredHeader {
parent_offset: rng.gen::<u16>().max(1),
flags: ShredFlags::from_bits_retain(reference_tick),
@ -1204,7 +1204,7 @@ mod test {
index: common_header.index + i as u32,
..common_header
};
let size = ShredData::SIZE_OF_HEADERS + rng.gen_range(0, capacity);
let size = ShredData::SIZE_OF_HEADERS + rng.gen_range(0..capacity);
let data_header = DataShredHeader {
size: size as u16,
..data_header
@ -1345,7 +1345,7 @@ mod test {
let mut rng = rand::thread_rng();
let reed_solomon_cache = ReedSolomonCache::default();
for _ in 0..32 {
let data_size = rng.gen_range(0, 31200 * 7);
let data_size = rng.gen_range(0..31200 * 7);
run_make_shreds_from_data(&mut rng, data_size, &reed_solomon_cache);
}
}
@ -1358,11 +1358,11 @@ mod test {
let thread_pool = ThreadPoolBuilder::new().num_threads(2).build().unwrap();
let keypair = Keypair::new();
let slot = 149_745_689;
let parent_slot = slot - rng.gen_range(1, 65536);
let parent_slot = slot - rng.gen_range(1..65536);
let shred_version = rng.gen();
let reference_tick = rng.gen_range(1, 64);
let next_shred_index = rng.gen_range(0, 671);
let next_code_index = rng.gen_range(0, 781);
let reference_tick = rng.gen_range(1..64);
let next_shred_index = rng.gen_range(0..671);
let next_code_index = rng.gen_range(0..781);
let mut data = vec![0u8; data_size];
rng.fill(&mut data[..]);
let shreds = make_shreds_from_data(

View File

@ -476,7 +476,7 @@ mod tests {
matches::assert_matches,
rand::{seq::SliceRandom, Rng},
solana_sdk::{
hash::{self, hash, Hash},
hash::{hash, Hash},
pubkey::Pubkey,
shred_version,
signature::{Signature, Signer},
@ -994,20 +994,20 @@ mod tests {
.take(num_tx)
.collect();
let entry = Entry::new(
&hash::new_rand(&mut rng), // prev hash
rng.gen_range(1, 64), // num hashes
&Hash::new_unique(), // prev hash
rng.gen_range(1..64), // num hashes
txs,
);
let keypair = Arc::new(Keypair::new());
let slot = 71489660;
let shredder = Shredder::new(
slot,
slot - rng.gen_range(1, 27), // parent slot
slot - rng.gen_range(1..27), // parent slot
0, // reference tick
rng.gen(), // version
)
.unwrap();
let next_shred_index = rng.gen_range(1, 1024);
let next_shred_index = rng.gen_range(1..1024);
let reed_solomon_cache = ReedSolomonCache::default();
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
&keypair,

View File

@ -708,11 +708,11 @@ mod tests {
}
fn make_entry<R: Rng>(rng: &mut R, prev_hash: &Hash) -> Entry {
let size = rng.gen_range(16, 32);
let size = rng.gen_range(16..32);
let txs = repeat_with(|| make_transaction(rng)).take(size).collect();
Entry::new(
prev_hash,
rng.gen_range(1, 64), // num_hashes
rng.gen_range(1..64), // num_hashes
txs,
)
}
@ -731,11 +731,11 @@ mod tests {
.iter()
.flat_map(|(&slot, keypair)| {
let parent_slot = slot - rng.gen::<u16>().max(1) as Slot;
let num_entries = rng.gen_range(64, 128);
let num_entries = rng.gen_range(64..128);
let (data_shreds, coding_shreds) = Shredder::new(
slot,
parent_slot,
rng.gen_range(0, 0x40), // reference_tick
rng.gen_range(0..0x40), // reference_tick
rng.gen(), // version
)
.unwrap()
@ -743,8 +743,8 @@ mod tests {
keypair,
&make_entries(rng, num_entries),
rng.gen(), // is_last_in_slot
rng.gen_range(0, 2671), // next_shred_index
rng.gen_range(0, 2781), // next_code_index
rng.gen_range(0..2671), // next_shred_index
rng.gen_range(0..2781), // next_code_index
rng.gen(), // merkle_variant,
&reed_solomon_cache,
&mut ProcessShredsStats::default(),
@ -782,7 +782,7 @@ mod tests {
packet
});
let packets: Vec<_> = repeat_with(|| {
let size = rng.gen_range(0, 16);
let size = rng.gen_range(0..16);
let packets = packets.by_ref().take(size).collect();
let batch = PacketBatch::new(packets);
(size == 0 || !batch.is_empty()).then_some(batch)
@ -802,7 +802,7 @@ mod tests {
let mut rng = rand::thread_rng();
let thread_pool = ThreadPoolBuilder::new().num_threads(3).build().unwrap();
let recycler_cache = RecyclerCache::default();
let keypairs = repeat_with(|| rng.gen_range(169_367_809, 169_906_789))
let keypairs = repeat_with(|| rng.gen_range(169_367_809..169_906_789))
.map(|slot| (slot, Keypair::new()))
.take(3)
.collect();
@ -848,7 +848,7 @@ mod tests {
let thread_pool = ThreadPoolBuilder::new().num_threads(3).build().unwrap();
let recycler_cache = RecyclerCache::default();
let shreds = {
let keypairs = repeat_with(|| rng.gen_range(169_367_809, 169_906_789))
let keypairs = repeat_with(|| rng.gen_range(169_367_809..169_906_789))
.map(|slot| (slot, Keypair::new()))
.take(3)
.collect();

View File

@ -156,7 +156,7 @@ pub fn send_many_transactions(
let (blockhash, _) = client
.get_latest_blockhash_with_commitment(CommitmentConfig::processed())
.unwrap();
let transfer_amount = thread_rng().gen_range(1, max_tokens_per_transfer);
let transfer_amount = thread_rng().gen_range(1..max_tokens_per_transfer);
let mut transaction = system_transaction::transfer(
funding_keypair,

View File

@ -98,7 +98,7 @@ impl NetworkTopology {
fn new_random(max_partitions: usize, max_packet_drop: u8, max_packet_delay: u32) -> Self {
let mut rng = thread_rng();
let num_partitions = rng.gen_range(0, max_partitions + 1);
let num_partitions = rng.gen_range(0..max_partitions + 1);
if num_partitions == 0 {
return NetworkTopology::default();
@ -110,7 +110,7 @@ impl NetworkTopology {
let partition = if i == num_partitions - 1 {
100 - used_partition
} else {
rng.gen_range(0, 100 - used_partition - num_partitions + i)
rng.gen_range(0..100 - used_partition - num_partitions + i)
};
used_partition += partition;
partitions.push(partition as u8);
@ -120,14 +120,14 @@ impl NetworkTopology {
for i in 0..partitions.len() - 1 {
for j in i + 1..partitions.len() {
let drop_config = if max_packet_drop > 0 {
let packet_drop = rng.gen_range(0, max_packet_drop + 1);
let packet_drop = rng.gen_range(0..max_packet_drop + 1);
format!("loss {packet_drop}% 25% ")
} else {
String::default()
};
let config = if max_packet_delay > 0 {
let packet_delay = rng.gen_range(0, max_packet_delay + 1);
let packet_delay = rng.gen_range(0..max_packet_delay + 1);
format!("{drop_config}delay {packet_delay}ms 10ms")
} else {
drop_config

View File

@ -558,7 +558,7 @@ pub fn bind_two_in_range_with_offset(
pub fn find_available_port_in_range(ip_addr: IpAddr, range: PortRange) -> io::Result<u16> {
let (start, end) = range;
let mut tries_left = end - start;
let mut rand_port = thread_rng().gen_range(start, end);
let mut rand_port = thread_rng().gen_range(start..end);
loop {
match bind_common(ip_addr, rand_port, false) {
Ok(_) => {

View File

@ -36,7 +36,7 @@ name = "solana_perf"
[dev-dependencies]
matches = { workspace = true }
rand_chacha = "0.2.2"
rand_chacha = { workspace = true }
solana-logger = { workspace = true }
test-case = { workspace = true }

View File

@ -139,7 +139,7 @@ fn bench_sigverify_uneven(bencher: &mut Bencher) {
// generate packet vector
let mut batches = vec![];
while current_packets < num_packets {
let mut len: usize = thread_rng().gen_range(1, 128);
let mut len: usize = thread_rng().gen_range(1..128);
current_packets += len;
if current_packets > num_packets {
len -= current_packets - num_packets;

View File

@ -262,7 +262,7 @@ mod tests {
let mut packet = Packet::new([0u8; PACKET_DATA_SIZE], Meta::default());
let mut dup_count = 0usize;
for _ in 0..num_packets {
let size = rng.gen_range(0, PACKET_DATA_SIZE);
let size = rng.gen_range(0..PACKET_DATA_SIZE);
packet.meta_mut().size = size;
rng.fill(&mut packet.buffer_mut()[0..size]);
if deduper.dedup(packet.data(..).unwrap()) {

View File

@ -9,7 +9,7 @@ pub fn discard_batches_randomly(
mut total_packets: usize,
) -> usize {
while total_packets > max_packets {
let index = thread_rng().gen_range(0, batches.len());
let index = thread_rng().gen_range(0..batches.len());
let removed = batches.swap_remove(index);
total_packets = total_packets.saturating_sub(removed.len());
}

View File

@ -46,7 +46,7 @@ pub struct RecyclerX<T> {
impl<T: Default> Default for RecyclerX<T> {
fn default() -> RecyclerX<T> {
let id = thread_rng().gen_range(0, 1000);
let id = thread_rng().gen_range(0..1000);
trace!("new recycler..{}", id);
RecyclerX {
gc: Mutex::default(),
@ -229,7 +229,7 @@ mod tests {
assert_eq!(recycler.recycler.gc.lock().unwrap().len(), NUM_PACKETS);
// Process a normal load of packets for a while.
for _ in 0..RECYCLER_SHRINK_WINDOW / 16 {
let count = rng.gen_range(1, 128);
let count = rng.gen_range(1..128);
let _packets: Vec<_> = repeat_with(|| recycler.allocate("")).take(count).collect();
}
// Assert that the gc size has shrunk.

View File

@ -710,10 +710,10 @@ mod tests {
fn test_copy_return_values() {
let mut rng = rand::thread_rng();
let sig_lens: Vec<Vec<u32>> = {
let size = rng.gen_range(0, 64);
let size = rng.gen_range(0..64);
repeat_with(|| {
let size = rng.gen_range(0, 16);
repeat_with(|| rng.gen_range(0, 5)).take(size).collect()
let size = rng.gen_range(0..16);
repeat_with(|| rng.gen_range(0..5)).take(size).collect()
})
.take(size)
.collect()
@ -1060,7 +1060,7 @@ mod tests {
// generate packet vector
let batches: Vec<_> = (0..num_batches)
.map(|_| {
let num_packets_per_batch = thread_rng().gen_range(1, max_packets_per_batch);
let num_packets_per_batch = thread_rng().gen_range(1..max_packets_per_batch);
let mut packet_batch = PacketBatch::with_capacity(num_packets_per_batch);
for _ in 0..num_packets_per_batch {
packet_batch.push(packet.clone());
@ -1221,22 +1221,22 @@ mod tests {
let recycler = Recycler::default();
let recycler_out = Recycler::default();
for _ in 0..50 {
let num_batches = thread_rng().gen_range(2, 30);
let num_batches = thread_rng().gen_range(2..30);
let mut batches = generate_packet_batches_random_size(&packet, 128, num_batches);
let num_modifications = thread_rng().gen_range(0, 5);
let num_modifications = thread_rng().gen_range(0..5);
for _ in 0..num_modifications {
let batch = thread_rng().gen_range(0, batches.len());
let packet = thread_rng().gen_range(0, batches[batch].len());
let offset = thread_rng().gen_range(0, batches[batch][packet].meta().size);
let add = thread_rng().gen_range(0, 255);
let batch = thread_rng().gen_range(0..batches.len());
let packet = thread_rng().gen_range(0..batches[batch].len());
let offset = thread_rng().gen_range(0..batches[batch][packet].meta().size);
let add = thread_rng().gen_range(0..255);
batches[batch][packet].buffer_mut()[offset] = batches[batch][packet]
.data(offset)
.unwrap()
.wrapping_add(add);
}
let batch_to_disable = thread_rng().gen_range(0, batches.len());
let batch_to_disable = thread_rng().gen_range(0..batches.len());
for p in batches[batch_to_disable].iter_mut() {
p.meta_mut().set_discard(true);
}

View File

@ -59,14 +59,14 @@ where
let mut slots: Vec<Slot> = std::iter::repeat_with(|| rng.gen()).take(5).collect();
slots.sort_unstable();
slots.dedup();
let switch_proof_hash = rng.gen_bool(0.5).then(|| solana_sdk::hash::new_rand(rng));
let switch_proof_hash = rng.gen_bool(0.5).then(Hash::new_unique);
vote_transaction::new_vote_transaction(
slots,
solana_sdk::hash::new_rand(rng), // bank_hash
solana_sdk::hash::new_rand(rng), // blockhash
&Keypair::generate(rng), // node_keypair
&Keypair::generate(rng), // vote_keypair
&Keypair::generate(rng), // authorized_voter_keypair
Hash::new_unique(), // bank_hash
Hash::new_unique(), // blockhash
&Keypair::new(), // node_keypair
&Keypair::new(), // vote_keypair
&Keypair::new(), // authorized_voter_keypair
switch_proof_hash,
)
}

View File

@ -12,7 +12,6 @@ edition = { workspace = true }
[dependencies]
clap = { version = "3.1.5", features = ["cargo"] }
log = { workspace = true }
rand = { workspace = true }
rayon = { workspace = true }
solana-entry = { workspace = true }
solana-logger = { workspace = true }

View File

@ -670,12 +670,23 @@ mod tests {
let mut transaction_context =
TransactionContext::new(accounts, Some(Rent::default()), 1, 2);
// Since libsecp256k1 is still using the old version of rand, this test
// copies the `random` implementation at:
// https://docs.rs/libsecp256k1/latest/src/libsecp256k1/lib.rs.html#430
let secret_key = {
use rand::RngCore;
let mut rng = rand::thread_rng();
loop {
let mut ret = [0u8; libsecp256k1::util::SECRET_KEY_SIZE];
rng.fill_bytes(&mut ret);
if let Ok(key) = libsecp256k1::SecretKey::parse(&ret) {
break key;
}
}
};
let message = SanitizedMessage::Legacy(LegacyMessage::new(Message::new(
&[
new_secp256k1_instruction(
&libsecp256k1::SecretKey::random(&mut rand::thread_rng()),
b"hello",
),
new_secp256k1_instruction(&secret_key, b"hello"),
Instruction::new_with_bytes(mock_program_id, &[], vec![]),
],
None,

View File

@ -3958,8 +3958,8 @@ mod tests {
for _ in 0..outer_iters {
let mut mangled_bytes = bytes.to_vec();
for _ in 0..inner_iters {
let offset = rng.gen_range(offset.start, offset.end);
let value = rng.gen_range(value.start, value.end);
let offset = rng.gen_range(offset.start..offset.end);
let value = rng.gen_range(value.start..value.end);
*mangled_bytes.get_mut(offset).unwrap() = value;
work(&mut mangled_bytes);
}

View File

@ -1,6 +1,5 @@
use {
assert_matches::assert_matches,
rand::thread_rng,
solana_program_test::*,
solana_sdk::{
ed25519_instruction::new_ed25519_instruction,
@ -9,6 +8,20 @@ use {
},
};
// Since ed25519_dalek is still using the old version of rand, this test
// copies the `generate` implementation at:
// https://docs.rs/ed25519-dalek/1.0.1/src/ed25519_dalek/secret.rs.html#167
fn generate_keypair() -> ed25519_dalek::Keypair {
    use rand::RngCore;
    let mut rng = rand::thread_rng();
    // Draw SECRET_KEY_LENGTH random bytes from the thread-local RNG to use as
    // the secret-key seed, mirroring ed25519-dalek 1.x `Keypair::generate`
    // (which is pinned to the old rand 0.7 and so can't take our rand 0.8 rng).
    let mut seed = [0u8; ed25519_dalek::SECRET_KEY_LENGTH];
    rng.fill_bytes(&mut seed);
    // `from_bytes` only errors on a wrong-length slice; the slice here is
    // exactly SECRET_KEY_LENGTH bytes, so this unwrap cannot panic.
    let secret =
        ed25519_dalek::SecretKey::from_bytes(&seed[..ed25519_dalek::SECRET_KEY_LENGTH]).unwrap();
    // Derive the matching public key and assemble the keypair.
    let public = ed25519_dalek::PublicKey::from(&secret);
    ed25519_dalek::Keypair { secret, public }
}
#[tokio::test]
async fn test_success() {
let mut context = ProgramTest::default().start_with_context().await;
@ -17,7 +30,7 @@ async fn test_success() {
let payer = &context.payer;
let recent_blockhash = context.last_blockhash;
let privkey = ed25519_dalek::Keypair::generate(&mut thread_rng());
let privkey = generate_keypair();
let message_arr = b"hello";
let instruction = new_ed25519_instruction(&privkey, message_arr);
@ -39,7 +52,7 @@ async fn test_failure() {
let payer = &context.payer;
let recent_blockhash = context.last_blockhash;
let privkey = ed25519_dalek::Keypair::generate(&mut thread_rng());
let privkey = generate_keypair();
let message_arr = b"hello";
let mut instruction = new_ed25519_instruction(&privkey, message_arr);

View File

@ -69,7 +69,7 @@ version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
dependencies = [
"getrandom 0.2.8",
"getrandom 0.2.10",
"once_cell",
"version_check",
]
@ -81,7 +81,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f"
dependencies = [
"cfg-if 1.0.0",
"getrandom 0.2.8",
"getrandom 0.2.10",
"once_cell",
"version_check",
]
@ -492,7 +492,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1"
dependencies = [
"futures-core",
"getrandom 0.2.8",
"getrandom 0.2.10",
"instant",
"pin-project-lite",
"rand 0.8.5",
@ -1823,9 +1823,9 @@ dependencies = [
[[package]]
name = "getrandom"
version = "0.2.8"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427"
dependencies = [
"cfg-if 1.0.0",
"js-sys",
@ -3719,7 +3719,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"libc",
"rand_chacha 0.3.0",
"rand_chacha 0.3.1",
"rand_core 0.6.4",
]
@ -3735,9 +3735,9 @@ dependencies = [
[[package]]
name = "rand_chacha"
version = "0.3.0"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core 0.6.4",
@ -3758,7 +3758,7 @@ version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom 0.2.8",
"getrandom 0.2.10",
]
[[package]]
@ -3843,7 +3843,7 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64"
dependencies = [
"getrandom 0.2.8",
"getrandom 0.2.10",
"redox_syscall 0.2.10",
]
@ -4572,7 +4572,7 @@ dependencies = [
"ouroboros",
"percentage",
"qualifier_attr",
"rand 0.7.3",
"rand 0.8.5",
"rayon",
"regex",
"rustc_version",
@ -4677,7 +4677,7 @@ dependencies = [
"bv",
"fnv",
"log",
"rand 0.7.3",
"rand 0.8.5",
"rayon",
"rustc_version",
"serde",
@ -4695,7 +4695,7 @@ dependencies = [
"byteorder 1.4.3",
"libsecp256k1 0.6.0",
"log",
"rand 0.7.3",
"rand 0.8.5",
"solana-measure",
"solana-program-runtime",
"solana-sdk",
@ -4723,7 +4723,7 @@ dependencies = [
"memmap2",
"modular-bitfield",
"num_enum 0.6.1",
"rand 0.7.3",
"rand 0.8.5",
"solana-measure",
"solana-sdk",
"tempfile",
@ -4796,7 +4796,6 @@ dependencies = [
"indicatif",
"log",
"quinn",
"rand 0.7.3",
"rayon",
"solana-connection-cache",
"solana-measure",
@ -4844,7 +4843,7 @@ dependencies = [
"futures-util",
"indexmap 2.0.0",
"log",
"rand 0.7.3",
"rand 0.8.5",
"rayon",
"rcgen",
"solana-measure",
@ -4875,8 +4874,8 @@ dependencies = [
"min-max-heap",
"num_enum 0.6.1",
"quinn",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rand 0.8.5",
"rand_chacha 0.3.1",
"rayon",
"rolling-file",
"rustc_version",
@ -4965,7 +4964,7 @@ dependencies = [
"dlopen2",
"lazy_static",
"log",
"rand 0.7.3",
"rand 0.8.5",
"rayon",
"serde",
"solana-measure",
@ -5011,13 +5010,11 @@ dependencies = [
"cc",
"either",
"generic-array 0.14.7",
"getrandom 0.1.14",
"im",
"lazy_static",
"log",
"memmap2",
"once_cell",
"rand_core 0.6.4",
"rustc_version",
"serde",
"serde_bytes",
@ -5100,8 +5097,8 @@ dependencies = [
"lru",
"matches",
"num-traits",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rand 0.8.5",
"rand_chacha 0.3.1",
"rayon",
"rustc_version",
"serde",
@ -5153,8 +5150,8 @@ dependencies = [
"num_cpus",
"num_enum 0.6.1",
"prost 0.11.9",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rand 0.8.5",
"rand_chacha 0.3.1",
"rayon",
"reed-solomon-erasure",
"rocksdb",
@ -5199,7 +5196,7 @@ name = "solana-loader-v4-program"
version = "1.17.0"
dependencies = [
"log",
"rand 0.7.3",
"rand 0.8.5",
"solana-measure",
"solana-program-runtime",
"solana-sdk",
@ -5254,7 +5251,7 @@ dependencies = [
"crossbeam-channel",
"log",
"nix",
"rand 0.7.3",
"rand 0.8.5",
"serde",
"serde_derive",
"socket2 0.5.3",
@ -5280,7 +5277,7 @@ dependencies = [
"libc",
"log",
"nix",
"rand 0.7.3",
"rand 0.8.5",
"rayon",
"serde",
"solana-metrics",
@ -5327,7 +5324,7 @@ dependencies = [
"console_error_panic_hook",
"console_log",
"curve25519-dalek",
"getrandom 0.2.8",
"getrandom 0.2.10",
"itertools",
"js-sys",
"lazy_static",
@ -5339,8 +5336,7 @@ dependencies = [
"num-derive",
"num-traits",
"parking_lot 0.12.1",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rand 0.8.5",
"rustc_version",
"rustversion",
"serde",
@ -5372,7 +5368,7 @@ dependencies = [
"num-derive",
"num-traits",
"percentage",
"rand 0.7.3",
"rand 0.8.5",
"rustc_version",
"serde",
"solana-frozen-abi",
@ -5626,7 +5622,7 @@ dependencies = [
"once_cell",
"ouroboros",
"percentage",
"rand 0.7.3",
"rand 0.8.5",
"rayon",
"regex",
"rustc_version",
@ -5938,8 +5934,8 @@ dependencies = [
name = "solana-sbf-rust-rand"
version = "1.17.0"
dependencies = [
"getrandom 0.1.14",
"rand 0.7.3",
"getrandom 0.2.10",
"rand 0.8.5",
"solana-program",
]
@ -6092,7 +6088,7 @@ dependencies = [
"pbkdf2 0.11.0",
"qstring",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rand 0.8.5",
"rustc_version",
"rustversion",
"serde",
@ -6216,7 +6212,7 @@ dependencies = [
"pkcs8",
"quinn",
"quinn-proto",
"rand 0.7.3",
"rand 0.8.5",
"rcgen",
"rustls 0.21.6",
"solana-metrics",
@ -6292,7 +6288,6 @@ dependencies = [
"indexmap 2.0.0",
"indicatif",
"log",
"rand 0.7.3",
"rayon",
"solana-connection-cache",
"solana-measure",
@ -6341,8 +6336,8 @@ dependencies = [
"log",
"lru",
"quinn",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rand 0.8.5",
"rand_chacha 0.3.1",
"rayon",
"rcgen",
"rustls 0.21.6",
@ -6399,7 +6394,7 @@ dependencies = [
"libloading",
"log",
"num_cpus",
"rand 0.7.3",
"rand 0.8.5",
"rayon",
"serde",
"serde_json",
@ -6478,7 +6473,6 @@ name = "solana-zk-token-proof-program"
version = "1.17.0"
dependencies = [
"bytemuck",
"getrandom 0.1.14",
"num-derive",
"num-traits",
"solana-program-runtime",

View File

@ -13,7 +13,7 @@ bincode = { version = "1.1.4", default-features = false }
blake3 = "1.0.0"
byteorder = "1.3.2"
elf = "0.0.10"
getrandom = { version = "0.1.14" }
getrandom = "0.2.10"
itertools = "0.10.1"
libsecp256k1 = { version = "0.7.0", default-features = false }
log = "0.4.11"
@ -21,7 +21,7 @@ miow = "0.3.6"
net2 = "0.2.37"
num-derive = "0.3"
num-traits = "0.2"
rand = "0.7"
rand = "0.8"
serde = "1.0.112"
serde_json = "1.0.56"
solana_rbpf = "=0.6.0"

View File

@ -10,7 +10,7 @@ license = { workspace = true }
edition = { workspace = true }
[dependencies]
getrandom = { workspace = true, features = ["dummy"] }
getrandom = { workspace = true, features = ["custom"] }
rand = { workspace = true }
solana-program = { workspace = true }

View File

@ -10,7 +10,6 @@ edition = { workspace = true }
[dependencies]
bytemuck = { workspace = true, features = ["derive"] }
getrandom = { workspace = true, features = ["dummy"] }
num-derive = { workspace = true }
num-traits = { workspace = true }
solana-program-runtime = { workspace = true }

View File

@ -248,7 +248,7 @@ fn bench_concurrent_read_write(bencher: &mut Bencher) {
|accounts, pubkeys| {
let mut rng = rand::thread_rng();
loop {
let i = rng.gen_range(0, pubkeys.len());
let i = rng.gen_range(0..pubkeys.len());
test::black_box(
accounts
.load_without_fixed_root(&Ancestors::default(), &pubkeys[i])

View File

@ -47,7 +47,7 @@ fn bench_accounts_index(bencher: &mut Bencher) {
let mut root = 0;
bencher.iter(|| {
for _p in 0..NUM_PUBKEYS {
let pubkey = thread_rng().gen_range(0, NUM_PUBKEYS);
let pubkey = thread_rng().gen_range(0..NUM_PUBKEYS);
index.upsert(
fork,
fork,

View File

@ -92,7 +92,7 @@ fn append_vec_random_read(bencher: &mut Bencher) {
let size = 1_000;
let indexes = add_test_accounts(&vec, size);
bencher.iter(|| {
let random_index: usize = thread_rng().gen_range(0, indexes.len());
let random_index: usize = thread_rng().gen_range(0..indexes.len());
let (sample, pos) = &indexes[random_index];
let (account, _next) = vec.get_account(*pos).unwrap();
let (_meta, test) = create_test_account(*sample);
@ -121,7 +121,7 @@ fn append_vec_concurrent_append_read(bencher: &mut Bencher) {
}
bencher.iter(|| {
let len = indexes.lock().unwrap().len();
let random_index: usize = thread_rng().gen_range(0, len);
let random_index: usize = thread_rng().gen_range(0..len);
let (sample, pos) = *indexes.lock().unwrap().get(random_index).unwrap();
let (account, _next) = vec.get_account(pos).unwrap();
let (_meta, test) = create_test_account(sample);
@ -141,14 +141,14 @@ fn append_vec_concurrent_read_append(bencher: &mut Bencher) {
if len == 0 {
continue;
}
let random_index: usize = thread_rng().gen_range(0, len + 1);
let random_index: usize = thread_rng().gen_range(0..len + 1);
let (sample, pos) = *indexes1.lock().unwrap().get(random_index % len).unwrap();
let (account, _next) = vec1.get_account(pos).unwrap();
let (_meta, test) = create_test_account(sample);
assert_eq!(account.data(), test.data());
});
bencher.iter(|| {
let sample: usize = thread_rng().gen_range(0, 256);
let sample: usize = thread_rng().gen_range(0..256);
let (meta, account) = create_test_account(sample);
if let Some(info) = append_account(&vec, meta, &account, Hash::default()) {
indexes.lock().unwrap().push((sample, info.offset))

View File

@ -85,7 +85,7 @@ fn process_transactions_multiple_slots(banks: &[Arc<Bank>], num_slots: usize, nu
})
.collect();
let index = thread_rng().gen_range(0, num_slots);
let index = thread_rng().gen_range(0..num_slots);
prioritization_fee_cache.update(&banks[index], transactions.iter());
})

View File

@ -674,7 +674,7 @@ impl AccountsBackgroundService {
}
} else {
if bank.block_height() - last_cleaned_block_height
> (CLEAN_INTERVAL_BLOCKS + thread_rng().gen_range(0, 10))
> (CLEAN_INTERVAL_BLOCKS + thread_rng().gen_range(0..10))
{
// Note that the flush will do an internal clean of the
// cache up to bank.slot(), so should be safe as long

View File

@ -66,7 +66,7 @@ use {
fee::FeeStructure,
fee_calculator::FeeRateGovernor,
genesis_config::{create_genesis_config, ClusterType, GenesisConfig},
hash::{self, hash, Hash},
hash::{hash, Hash},
incinerator,
instruction::{AccountMeta, CompiledInstruction, Instruction, InstructionError},
loader_upgradeable_instruction::UpgradeableLoaderInstruction,
@ -130,21 +130,21 @@ impl VoteReward {
let mut rng = rand::thread_rng();
let validator_pubkey = solana_sdk::pubkey::new_rand();
let validator_stake_lamports = rng.gen_range(1, 200);
let validator_stake_lamports = rng.gen_range(1..200);
let validator_voting_keypair = Keypair::new();
let validator_vote_account = vote_state::create_account(
&validator_voting_keypair.pubkey(),
&validator_pubkey,
rng.gen_range(1, 20),
rng.gen_range(1..20),
validator_stake_lamports,
);
Self {
vote_account: validator_vote_account,
commission: rng.gen_range(1, 20),
vote_rewards: rng.gen_range(1, 200),
vote_needs_store: rng.gen_range(1, 20) > 10,
commission: rng.gen_range(1..20),
vote_rewards: rng.gen_range(1..200),
vote_needs_store: rng.gen_range(1..20) > 10,
}
}
}
@ -6396,11 +6396,11 @@ fn test_fuzz_instructions() {
let key = solana_sdk::pubkey::new_rand();
let balance = if thread_rng().gen_ratio(9, 10) {
let lamports = if thread_rng().gen_ratio(1, 5) {
thread_rng().gen_range(0, 10)
thread_rng().gen_range(0..10)
} else {
thread_rng().gen_range(20, 100)
thread_rng().gen_range(20..100)
};
let space = thread_rng().gen_range(0, 10);
let space = thread_rng().gen_range(0..10);
let owner = Pubkey::default();
let account = AccountSharedData::new(lamports, space, &owner);
bank.store_account(&key, &account);
@ -6414,16 +6414,16 @@ fn test_fuzz_instructions() {
let mut results = HashMap::new();
for _ in 0..2_000 {
let num_keys = if thread_rng().gen_ratio(1, 5) {
thread_rng().gen_range(0, max_keys)
thread_rng().gen_range(0..max_keys)
} else {
thread_rng().gen_range(1, 4)
thread_rng().gen_range(1..4)
};
let num_instructions = thread_rng().gen_range(0, max_keys - num_keys);
let num_instructions = thread_rng().gen_range(0..max_keys - num_keys);
let mut account_keys: Vec<_> = if thread_rng().gen_ratio(1, 5) {
(0..num_keys)
.map(|_| {
let idx = thread_rng().gen_range(0, keys.len());
let idx = thread_rng().gen_range(0..keys.len());
keys[idx].0
})
.collect()
@ -6433,7 +6433,7 @@ fn test_fuzz_instructions() {
.map(|_| {
let mut idx;
loop {
idx = thread_rng().gen_range(0, keys.len());
idx = thread_rng().gen_range(0..keys.len());
if !inserted.contains(&idx) {
break;
}
@ -6447,13 +6447,13 @@ fn test_fuzz_instructions() {
let instructions: Vec<_> = if num_keys > 0 {
(0..num_instructions)
.map(|_| {
let num_accounts_to_pass = thread_rng().gen_range(0, num_keys);
let num_accounts_to_pass = thread_rng().gen_range(0..num_keys);
let account_indexes = (0..num_accounts_to_pass)
.map(|_| thread_rng().gen_range(0, num_keys))
.map(|_| thread_rng().gen_range(0..num_keys))
.collect();
let program_index: u8 = thread_rng().gen_range(0, num_keys);
let program_index: u8 = thread_rng().gen_range(0..num_keys);
if thread_rng().gen_ratio(4, 5) {
let programs_index = thread_rng().gen_range(0, program_keys.len());
let programs_index = thread_rng().gen_range(0..program_keys.len());
account_keys[program_index as usize] = program_keys[programs_index].0;
}
CompiledInstruction::new(program_index, &10, account_indexes)
@ -6465,33 +6465,33 @@ fn test_fuzz_instructions() {
let account_keys_len = std::cmp::max(account_keys.len(), 2);
let num_signatures = if thread_rng().gen_ratio(1, 5) {
thread_rng().gen_range(0, account_keys_len + 10)
thread_rng().gen_range(0..account_keys_len + 10)
} else {
thread_rng().gen_range(1, account_keys_len)
thread_rng().gen_range(1..account_keys_len)
};
let num_required_signatures = if thread_rng().gen_ratio(1, 5) {
thread_rng().gen_range(0, account_keys_len + 10) as u8
thread_rng().gen_range(0..account_keys_len + 10) as u8
} else {
thread_rng().gen_range(1, std::cmp::max(2, num_signatures)) as u8
thread_rng().gen_range(1..std::cmp::max(2, num_signatures)) as u8
};
let num_readonly_signed_accounts = if thread_rng().gen_ratio(1, 5) {
thread_rng().gen_range(0, account_keys_len) as u8
thread_rng().gen_range(0..account_keys_len) as u8
} else {
let max = if num_required_signatures > 1 {
num_required_signatures - 1
} else {
1
};
thread_rng().gen_range(0, max)
thread_rng().gen_range(0..max)
};
let num_readonly_unsigned_accounts = if thread_rng().gen_ratio(1, 5)
|| (num_required_signatures as usize) >= account_keys_len
{
thread_rng().gen_range(0, account_keys_len) as u8
thread_rng().gen_range(0..account_keys_len) as u8
} else {
thread_rng().gen_range(0, account_keys_len - num_required_signatures as usize) as u8
thread_rng().gen_range(0..account_keys_len - num_required_signatures as usize) as u8
};
let header = MessageHeader {
@ -9909,8 +9909,7 @@ fn test_verify_and_hash_transaction_sig_len() {
.remove(&feature_set::verify_tx_signatures_len::id());
let bank = Bank::new_for_tests(&genesis_config);
let mut rng = rand::thread_rng();
let recent_blockhash = hash::new_rand(&mut rng);
let recent_blockhash = Hash::new_unique();
let from_keypair = Keypair::new();
let to_keypair = Keypair::new();
let from_pubkey = from_keypair.pubkey();
@ -9966,8 +9965,7 @@ fn test_verify_transactions_packet_data_size() {
create_genesis_config_with_leader(42, &solana_sdk::pubkey::new_rand(), 42);
let bank = Bank::new_for_tests(&genesis_config);
let mut rng = rand::thread_rng();
let recent_blockhash = hash::new_rand(&mut rng);
let recent_blockhash = Hash::new_unique();
let keypair = Keypair::new();
let pubkey = keypair.pubkey();
let make_transaction = |size| {
@ -10020,7 +10018,20 @@ fn test_call_precomiled_program() {
let bank = Bank::new_for_tests(&genesis_config);
// libsecp256k1
let secp_privkey = libsecp256k1::SecretKey::random(&mut rand::thread_rng());
// Since libsecp256k1 is still using the old version of rand, this test
// copies the `random` implementation at:
// https://docs.rs/libsecp256k1/latest/src/libsecp256k1/lib.rs.html#430
let secp_privkey = {
use rand::RngCore;
let mut rng = rand::thread_rng();
loop {
let mut ret = [0u8; libsecp256k1::util::SECRET_KEY_SIZE];
rng.fill_bytes(&mut ret);
if let Ok(key) = libsecp256k1::SecretKey::parse(&ret) {
break key;
}
}
};
let message_arr = b"hello";
let instruction =
solana_sdk::secp256k1_instruction::new_secp256k1_instruction(&secp_privkey, message_arr);
@ -10035,7 +10046,20 @@ fn test_call_precomiled_program() {
bank.process_transaction(&tx).unwrap();
// ed25519
let privkey = ed25519_dalek::Keypair::generate(&mut rand::thread_rng());
// Since ed25519_dalek is still using the old version of rand, this test
// copies the `generate` implementation at:
// https://docs.rs/ed25519-dalek/1.0.1/src/ed25519_dalek/secret.rs.html#167
let privkey = {
use rand::RngCore;
let mut rng = rand::thread_rng();
let mut seed = [0u8; ed25519_dalek::SECRET_KEY_LENGTH];
rng.fill_bytes(&mut seed);
let secret =
ed25519_dalek::SecretKey::from_bytes(&seed[..ed25519_dalek::SECRET_KEY_LENGTH])
.unwrap();
let public = ed25519_dalek::PublicKey::from(&secret);
ed25519_dalek::Keypair { secret, public }
};
let message_arr = b"hello";
let instruction =
solana_sdk::ed25519_instruction::new_ed25519_instruction(&privkey, message_arr);
@ -11156,9 +11180,9 @@ fn test_update_accounts_data_size() {
let mut rng = rand::thread_rng();
for _ in 0..100 {
let initial = bank.load_accounts_data_size() as i64;
let delta1 = rng.gen_range(-500, 500);
let delta1 = rng.gen_range(-500..500);
bank.update_accounts_data_size_delta_on_chain(delta1);
let delta2 = rng.gen_range(-500, 500);
let delta2 = rng.gen_range(-500..500);
bank.update_accounts_data_size_delta_off_chain(delta2);
assert_eq!(
bank.load_accounts_data_size() as i64,
@ -11573,15 +11597,13 @@ fn test_accounts_data_size_and_resize_transactions() {
{
let account_pubkey = Pubkey::new_unique();
let account_balance = LAMPORTS_PER_SOL;
let account_size = rng.gen_range(
1,
MAX_PERMITTED_DATA_LENGTH as usize - MAX_PERMITTED_DATA_INCREASE,
);
let account_size =
rng.gen_range(1..MAX_PERMITTED_DATA_LENGTH as usize - MAX_PERMITTED_DATA_INCREASE);
let account_data = AccountSharedData::new(account_balance, account_size, &mock_program_id);
bank.store_account(&account_pubkey, &account_data);
let accounts_data_size_before = bank.load_accounts_data_size();
let account_grow_size = rng.gen_range(1, MAX_PERMITTED_DATA_INCREASE);
let account_grow_size = rng.gen_range(1..MAX_PERMITTED_DATA_INCREASE);
let transaction = create_mock_realloc_tx(
&mint_keypair,
&funding_keypair,
@ -11605,12 +11627,12 @@ fn test_accounts_data_size_and_resize_transactions() {
let account_pubkey = Pubkey::new_unique();
let account_balance = LAMPORTS_PER_SOL;
let account_size =
rng.gen_range(MAX_PERMITTED_DATA_LENGTH / 2, MAX_PERMITTED_DATA_LENGTH) as usize;
rng.gen_range(MAX_PERMITTED_DATA_LENGTH / 2..MAX_PERMITTED_DATA_LENGTH) as usize;
let account_data = AccountSharedData::new(account_balance, account_size, &mock_program_id);
bank.store_account(&account_pubkey, &account_data);
let accounts_data_size_before = bank.load_accounts_data_size();
let account_shrink_size = rng.gen_range(1, account_size);
let account_shrink_size = rng.gen_range(1..account_size);
let transaction = create_mock_realloc_tx(
&mint_keypair,
&funding_keypair,
@ -11772,7 +11794,7 @@ fn test_accounts_data_size_from_genesis() {
bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot));
// Store an account into the bank that is rent-exempt and has data
let data_size = rand::thread_rng().gen_range(3333, 4444);
let data_size = rand::thread_rng().gen_range(3333..4444);
let transaction = system_transaction::create_account(
&mint_keypair,
&Keypair::new(),
@ -12978,7 +13000,7 @@ fn test_update_reward_history_in_partition() {
.collect::<Vec<_>>();
let mut rng = rand::thread_rng();
let i_zero = rng.gen_range(0, expected_num);
let i_zero = rng.gen_range(0..expected_num);
if zero_reward {
// pick one entry to have zero rewards so it gets ignored
stake_rewards[i_zero].stake_reward_info.lamports = 0;

View File

@ -209,7 +209,7 @@ mod serde_snapshot_tests {
fn check_accounts_local(accounts: &Accounts, pubkeys: &[Pubkey], num: usize) {
for _ in 1..num {
let idx = thread_rng().gen_range(0, num - 1);
let idx = thread_rng().gen_range(0..num - 1);
let ancestors = vec![(0, 0)].into_iter().collect();
let account = accounts.load_without_fixed_root(&ancestors, &pubkeys[idx]);
let account1 = Some((

View File

@ -1056,16 +1056,16 @@ pub(crate) mod tests {
epoch: rng.gen(),
..Stakes::default()
});
for _ in 0..rng.gen_range(5usize, 10) {
for _ in 0..rng.gen_range(5usize..10) {
let vote_pubkey = solana_sdk::pubkey::new_rand();
let vote_account = vote_state::create_account(
&vote_pubkey,
&solana_sdk::pubkey::new_rand(), // node_pubkey
rng.gen_range(0, 101), // commission
rng.gen_range(0, 1_000_000), // lamports
rng.gen_range(0..101), // commission
rng.gen_range(0..1_000_000), // lamports
);
stakes_cache.check_and_store(&vote_pubkey, &vote_account, None);
for _ in 0..rng.gen_range(10usize, 20) {
for _ in 0..rng.gen_range(10usize..20) {
let stake_pubkey = solana_sdk::pubkey::new_rand();
let rent = Rent::with_slots_per_epoch(rng.gen());
let stake_account = stake_state::create_account(
@ -1073,7 +1073,7 @@ pub(crate) mod tests {
&vote_pubkey,
&vote_account,
&rent,
rng.gen_range(0, 1_000_000), // lamports
rng.gen_range(0..1_000_000), // lamports
);
stakes_cache.check_and_store(&stake_pubkey, &stake_account, None);
}

View File

@ -183,7 +183,7 @@ impl<T: Serialize + Clone> StatusCache<T> {
) {
let max_key_index = key.as_ref().len().saturating_sub(CACHED_KEY_SIZE + 1);
let hash_map = self.cache.entry(*transaction_blockhash).or_insert_with(|| {
let key_index = thread_rng().gen_range(0, max_key_index + 1);
let key_index = thread_rng().gen_range(0..max_key_index + 1);
(slot, key_index, HashMap::new())
});

View File

@ -360,9 +360,9 @@ mod tests {
) -> impl Iterator<Item = (Pubkey, (/*stake:*/ u64, VoteAccount))> + '_ {
let nodes: Vec<_> = repeat_with(Pubkey::new_unique).take(num_nodes).collect();
repeat_with(move || {
let node = nodes[rng.gen_range(0, nodes.len())];
let node = nodes[rng.gen_range(0..nodes.len())];
let (account, _) = new_rand_vote_account(rng, Some(node));
let stake = rng.gen_range(0, 997);
let stake = rng.gen_range(0..997);
let vote_account = VoteAccount::try_from(account).unwrap();
(Pubkey::new_unique(), (stake, vote_account))
})
@ -489,7 +489,7 @@ mod tests {
}
// Remove some of the vote accounts.
for k in 0..256 {
let index = rng.gen_range(0, accounts.len());
let index = rng.gen_range(0..accounts.len());
let (pubkey, (_, _)) = accounts.swap_remove(index);
vote_accounts.remove(&pubkey);
if (k + 1) % 32 == 0 {
@ -498,9 +498,9 @@ mod tests {
}
// Modify the stakes for some of the accounts.
for k in 0..2048 {
let index = rng.gen_range(0, accounts.len());
let index = rng.gen_range(0..accounts.len());
let (pubkey, (stake, _)) = &mut accounts[index];
let new_stake = rng.gen_range(0, 997);
let new_stake = rng.gen_range(0..997);
if new_stake < *stake {
vote_accounts.sub_stake(pubkey, *stake - new_stake);
} else {
@ -513,7 +513,7 @@ mod tests {
}
// Remove everything.
while !accounts.is_empty() {
let index = rng.gen_range(0, accounts.len());
let index = rng.gen_range(0..accounts.len());
let (pubkey, (_, _)) = accounts.swap_remove(index);
vote_accounts.remove(&pubkey);
if accounts.len() % 32 == 0 {

View File

@ -51,7 +51,7 @@ fn test_shrink_and_clean() {
while alive_accounts.len() <= 10 {
alive_accounts.push((
solana_sdk::pubkey::new_rand(),
AccountSharedData::new(thread_rng().gen_range(0, 50), 0, &owner),
AccountSharedData::new(thread_rng().gen_range(0..50), 0, &owner),
));
}
@ -96,8 +96,8 @@ fn test_bad_bank_hash() {
.into_par_iter()
.map(|_| {
let key = solana_sdk::pubkey::new_rand();
let lamports = thread_rng().gen_range(0, 100);
let some_data_len = thread_rng().gen_range(0, 1000);
let lamports = thread_rng().gen_range(0..100);
let some_data_len = thread_rng().gen_range(0..1000);
let account = AccountSharedData::new(lamports, some_data_len, &key);
(key, account)
})
@ -113,11 +113,11 @@ fn test_bad_bank_hash() {
info!("i: {}", i);
last_print = Instant::now();
}
let num_accounts = thread_rng().gen_range(0, 100);
let num_accounts = thread_rng().gen_range(0..100);
(0..num_accounts).for_each(|_| {
let mut idx;
loop {
idx = thread_rng().gen_range(0, max_accounts);
idx = thread_rng().gen_range(0..max_accounts);
if existing.contains(&idx) {
continue;
}
@ -126,7 +126,7 @@ fn test_bad_bank_hash() {
}
accounts_keys[idx]
.1
.set_lamports(thread_rng().gen_range(0, 1000));
.set_lamports(thread_rng().gen_range(0..1000));
});
let account_refs: Vec<_> = existing

View File

@ -26,7 +26,7 @@ full = [
"generic-array",
"memmap2",
"rand",
"rand_chacha",
"rand0-7",
"serde_json",
"ed25519-dalek",
"ed25519-dalek-bip32",
@ -65,7 +65,7 @@ num_enum = { workspace = true }
pbkdf2 = { workspace = true }
qstring = { workspace = true }
rand = { workspace = true, optional = true }
rand_chacha = { workspace = true, optional = true }
rand0-7 = { package = "rand", version = "0.7", optional = true }
rustversion = { workspace = true }
serde = { workspace = true }
serde_bytes = { workspace = true }

View File

@ -44,7 +44,7 @@ thiserror = { workspace = true }
# Remove this once borsh 0.11 or 1.0 is released, which correctly declares the
# hashbrown dependency as optional.
[target.'cfg(target_os = "solana")'.dependencies]
getrandom = { version = "0.2", features = ["custom"] }
getrandom = { workspace = true, features = ["custom"] }
[target.'cfg(not(target_os = "solana"))'.dependencies]
ark-bn254 = { workspace = true }
@ -60,7 +60,6 @@ libc = { workspace = true, features = ["extra_traits"] }
libsecp256k1 = { workspace = true }
num-bigint = { workspace = true }
rand = { workspace = true }
rand_chacha = { workspace = true }
tiny-bip39 = { workspace = true }
wasm-bindgen = { workspace = true }
zeroize = { workspace = true, features = ["default", "zeroize_derive"] }
@ -72,7 +71,7 @@ solana-logger = { workspace = true }
console_error_panic_hook = { workspace = true }
console_log = { workspace = true }
js-sys = { workspace = true }
getrandom = { version = "0.2", features = ["js", "wasm-bindgen"] }
getrandom = { workspace = true, features = ["js", "wasm-bindgen"] }
[target.'cfg(not(target_pointer_width = "64"))'.dependencies]
parking_lot = { workspace = true }

View File

@ -181,10 +181,10 @@ mod tests {
let mut rng = rand::thread_rng();
for _ in 0..100_000 {
let dummy = Dummy {
a: rng.gen::<u32>() >> rng.gen_range(0, u32::BITS),
b: rng.gen::<u64>() >> rng.gen_range(0, u64::BITS),
c: rng.gen::<u64>() >> rng.gen_range(0, u64::BITS),
d: rng.gen::<u32>() >> rng.gen_range(0, u32::BITS),
a: rng.gen::<u32>() >> rng.gen_range(0..u32::BITS),
b: rng.gen::<u64>() >> rng.gen_range(0..u64::BITS),
c: rng.gen::<u64>() >> rng.gen_range(0..u64::BITS),
d: rng.gen::<u32>() >> rng.gen_range(0..u32::BITS),
};
let bytes = bincode::serialize(&dummy).unwrap();
let other: Dummy = bincode::deserialize(&bytes).unwrap();

View File

@ -1214,8 +1214,8 @@ mod tests {
fn run_serde_compact_vote_state_update<R: Rng>(rng: &mut R) {
let lockouts: VecDeque<_> = std::iter::repeat_with(|| {
let slot = 149_303_885_u64.saturating_add(rng.gen_range(0, 10_000));
let confirmation_count = rng.gen_range(0, 33);
let slot = 149_303_885_u64.saturating_add(rng.gen_range(0..10_000));
let confirmation_count = rng.gen_range(0..33);
Lockout::new_with_confirmation_count(slot, confirmation_count)
})
.take(32)
@ -1224,7 +1224,7 @@ mod tests {
let root = rng.gen_ratio(1, 2).then(|| {
lockouts[0]
.slot()
.checked_sub(rng.gen_range(0, 1_000))
.checked_sub(rng.gen_range(0..1_000))
.expect("All slots should be greater than 1_000")
});
let timestamp = rng.gen_ratio(1, 2).then(|| rng.gen());

View File

@ -188,7 +188,7 @@ pub mod test {
signature::{Keypair, Signer},
transaction::Transaction,
},
rand::{thread_rng, Rng},
rand0_7::{thread_rng, Rng},
};
fn test_case(

View File

@ -6,10 +6,14 @@
pub use solana_program::hash::*;
/// Random hash value for tests and benchmarks.
#[deprecated(
since = "1.17.0",
note = "Please use `Hash::new_unique()` for testing, or fill 32 bytes with any source of randomness"
)]
#[cfg(feature = "full")]
pub fn new_rand<R: ?Sized>(rng: &mut R) -> Hash
where
R: rand::Rng,
R: rand0_7::Rng,
{
let mut buf = [0u8; HASH_BYTES];
rng.fill(&mut buf);

View File

@ -733,7 +733,7 @@
//! // Sign some messages.
//! let mut signatures = vec![];
//! for idx in 0..2 {
//! let secret_key = libsecp256k1::SecretKey::random(&mut rand::thread_rng());
//! let secret_key = libsecp256k1::SecretKey::random(&mut rand0_7::thread_rng());
//! let message = format!("hello world {}", idx).into_bytes();
//! let message_hash = {
//! let mut hasher = keccak::Hasher::default();
@ -1059,7 +1059,7 @@ pub mod test {
signature::{Keypair, Signer},
transaction::Transaction,
},
rand::{thread_rng, Rng},
rand0_7::{thread_rng, Rng},
};
fn test_case(

View File

@ -10,7 +10,7 @@ use {
ed25519_dalek::Signer as DalekSigner,
ed25519_dalek_bip32::Error as Bip32Error,
hmac::Hmac,
rand::{rngs::OsRng, CryptoRng, RngCore},
rand0_7::{rngs::OsRng, CryptoRng, RngCore},
std::{
error,
io::{Read, Write},
@ -25,6 +25,9 @@ use {
pub struct Keypair(ed25519_dalek::Keypair);
impl Keypair {
/// Can be used for generating a Keypair without a dependency on `rand` types
pub const SECRET_KEY_LENGTH: usize = 32;
/// Constructs a new, random `Keypair` using a caller-provided RNG
pub fn generate<R>(csprng: &mut R) -> Self
where
@ -80,6 +83,9 @@ impl Keypair {
}
}
#[cfg(test)]
static_assertions::const_assert_eq!(Keypair::SECRET_KEY_LENGTH, ed25519_dalek::SECRET_KEY_LENGTH);
impl Signer for Keypair {
#[inline]
fn pubkey(&self) -> Pubkey {

View File

@ -996,10 +996,12 @@ impl ConnectionTable {
// If the stakes of all the sampled connections are higher than the
// threshold_stake, rejects the pruning attempt, and returns 0.
fn prune_random(&mut self, sample_size: usize, threshold_stake: u64) -> usize {
let mut rng = thread_rng();
let num_pruned = std::iter::once(self.table.len())
.filter(|&size| size > 0)
.flat_map(|size| repeat_with(move || rng.gen_range(0, size)))
.flat_map(|size| {
let mut rng = thread_rng();
repeat_with(move || rng.gen_range(0..size))
})
.map(|index| {
let connection = self.table[index].first();
let stake = connection.map(|connection| connection.stake);

View File

@ -16,7 +16,6 @@ futures-util = { workspace = true }
indexmap = { workspace = true }
indicatif = { workspace = true, optional = true }
log = { workspace = true }
rand = { workspace = true }
rayon = { workspace = true }
solana-connection-cache = { workspace = true }
solana-measure = { workspace = true }
@ -28,9 +27,6 @@ solana-sdk = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["full"] }
[dev-dependencies]
rand_chacha = { workspace = true }
[features]
default = ["spinner"]
# Support tpu-client methods that feature a spinner progress bar for

View File

@ -126,7 +126,7 @@ fn make_dos_message(
) -> Message {
let instructions: Vec<_> = (0..num_instructions)
.map(|_| {
let data = [num_program_iterations, thread_rng().gen_range(0, 255)];
let data = [num_program_iterations, thread_rng().gen_range(0..255)];
Instruction::new_with_bytes(program_id, &data, account_metas.to_vec())
})
.collect();

View File

@ -60,7 +60,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
let id = pubkey::new_rand();
let contact_info = ContactInfo::new_localhost(&id, timestamp());
cluster_info.insert_info(contact_info);
stakes.insert(id, thread_rng().gen_range(1, NUM_PEERS) as u64);
stakes.insert(id, thread_rng().gen_range(1..NUM_PEERS) as u64);
}
let cluster_info = Arc::new(cluster_info);
let cluster_nodes_cache = ClusterNodesCache::<BroadcastStage>::new(

View File

@ -441,12 +441,12 @@ pub fn make_test_cluster<R: Rng>(
if rng.gen_ratio(unstaked_numerator, unstaked_denominator) {
None // No stake for some of the nodes.
} else {
Some((*node.pubkey(), rng.gen_range(0, 20)))
Some((*node.pubkey(), rng.gen_range(0..20)))
}
})
.collect();
// Add some staked nodes with no contact-info.
stakes.extend(repeat_with(|| (Pubkey::new_unique(), rng.gen_range(0, 20))).take(100));
stakes.extend(repeat_with(|| (Pubkey::new_unique(), rng.gen_range(0..20))).take(100));
let cluster_info = ClusterInfo::new(this_node, keypair, SocketAddrSpace::Unspecified);
let nodes: Vec<_> = nodes
.iter()

View File

@ -744,7 +744,7 @@ fn get_rpc_nodes(
blacklist_timeout = Instant::now();
get_rpc_peers_timout = Instant::now();
if bootstrap_config.no_snapshot_fetch {
let random_peer = &rpc_peers[thread_rng().gen_range(0, rpc_peers.len())];
let random_peer = &rpc_peers[thread_rng().gen_range(0..rpc_peers.len())];
return Ok(vec![GetRpcNodeResult {
rpc_contact_info: random_peer.clone(),
snapshot_hash: None,

View File

@ -24,11 +24,11 @@ aes-gcm-siv = { workspace = true }
bincode = { workspace = true }
byteorder = { workspace = true }
curve25519-dalek = { workspace = true, features = ["serde"] }
getrandom = { workspace = true, features = ["dummy"] }
getrandom = { version = "0.1", features = ["dummy"] }
itertools = { workspace = true }
lazy_static = { workspace = true }
merlin = { workspace = true }
rand = { workspace = true }
rand = { version = "0.7" }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
sha3 = "0.9"