clippy for rust 1.65.0 (#28765)

Brooks Prumo 2022-11-09 19:39:38 +00:00 committed by GitHub
parent df81cd11ba
commit d1ba42180d
91 changed files with 232 additions and 253 deletions
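
Most of the diff below consists of two mechanical fixes that clippy in Rust 1.65.0 reports: casts of a value to the type it already has, and borrows of temporaries passed to functions that accept the value directly (for example through an `impl AsRef<_>` or `impl ToString` bound). The snippet below is a minimal illustration of both shapes, not code from this repository, and the lint names (`clippy::unnecessary_cast`, `clippy::needless_borrow`) are an assumption, since the commit message does not list them.

// Minimal sketch of the two recurring fixes (illustrative only, not repository code).
fn tail(data: &[u8], offset: usize) -> &[u8] {
    // clippy::unnecessary_cast: `offset` is already a usize, so the old
    // shape `&data[offset as usize..]` casts to the same type for no effect.
    &data[offset..]
}

fn greet(name: impl AsRef<str>) -> String {
    format!("hello, {}", name.as_ref())
}

fn main() {
    let data = [1u8, 2, 3, 4];
    assert_eq!(tail(&data, 2), &[3, 4]);

    // clippy::needless_borrow: `greet` accepts the value directly, so the old
    // shape `greet(&format!("world"))` adds a reference that buys nothing.
    println!("{}", greet(format!("world")));
}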

View File

@ -27,7 +27,7 @@ pub fn parse_bpf_upgradeable_loader(
BpfUpgradeableLoaderAccountType::Buffer(UiBuffer {
authority: authority_address.map(|pubkey| pubkey.to_string()),
data: UiAccountData::Binary(
base64::encode(&data[offset as usize..]),
base64::encode(&data[offset..]),
UiAccountEncoding::Base64,
),
})
@ -51,7 +51,7 @@ pub fn parse_bpf_upgradeable_loader(
slot,
authority: upgrade_authority_address.map(|pubkey| pubkey.to_string()),
data: UiAccountData::Binary(
base64::encode(&data[offset as usize..]),
base64::encode(&data[offset..]),
UiAccountEncoding::Base64,
),
})

View File

@ -232,7 +232,7 @@ impl UiTokenAmount {
pub fn real_number_string(&self) -> String {
real_number_string(
u64::from_str(&self.amount).unwrap_or_default(),
self.decimals as u8,
self.decimals,
)
}
@ -242,7 +242,7 @@ impl UiTokenAmount {
} else {
real_number_string_trimmed(
u64::from_str(&self.amount).unwrap_or_default(),
self.decimals as u8,
self.decimals,
)
}
}

View File

@ -500,9 +500,8 @@ fn generate_system_txs(
if use_randomized_compute_unit_price {
let mut rng = rand::thread_rng();
let range = Uniform::from(0..MAX_COMPUTE_UNIT_PRICE);
let compute_unit_prices: Vec<_> = (0..pairs.len())
.map(|_| range.sample(&mut rng) as u64)
.collect();
let compute_unit_prices: Vec<_> =
(0..pairs.len()).map(|_| range.sample(&mut rng)).collect();
let pairs_with_compute_unit_prices: Vec<_> =
pairs.iter().zip(compute_unit_prices.iter()).collect();

View File

@ -210,7 +210,7 @@ impl<T: Clone + Copy> Bucket<T> {
let mut m = Measure::start("bucket_create_key");
let ix = Self::bucket_index_ix(index, key, random);
for i in ix..ix + index.max_search() {
let ii = i as u64 % index.capacity();
let ii = i % index.capacity();
if !index.is_free(ii) {
continue;
}

View File

@ -1055,7 +1055,7 @@ pub fn keypair_from_seed_phrase(
derivation_path: Option<DerivationPath>,
legacy: bool,
) -> Result<Keypair, Box<dyn error::Error>> {
let seed_phrase = prompt_password(&format!("[{}] seed phrase: ", keypair_name))?;
let seed_phrase = prompt_password(format!("[{}] seed phrase: ", keypair_name))?;
let seed_phrase = seed_phrase.trim();
let passphrase_prompt = format!(
"[{}] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue: ",
@ -1184,7 +1184,7 @@ mod tests {
));
let stdin = "stdin:".to_string();
assert!(matches!(
parse_signer_source(&stdin).unwrap(),
parse_signer_source(stdin).unwrap(),
SignerSource {
kind: SignerSourceKind::Stdin,
derivation_path: None,
@ -1201,7 +1201,7 @@ mod tests {
));
let pubkey = Pubkey::new_unique();
assert!(
matches!(parse_signer_source(&pubkey.to_string()).unwrap(), SignerSource {
matches!(parse_signer_source(pubkey.to_string()).unwrap(), SignerSource {
kind: SignerSourceKind::Pubkey(p),
derivation_path: None,
legacy: false,
@ -1241,7 +1241,7 @@ mod tests {
manufacturer: Manufacturer::Ledger,
pubkey: None,
};
assert!(matches!(parse_signer_source(&usb).unwrap(), SignerSource {
assert!(matches!(parse_signer_source(usb).unwrap(), SignerSource {
kind: SignerSourceKind::Usb(u),
derivation_path: None,
legacy: false,
@ -1252,7 +1252,7 @@ mod tests {
pubkey: None,
};
let expected_derivation_path = Some(DerivationPath::new_bip44(Some(0), Some(0)));
assert!(matches!(parse_signer_source(&usb).unwrap(), SignerSource {
assert!(matches!(parse_signer_source(usb).unwrap(), SignerSource {
kind: SignerSourceKind::Usb(u),
derivation_path: d,
legacy: false,
@ -1267,7 +1267,7 @@ mod tests {
let prompt = "prompt:".to_string();
assert!(matches!(
parse_signer_source(&prompt).unwrap(),
parse_signer_source(prompt).unwrap(),
SignerSource {
kind: SignerSourceKind::Prompt,
derivation_path: None,
@ -1275,14 +1275,14 @@ mod tests {
}
));
assert!(
matches!(parse_signer_source(&format!("file:{}", absolute_path_str)).unwrap(), SignerSource {
matches!(parse_signer_source(format!("file:{}", absolute_path_str)).unwrap(), SignerSource {
kind: SignerSourceKind::Filepath(p),
derivation_path: None,
legacy: false,
} if p == absolute_path_str)
);
assert!(
matches!(parse_signer_source(&format!("file:{}", relative_path_str)).unwrap(), SignerSource {
matches!(parse_signer_source(format!("file:{}", relative_path_str)).unwrap(), SignerSource {
kind: SignerSourceKind::Filepath(p),
derivation_path: None,
legacy: false,

View File

@ -1055,7 +1055,7 @@ pub fn keypair_from_seed_phrase(
derivation_path: Option<DerivationPath>,
legacy: bool,
) -> Result<Keypair, Box<dyn error::Error>> {
let seed_phrase = prompt_password(&format!("[{}] seed phrase: ", keypair_name))?;
let seed_phrase = prompt_password(format!("[{}] seed phrase: ", keypair_name))?;
let seed_phrase = seed_phrase.trim();
let passphrase_prompt = format!(
"[{}] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue: ",
@ -1184,7 +1184,7 @@ mod tests {
));
let stdin = "stdin:".to_string();
assert!(matches!(
parse_signer_source(&stdin).unwrap(),
parse_signer_source(stdin).unwrap(),
SignerSource {
kind: SignerSourceKind::Stdin,
derivation_path: None,
@ -1201,7 +1201,7 @@ mod tests {
));
let pubkey = Pubkey::new_unique();
assert!(
matches!(parse_signer_source(&pubkey.to_string()).unwrap(), SignerSource {
matches!(parse_signer_source(pubkey.to_string()).unwrap(), SignerSource {
kind: SignerSourceKind::Pubkey(p),
derivation_path: None,
legacy: false,
@ -1241,7 +1241,7 @@ mod tests {
manufacturer: Manufacturer::Ledger,
pubkey: None,
};
assert!(matches!(parse_signer_source(&usb).unwrap(), SignerSource {
assert!(matches!(parse_signer_source(usb).unwrap(), SignerSource {
kind: SignerSourceKind::Usb(u),
derivation_path: None,
legacy: false,
@ -1252,7 +1252,7 @@ mod tests {
pubkey: None,
};
let expected_derivation_path = Some(DerivationPath::new_bip44(Some(0), Some(0)));
assert!(matches!(parse_signer_source(&usb).unwrap(), SignerSource {
assert!(matches!(parse_signer_source(usb).unwrap(), SignerSource {
kind: SignerSourceKind::Usb(u),
derivation_path: d,
legacy: false,
@ -1267,7 +1267,7 @@ mod tests {
let prompt = "prompt:".to_string();
assert!(matches!(
parse_signer_source(&prompt).unwrap(),
parse_signer_source(prompt).unwrap(),
SignerSource {
kind: SignerSourceKind::Prompt,
derivation_path: None,
@ -1275,14 +1275,14 @@ mod tests {
}
));
assert!(
matches!(parse_signer_source(&format!("file:{}", absolute_path_str)).unwrap(), SignerSource {
matches!(parse_signer_source(format!("file:{}", absolute_path_str)).unwrap(), SignerSource {
kind: SignerSourceKind::Filepath(p),
derivation_path: None,
legacy: false,
} if p == absolute_path_str)
);
assert!(
matches!(parse_signer_source(&format!("file:{}", relative_path_str)).unwrap(), SignerSource {
matches!(parse_signer_source(format!("file:{}", relative_path_str)).unwrap(), SignerSource {
kind: SignerSourceKind::Filepath(p),
derivation_path: None,
legacy: false,

View File

@ -82,7 +82,7 @@ impl ConfigInput {
(SettingType::Explicit, json_rpc_cfg_url.to_string()),
(SettingType::SystemDefault, Self::default_json_rpc_url()),
]);
(setting_type, normalize_to_url_if_moniker(&url_or_moniker))
(setting_type, normalize_to_url_if_moniker(url_or_moniker))
}
pub fn compute_keypair_path_setting(

View File

@ -2282,7 +2282,7 @@ pub fn return_signers_data(tx: &Transaction, config: &ReturnSignersConfig) -> Cl
});
let message = if config.dump_transaction_message {
let message_data = tx.message_data();
Some(base64::encode(&message_data))
Some(base64::encode(message_data))
} else {
None
};

View File

@ -856,7 +856,7 @@ pub fn process_catchup(
let average_time_remaining = if slot_distance == 0 || total_sleep_interval == 0 {
"".to_string()
} else {
let distance_delta = start_slot_distance as i64 - slot_distance as i64;
let distance_delta = start_slot_distance - slot_distance;
let average_catchup_slots_per_second =
distance_delta as f64 / f64::from(total_sleep_interval);
let average_time_remaining =
@ -874,7 +874,7 @@ pub fn process_catchup(
let average_node_slots_per_second =
total_node_slot_delta as f64 / f64::from(total_sleep_interval);
let expected_finish_slot = (node_slot as f64
+ average_time_remaining as f64 * average_node_slots_per_second as f64)
+ average_time_remaining * average_node_slots_per_second)
.round();
format!(
" (AVG: {:.1} slots/second, ETA: slot {} in {})",
@ -2214,7 +2214,7 @@ mod tests {
let default_keypair = Keypair::new();
let (default_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&default_keypair, tmp_file.as_file_mut()).unwrap();
let default_signer = DefaultSigner::new("", &default_keypair_file);
let default_signer = DefaultSigner::new("", default_keypair_file);
let test_cluster_version = test_commands
.clone()

View File

@ -37,7 +37,7 @@ fn build_packet_batch(
1,
recent_blockhash.unwrap_or_else(Hash::new_unique),
);
let mut packet = Packet::from_data(None, &tx).unwrap();
let mut packet = Packet::from_data(None, tx).unwrap();
packet.meta.sender_stake = sender_stake as u64;
packet
})
@ -64,7 +64,7 @@ fn build_randomized_packet_batch(
1,
recent_blockhash.unwrap_or_else(Hash::new_unique),
);
let mut packet = Packet::from_data(None, &tx).unwrap();
let mut packet = Packet::from_data(None, tx).unwrap();
let sender_stake = distribution.sample(&mut rng);
packet.meta.sender_stake = sender_stake as u64;
packet

View File

@ -1945,8 +1945,7 @@ impl BankingStage {
"filter_pending_packets_time",
);
let filter_retryable_packets_us = filter_retryable_packets_time.as_us();
slot_metrics_tracker
.increment_filter_retryable_packets_us(filter_retryable_packets_us as u64);
slot_metrics_tracker.increment_filter_retryable_packets_us(filter_retryable_packets_us);
banking_stage_stats
.filter_pending_packets_elapsed
.fetch_add(filter_retryable_packets_us, Ordering::Relaxed);
@ -3993,7 +3992,7 @@ mod tests {
1,
Hash::new_unique(),
);
let packet = Packet::from_data(None, &tx).unwrap();
let packet = Packet::from_data(None, tx).unwrap();
let deserialized_packet = DeserializedPacket::new(packet).unwrap();
let genesis_config_info = create_slow_genesis_config(10_000);
@ -4084,7 +4083,7 @@ mod tests {
let fwd_block_hash = Hash::new_unique();
let forwarded_packet = {
let transaction = system_transaction::transfer(&keypair, &pubkey, 1, fwd_block_hash);
let mut packet = Packet::from_data(None, &transaction).unwrap();
let mut packet = Packet::from_data(None, transaction).unwrap();
packet.meta.flags |= PacketFlags::FORWARDED;
DeserializedPacket::new(packet).unwrap()
};
@ -4092,7 +4091,7 @@ mod tests {
let normal_block_hash = Hash::new_unique();
let normal_packet = {
let transaction = system_transaction::transfer(&keypair, &pubkey, 1, normal_block_hash);
let packet = Packet::from_data(None, &transaction).unwrap();
let packet = Packet::from_data(None, transaction).unwrap();
DeserializedPacket::new(packet).unwrap()
};

View File

@ -411,7 +411,7 @@ impl ClusterInfoVoteListener {
}
}
if time_since_lock.elapsed().as_millis() > BANK_SEND_VOTES_LOOP_SLEEP_MS as u128 {
if time_since_lock.elapsed().as_millis() > BANK_SEND_VOTES_LOOP_SLEEP_MS {
// Always set this to avoid taking the poh lock too often
time_since_lock = Instant::now();
// We will take this lock at most once every `BANK_SEND_VOTES_LOOP_SLEEP_MS`

View File

@ -198,7 +198,7 @@ mod tests {
) -> DeserializedPacket {
let tx =
system_transaction::transfer(&Keypair::new(), write_to_account, 1, Hash::new_unique());
let packet = Packet::from_data(None, &tx).unwrap();
let packet = Packet::from_data(None, tx).unwrap();
DeserializedPacket::new_with_priority_details(
packet,
TransactionPriorityDetails {

View File

@ -162,7 +162,7 @@ mod tests {
1,
Hash::new_unique(),
);
let packet = Packet::from_data(None, &tx).unwrap();
let packet = Packet::from_data(None, tx).unwrap();
let deserialized_packet = ImmutableDeserializedPacket::new(packet, None);
assert!(matches!(deserialized_packet, Ok(_)));

View File

@ -522,7 +522,7 @@ impl LeaderSlotMetricsTracker {
.timing_metrics
.process_packets_timings
.cost_model_us,
*cost_model_us as u64
*cost_model_us
);
leader_slot_metrics

View File

@ -123,13 +123,13 @@ impl LedgerCleanupService {
max_ledger_shreds,
iterate_time
);
if (total_shreds as u64) < max_ledger_shreds {
if total_shreds < max_ledger_shreds {
return (false, 0, total_shreds);
}
let mut num_shreds_to_clean = 0;
let mut lowest_cleanup_slot = total_slots[0].0;
for (slot, num_shreds) in total_slots.iter().rev() {
num_shreds_to_clean += *num_shreds as u64;
num_shreds_to_clean += *num_shreds;
if num_shreds_to_clean > max_ledger_shreds {
lowest_cleanup_slot = *slot;
break;

View File

@ -828,7 +828,7 @@ mod test {
let num_slots = 2;
// Create some shreds
let (mut shreds, _) = make_many_slot_entries(0, num_slots as u64, 150);
let (mut shreds, _) = make_many_slot_entries(0, num_slots, 150);
let num_shreds = shreds.len() as u64;
let num_shreds_per_slot = num_shreds / num_slots;
@ -856,7 +856,7 @@ mod test {
.flat_map(|slot| {
missing_indexes_per_slot
.iter()
.map(move |shred_index| ShredRepairType::Shred(slot as u64, *shred_index))
.map(move |shred_index| ShredRepairType::Shred(slot, *shred_index))
})
.collect();
@ -969,10 +969,10 @@ mod test {
let expected: Vec<ShredRepairType> = (repair_slot_range.start
..=repair_slot_range.end)
.map(|slot_index| {
if slots.contains(&(slot_index as u64)) {
ShredRepairType::Shred(slot_index as u64, 0)
if slots.contains(&slot_index) {
ShredRepairType::Shred(slot_index, 0)
} else {
ShredRepairType::HighestShred(slot_index as u64, 0)
ShredRepairType::HighestShred(slot_index, 0)
}
})
.collect();

View File

@ -4297,7 +4297,7 @@ pub(crate) mod tests {
assert!(blockstore.is_dead(bank1.slot()));
res.map(|_| ())
};
let _ignored = remove_dir_all(&ledger_path);
let _ignored = remove_dir_all(ledger_path);
res
}

View File

@ -112,10 +112,10 @@ impl RequestResponse for ShredRepairType {
match self {
ShredRepairType::Orphan(slot) => response_shred.slot() <= *slot,
ShredRepairType::HighestShred(slot, index) => {
response_shred.slot() as u64 == *slot && response_shred.index() as u64 >= *index
response_shred.slot() == *slot && response_shred.index() as u64 >= *index
}
ShredRepairType::Shred(slot, index) => {
response_shred.slot() as u64 == *slot && response_shred.index() as u64 == *index
response_shred.slot() == *slot && response_shred.index() as u64 == *index
}
}
}

View File

@ -305,7 +305,7 @@ mod tests {
let link_snapshots_dir = link_snapshots_dir.path().join(snapshot_file_name);
fs::create_dir_all(&link_snapshots_dir).unwrap();
let link_path = link_snapshots_dir.join(snapshot_file_name);
fs::hard_link(&snapshots_path, &link_path).unwrap();
fs::hard_link(&snapshots_path, link_path).unwrap();
}
// Create a packageable snapshot

View File

@ -184,7 +184,7 @@ impl TowerStorage for FileTowerStorage {
.and_then(|t: SavedTowerVersions| t.try_into_tower(node_pubkey))
} else {
// Old format
let file = File::open(&self.old_filename(node_pubkey))?;
let file = File::open(self.old_filename(node_pubkey))?;
let mut stream = BufReader::new(file);
bincode::deserialize_from(&mut stream)
.map_err(|e| e.into())

View File

@ -348,7 +348,7 @@ mod tests {
1,
Hash::new_unique(),
);
let packet = Packet::from_data(None, &tx).unwrap();
let packet = Packet::from_data(None, tx).unwrap();
DeserializedPacket::new(packet).unwrap()
}
@ -359,7 +359,7 @@ mod tests {
1,
Hash::new_unique(),
);
let packet = Packet::from_data(None, &tx).unwrap();
let packet = Packet::from_data(None, tx).unwrap();
DeserializedPacket::new_with_priority_details(
packet,
TransactionPriorityDetails {

View File

@ -2059,7 +2059,7 @@ pub fn move_and_async_delete_path(path: impl AsRef<Path> + Copy) {
Builder::new()
.name("solDeletePath".to_string())
.spawn(move || {
std::fs::remove_dir_all(&path_delete).unwrap();
std::fs::remove_dir_all(path_delete).unwrap();
})
.unwrap();
}

View File

@ -265,7 +265,7 @@ mod tests {
storage_now,
storage_now as i64 - *storage_previous as i64,
data_shred_storage_now,
data_shred_storage_now as i64 - *data_shred_storage_previous as i64,
data_shred_storage_now - *data_shred_storage_previous as i64,
cpu_user,
cpu_system,
cpu_idle,

View File

@ -398,7 +398,7 @@ fn test_concurrent_snapshot_packaging(
bank.squash();
let accounts_package_sender = {
if slot == saved_slot as u64 {
if slot == saved_slot {
// Only send one package on the real accounts package channel so that the
// packaging service doesn't take forever to run the packaging logic on all
// MAX_CACHE_ENTRIES later
@ -433,7 +433,7 @@ fn test_concurrent_snapshot_packaging(
accounts_package_sender.send(accounts_package).unwrap();
bank_forks.insert(bank);
if slot == saved_slot as u64 {
if slot == saved_slot {
// Find the relevant snapshot storages
let snapshot_storage_files: HashSet<_> = bank_forks[slot]
.get_snapshot_storages(None)
@ -451,7 +451,7 @@ fn test_concurrent_snapshot_packaging(
for file in snapshot_storage_files {
fs::copy(
&file,
&saved_accounts_dir.path().join(file.file_name().unwrap()),
saved_accounts_dir.path().join(file.file_name().unwrap()),
)
.unwrap();
}
@ -471,7 +471,7 @@ fn test_concurrent_snapshot_packaging(
.unwrap();
// only save off the snapshot of this slot, we don't need the others.
let options = CopyOptions::new();
fs_extra::dir::copy(&last_snapshot_path, &saved_snapshots_dir, &options).unwrap();
fs_extra::dir::copy(last_snapshot_path, &saved_snapshots_dir, &options).unwrap();
saved_archive_path = Some(snapshot_utils::build_full_snapshot_archive_path(
full_snapshot_archives_dir,

View File

@ -741,7 +741,7 @@ impl EntrySlice for [Entry] {
return self.verify_cpu(start_hash);
}
let api = api.unwrap();
inc_new_counter_info!("entry_verify-num_entries", self.len() as usize);
inc_new_counter_info!("entry_verify-num_entries", self.len());
let genesis = [Entry {
num_hashes: 0,

View File

@ -925,7 +925,7 @@ impl ClusterInfo {
let mut entries = Vec::default();
let keypair = self.keypair();
while !update.is_empty() {
let ix = (epoch_slot_index % crds_value::MAX_EPOCH_SLOTS) as u8;
let ix = epoch_slot_index % crds_value::MAX_EPOCH_SLOTS;
let now = timestamp();
let mut slots = if !reset {
self.lookup_epoch_slots(ix)

View File

@ -88,7 +88,7 @@ impl CrdsFilter {
pub(crate) fn new_rand(num_items: usize, max_bytes: usize) -> Self {
let max_bits = (max_bytes * 8) as f64;
let max_items = Self::max_items(max_bits, FALSE_RATE, KEYS);
let mask_bits = Self::mask_bits(num_items as f64, max_items as f64);
let mask_bits = Self::mask_bits(num_items as f64, max_items);
let filter = Bloom::random(max_items as usize, FALSE_RATE, max_bits as usize);
let seed: u64 = rand::thread_rng().gen_range(0, 2u64.pow(mask_bits));
let mask = Self::compute_mask(seed, mask_bits);
@ -102,7 +102,7 @@ impl CrdsFilter {
fn compute_mask(seed: u64, mask_bits: u32) -> u64 {
assert!(seed <= 2u64.pow(mask_bits));
let seed: u64 = seed.checked_shl(64 - mask_bits).unwrap_or(0x0);
seed | (!0u64).checked_shr(mask_bits).unwrap_or(!0x0) as u64
seed | (!0u64).checked_shr(mask_bits).unwrap_or(!0x0)
}
fn max_items(max_bits: f64, false_rate: f64, num_keys: f64) -> f64 {
let m = max_bits;
@ -152,7 +152,7 @@ impl CrdsFilterSet {
fn new(num_items: usize, max_bytes: usize) -> Self {
let max_bits = (max_bytes * 8) as f64;
let max_items = CrdsFilter::max_items(max_bits, FALSE_RATE, KEYS);
let mask_bits = CrdsFilter::mask_bits(num_items as f64, max_items as f64);
let mask_bits = CrdsFilter::mask_bits(num_items as f64, max_items);
let filters =
repeat_with(|| Bloom::random(max_items as usize, FALSE_RATE, max_bits as usize).into())
.take(1 << mask_bits)

View File

@ -177,7 +177,7 @@ fn run_simulation(stakes: &[u64], fanout: usize) {
range.chunks(chunk_size).for_each(|chunk| {
chunk.iter().for_each(|i| {
//distribute neighbors across threads to maximize parallel compute
let batch_ix = *i as usize % batches.len();
let batch_ix = *i % batches.len();
let node = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
staked_nodes.insert(node.id, stakes[*i - 1]);
cluster_info.insert_info(node.clone());

View File

@ -841,7 +841,7 @@ pub fn gc(config_file: &str) -> Result<(), String> {
progress_bar.set_message(format!("{}Removing old releases", RECYCLING));
for (release, _modified_type) in old_releases {
progress_bar.inc(1);
let _ = fs::remove_dir_all(&release);
let _ = fs::remove_dir_all(release);
}
progress_bar.finish_and_clear();
}

View File

@ -3676,7 +3676,7 @@ fn main() {
let mut csv_writer = if arg_matches.is_present("csv_filename") {
let csv_filename =
value_t_or_exit!(arg_matches, "csv_filename", String);
let file = File::create(&csv_filename).unwrap();
let file = File::create(csv_filename).unwrap();
Some(csv::WriterBuilder::new().from_writer(file))
} else {
None

View File

@ -95,7 +95,7 @@ fn bench_read_sequential(bench: &mut Bencher) {
// Generate random starting point in the range [0, total_shreds - 1], read num_reads shreds sequentially
let start_index = rng.gen_range(0, num_small_shreds + num_large_shreds);
for i in start_index..start_index + num_reads {
let _ = blockstore.get_data_shred(slot, i as u64 % total_shreds);
let _ = blockstore.get_data_shred(slot, i % total_shreds);
}
});

View File

@ -3353,7 +3353,7 @@ fn update_slot_meta(
let maybe_first_insert = slot_meta.received == 0;
// Index is zero-indexed, while the "received" height starts from 1,
// so received = index + 1 for the same shred.
slot_meta.received = cmp::max((u64::from(index) + 1) as u64, slot_meta.received);
slot_meta.received = cmp::max(u64::from(index) + 1, slot_meta.received);
if maybe_first_insert && slot_meta.received > 0 {
// predict the timestamp of what would have been the first shred in this slot
let slot_time_elapsed = u64::from(reference_tick) * 1000 / DEFAULT_TICKS_PER_SECOND;
@ -3923,7 +3923,7 @@ pub fn create_new_ledger(
let mut error_messages = String::new();
fs::rename(
&ledger_path.join(DEFAULT_GENESIS_ARCHIVE),
ledger_path.join(DEFAULT_GENESIS_ARCHIVE),
ledger_path.join(format!("{}.failed", DEFAULT_GENESIS_ARCHIVE)),
)
.unwrap_or_else(|e| {
@ -3934,7 +3934,7 @@ pub fn create_new_ledger(
);
});
fs::rename(
&ledger_path.join(DEFAULT_GENESIS_FILE),
ledger_path.join(DEFAULT_GENESIS_FILE),
ledger_path.join(format!("{}.failed", DEFAULT_GENESIS_FILE)),
)
.unwrap_or_else(|e| {
@ -3945,7 +3945,7 @@ pub fn create_new_ledger(
);
});
fs::rename(
&ledger_path.join(blockstore_dir),
ledger_path.join(blockstore_dir),
ledger_path.join(format!("{}.failed", blockstore_dir)),
)
.unwrap_or_else(|e| {
@ -4841,11 +4841,11 @@ pub mod tests {
assert_eq!(meta.last_index, Some(num_shreds - 1));
if i != 0 {
assert_eq!(result.len(), 0);
assert!(meta.consumed == 0 && meta.received == num_shreds as u64);
assert!(meta.consumed == 0 && meta.received == num_shreds);
} else {
assert_eq!(meta.parent_slot, Some(0));
assert_eq!(result, entries);
assert!(meta.consumed == num_shreds as u64 && meta.received == num_shreds as u64);
assert!(meta.consumed == num_shreds && meta.received == num_shreds);
}
}
}
@ -4957,7 +4957,7 @@ pub mod tests {
);
for b in shreds.iter_mut() {
b.set_index(index);
b.set_slot(slot as u64);
b.set_slot(slot);
index += 1;
}
blockstore
@ -5421,9 +5421,9 @@ pub mod tests {
// However, if it's a slot we haven't inserted, aka one of the gaps, then one of the
// slots we just inserted will chain to that gap, so next_slots for that orphan slot
// won't be empty, but the parent slot is unknown so should equal std::u64::MAX.
let meta = blockstore.meta(slot as u64).unwrap().unwrap();
let meta = blockstore.meta(slot).unwrap().unwrap();
if slot % 2 == 0 {
assert_eq!(meta.next_slots, vec![slot as u64 + 1]);
assert_eq!(meta.next_slots, vec![slot + 1]);
assert_eq!(meta.parent_slot, None);
} else {
assert!(meta.next_slots.is_empty());
@ -5445,9 +5445,9 @@ pub mod tests {
for slot in 0..num_slots {
// Check that all the slots chain correctly once the missing slots
// have been filled
let meta = blockstore.meta(slot as u64).unwrap().unwrap();
let meta = blockstore.meta(slot).unwrap().unwrap();
if slot != num_slots - 1 {
assert_eq!(meta.next_slots, vec![slot as u64 + 1]);
assert_eq!(meta.next_slots, vec![slot + 1]);
} else {
assert!(meta.next_slots.is_empty());
}
@ -5492,10 +5492,10 @@ pub mod tests {
// Check metadata
for slot in 0..num_slots {
let meta = blockstore.meta(slot as u64).unwrap().unwrap();
let meta = blockstore.meta(slot).unwrap().unwrap();
// The last slot will not chain to any other slots
if slot as u64 != num_slots - 1 {
assert_eq!(meta.next_slots, vec![slot as u64 + 1]);
if slot != num_slots - 1 {
assert_eq!(meta.next_slots, vec![slot + 1]);
} else {
assert!(meta.next_slots.is_empty());
}
@ -5521,13 +5521,13 @@ pub mod tests {
blockstore.insert_shreds(vec![shred], None, false).unwrap();
for slot in 0..num_slots {
let meta = blockstore.meta(slot as u64).unwrap().unwrap();
let meta = blockstore.meta(slot).unwrap().unwrap();
if slot != num_slots - 1 {
assert_eq!(meta.next_slots, vec![slot as u64 + 1]);
assert_eq!(meta.next_slots, vec![slot + 1]);
} else {
assert!(meta.next_slots.is_empty());
}
if slot <= slot_index as u64 + 3 {
if slot <= slot_index + 3 {
assert!(meta.is_connected);
} else {
assert!(!meta.is_connected);

View File

@ -244,62 +244,62 @@ impl BlockstoreRocksDbColumnFamilyMetrics {
// Size related
(
"total_sst_files_size",
self.total_sst_files_size as i64,
self.total_sst_files_size,
i64
),
("size_all_mem_tables", self.size_all_mem_tables as i64, i64),
("size_all_mem_tables", self.size_all_mem_tables, i64),
// Snapshot related
("num_snapshots", self.num_snapshots as i64, i64),
("num_snapshots", self.num_snapshots, i64),
(
"oldest_snapshot_time",
self.oldest_snapshot_time as i64,
self.oldest_snapshot_time,
i64
),
// Write related
(
"actual_delayed_write_rate",
self.actual_delayed_write_rate as i64,
self.actual_delayed_write_rate,
i64
),
("is_write_stopped", self.is_write_stopped as i64, i64),
("is_write_stopped", self.is_write_stopped, i64),
// Memory / block cache related
(
"block_cache_capacity",
self.block_cache_capacity as i64,
self.block_cache_capacity,
i64
),
("block_cache_usage", self.block_cache_usage as i64, i64),
("block_cache_usage", self.block_cache_usage, i64),
(
"block_cache_pinned_usage",
self.block_cache_pinned_usage as i64,
self.block_cache_pinned_usage,
i64
),
(
"estimate_table_readers_mem",
self.estimate_table_readers_mem as i64,
self.estimate_table_readers_mem,
i64
),
// Flush and compaction
(
"mem_table_flush_pending",
self.mem_table_flush_pending as i64,
self.mem_table_flush_pending,
i64
),
("compaction_pending", self.compaction_pending as i64, i64),
("compaction_pending", self.compaction_pending, i64),
(
"num_running_compactions",
self.num_running_compactions as i64,
self.num_running_compactions,
i64
),
("num_running_flushes", self.num_running_flushes as i64, i64),
("num_running_flushes", self.num_running_flushes, i64),
// FIFO Compaction related
(
"estimate_oldest_key_time",
self.estimate_oldest_key_time as i64,
self.estimate_oldest_key_time,
i64
),
// Misc
("background_errors", self.background_errors as i64, i64),
("background_errors", self.background_errors, i64),
);
}
}

View File

@ -333,7 +333,7 @@ mod tests {
}
fn run_thread_race() {
let slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH as u64;
let slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH;
let epoch_schedule = EpochSchedule::custom(slots_per_epoch, slots_per_epoch / 2, true);
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));

View File

@ -418,7 +418,7 @@ mod test {
{
let mut shred = shred.clone();
let index = shred.common_header.index - shred.common_header.fec_set_index - 1;
shred.set_index(index as u32);
shred.set_index(index);
assert_matches!(
shred.sanitize(),
Err(Error::InvalidErasureShardIndex { .. })

View File

@ -247,7 +247,7 @@ fn setup_different_sized_fec_blocks(
assert!(!shred.last_in_slot());
}
}
assert_eq!(data_shreds.len(), num_shreds_per_iter as usize);
assert_eq!(data_shreds.len(), num_shreds_per_iter);
next_shred_index = data_shreds.last().unwrap().index() + 1;
next_code_index = coding_shreds.last().unwrap().index() + 1;
sort_data_coding_into_fec_sets(

View File

@ -227,9 +227,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
}
info!("sleeping for 2 leader fortnights");
sleep(Duration::from_millis(
slot_millis * first_two_epoch_slots as u64,
));
sleep(Duration::from_millis(slot_millis * first_two_epoch_slots));
info!("done sleeping for first 2 warmup epochs");
info!("killing entry point: {}", entry_point_info.id);
entry_point_validator_exit.write().unwrap().exit();

View File

@ -97,8 +97,8 @@ fn test_local_cluster_start_and_exit_with_config() {
node_stakes: vec![DEFAULT_NODE_STAKE; NUM_NODES],
cluster_lamports: DEFAULT_CLUSTER_LAMPORTS,
ticks_per_slot: 8,
slots_per_epoch: MINIMUM_SLOTS_PER_EPOCH as u64,
stakers_slot_offset: MINIMUM_SLOTS_PER_EPOCH as u64,
slots_per_epoch: MINIMUM_SLOTS_PER_EPOCH,
stakers_slot_offset: MINIMUM_SLOTS_PER_EPOCH,
..ClusterConfig::default()
};
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
@ -282,7 +282,7 @@ fn test_two_unbalanced_stakes() {
let validator_config = ValidatorConfig::default_for_test();
let num_ticks_per_second = 100;
let num_ticks_per_slot = 10;
let num_slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH as u64;
let num_slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH;
let mut cluster = LocalCluster::new(
&mut ClusterConfig {
@ -1277,7 +1277,7 @@ fn test_snapshot_restart_tower() {
full_snapshot_archive_info.hash(),
full_snapshot_archive_info.archive_format(),
);
fs::hard_link(full_snapshot_archive_info.path(), &validator_archive_path).unwrap();
fs::hard_link(full_snapshot_archive_info.path(), validator_archive_path).unwrap();
// Restart validator from snapshot, the validator's tower state in this snapshot
// will contain slots < the root bank of the snapshot. Validator should not panic.
@ -1350,7 +1350,7 @@ fn test_snapshots_blockstore_floor() {
archive_info.hash(),
ArchiveFormat::TarBzip2,
);
fs::hard_link(archive_info.path(), &validator_archive_path).unwrap();
fs::hard_link(archive_info.path(), validator_archive_path).unwrap();
let slot_floor = archive_info.slot();
// Start up a new node from a snapshot
@ -2530,7 +2530,7 @@ fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) {
fn test_votes_land_in_fork_during_long_partition() {
let total_stake = 3 * DEFAULT_NODE_STAKE;
// Make `lighter_stake` insufficient for switching threshold
let lighter_stake = (SWITCH_FORK_THRESHOLD as f64 * total_stake as f64) as u64;
let lighter_stake = (SWITCH_FORK_THRESHOLD * total_stake as f64) as u64;
let heavier_stake = lighter_stake + 1;
let failures_stake = total_stake - lighter_stake - heavier_stake;

View File

@ -604,7 +604,7 @@ fn test_switch_threshold_uses_gossip_votes() {
let total_stake = 100 * DEFAULT_NODE_STAKE;
// Minimum stake needed to generate a switching proof
let minimum_switch_stake = (SWITCH_FORK_THRESHOLD as f64 * total_stake as f64) as u64;
let minimum_switch_stake = (SWITCH_FORK_THRESHOLD * total_stake as f64) as u64;
// Make the heavier stake insufficient for switching so that the lighter validator
// cannot switch without seeing a vote from the dead/failure_stake validator.

View File

@ -115,7 +115,7 @@ fn test_consistency_halt() {
warn!("adding a validator");
cluster.add_validator(
&validator_snapshot_test_config.validator_config,
validator_stake as u64,
validator_stake,
Arc::new(Keypair::new()),
None,
SocketAddrSpace::Unspecified,

View File

@ -113,7 +113,7 @@ fn process_iftop_logs(matches: &ArgMatches) {
};
let log_path = PathBuf::from(matches.value_of_t_or_exit::<String>("file"));
let mut log = fs::read_to_string(&log_path).expect("Unable to read log file");
let mut log = fs::read_to_string(log_path).expect("Unable to read log file");
log.insert(0, '[');
let terminate_at = log.rfind('}').expect("Didn't find a terminating '}'") + 1;
let _ = log.split_off(terminate_at);
@ -158,7 +158,7 @@ fn analyze_logs(matches: &ArgMatches) {
let logs: Vec<_> = files
.flat_map(|f| {
if let Ok(f) = f {
let log_str = fs::read_to_string(&f.path()).expect("Unable to read log file");
let log_str = fs::read_to_string(f.path()).expect("Unable to read log file");
let log: Vec<LogLine> =
serde_json::from_str(log_str.as_str()).expect("Failed to deserialize log");
log

View File

@ -367,7 +367,7 @@ fn partition_id_to_tos(partition: usize) -> u8 {
fn shape_network(matches: &ArgMatches) {
let config_path = PathBuf::from(matches.value_of_t_or_exit::<String>("file"));
let config = fs::read_to_string(&config_path).expect("Unable to read config file");
let config = fs::read_to_string(config_path).expect("Unable to read config file");
let topology: NetworkTopology =
serde_json::from_str(&config).expect("Failed to parse log as JSON");
let interface: String = matches.value_of_t_or_exit("iface");

View File

@ -104,11 +104,13 @@ pub struct Notifier {
notifiers: Vec<NotificationChannel>,
}
impl Notifier {
pub fn default() -> Self {
impl Default for Notifier {
fn default() -> Self {
Self::new("")
}
}
impl Notifier {
pub fn new(env_prefix: &str) -> Self {
info!("Initializing {}Notifier", env_prefix);
@ -218,7 +220,7 @@ impl Notifier {
let data = json!({ "chat_id": chat_id, "text": msg });
let url = format!("https://api.telegram.org/bot{}/sendMessage", bot_token);
if let Err(err) = self.client.post(&url).json(&data).send() {
if let Err(err) = self.client.post(url).json(&data).send() {
warn!("Failed to send Telegram message: {:?}", err);
}
}
@ -234,7 +236,7 @@ impl Notifier {
account, token, account
);
let params = [("To", to), ("From", from), ("Body", &msg.to_string())];
if let Err(err) = self.client.post(&url).form(&params).send() {
if let Err(err) = self.client.post(url).form(&params).send() {
warn!("Failed to send Twilio message: {:?}", err);
}
}
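
The hunk above moves the inherent `pub fn default()` on `Notifier` into an `impl Default for Notifier`. That is the shape clippy asks for when a type exposes a method named `default` (likely the `should_implement_trait` lint, an assumption since the diff does not name it), and it also lets the type satisfy `T: Default` bounds in generic code. A minimal sketch of the same refactor on a stand-in type:

// Sketch of the refactor on a hypothetical type, not the real Notifier.
struct Notifier {
    env_prefix: String,
}

impl Default for Notifier {
    // Implementing the trait instead of an inherent `default()` satisfies clippy
    // and allows `Notifier::default()` as well as `T: Default` bounds.
    fn default() -> Self {
        Self::new("")
    }
}

impl Notifier {
    fn new(env_prefix: &str) -> Self {
        Self { env_prefix: env_prefix.to_string() }
    }
}

fn main() {
    let n = Notifier::default();
    println!("prefix = {:?}", n.env_prefix);
}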

View File

@ -895,7 +895,7 @@ mod tests {
instructions: vec![],
};
let mut tx = Transaction::new_unsigned(message);
tx.signatures = vec![Signature::default(); actual_num_sigs as usize];
tx.signatures = vec![Signature::default(); actual_num_sigs];
Packet::from_data(None, tx).unwrap()
}

View File

@ -502,7 +502,7 @@ mod tests {
&[
Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]),
ComputeBudgetInstruction::request_heap_frame(MIN_HEAP_FRAME_BYTES as u32),
ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES as u32),
ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES),
],
Err(TransactionError::DuplicateInstruction(2)),
ComputeBudget::default()

View File

@ -235,7 +235,7 @@ fn serialize_parameters_unaligned(
.map_err(|_| InstructionError::InvalidArgument)?;
s.write_all(account.get_owner().as_ref());
s.write::<u8>(account.is_executable() as u8);
s.write::<u64>((account.get_rent_epoch() as u64).to_le());
s.write::<u64>((account.get_rent_epoch()).to_le());
}
};
}
@ -351,7 +351,7 @@ fn serialize_parameters_aligned(
s.write::<u64>((borrowed_account.get_data().len() as u64).to_le());
s.write_account(&borrowed_account)
.map_err(|_| InstructionError::InvalidArgument)?;
s.write::<u64>((borrowed_account.get_rent_epoch() as u64).to_le());
s.write::<u64>((borrowed_account.get_rent_epoch()).to_le());
}
SerializeAccount::Duplicate(position) => {
s.write::<u8>(position as u8);

View File

@ -382,12 +382,12 @@ impl SyscallInvokeSigned for SyscallInvokeSignedC {
let meta_cs = translate_slice::<SolAccountMeta>(
memory_mapping,
ix_c.accounts_addr,
ix_c.accounts_len as u64,
ix_c.accounts_len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
)?;
let ix_data_len = ix_c.data_len as u64;
let ix_data_len = ix_c.data_len;
if invoke_context
.feature_set
.is_active(&feature_set::loosen_cpi_size_restriction::id())

View File

@ -516,7 +516,7 @@ declare_syscall!(
};
if free_addr == 0 {
match allocator.alloc(layout) {
Ok(addr) => Ok(addr as u64),
Ok(addr) => Ok(addr),
Err(_) => Ok(0),
}
} else {
@ -1532,14 +1532,14 @@ declare_syscall!(
let data = translate_slice_mut::<u8>(
memory_mapping,
data_addr,
result_header.data_len as u64,
result_header.data_len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
)?;
let accounts = translate_slice_mut::<AccountMeta>(
memory_mapping,
accounts_addr,
result_header.accounts_len as u64,
result_header.accounts_len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
)?;
@ -3937,7 +3937,7 @@ mod tests {
let data = translate_slice_mut::<u8>(
&memory_mapping,
VM_BASE_ADDRESS.saturating_add(DATA_OFFSET as u64),
processed_sibling_instruction.data_len as u64,
processed_sibling_instruction.data_len,
true,
true,
)
@ -3945,7 +3945,7 @@ mod tests {
let accounts = translate_slice_mut::<AccountMeta>(
&memory_mapping,
VM_BASE_ADDRESS.saturating_add(ACCOUNTS_OFFSET as u64),
processed_sibling_instruction.accounts_len as u64,
processed_sibling_instruction.accounts_len,
true,
true,
)
@ -4096,7 +4096,7 @@ mod tests {
AccountPropertyUpdate {
instruction_account_index: 1,
attribute: TransactionContextAttribute::TransactionAccountOwner as u16,
value: VM_ADDRESS_KEYS as u64,
value: VM_ADDRESS_KEYS,
_marker: std::marker::PhantomData::default(),
},
];

View File

@ -576,7 +576,7 @@ fn process_instruction(
TEST_MAX_ACCOUNT_INFOS_EXCEEDED => {
msg!("Test max account infos exceeded");
let instruction = create_instruction(*accounts[INVOKED_PROGRAM_INDEX].key, &[], vec![]);
let account_infos_len = (MAX_CPI_ACCOUNT_INFOS as usize).saturating_add(1);
let account_infos_len = MAX_CPI_ACCOUNT_INFOS.saturating_add(1);
let account_infos = vec![accounts[0].clone(); account_infos_len];
invoke_signed(&instruction, &account_infos, &[])?;
}

View File

@ -1063,7 +1063,7 @@ mod tests {
.convert_to_current();
for i in 0..(MAX_LOCKOUT_HISTORY + 1) {
process_slot_vote_unchecked(&mut vote_state, (INITIAL_LOCKOUT as usize * i) as u64);
process_slot_vote_unchecked(&mut vote_state, (INITIAL_LOCKOUT * i) as u64);
}
// The last vote should have been popped b/c it reached a depth of MAX_LOCKOUT_HISTORY

View File

@ -10,7 +10,7 @@ lazy_static! {
static ref MAX_RAYON_THREADS: usize =
env::var("SOLANA_RAYON_THREADS").ok()
.and_then(|num_threads| num_threads.parse().ok())
.unwrap_or_else(|| num_cpus::get() as usize / 2)
.unwrap_or_else(|| num_cpus::get() / 2)
.max(1);
}

View File

@ -805,7 +805,7 @@ impl JsonRpcRequestProcessor {
fn get_transaction_count(&self, config: RpcContextConfig) -> Result<u64> {
let bank = self.get_bank_with_config(config)?;
Ok(bank.transaction_count() as u64)
Ok(bank.transaction_count())
}
fn get_total_supply(&self, commitment: Option<CommitmentConfig>) -> Result<u64> {
@ -975,9 +975,8 @@ impl JsonRpcRequestProcessor {
})
})
.partition(|vote_account_info| {
if bank.slot() >= delinquent_validator_slot_distance as u64 {
vote_account_info.last_vote
> bank.slot() - delinquent_validator_slot_distance as u64
if bank.slot() >= delinquent_validator_slot_distance {
vote_account_info.last_vote > bank.slot() - delinquent_validator_slot_distance
} else {
vote_account_info.last_vote > 0
}
@ -2651,11 +2650,11 @@ pub mod rpc_minimal {
.unwrap();
let full_snapshot_slot =
snapshot_utils::get_highest_full_snapshot_archive_slot(&full_snapshot_archives_dir)
snapshot_utils::get_highest_full_snapshot_archive_slot(full_snapshot_archives_dir)
.ok_or(RpcCustomError::NoSnapshot)?;
let incremental_snapshot_slot =
snapshot_utils::get_highest_incremental_snapshot_archive_slot(
&incremental_snapshot_archives_dir,
incremental_snapshot_archives_dir,
full_snapshot_slot,
);
@ -4105,7 +4104,7 @@ pub mod rpc_deprecated_v1_9 {
meta.snapshot_config
.and_then(|snapshot_config| {
snapshot_utils::get_highest_full_snapshot_archive_slot(
&snapshot_config.full_snapshot_archives_dir,
snapshot_config.full_snapshot_archives_dir,
)
})
.ok_or_else(|| RpcCustomError::NoSnapshot.into())
@ -8561,7 +8560,7 @@ pub mod tests {
let request = create_test_request(
"getFeeForMessage",
Some(json!([base64::encode(&serialize(&legacy_msg).unwrap())])),
Some(json!([base64::encode(serialize(&legacy_msg).unwrap())])),
);
let response: RpcResponse<u64> = parse_success_result(rpc.handle_request_sync(request));
assert_eq!(response.value, TEST_SIGNATURE_FEE);
@ -8580,7 +8579,7 @@ pub mod tests {
let request = create_test_request(
"getFeeForMessage",
Some(json!([base64::encode(&serialize(&v0_msg).unwrap())])),
Some(json!([base64::encode(serialize(&v0_msg).unwrap())])),
);
let response: RpcResponse<u64> = parse_success_result(rpc.handle_request_sync(request));
assert_eq!(response.value, TEST_SIGNATURE_FEE);

View File

@ -827,7 +827,7 @@ impl RpcSubscriptions {
.get(&SubscriptionParams::SlotsUpdates)
{
inc_new_counter_info!("rpc-subscription-notify-slots-updates", 1);
notifier.notify(&slot_update, sub, false);
notifier.notify(slot_update, sub, false);
}
}
// These notifications are only triggered by votes observed on gossip,

View File

@ -164,8 +164,7 @@ fn bench_delete_dependencies(bencher: &mut Bencher) {
let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
for i in 0..1000 {
let pubkey = solana_sdk::pubkey::new_rand();
let account =
AccountSharedData::new((i + 1) as u64, 0, AccountSharedData::default().owner());
let account = AccountSharedData::new(i + 1, 0, AccountSharedData::default().owner());
accounts.store_slow_uncached(i, &pubkey, &account);
accounts.store_slow_uncached(i, &old_pubkey, &zero_account);
old_pubkey = pubkey;

View File

@ -3073,8 +3073,7 @@ mod tests {
info!("storing..");
for i in 0..2_000 {
let pubkey = solana_sdk::pubkey::new_rand();
let account =
AccountSharedData::new((i + 1) as u64, 0, AccountSharedData::default().owner());
let account = AccountSharedData::new(i + 1, 0, AccountSharedData::default().owner());
accounts.store_slow_uncached(i, &pubkey, &account);
accounts.store_slow_uncached(i, &old_pubkey, &zero_account);
old_pubkey = pubkey;

View File

@ -6455,7 +6455,7 @@ impl AccountsDb {
filler_accounts = filler_account_slots * filler_accounts_per_slot;
// keep space for filler accounts
let addl_size = (filler_accounts as u64)
let addl_size = filler_accounts
* ((self.filler_accounts_config.size + STORE_META_OVERHEAD) as u64);
total_size += addl_size;
}
@ -9179,7 +9179,7 @@ impl AccountsDb {
let len = map_bin.len_for_stats();
min_bin_size = std::cmp::min(min_bin_size, len);
max_bin_size = std::cmp::max(max_bin_size, len);
len as usize
len
})
.sum();

View File

@ -1647,7 +1647,7 @@ pub mod tests {
let packaged_result: ExpectedType = (
human_readable,
is_last_slice,
lamports2 as u64,
lamports2,
hash_result_as_string,
);
assert_eq!(expected[expected_index], packaged_result);
@ -1719,7 +1719,7 @@ pub mod tests {
let vecs = vec![vec![account_maps.to_vec()]];
let slice = convert_to_slice2(&vecs);
let result = test_de_dup_accounts_in_parallel(&slice);
assert_eq!(result, (vec![&val.hash], val.lamports as u64, 1));
assert_eq!(result, (vec![&val.hash], val.lamports, 1));
// zero original lamports, higher version
let val = CalculateHashIntermediate::new(hash, 0, key);

View File

@ -3002,27 +3002,27 @@ pub mod tests {
run_test_range_indexes(&index, &pubkeys, Some(ITER_BATCH_SIZE), None);
run_test_range_indexes(&index, &pubkeys, None, Some(2 * ITER_BATCH_SIZE as usize));
run_test_range_indexes(&index, &pubkeys, None, Some(2 * ITER_BATCH_SIZE));
run_test_range_indexes(
&index,
&pubkeys,
Some(ITER_BATCH_SIZE as usize),
Some(2 * ITER_BATCH_SIZE as usize),
Some(ITER_BATCH_SIZE),
Some(2 * ITER_BATCH_SIZE),
);
run_test_range_indexes(
&index,
&pubkeys,
Some(ITER_BATCH_SIZE as usize),
Some(2 * ITER_BATCH_SIZE as usize - 1),
Some(ITER_BATCH_SIZE),
Some(2 * ITER_BATCH_SIZE - 1),
);
run_test_range_indexes(
&index,
&pubkeys,
Some(ITER_BATCH_SIZE - 1_usize),
Some(2 * ITER_BATCH_SIZE as usize + 1),
Some(2 * ITER_BATCH_SIZE + 1),
);
}

View File

@ -3119,7 +3119,7 @@ impl Bank {
// still being stake-weighted.
// Ref: distribute_rent_to_validators
fn collect_fees(&self) {
let collector_fees = self.collector_fees.load(Relaxed) as u64;
let collector_fees = self.collector_fees.load(Relaxed);
if collector_fees != 0 {
let (deposit, mut burn) = self.fee_rate_governor.burn(collector_fees);
@ -6665,7 +6665,7 @@ impl Bank {
.accounts
.bank_hash_info_at(self.slot(), &self.rewrites_skipped_this_slot);
let mut signature_count_buf = [0u8; 8];
LittleEndian::write_u64(&mut signature_count_buf[..], self.signature_count() as u64);
LittleEndian::write_u64(&mut signature_count_buf[..], self.signature_count());
let mut hash = hashv(&[
self.parent_hash.as_ref(),
@ -9297,7 +9297,7 @@ pub(crate) mod tests {
create_genesis_config_with_leader(5, &leader_pubkey, leader_lamports).genesis_config;
genesis_config.cluster_type = ClusterType::MainnetBeta;
const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH as u64;
const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH;
const LEADER_SCHEDULE_SLOT_OFFSET: u64 = SLOTS_PER_EPOCH * 3 - 3;
genesis_config.epoch_schedule =
EpochSchedule::custom(SLOTS_PER_EPOCH, LEADER_SCHEDULE_SLOT_OFFSET, false);
@ -9367,7 +9367,7 @@ pub(crate) mod tests {
create_genesis_config_with_leader(5, &leader_pubkey, leader_lamports).genesis_config;
genesis_config.cluster_type = ClusterType::MainnetBeta;
const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH as u64;
const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH;
const LEADER_SCHEDULE_SLOT_OFFSET: u64 = SLOTS_PER_EPOCH * 3 - 3;
genesis_config.epoch_schedule =
EpochSchedule::custom(SLOTS_PER_EPOCH, LEADER_SCHEDULE_SLOT_OFFSET, false);
@ -9425,7 +9425,7 @@ pub(crate) mod tests {
create_genesis_config_with_leader(5, &leader_pubkey, leader_lamports).genesis_config;
genesis_config.cluster_type = ClusterType::MainnetBeta;
const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH as u64 * 8;
const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH * 8;
const LEADER_SCHEDULE_SLOT_OFFSET: u64 = SLOTS_PER_EPOCH * 3 - 3;
genesis_config.epoch_schedule =
EpochSchedule::custom(SLOTS_PER_EPOCH, LEADER_SCHEDULE_SLOT_OFFSET, true);
@ -9481,7 +9481,7 @@ pub(crate) mod tests {
let mut genesis_config =
create_genesis_config_with_leader(5, &leader_pubkey, leader_lamports).genesis_config;
const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH as u64 * 8;
const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH * 8;
const LEADER_SCHEDULE_SLOT_OFFSET: u64 = SLOTS_PER_EPOCH * 3 - 3;
genesis_config.epoch_schedule =
EpochSchedule::custom(SLOTS_PER_EPOCH, LEADER_SCHEDULE_SLOT_OFFSET, true);
@ -10089,9 +10089,7 @@ pub(crate) mod tests {
// set it up so the first epoch is a full year long
poh_config: PohConfig {
target_tick_duration: Duration::from_secs(
SECONDS_PER_YEAR as u64
/ MINIMUM_SLOTS_PER_EPOCH as u64
/ DEFAULT_TICKS_PER_SLOT,
SECONDS_PER_YEAR as u64 / MINIMUM_SLOTS_PER_EPOCH / DEFAULT_TICKS_PER_SLOT,
),
hashes_per_tick: None,
target_tick_count: None,
@ -10218,9 +10216,7 @@ pub(crate) mod tests {
// set it up so the first epoch is a full year long
poh_config: PohConfig {
target_tick_duration: Duration::from_secs(
SECONDS_PER_YEAR as u64
/ MINIMUM_SLOTS_PER_EPOCH as u64
/ DEFAULT_TICKS_PER_SLOT,
SECONDS_PER_YEAR as u64 / MINIMUM_SLOTS_PER_EPOCH / DEFAULT_TICKS_PER_SLOT,
),
hashes_per_tick: None,
target_tick_count: None,
@ -11985,7 +11981,7 @@ pub(crate) mod tests {
// set this up weird, forces future generation, odd mod(), etc.
// this says: "vote_accounts for epoch X should be generated at slot index 3 in epoch X-2...
const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH as u64;
const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH;
const LEADER_SCHEDULE_SLOT_OFFSET: u64 = SLOTS_PER_EPOCH * 3 - 3;
// no warmup allows me to do the normal division stuff below
genesis_config.epoch_schedule =
@ -12106,11 +12102,8 @@ pub(crate) mod tests {
let bank = Bank::new_for_tests(&genesis_config);
assert_eq!(bank.get_slots_in_epoch(0), MINIMUM_SLOTS_PER_EPOCH as u64);
assert_eq!(
bank.get_slots_in_epoch(2),
(MINIMUM_SLOTS_PER_EPOCH * 4) as u64
);
assert_eq!(bank.get_slots_in_epoch(0), MINIMUM_SLOTS_PER_EPOCH);
assert_eq!(bank.get_slots_in_epoch(2), (MINIMUM_SLOTS_PER_EPOCH * 4));
assert_eq!(
bank.get_slots_in_epoch(5000),
genesis_config.epoch_schedule.slots_per_epoch
@ -14383,7 +14376,7 @@ pub(crate) mod tests {
let account_indexes = (0..num_accounts_to_pass)
.map(|_| thread_rng().gen_range(0, num_keys))
.collect();
let program_index: u8 = thread_rng().gen_range(0, num_keys) as u8;
let program_index: u8 = thread_rng().gen_range(0, num_keys);
if thread_rng().gen_ratio(4, 5) {
let programs_index = thread_rng().gen_range(0, program_keys.len());
account_keys[program_index as usize] = program_keys[programs_index].0;
@ -14415,7 +14408,7 @@ pub(crate) mod tests {
} else {
1
};
thread_rng().gen_range(0, max) as u8
thread_rng().gen_range(0, max)
};
let num_readonly_unsigned_accounts = if thread_rng().gen_ratio(1, 5)
@ -14675,7 +14668,7 @@ pub(crate) mod tests {
let pubkey0_size = get_shrink_account_size();
let account0 = AccountSharedData::new(1000, pubkey0_size as usize, &Pubkey::new_unique());
let account0 = AccountSharedData::new(1000, pubkey0_size, &Pubkey::new_unique());
bank0.store_account(&pubkey0, &account0);
goto_end_of_slot(Arc::<Bank>::get_mut(&mut bank0).unwrap());
@ -19858,8 +19851,8 @@ pub(crate) mod tests {
bank.collect_rent_eagerly(false);
let accounts_data_size_delta_after_collecting_rent = bank.load_accounts_data_size_delta();
let accounts_data_size_delta_delta = accounts_data_size_delta_after_collecting_rent as i64
- accounts_data_size_delta_before_collecting_rent as i64;
let accounts_data_size_delta_delta = accounts_data_size_delta_after_collecting_rent
- accounts_data_size_delta_before_collecting_rent;
assert!(accounts_data_size_delta_delta < 0);
let reclaimed_data_size = accounts_data_size_delta_delta.saturating_neg() as usize;

View File

@ -105,13 +105,13 @@ impl BucketMapHolderStats {
let age_now = storage.current_age();
let ages_flushed = storage.count_buckets_flushed() as u64;
let last_age = self.last_age.swap(age_now, Ordering::Relaxed) as u64;
let last_ages_flushed = self.last_ages_flushed.swap(ages_flushed, Ordering::Relaxed) as u64;
let last_ages_flushed = self.last_ages_flushed.swap(ages_flushed, Ordering::Relaxed);
let mut age_now = age_now as u64;
if last_age > age_now {
// age wrapped
age_now += u8::MAX as u64 + 1;
}
let age_delta = age_now.saturating_sub(last_age) as u64;
let age_delta = age_now.saturating_sub(last_age);
if age_delta > 0 {
return elapsed_ms / age_delta;
} else {

View File

@ -30,7 +30,7 @@ impl PubkeyBinCalculator24 {
pub fn bin_from_pubkey(&self, pubkey: &Pubkey) -> usize {
let as_ref = pubkey.as_ref();
(((as_ref[0] as usize * 256 + as_ref[1] as usize) * 256 + as_ref[2] as usize) as usize)
((as_ref[0] as usize * 256 + as_ref[1] as usize) * 256 + as_ref[2] as usize)
>> self.shift_bits
}

View File

@ -786,7 +786,7 @@ pub mod tests {
// bitfield sizes are powers of 2, cycle through values of 1, 2, 4, .. 2^9
for power in 0..10 {
let max_bitfield_width = 2u64.pow(power) as u64;
let max_bitfield_width = 2u64.pow(power);
let width_iteration_max = if max_bitfield_width > 1 {
// add up to 2 items so we can test out multiple items
3

View File

@ -597,7 +597,7 @@ fn remap_append_vec_file(
let (remapped_append_vec_id, remapped_append_vec_path) = loop {
let remapped_append_vec_id = next_append_vec_id.fetch_add(1, Ordering::AcqRel);
let remapped_file_name = AppendVec::file_name(slot, remapped_append_vec_id);
let remapped_append_vec_path = append_vec_path.parent().unwrap().join(&remapped_file_name);
let remapped_append_vec_path = append_vec_path.parent().unwrap().join(remapped_file_name);
// Break out of the loop in the following situations:
// 1. The new ID is the same as the original ID. This means we do not need to

View File

@ -43,8 +43,8 @@ fn copy_append_vecs<P: AsRef<Path>>(
// Copy file to new directory
let storage_path = storage_entry.get_path();
let file_name = AppendVec::file_name(storage_entry.slot(), storage_entry.append_vec_id());
let output_path = output_dir.as_ref().join(&file_name);
std::fs::copy(&storage_path, &output_path)?;
let output_path = output_dir.as_ref().join(file_name);
std::fs::copy(storage_path, &output_path)?;
// Read new file into append-vec and build new entry
let (append_vec, num_accounts) =

View File

@ -94,7 +94,7 @@ impl AccountsPackage {
snapshot_utils::path_to_file_name_str(&bank_snapshot_info.snapshot_path)?;
fs::hard_link(
&bank_snapshot_info.snapshot_path,
&snapshot_hardlink_dir.join(file_name),
snapshot_hardlink_dir.join(file_name),
)?;
}

View File

@ -350,7 +350,7 @@ pub fn archive_snapshot_package(
// Add the snapshots to the staging directory
symlink::symlink_dir(
snapshot_package.snapshot_links.path(),
&staging_snapshots_dir,
staging_snapshots_dir,
)
.map_err(|e| SnapshotError::IoWithSource(e, "create staging symlinks"))?;
@ -869,10 +869,7 @@ fn verify_and_unarchive_snapshots(
incremental_snapshot_archive_info,
)?;
let parallel_divisions = std::cmp::min(
PARALLEL_UNTAR_READERS_DEFAULT,
std::cmp::max(1, num_cpus::get() / 4),
);
let parallel_divisions = (num_cpus::get() / 4).clamp(1, PARALLEL_UNTAR_READERS_DEFAULT);
let next_append_vec_id = Arc::new(AtomicU32::new(0));
let unarchived_full_snapshot = unarchive_snapshot(
@ -3206,7 +3203,7 @@ mod tests {
] {
let snapshot_path = incremental_snapshot_archives_dir
.path()
.join(&snapshot_filenames);
.join(snapshot_filenames);
File::create(snapshot_path).unwrap();
}
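
One hunk above collapses `std::cmp::min(PARALLEL_UNTAR_READERS_DEFAULT, std::cmp::max(1, num_cpus::get() / 4))` into `(num_cpus::get() / 4).clamp(1, PARALLEL_UNTAR_READERS_DEFAULT)`. The two forms agree whenever the upper bound is at least 1; `clamp` panics if the lower bound exceeds the upper bound. A small stand-alone check of that equivalence, using a stand-in value for the constant:

// Equivalence check for the min/max -> clamp rewrite above.
// The constant value here is a stand-in, not the real PARALLEL_UNTAR_READERS_DEFAULT.
const PARALLEL_UNTAR_READERS_DEFAULT: usize = 4;

fn old_form(cpus: usize) -> usize {
    std::cmp::min(PARALLEL_UNTAR_READERS_DEFAULT, std::cmp::max(1, cpus / 4))
}

fn new_form(cpus: usize) -> usize {
    // Requires PARALLEL_UNTAR_READERS_DEFAULT >= 1, otherwise clamp panics.
    (cpus / 4).clamp(1, PARALLEL_UNTAR_READERS_DEFAULT)
}

fn main() {
    for cpus in 0..64 {
        assert_eq!(old_form(cpus), new_form(cpus));
    }
    println!("min/max and clamp agree for 0..64 cpus");
}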

View File

@ -68,7 +68,7 @@ impl SnapshotStorageRebuilder {
) -> Result<RebuiltSnapshotStorage, SnapshotError> {
let (snapshot_version_path, snapshot_file_path, append_vec_files) =
Self::get_version_and_snapshot_files(&file_receiver);
let snapshot_version_str = snapshot_version_from_file(&snapshot_version_path)?;
let snapshot_version_str = snapshot_version_from_file(snapshot_version_path)?;
let snapshot_version = snapshot_version_str.parse().map_err(|_| {
get_io_error(&format!(
"unsupported snapshot version: {}",
@ -283,7 +283,7 @@ impl SnapshotStorageRebuilder {
.snapshot_storage_lengths
.get(&slot)
.unwrap()
.get(&(old_append_vec_id as usize))
.get(&old_append_vec_id)
.unwrap();
let storage_entry = remap_and_reconstruct_single_storage(

View File

@ -256,7 +256,7 @@ pub mod tests {
for entry in 0..entries {
let pk = Pubkey::new(&[entry; 32]);
let account = AccountSharedData::create(
((entry as u64) * starting_slot) as u64,
(entry as u64) * starting_slot,
Vec::default(),
Pubkey::default(),
false,

View File

@ -61,7 +61,7 @@ fn fill_epoch_with_votes(
&[vote_instruction::vote(
&vote_pubkey,
&vote_pubkey,
Vote::new(vec![parent.slot() as u64], parent.hash()),
Vote::new(vec![parent.slot()], parent.hash()),
)],
Some(&mint_pubkey),
);

View File

@ -370,7 +370,7 @@ fn link_sbf_toolchain(config: &Config) {
let rustup_args = vec!["toolchain", "list", "-v"];
let rustup_output = spawn(
&rustup,
&rustup_args,
rustup_args,
config.generate_child_script_on_failure,
);
if config.verbose {
@ -390,7 +390,7 @@ fn link_sbf_toolchain(config: &Config) {
];
let output = spawn(
&rustup,
&rustup_args,
rustup_args,
config.generate_child_script_on_failure,
);
if config.verbose {
@ -411,7 +411,7 @@ fn link_sbf_toolchain(config: &Config) {
];
let output = spawn(
&rustup,
&rustup_args,
rustup_args,
config.generate_child_script_on_failure,
);
if config.verbose {
@ -599,7 +599,7 @@ fn build_sbf_package(config: &Config, target_directory: &Path, package: &cargo_m
target_rustflags = Cow::Owned(format!("{} -C target_cpu=sbfv2", &target_rustflags));
}
if let Cow::Owned(flags) = target_rustflags {
env::set_var(cargo_target, &flags);
env::set_var(cargo_target, flags);
}
if config.verbose {
debug!(
@ -659,11 +659,11 @@ fn build_sbf_package(config: &Config, target_directory: &Path, package: &cargo_m
}
if let Some(program_name) = program_name {
let program_unstripped_so = target_build_directory.join(&format!("{}.so", program_name));
let program_dump = sbf_out_dir.join(&format!("{}-dump.txt", program_name));
let program_so = sbf_out_dir.join(&format!("{}.so", program_name));
let program_debug = sbf_out_dir.join(&format!("{}.debug", program_name));
let program_keypair = sbf_out_dir.join(&format!("{}-keypair.json", program_name));
let program_unstripped_so = target_build_directory.join(format!("{}.so", program_name));
let program_dump = sbf_out_dir.join(format!("{}-dump.txt", program_name));
let program_so = sbf_out_dir.join(format!("{}.so", program_name));
let program_debug = sbf_out_dir.join(format!("{}.debug", program_name));
let program_keypair = sbf_out_dir.join(format!("{}-keypair.json", program_name));
fn file_older_or_missing(prerequisite_file: &Path, target_file: &Path) -> bool {
let prerequisite_metadata = fs::metadata(prerequisite_file).unwrap_or_else(|err| {

View File

@ -75,7 +75,7 @@ impl EpochSchedule {
)
}
pub fn custom(slots_per_epoch: u64, leader_schedule_slot_offset: u64, warmup: bool) -> Self {
assert!(slots_per_epoch >= MINIMUM_SLOTS_PER_EPOCH as u64);
assert!(slots_per_epoch >= MINIMUM_SLOTS_PER_EPOCH);
let (first_normal_epoch, first_normal_slot) = if warmup {
let next_power_of_two = slots_per_epoch.next_power_of_two();
let log2_slots_per_epoch = next_power_of_two
@@ -102,7 +102,7 @@ impl EpochSchedule {
pub fn get_slots_in_epoch(&self, epoch: Epoch) -> u64 {
if epoch < self.first_normal_epoch {
2u64.saturating_pow(
(epoch as u32).saturating_add(MINIMUM_SLOTS_PER_EPOCH.trailing_zeros() as u32),
(epoch as u32).saturating_add(MINIMUM_SLOTS_PER_EPOCH.trailing_zeros()),
)
} else {
self.slots_per_epoch
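
Both EpochSchedule edits are the same story: `MINIMUM_SLOTS_PER_EPOCH` is already a `u64` constant, and `u64::trailing_zeros` already returns a `u32`, so neither cast changed the type of anything. A compact sketch (the constant's value here is illustrative, not quoted from solana-sdk):

fn main() {
    const MINIMUM_SLOTS_PER_EPOCH: u64 = 32; // illustrative value

    // trailing_zeros() already returns u32, so an `as u32` would be a no-op.
    let shift: u32 = MINIMUM_SLOTS_PER_EPOCH.trailing_zeros();
    assert_eq!(shift, 5);

    // And a u64 compares against a u64 constant without any `as u64`.
    let slots_per_epoch: u64 = 8192;
    assert!(slots_per_epoch >= MINIMUM_SLOTS_PER_EPOCH);
}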

View File

@@ -120,8 +120,7 @@ impl FeeRateGovernor {
.min(me.min_lamports_per_signature.max(
me.target_lamports_per_signature
* std::cmp::min(latest_signatures_per_slot, std::u32::MAX as u64)
as u64
/ me.target_signatures_per_slot as u64,
/ me.target_signatures_per_slot,
));
trace!(

View File

@@ -60,11 +60,11 @@ pub trait SyscallStubs: Sync + Send {
is_nonoverlapping(src as usize, n, dst as usize, n),
"memcpy does not support overlapping regions"
);
std::ptr::copy_nonoverlapping(src, dst, n as usize);
std::ptr::copy_nonoverlapping(src, dst, n);
}
/// # Safety
unsafe fn sol_memmove(&self, dst: *mut u8, src: *const u8, n: usize) {
std::ptr::copy(src, dst, n as usize);
std::ptr::copy(src, dst, n);
}
/// # Safety
unsafe fn sol_memcmp(&self, s1: *const u8, s2: *const u8, n: usize, result: *mut i32) {
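
Here `n` is already the `usize` that `std::ptr::copy_nonoverlapping` and `std::ptr::copy` expect for their `count` argument, so `n as usize` was another no-op cast. The same calls on a local buffer, outside any syscall stub:

fn main() {
    let src = [1u8, 2, 3, 4];
    let mut dst = [0u8; 4];
    let n: usize = src.len();

    // count is a usize; no cast needed, and these regions do not overlap.
    unsafe { std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), n) };
    assert_eq!(dst, [1, 2, 3, 4]);
}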

View File

@@ -117,7 +117,7 @@ impl Rent {
/// This is used for testing.
pub fn with_slots_per_epoch(slots_per_epoch: u64) -> Self {
let ratio = slots_per_epoch as f64 / DEFAULT_SLOTS_PER_EPOCH as f64;
let exemption_threshold = DEFAULT_EXEMPTION_THRESHOLD as f64 * ratio;
let exemption_threshold = DEFAULT_EXEMPTION_THRESHOLD * ratio;
let lamports_per_byte_year = (DEFAULT_LAMPORTS_PER_BYTE_YEAR as f64 / ratio) as u64;
Self {
lamports_per_byte_year,

View File

@@ -63,9 +63,7 @@ impl VisitError {
A: SeqAccess<'de>,
{
match self {
VisitError::TooLong(len) => {
de::Error::invalid_length(len as usize, &"three or fewer bytes")
}
VisitError::TooLong(len) => de::Error::invalid_length(len, &"three or fewer bytes"),
VisitError::TooShort(len) => de::Error::invalid_length(len, &"more bytes"),
VisitError::Overflow(val) => de::Error::invalid_value(
de::Unexpected::Unsigned(val as u64),

View File

@@ -764,7 +764,7 @@ mod tests {
let credits = (MAX_EPOCH_CREDITS_HISTORY + 2) as u64;
for i in 0..credits {
vote_state.increment_credits(i as u64, 1);
vote_state.increment_credits(i, 1);
}
assert_eq!(vote_state.credits(), credits);
assert!(vote_state.epoch_credits().len() <= MAX_EPOCH_CREDITS_HISTORY);

View File

@@ -486,7 +486,7 @@ fn shared_serialize_data<T: serde::Serialize, U: WritableAccount>(
if bincode::serialized_size(state)? > account.data().len() as u64 {
return Err(Box::new(bincode::ErrorKind::SizeLimit));
}
bincode::serialize_into(&mut account.data_as_mut_slice(), state)
bincode::serialize_into(account.data_as_mut_slice(), state)
}
impl Account {
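
Dropping the `&mut` works because `bincode::serialize_into` accepts any writer implementing `std::io::Write`; `&mut [u8]` (which `data_as_mut_slice` presumably returns) already implements `Write`, so wrapping it in another `&mut` only nested one writer inside another. A std-only sketch of that `Write` impl, without bincode or account types:

use std::io::Write;

fn main() {
    let mut data = [0u8; 8];

    // `&mut [u8]` is itself a Write: bytes go into the front of the slice
    // and the slice is advanced past what was written.
    let mut writer: &mut [u8] = &mut data[..];
    writer.write_all(&[0xde, 0xad, 0xbe, 0xef]).unwrap();

    assert_eq!(data[..4], [0xde, 0xad, 0xbe, 0xef]);
}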

View File

@@ -18,5 +18,5 @@ pub fn load() -> Result<Certificate, String> {
include_bytes!("pki-goog-roots.pem").to_vec()
}
};
Ok(Certificate::from_pem(&pem))
Ok(Certificate::from_pem(pem))
}

View File

@@ -397,12 +397,12 @@ fn compute_recieve_window(
) -> Result<VarInt, VarIntBoundsExceeded> {
match peer_type {
ConnectionPeerType::Unstaked => {
VarInt::from_u64((PACKET_DATA_SIZE as u64 * QUIC_UNSTAKED_RECEIVE_WINDOW_RATIO) as u64)
VarInt::from_u64(PACKET_DATA_SIZE as u64 * QUIC_UNSTAKED_RECEIVE_WINDOW_RATIO)
}
ConnectionPeerType::Staked => {
let ratio =
compute_receive_window_ratio_for_staked_node(max_stake, min_stake, peer_stake);
VarInt::from_u64((PACKET_DATA_SIZE as u64 * ratio) as u64)
VarInt::from_u64(PACKET_DATA_SIZE as u64 * ratio)
}
}
}
@@ -1506,11 +1506,11 @@ pub mod test {
)
.is_some());
assert_eq!(table.total_size, num_entries as usize);
assert_eq!(table.total_size, num_entries);
let new_max_size = 3;
let pruned = table.prune_oldest(new_max_size);
assert!(pruned >= num_entries as usize - new_max_size);
assert!(pruned >= num_entries - new_max_size);
assert!(table.table.len() <= new_max_size);
assert!(table.total_size <= new_max_size);

View File

@@ -224,6 +224,7 @@ impl ThinClient {
if num_confirmed == 0 {
let conn = self.connection_cache.get_connection(self.tpu_addr());
// Send the transaction if there has been no confirmation (e.g. the first time)
#[allow(clippy::needless_borrow)]
conn.send_wire_transaction(&wire_transaction)?;
}
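
This is the one spot in the section where the lint is allowed rather than fixed. `send_wire_transaction` takes a generic parameter (bounded by something like `AsRef<[u8]>`), so clippy sees the `&` as needless, but passing by reference is presumably what lets `wire_transaction` be reused on later retries, so the statement keeps the borrow and silences the lint locally. A toy version of that shape, with invented names:

fn send<T: AsRef<[u8]>>(bytes: T) -> usize {
    bytes.as_ref().len()
}

fn main() {
    let wire_transaction = vec![0u8; 16];
    for _ in 0..3 {
        // Clippy may suggest dropping the `&`, but the borrow is what keeps
        // the Vec available for the next iteration, so allow the lint here.
        #[allow(clippy::needless_borrow)]
        let _sent = send(&wire_transaction);
    }
}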

View File

@@ -372,7 +372,7 @@ impl TpuClient {
LeaderTpuService::new(rpc_client.clone(), websocket_url, exit.clone()).await?;
Ok(Self {
fanout_slots: config.fanout_slots.min(MAX_FANOUT_SLOTS).max(1),
fanout_slots: config.fanout_slots.clamp(1, MAX_FANOUT_SLOTS),
leader_tpu_service,
exit,
rpc_client,
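
`.min(MAX_FANOUT_SLOTS).max(1)` and `.clamp(1, MAX_FANOUT_SLOTS)` compute the same thing whenever the lower bound does not exceed the upper one; `clamp` just states the intent directly (and panics if the bounds are inverted), which is the rewrite clippy's `manual_clamp` lint suggests for this pattern. A quick check of the equivalence with an illustrative bound:

fn main() {
    const MAX_FANOUT_SLOTS: u64 = 100; // illustrative, not quoted from the crate

    for requested in [0u64, 7, 1_000] {
        let via_min_max = requested.min(MAX_FANOUT_SLOTS).max(1);
        let via_clamp = requested.clamp(1, MAX_FANOUT_SLOTS);
        assert_eq!(via_min_max, via_clamp);
    }
}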

View File

@@ -40,7 +40,7 @@ pub trait TpuConnection {
) -> TransportResult<()> {
let wire_transaction =
bincode::serialize(transaction).expect("serialize Transaction in send_batch");
self.send_wire_transaction(&wire_transaction)
self.send_wire_transaction(wire_transaction)
}
fn send_wire_transaction<T>(&self, wire_transaction: T) -> TransportResult<()>
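
Unlike the ThinClient case above, the serialized buffer here is not needed after the call, so the owned `Vec<u8>` can be moved straight into the generic parameter and the `&` really was needless. The shape in miniature (bounds are assumed, not copied from the trait):

fn send_wire_transaction<T: AsRef<[u8]>>(wire_transaction: T) -> usize {
    wire_transaction.as_ref().len()
}

fn main() {
    // Serialized-bytes stand-in; consumed by the call and never used again.
    let wire_transaction: Vec<u8> = vec![1, 2, 3];
    assert_eq!(send_wire_transaction(wire_transaction), 3);
}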

View File

@@ -1476,7 +1476,7 @@ mod test {
.unwrap();
let ui_meta_from: UiTransactionStatusMeta = meta.clone().into();
assert_eq!(
serde_json::to_value(&ui_meta_from).unwrap(),
serde_json::to_value(ui_meta_from).unwrap(),
expected_json_output_value
);
@@ -1497,13 +1497,13 @@
.unwrap();
let ui_meta_parse_with_rewards = UiTransactionStatusMeta::parse(meta.clone(), &[], true);
assert_eq!(
serde_json::to_value(&ui_meta_parse_with_rewards).unwrap(),
serde_json::to_value(ui_meta_parse_with_rewards).unwrap(),
expected_json_output_value
);
let ui_meta_parse_no_rewards = UiTransactionStatusMeta::parse(meta, &[], false);
assert_eq!(
serde_json::to_value(&ui_meta_parse_no_rewards).unwrap(),
serde_json::to_value(ui_meta_parse_no_rewards).unwrap(),
expected_json_output_value
);
}
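
`serde_json::to_value` is generic over any `T: Serialize` taken by value, and `&T` is `Serialize` whenever `T` is, so the `&` in these asserts added nothing; each meta is used exactly once and can be passed in directly. A minimal reproduction, assuming `serde` (with derive) and `serde_json` as dependencies:

use serde::Serialize;

#[derive(Serialize)]
struct Meta {
    fee: u64,
}

fn main() {
    let meta = Meta { fee: 5000 };

    // Before: serde_json::to_value(&meta) — works, but clippy calls the
    // borrow needless because to_value accepts the owned value too.
    let value = serde_json::to_value(meta).unwrap();
    assert_eq!(value["fee"], 5000);
}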

View File

@@ -876,9 +876,9 @@ fn remove_directory_contents(ledger_path: &Path) -> Result<(), io::Error> {
for entry in fs::read_dir(ledger_path)? {
let entry = entry?;
if entry.metadata()?.is_dir() {
fs::remove_dir_all(&entry.path())?
fs::remove_dir_all(entry.path())?
} else {
fs::remove_file(&entry.path())?
fs::remove_file(entry.path())?
}
}
Ok(())
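
`DirEntry::path()` hands back an owned `PathBuf`, and `fs::remove_dir_all` / `fs::remove_file` take `impl AsRef<Path>`, so borrowing the temporary with `&entry.path()` only referenced a value that was about to be dropped. The same loop shape as a standalone helper:

use std::{fs, io, path::Path};

// Hypothetical helper mirroring the hunk above.
fn clear_dir(dir: &Path) -> io::Result<()> {
    for entry in fs::read_dir(dir)? {
        let entry = entry?;
        if entry.metadata()?.is_dir() {
            fs::remove_dir_all(entry.path())?; // PathBuf passed directly
        } else {
            fs::remove_file(entry.path())?;
        }
    }
    Ok(())
}

fn main() -> io::Result<()> {
    let scratch = std::env::temp_dir().join("clear_dir_example");
    fs::create_dir_all(&scratch)?;
    fs::write(scratch.join("file.txt"), b"x")?;
    clear_dir(&scratch)?;
    fs::remove_dir(&scratch)
}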

View File

@@ -127,7 +127,7 @@ impl Dashboard {
let new_identity = rpc_client.get_identity().unwrap_or(identity);
if identity != new_identity {
identity = new_identity;
progress_bar.println(&format_name_value("Identity:", &identity.to_string()));
progress_bar.println(format_name_value("Identity:", &identity.to_string()));
}
match get_validator_stats(&rpc_client, &identity) {

View File

@@ -173,7 +173,7 @@ pub fn ledger_lockfile(ledger_path: &Path) -> RwLock<File> {
OpenOptions::new()
.write(true)
.create(true)
.open(&lockfile)
.open(lockfile)
.unwrap(),
)
}

View File

@@ -259,7 +259,7 @@ fn wait_for_restart_window(
style("Node is unhealthy").red().to_string()
} else {
// Wait until a hole in the leader schedule before restarting the node
let in_leader_schedule_hole = if epoch_info.slot_index + min_idle_slots as u64
let in_leader_schedule_hole = if epoch_info.slot_index + min_idle_slots
> epoch_info.slots_in_epoch
{
Err("Current epoch is almost complete".to_string())

View File

@@ -319,7 +319,7 @@ mod tests {
assert_eq!(amount, decoded.unwrap());
// max amount
let amount: u64 = ((1_u64 << 32) - 1) as u64;
let amount: u64 = (1_u64 << 32) - 1;
let instance = DiscreteLog::new(G, Scalar::from(amount) * G);

View File

@@ -303,11 +303,7 @@ impl TransferProof {
// generate the range proof
let range_proof = if TRANSFER_AMOUNT_LO_BITS == 32 {
RangeProof::new(
vec![
source_new_balance,
transfer_amount_lo as u64,
transfer_amount_hi as u64,
],
vec![source_new_balance, transfer_amount_lo, transfer_amount_hi],
vec![
TRANSFER_SOURCE_AMOUNT_BITS,
TRANSFER_AMOUNT_LO_BITS,
@@ -318,15 +314,15 @@
)
} else {
let transfer_amount_lo_negated =
(1 << TRANSFER_AMOUNT_LO_NEGATED_BITS) - 1 - transfer_amount_lo as u64;
(1 << TRANSFER_AMOUNT_LO_NEGATED_BITS) - 1 - transfer_amount_lo;
let opening_lo_negated = &PedersenOpening::default() - opening_lo;
RangeProof::new(
vec![
source_new_balance,
transfer_amount_lo as u64,
transfer_amount_lo,
transfer_amount_lo_negated,
transfer_amount_hi as u64,
transfer_amount_hi,
],
vec![
TRANSFER_SOURCE_AMOUNT_BITS,
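
The zk-token-sdk hunks are the `unnecessary_cast` pattern one more time: by the time they reach `RangeProof::new`, `transfer_amount_lo` and `transfer_amount_hi` are already `u64`, so both the amounts vector and the negated low amount can be built without `as u64`. A toy version of that arithmetic with illustrative values (the real bit widths live in the SDK's constants):

fn main() {
    const TRANSFER_AMOUNT_LO_NEGATED_BITS: u32 = 16; // illustrative width
    let source_new_balance: u64 = 1_000_000;
    let transfer_amount_lo: u64 = 0x1234;
    let transfer_amount_hi: u64 = 0x0042;

    // (1 << BITS) - 1 - lo, entirely in u64, no casts required.
    let transfer_amount_lo_negated =
        (1u64 << TRANSFER_AMOUNT_LO_NEGATED_BITS) - 1 - transfer_amount_lo;

    let amounts = vec![
        source_new_balance,
        transfer_amount_lo,
        transfer_amount_lo_negated,
        transfer_amount_hi,
    ];
    assert_eq!(amounts.len(), 4);
    assert_eq!(transfer_amount_lo_negated, 0xffff - 0x1234);
}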

View File

@@ -402,8 +402,8 @@ impl TransferWithFeeProof {
RangeProof::new(
vec![
source_new_balance,
transfer_amount_lo as u64,
transfer_amount_hi as u64,
transfer_amount_lo,
transfer_amount_hi,
delta_fee,
MAX_FEE_BASIS_POINTS - delta_fee,
],
@@ -425,15 +425,15 @@
)
} else {
let transfer_amount_lo_negated =
((1 << TRANSFER_AMOUNT_LO_NEGATED_BITS) - 1) - transfer_amount_lo as u64;
((1 << TRANSFER_AMOUNT_LO_NEGATED_BITS) - 1) - transfer_amount_lo;
let opening_lo_negated = &PedersenOpening::default() - opening_lo;
RangeProof::new(
vec![
source_new_balance,
transfer_amount_lo as u64,
transfer_amount_lo,
transfer_amount_lo_negated,
transfer_amount_hi as u64,
transfer_amount_hi,
delta_fee,
MAX_FEE_BASIS_POINTS - delta_fee,
],