Fixing all the sub overflows by using saturating sub (#309)

galactus 2024-01-30 15:24:35 +01:00 committed by GitHub
parent 8334b2b48c
commit 118f5435e3
11 changed files with 23 additions and 17 deletions
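For context, a minimal standalone sketch (not part of the diff) of the behaviour the commit relies on: plain `-` on unsigned integers panics on underflow in debug builds and wraps in release, `saturating_sub` clamps at zero, and `checked_sub` reports the underflow instead:

fn main() {
    let size_before: u64 = 3;
    let size_after: u64 = 5; // e.g. the cache grew instead of shrinking

    // let diff = size_before - size_after; // debug build: "attempt to subtract with overflow"
    let diff = size_before.saturating_sub(size_after);
    assert_eq!(diff, 0);

    // checked_sub is the alternative when "zero" and "underflow" must stay distinguishable
    assert_eq!(size_before.checked_sub(size_after), None);
}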

View File

@@ -26,7 +26,7 @@ pub fn calculate_supp_percentiles(
.map(|p| {
let prio_fee = {
let index = prio_fees_in_block.len() * p / 100;
- let cap_index = index.min(prio_fees_in_block.len() - 1);
+ let cap_index = index.min(prio_fees_in_block.len().saturating_sub(1));
prio_fees_in_block[cap_index].0
};
FeePoint {
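A hedged sketch of the capped-index pattern in this hunk (the (u64, u64) element type and the Option return are assumptions, not the crate's API); note that saturating_sub(1) on a zero length still leaves index 0, so an empty input needs its own guard:

fn percentile_fee(prio_fees_in_block: &[(u64, u64)], p: usize) -> Option<u64> {
    if prio_fees_in_block.is_empty() {
        return None; // indexing would still panic on an empty slice
    }
    let index = prio_fees_in_block.len() * p / 100;
    let cap_index = index.min(prio_fees_in_block.len().saturating_sub(1));
    Some(prio_fees_in_block[cap_index].0)
}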

View File

@@ -66,7 +66,7 @@ impl LeaderFetcherInterface for GrpcLeaderGetter {
);
}
- let limit = to - from;
+ let limit = to.saturating_sub(from);
let schedule = leader_schedule_data
.get_slot_leaders(from, limit, self.epoch_data.get_epoch_schedule())

View File

@@ -229,9 +229,9 @@ pub fn create_grpc_multiplex_blocks_subscription(
cleanup_without_confirmed_recv_blocks_meta += 1;
let size_before = recent_processed_blocks.len();
recent_processed_blocks.retain(|_blockhash, block| {
- last_finalized_slot == 0 || block.slot > last_finalized_slot - CLEANUP_SLOTS_BEHIND_FINALIZED
+ last_finalized_slot == 0 || block.slot > last_finalized_slot.saturating_sub(CLEANUP_SLOTS_BEHIND_FINALIZED)
});
- let cnt_cleaned = size_before - recent_processed_blocks.len();
+ let cnt_cleaned = size_before.saturating_sub(recent_processed_blocks.len());
if cnt_cleaned > 0 {
debug!("cleaned {} processed blocks from cache", cnt_cleaned);
}
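A self-contained sketch of the cleanup pattern above, with the map simplified to blockhash -> slot (an assumption about the real value type); retain() can only remove entries, so the count cannot underflow in practice, and saturating_sub just makes that explicit:

use std::collections::HashMap;

fn cleanup(
    recent_processed_blocks: &mut HashMap<String, u64>,
    last_finalized_slot: u64,
    cleanup_slots_behind_finalized: u64,
) -> usize {
    let size_before = recent_processed_blocks.len();
    recent_processed_blocks.retain(|_blockhash, slot| {
        last_finalized_slot == 0
            || *slot > last_finalized_slot.saturating_sub(cleanup_slots_behind_finalized)
    });
    size_before.saturating_sub(recent_processed_blocks.len())
}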

View File

@@ -47,12 +47,15 @@ impl JsonRpcLeaderGetter {
if last_slot_needed >= first_slot_to_fetch {
let leaders = self
.rpc_client
- .get_slot_leaders(first_slot_to_fetch, last_slot_needed - first_slot_to_fetch)
+ .get_slot_leaders(
+ first_slot_to_fetch,
+ last_slot_needed.saturating_sub(first_slot_to_fetch),
+ )
.await
.context("failed to get slot leaders")?;
for leader_slot in first_slot_to_fetch..last_slot_needed {
- let current_leader = (leader_slot - first_slot_to_fetch) as usize;
+ let current_leader = (leader_slot.saturating_sub(first_slot_to_fetch)) as usize;
let pubkey = leaders[current_leader];
leader_queue.push_back(LeaderData {
leader_slot,
@@ -71,7 +74,7 @@ impl LeaderFetcherInterface for JsonRpcLeaderGetter {
from: solana_sdk::slot_history::Slot,
to: solana_sdk::slot_history::Slot,
) -> anyhow::Result<Vec<LeaderData>> {
- if to <= from || to - from > self.leaders_to_cache_count {
+ if to <= from || to.saturating_sub(from) > self.leaders_to_cache_count {
bail!("invalid arguments for get_slot_leaders");
}
let schedule = self.leader_schedule.read().await;
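A simplified sketch of the range handling in this file (a flat function instead of the trait method, with the return type assumed): the guard rejects inverted or oversized ranges first, and saturating_sub keeps the per-slot index arithmetic safe even if that guard is ever loosened:

fn slot_indexes(from: u64, to: u64, leaders_to_cache_count: u64) -> anyhow::Result<Vec<usize>> {
    if to <= from || to.saturating_sub(from) > leaders_to_cache_count {
        anyhow::bail!("invalid arguments for get_slot_leaders");
    }
    // same index arithmetic as the fetch loop above, one entry per slot in the range
    Ok((from..to)
        .map(|leader_slot| leader_slot.saturating_sub(from) as usize)
        .collect())
}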

View File

@@ -117,7 +117,10 @@ pub async fn main() {
let row_count_after = count_rows(session.client.clone()).await;
info!("total: {}", row_count_after);
- info!("inserted: {}", row_count_after - row_count_before);
+ info!(
+ "inserted: {}",
+ row_count_after.saturating_sub(row_count_before)
+ );
}
async fn count_rows(client: Arc<tokio_postgres::Client>) -> i64 {

View File

@@ -57,10 +57,10 @@ pub async fn main() -> anyhow::Result<()> {
// ATM we are 4000 slots behind ...
// TODO reduce 4000 to 0
- let slot = 234332620; // literpc3 - local
- // let slot = 231541684;
+ let slot: u64 = 234332620; // literpc3 - local
+ // let slot = 231541684;
let delta = 50 + rand::random::<u64>() % 100;
- let query_slot = slot - delta;
+ let query_slot = slot.saturating_sub(delta);
info!("query slot (-{}): {}", delta, query_slot);
let (epoch_cache, _) = &epoch_data;

View File

@@ -63,7 +63,7 @@ impl MultipleStrategyBlockStorage {
if let Some(faithful_block_storage) = &self.faithful_block_storage {
let faithful_storage_range = faithful_block_storage.get_slot_range();
trace!("Faithful storage range: {:?}", faithful_storage_range);
- if lower - faithful_storage_range.end() <= 1 {
+ if lower.saturating_sub(*faithful_storage_range.end()) <= 1 {
// move the lower bound to the left
lower = lower.min(*faithful_storage_range.start());
}
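A sketch of the bound adjustment above, with a plain RangeInclusive standing in for the storage range (an assumption about the real type): ranges that touch or overlap extend the lower bound to the left, and saturating_sub returns 0 in the overlap case where the old `-` would have underflowed:

use std::ops::RangeInclusive;

fn merged_lower(mut lower: u64, faithful_storage_range: RangeInclusive<u64>) -> u64 {
    if lower.saturating_sub(*faithful_storage_range.end()) <= 1 {
        // move the lower bound to the left
        lower = lower.min(*faithful_storage_range.start());
    }
    lower
}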

View File

@@ -362,7 +362,7 @@ fn build_assign_permissions_statements(epoch: EpochRef) -> String {
}
fn div_ceil(a: usize, b: usize) -> usize {
- (a + b - 1) / b
+ (a.saturating_add(b).saturating_sub(1)).saturating_div(b)
}
impl PostgresBlockStore {
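For comparison, a small sketch (not from the repo): the saturating formulation above, checked against the standard library's unsigned div_ceil, which has been stable since Rust 1.73 and avoids the manual overflow handling entirely:

fn div_ceil_saturating(a: usize, b: usize) -> usize {
    (a.saturating_add(b).saturating_sub(1)).saturating_div(b)
}

fn main() {
    assert_eq!(div_ceil_saturating(10, 3), 4);
    assert_eq!(10usize.div_ceil(3), 4); // std gives the same answer
}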

View File

@@ -125,7 +125,7 @@ impl PostgresTransaction {
if inserted < tx_count {
warn!("Some ({}) transactions already existed and where not updated of {} total in schema {schema}",
- transactions.len() - inserted, transactions.len(), schema = schema);
+ transactions.len().saturating_sub(inserted), transactions.len(), schema = schema);
}
trace!(

View File

@@ -56,7 +56,7 @@ impl RpcRequestData {
//avoid clone on the first request
if let Some(mut pending_rpc_request) = self.pending_rpc_request.take() {
if pending_rpc_request.len() > 1 {
- for return_channel in pending_rpc_request.drain(0..pending_rpc_request.len() - 1) {
+ for return_channel in pending_rpc_request.drain(0..pending_rpc_request.len().saturating_sub(1)) {
if return_channel.send(rpc_vote_accounts.clone()).is_err() {
log::error!("Vote accounts RPC channel send closed.");
}
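A generic sketch of the drain pattern above (plain Vec<T> standing in for the vector of response channels): 0..len-1 leaves the last pending entry in place, and saturating_sub keeps the range valid as 0..0 on an empty vector, so it drains nothing instead of panicking:

fn drain_all_but_last<T>(pending_rpc_request: &mut Vec<T>) -> Vec<T> {
    pending_rpc_request
        .drain(0..pending_rpc_request.len().saturating_sub(1))
        .collect()
}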

View File

@@ -119,7 +119,7 @@ impl VoteStore {
if epoch_credits.len() > MAX_RPC_VOTE_ACCOUNT_INFO_EPOCH_CREDITS_HISTORY {
epoch_credits
.iter()
- .skip(epoch_credits.len() - MAX_RPC_VOTE_ACCOUNT_INFO_EPOCH_CREDITS_HISTORY)
+ .skip(epoch_credits.len().saturating_sub(MAX_RPC_VOTE_ACCOUNT_INFO_EPOCH_CREDITS_HISTORY))
.cloned()
.collect()
} else {
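A sketch of the history truncation above, with the element type assumed to be (epoch, credits, previous_credits) triples: only the most recent N entries are kept, and a history shorter than N turns into skip(0), i.e. keep everything:

fn last_n_epoch_credits(epoch_credits: &[(u64, u64, u64)], max_history: usize) -> Vec<(u64, u64, u64)> {
    epoch_credits
        .iter()
        .skip(epoch_credits.len().saturating_sub(max_history))
        .cloned()
        .collect()
}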
@@ -260,7 +260,7 @@ pub fn get_rpc_vote_accounts_info(
})
.partition(|vote_account_info| {
if current_slot >= delinquent_validator_slot_distance {
- vote_account_info.last_vote > current_slot - delinquent_validator_slot_distance
+ vote_account_info.last_vote > current_slot.saturating_sub(delinquent_validator_slot_distance)
} else {
vote_account_info.last_vote > 0
}
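Finally, a standalone sketch of the delinquency test in this last hunk (function name and flat parameters are illustrative): the outer comparison already rules out underflow, so saturating_sub is purely defensive here:

fn is_current(last_vote: u64, current_slot: u64, delinquent_validator_slot_distance: u64) -> bool {
    if current_slot >= delinquent_validator_slot_distance {
        last_vote > current_slot.saturating_sub(delinquent_validator_slot_distance)
    } else {
        last_vote > 0
    }
}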