fixes rust code formatting in core/src/consensus.rs (#29204)

behzad nouri 2022-12-11 23:20:52 +00:00 committed by GitHub
parent 7be57d661f
commit 4ee318b2b2
1 changed file with 269 additions and 252 deletions
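
The hunk below shows the function as it reads after the change. The main structural fix is replacing the function-long `.map(|(last_voted_slot, last_voted_hash)| { ... }).unwrap_or(SwitchForkDecision::SameFork)` closure with an early-return `match`, which removes one level of indentation from the entire body. A minimal sketch of that pattern, using placeholder names rather than the actual consensus.rs items:

// Before: the whole body is nested inside a closure passed to `map`.
fn decide_before(last_vote: Option<u64>) -> &'static str {
    last_vote
        .map(|slot| {
            // ...hundreds of lines indented inside this closure...
            if slot % 2 == 0 { "switch" } else { "same fork" }
        })
        .unwrap_or("same fork")
}

// After: match once, return early, and keep the rest of the body at the top level.
fn decide_after(last_vote: Option<u64>) -> &'static str {
    let slot = match last_vote {
        None => return "same fork",
        Some(slot) => slot,
    };
    // ...the same logic, one indentation level shallower...
    if slot % 2 == 0 { "switch" } else { "same fork" }
}

fn main() {
    // Both forms are equivalent; only the nesting changes.
    assert_eq!(decide_before(Some(2)), decide_after(Some(2)));
    assert_eq!(decide_before(None), decide_after(None));
}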


@@ -651,281 +651,298 @@ impl Tower {
        latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks,
        heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice,
    ) -> SwitchForkDecision {
        let (last_voted_slot, last_voted_hash) = match self.last_voted_slot_hash() {
            None => return SwitchForkDecision::SameFork,
            Some(slot_hash) => slot_hash,
        };
        let root = self.root();
        let empty_ancestors = HashSet::default();
        let empty_ancestors_due_to_minor_unsynced_ledger = || {
            // This condition (stale stray last vote) shouldn't occur under normal validator
            // operation, indicating something unusual happened.
            // This condition could be introduced by manual ledger mishandling,
            // validator SEGV, OS/HW crash, or plain No Free Space FS error.
            // However, returning empty ancestors as a fallback here shouldn't result in
            // slashing by itself (Note that we couldn't fully preclude any kind of slashing if
            // the failure was OS or HW level).
            // Firstly, lockout is ensured elsewhere.
            // Also, there is no risk of optimistic conf. violation. Although empty ancestors
            // could result in incorrect (= more than actual) locked_out_stake and
            // false-positive SwitchProof later in this function, there should be no such a
            // heavier fork candidate, first of all, if the last vote (or any of its
            // unavailable ancestors) were already optimistically confirmed.
            // The only exception is that other validator is already violating it...
            if self.is_first_switch_check() && switch_slot < last_voted_slot {
                // `switch < last` is needed not to warn! this message just because of using
                // newer snapshots on validator restart
                let message = format!(
                    "bank_forks doesn't have corresponding data for the stray restored \
                       last vote({last_voted_slot}), meaning some inconsistency between saved tower and ledger."
                );
                warn!("{}", message);
                datapoint_warn!("tower_warn", ("warn", message, String));
            }
            &empty_ancestors
        };
        let suspended_decision_due_to_major_unsynced_ledger = || {
            // This peculiar corner handling is needed mainly for a tower which is newer than
            // blockstore. (Yeah, we tolerate it for ease of maintaining validator by operators)
            // This condition could be introduced by manual ledger mishandling,
            // validator SEGV, OS/HW crash, or plain No Free Space FS error.
            // When we're in this clause, it basically means validator is badly running
            // with a future tower while replaying past slots, especially problematic is
            // last_voted_slot.
            // So, don't re-vote on it by returning pseudo FailedSwitchThreshold, otherwise
            // there would be slashing because of double vote on one of last_vote_ancestors.
            // (Well, needless to say, re-creating the duplicate block must be handled properly
            // at the banking stage: https://github.com/solana-labs/solana/issues/8232)
            //
            // To be specific, the replay stage is tricked into a false perception where
            // last_vote_ancestors is AVAILABLE for descendant-of-`switch_slot`, stale, and
            // stray slots (which should always be empty_ancestors).
            //
            // This is covered by test_future_tower_* in local_cluster
            SwitchForkDecision::FailedSwitchThreshold(0, total_stake)
        };
        let rollback_due_to_to_to_duplicate_ancestor = |latest_duplicate_ancestor| {
            SwitchForkDecision::FailedSwitchDuplicateRollback(latest_duplicate_ancestor)
        };
        // `heaviest_subtree_fork_choice` entries are not cleaned by duplicate block purging/rollback logic,
        // so this is safe to check here. We return here if the last voted slot was rolled back/purged due to
        // being a duplicate because `ancestors`/`descendants`/`progress` structures may be missing this slot due
        // to duplicate purging. This would cause many of the `unwrap()` checks below to fail.
        //
        // TODO: Handle if the last vote is on a dupe, and then we restart. The dupe won't be in
        // heaviest_subtree_fork_choice, so `heaviest_subtree_fork_choice.latest_invalid_ancestor()` will return
        // None, but the last vote will be persisted in tower.
        let switch_hash = progress
            .get_hash(switch_slot)
            .expect("Slot we're trying to switch to must exist AND be frozen in progress map");
        if let Some(latest_duplicate_ancestor) = heaviest_subtree_fork_choice
            .latest_invalid_ancestor(&(last_voted_slot, last_voted_hash))
        {
            // We're rolling back because one of the ancestors of the last vote was a duplicate. In this
            // case, it's acceptable if the switch candidate is one of ancestors of the previous vote,
            // just fail the switch check because there's no point in voting on an ancestor. ReplayStage
            // should then have a special case continue building an alternate fork from this ancestor, NOT
            // the `last_voted_slot`. This is in contrast to usual SwitchFailure where ReplayStage continues to build blocks
            // on latest vote. See `ReplayStage::select_vote_and_reset_forks()` for more details.
            if heaviest_subtree_fork_choice.is_strict_ancestor(
                &(switch_slot, switch_hash),
                &(last_voted_slot, last_voted_hash),
            ) {
                return rollback_due_to_to_to_duplicate_ancestor(latest_duplicate_ancestor);
            } else if progress
                .get_hash(last_voted_slot)
                .map(|current_slot_hash| current_slot_hash != last_voted_hash)
                .unwrap_or(true)
            {
                // Our last vote slot was purged because it was on a duplicate fork, don't continue below
                // where checks may panic. We allow a freebie vote here that may violate switching
                // thresholds
                // TODO: Properly handle this case
                info!(
                    "Allowing switch vote on {:?} because last vote {:?} was rolled back",
                    (switch_slot, switch_hash),
                    (last_voted_slot, last_voted_hash)
                );
                return SwitchForkDecision::SwitchProof(Hash::default());
            }
        }
        let last_vote_ancestors = ancestors.get(&last_voted_slot).unwrap_or_else(|| {
            if self.is_stray_last_vote() {
                // Unless last vote is stray and stale, ancestors.get(last_voted_slot) must
                // return Some(_), justifying to panic! here.
                // Also, adjust_lockouts_after_replay() correctly makes last_voted_slot None,
                // if all saved votes are ancestors of replayed_root_slot. So this code shouldn't be
                // touched in that case as well.
                // In other words, except being stray, all other slots have been voted on while
                // this validator has been running, so we must be able to fetch ancestors for
                // all of them.
                empty_ancestors_due_to_minor_unsynced_ledger()
            } else {
                panic!("no ancestors found with slot: {last_voted_slot}");
            }
        });
        let switch_slot_ancestors = ancestors.get(&switch_slot).unwrap();
        if switch_slot == last_voted_slot || switch_slot_ancestors.contains(&last_voted_slot) {
            // If the `switch_slot is a descendant of the last vote,
            // no switching proof is necessary
            return SwitchForkDecision::SameFork;
        }
        if last_vote_ancestors.contains(&switch_slot) {
            if self.is_stray_last_vote() {
                return suspended_decision_due_to_major_unsynced_ledger();
            } else {
                panic!(
                    "Should never consider switching to ancestor ({switch_slot}) of last vote: {last_voted_slot}, ancestors({last_vote_ancestors:?})",
                );
            }
        }
        // By this point, we know the `switch_slot` is on a different fork
        // (is neither an ancestor nor descendant of `last_vote`), so a
        // switching proof is necessary
        let switch_proof = Hash::default();
        let mut locked_out_stake = 0;
        let mut locked_out_vote_accounts = HashSet::new();
        for (candidate_slot, descendants) in descendants.iter() {
            // 1) Don't consider any banks that haven't been frozen yet
            //    because the needed stats are unavailable
            // 2) Only consider lockouts at the latest `frozen` bank
            //    on each fork, as that bank will contain all the
            //    lockout intervals for ancestors on that fork as well.
            // 3) Don't consider lockouts on the `last_vote` itself
            // 4) Don't consider lockouts on any descendants of
            //    `last_vote`
            // 5) Don't consider any banks before the root because
            //    all lockouts must be ancestors of `last_vote`
            if !progress
                .get_fork_stats(*candidate_slot)
                .map(|stats| stats.computed)
                .unwrap_or(false)
                || {
                    // If any of the descendants have the `computed` flag set, then there must be a more
                    // recent frozen bank on this fork to use, so we can ignore this one. Otherwise,
                    // even if this bank has descendants, if they have not yet been frozen / stats computed,
                    // then use this bank as a representative for the fork.
                    descendants.iter().any(|d| {
                        progress
                            .get_fork_stats(*d)
                            .map(|stats| stats.computed)
                            .unwrap_or(false)
                    })
                }
                || *candidate_slot == last_voted_slot
                || {
                    // Ignore if the `candidate_slot` is a descendant of the `last_voted_slot`, since we do not
                    // want to count votes on the same fork.
                    Self::is_candidate_slot_descendant_of_last_vote(
                        *candidate_slot,
                        last_voted_slot,
                        ancestors,
                    )
                    .expect("exists in descendants map, so must exist in ancestors map")
                }
                || *candidate_slot <= root
            {
                continue;
            }
            // By the time we reach here, any ancestors of the `last_vote`,
            // should have been filtered out, as they all have a descendant,
            // namely the `last_vote` itself.
            assert!(!last_vote_ancestors.contains(candidate_slot));
            // Evaluate which vote accounts in the bank are locked out
            // in the interval candidate_slot..last_vote, which means
            // finding any lockout intervals in the `lockout_intervals` tree
            // for this bank that contain `last_vote`.
            let lockout_intervals = &progress
                .get(candidate_slot)
                .unwrap()
                .fork_stats
                .lockout_intervals;
            // Find any locked out intervals for vote accounts in this bank with
            // `lockout_interval_end` >= `last_vote`, which implies they are locked out at
            // `last_vote` on another fork.
            for (_lockout_interval_end, intervals_keyed_by_end) in
                lockout_intervals.range((Included(last_voted_slot), Unbounded))
            {
                for (lockout_interval_start, vote_account_pubkey) in intervals_keyed_by_end {
                    if locked_out_vote_accounts.contains(vote_account_pubkey) {
                        continue;
                    }
                    // Only count lockouts on slots that are:
                    // 1) Not ancestors of `last_vote`, meaning being on different fork
                    // 2) Not from before the current root as we can't determine if
                    //    anything before the root was an ancestor of `last_vote` or not
                    if !last_vote_ancestors.contains(lockout_interval_start) && {
                        // Given a `lockout_interval_start` < root that appears in a
                        // bank for a `candidate_slot`, it must be that `lockout_interval_start`
                        // is an ancestor of the current root, because `candidate_slot` is a
                        // descendant of the current root
                        *lockout_interval_start > root
                    } {
                        let stake = epoch_vote_accounts
                            .get(vote_account_pubkey)
                            .map(|(stake, _)| *stake)
                            .unwrap_or(0);
                        locked_out_stake += stake;
                        if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
                            return SwitchForkDecision::SwitchProof(switch_proof);
                        }
                        locked_out_vote_accounts.insert(vote_account_pubkey);
                    }
                }
            }
        }
        // Check the latest votes for potentially gossip votes that haven't landed yet
        for (
            vote_account_pubkey,
            (candidate_latest_frozen_vote, _candidate_latest_frozen_vote_hash),
        ) in latest_validator_votes_for_frozen_banks.max_gossip_frozen_votes()
        {
            if locked_out_vote_accounts.contains(&vote_account_pubkey) {
                continue;
            }
            if *candidate_latest_frozen_vote > last_voted_slot && {
                // Because `candidate_latest_frozen_vote` is the last vote made by some validator
                // in the cluster for a frozen bank `B` observed through gossip, we may have cleared
                // that frozen bank `B` because we `set_root(root)` for a `root` on a different fork,
                // like so:
                //
                //    |----------X ------candidate_latest_frozen_vote (frozen)
                // old root
                //    |----------new root ----last_voted_slot
                //
                // In most cases, because `last_voted_slot` must be a descendant of `root`, then
                // if `candidate_latest_frozen_vote` is not found in the ancestors/descendants map (recall these
                // directly reflect the state of BankForks), this implies that `B` was pruned from BankForks
                // because it was on a different fork than `last_voted_slot`, and thus this vote for `candidate_latest_frozen_vote`
                // should be safe to count towards the switching proof:
                //
                // However, there is also the possibility that `last_voted_slot` is a stray, in which
                // case we cannot make this conclusion as we do not know the ancestors/descendants
                // of strays. Hence we err on the side of caution here and ignore this vote. This
                // is ok because validators voting on different unrooted forks should eventually vote
                // on some descendant of the root, at which time they can be included in switching proofs.
                !Self::is_candidate_slot_descendant_of_last_vote(
                    *candidate_latest_frozen_vote,
                    last_voted_slot,
                    ancestors,
                )
                .unwrap_or(true)
            } {
                let stake = epoch_vote_accounts
                    .get(vote_account_pubkey)
                    .map(|(stake, _)| *stake)
                    .unwrap_or(0);
                locked_out_stake += stake;
                if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
                    return SwitchForkDecision::SwitchProof(switch_proof);
                }
                locked_out_vote_accounts.insert(vote_account_pubkey);
            }
        }
        // We have not detected sufficient lockout past the last voted slot to generate
        // a switching proof
        SwitchForkDecision::FailedSwitchThreshold(locked_out_stake, total_stake)
    }

    #[allow(clippy::too_many_arguments)]
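
For reference, the switching decision above boils down to accumulating stake from vote accounts that are locked out on other forks and comparing that fraction of total stake against SWITCH_FORK_THRESHOLD. A self-contained sketch of that arithmetic, with a simplified stand-in enum and an assumed threshold value rather than the crate's actual definitions:

// Illustrative stand-ins; the real types and constant live in core/src/consensus.rs.
#[derive(Debug, PartialEq)]
enum Decision {
    SwitchProof,
    FailedSwitchThreshold { locked_out_stake: u64, total_stake: u64 },
}

// Assumed value for illustration only.
const SWITCH_FORK_THRESHOLD: f64 = 0.38;

// Accumulate locked-out stake and decide whether a switching proof can be generated,
// mirroring the fractional comparison used in the function above.
fn check_switch_threshold(locked_out_stakes: &[u64], total_stake: u64) -> Decision {
    let mut locked_out_stake = 0u64;
    for stake in locked_out_stakes {
        locked_out_stake += stake;
        if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
            return Decision::SwitchProof;
        }
    }
    Decision::FailedSwitchThreshold { locked_out_stake, total_stake }
}

fn main() {
    // 30 + 10 = 40 of 100 stake exceeds a 38% threshold.
    assert_eq!(check_switch_threshold(&[30, 10], 100), Decision::SwitchProof);
    // 20 of 100 does not, so the caller sees the accumulated stake instead.
    assert_eq!(
        check_switch_threshold(&[20], 100),
        Decision::FailedSwitchThreshold { locked_out_stake: 20, total_stake: 100 }
    );
}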