Port unconfirmed duplicate tracking logic from ProgressMap to ForkChoice (#17779)

parent da9ae5f836
commit c8535be0e1
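At a high level, this commit moves duplicate-slot bookkeeping out of ProgressMap and into HeaviestSubtreeForkChoice, where it is tracked per (slot, hash) tree node. A rough sketch of the state each node ends up carrying, using simplified, illustrative types rather than the real ones in the diff below:

    // Sketch only: each tree node remembers the most recent ancestor slot that was
    // marked invalid (duplicate), and whether the cluster duplicate-confirmed it.
    type Slot = u64;

    #[derive(Default)]
    struct NodeInfo {
        // Latest ancestor (or the node itself) marked as an unconfirmed duplicate.
        latest_invalid_ancestor: Option<Slot>,
        // Set once the node (or a descendant) is duplicate confirmed by the cluster.
        is_duplicate_confirmed: bool,
    }

    impl NodeInfo {
        // A node is a fork-choice candidate only if no ancestor is still invalid.
        fn is_candidate(&self) -> bool {
            self.latest_invalid_ancestor.is_none()
        }
        // Confirmation clears any pending invalid marker.
        fn set_duplicate_confirmed(&mut self) {
            self.is_duplicate_confirmed = true;
            self.latest_invalid_ancestor = None;
        }
    }

    fn main() {
        let mut node = NodeInfo::default();
        node.latest_invalid_ancestor = Some(4);
        assert!(!node.is_candidate());
        node.set_duplicate_confirmed();
        assert!(node.is_candidate() && node.is_duplicate_confirmed);
    }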
@@ -3,7 +3,7 @@ use crate::{
progress_map::ProgressMap,
};
use solana_sdk::{clock::Slot, hash::Hash};
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use std::collections::{BTreeMap, BTreeSet};

pub(crate) type DuplicateSlotsTracker = BTreeSet<Slot>;
pub(crate) type GossipDuplicateConfirmedSlots = BTreeMap<Slot, Hash>;
@@ -201,19 +201,12 @@ fn get_cluster_duplicate_confirmed_hash<'a>(

fn apply_state_changes(
slot: Slot,
progress: &mut ProgressMap,
fork_choice: &mut HeaviestSubtreeForkChoice,
ancestors: &HashMap<Slot, HashSet<Slot>>,
descendants: &HashMap<Slot, HashSet<Slot>>,
state_changes: Vec<ResultingStateChange>,
) {
for state_change in state_changes {
match state_change {
ResultingStateChange::MarkSlotDuplicate(bank_frozen_hash) => {
progress.set_unconfirmed_duplicate_slot(
slot,
descendants.get(&slot).unwrap_or(&HashSet::default()),
);
fork_choice.mark_fork_invalid_candidate(&(slot, bank_frozen_hash));
}
ResultingStateChange::RepairDuplicateConfirmedVersion(
@@ -224,11 +217,6 @@ fn apply_state_changes(
repair_correct_version(slot, &cluster_duplicate_confirmed_hash);
}
ResultingStateChange::DuplicateConfirmedSlotMatchesCluster(bank_frozen_hash) => {
progress.set_confirmed_duplicate_slot(
slot,
ancestors.get(&slot).unwrap_or(&HashSet::default()),
descendants.get(&slot).unwrap_or(&HashSet::default()),
);
fork_choice.mark_fork_valid_candidate(&(slot, bank_frozen_hash));
}
}
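The progress-map calls removed above were the only users of the ancestors/descendants maps in this function; the fork-choice tree already knows each node's relatives, so the ported version only needs the (slot, hash) key. A rough sketch of the shape the dispatch takes after the port (the names here are simplified stand-ins, not the real crate API):

    use std::collections::HashMap;

    type Slot = u64;
    type Hash = [u8; 32];
    type SlotHashKey = (Slot, Hash);

    enum StateChange {
        MarkSlotDuplicate(Hash),
        DuplicateConfirmedSlotMatchesCluster(Hash),
    }

    #[derive(Default)]
    struct ForkChoiceSketch {
        invalid: HashMap<SlotHashKey, bool>,
    }

    impl ForkChoiceSketch {
        fn mark_fork_invalid_candidate(&mut self, key: &SlotHashKey) {
            self.invalid.insert(*key, true);
        }
        fn mark_fork_valid_candidate(&mut self, key: &SlotHashKey) {
            self.invalid.insert(*key, false);
        }
    }

    // Both arms now act on fork choice alone, keyed by the frozen (slot, hash).
    fn apply_state_changes(slot: Slot, fork_choice: &mut ForkChoiceSketch, changes: Vec<StateChange>) {
        for change in changes {
            match change {
                StateChange::MarkSlotDuplicate(hash) => {
                    fork_choice.mark_fork_invalid_candidate(&(slot, hash))
                }
                StateChange::DuplicateConfirmedSlotMatchesCluster(hash) => {
                    fork_choice.mark_fork_valid_candidate(&(slot, hash))
                }
            }
        }
    }

    fn main() {
        let mut fork_choice = ForkChoiceSketch::default();
        apply_state_changes(7, &mut fork_choice, vec![StateChange::MarkSlotDuplicate([0u8; 32])]);
        assert_eq!(fork_choice.invalid.get(&(7, [0u8; 32])), Some(&true));
    }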
@@ -242,9 +230,7 @@ pub(crate) fn check_slot_agrees_with_cluster(
frozen_hash: Option<Hash>,
duplicate_slots_tracker: &mut DuplicateSlotsTracker,
gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots,
ancestors: &HashMap<Slot, HashSet<Slot>>,
descendants: &HashMap<Slot, HashSet<Slot>>,
progress: &mut ProgressMap,
progress: &ProgressMap,
fork_choice: &mut HeaviestSubtreeForkChoice,
slot_state_update: SlotStateUpdate,
) {
@@ -280,7 +266,11 @@ pub(crate) fn check_slot_agrees_with_cluster(
let frozen_hash = frozen_hash.unwrap();
let gossip_duplicate_confirmed_hash = gossip_duplicate_confirmed_slots.get(&slot);

let is_local_replay_duplicate_confirmed = progress.is_duplicate_confirmed(slot).expect("If the frozen hash exists, then the slot must exist in bank forks and thus in progress map");
// If the bank hasn't been frozen yet, then we haven't duplicate confirmed a local version
// this slot through replay yet.
let is_local_replay_duplicate_confirmed = fork_choice
.is_duplicate_confirmed(&(slot, frozen_hash))
.unwrap_or(false);
let cluster_duplicate_confirmed_hash = get_cluster_duplicate_confirmed_hash(
slot,
gossip_duplicate_confirmed_hash,
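The local duplicate-confirmed check now reads from fork choice: is_duplicate_confirmed returns None when the (slot, hash) node is not in the tree yet (for example, the bank has not been frozen and added), so unwrap_or(false) treats an unknown node as "not confirmed through replay". A small sketch of that lookup pattern, with a stand-in map in place of the real tree:

    use std::collections::HashMap;

    type Slot = u64;
    type SlotHashKey = (Slot, u64); // hash simplified to u64 for the sketch

    struct Tree {
        duplicate_confirmed: HashMap<SlotHashKey, bool>,
    }

    impl Tree {
        // None means "node not tracked yet"; Some(flag) is the recorded status.
        fn is_duplicate_confirmed(&self, key: &SlotHashKey) -> Option<bool> {
            self.duplicate_confirmed.get(key).copied()
        }
    }

    fn locally_confirmed(tree: &Tree, key: &SlotHashKey) -> bool {
        // An unfrozen/unknown bank cannot have been confirmed through replay yet.
        tree.is_duplicate_confirmed(key).unwrap_or(false)
    }

    fn main() {
        let tree = Tree { duplicate_confirmed: HashMap::new() };
        assert!(!locally_confirmed(&tree, &(10, 0)));
    }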
@@ -310,14 +300,7 @@ pub(crate) fn check_slot_agrees_with_cluster(
is_slot_duplicate,
is_dead,
);
apply_state_changes(
slot,
progress,
fork_choice,
ancestors,
descendants,
state_changes,
);
apply_state_changes(slot, fork_choice, state_changes);
}

#[cfg(test)]
@ -325,15 +308,16 @@ mod test {
|
|||
use super::*;
|
||||
use crate::consensus::test::VoteSimulator;
|
||||
use solana_runtime::bank_forks::BankForks;
|
||||
use std::sync::RwLock;
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
sync::RwLock,
|
||||
};
|
||||
use trees::tr;
|
||||
|
||||
struct InitialState {
|
||||
heaviest_subtree_fork_choice: HeaviestSubtreeForkChoice,
|
||||
progress: ProgressMap,
|
||||
ancestors: HashMap<Slot, HashSet<Slot>>,
|
||||
descendants: HashMap<Slot, HashSet<Slot>>,
|
||||
slot: Slot,
|
||||
bank_forks: RwLock<BankForks>,
|
||||
}
|
||||
|
||||
|
@ -342,7 +326,6 @@ mod test {
|
|||
let forks = tr(0) / (tr(1) / (tr(2) / tr(3)));
|
||||
let mut vote_simulator = VoteSimulator::new(1);
|
||||
vote_simulator.fill_bank_forks(forks, &HashMap::new());
|
||||
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
|
||||
|
||||
let descendants = vote_simulator
|
||||
.bank_forks
|
||||
|
@ -354,9 +337,7 @@ mod test {
|
|||
InitialState {
|
||||
heaviest_subtree_fork_choice: vote_simulator.heaviest_subtree_fork_choice,
|
||||
progress: vote_simulator.progress,
|
||||
ancestors,
|
||||
descendants,
|
||||
slot: 0,
|
||||
bank_forks: vote_simulator.bank_forks,
|
||||
}
|
||||
}
|
||||
|
@ -627,64 +608,68 @@ mod test {
|
|||
// Common state
|
||||
let InitialState {
|
||||
mut heaviest_subtree_fork_choice,
|
||||
mut progress,
|
||||
ancestors,
|
||||
descendants,
|
||||
slot,
|
||||
bank_forks,
|
||||
..
|
||||
} = setup();
|
||||
|
||||
// MarkSlotDuplicate should mark progress map and remove
|
||||
// the slot from fork choice
|
||||
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
|
||||
let duplicate_slot = bank_forks.read().unwrap().root() + 1;
|
||||
let duplicate_slot_hash = bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(duplicate_slot)
|
||||
.unwrap()
|
||||
.hash();
|
||||
apply_state_changes(
|
||||
slot,
|
||||
&mut progress,
|
||||
duplicate_slot,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
vec![ResultingStateChange::MarkSlotDuplicate(slot_hash)],
|
||||
vec![ResultingStateChange::MarkSlotDuplicate(duplicate_slot_hash)],
|
||||
);
|
||||
assert!(!heaviest_subtree_fork_choice
|
||||
.is_candidate_slot(&(slot, slot_hash))
|
||||
.is_candidate(&(duplicate_slot, duplicate_slot_hash))
|
||||
.unwrap());
|
||||
for child_slot in descendants
|
||||
.get(&slot)
|
||||
.get(&duplicate_slot)
|
||||
.unwrap()
|
||||
.iter()
|
||||
.chain(std::iter::once(&slot))
|
||||
.chain(std::iter::once(&duplicate_slot))
|
||||
{
|
||||
assert_eq!(
|
||||
progress
|
||||
.latest_unconfirmed_duplicate_ancestor(*child_slot)
|
||||
heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(&(
|
||||
*child_slot,
|
||||
bank_forks.read().unwrap().get(*child_slot).unwrap().hash()
|
||||
))
|
||||
.unwrap(),
|
||||
slot
|
||||
duplicate_slot
|
||||
);
|
||||
}
|
||||
|
||||
// DuplicateConfirmedSlotMatchesCluster should re-enable fork choice
|
||||
apply_state_changes(
|
||||
slot,
|
||||
&mut progress,
|
||||
duplicate_slot,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
vec![ResultingStateChange::DuplicateConfirmedSlotMatchesCluster(
|
||||
slot_hash,
|
||||
duplicate_slot_hash,
|
||||
)],
|
||||
);
|
||||
for child_slot in descendants
|
||||
.get(&slot)
|
||||
.get(&duplicate_slot)
|
||||
.unwrap()
|
||||
.iter()
|
||||
.chain(std::iter::once(&slot))
|
||||
.chain(std::iter::once(&duplicate_slot))
|
||||
{
|
||||
assert!(progress
|
||||
.latest_unconfirmed_duplicate_ancestor(*child_slot)
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(&(
|
||||
*child_slot,
|
||||
bank_forks.read().unwrap().get(*child_slot).unwrap().hash()
|
||||
))
|
||||
.is_none());
|
||||
}
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_candidate_slot(&(slot, slot_hash))
|
||||
.is_candidate(&(duplicate_slot, duplicate_slot_hash))
|
||||
.unwrap());
|
||||
}
|
||||
|
||||
|
@ -692,9 +677,7 @@ mod test {
|
|||
// Common state
|
||||
let InitialState {
|
||||
mut heaviest_subtree_fork_choice,
|
||||
mut progress,
|
||||
ancestors,
|
||||
descendants,
|
||||
progress,
|
||||
bank_forks,
|
||||
..
|
||||
} = setup();
|
||||
|
@ -713,12 +696,18 @@ mod test {
|
|||
initial_bank_hash,
|
||||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&mut progress,
|
||||
&progress,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::Duplicate,
|
||||
);
|
||||
assert!(duplicate_slots_tracker.contains(&duplicate_slot));
|
||||
// Nothing should be applied yet to fork choice, since bank was not yet frozen
|
||||
for slot in 2..=3 {
|
||||
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(&(slot, slot_hash))
|
||||
.is_none());
|
||||
}
|
||||
|
||||
// Now freeze the bank
|
||||
let frozen_duplicate_slot_hash = bank_forks
|
||||
|
@ -733,16 +722,16 @@ mod test {
|
|||
Some(frozen_duplicate_slot_hash),
|
||||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&mut progress,
|
||||
&progress,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::Frozen,
|
||||
);
|
||||
|
||||
// Progress map should have the correct updates, fork choice should mark duplicate
|
||||
// as unvotable
|
||||
assert!(progress.is_unconfirmed_duplicate(duplicate_slot).unwrap());
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_unconfirmed_duplicate(&(duplicate_slot, frozen_duplicate_slot_hash))
|
||||
.unwrap());
|
||||
|
||||
// The ancestor of the duplicate slot should be the best slot now
|
||||
let (duplicate_ancestor, duplicate_parent_hash) = {
|
||||
|
@ -771,9 +760,7 @@ mod test {
|
|||
// Common state
|
||||
let InitialState {
|
||||
mut heaviest_subtree_fork_choice,
|
||||
mut progress,
|
||||
ancestors,
|
||||
descendants,
|
||||
progress,
|
||||
bank_forks,
|
||||
..
|
||||
} = setup();
|
||||
|
@ -796,17 +783,26 @@ mod test {
|
|||
Some(slot2_hash),
|
||||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&mut progress,
|
||||
&progress,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::DuplicateConfirmed,
|
||||
);
|
||||
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&(2, slot2_hash))
|
||||
.unwrap());
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.best_overall_slot(),
|
||||
(3, slot3_hash)
|
||||
);
|
||||
for slot in 0..=2 {
|
||||
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&(slot, slot_hash))
|
||||
.unwrap());
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(&(slot, slot_hash))
|
||||
.is_none());
|
||||
}
|
||||
|
||||
// Mark 3 as duplicate, should not remove the duplicate confirmed slot 2 from
|
||||
// fork choice
|
||||
|
@ -816,17 +812,36 @@ mod test {
|
|||
Some(slot3_hash),
|
||||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&mut progress,
|
||||
&progress,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::Duplicate,
|
||||
);
|
||||
|
||||
assert!(duplicate_slots_tracker.contains(&3));
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.best_overall_slot(),
|
||||
(2, slot2_hash)
|
||||
);
|
||||
for slot in 0..=3 {
|
||||
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
|
||||
if slot <= 2 {
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&(slot, slot_hash))
|
||||
.unwrap());
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(&(slot, slot_hash))
|
||||
.is_none());
|
||||
} else {
|
||||
assert!(!heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&(slot, slot_hash))
|
||||
.unwrap());
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(&(slot, slot_hash))
|
||||
.unwrap(),
|
||||
3
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@ -834,9 +849,7 @@ mod test {
|
|||
// Common state
|
||||
let InitialState {
|
||||
mut heaviest_subtree_fork_choice,
|
||||
mut progress,
|
||||
ancestors,
|
||||
descendants,
|
||||
progress,
|
||||
bank_forks,
|
||||
..
|
||||
} = setup();
|
||||
|
@ -857,12 +870,20 @@ mod test {
|
|||
Some(bank_forks.read().unwrap().get(2).unwrap().hash()),
|
||||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&mut progress,
|
||||
&progress,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::Duplicate,
|
||||
);
|
||||
assert!(duplicate_slots_tracker.contains(&2));
|
||||
for slot in 2..=3 {
|
||||
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(&(slot, slot_hash))
|
||||
.unwrap(),
|
||||
2
|
||||
);
|
||||
}
|
||||
|
||||
let slot1_hash = bank_forks.read().unwrap().get(1).unwrap().hash();
|
||||
assert_eq!(
|
||||
|
@ -878,13 +899,91 @@ mod test {
|
|||
Some(slot3_hash),
|
||||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&mut progress,
|
||||
&progress,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::DuplicateConfirmed,
|
||||
);
|
||||
for slot in 0..=3 {
|
||||
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&(slot, slot_hash))
|
||||
.unwrap());
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(&(slot, slot_hash))
|
||||
.is_none());
|
||||
}
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.best_overall_slot(),
|
||||
(3, slot3_hash)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_state_descendant_confirmed_ancestor_duplicate() {
|
||||
// Common state
|
||||
let InitialState {
|
||||
mut heaviest_subtree_fork_choice,
|
||||
progress,
|
||||
bank_forks,
|
||||
..
|
||||
} = setup();
|
||||
|
||||
let slot3_hash = bank_forks.read().unwrap().get(3).unwrap().hash();
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.best_overall_slot(),
|
||||
(3, slot3_hash)
|
||||
);
|
||||
let root = 0;
|
||||
let mut duplicate_slots_tracker = DuplicateSlotsTracker::default();
|
||||
let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default();
|
||||
|
||||
// Mark 3 as duplicate confirmed
|
||||
gossip_duplicate_confirmed_slots.insert(3, slot3_hash);
|
||||
check_slot_agrees_with_cluster(
|
||||
3,
|
||||
root,
|
||||
Some(slot3_hash),
|
||||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&progress,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::DuplicateConfirmed,
|
||||
);
|
||||
let verify_all_slots_duplicate_confirmed =
|
||||
|bank_forks: &RwLock<BankForks>,
|
||||
heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice| {
|
||||
for slot in 0..=3 {
|
||||
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&(slot, slot_hash))
|
||||
.unwrap());
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(&(slot, slot_hash))
|
||||
.is_none());
|
||||
}
|
||||
};
|
||||
verify_all_slots_duplicate_confirmed(&bank_forks, &heaviest_subtree_fork_choice);
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.best_overall_slot(),
|
||||
(3, slot3_hash)
|
||||
);
|
||||
|
||||
// Mark ancestor 1 as duplicate, fork choice should be unaffected since
|
||||
// slot 1 was duplicate confirmed by the confirmation on its
|
||||
// descendant, 3.
|
||||
let slot1_hash = bank_forks.read().unwrap().get(1).unwrap().hash();
|
||||
check_slot_agrees_with_cluster(
|
||||
1,
|
||||
root,
|
||||
Some(slot1_hash),
|
||||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&progress,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::Duplicate,
|
||||
);
|
||||
assert!(duplicate_slots_tracker.contains(&1));
|
||||
verify_all_slots_duplicate_confirmed(&bank_forks, &heaviest_subtree_fork_choice);
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.best_overall_slot(),
|
||||
(3, slot3_hash)
|
||||
|
|
|
@@ -1,4 +1,5 @@
use crate::{
heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks,
progress_map::{LockoutIntervals, ProgressMap},
};
@@ -573,6 +574,7 @@ impl Tower {
.map(|candidate_slot_ancestors| candidate_slot_ancestors.contains(&last_voted_slot))
}

#[allow(clippy::too_many_arguments)]
fn make_check_switch_threshold_decision(
&self,
switch_slot: u64,
@@ -582,9 +584,10 @@ impl Tower {
total_stake: u64,
epoch_vote_accounts: &HashMap<Pubkey, (u64, ArcVoteAccount)>,
latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks,
heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice,
) -> SwitchForkDecision {
self.last_voted_slot()
.map(|last_voted_slot| {
self.last_voted_slot_hash()
.map(|(last_voted_slot, last_voted_hash)| {
let root = self.root();
let empty_ancestors = HashSet::default();
let empty_ancestors_due_to_minor_unsynced_ledger = || {
@@ -673,7 +676,7 @@ impl Tower {
if last_vote_ancestors.contains(&switch_slot) {
if self.is_stray_last_vote() {
return suspended_decision_due_to_major_unsynced_ledger();
} else if let Some(latest_duplicate_ancestor) = progress.latest_unconfirmed_duplicate_ancestor(last_voted_slot) {
} else if let Some(latest_duplicate_ancestor) = heaviest_subtree_fork_choice.latest_invalid_ancestor(&(last_voted_slot, last_voted_hash)) {
// We're rolling back because one of the ancestors of the last vote was a duplicate. In this
// case, it's acceptable if the switch candidate is one of ancestors of the previous vote,
// just fail the switch check because there's no point in voting on an ancestor. ReplayStage
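In the switch-threshold path, Tower now asks fork choice, rather than the progress map, whether the last vote sits under an unconfirmed duplicate, keyed by the exact (slot, hash) that was voted on. Roughly, the decision looks like the sketch below; the FailedSwitchDuplicateRollback name mirrors the diff, everything else is simplified and hypothetical:

    type Slot = u64;
    type Hash = u64; // simplified stand-in for the real hash type

    #[derive(Debug, PartialEq)]
    enum SwitchForkDecision {
        SameFork,
        FailedSwitchDuplicateRollback(Slot),
    }

    // Fork choice answers: which ancestor of (slot, hash), if any, is still an
    // unconfirmed duplicate? Hard-coded here purely for the sketch.
    fn latest_invalid_ancestor(_key: &(Slot, Hash)) -> Option<Slot> {
        Some(44)
    }

    fn decide(switch_slot: Slot, last_vote: (Slot, Hash), last_vote_ancestors: &[Slot]) -> SwitchForkDecision {
        if last_vote_ancestors.contains(&switch_slot) {
            // Switching "backwards" onto an ancestor is only tolerated while rolling
            // back from a duplicate ancestor of the last vote.
            if let Some(dup) = latest_invalid_ancestor(&last_vote) {
                return SwitchForkDecision::FailedSwitchDuplicateRollback(dup);
            }
        }
        SwitchForkDecision::SameFork
    }

    fn main() {
        assert_eq!(
            decide(43, (47, 0), &[43, 44, 45, 46]),
            SwitchForkDecision::FailedSwitchDuplicateRollback(44)
        );
    }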
@ -821,6 +824,7 @@ impl Tower {
|
|||
.unwrap_or(SwitchForkDecision::SameFork)
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub(crate) fn check_switch_threshold(
|
||||
&mut self,
|
||||
switch_slot: u64,
|
||||
|
@ -830,6 +834,7 @@ impl Tower {
|
|||
total_stake: u64,
|
||||
epoch_vote_accounts: &HashMap<Pubkey, (u64, ArcVoteAccount)>,
|
||||
latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks,
|
||||
heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice,
|
||||
) -> SwitchForkDecision {
|
||||
let decision = self.make_check_switch_threshold_decision(
|
||||
switch_slot,
|
||||
|
@ -839,6 +844,7 @@ impl Tower {
|
|||
total_stake,
|
||||
epoch_vote_accounts,
|
||||
latest_validator_votes_for_frozen_banks,
|
||||
heaviest_subtree_fork_choice,
|
||||
);
|
||||
let new_check = Some((switch_slot, decision.clone()));
|
||||
if new_check != self.last_switch_threshold_check {
|
||||
|
@ -1360,9 +1366,9 @@ pub mod test {
|
|||
cluster_info_vote_listener::VoteTracker,
|
||||
cluster_slot_state_verifier::{DuplicateSlotsTracker, GossipDuplicateConfirmedSlots},
|
||||
cluster_slots::ClusterSlots,
|
||||
fork_choice::SelectVoteAndResetForkResult,
|
||||
heaviest_subtree_fork_choice::{HeaviestSubtreeForkChoice, SlotHashKey},
|
||||
progress_map::{DuplicateStats, ForkProgress},
|
||||
fork_choice::{ForkChoice, SelectVoteAndResetForkResult},
|
||||
heaviest_subtree_fork_choice::SlotHashKey,
|
||||
progress_map::ForkProgress,
|
||||
replay_stage::{HeaviestForkFailures, ReplayStage},
|
||||
unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes,
|
||||
};
|
||||
|
@ -1439,9 +1445,9 @@ pub mod test {
|
|||
|
||||
while let Some(visit) = walk.get() {
|
||||
let slot = visit.node().data;
|
||||
self.progress.entry(slot).or_insert_with(|| {
|
||||
ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0)
|
||||
});
|
||||
self.progress
|
||||
.entry(slot)
|
||||
.or_insert_with(|| ForkProgress::new(Hash::default(), None, None, 0, 0));
|
||||
if self.bank_forks.read().unwrap().get(slot).is_some() {
|
||||
walk.forward();
|
||||
continue;
|
||||
|
@ -1530,6 +1536,7 @@ pub mod test {
|
|||
&self.progress,
|
||||
tower,
|
||||
&self.latest_validator_votes_for_frozen_banks,
|
||||
&self.heaviest_subtree_fork_choice,
|
||||
);
|
||||
|
||||
// Make sure this slot isn't locked out or failing threshold
|
||||
|
@ -1593,9 +1600,7 @@ pub mod test {
|
|||
) {
|
||||
self.progress
|
||||
.entry(slot)
|
||||
.or_insert_with(|| {
|
||||
ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0)
|
||||
})
|
||||
.or_insert_with(|| ForkProgress::new(Hash::default(), None, None, 0, 0))
|
||||
.fork_stats
|
||||
.lockout_intervals
|
||||
.entry(lockout_interval.1)
|
||||
|
@ -1702,14 +1707,7 @@ pub mod test {
|
|||
let mut progress = ProgressMap::default();
|
||||
progress.insert(
|
||||
0,
|
||||
ForkProgress::new(
|
||||
bank0.last_blockhash(),
|
||||
None,
|
||||
DuplicateStats::default(),
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
),
|
||||
ForkProgress::new(bank0.last_blockhash(), None, None, 0, 0),
|
||||
);
|
||||
let bank_forks = BankForks::new(bank0);
|
||||
let heaviest_subtree_fork_choice =
|
||||
|
@ -1868,21 +1866,46 @@ pub mod test {
|
|||
let mut tower = Tower::new_with_key(&vote_simulator.node_pubkeys[0]);
|
||||
|
||||
// Last vote is 47
|
||||
tower.record_vote(47, Hash::default());
|
||||
tower.record_vote(
|
||||
47,
|
||||
vote_simulator
|
||||
.bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(47)
|
||||
.unwrap()
|
||||
.hash(),
|
||||
);
|
||||
|
||||
// Trying to switch to an ancestor of last vote should only not panic
|
||||
// if the current vote has a duplicate ancestor
|
||||
let ancestor_of_voted_slot = 43;
|
||||
let duplicate_ancestor1 = 44;
|
||||
let duplicate_ancestor2 = 45;
|
||||
vote_simulator.progress.set_unconfirmed_duplicate_slot(
|
||||
vote_simulator
|
||||
.heaviest_subtree_fork_choice
|
||||
.mark_fork_invalid_candidate(&(
|
||||
duplicate_ancestor1,
|
||||
&descendants.get(&duplicate_ancestor1).unwrap(),
|
||||
);
|
||||
vote_simulator.progress.set_unconfirmed_duplicate_slot(
|
||||
vote_simulator
|
||||
.bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(duplicate_ancestor1)
|
||||
.unwrap()
|
||||
.hash(),
|
||||
));
|
||||
vote_simulator
|
||||
.heaviest_subtree_fork_choice
|
||||
.mark_fork_invalid_candidate(&(
|
||||
duplicate_ancestor2,
|
||||
&descendants.get(&duplicate_ancestor2).unwrap(),
|
||||
);
|
||||
vote_simulator
|
||||
.bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(duplicate_ancestor2)
|
||||
.unwrap()
|
||||
.hash(),
|
||||
));
|
||||
assert_eq!(
|
||||
tower.check_switch_threshold(
|
||||
ancestor_of_voted_slot,
|
||||
|
@ -1891,7 +1914,8 @@ pub mod test {
|
|||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchDuplicateRollback(duplicate_ancestor2)
|
||||
);
|
||||
|
@ -1904,11 +1928,18 @@ pub mod test {
|
|||
confirm_ancestors.push(duplicate_ancestor2);
|
||||
}
|
||||
for (i, duplicate_ancestor) in confirm_ancestors.into_iter().enumerate() {
|
||||
vote_simulator.progress.set_confirmed_duplicate_slot(
|
||||
vote_simulator
|
||||
.heaviest_subtree_fork_choice
|
||||
.mark_fork_valid_candidate(&(
|
||||
duplicate_ancestor,
|
||||
ancestors.get(&duplicate_ancestor).unwrap(),
|
||||
&descendants.get(&duplicate_ancestor).unwrap(),
|
||||
);
|
||||
vote_simulator
|
||||
.bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(duplicate_ancestor)
|
||||
.unwrap()
|
||||
.hash(),
|
||||
));
|
||||
let res = tower.check_switch_threshold(
|
||||
ancestor_of_voted_slot,
|
||||
&ancestors,
|
||||
|
@ -1917,6 +1948,7 @@ pub mod test {
|
|||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
);
|
||||
if i == 0 {
|
||||
assert_eq!(
|
||||
|
@ -1952,7 +1984,8 @@ pub mod test {
|
|||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::SameFork
|
||||
);
|
||||
|
@ -1966,7 +1999,8 @@ pub mod test {
|
|||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
|
||||
);
|
||||
|
@ -1982,7 +2016,8 @@ pub mod test {
|
|||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
|
||||
);
|
||||
|
@ -1998,7 +2033,8 @@ pub mod test {
|
|||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
|
||||
);
|
||||
|
@ -2014,7 +2050,8 @@ pub mod test {
|
|||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
|
||||
);
|
||||
|
@ -2032,7 +2069,8 @@ pub mod test {
|
|||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
|
||||
);
|
||||
|
@ -2048,7 +2086,8 @@ pub mod test {
|
|||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::SwitchProof(Hash::default())
|
||||
);
|
||||
|
@ -2065,7 +2104,8 @@ pub mod test {
|
|||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::SwitchProof(Hash::default())
|
||||
);
|
||||
|
@ -2091,7 +2131,8 @@ pub mod test {
|
|||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
|
||||
);
|
||||
|
@ -2123,7 +2164,8 @@ pub mod test {
|
|||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, num_validators * 10000)
|
||||
);
|
||||
|
@ -2138,7 +2180,8 @@ pub mod test {
|
|||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
|
||||
);
|
||||
|
@ -2170,7 +2213,8 @@ pub mod test {
|
|||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::SwitchProof(Hash::default())
|
||||
);
|
||||
|
@ -2194,7 +2238,8 @@ pub mod test {
|
|||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
|
||||
);
|
||||
|
@ -2873,7 +2918,8 @@ pub mod test {
|
|||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::SameFork
|
||||
);
|
||||
|
@ -2887,7 +2933,8 @@ pub mod test {
|
|||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
|
||||
);
|
||||
|
@ -2902,7 +2949,8 @@ pub mod test {
|
|||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::SwitchProof(Hash::default())
|
||||
);
|
||||
|
@ -2972,7 +3020,8 @@ pub mod test {
|
|||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
|
||||
);
|
||||
|
@ -2987,7 +3036,8 @@ pub mod test {
|
|||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
|
||||
);
|
||||
|
@ -3002,7 +3052,8 @@ pub mod test {
|
|||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::SwitchProof(Hash::default())
|
||||
);
|
||||
|
|
|
@@ -30,7 +30,17 @@ const MAX_ROOT_PRINT_SECONDS: u64 = 30;
enum UpdateLabel {
Aggregate,
Add,
MarkValid,

// Notify a fork in the tree that a particular slot in that fork is now
// marked as valid. If there are multiple MarkValid operations for
// a single node, should apply the one with the smaller slot first (hence
// why the actual slot is included here).
MarkValid(Slot),
// Notify a fork in the tree that a particular slot in that fork is now
// marked as invalid. If there are multiple MarkInvalid operations for
// a single node, should apply the one with the smaller slot first (hence
// why the actual slot is included here).
MarkInvalid(Slot),
Subtract,
}
@@ -53,7 +63,10 @@ impl GetSlotHash for Slot {
#[derive(PartialEq, Eq, Clone, Debug)]
enum UpdateOperation {
Add(u64),
MarkValid,
MarkValid(Slot),
// Notify a fork in the tree that a particular slot in that fork is now
// marked as invalid.
MarkInvalid(Slot),
Subtract(u64),
Aggregate,
}
@@ -63,7 +76,8 @@ impl UpdateOperation {
match self {
Self::Aggregate => panic!("Should not get here"),
Self::Add(stake) => *stake += new_stake,
Self::MarkValid => panic!("Should not get here"),
Self::MarkValid(_slot) => panic!("Should not get here"),
Self::MarkInvalid(_slot) => panic!("Should not get here"),
Self::Subtract(stake) => *stake += new_stake,
}
}
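MarkValid and MarkInvalid now carry the slot being (in)validated so that, when several of them target the same node, they can be applied smallest-slot-first and the per-node state still converges. The sketch below shows the monotone update rule that makes this safe; it mirrors update_with_newly_valid_ancestor / update_with_newly_invalid_ancestor further down, with simplified types:

    type Slot = u64;

    #[derive(Default)]
    struct NodeState {
        latest_invalid_ancestor: Option<Slot>,
    }

    impl NodeState {
        // Keep only the highest invalid ancestor seen so far.
        fn mark_invalid(&mut self, invalid_slot: Slot) {
            if self
                .latest_invalid_ancestor
                .map(|cur| invalid_slot > cur)
                .unwrap_or(true)
            {
                self.latest_invalid_ancestor = Some(invalid_slot);
            }
        }
        // A confirmation at `valid_slot` clears any invalid marker at or below it.
        fn mark_valid(&mut self, valid_slot: Slot) {
            if let Some(cur) = self.latest_invalid_ancestor {
                if cur <= valid_slot {
                    self.latest_invalid_ancestor = None;
                }
            }
        }
    }

    fn main() {
        let mut node = NodeState::default();
        node.mark_invalid(5);
        node.mark_invalid(3); // ignored: 5 is already the latest invalid ancestor
        node.mark_valid(4);   // does not clear: the duplicate at 5 is still unconfirmed
        assert_eq!(node.latest_invalid_ancestor, Some(5));
        node.mark_valid(5);   // clears the marker
        assert_eq!(node.latest_invalid_ancestor, None);
    }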
@@ -80,9 +94,68 @@ struct ForkInfo {
best_slot: SlotHashKey,
parent: Option<SlotHashKey>,
children: Vec<SlotHashKey>,
// Whether the fork rooted at this slot is a valid contender
// for the best fork
is_candidate: bool,
// The latest ancestor of this node that has been marked invalid. If the slot
// itself is a duplicate, this is set to the slot itself.
latest_invalid_ancestor: Option<Slot>,
// Set to true if this slot or a child node was duplicate confirmed.
is_duplicate_confirmed: bool,
}

impl ForkInfo {
/// Returns if this node has been explicitly marked as a duplicate
/// slot
fn is_unconfirmed_duplicate(&self, my_slot: Slot) -> bool {
self.latest_invalid_ancestor
.map(|ancestor| ancestor == my_slot)
.unwrap_or(false)
}

/// Returns if the fork rooted at this node is included in fork choice
fn is_candidate(&self) -> bool {
self.latest_invalid_ancestor.is_none()
}

fn is_duplicate_confirmed(&self) -> bool {
self.is_duplicate_confirmed
}

fn set_duplicate_confirmed(&mut self) {
self.is_duplicate_confirmed = true;
self.latest_invalid_ancestor = None;
}

fn update_with_newly_valid_ancestor(
&mut self,
my_key: &SlotHashKey,
newly_valid_ancestor: Slot,
) {
if let Some(latest_invalid_ancestor) = self.latest_invalid_ancestor {
if latest_invalid_ancestor <= newly_valid_ancestor {
info!("Fork choice for {:?} clearing latest invalid ancestor {:?} because {:?} was duplicate confirmed", my_key, latest_invalid_ancestor, newly_valid_ancestor);
self.latest_invalid_ancestor = None;
}
}
}

fn update_with_newly_invalid_ancestor(
&mut self,
my_key: &SlotHashKey,
newly_invalid_ancestor: Slot,
) {
// Should not be marking a duplicate confirmed slot as invalid
assert!(!self.is_duplicate_confirmed);
if self
.latest_invalid_ancestor
.map(|latest_invalid_ancestor| newly_invalid_ancestor > latest_invalid_ancestor)
.unwrap_or(true)
{
info!(
"Fork choice for {:?} setting latest invalid ancestor from {:?} to {}",
my_key, self.latest_invalid_ancestor, newly_invalid_ancestor
);
self.latest_invalid_ancestor = Some(newly_invalid_ancestor);
}
}
}

pub struct HeaviestSubtreeForkChoice {
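Later in this file, a newly added leaf inherits its parent's latest_invalid_ancestor (and a parentless root counts as duplicate confirmed), so a fork that was marked invalid keeps excluding blocks built on top of it. A small sketch of that inheritance, with hypothetical names in place of the real add_new_leaf_slot:

    use std::collections::HashMap;

    type Slot = u64;
    type Key = (Slot, u64); // (slot, simplified hash)

    struct Info {
        latest_invalid_ancestor: Option<Slot>,
        is_duplicate_confirmed: bool,
    }

    // New leaves start from whatever invalid marker the parent already carries;
    // a leaf with no parent is the root and is treated as duplicate confirmed.
    fn add_leaf(tree: &mut HashMap<Key, Info>, leaf: Key, parent: Option<Key>) {
        let inherited = parent
            .and_then(|p| tree.get(&p))
            .and_then(|info| info.latest_invalid_ancestor);
        tree.insert(
            leaf,
            Info {
                latest_invalid_ancestor: inherited,
                is_duplicate_confirmed: parent.is_none(),
            },
        );
    }

    fn main() {
        let mut tree = HashMap::new();
        add_leaf(&mut tree, (0, 0), None);                 // root
        add_leaf(&mut tree, (4, 0), Some((0, 0)));
        tree.get_mut(&(4, 0)).unwrap().latest_invalid_ancestor = Some(4); // 4 marked duplicate
        add_leaf(&mut tree, (5, 0), Some((4, 0)));         // child of the duplicate
        assert_eq!(tree[&(5, 0)].latest_invalid_ancestor, Some(4));
    }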
@ -182,12 +255,6 @@ impl HeaviestSubtreeForkChoice {
|
|||
.map(|fork_info| fork_info.stake_voted_subtree)
|
||||
}
|
||||
|
||||
pub fn is_candidate_slot(&self, key: &SlotHashKey) -> Option<bool> {
|
||||
self.fork_infos
|
||||
.get(key)
|
||||
.map(|fork_info| fork_info.is_candidate)
|
||||
}
|
||||
|
||||
pub fn root(&self) -> SlotHashKey {
|
||||
self.root
|
||||
}
|
||||
|
@ -252,35 +319,41 @@ impl HeaviestSubtreeForkChoice {
|
|||
best_slot: root_info.best_slot,
|
||||
children: vec![self.root],
|
||||
parent: None,
|
||||
is_candidate: true,
|
||||
latest_invalid_ancestor: None,
|
||||
is_duplicate_confirmed: root_info.is_duplicate_confirmed,
|
||||
};
|
||||
self.fork_infos.insert(root_parent, root_parent_info);
|
||||
self.root = root_parent;
|
||||
}
|
||||
|
||||
pub fn add_new_leaf_slot(&mut self, slot: SlotHashKey, parent: Option<SlotHashKey>) {
|
||||
pub fn add_new_leaf_slot(&mut self, slot_hash_key: SlotHashKey, parent: Option<SlotHashKey>) {
|
||||
if self.last_root_time.elapsed().as_secs() > MAX_ROOT_PRINT_SECONDS {
|
||||
self.print_state();
|
||||
self.last_root_time = Instant::now();
|
||||
}
|
||||
|
||||
if self.fork_infos.contains_key(&slot) {
|
||||
if self.fork_infos.contains_key(&slot_hash_key) {
|
||||
// Can potentially happen if we repair the same version of the duplicate slot, after
|
||||
// dumping the original version
|
||||
return;
|
||||
}
|
||||
|
||||
let parent_latest_invalid_ancestor =
|
||||
parent.and_then(|parent| self.latest_invalid_ancestor(&parent));
|
||||
self.fork_infos
|
||||
.entry(slot)
|
||||
.and_modify(|slot_info| slot_info.parent = parent)
|
||||
.entry(slot_hash_key)
|
||||
.and_modify(|fork_info| fork_info.parent = parent)
|
||||
.or_insert(ForkInfo {
|
||||
stake_voted_at: 0,
|
||||
stake_voted_subtree: 0,
|
||||
// The `best_slot` of a leaf is itself
|
||||
best_slot: slot,
|
||||
best_slot: slot_hash_key,
|
||||
children: vec![],
|
||||
parent,
|
||||
is_candidate: true,
|
||||
latest_invalid_ancestor: parent_latest_invalid_ancestor,
|
||||
// If the parent is none, then this is the root, which implies this must
|
||||
// have reached the duplicate confirmed threshold
|
||||
is_duplicate_confirmed: parent.is_none(),
|
||||
});
|
||||
|
||||
if parent.is_none() {
|
||||
|
@ -294,11 +367,11 @@ impl HeaviestSubtreeForkChoice {
|
|||
.get_mut(&parent)
|
||||
.unwrap()
|
||||
.children
|
||||
.push(slot);
|
||||
.push(slot_hash_key);
|
||||
|
||||
// Propagate leaf up the tree to any ancestors who considered the previous leaf
|
||||
// the `best_slot`
|
||||
self.propagate_new_leaf(&slot, &parent)
|
||||
self.propagate_new_leaf(&slot_hash_key, &parent)
|
||||
}
|
||||
|
||||
// Returns if the given `maybe_best_child` is the heaviest among the children
|
||||
|
@ -316,10 +389,7 @@ impl HeaviestSubtreeForkChoice {
|
|||
.expect("child must exist in `self.fork_infos`");
|
||||
|
||||
// Don't count children currently marked as invalid
|
||||
if !self
|
||||
.is_candidate_slot(child)
|
||||
.expect("child must exist in tree")
|
||||
{
|
||||
if !self.is_candidate(child).expect("child must exist in tree") {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@@ -378,6 +448,34 @@ impl HeaviestSubtreeForkChoice {
.map(|fork_info| fork_info.stake_voted_at)
}

pub fn latest_invalid_ancestor(&self, slot_hash_key: &SlotHashKey) -> Option<Slot> {
self.fork_infos
.get(slot_hash_key)
.map(|fork_info| fork_info.latest_invalid_ancestor)
.unwrap_or(None)
}

pub fn is_duplicate_confirmed(&self, slot_hash_key: &SlotHashKey) -> Option<bool> {
self.fork_infos
.get(&slot_hash_key)
.map(|fork_info| fork_info.is_duplicate_confirmed())
}

/// Returns if the exact node with the specified key has been explicitly marked as a duplicate
/// slot (doesn't count ancestors being marked as duplicate).
pub fn is_unconfirmed_duplicate(&self, slot_hash_key: &SlotHashKey) -> Option<bool> {
self.fork_infos
.get(slot_hash_key)
.map(|fork_info| fork_info.is_unconfirmed_duplicate(slot_hash_key.0))
}

/// Returns false if the node or any of its ancestors have been marked as duplicate
pub fn is_candidate(&self, slot_hash_key: &SlotHashKey) -> Option<bool> {
self.fork_infos
.get(&slot_hash_key)
.map(|fork_info| fork_info.is_candidate())
}

fn propagate_new_leaf(
&mut self,
slot_hash_key: &SlotHashKey,
@ -406,42 +504,69 @@ impl HeaviestSubtreeForkChoice {
|
|||
}
|
||||
}
|
||||
|
||||
fn insert_mark_valid_aggregate_operations(
|
||||
&self,
|
||||
update_operations: &mut BTreeMap<(SlotHashKey, UpdateLabel), UpdateOperation>,
|
||||
slot_hash_key: SlotHashKey,
|
||||
) {
|
||||
self.do_insert_aggregate_operations(update_operations, true, slot_hash_key);
|
||||
}
|
||||
|
||||
fn insert_aggregate_operations(
|
||||
&self,
|
||||
update_operations: &mut BTreeMap<(SlotHashKey, UpdateLabel), UpdateOperation>,
|
||||
slot_hash_key: SlotHashKey,
|
||||
) {
|
||||
self.do_insert_aggregate_operations(update_operations, false, slot_hash_key);
|
||||
self.do_insert_aggregate_operations_across_ancestors(
|
||||
update_operations,
|
||||
None,
|
||||
slot_hash_key,
|
||||
);
|
||||
}
|
||||
|
||||
#[allow(clippy::map_entry)]
|
||||
fn do_insert_aggregate_operations(
|
||||
fn do_insert_aggregate_operations_across_ancestors(
|
||||
&self,
|
||||
update_operations: &mut BTreeMap<(SlotHashKey, UpdateLabel), UpdateOperation>,
|
||||
should_mark_valid: bool,
|
||||
modify_fork_validity: Option<UpdateOperation>,
|
||||
slot_hash_key: SlotHashKey,
|
||||
) {
|
||||
for parent_slot_hash_key in self.ancestor_iterator(slot_hash_key) {
|
||||
let aggregate_label = (parent_slot_hash_key, UpdateLabel::Aggregate);
|
||||
if update_operations.contains_key(&aggregate_label) {
|
||||
if !self.do_insert_aggregate_operation(
|
||||
update_operations,
|
||||
&modify_fork_validity,
|
||||
parent_slot_hash_key,
|
||||
) {
|
||||
// If this parent was already inserted, we assume all the other parents have also
|
||||
// already been inserted. This is to prevent iterating over the parents multiple times
|
||||
// when we are aggregating leaves that have a lot of shared ancestors
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::map_entry)]
|
||||
fn do_insert_aggregate_operation(
|
||||
&self,
|
||||
update_operations: &mut BTreeMap<(SlotHashKey, UpdateLabel), UpdateOperation>,
|
||||
modify_fork_validity: &Option<UpdateOperation>,
|
||||
slot_hash_key: SlotHashKey,
|
||||
) -> bool {
|
||||
let aggregate_label = (slot_hash_key, UpdateLabel::Aggregate);
|
||||
if update_operations.contains_key(&aggregate_label) {
|
||||
false
|
||||
} else {
|
||||
if should_mark_valid {
|
||||
if let Some(mark_fork_validity) = modify_fork_validity {
|
||||
match mark_fork_validity {
|
||||
UpdateOperation::MarkValid(slot) => {
|
||||
update_operations.insert(
|
||||
(parent_slot_hash_key, UpdateLabel::MarkValid),
|
||||
UpdateOperation::MarkValid,
|
||||
(slot_hash_key, UpdateLabel::MarkValid(*slot)),
|
||||
UpdateOperation::MarkValid(*slot),
|
||||
);
|
||||
}
|
||||
update_operations.insert(aggregate_label, UpdateOperation::Aggregate);
|
||||
UpdateOperation::MarkInvalid(slot) => {
|
||||
update_operations.insert(
|
||||
(slot_hash_key, UpdateLabel::MarkInvalid(*slot)),
|
||||
UpdateOperation::MarkInvalid(*slot),
|
||||
);
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
update_operations.insert(aggregate_label, UpdateOperation::Aggregate);
|
||||
true
|
||||
}
|
||||
}
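In the hunk above, do_insert_aggregate_operation returns false when the node already has an Aggregate entry queued, and the ancestor walk breaks at that point: if one ancestor was already scheduled, everything above it was scheduled by an earlier walk, so leaves with long shared prefixes do not re-scan the same ancestors. A compact sketch of that early exit, using hypothetical names and a plain set of slots:

    use std::collections::BTreeSet;

    type Slot = u64;

    // Queue an aggregate for `slot`; false means it was already queued.
    fn queue_aggregate(pending: &mut BTreeSet<Slot>, slot: Slot) -> bool {
        pending.insert(slot)
    }

    // Walk the ancestors of a leaf, stopping at the first one already queued.
    fn queue_ancestor_aggregates(pending: &mut BTreeSet<Slot>, ancestors: &[Slot]) {
        for &ancestor in ancestors {
            if !queue_aggregate(pending, ancestor) {
                // Anything above this ancestor was queued by a previous walk.
                break;
            }
        }
    }

    fn main() {
        let mut pending = BTreeSet::new();
        queue_ancestor_aggregates(&mut pending, &[4, 3, 2, 1, 0]); // first leaf's ancestors
        queue_ancestor_aggregates(&mut pending, &[5, 4, 3, 2, 1]); // shares 4..=1, stops at 4
        assert_eq!(pending.len(), 6); // slots 0..=5 each queued exactly once
    }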
|
||||
|
||||
|
@ -452,12 +577,18 @@ impl HeaviestSubtreeForkChoice {
|
|||
fn aggregate_slot(&mut self, slot_hash_key: SlotHashKey) {
|
||||
let mut stake_voted_subtree;
|
||||
let mut best_slot_hash_key = slot_hash_key;
|
||||
let mut is_duplicate_confirmed = false;
|
||||
if let Some(fork_info) = self.fork_infos.get(&slot_hash_key) {
|
||||
stake_voted_subtree = fork_info.stake_voted_at;
|
||||
let mut best_child_stake_voted_subtree = 0;
|
||||
let mut best_child_slot = slot_hash_key;
|
||||
for child in &fork_info.children {
|
||||
let child_stake_voted_subtree = self.stake_voted_subtree(child).unwrap();
|
||||
let mut best_child_slot_key = slot_hash_key;
|
||||
for child_key in &fork_info.children {
|
||||
let child_fork_info = self
|
||||
.fork_infos
|
||||
.get(&child_key)
|
||||
.expect("Child must exist in fork_info map");
|
||||
let child_stake_voted_subtree = child_fork_info.stake_voted_subtree;
|
||||
is_duplicate_confirmed |= child_fork_info.is_duplicate_confirmed;
|
||||
|
||||
// Child forks that are not candidates still contribute to the weight
|
||||
// of the subtree rooted at `slot_hash_key`. For instance:
|
||||
|
@ -482,19 +613,15 @@ impl HeaviestSubtreeForkChoice {
|
|||
|
||||
// Note: If there's no valid children, then the best slot should default to the
|
||||
// input `slot` itself.
|
||||
if self
|
||||
.is_candidate_slot(child)
|
||||
.expect("Child must exist in fork_info map")
|
||||
&& (best_child_slot == slot_hash_key ||
|
||||
if child_fork_info.is_candidate()
|
||||
&& (best_child_slot_key == slot_hash_key ||
|
||||
child_stake_voted_subtree > best_child_stake_voted_subtree ||
|
||||
// tiebreaker by slot height, prioritize earlier slot
|
||||
(child_stake_voted_subtree == best_child_stake_voted_subtree && child < &best_child_slot))
|
||||
(child_stake_voted_subtree == best_child_stake_voted_subtree && child_key < &best_child_slot_key))
|
||||
{
|
||||
best_child_stake_voted_subtree = child_stake_voted_subtree;
|
||||
best_child_slot = *child;
|
||||
best_slot_hash_key = self
|
||||
.best_slot(child)
|
||||
.expect("`child` must exist in `self.fork_infos`");
|
||||
best_child_slot_key = *child_key;
|
||||
best_slot_hash_key = child_fork_info.best_slot;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
@ -502,19 +629,38 @@ impl HeaviestSubtreeForkChoice {
|
|||
}
|
||||
|
||||
let fork_info = self.fork_infos.get_mut(&slot_hash_key).unwrap();
|
||||
if is_duplicate_confirmed {
|
||||
if !fork_info.is_duplicate_confirmed {
|
||||
info!(
|
||||
"Fork choice setting {:?} to duplicate confirmed",
|
||||
slot_hash_key
|
||||
);
|
||||
}
|
||||
fork_info.set_duplicate_confirmed();
|
||||
}
|
||||
fork_info.stake_voted_subtree = stake_voted_subtree;
|
||||
fork_info.best_slot = best_slot_hash_key;
|
||||
}
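aggregate_slot now also ORs together the children's is_duplicate_confirmed flags while picking the best child, and children that are not candidates still add their stake to the subtree total even though they cannot be chosen as best. A condensed sketch of that loop, with simplified types and no hash keys:

    type Slot = u64;

    struct Child {
        slot: Slot,
        stake_voted_subtree: u64,
        is_candidate: bool,
        is_duplicate_confirmed: bool,
        best_slot: Slot,
    }

    // Returns (subtree stake, best slot, any child duplicate confirmed) for a parent
    // whose own voted stake is `stake_at` and whose slot is `parent_slot`.
    fn aggregate(parent_slot: Slot, stake_at: u64, children: &[Child]) -> (u64, Slot, bool) {
        let mut stake_voted_subtree = stake_at;
        let mut best_slot = parent_slot;
        let mut best_child_stake = 0;
        let mut best_child_slot = parent_slot;
        let mut is_duplicate_confirmed = false;
        for child in children {
            stake_voted_subtree += child.stake_voted_subtree; // counted even if invalid
            is_duplicate_confirmed |= child.is_duplicate_confirmed;
            let heavier = child.stake_voted_subtree > best_child_stake;
            let tie_break = child.stake_voted_subtree == best_child_stake && child.slot < best_child_slot;
            if child.is_candidate && (best_child_slot == parent_slot || heavier || tie_break) {
                best_child_stake = child.stake_voted_subtree;
                best_child_slot = child.slot;
                best_slot = child.best_slot;
            }
        }
        (stake_voted_subtree, best_slot, is_duplicate_confirmed)
    }

    fn main() {
        let children = vec![
            Child { slot: 2, stake_voted_subtree: 300, is_candidate: false, is_duplicate_confirmed: false, best_slot: 6 },
            Child { slot: 3, stake_voted_subtree: 100, is_candidate: true, is_duplicate_confirmed: true, best_slot: 3 },
        ];
        // The heavier child is invalid, so the lighter candidate's best slot wins,
        // but the invalid child's stake still counts toward the subtree total.
        assert_eq!(aggregate(1, 50, &children), (450, 3, true));
    }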
|
||||
|
||||
fn mark_slot_valid(&mut self, valid_slot_hash_key: (Slot, Hash)) {
|
||||
if let Some(fork_info) = self.fork_infos.get_mut(&valid_slot_hash_key) {
|
||||
if !fork_info.is_candidate {
|
||||
info!(
|
||||
"marked previously invalid fork starting at slot: {:?} as valid",
|
||||
valid_slot_hash_key
|
||||
);
|
||||
/// Mark that `valid_slot` on the fork starting at `fork_to_modify` has been marked
|
||||
/// valid. Note we don't need the hash for `valid_slot` because slot number uniquely
|
||||
/// identifies a node on a single fork.
|
||||
fn mark_fork_valid(&mut self, fork_to_modify_key: SlotHashKey, valid_slot: Slot) {
|
||||
if let Some(fork_info_to_modify) = self.fork_infos.get_mut(&fork_to_modify_key) {
|
||||
fork_info_to_modify.update_with_newly_valid_ancestor(&fork_to_modify_key, valid_slot);
|
||||
if fork_to_modify_key.0 == valid_slot {
|
||||
fork_info_to_modify.is_duplicate_confirmed = true;
|
||||
}
|
||||
fork_info.is_candidate = true;
|
||||
}
|
||||
}
|
||||
|
||||
/// Mark that `invalid_slot` on the fork starting at `fork_to_modify` has been marked
|
||||
/// invalid. Note we don't need the hash for `invalid_slot` because slot number uniquely
|
||||
/// identifies a node on a single fork.
|
||||
fn mark_fork_invalid(&mut self, fork_to_modify_key: SlotHashKey, invalid_slot: Slot) {
|
||||
if let Some(fork_info_to_modify) = self.fork_infos.get_mut(&fork_to_modify_key) {
|
||||
fork_info_to_modify
|
||||
.update_with_newly_invalid_ancestor(&fork_to_modify_key, invalid_slot);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -641,7 +787,12 @@ impl HeaviestSubtreeForkChoice {
|
|||
// Iterate through the update operations from greatest to smallest slot
|
||||
for ((slot_hash_key, _), operation) in update_operations.into_iter().rev() {
|
||||
match operation {
|
||||
UpdateOperation::MarkValid => self.mark_slot_valid(slot_hash_key),
|
||||
UpdateOperation::MarkValid(valid_slot) => {
|
||||
self.mark_fork_valid(slot_hash_key, valid_slot)
|
||||
}
|
||||
UpdateOperation::MarkInvalid(invalid_slot) => {
|
||||
self.mark_fork_invalid(slot_hash_key, invalid_slot)
|
||||
}
|
||||
UpdateOperation::Aggregate => self.aggregate_slot(slot_hash_key),
|
||||
UpdateOperation::Add(stake) => self.add_slot_stake(&slot_hash_key, stake),
|
||||
UpdateOperation::Subtract(stake) => self.subtract_slot_stake(&slot_hash_key, stake),
|
||||
|
@@ -810,35 +961,48 @@ impl ForkChoice for HeaviestSubtreeForkChoice {

fn mark_fork_invalid_candidate(&mut self, invalid_slot_hash_key: &SlotHashKey) {
info!(
"marking fork starting at slot: {:?} invalid candidate",
"marking fork starting at: {:?} invalid candidate",
invalid_slot_hash_key
);
let fork_info = self.fork_infos.get_mut(invalid_slot_hash_key);
if let Some(fork_info) = fork_info {
if fork_info.is_candidate {
fork_info.is_candidate = false;
// Aggregate to find the new best slots excluding this fork
// Should not be marking duplicate confirmed blocks as invalid candidates
assert!(!fork_info.is_duplicate_confirmed);
let mut update_operations = UpdateOperations::default();

// Notify all the children of this node that a parent was marked as invalid
for child_hash_key in self.subtree_diff(*invalid_slot_hash_key, SlotHashKey::default())
{
self.do_insert_aggregate_operation(
&mut update_operations,
&Some(UpdateOperation::MarkInvalid(invalid_slot_hash_key.0)),
child_hash_key,
);
}

// Aggregate across all ancestors to find the new best slots excluding this fork
self.insert_aggregate_operations(&mut update_operations, *invalid_slot_hash_key);
self.process_update_operations(update_operations);
}
}
}
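mark_fork_invalid_candidate above now does two traversals: it pushes a MarkInvalid(slot) operation to every node in the invalidated subtree, then queues Aggregate operations up the ancestor chain so best_slot is recomputed without that fork. A toy traversal sketch with plain slots, hypothetical helper names, and no stake accounting:

    use std::collections::{HashMap, VecDeque};

    type Slot = u64;

    struct Tree {
        children: HashMap<Slot, Vec<Slot>>,
        parent: HashMap<Slot, Slot>,
    }

    impl Tree {
        // Everything at or below `root` (the analogue of subtree_diff against an
        // empty tree): these nodes get a MarkInvalid(root) notification.
        fn subtree(&self, root: Slot) -> Vec<Slot> {
            let mut out = Vec::new();
            let mut queue = VecDeque::from([root]);
            while let Some(node) = queue.pop_front() {
                out.push(node);
                queue.extend(self.children.get(&node).cloned().unwrap_or_default());
            }
            out
        }

        // Every ancestor above `slot` needs its best-slot aggregate recomputed.
        fn ancestors(&self, mut slot: Slot) -> Vec<Slot> {
            let mut out = Vec::new();
            while let Some(&p) = self.parent.get(&slot) {
                out.push(p);
                slot = p;
            }
            out
        }
    }

    fn main() {
        // 0 -> 1 -> 2 -> {3, 4}
        let tree = Tree {
            children: HashMap::from([(0, vec![1]), (1, vec![2]), (2, vec![3, 4])]),
            parent: HashMap::from([(1, 0), (2, 1), (3, 2), (4, 2)]),
        };
        assert_eq!(tree.subtree(2), vec![2, 3, 4]);
        assert_eq!(tree.ancestors(2), vec![1, 0]);
    }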
fn mark_fork_valid_candidate(&mut self, valid_slot_hash_key: &SlotHashKey) {
info!(
"marking fork starting at: {:?} valid candidate",
valid_slot_hash_key
);
let mut update_operations = UpdateOperations::default();
let fork_info = self.fork_infos.get_mut(valid_slot_hash_key);
if let Some(fork_info) = fork_info {
// If a bunch of slots on the same fork are confirmed at once, then only the latest
// slot will incur this aggregation operation
fork_info.is_candidate = true;
self.insert_mark_valid_aggregate_operations(
// Notify all the children of this node that a parent was marked as valid
for child_hash_key in self.subtree_diff(*valid_slot_hash_key, SlotHashKey::default()) {
self.do_insert_aggregate_operation(
&mut update_operations,
*valid_slot_hash_key,
&Some(UpdateOperation::MarkValid(valid_slot_hash_key.0)),
child_hash_key,
);
}

// Aggregate to find the new best slots including this fork
// Aggregate across all ancestors to find the new best slots including this fork
self.insert_aggregate_operations(&mut update_operations, *valid_slot_hash_key);
self.process_update_operations(update_operations);
}
}
||||
|
@ -2740,34 +2904,50 @@ mod test {
|
|||
(expected_best_slot, Hash::default()),
|
||||
);
|
||||
|
||||
// Mark slot 5 as invalid, the best fork should be its ancestor 3,
|
||||
// not the other for at 4.
|
||||
let invalid_candidate = (5, Hash::default());
|
||||
// Simulate a vote on slot 5
|
||||
let last_voted_slot_hash = (5, Hash::default());
|
||||
let mut tower = Tower::new_for_tests(10, 0.9);
|
||||
tower.record_vote(last_voted_slot_hash.0, last_voted_slot_hash.1);
|
||||
|
||||
// The heaviest_slot_on_same_voted_fork() should be 6, descended from 5.
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice
|
||||
.heaviest_slot_on_same_voted_fork(&tower)
|
||||
.unwrap(),
|
||||
(6, Hash::default())
|
||||
);
|
||||
|
||||
// Mark slot 5 as invalid
|
||||
let invalid_candidate = last_voted_slot_hash;
|
||||
heaviest_subtree_fork_choice.mark_fork_invalid_candidate(&invalid_candidate);
|
||||
assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 3);
|
||||
assert!(!heaviest_subtree_fork_choice
|
||||
.is_candidate_slot(&invalid_candidate)
|
||||
.is_candidate(&invalid_candidate)
|
||||
.unwrap());
|
||||
|
||||
// The ancestor is still a candidate
|
||||
// The ancestor 3 is still a candidate
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_candidate_slot(&(3, Hash::default()))
|
||||
.is_candidate(&(3, Hash::default()))
|
||||
.unwrap());
|
||||
|
||||
// The best fork should be its ancestor 3, not the other fork at 4.
|
||||
assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 3);
|
||||
|
||||
// After marking the last vote in the tower as invalid, `heaviest_slot_on_same_voted_fork()`
|
||||
// should disregard all descendants of that invalid vote
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.heaviest_slot_on_same_voted_fork(&tower)
|
||||
.is_none());
|
||||
|
||||
// Adding another descendant to the invalid candidate won't
|
||||
// update the best slot, even if it contains votes
|
||||
let new_leaf_slot7 = 7;
|
||||
heaviest_subtree_fork_choice.add_new_leaf_slot(
|
||||
(new_leaf_slot7, Hash::default()),
|
||||
Some((6, Hash::default())),
|
||||
);
|
||||
let new_leaf7 = (7, Hash::default());
|
||||
heaviest_subtree_fork_choice.add_new_leaf_slot(new_leaf7, Some((6, Hash::default())));
|
||||
let invalid_slot_ancestor = 3;
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.best_overall_slot().0,
|
||||
invalid_slot_ancestor
|
||||
);
|
||||
let pubkey_votes: Vec<(Pubkey, SlotHashKey)> =
|
||||
vec![(vote_pubkeys[0], (new_leaf_slot7, Hash::default()))];
|
||||
let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![(vote_pubkeys[0], new_leaf7)];
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.add_votes(
|
||||
pubkey_votes.iter(),
|
||||
|
@ -2777,34 +2957,51 @@ mod test {
|
|||
(invalid_slot_ancestor, Hash::default()),
|
||||
);
|
||||
|
||||
// This shouldn't update the `heaviest_slot_on_same_voted_fork` either
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.heaviest_slot_on_same_voted_fork(&tower)
|
||||
.is_none());
|
||||
|
||||
// Adding a descendant to the ancestor of the invalid candidate *should* update
|
||||
// the best slot though, since the ancestor is on the heaviest fork
|
||||
let new_leaf_slot8 = 8;
|
||||
heaviest_subtree_fork_choice.add_new_leaf_slot(
|
||||
(new_leaf_slot8, Hash::default()),
|
||||
Some((invalid_slot_ancestor, Hash::default())),
|
||||
);
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.best_overall_slot().0,
|
||||
new_leaf_slot8,
|
||||
);
|
||||
let new_leaf8 = (8, Hash::default());
|
||||
heaviest_subtree_fork_choice
|
||||
.add_new_leaf_slot(new_leaf8, Some((invalid_slot_ancestor, Hash::default())));
|
||||
assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), new_leaf8,);
|
||||
// Should not update the `heaviest_slot_on_same_voted_fork` because the new leaf
|
||||
// is not descended from the last vote
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.heaviest_slot_on_same_voted_fork(&tower)
|
||||
.is_none());
|
||||
|
||||
// If we mark slot a descendant of `invalid_candidate` as valid, then that
|
||||
// should also mark `invalid_candidate` as valid, and the best slot should
|
||||
// be the leaf of the heaviest fork, `new_leaf_slot`.
|
||||
heaviest_subtree_fork_choice.mark_fork_valid_candidate(&invalid_candidate);
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_candidate_slot(&invalid_candidate)
|
||||
.is_candidate(&invalid_candidate)
|
||||
.unwrap());
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.best_overall_slot().0,
|
||||
heaviest_subtree_fork_choice.best_overall_slot(),
|
||||
// Should pick the smaller slot of the two new equally weighted leaves
|
||||
new_leaf_slot7
|
||||
new_leaf7
|
||||
);
|
||||
// Should update the `heaviest_slot_on_same_voted_fork` as well
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice
|
||||
.heaviest_slot_on_same_voted_fork(&tower)
|
||||
.unwrap(),
|
||||
new_leaf7
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_mark_valid_invalid_forks_duplicate() {
|
||||
fn setup_mark_invalid_forks_duplicate_tests() -> (
|
||||
HeaviestSubtreeForkChoice,
|
||||
Vec<SlotHashKey>,
|
||||
SlotHashKey,
|
||||
Bank,
|
||||
Vec<Pubkey>,
|
||||
) {
|
||||
let (
|
||||
mut heaviest_subtree_fork_choice,
|
||||
duplicate_leaves_descended_from_4,
|
||||
|
@ -2832,11 +3029,27 @@ mod test {
|
|||
// the other branch at slot 5
|
||||
let invalid_candidate = (4, Hash::default());
|
||||
heaviest_subtree_fork_choice.mark_fork_invalid_candidate(&invalid_candidate);
|
||||
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.best_overall_slot(),
|
||||
(2, Hash::default())
|
||||
);
|
||||
(
|
||||
heaviest_subtree_fork_choice,
|
||||
duplicate_leaves_descended_from_4,
|
||||
invalid_candidate,
|
||||
bank,
|
||||
vote_pubkeys,
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_mark_invalid_then_valid_duplicate() {
|
||||
let (
|
||||
mut heaviest_subtree_fork_choice,
|
||||
duplicate_leaves_descended_from_4,
|
||||
invalid_candidate,
|
||||
..,
|
||||
) = setup_mark_invalid_forks_duplicate_tests();
|
||||
|
||||
// Marking the candidate as valid again will choose the heaviest leaf of
// the newly valid branch
|
||||
|
@ -2851,16 +3064,26 @@ mod test {
|
|||
heaviest_subtree_fork_choice.best_overall_slot(),
|
||||
duplicate_descendant
|
||||
);
|
||||
}
|
||||
|
||||
// Mark the current heaviest branch as invalid again
|
||||
heaviest_subtree_fork_choice.mark_fork_invalid_candidate(&invalid_candidate);
|
||||
#[test]
|
||||
fn test_mark_invalid_then_add_new_heavier_duplicate_slot() {
|
||||
let (
|
||||
mut heaviest_subtree_fork_choice,
|
||||
duplicate_leaves_descended_from_4,
|
||||
_invalid_candidate,
|
||||
bank,
|
||||
vote_pubkeys,
|
||||
) = setup_mark_invalid_forks_duplicate_tests();
|
||||
|
||||
// If we add a new version of the duplicate slot that is not descended from the invalid
// candidate, and add votes for that new version, the new version should be picked
// once it has more weight
|
||||
let new_duplicate_hash = Hash::default();
|
||||
|
||||
// The hash has to be smaller in order for the votes to be counted
|
||||
assert!(new_duplicate_hash < duplicate_leaves_descended_from_4[0].1);
|
||||
let duplicate_slot = duplicate_leaves_descended_from_4[0].0;
|
||||
let new_duplicate = (duplicate_slot, new_duplicate_hash);
|
||||
heaviest_subtree_fork_choice.add_new_leaf_slot(new_duplicate, Some((3, Hash::default())));
|
||||
|
||||
|
@ -2881,6 +3104,285 @@ mod test {
|
|||
);
|
||||
}
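// A note on the test above (descriptive, not part of this change): fork choice keys
// every entry by `SlotHashKey = (Slot, Hash)`, so two versions of the same duplicate
// slot are tracked independently. Illustrative only (the hash helper is assumed):
//
//   let version_a = (duplicate_slot, Hash::new_unique());
//   let version_b = (duplicate_slot, Hash::new_unique());
//
// Invalidating the version descended from the invalid candidate does not prevent the
// other version, added on a different branch, from becoming the best slot once it
// accumulates more stake.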
|
||||
|
||||
#[test]
|
||||
fn test_mark_valid_then_descendant_invalid() {
|
||||
let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4) / (tr(5) / tr(6))))));
|
||||
let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_tree(forks);
|
||||
let duplicate_confirmed_slot: Slot = 1;
|
||||
let duplicate_confirmed_key = duplicate_confirmed_slot.slot_hash();
|
||||
heaviest_subtree_fork_choice.mark_fork_valid_candidate(&duplicate_confirmed_key);
|
||||
|
||||
for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() {
|
||||
let slot = slot_hash_key.0;
|
||||
if slot <= duplicate_confirmed_slot {
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&slot_hash_key)
|
||||
.unwrap());
|
||||
} else {
|
||||
assert!(!heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&slot_hash_key)
|
||||
.unwrap());
|
||||
}
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(slot_hash_key)
|
||||
.is_none());
|
||||
}
|
||||
|
||||
// Mark a later descendant invalid
|
||||
let invalid_descendant_slot = 5;
|
||||
let invalid_descendant_key = invalid_descendant_slot.slot_hash();
|
||||
heaviest_subtree_fork_choice.mark_fork_invalid_candidate(&invalid_descendant_key);
|
||||
for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() {
|
||||
let slot = slot_hash_key.0;
|
||||
if slot <= duplicate_confirmed_slot {
|
||||
// All ancestors of the duplicate confirmed slot should:
|
||||
// 1) Be duplicate confirmed
|
||||
// 2) Have no invalid ancestors
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&slot_hash_key)
|
||||
.unwrap());
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(slot_hash_key)
|
||||
.is_none());
|
||||
} else if slot >= invalid_descendant_slot {
|
||||
// Anything descended from the invalid slot should:
// 1) Not be duplicate confirmed
// 2) Have an invalid ancestor == `invalid_descendant_slot`
|
||||
assert!(!heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&slot_hash_key)
|
||||
.unwrap());
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(slot_hash_key)
|
||||
.unwrap(),
|
||||
invalid_descendant_slot
|
||||
);
|
||||
} else {
|
||||
// Anything in between the duplicate confirmed slot and the invalid slot should:
// 1) Not be duplicate confirmed
// 2) Not have an invalid ancestor
|
||||
assert!(!heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&slot_hash_key)
|
||||
.unwrap());
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(slot_hash_key)
|
||||
.is_none());
|
||||
}
|
||||
}
|
||||
|
||||
// Mark later descendant `d` duplicate confirmed where `duplicate_confirmed_slot < d < invalid_descendant_slot`.
|
||||
let later_duplicate_confirmed_slot = 4;
|
||||
assert!(
|
||||
later_duplicate_confirmed_slot > duplicate_confirmed_slot
|
||||
&& later_duplicate_confirmed_slot < invalid_descendant_slot
|
||||
);
|
||||
let later_duplicate_confirmed_key = later_duplicate_confirmed_slot.slot_hash();
|
||||
heaviest_subtree_fork_choice.mark_fork_valid_candidate(&later_duplicate_confirmed_key);
|
||||
|
||||
for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() {
|
||||
let slot = slot_hash_key.0;
|
||||
if slot <= later_duplicate_confirmed_slot {
|
||||
// All ancestors of the later_duplicate_confirmed_slot should:
|
||||
// 1) Be duplicate confirmed
|
||||
// 2) Have no invalid ancestors
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&slot_hash_key)
|
||||
.unwrap());
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(slot_hash_key)
|
||||
.is_none());
|
||||
} else if slot >= invalid_descendant_slot {
|
||||
// Anything descended from the invalid slot should:
// 1) Not be duplicate confirmed
// 2) Have an invalid ancestor == `invalid_descendant_slot`
|
||||
assert!(!heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&slot_hash_key)
|
||||
.unwrap());
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(slot_hash_key)
|
||||
.unwrap(),
|
||||
invalid_descendant_slot
|
||||
);
|
||||
} else {
|
||||
// Anything in between the duplicate confirmed slot and the invalid slot should:
// 1) Not be duplicate confirmed
// 2) Not have an invalid ancestor
|
||||
assert!(!heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&slot_hash_key)
|
||||
.unwrap());
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(slot_hash_key)
|
||||
.is_none());
|
||||
}
|
||||
}
|
||||
|
||||
// Mark all slots duplicate confirmed
|
||||
let last_duplicate_confirmed_slot = 6;
|
||||
let last_duplicate_confirmed_key = last_duplicate_confirmed_slot.slot_hash();
|
||||
heaviest_subtree_fork_choice.mark_fork_valid_candidate(&last_duplicate_confirmed_key);
|
||||
for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() {
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&slot_hash_key)
|
||||
.unwrap());
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(slot_hash_key)
|
||||
.is_none());
|
||||
}
|
||||
}
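// Summary of the invariants exercised by the test above (this chain is linear, so
// slot comparisons stand in for ancestry): after marking slot `c` duplicate confirmed
// and a later descendant `i` invalid, every slot `s` falls into one of three buckets:
//
//   s <= c       => is_duplicate_confirmed == true,  latest_invalid_ancestor == None
//   c < s < i    => is_duplicate_confirmed == false, latest_invalid_ancestor == None
//   s >= i       => is_duplicate_confirmed == false, latest_invalid_ancestor == Some(i)
//
// Duplicate confirming a slot >= `i` later clears the invalid ancestor on its subtree,
// as the final loop checks.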
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_mark_valid_then_ancestor_invalid() {
|
||||
let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4) / (tr(5) / tr(6))))));
|
||||
let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_tree(forks);
|
||||
let duplicate_confirmed_slot: Slot = 4;
|
||||
let duplicate_confirmed_key = duplicate_confirmed_slot.slot_hash();
|
||||
heaviest_subtree_fork_choice.mark_fork_valid_candidate(&duplicate_confirmed_key);
|
||||
|
||||
// Now mark an ancestor of this fork invalid; this should panic since this ancestor
// was already duplicate confirmed by its descendant, slot 4
|
||||
heaviest_subtree_fork_choice.mark_fork_invalid_candidate(&3.slot_hash());
|
||||
}
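// A minimal guard sketch for callers that must not hit the panic above; the helper
// name is hypothetical and not part of this change.
fn _maybe_mark_fork_invalid(fork_choice: &mut HeaviestSubtreeForkChoice, key: &SlotHashKey) {
    // `mark_fork_invalid_candidate` panics if the fork was already duplicate
    // confirmed (directly or via a descendant), so check that first and skip
    // the call if it was.
    if !fork_choice.is_duplicate_confirmed(key).unwrap_or(false) {
        fork_choice.mark_fork_invalid_candidate(key);
    }
}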
|
||||
|
||||
fn setup_set_unconfirmed_and_confirmed_duplicate_slot_tests(
|
||||
smaller_duplicate_slot: Slot,
|
||||
larger_duplicate_slot: Slot,
|
||||
) -> HeaviestSubtreeForkChoice {
|
||||
// Create simple fork 0 -> 1 -> 2 -> 3 -> 4 -> 5
|
||||
let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4) / tr(5)))));
|
||||
let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_tree(forks);
|
||||
|
||||
// Mark the slots as unconfirmed duplicates
|
||||
heaviest_subtree_fork_choice
|
||||
.mark_fork_invalid_candidate(&smaller_duplicate_slot.slot_hash());
|
||||
heaviest_subtree_fork_choice
|
||||
.mark_fork_invalid_candidate(&larger_duplicate_slot.slot_hash());
|
||||
|
||||
// Correctness checks
|
||||
for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() {
|
||||
let slot = slot_hash_key.0;
|
||||
if slot < smaller_duplicate_slot {
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(slot_hash_key)
|
||||
.is_none());
|
||||
} else if slot < larger_duplicate_slot {
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(slot_hash_key)
|
||||
.unwrap(),
|
||||
smaller_duplicate_slot
|
||||
);
|
||||
} else {
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(slot_hash_key)
|
||||
.unwrap(),
|
||||
larger_duplicate_slot
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
heaviest_subtree_fork_choice
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_set_unconfirmed_duplicate_confirm_smaller_slot_first() {
|
||||
let smaller_duplicate_slot = 1;
|
||||
let larger_duplicate_slot = 4;
|
||||
let mut heaviest_subtree_fork_choice =
|
||||
setup_set_unconfirmed_and_confirmed_duplicate_slot_tests(
|
||||
smaller_duplicate_slot,
|
||||
larger_duplicate_slot,
|
||||
);
|
||||
|
||||
// Mark the smaller duplicate slot as confirmed
|
||||
heaviest_subtree_fork_choice.mark_fork_valid_candidate(&smaller_duplicate_slot.slot_hash());
|
||||
for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() {
|
||||
let slot = slot_hash_key.0;
|
||||
if slot < larger_duplicate_slot {
|
||||
// Only slots <= smaller_duplicate_slot have been duplicate confirmed
|
||||
if slot <= smaller_duplicate_slot {
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(slot_hash_key)
|
||||
.unwrap());
|
||||
} else {
|
||||
assert!(!heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(slot_hash_key)
|
||||
.unwrap());
|
||||
}
|
||||
// The unconfirmed duplicate flag has been cleared on the smaller
|
||||
// descendants because their most recent duplicate ancestor has
|
||||
// been confirmed
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(slot_hash_key)
|
||||
.is_none());
|
||||
} else {
|
||||
assert!(!heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(slot_hash_key)
|
||||
.unwrap(),);
|
||||
// The unconfirmed duplicate flag has not been cleared on these
// descendants because their most recent duplicate ancestor,
// `larger_duplicate_slot`, has not yet been confirmed
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(slot_hash_key)
|
||||
.unwrap(),
|
||||
larger_duplicate_slot
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Mark the larger duplicate slot as confirmed; all slots should no longer
// have any unconfirmed duplicate ancestors, and should be marked as duplicate confirmed
|
||||
heaviest_subtree_fork_choice.mark_fork_valid_candidate(&larger_duplicate_slot.slot_hash());
|
||||
for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() {
|
||||
let slot = slot_hash_key.0;
|
||||
// All slots <= the latest duplicate confirmed slot are ancestors of
// that slot, so they should all be marked duplicate confirmed
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(slot_hash_key)
|
||||
.unwrap(),
|
||||
slot <= larger_duplicate_slot
|
||||
);
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(slot_hash_key)
|
||||
.is_none());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_set_unconfirmed_duplicate_confirm_larger_slot_first() {
|
||||
let smaller_duplicate_slot = 1;
|
||||
let larger_duplicate_slot = 4;
|
||||
let mut heaviest_subtree_fork_choice =
|
||||
setup_set_unconfirmed_and_confirmed_duplicate_slot_tests(
|
||||
smaller_duplicate_slot,
|
||||
larger_duplicate_slot,
|
||||
);
|
||||
|
||||
// Mark the larger duplicate slot as confirmed
|
||||
heaviest_subtree_fork_choice.mark_fork_valid_candidate(&larger_duplicate_slot.slot_hash());
|
||||
|
||||
// All slots should no longer have any unconfirmed duplicate ancestors
|
||||
heaviest_subtree_fork_choice.mark_fork_valid_candidate(&smaller_duplicate_slot.slot_hash());
|
||||
for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() {
|
||||
let slot = slot_hash_key.0;
|
||||
// All slots <= the latest duplicate confirmed slot are ancestors of
// that slot, so they should all be marked duplicate confirmed
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(slot_hash_key)
|
||||
.unwrap(),
|
||||
slot <= larger_duplicate_slot
|
||||
);
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(slot_hash_key)
|
||||
.is_none());
|
||||
}
|
||||
}
|
||||
|
||||
fn setup_forks() -> HeaviestSubtreeForkChoice {
|
||||
/*
|
||||
Build fork structure:
|
||||
|
|
|
@ -161,7 +161,6 @@ pub(crate) struct ForkProgress {
|
|||
pub(crate) propagated_stats: PropagatedStats,
|
||||
pub(crate) replay_stats: ReplaySlotStats,
|
||||
pub(crate) replay_progress: ConfirmationProgress,
|
||||
pub(crate) duplicate_stats: DuplicateStats,
|
||||
// Note `num_blocks_on_fork` and `num_dropped_blocks_on_fork` only
|
||||
// count new blocks replayed since last restart, which won't include
|
||||
// blocks already existing in the ledger/before snapshot at start,
|
||||
|
@ -174,7 +173,6 @@ impl ForkProgress {
|
|||
pub fn new(
|
||||
last_entry: Hash,
|
||||
prev_leader_slot: Option<Slot>,
|
||||
duplicate_stats: DuplicateStats,
|
||||
validator_stake_info: Option<ValidatorStakeInfo>,
|
||||
num_blocks_on_fork: u64,
|
||||
num_dropped_blocks_on_fork: u64,
|
||||
|
@ -208,7 +206,6 @@ impl ForkProgress {
|
|||
fork_stats: ForkStats::default(),
|
||||
replay_stats: ReplaySlotStats::default(),
|
||||
replay_progress: ConfirmationProgress::new(last_entry),
|
||||
duplicate_stats,
|
||||
num_blocks_on_fork,
|
||||
num_dropped_blocks_on_fork,
|
||||
propagated_stats: PropagatedStats {
|
||||
|
@ -228,7 +225,6 @@ impl ForkProgress {
|
|||
my_pubkey: &Pubkey,
|
||||
voting_pubkey: &Pubkey,
|
||||
prev_leader_slot: Option<Slot>,
|
||||
duplicate_stats: DuplicateStats,
|
||||
num_blocks_on_fork: u64,
|
||||
num_dropped_blocks_on_fork: u64,
|
||||
) -> Self {
|
||||
|
@ -247,20 +243,11 @@ impl ForkProgress {
|
|||
Self::new(
|
||||
bank.last_blockhash(),
|
||||
prev_leader_slot,
|
||||
duplicate_stats,
|
||||
validator_stake_info,
|
||||
num_blocks_on_fork,
|
||||
num_dropped_blocks_on_fork,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn is_duplicate_confirmed(&self) -> bool {
|
||||
self.duplicate_stats.is_duplicate_confirmed
|
||||
}
|
||||
|
||||
pub fn set_duplicate_confirmed(&mut self) {
|
||||
self.duplicate_stats.set_duplicate_confirmed();
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
|
@ -295,38 +282,6 @@ pub(crate) struct PropagatedStats {
|
|||
pub(crate) total_epoch_stake: u64,
|
||||
}
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
pub(crate) struct DuplicateStats {
|
||||
latest_unconfirmed_duplicate_ancestor: Option<Slot>,
|
||||
is_duplicate_confirmed: bool,
|
||||
}
|
||||
|
||||
impl DuplicateStats {
|
||||
pub fn new_with_unconfirmed_duplicate_ancestor(
|
||||
latest_unconfirmed_duplicate_ancestor: Option<Slot>,
|
||||
) -> Self {
|
||||
Self {
|
||||
latest_unconfirmed_duplicate_ancestor,
|
||||
is_duplicate_confirmed: false,
|
||||
}
|
||||
}
|
||||
|
||||
fn set_duplicate_confirmed(&mut self) {
|
||||
self.is_duplicate_confirmed = true;
|
||||
self.latest_unconfirmed_duplicate_ancestor = None;
|
||||
}
|
||||
|
||||
fn update_with_newly_confirmed_duplicate_ancestor(&mut self, newly_confirmed_ancestor: Slot) {
|
||||
if let Some(latest_unconfirmed_duplicate_ancestor) =
|
||||
self.latest_unconfirmed_duplicate_ancestor
|
||||
{
|
||||
if latest_unconfirmed_duplicate_ancestor <= newly_confirmed_ancestor {
|
||||
self.latest_unconfirmed_duplicate_ancestor = None;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
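// Behavior sketch with illustrative values (not part of this change): a newly
// confirmed duplicate ancestor only clears the tracked unconfirmed ancestor when it
// is at least as large as the one recorded.
//
//   let mut stats = DuplicateStats::new_with_unconfirmed_duplicate_ancestor(Some(3));
//   stats.update_with_newly_confirmed_duplicate_ancestor(2); // still Some(3)
//   stats.update_with_newly_confirmed_duplicate_ancestor(3); // cleared to None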
|
||||
|
||||
impl PropagatedStats {
|
||||
pub fn add_vote_pubkey(&mut self, vote_pubkey: Pubkey, stake: u64) {
|
||||
if self.propagated_validators.insert(vote_pubkey) {
|
||||
|
@ -458,102 +413,6 @@ impl ProgressMap {
|
|||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn is_unconfirmed_duplicate(&self, slot: Slot) -> Option<bool> {
|
||||
self.get(&slot).map(|p| {
|
||||
p.duplicate_stats
|
||||
.latest_unconfirmed_duplicate_ancestor
|
||||
.map(|ancestor| ancestor == slot)
|
||||
.unwrap_or(false)
|
||||
})
|
||||
}
|
||||
|
||||
pub fn latest_unconfirmed_duplicate_ancestor(&self, slot: Slot) -> Option<Slot> {
|
||||
self.get(&slot)
|
||||
.map(|p| p.duplicate_stats.latest_unconfirmed_duplicate_ancestor)
|
||||
.unwrap_or(None)
|
||||
}
|
||||
|
||||
pub fn set_unconfirmed_duplicate_slot(&mut self, slot: Slot, descendants: &HashSet<u64>) {
|
||||
if let Some(fork_progress) = self.get_mut(&slot) {
|
||||
if fork_progress.is_duplicate_confirmed() {
|
||||
assert!(fork_progress
|
||||
.duplicate_stats
|
||||
.latest_unconfirmed_duplicate_ancestor
|
||||
.is_none());
|
||||
return;
|
||||
}
|
||||
|
||||
if fork_progress
|
||||
.duplicate_stats
|
||||
.latest_unconfirmed_duplicate_ancestor
|
||||
== Some(slot)
|
||||
{
|
||||
// Already been marked
|
||||
return;
|
||||
}
|
||||
fork_progress
|
||||
.duplicate_stats
|
||||
.latest_unconfirmed_duplicate_ancestor = Some(slot);
|
||||
|
||||
for d in descendants {
|
||||
if let Some(fork_progress) = self.get_mut(&d) {
|
||||
fork_progress
|
||||
.duplicate_stats
|
||||
.latest_unconfirmed_duplicate_ancestor = Some(std::cmp::max(
|
||||
fork_progress
|
||||
.duplicate_stats
|
||||
.latest_unconfirmed_duplicate_ancestor
|
||||
.unwrap_or(0),
|
||||
slot,
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_confirmed_duplicate_slot(
|
||||
&mut self,
|
||||
slot: Slot,
|
||||
ancestors: &HashSet<u64>,
|
||||
descendants: &HashSet<u64>,
|
||||
) {
|
||||
for a in ancestors {
|
||||
if let Some(fork_progress) = self.get_mut(&a) {
|
||||
fork_progress.set_duplicate_confirmed();
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(slot_fork_progress) = self.get_mut(&slot) {
|
||||
// Setting the fields here is only correct and necessary if the loop above didn't
// already do this, so check with an assert.
|
||||
assert!(!ancestors.contains(&slot));
|
||||
let slot_had_unconfirmed_duplicate_ancestor = slot_fork_progress
|
||||
.duplicate_stats
|
||||
.latest_unconfirmed_duplicate_ancestor
|
||||
.is_some();
|
||||
slot_fork_progress.set_duplicate_confirmed();
|
||||
|
||||
if slot_had_unconfirmed_duplicate_ancestor {
|
||||
for d in descendants {
|
||||
if let Some(descendant_fork_progress) = self.get_mut(&d) {
|
||||
descendant_fork_progress
|
||||
.duplicate_stats
|
||||
.update_with_newly_confirmed_duplicate_ancestor(slot);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Neither this slot `S`, nor earlier ancestors were marked as duplicate,
|
||||
// so this means all descendants either:
|
||||
// 1) Have no duplicate ancestors
|
||||
// 2) Have a duplicate ancestor > `S`
|
||||
|
||||
// In both cases, there's no need to iterate through descendants because
|
||||
// this confirmation on `S` is irrelevant to them.
|
||||
}
|
||||
}
|
||||
}
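// Call-site sketch (the `slot` and surrounding bindings are illustrative): callers are
// expected to pass the ancestor/descendant sets derived from `BankForks`, mirroring
// the tests below.
//
//   let ancestors = bank_forks.read().unwrap().ancestors();
//   let descendants = bank_forks.read().unwrap().descendants().clone();
//   progress.set_confirmed_duplicate_slot(
//       slot,
//       ancestors.get(&slot).unwrap(),
//       descendants.get(&slot).unwrap(),
//   );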
|
||||
|
||||
pub fn my_latest_landed_vote(&self, slot: Slot) -> Option<Slot> {
|
||||
self.progress_map
|
||||
.get(&slot)
|
||||
|
@ -571,12 +430,6 @@ impl ProgressMap {
|
|||
.map(|s| s.fork_stats.is_supermajority_confirmed)
|
||||
}
|
||||
|
||||
pub fn is_duplicate_confirmed(&self, slot: Slot) -> Option<bool> {
|
||||
self.progress_map
|
||||
.get(&slot)
|
||||
.map(|s| s.is_duplicate_confirmed())
|
||||
}
|
||||
|
||||
pub fn get_bank_prev_leader_slot(&self, bank: &Bank) -> Option<Slot> {
|
||||
let parent_slot = bank.parent_slot();
|
||||
self.get_propagated_stats(parent_slot)
|
||||
|
@ -619,8 +472,6 @@ impl ProgressMap {
|
|||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::consensus::test::VoteSimulator;
|
||||
use trees::tr;
|
||||
|
||||
#[test]
|
||||
fn test_add_vote_pubkey() {
|
||||
|
@ -711,21 +562,13 @@ mod test {
|
|||
fn test_is_propagated_status_on_construction() {
|
||||
// If the given ValidatorStakeInfo == None, then this is not
|
||||
// a leader slot and is_propagated == false
|
||||
let progress = ForkProgress::new(
|
||||
Hash::default(),
|
||||
Some(9),
|
||||
DuplicateStats::default(),
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
);
|
||||
let progress = ForkProgress::new(Hash::default(), Some(9), None, 0, 0);
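// With this change `ForkProgress::new` no longer takes a `DuplicateStats` argument;
// the remaining parameters are, in order: last entry hash, previous leader slot,
// validator stake info, blocks on fork, and dropped blocks on fork (as used above).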
|
||||
assert!(!progress.propagated_stats.is_propagated);
|
||||
|
||||
// If the stake is zero, then threshold is always achieved
|
||||
let progress = ForkProgress::new(
|
||||
Hash::default(),
|
||||
Some(9),
|
||||
DuplicateStats::default(),
|
||||
Some(ValidatorStakeInfo {
|
||||
total_epoch_stake: 0,
|
||||
..ValidatorStakeInfo::default()
|
||||
|
@ -740,7 +583,6 @@ mod test {
|
|||
let progress = ForkProgress::new(
|
||||
Hash::default(),
|
||||
Some(9),
|
||||
DuplicateStats::default(),
|
||||
Some(ValidatorStakeInfo {
|
||||
total_epoch_stake: 2,
|
||||
..ValidatorStakeInfo::default()
|
||||
|
@ -754,7 +596,6 @@ mod test {
|
|||
let progress = ForkProgress::new(
|
||||
Hash::default(),
|
||||
Some(9),
|
||||
DuplicateStats::default(),
|
||||
Some(ValidatorStakeInfo {
|
||||
stake: 1,
|
||||
total_epoch_stake: 2,
|
||||
|
@ -771,7 +612,6 @@ mod test {
|
|||
let progress = ForkProgress::new(
|
||||
Hash::default(),
|
||||
Some(9),
|
||||
DuplicateStats::default(),
|
||||
Some(ValidatorStakeInfo::default()),
|
||||
0,
|
||||
0,
|
||||
|
@ -785,23 +625,12 @@ mod test {
|
|||
|
||||
// Insert new ForkProgress for slot 10 (not a leader slot) and its
|
||||
// previous leader slot 9 (leader slot)
|
||||
progress_map.insert(
|
||||
10,
|
||||
ForkProgress::new(
|
||||
Hash::default(),
|
||||
Some(9),
|
||||
DuplicateStats::default(),
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
),
|
||||
);
|
||||
progress_map.insert(10, ForkProgress::new(Hash::default(), Some(9), None, 0, 0));
|
||||
progress_map.insert(
|
||||
9,
|
||||
ForkProgress::new(
|
||||
Hash::default(),
|
||||
None,
|
||||
DuplicateStats::default(),
|
||||
Some(ValidatorStakeInfo::default()),
|
||||
0,
|
||||
0,
|
||||
|
@ -816,17 +645,7 @@ mod test {
|
|||
// The previous leader before 8, slot 7, does not exist in
|
||||
// progress map, so is_propagated(8) should return true as
|
||||
// this implies the parent is rooted
|
||||
progress_map.insert(
|
||||
8,
|
||||
ForkProgress::new(
|
||||
Hash::default(),
|
||||
Some(7),
|
||||
DuplicateStats::default(),
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
),
|
||||
);
|
||||
progress_map.insert(8, ForkProgress::new(Hash::default(), Some(7), None, 0, 0));
|
||||
assert!(progress_map.is_propagated(8));
|
||||
|
||||
// If we set the is_propagated = true, is_propagated should return true
|
||||
|
@ -849,157 +668,4 @@ mod test {
|
|||
.is_leader_slot = true;
|
||||
assert!(!progress_map.is_propagated(10));
|
||||
}
|
||||
|
||||
fn setup_set_unconfirmed_and_confirmed_duplicate_slot_tests(
|
||||
smaller_duplicate_slot: Slot,
|
||||
larger_duplicate_slot: Slot,
|
||||
) -> (ProgressMap, RwLock<BankForks>) {
|
||||
// Create simple fork 0 -> 1 -> 2 -> 3 -> 4 -> 5
|
||||
let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4) / tr(5)))));
|
||||
let mut vote_simulator = VoteSimulator::new(1);
|
||||
vote_simulator.fill_bank_forks(forks, &HashMap::new());
|
||||
let VoteSimulator {
|
||||
mut progress,
|
||||
bank_forks,
|
||||
..
|
||||
} = vote_simulator;
|
||||
let descendants = bank_forks.read().unwrap().descendants().clone();
|
||||
|
||||
// Mark the slots as unconfirmed duplicates
|
||||
progress.set_unconfirmed_duplicate_slot(
|
||||
smaller_duplicate_slot,
|
||||
&descendants.get(&smaller_duplicate_slot).unwrap(),
|
||||
);
|
||||
progress.set_unconfirmed_duplicate_slot(
|
||||
larger_duplicate_slot,
|
||||
&descendants.get(&larger_duplicate_slot).unwrap(),
|
||||
);
|
||||
|
||||
// Correctness checks
|
||||
for slot in bank_forks.read().unwrap().banks().keys() {
|
||||
if *slot < smaller_duplicate_slot {
|
||||
assert!(progress
|
||||
.latest_unconfirmed_duplicate_ancestor(*slot)
|
||||
.is_none());
|
||||
} else if *slot < larger_duplicate_slot {
|
||||
assert_eq!(
|
||||
progress
|
||||
.latest_unconfirmed_duplicate_ancestor(*slot)
|
||||
.unwrap(),
|
||||
smaller_duplicate_slot
|
||||
);
|
||||
} else {
|
||||
assert_eq!(
|
||||
progress
|
||||
.latest_unconfirmed_duplicate_ancestor(*slot)
|
||||
.unwrap(),
|
||||
larger_duplicate_slot
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
(progress, bank_forks)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_set_unconfirmed_duplicate_confirm_smaller_slot_first() {
|
||||
let smaller_duplicate_slot = 1;
|
||||
let larger_duplicate_slot = 4;
|
||||
let (mut progress, bank_forks) = setup_set_unconfirmed_and_confirmed_duplicate_slot_tests(
|
||||
smaller_duplicate_slot,
|
||||
larger_duplicate_slot,
|
||||
);
|
||||
let descendants = bank_forks.read().unwrap().descendants().clone();
|
||||
let ancestors = bank_forks.read().unwrap().ancestors();
|
||||
|
||||
// Mark the smaller duplicate slot as confirmed
|
||||
progress.set_confirmed_duplicate_slot(
|
||||
smaller_duplicate_slot,
|
||||
&ancestors.get(&smaller_duplicate_slot).unwrap(),
|
||||
&descendants.get(&smaller_duplicate_slot).unwrap(),
|
||||
);
|
||||
for slot in bank_forks.read().unwrap().banks().keys() {
|
||||
if *slot < larger_duplicate_slot {
|
||||
// Only slots <= smaller_duplicate_slot have been duplicate confirmed
|
||||
if *slot <= smaller_duplicate_slot {
|
||||
assert!(progress.is_duplicate_confirmed(*slot).unwrap());
|
||||
} else {
|
||||
assert!(!progress.is_duplicate_confirmed(*slot).unwrap());
|
||||
}
|
||||
// The unconfirmed duplicate flag has been cleared on the smaller
|
||||
// descendants because their most recent duplicate ancestor has
|
||||
// been confirmed
|
||||
assert!(progress
|
||||
.latest_unconfirmed_duplicate_ancestor(*slot)
|
||||
.is_none());
|
||||
} else {
|
||||
assert!(!progress.is_duplicate_confirmed(*slot).unwrap(),);
|
||||
// The unconfirmed duplicate flag has not been cleared on these
// descendants because their most recent duplicate ancestor,
// `larger_duplicate_slot`, has not yet been confirmed
|
||||
assert_eq!(
|
||||
progress
|
||||
.latest_unconfirmed_duplicate_ancestor(*slot)
|
||||
.unwrap(),
|
||||
larger_duplicate_slot
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Mark the larger duplicate slot as confirmed; all slots should no longer
// have any unconfirmed duplicate ancestors, and should be marked as duplicate confirmed
|
||||
progress.set_confirmed_duplicate_slot(
|
||||
larger_duplicate_slot,
|
||||
&ancestors.get(&larger_duplicate_slot).unwrap(),
|
||||
&descendants.get(&larger_duplicate_slot).unwrap(),
|
||||
);
|
||||
for slot in bank_forks.read().unwrap().banks().keys() {
|
||||
// All slots <= the latest duplicate confirmed slot are ancestors of
// that slot, so they should all be marked duplicate confirmed
|
||||
assert_eq!(
|
||||
progress.is_duplicate_confirmed(*slot).unwrap(),
|
||||
*slot <= larger_duplicate_slot
|
||||
);
|
||||
assert!(progress
|
||||
.latest_unconfirmed_duplicate_ancestor(*slot)
|
||||
.is_none());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_set_unconfirmed_duplicate_confirm_larger_slot_first() {
|
||||
let smaller_duplicate_slot = 1;
|
||||
let larger_duplicate_slot = 4;
|
||||
let (mut progress, bank_forks) = setup_set_unconfirmed_and_confirmed_duplicate_slot_tests(
|
||||
smaller_duplicate_slot,
|
||||
larger_duplicate_slot,
|
||||
);
|
||||
let descendants = bank_forks.read().unwrap().descendants().clone();
|
||||
let ancestors = bank_forks.read().unwrap().ancestors();
|
||||
|
||||
// Mark the larger duplicate slot as confirmed
|
||||
progress.set_confirmed_duplicate_slot(
|
||||
larger_duplicate_slot,
|
||||
&ancestors.get(&larger_duplicate_slot).unwrap(),
|
||||
&descendants.get(&larger_duplicate_slot).unwrap(),
|
||||
);
|
||||
|
||||
// All slots should no longer have any unconfirmed duplicate ancestors
|
||||
progress.set_confirmed_duplicate_slot(
|
||||
larger_duplicate_slot,
|
||||
&ancestors.get(&larger_duplicate_slot).unwrap(),
|
||||
&descendants.get(&larger_duplicate_slot).unwrap(),
|
||||
);
|
||||
for slot in bank_forks.read().unwrap().banks().keys() {
|
||||
// All slots <= the latest duplicate confirmed slot are ancestors of
// that slot, so they should all be marked duplicate confirmed
|
||||
assert_eq!(
|
||||
progress.is_duplicate_confirmed(*slot).unwrap(),
|
||||
*slot <= larger_duplicate_slot
|
||||
);
|
||||
assert!(progress
|
||||
.latest_unconfirmed_duplicate_ancestor(*slot)
|
||||
.is_none());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,7 +17,7 @@ use crate::{
|
|||
fork_choice::{ForkChoice, SelectVoteAndResetForkResult},
|
||||
heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
|
||||
latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks,
|
||||
progress_map::{DuplicateStats, ForkProgress, ProgressMap, PropagatedStats},
|
||||
progress_map::{ForkProgress, ProgressMap, PropagatedStats},
|
||||
repair_service::DuplicateSlotsResetReceiver,
|
||||
result::Result,
|
||||
rewards_recorder_service::RewardsRecorderSender,
|
||||
|
@ -389,8 +389,6 @@ impl ReplayStage {
|
|||
&subscriptions,
|
||||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&mut unfrozen_gossip_verified_vote_hashes,
|
||||
&mut latest_validator_votes_for_frozen_banks,
|
||||
&cluster_slots_update_sender,
|
||||
|
@ -421,8 +419,6 @@ impl ReplayStage {
|
|||
&bank_forks,
|
||||
&mut progress,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
);
|
||||
process_gossip_duplicate_confirmed_slots_time.stop();
|
||||
|
||||
|
@ -449,8 +445,6 @@ impl ReplayStage {
|
|||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&bank_forks,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&mut progress,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
);
|
||||
|
@ -494,10 +488,7 @@ impl ReplayStage {
|
|||
&bank_forks,
|
||||
);
|
||||
|
||||
Self::mark_slots_confirmed(&confirmed_forks, &bank_forks, &mut progress,
|
||||
&mut duplicate_slots_tracker,
|
||||
&ancestors, &descendants, &mut
|
||||
heaviest_subtree_fork_choice);
|
||||
Self::mark_slots_confirmed(&confirmed_forks, &bank_forks, &mut progress, &mut duplicate_slots_tracker, &mut heaviest_subtree_fork_choice);
|
||||
}
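// Note on the simplified call above: `mark_slots_confirmed` no longer needs the
// `ancestors`/`descendants` maps because duplicate-confirmed propagation now happens
// inside `HeaviestSubtreeForkChoice` when the fork is marked a valid candidate.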
|
||||
compute_slot_stats_time.stop();
|
||||
|
||||
|
@ -533,6 +524,7 @@ impl ReplayStage {
|
|||
&progress,
|
||||
&mut tower,
|
||||
&latest_validator_votes_for_frozen_banks,
|
||||
&heaviest_subtree_fork_choice,
|
||||
);
|
||||
select_vote_and_reset_forks_time.stop();
|
||||
|
||||
|
@ -783,9 +775,6 @@ impl ReplayStage {
|
|||
// Initialize progress map with any root banks
|
||||
for bank in &frozen_banks {
|
||||
let prev_leader_slot = progress.get_bank_prev_leader_slot(bank);
|
||||
let duplicate_stats = DuplicateStats::new_with_unconfirmed_duplicate_ancestor(
|
||||
progress.latest_unconfirmed_duplicate_ancestor(bank.parent_slot()),
|
||||
);
|
||||
progress.insert(
|
||||
bank.slot(),
|
||||
ForkProgress::new_from_bank(
|
||||
|
@ -793,7 +782,6 @@ impl ReplayStage {
|
|||
&my_pubkey,
|
||||
&vote_account,
|
||||
prev_leader_slot,
|
||||
duplicate_stats,
|
||||
0,
|
||||
0,
|
||||
),
|
||||
|
@ -924,8 +912,6 @@ impl ReplayStage {
|
|||
bank_forks: &RwLock<BankForks>,
|
||||
progress: &mut ProgressMap,
|
||||
fork_choice: &mut HeaviestSubtreeForkChoice,
|
||||
ancestors: &HashMap<Slot, HashSet<Slot>>,
|
||||
descendants: &HashMap<Slot, HashSet<Slot>>,
|
||||
) {
|
||||
let root = bank_forks.read().unwrap().root();
|
||||
for new_confirmed_slots in gossip_duplicate_confirmed_slots_receiver.try_iter() {
|
||||
|
@ -950,8 +936,6 @@ impl ReplayStage {
|
|||
.map(|b| b.hash()),
|
||||
duplicate_slots_tracker,
|
||||
gossip_duplicate_confirmed_slots,
|
||||
ancestors,
|
||||
descendants,
|
||||
progress,
|
||||
fork_choice,
|
||||
SlotStateUpdate::DuplicateConfirmed,
|
||||
|
@ -985,8 +969,6 @@ impl ReplayStage {
|
|||
duplicate_slots_tracker: &mut DuplicateSlotsTracker,
|
||||
gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots,
|
||||
bank_forks: &RwLock<BankForks>,
|
||||
ancestors: &HashMap<Slot, HashSet<Slot>>,
|
||||
descendants: &HashMap<Slot, HashSet<Slot>>,
|
||||
progress: &mut ProgressMap,
|
||||
fork_choice: &mut HeaviestSubtreeForkChoice,
|
||||
) {
|
||||
|
@ -1010,8 +992,6 @@ impl ReplayStage {
|
|||
bank_hash,
|
||||
duplicate_slots_tracker,
|
||||
gossip_duplicate_confirmed_slots,
|
||||
ancestors,
|
||||
descendants,
|
||||
progress,
|
||||
fork_choice,
|
||||
SlotStateUpdate::Duplicate,
|
||||
|
@ -1259,8 +1239,6 @@ impl ReplayStage {
|
|||
subscriptions: &Arc<RpcSubscriptions>,
|
||||
duplicate_slots_tracker: &mut DuplicateSlotsTracker,
|
||||
gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots,
|
||||
ancestors: &HashMap<Slot, HashSet<Slot>>,
|
||||
descendants: &HashMap<Slot, HashSet<Slot>>,
|
||||
progress: &mut ProgressMap,
|
||||
heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
|
||||
) {
|
||||
|
@ -1303,8 +1281,6 @@ impl ReplayStage {
|
|||
Some(bank.hash()),
|
||||
duplicate_slots_tracker,
|
||||
gossip_duplicate_confirmed_slots,
|
||||
ancestors,
|
||||
descendants,
|
||||
progress,
|
||||
heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::Dead,
|
||||
|
@ -1676,8 +1652,6 @@ impl ReplayStage {
|
|||
subscriptions: &Arc<RpcSubscriptions>,
|
||||
duplicate_slots_tracker: &mut DuplicateSlotsTracker,
|
||||
gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots,
|
||||
ancestors: &HashMap<Slot, HashSet<Slot>>,
|
||||
descendants: &HashMap<Slot, HashSet<Slot>>,
|
||||
unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes,
|
||||
latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks,
|
||||
cluster_slots_update_sender: &ClusterSlotsUpdateSender,
|
||||
|
@ -1709,11 +1683,6 @@ impl ReplayStage {
|
|||
(num_blocks_on_fork, num_dropped_blocks_on_fork)
|
||||
};
|
||||
|
||||
// New children adopt the same latest duplicate ancestor as their parent.
|
||||
let duplicate_stats = DuplicateStats::new_with_unconfirmed_duplicate_ancestor(
|
||||
progress.latest_unconfirmed_duplicate_ancestor(bank.parent_slot()),
|
||||
);
|
||||
|
||||
// Insert a progress entry even for slots this node is the leader for, so that
|
||||
// 1) confirm_forks can report confirmation, 2) we can cache computations about
|
||||
// this bank in `select_forks()`
|
||||
|
@ -1723,7 +1692,6 @@ impl ReplayStage {
|
|||
&my_pubkey,
|
||||
vote_account,
|
||||
prev_leader_slot,
|
||||
duplicate_stats,
|
||||
num_blocks_on_fork,
|
||||
num_dropped_blocks_on_fork,
|
||||
)
|
||||
|
@ -1755,8 +1723,6 @@ impl ReplayStage {
|
|||
subscriptions,
|
||||
duplicate_slots_tracker,
|
||||
gossip_duplicate_confirmed_slots,
|
||||
ancestors,
|
||||
descendants,
|
||||
progress,
|
||||
heaviest_subtree_fork_choice,
|
||||
);
|
||||
|
@ -1782,6 +1748,9 @@ impl ReplayStage {
|
|||
bank.freeze();
|
||||
let bank_hash = bank.hash();
|
||||
assert_ne!(bank_hash, Hash::default());
|
||||
// Needs to be updated before `check_slot_agrees_with_cluster()` so that
|
||||
// any updates in `check_slot_agrees_with_cluster()` on fork choice take
|
||||
// effect
|
||||
heaviest_subtree_fork_choice.add_new_leaf_slot(
|
||||
(bank.slot(), bank.hash()),
|
||||
Some((bank.parent_slot(), bank.parent_hash())),
|
||||
|
@ -1792,8 +1761,6 @@ impl ReplayStage {
|
|||
Some(bank.hash()),
|
||||
duplicate_slots_tracker,
|
||||
gossip_duplicate_confirmed_slots,
|
||||
ancestors,
|
||||
descendants,
|
||||
progress,
|
||||
heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::Frozen,
|
||||
|
@ -2033,6 +2000,7 @@ impl ReplayStage {
|
|||
progress: &ProgressMap,
|
||||
tower: &mut Tower,
|
||||
latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks,
|
||||
fork_choice: &HeaviestSubtreeForkChoice,
|
||||
) -> SelectVoteAndResetForkResult {
|
||||
// Try to vote on the actual heaviest fork. If the heaviest bank is
|
||||
// locked out or fails the threshold check, the validator will:
|
||||
|
@ -2059,6 +2027,7 @@ impl ReplayStage {
|
|||
.epoch_vote_accounts(heaviest_bank.epoch())
|
||||
.expect("Bank epoch vote accounts must contain entry for the bank's own epoch"),
|
||||
latest_validator_votes_for_frozen_banks,
|
||||
fork_choice,
|
||||
);
|
||||
|
||||
match switch_fork_decision {
|
||||
|
@ -2328,8 +2297,6 @@ impl ReplayStage {
|
|||
bank_forks: &RwLock<BankForks>,
|
||||
progress: &mut ProgressMap,
|
||||
duplicate_slots_tracker: &mut DuplicateSlotsTracker,
|
||||
ancestors: &HashMap<Slot, HashSet<Slot>>,
|
||||
descendants: &HashMap<Slot, HashSet<Slot>>,
|
||||
fork_choice: &mut HeaviestSubtreeForkChoice,
|
||||
) {
|
||||
let (root_slot, bank_hashes) = {
|
||||
|
@ -2344,8 +2311,8 @@ impl ReplayStage {
|
|||
for (slot, bank_hash) in confirmed_forks.iter().zip(bank_hashes.into_iter()) {
|
||||
// This case should be guaranteed as false by confirm_forks()
|
||||
if let Some(false) = progress.is_supermajority_confirmed(*slot) {
|
||||
// Because supermajority confirmation will iterate through all ancestors/descendants
|
||||
// in `check_slot_agrees_with_cluster`, only incur this cost if the slot wasn't already
|
||||
// Because supermajority confirmation will iterate through and update the
|
||||
// subtree in fork choice, only incur this cost if the slot wasn't already
|
||||
// confirmed
|
||||
progress.set_supermajority_confirmed_slot(*slot);
|
||||
check_slot_agrees_with_cluster(
|
||||
|
@ -2356,8 +2323,6 @@ impl ReplayStage {
|
|||
// Don't need to pass the gossip confirmed slots since `slot`
|
||||
// is already marked as confirmed in progress
|
||||
&BTreeMap::new(),
|
||||
ancestors,
|
||||
descendants,
|
||||
progress,
|
||||
fork_choice,
|
||||
SlotStateUpdate::DuplicateConfirmed,
|
||||
|
@ -2671,7 +2636,6 @@ mod tests {
|
|||
bank0.collector_id(),
|
||||
&Pubkey::default(),
|
||||
None,
|
||||
DuplicateStats::default(),
|
||||
0,
|
||||
0,
|
||||
),
|
||||
|
@ -2778,7 +2742,6 @@ mod tests {
|
|||
.get(&bank1.collector_id())
|
||||
.unwrap(),
|
||||
Some(0),
|
||||
DuplicateStats::default(),
|
||||
0,
|
||||
0,
|
||||
),
|
||||
|
@ -2875,10 +2838,7 @@ mod tests {
|
|||
|
||||
let mut progress = ProgressMap::default();
|
||||
for i in 0..=root {
|
||||
progress.insert(
|
||||
i,
|
||||
ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0),
|
||||
);
|
||||
progress.insert(i, ForkProgress::new(Hash::default(), None, None, 0, 0));
|
||||
}
|
||||
|
||||
let mut duplicate_slots_tracker: DuplicateSlotsTracker =
|
||||
|
@ -2964,10 +2924,7 @@ mod tests {
|
|||
let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new((root, root_hash));
|
||||
let mut progress = ProgressMap::default();
|
||||
for i in 0..=root {
|
||||
progress.insert(
|
||||
i,
|
||||
ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0),
|
||||
);
|
||||
progress.insert(i, ForkProgress::new(Hash::default(), None, None, 0, 0));
|
||||
}
|
||||
ReplayStage::handle_new_root(
|
||||
root,
|
||||
|
@ -3223,9 +3180,9 @@ mod tests {
|
|||
let bank0 = bank_forks.working_bank();
|
||||
let mut progress = ProgressMap::default();
|
||||
let last_blockhash = bank0.last_blockhash();
|
||||
let mut bank0_progress = progress.entry(bank0.slot()).or_insert_with(|| {
|
||||
ForkProgress::new(last_blockhash, None, DuplicateStats::default(), None, 0, 0)
|
||||
});
|
||||
let mut bank0_progress = progress
|
||||
.entry(bank0.slot())
|
||||
.or_insert_with(|| ForkProgress::new(last_blockhash, None, None, 0, 0));
|
||||
let shreds = shred_to_insert(&mint_keypair, bank0.clone());
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
|
||||
|
@ -3255,8 +3212,6 @@ mod tests {
|
|||
&subscriptions,
|
||||
&mut DuplicateSlotsTracker::default(),
|
||||
&GossipDuplicateConfirmedSlots::default(),
|
||||
&HashMap::new(),
|
||||
&HashMap::new(),
|
||||
&mut progress,
|
||||
&mut HeaviestSubtreeForkChoice::new((0, Hash::default())),
|
||||
);
|
||||
|
@ -3525,14 +3480,7 @@ mod tests {
|
|||
bank_forks.write().unwrap().insert(bank1);
|
||||
progress.insert(
|
||||
1,
|
||||
ForkProgress::new(
|
||||
bank0.last_blockhash(),
|
||||
None,
|
||||
DuplicateStats::default(),
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
),
|
||||
ForkProgress::new(bank0.last_blockhash(), None, None, 0, 0),
|
||||
);
|
||||
let ancestors = bank_forks.read().unwrap().ancestors();
|
||||
let mut frozen_banks: Vec<_> = bank_forks
|
||||
|
@ -3959,7 +3907,6 @@ mod tests {
|
|||
ForkProgress::new(
|
||||
Hash::default(),
|
||||
Some(9),
|
||||
DuplicateStats::default(),
|
||||
Some(ValidatorStakeInfo {
|
||||
total_epoch_stake,
|
||||
..ValidatorStakeInfo::default()
|
||||
|
@ -3973,7 +3920,6 @@ mod tests {
|
|||
ForkProgress::new(
|
||||
Hash::default(),
|
||||
Some(8),
|
||||
DuplicateStats::default(),
|
||||
Some(ValidatorStakeInfo {
|
||||
total_epoch_stake,
|
||||
..ValidatorStakeInfo::default()
|
||||
|
@ -4056,7 +4002,6 @@ mod tests {
|
|||
ForkProgress::new(
|
||||
Hash::default(),
|
||||
Some(prev_leader_slot),
|
||||
DuplicateStats::default(),
|
||||
{
|
||||
if i % 2 == 0 {
|
||||
Some(ValidatorStakeInfo {
|
||||
|
@ -4136,7 +4081,6 @@ mod tests {
|
|||
let mut fork_progress = ForkProgress::new(
|
||||
Hash::default(),
|
||||
Some(prev_leader_slot),
|
||||
DuplicateStats::default(),
|
||||
Some(ValidatorStakeInfo {
|
||||
total_epoch_stake,
|
||||
..ValidatorStakeInfo::default()
|
||||
|
@ -4196,7 +4140,7 @@ mod tests {
|
|||
// should succeed
|
||||
progress_map.insert(
|
||||
parent_slot,
|
||||
ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0),
|
||||
ForkProgress::new(Hash::default(), None, None, 0, 0),
|
||||
);
|
||||
assert!(ReplayStage::check_propagation_for_start_leader(
|
||||
poh_slot,
|
||||
|
@ -4212,7 +4156,6 @@ mod tests {
|
|||
ForkProgress::new(
|
||||
Hash::default(),
|
||||
None,
|
||||
DuplicateStats::default(),
|
||||
Some(ValidatorStakeInfo::default()),
|
||||
0,
|
||||
0,
|
||||
|
@ -4239,21 +4182,13 @@ mod tests {
|
|||
let previous_leader_slot = parent_slot - 1;
|
||||
progress_map.insert(
|
||||
parent_slot,
|
||||
ForkProgress::new(
|
||||
Hash::default(),
|
||||
Some(previous_leader_slot),
|
||||
DuplicateStats::default(),
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
),
|
||||
ForkProgress::new(Hash::default(), Some(previous_leader_slot), None, 0, 0),
|
||||
);
|
||||
progress_map.insert(
|
||||
previous_leader_slot,
|
||||
ForkProgress::new(
|
||||
Hash::default(),
|
||||
None,
|
||||
DuplicateStats::default(),
|
||||
Some(ValidatorStakeInfo::default()),
|
||||
0,
|
||||
0,
|
||||
|
@ -4314,7 +4249,6 @@ mod tests {
|
|||
ForkProgress::new(
|
||||
Hash::default(),
|
||||
None,
|
||||
DuplicateStats::default(),
|
||||
Some(ValidatorStakeInfo::default()),
|
||||
0,
|
||||
0,
|
||||
|
@ -4350,7 +4284,6 @@ mod tests {
|
|||
ForkProgress::new(
|
||||
Hash::default(),
|
||||
None,
|
||||
DuplicateStats::default(),
|
||||
Some(ValidatorStakeInfo::default()),
|
||||
0,
|
||||
0,
|
||||
|
@ -4374,7 +4307,6 @@ mod tests {
|
|||
ForkProgress::new(
|
||||
Hash::default(),
|
||||
None,
|
||||
DuplicateStats::default(),
|
||||
Some(ValidatorStakeInfo::default()),
|
||||
0,
|
||||
0,
|
||||
|
@ -4624,8 +4556,6 @@ mod tests {
|
|||
// Mark 4 as duplicate; 3 should be the heaviest slot, but should not be votable
// because of lockout
|
||||
blockstore.store_duplicate_slot(4, vec![], vec![]).unwrap();
|
||||
let ancestors = bank_forks.read().unwrap().ancestors();
|
||||
let descendants = bank_forks.read().unwrap().descendants().clone();
|
||||
let mut duplicate_slots_tracker = DuplicateSlotsTracker::default();
|
||||
let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default();
|
||||
let bank4_hash = bank_forks.read().unwrap().get(4).unwrap().hash();
|
||||
|
@ -4636,9 +4566,7 @@ mod tests {
|
|||
Some(bank4_hash),
|
||||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&mut progress,
|
||||
&progress,
|
||||
&mut vote_simulator.heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::Duplicate,
|
||||
);
|
||||
|
@ -4655,8 +4583,6 @@ mod tests {
|
|||
|
||||
// Now mark 2, an ancestor of 4, as duplicate
|
||||
blockstore.store_duplicate_slot(2, vec![], vec![]).unwrap();
|
||||
let ancestors = bank_forks.read().unwrap().ancestors();
|
||||
let descendants = bank_forks.read().unwrap().descendants().clone();
|
||||
let bank2_hash = bank_forks.read().unwrap().get(2).unwrap().hash();
|
||||
assert_ne!(bank2_hash, Hash::default());
|
||||
check_slot_agrees_with_cluster(
|
||||
|
@ -4665,9 +4591,7 @@ mod tests {
|
|||
Some(bank2_hash),
|
||||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&mut progress,
|
||||
&progress,
|
||||
&mut vote_simulator.heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::Duplicate,
|
||||
);
|
||||
|
@ -4694,9 +4618,7 @@ mod tests {
|
|||
Some(bank4_hash),
|
||||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&mut progress,
|
||||
&progress,
|
||||
&mut vote_simulator.heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::DuplicateConfirmed,
|
||||
);
|
||||
|
@ -5110,6 +5032,7 @@ mod tests {
|
|||
progress,
|
||||
tower,
|
||||
latest_validator_votes_for_frozen_banks,
|
||||
heaviest_subtree_fork_choice,
|
||||
);
|
||||
(
|
||||
vote_bank.map(|(b, _)| b.slot()),