Clear parent in repair weighting when dumping from replay (#29770)

Ashwin Sekar 2023-01-23 12:55:09 -08:00 committed by GitHub
parent bd9b311c63
commit 3e8874e3a2
2 changed files with 80 additions and 3 deletions
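
The change in a nutshell: when replay dumps a duplicate slot, repair weighting calls split_off to detach the subtree rooted at that slot into its own tree, but the detached root previously kept a stale parent pointing back into the tree it was split from, and the new test in this commit asserts that a tree root's parent is None. Below is a minimal sketch of the idea with simplified, assumed types (SlotHashKey as a plain u64, an invented ForkInfo/Tree pair, descendant moves and stake re-aggregation elided); it is not the actual HeaviestSubtreeForkChoice implementation.

use std::collections::HashMap;

// Simplified stand-ins for illustration only.
type SlotHashKey = u64; // the real key is (Slot, Hash)

struct ForkInfo {
    parent: Option<SlotHashKey>,
    children: Vec<SlotHashKey>,
}

struct Tree {
    tree_root: SlotHashKey,
    fork_infos: HashMap<SlotHashKey, ForkInfo>,
}

impl Tree {
    // Detach the subtree rooted at `key` into its own tree. If the new root kept
    // its old `parent`, walks over the split-off tree could follow a link back
    // into the original tree, so the parent must be cleared.
    fn split_off(&mut self, key: SlotHashKey) -> Tree {
        assert_ne!(self.tree_root, key);
        // Unlink `key` from its parent's child list in the original tree.
        let parent = self.fork_infos[&key].parent;
        if let Some(p) = parent {
            self.fork_infos
                .get_mut(&p)
                .unwrap()
                .children
                .retain(|c| *c != key);
        }
        // Move the root node over (moving its descendants is elided in this sketch).
        let mut root_info = self.fork_infos.remove(&key).unwrap();
        root_info.parent = None; // the fix: a tree root must not point at its old parent
        let mut fork_infos = HashMap::new();
        fork_infos.insert(key, root_info);
        Tree {
            tree_root: key,
            fork_infos,
        }
    }
}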


@@ -421,7 +421,7 @@ impl HeaviestSubtreeForkChoice {
     /// Returns the subtree originating from `slot_hash_key`
     pub fn split_off(&mut self, slot_hash_key: &SlotHashKey) -> Self {
         assert_ne!(self.tree_root, *slot_hash_key);
-        let split_tree_root = {
+        let mut split_tree_root = {
             let mut node_to_split_at = self
                 .fork_infos
                 .get_mut(slot_hash_key)
@@ -464,6 +464,7 @@ impl HeaviestSubtreeForkChoice {
 
         // Update the root of the new tree with the proper info, now that we have finished
         // aggregating
+        split_tree_root.parent = None;
         split_tree_fork_infos.insert(*slot_hash_key, split_tree_root);
 
         // Split off the relevant votes to the new tree
@@ -3541,6 +3542,13 @@ mod test {
             stake,
             tree.stake_voted_subtree(&(6, Hash::default())).unwrap()
         );
+        assert!(tree
+            .fork_infos
+            .get(&tree.tree_root)
+            .unwrap()
+            .parent
+            .is_none());
     }
 
     #[test]


@@ -691,7 +691,10 @@ mod test {
     use {
         super::*,
         itertools::Itertools,
-        solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path},
+        solana_ledger::{
+            blockstore::{make_chaining_slot_entries, Blockstore},
+            get_tmp_ledger_path,
+        },
         solana_runtime::{bank::Bank, bank_utils},
         solana_sdk::hash::Hash,
         trees::tr,
@@ -1427,7 +1430,7 @@ mod test {
     }
 
     #[test]
-    fn test_orphan_slot_copy_weight() {
+    fn test_split_off_copy_weight() {
         let (blockstore, _, mut repair_weight) = setup_orphan_repair_weight();
         let stake = 100;
         let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys_for_tests(1, stake);
@@ -1496,6 +1499,72 @@ mod test {
         assert_eq!(repairs[3].slot(), 8);
     }
 
+    #[test]
+    fn test_split_off_multi_dump_repair() {
+        let blockstore = setup_forks();
+        let stake = 100;
+        let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys_for_tests(1, stake);
+        let mut repair_weight = RepairWeight::new(0);
+        repair_weight.add_votes(
+            &blockstore,
+            vec![(6, vote_pubkeys)].into_iter(),
+            bank.epoch_stakes_map(),
+            bank.epoch_schedule(),
+        );
+
+        // Simulate multiple dumps (whole branch is duplicate) from replay
+        blockstore.clear_unconfirmed_slot(3);
+        repair_weight.split_off(3);
+        blockstore.clear_unconfirmed_slot(5);
+        repair_weight.split_off(5);
+        blockstore.clear_unconfirmed_slot(6);
+        repair_weight.split_off(6);
+
+        // Verify orphans
+        let mut orphans = repair_weight.trees.keys().copied().collect_vec();
+        orphans.sort();
+        assert_eq!(vec![0, 3, 5, 6], orphans);
+
+        // Get best orphans works as usual
+        let mut repairs = vec![];
+        let mut processed_slots = vec![repair_weight.root].into_iter().collect();
+        repair_weight.get_best_orphans(
+            &blockstore,
+            &mut processed_slots,
+            &mut repairs,
+            bank.epoch_stakes_map(),
+            bank.epoch_schedule(),
+            4,
+        );
+        assert_eq!(repairs.len(), 3);
+        assert_eq!(repairs[0].slot(), 6);
+        assert_eq!(repairs[1].slot(), 3);
+        assert_eq!(repairs[2].slot(), 5);
+
+        // Simulate repair on 6 and 5
+        for (shreds, _) in make_chaining_slot_entries(&[5, 6], 100) {
+            blockstore.insert_shreds(shreds, None, true).unwrap();
+        }
+
+        // Verify orphans properly updated and chained
+        let mut repairs = vec![];
+        let mut processed_slots = vec![repair_weight.root].into_iter().collect();
+        repair_weight.get_best_orphans(
+            &blockstore,
+            &mut processed_slots,
+            &mut repairs,
+            bank.epoch_stakes_map(),
+            bank.epoch_schedule(),
+            4,
+        );
+        assert_eq!(repairs.len(), 1);
+        assert_eq!(repairs[0].slot(), 3);
+
+        let mut orphans = repair_weight.trees.keys().copied().collect_vec();
+        orphans.sort();
+        assert_eq!(orphans, vec![0, 3]);
+    }
+
     fn setup_orphan_repair_weight() -> (Blockstore, Bank, RepairWeight) {
         let blockstore = setup_orphans();
         let stake = 100;