Fix set_roots to use cached bank parents instead of searching blocktree (#4466)

This commit is contained in:
carllin 2019-05-29 09:43:22 -07:00 committed by GitHub
parent 335dfdc4d5
commit 534244b322
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 19 additions and 44 deletions

View File

@@ -818,18 +818,12 @@ impl Blocktree {
}
}
pub fn set_root(&self, new_root: u64, prev_root: u64) -> Result<()> {
let mut current_slot = new_root;
pub fn set_roots(&self, rooted_slots: &[u64]) -> Result<()> {
unsafe {
let mut batch_processor = self.db.batch_processor();
let mut write_batch = batch_processor.batch()?;
if new_root == 0 {
write_batch.put::<cf::Root>(0, &true)?;
} else {
while current_slot != prev_root {
write_batch.put::<cf::Root>(current_slot, &true)?;
current_slot = self.meta(current_slot).unwrap().unwrap().parent_slot;
}
for slot in rooted_slots {
write_batch.put::<cf::Root>(*slot, &true)?;
}
batch_processor.write(write_batch)?;
@@ -3128,30 +3122,12 @@ pub mod tests {
}
#[test]
fn test_set_root() {
fn test_set_roots() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
blocktree.set_root(0, 0).unwrap();
let chained_slots = vec![0, 2, 4, 7, 12, 15];
// Make a chain of slots
let all_blobs = make_chaining_slot_entries(&chained_slots, 10);
// Insert the chain of slots into the ledger
for (slot_blobs, _) in all_blobs {
blocktree.insert_data_blobs(&slot_blobs[..]).unwrap();
}
blocktree.set_root(4, 0).unwrap();
for i in &chained_slots[0..3] {
assert!(blocktree.is_root(*i));
}
for i in &chained_slots[3..] {
assert!(!blocktree.is_root(*i));
}
blocktree.set_root(15, 4).unwrap();
blocktree.set_roots(&chained_slots).unwrap();
for i in chained_slots {
assert!(blocktree.is_root(i));

View File

@@ -51,7 +51,7 @@ mod tests {
fn test_rooted_slot_iterator() {
let blocktree_path = get_tmp_ledger_path("test_rooted_slot_iterator");
let blocktree = Blocktree::open(&blocktree_path).unwrap();
blocktree.set_root(0, 0).unwrap();
blocktree.set_roots(&[0]).unwrap();
let ticks_per_slot = 5;
/*
Build a blocktree in the ledger with the following fork structure:
@@ -98,7 +98,7 @@ mod tests {
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 4, fork_point, fork_hash);
// Set a root
blocktree.set_root(3, 0).unwrap();
blocktree.set_roots(&[1, 2, 3]).unwrap();
// Trying to get an iterator on a different fork will error
assert!(RootedSlotIterator::new(4, &blocktree).is_err());

View File

@@ -155,7 +155,7 @@ pub fn process_blocktree(
vec![(slot, meta, bank, entry_height, last_entry_hash)]
};
blocktree.set_root(0, 0).expect("Couldn't set first root");
blocktree.set_roots(&[0]).expect("Couldn't set first root");
let leader_schedule_cache = LeaderScheduleCache::new(*pending_slots[0].2.epoch_schedule(), 0);
@@ -420,7 +420,7 @@ pub mod tests {
info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);
blocktree.set_root(4, 0).unwrap();
blocktree.set_roots(&[4, 1, 0]).unwrap();
let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap();
@@ -494,8 +494,7 @@ pub mod tests {
info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);
blocktree.set_root(0, 0).unwrap();
blocktree.set_root(1, 0).unwrap();
blocktree.set_roots(&[0, 1]).unwrap();
let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap();
@@ -571,10 +570,11 @@ pub mod tests {
}
// Set a root on the last slot of the last confirmed epoch
blocktree.set_root(last_slot, 0).unwrap();
let rooted_slots: Vec<_> = (0..=last_slot).collect();
blocktree.set_roots(&rooted_slots).unwrap();
// Set a root on the next slot of the confirmed epoch
blocktree.set_root(last_slot + 1, last_slot).unwrap();
blocktree.set_roots(&[last_slot + 1]).unwrap();
// Check that we can properly restart the ledger / leader scheduler doesn't fail
let (bank_forks, bank_forks_info, _) =

View File

@@ -620,8 +620,8 @@ mod tests {
blocktree.insert_data_blobs(&blobs).unwrap();
// Write roots so that these slots will qualify to be sent by the repairman
blocktree.set_root(0, 0).unwrap();
blocktree.set_root(num_slots - 1, 0).unwrap();
let roots: Vec<_> = (0..=num_slots - 1).collect();
blocktree.set_roots(&roots).unwrap();
// Set up my information
let my_pubkey = Pubkey::new_rand();
@@ -696,8 +696,8 @@ mod tests {
blocktree.insert_data_blobs(&blobs).unwrap();
// Write roots so that these slots will qualify to be sent by the repairman
blocktree.set_root(0, 0).unwrap();
blocktree.set_root(slots_per_epoch * 2 - 1, 0).unwrap();
let roots: Vec<_> = (0..=slots_per_epoch * 2 - 1).collect();
blocktree.set_roots(&roots).unwrap();
// Set up my information
let my_pubkey = Pubkey::new_rand();

View File

@@ -316,10 +316,9 @@ impl ReplayStage {
.map(|bank| bank.slot())
.collect::<Vec<_>>();
rooted_slots.push(root_bank.slot());
let old_root = bank_forks.read().unwrap().root();
blocktree
.set_root(new_root, old_root)
.expect("Ledger set root failed");
.set_roots(&rooted_slots)
.expect("Ledger set roots failed");
// Set root first in leader schedule_cache before bank_forks because bank_forks.root
// is consumed by repair_service to update gossip, so we don't want to get blobs for
// repair on gossip before we update leader schedule, otherwise they may get dropped.