Test cleanup (#11192)

Co-authored-by: Carl <carl@solana.com>
carllin 2020-07-24 02:55:25 -07:00 committed by GitHub
parent 6578ad7d08
commit c0dc21620b
1 changed file with 163 additions and 129 deletions


@@ -1946,143 +1946,177 @@ pub(crate) mod tests {
        assert!(ReplayStage::is_partition_detected(&ancestors, 4, 3));
    }

    struct ReplayBlockstoreComponents {
        blockstore: Arc<Blockstore>,
        validator_voting_keys: HashMap<Pubkey, Pubkey>,
        progress: ProgressMap,
        bank_forks: Arc<RwLock<BankForks>>,
        leader_schedule_cache: Arc<LeaderScheduleCache>,
        rpc_subscriptions: Arc<RpcSubscriptions>,
    }

    fn replay_blockstore_components() -> ReplayBlockstoreComponents {
        // Setup blockstore
        let ledger_path = get_tmp_ledger_path!();
        let blockstore = Arc::new(
            Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
        );
        let validator_authorized_voter_keypairs: Vec<_> =
            (0..20).map(|_| ValidatorVoteKeypairs::new_rand()).collect();
        let validator_voting_keys: HashMap<_, _> = validator_authorized_voter_keypairs
            .iter()
            .map(|v| (v.node_keypair.pubkey(), v.vote_keypair.pubkey()))
            .collect();
        let GenesisConfigInfo { genesis_config, .. } =
            genesis_utils::create_genesis_config_with_vote_accounts(
                10_000,
                &validator_authorized_voter_keypairs,
                vec![100; validator_authorized_voter_keypairs.len()],
            );
        let bank0 = Bank::new(&genesis_config);

        // ProgressMap
        let mut progress = ProgressMap::default();
        progress.insert(
            0,
            ForkProgress::new_from_bank(
                &bank0,
                bank0.collector_id(),
                &Pubkey::default(),
                None,
                0,
                0,
            ),
        );

        // Leader schedule cache
        let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank0));

        // BankForks
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));

        // RpcSubscriptions
        let exit = Arc::new(AtomicBool::new(false));
        let rpc_subscriptions = Arc::new(RpcSubscriptions::new(
            &exit,
            bank_forks.clone(),
            Arc::new(RwLock::new(BlockCommitmentCache::default())),
        ));

        ReplayBlockstoreComponents {
            blockstore,
            validator_voting_keys,
            progress,
            bank_forks,
            leader_schedule_cache,
            rpc_subscriptions,
        }
    }

    #[test]
    fn test_child_slots_of_same_parent() {
        let ReplayBlockstoreComponents {
            blockstore,
            validator_voting_keys,
            mut progress,
            bank_forks,
            leader_schedule_cache,
            rpc_subscriptions,
        } = replay_blockstore_components();

        // Insert a non-root bank so that the propagation logic will update this
        // bank
        let bank1 = Bank::new_from_parent(
            bank_forks.read().unwrap().get(0).unwrap(),
            &leader_schedule_cache.slot_leader_at(1, None).unwrap(),
            1,
        );
        progress.insert(
            1,
            ForkProgress::new_from_bank(
                &bank1,
                bank1.collector_id(),
                validator_voting_keys.get(&bank1.collector_id()).unwrap(),
                Some(0),
                0,
                0,
            ),
        );
        assert!(progress.get_propagated_stats(1).unwrap().is_leader_slot);
        bank1.freeze();
        bank_forks.write().unwrap().insert(bank1);

        // Insert shreds for slot NUM_CONSECUTIVE_LEADER_SLOTS,
        // chaining to slot 1
        let (shreds, _) = make_slot_entries(NUM_CONSECUTIVE_LEADER_SLOTS, 1, 8);
        blockstore.insert_shreds(shreds, None, false).unwrap();
        assert!(bank_forks
            .read()
            .unwrap()
            .get(NUM_CONSECUTIVE_LEADER_SLOTS)
            .is_none());
        ReplayStage::generate_new_bank_forks(
            &blockstore,
            &bank_forks,
            &leader_schedule_cache,
            &rpc_subscriptions,
            None,
            &mut progress,
            &mut PubkeyReferences::default(),
        );
        assert!(bank_forks
            .read()
            .unwrap()
            .get(NUM_CONSECUTIVE_LEADER_SLOTS)
            .is_some());

        // Insert shreds for slot 2 * NUM_CONSECUTIVE_LEADER_SLOTS,
        // chaining to slot 1
        let (shreds, _) = make_slot_entries(2 * NUM_CONSECUTIVE_LEADER_SLOTS, 1, 8);
        blockstore.insert_shreds(shreds, None, false).unwrap();
        assert!(bank_forks
            .read()
            .unwrap()
            .get(2 * NUM_CONSECUTIVE_LEADER_SLOTS)
            .is_none());
        ReplayStage::generate_new_bank_forks(
            &blockstore,
            &bank_forks,
            &leader_schedule_cache,
            &rpc_subscriptions,
            None,
            &mut progress,
            &mut PubkeyReferences::default(),
        );
        assert!(bank_forks
            .read()
            .unwrap()
            .get(NUM_CONSECUTIVE_LEADER_SLOTS)
            .is_some());
        assert!(bank_forks
            .read()
            .unwrap()
            .get(2 * NUM_CONSECUTIVE_LEADER_SLOTS)
            .is_some());

        // There are 20 equally staked accounts, of which 3 have built
        // banks above or at bank 1. Because 3/20 < SUPERMINORITY_THRESHOLD,
        // we should see 3 validators in bank 1's propagated_validator set.
        let expected_leader_slots = vec![
            1,
            NUM_CONSECUTIVE_LEADER_SLOTS,
            2 * NUM_CONSECUTIVE_LEADER_SLOTS,
        ];
        for slot in expected_leader_slots {
            let leader = leader_schedule_cache.slot_leader_at(slot, None).unwrap();
            let vote_key = validator_voting_keys.get(&leader).unwrap();
            assert!(progress
                .get_propagated_stats(1)
                .unwrap()
                .propagated_validators
                .contains(vote_key));
        }
    }
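
For illustration only, here is a minimal sketch (not part of this commit) of how a future ReplayStage test could reuse the new fixture. The test name and assertions below are hypothetical, but they rely only on what replay_blockstore_components() above sets up: 20 equally staked vote keypairs, a bank at slot 0 in BankForks, and a ProgressMap entry for that slot.

    // Hypothetical example: a new test destructures only the pieces it needs
    // and lets the helper own all of the ledger/bank/progress boilerplate.
    #[test]
    fn test_replay_blockstore_components_sanity() {
        let ReplayBlockstoreComponents {
            validator_voting_keys,
            progress,
            bank_forks,
            ..
        } = replay_blockstore_components();

        // The helper seeds 20 equally staked validator vote accounts.
        assert_eq!(validator_voting_keys.len(), 20);
        // It also registers a bank at slot 0 in BankForks...
        assert!(bank_forks.read().unwrap().get(0).is_some());
        // ...and a corresponding slot-0 entry in the ProgressMap.
        assert!(progress.get_propagated_stats(0).is_some());
    }

Because every test goes through the same helper, later changes to the setup (for example, a different stake distribution) only need to be made in one place.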