2019-12-03 16:31:59 -08:00
|
|
|
use crate::{
|
|
|
|
blocktree::Blocktree,
|
|
|
|
leader_schedule::{FixedSchedule, LeaderSchedule},
|
|
|
|
leader_schedule_utils,
|
|
|
|
};
|
2019-10-18 09:28:51 -07:00
|
|
|
use log::*;
|
2019-05-13 16:24:32 -07:00
|
|
|
use solana_runtime::bank::Bank;
|
2019-11-02 00:38:30 -07:00
|
|
|
use solana_sdk::{
|
|
|
|
clock::{Epoch, Slot},
|
|
|
|
epoch_schedule::EpochSchedule,
|
|
|
|
pubkey::Pubkey,
|
|
|
|
};
|
2019-10-08 22:34:26 -07:00
|
|
|
use std::{
|
|
|
|
collections::{hash_map::Entry, HashMap, VecDeque},
|
|
|
|
sync::{Arc, RwLock},
|
|
|
|
};
|
2019-04-19 02:39:44 -07:00
|
|
|
|
2019-11-02 00:38:30 -07:00
|
|
|
// The cache payload: a map from epoch to that epoch's leader schedule, paired
// with the insertion order of the epochs so the oldest entries can be evicted
// once the cache exceeds its capacity (see `retain_latest`).
type CachedSchedules = (HashMap<Epoch, Arc<LeaderSchedule>>, VecDeque<u64>);
// Default maximum number of epoch schedules kept in the cache at once.
const MAX_SCHEDULES: usize = 10;
|
|
|
|
|
2019-10-08 14:58:49 -07:00
|
|
|
struct CacheCapacity(usize);
|
|
|
|
impl Default for CacheCapacity {
|
|
|
|
fn default() -> Self {
|
|
|
|
CacheCapacity(MAX_SCHEDULES)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-19 02:39:44 -07:00
|
|
|
#[derive(Default)]
pub struct LeaderScheduleCache {
    // Map from an epoch to a leader schedule for that epoch, plus the
    // insertion order of the epochs (used for oldest-first eviction).
    pub cached_schedules: RwLock<CachedSchedules>,
    // Epoch layout used to translate slots into (epoch, slot_index) pairs.
    epoch_schedule: EpochSchedule,
    // Highest epoch that has been confirmed via `set_root`; requests for
    // slots beyond this epoch are refused.
    max_epoch: RwLock<Epoch>,
    // Capacity bound on `cached_schedules`; defaults to MAX_SCHEDULES.
    max_schedules: CacheCapacity,
    // Optional override: from `FixedSchedule::start_epoch` onward, this
    // schedule is returned instead of a computed one (test/devnet use).
    fixed_schedule: Option<Arc<FixedSchedule>>,
}
|
|
|
|
|
|
|
|
impl LeaderScheduleCache {
|
|
|
|
pub fn new_from_bank(bank: &Bank) -> Self {
|
2019-06-26 00:19:48 -07:00
|
|
|
Self::new(*bank.epoch_schedule(), bank)
|
2019-04-19 02:39:44 -07:00
|
|
|
}
|
|
|
|
|
2019-06-26 00:19:48 -07:00
|
|
|
pub fn new(epoch_schedule: EpochSchedule, root_bank: &Bank) -> Self {
|
2019-04-30 13:23:21 -07:00
|
|
|
let cache = Self {
|
2019-04-19 02:39:44 -07:00
|
|
|
cached_schedules: RwLock::new((HashMap::new(), VecDeque::new())),
|
|
|
|
epoch_schedule,
|
2019-04-30 13:23:21 -07:00
|
|
|
max_epoch: RwLock::new(0),
|
2019-10-08 14:58:49 -07:00
|
|
|
max_schedules: CacheCapacity::default(),
|
2019-12-03 16:31:59 -08:00
|
|
|
fixed_schedule: None,
|
2019-04-30 13:23:21 -07:00
|
|
|
};
|
|
|
|
|
2019-10-08 22:34:26 -07:00
|
|
|
// This sets the root and calculates the schedule at leader_schedule_epoch(root)
|
2019-06-26 00:19:48 -07:00
|
|
|
cache.set_root(root_bank);
|
|
|
|
|
2019-10-08 22:34:26 -07:00
|
|
|
// Calculate the schedule for all epochs between 0 and leader_schedule_epoch(root)
|
2019-10-31 13:26:55 -07:00
|
|
|
let leader_schedule_epoch = epoch_schedule.get_leader_schedule_epoch(root_bank.slot());
|
|
|
|
for epoch in 0..leader_schedule_epoch {
|
2019-06-26 00:19:48 -07:00
|
|
|
let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch);
|
|
|
|
cache.slot_leader_at(first_slot_in_epoch, Some(root_bank));
|
|
|
|
}
|
2019-04-30 13:23:21 -07:00
|
|
|
cache
|
2019-04-19 02:39:44 -07:00
|
|
|
}
|
|
|
|
|
2019-10-08 14:58:49 -07:00
|
|
|
pub fn set_max_schedules(&mut self, max_schedules: usize) {
|
|
|
|
if max_schedules > 0 {
|
|
|
|
self.max_schedules = CacheCapacity(max_schedules);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn max_schedules(&self) -> usize {
|
|
|
|
self.max_schedules.0
|
|
|
|
}
|
|
|
|
|
2019-06-26 00:19:48 -07:00
|
|
|
pub fn set_root(&self, root_bank: &Bank) {
|
2019-10-08 22:34:26 -07:00
|
|
|
let new_max_epoch = self
|
|
|
|
.epoch_schedule
|
|
|
|
.get_leader_schedule_epoch(root_bank.slot());
|
2019-06-26 00:19:48 -07:00
|
|
|
let old_max_epoch = {
|
|
|
|
let mut max_epoch = self.max_epoch.write().unwrap();
|
|
|
|
let old_max_epoch = *max_epoch;
|
|
|
|
*max_epoch = new_max_epoch;
|
|
|
|
assert!(new_max_epoch >= old_max_epoch);
|
|
|
|
old_max_epoch
|
|
|
|
};
|
|
|
|
|
|
|
|
// Calculate the epoch as soon as it's rooted
|
|
|
|
if new_max_epoch > old_max_epoch {
|
|
|
|
self.compute_epoch_schedule(new_max_epoch, root_bank);
|
|
|
|
}
|
2019-04-19 02:39:44 -07:00
|
|
|
}
|
|
|
|
|
2019-11-02 00:38:30 -07:00
|
|
|
pub fn slot_leader_at(&self, slot: Slot, bank: Option<&Bank>) -> Option<Pubkey> {
|
2019-04-30 13:23:21 -07:00
|
|
|
if let Some(bank) = bank {
|
|
|
|
self.slot_leader_at_else_compute(slot, bank)
|
2019-09-05 18:20:30 -07:00
|
|
|
} else if self.epoch_schedule.slots_per_epoch == 0 {
|
|
|
|
None
|
2019-04-19 02:39:44 -07:00
|
|
|
} else {
|
2019-04-30 13:23:21 -07:00
|
|
|
self.slot_leader_at_no_compute(slot)
|
2019-04-19 02:39:44 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-26 11:33:51 -07:00
|
|
|
    /// Return the (next slot, last slot) after the given current_slot that the given node will be leader.
    ///
    /// Scans forward from `current_slot + 1`, epoch by epoch, up to the
    /// confirmed `max_epoch`. The returned pair brackets the node's next
    /// *contiguous* run of leader slots. Slots that already have shreds in
    /// `blocktree` are skipped. Returns `None` when the requested range
    /// starts past the confirmed horizon or the node has no upcoming slots.
    pub fn next_leader_slot(
        &self,
        pubkey: &Pubkey,
        mut current_slot: Slot,
        bank: &Bank,
        blocktree: Option<&Blocktree>,
    ) -> Option<(Slot, Slot)> {
        let (mut epoch, mut start_index) = bank.get_epoch_and_slot_index(current_slot + 1);
        // `first_slot`/`last_slot` track the contiguous leader run found so far.
        let mut first_slot = None;
        let mut last_slot = current_slot;
        let max_epoch = *self.max_epoch.read().unwrap();
        if epoch > max_epoch {
            debug!(
                "Requested next leader in slot: {} of unconfirmed epoch: {}",
                current_slot + 1,
                epoch
            );
            return None;
        }
        while let Some(leader_schedule) = self.get_epoch_schedule_else_compute(epoch, bank) {
            // clippy thinks I should do this:
            // for (i, <item>) in leader_schedule
            //  .iter()
            //  .enumerate()
            //  .take(bank.get_slots_in_epoch(epoch))
            //  .skip(from_slot_index + 1) {
            //
            //  but leader_schedule doesn't implement Iter...
            #[allow(clippy::needless_range_loop)]
            for i in start_index..bank.get_slots_in_epoch(epoch) {
                // `current_slot` advances in lockstep with the schedule index.
                current_slot += 1;
                if *pubkey == leader_schedule[i] {
                    if let Some(blocktree) = blocktree {
                        if let Some(meta) = blocktree.meta(current_slot).unwrap() {
                            // We have already sent a shred for this slot, so skip it
                            if meta.received > 0 {
                                continue;
                            }
                        }
                    }

                    if first_slot.is_none() {
                        first_slot = Some(current_slot);
                    }
                    last_slot = current_slot;
                } else if first_slot.is_some() {
                    // A non-leader slot ends the contiguous run: report it.
                    return Some((first_slot.unwrap(), last_slot));
                }
            }

            epoch += 1;
            // Never look past the confirmed epoch horizon.
            if epoch > max_epoch {
                break;
            }
            start_index = 0;
        }
        // A run that extends to the scan boundary (or no run at all).
        first_slot.map(|slot| (slot, last_slot))
    }
|
|
|
|
|
2019-12-03 16:31:59 -08:00
|
|
|
pub fn set_fixed_leader_schedule(&mut self, fixed_schedule: Option<FixedSchedule>) {
|
|
|
|
self.fixed_schedule = fixed_schedule.map(Arc::new);
|
|
|
|
}
|
|
|
|
|
2019-11-02 00:38:30 -07:00
|
|
|
fn slot_leader_at_no_compute(&self, slot: Slot) -> Option<Pubkey> {
|
2019-04-30 13:23:21 -07:00
|
|
|
let (epoch, slot_index) = self.epoch_schedule.get_epoch_and_slot_index(slot);
|
2019-12-03 16:31:59 -08:00
|
|
|
if let Some(ref fixed_schedule) = self.fixed_schedule {
|
|
|
|
if epoch >= fixed_schedule.start_epoch {
|
|
|
|
return Some(fixed_schedule.leader_schedule[slot_index]);
|
|
|
|
}
|
|
|
|
}
|
2019-04-30 13:23:21 -07:00
|
|
|
self.cached_schedules
|
|
|
|
.read()
|
|
|
|
.unwrap()
|
|
|
|
.0
|
|
|
|
.get(&epoch)
|
|
|
|
.map(|schedule| schedule[slot_index])
|
|
|
|
}
|
|
|
|
|
2019-11-02 00:38:30 -07:00
|
|
|
fn slot_leader_at_else_compute(&self, slot: Slot, bank: &Bank) -> Option<Pubkey> {
|
2019-04-30 13:23:21 -07:00
|
|
|
let cache_result = self.slot_leader_at_no_compute(slot);
|
|
|
|
// Forbid asking for slots in an unconfirmed epoch
|
|
|
|
let bank_epoch = self.epoch_schedule.get_epoch_and_slot_index(slot).0;
|
|
|
|
if bank_epoch > *self.max_epoch.read().unwrap() {
|
2019-05-24 19:20:09 -07:00
|
|
|
debug!(
|
2019-04-30 13:23:21 -07:00
|
|
|
"Requested leader in slot: {} of unconfirmed epoch: {}",
|
|
|
|
slot, bank_epoch
|
|
|
|
);
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
if cache_result.is_some() {
|
|
|
|
cache_result
|
|
|
|
} else {
|
|
|
|
let (epoch, slot_index) = bank.get_epoch_and_slot_index(slot);
|
|
|
|
if let Some(epoch_schedule) = self.compute_epoch_schedule(epoch, bank) {
|
|
|
|
Some(epoch_schedule[slot_index])
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-19 02:39:44 -07:00
|
|
|
fn get_epoch_schedule_else_compute(
|
|
|
|
&self,
|
2019-11-02 00:38:30 -07:00
|
|
|
epoch: Epoch,
|
2019-04-19 02:39:44 -07:00
|
|
|
bank: &Bank,
|
|
|
|
) -> Option<Arc<LeaderSchedule>> {
|
2019-12-03 16:31:59 -08:00
|
|
|
if let Some(ref fixed_schedule) = self.fixed_schedule {
|
|
|
|
if epoch >= fixed_schedule.start_epoch {
|
|
|
|
return Some(fixed_schedule.leader_schedule.clone());
|
|
|
|
}
|
|
|
|
}
|
2019-04-19 02:39:44 -07:00
|
|
|
let epoch_schedule = self.cached_schedules.read().unwrap().0.get(&epoch).cloned();
|
|
|
|
|
|
|
|
if epoch_schedule.is_some() {
|
|
|
|
epoch_schedule
|
|
|
|
} else if let Some(epoch_schedule) = self.compute_epoch_schedule(epoch, bank) {
|
|
|
|
Some(epoch_schedule)
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-02 00:38:30 -07:00
|
|
|
fn compute_epoch_schedule(&self, epoch: Epoch, bank: &Bank) -> Option<Arc<LeaderSchedule>> {
|
2019-04-19 02:39:44 -07:00
|
|
|
let leader_schedule = leader_schedule_utils::leader_schedule(epoch, bank);
|
|
|
|
leader_schedule.map(|leader_schedule| {
|
|
|
|
let leader_schedule = Arc::new(leader_schedule);
|
|
|
|
let (ref mut cached_schedules, ref mut order) = *self.cached_schedules.write().unwrap();
|
|
|
|
// Check to see if schedule exists in case somebody already inserted in the time we were
|
|
|
|
// waiting for the lock
|
|
|
|
let entry = cached_schedules.entry(epoch);
|
|
|
|
if let Entry::Vacant(v) = entry {
|
|
|
|
v.insert(leader_schedule.clone());
|
|
|
|
order.push_back(epoch);
|
2019-10-08 14:58:49 -07:00
|
|
|
Self::retain_latest(cached_schedules, order, self.max_schedules());
|
2019-04-19 02:39:44 -07:00
|
|
|
}
|
|
|
|
leader_schedule
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-10-08 14:58:49 -07:00
|
|
|
fn retain_latest(
|
2019-11-02 00:38:30 -07:00
|
|
|
schedules: &mut HashMap<Epoch, Arc<LeaderSchedule>>,
|
2019-10-08 14:58:49 -07:00
|
|
|
order: &mut VecDeque<u64>,
|
|
|
|
max_schedules: usize,
|
|
|
|
) {
|
|
|
|
while schedules.len() > max_schedules {
|
2019-04-19 02:39:44 -07:00
|
|
|
let first = order.pop_front().unwrap();
|
|
|
|
schedules.remove(&first);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        blocktree::make_slot_entries,
        genesis_utils::{
            create_genesis_config, create_genesis_config_with_leader, GenesisConfigInfo,
            BOOTSTRAP_LEADER_LAMPORTS,
        },
        get_tmp_ledger_path,
        staking_utils::tests::setup_vote_and_stake_accounts,
    };
    use solana_runtime::bank::Bank;
    use solana_sdk::epoch_schedule::{
        EpochSchedule, DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET, DEFAULT_SLOTS_PER_EPOCH,
        MINIMUM_SLOTS_PER_EPOCH,
    };
    use solana_sdk::signature::{Keypair, KeypairUtil};
    use std::{sync::mpsc::channel, sync::Arc, thread::Builder};

    // Constructor should eagerly populate schedules for every confirmed epoch.
    #[test]
    fn test_new_cache() {
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
        let bank = Bank::new(&genesis_config);
        let cache = LeaderScheduleCache::new_from_bank(&bank);
        assert_eq!(bank.slot(), 0);
        assert_eq!(cache.max_schedules(), MAX_SCHEDULES);

        // Epoch schedule for all epochs in the range:
        // [0, leader_schedule_epoch(bank.slot())] should
        // be calculated by constructor
        let epoch_schedule = bank.epoch_schedule();
        let leader_schedule_epoch = bank.get_leader_schedule_epoch(bank.slot());
        for epoch in 0..=leader_schedule_epoch {
            let first_slot_in_leader_schedule_epoch = epoch_schedule.get_first_slot_in_epoch(epoch);
            let last_slot_in_leader_schedule_epoch = epoch_schedule.get_last_slot_in_epoch(epoch);
            assert!(cache
                .slot_leader_at(first_slot_in_leader_schedule_epoch, None)
                .is_some());
            assert!(cache
                .slot_leader_at(last_slot_in_leader_schedule_epoch, None)
                .is_some());
            if epoch == leader_schedule_epoch {
                // One slot past the last confirmed epoch must be unknown.
                assert!(cache
                    .slot_leader_at(last_slot_in_leader_schedule_epoch + 1, None)
                    .is_none());
            }
        }

        // Should be a schedule for every epoch just checked
        assert_eq!(
            cache.cached_schedules.read().unwrap().0.len() as u64,
            leader_schedule_epoch + 1
        );
    }

    // Eviction keeps exactly the newest MAX_SCHEDULES entries, in order.
    #[test]
    fn test_retain_latest() {
        let mut cached_schedules = HashMap::new();
        let mut order = VecDeque::new();
        for i in 0..=MAX_SCHEDULES {
            cached_schedules.insert(i as u64, Arc::new(LeaderSchedule::default()));
            order.push_back(i as u64);
        }
        LeaderScheduleCache::retain_latest(&mut cached_schedules, &mut order, MAX_SCHEDULES);
        assert_eq!(cached_schedules.len(), MAX_SCHEDULES);
        let mut keys: Vec<_> = cached_schedules.keys().cloned().collect();
        keys.sort();
        let expected: Vec<_> = (1..=MAX_SCHEDULES as u64).collect();
        let expected_order: VecDeque<_> = (1..=MAX_SCHEDULES as u64).collect();
        assert_eq!(expected, keys);
        assert_eq!(expected_order, order);
    }

    // Hammer the compute path from many threads; repeated to improve the odds
    // of surfacing a race.
    #[test]
    fn test_thread_race_leader_schedule_cache() {
        let num_runs = 10;
        for _ in 0..num_runs {
            run_thread_race()
        }
    }

    fn run_thread_race() {
        let slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH as u64;
        let epoch_schedule = EpochSchedule::custom(slots_per_epoch, slots_per_epoch / 2, true);
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
        let bank = Arc::new(Bank::new(&genesis_config));
        let cache = Arc::new(LeaderScheduleCache::new(epoch_schedule, &bank));

        let num_threads = 10;
        let (threads, senders): (Vec<_>, Vec<_>) = (0..num_threads)
            .map(|_| {
                let cache = cache.clone();
                let bank = bank.clone();
                let (sender, receiver) = channel();
                (
                    Builder::new()
                        .name("test_thread_race_leader_schedule_cache".to_string())
                        .spawn(move || {
                            // Block until released so all threads race at once.
                            let _ = receiver.recv();
                            cache.slot_leader_at(bank.slot(), Some(&bank));
                        })
                        .unwrap(),
                    sender,
                )
            })
            .unzip();

        for sender in &senders {
            sender.send(true).unwrap();
        }

        for t in threads.into_iter() {
            t.join().unwrap();
        }

        // Despite the race, exactly one schedule entry should exist.
        let (ref cached_schedules, ref order) = *cache.cached_schedules.read().unwrap();
        assert_eq!(cached_schedules.len(), 1);
        assert_eq!(order.len(), 1);
    }

    #[test]
    fn test_next_leader_slot() {
        let pubkey = Pubkey::new_rand();
        let mut genesis_config = create_genesis_config_with_leader(
            BOOTSTRAP_LEADER_LAMPORTS,
            &pubkey,
            BOOTSTRAP_LEADER_LAMPORTS,
        )
        .genesis_config;
        genesis_config.epoch_schedule = EpochSchedule::custom(
            DEFAULT_SLOTS_PER_EPOCH,
            DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET,
            false,
        );

        let bank = Bank::new(&genesis_config);
        let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));

        assert_eq!(
            cache.slot_leader_at(bank.slot(), Some(&bank)).unwrap(),
            pubkey
        );
        // Single bootstrap leader: it leads every slot through the end of the
        // confirmed range.
        assert_eq!(
            cache.next_leader_slot(&pubkey, 0, &bank, None),
            Some((1, 6047999))
        );
        assert_eq!(
            cache.next_leader_slot(&pubkey, 1, &bank, None),
            Some((2, 6047999))
        );
        assert_eq!(
            cache.next_leader_slot(
                &pubkey,
                2 * genesis_config.epoch_schedule.slots_per_epoch - 1, // no schedule generated for epoch 2
                &bank,
                None
            ),
            None
        );

        assert_eq!(
            cache.next_leader_slot(
                &Pubkey::new_rand(), // not in leader_schedule
                0,
                &bank,
                None
            ),
            None
        );
    }

    #[test]
    fn test_next_leader_slot_blocktree() {
        let pubkey = Pubkey::new_rand();
        let mut genesis_config = create_genesis_config_with_leader(
            BOOTSTRAP_LEADER_LAMPORTS,
            &pubkey,
            BOOTSTRAP_LEADER_LAMPORTS,
        )
        .genesis_config;
        genesis_config.epoch_schedule.warmup = false;

        let bank = Bank::new(&genesis_config);
        let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
        let ledger_path = get_tmp_ledger_path!();
        {
            let blocktree = Arc::new(
                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
            );

            assert_eq!(
                cache.slot_leader_at(bank.slot(), Some(&bank)).unwrap(),
                pubkey
            );
            // Check that the next leader slot after 0 is slot 1
            assert_eq!(
                cache
                    .next_leader_slot(&pubkey, 0, &bank, Some(&blocktree))
                    .unwrap()
                    .0,
                1
            );

            // Write a shred into slot 2 that chains to slot 1,
            // but slot 1 is empty so should not be skipped
            let (shreds, _) = make_slot_entries(2, 1, 1);
            blocktree.insert_shreds(shreds, None, false).unwrap();
            assert_eq!(
                cache
                    .next_leader_slot(&pubkey, 0, &bank, Some(&blocktree))
                    .unwrap()
                    .0,
                1
            );

            // Write a shred into slot 1
            let (shreds, _) = make_slot_entries(1, 0, 1);

            // Check that slot 1 and 2 are skipped
            blocktree.insert_shreds(shreds, None, false).unwrap();
            assert_eq!(
                cache
                    .next_leader_slot(&pubkey, 0, &bank, Some(&blocktree))
                    .unwrap()
                    .0,
                3
            );

            // Integrity checks
            assert_eq!(
                cache.next_leader_slot(
                    &pubkey,
                    2 * genesis_config.epoch_schedule.slots_per_epoch - 1, // no schedule generated for epoch 2
                    &bank,
                    Some(&blocktree)
                ),
                None
            );

            assert_eq!(
                cache.next_leader_slot(
                    &Pubkey::new_rand(), // not in leader_schedule
                    0,
                    &bank,
                    Some(&blocktree)
                ),
                None
            );
        }
        Blocktree::destroy(&ledger_path).unwrap();
    }

    #[test]
    fn test_next_leader_slot_next_epoch() {
        let GenesisConfigInfo {
            mut genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(10_000);
        genesis_config.epoch_schedule.warmup = false;

        let bank = Bank::new(&genesis_config);
        let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));

        // Create new vote account
        let node_pubkey = Pubkey::new_rand();
        let vote_account = Keypair::new();
        setup_vote_and_stake_accounts(
            &bank,
            &mint_keypair,
            &vote_account,
            &node_pubkey,
            BOOTSTRAP_LEADER_LAMPORTS,
        );

        // Have to wait until the epoch at after the epoch stakes generated at genesis
        // for the new votes to take effect.
        let mut target_slot = 1;
        let epoch = bank.get_leader_schedule_epoch(0);
        while bank.get_leader_schedule_epoch(target_slot) == epoch {
            target_slot += 1;
        }

        let bank = Bank::new_from_parent(&Arc::new(bank), &Pubkey::default(), target_slot);
        let mut expected_slot = 0;
        let epoch = bank.get_leader_schedule_epoch(target_slot);
        for i in 0..epoch {
            expected_slot += bank.get_slots_in_epoch(i);
        }

        // Find the first slot index in the new epoch led by the new node.
        let schedule = cache.compute_epoch_schedule(epoch, &bank).unwrap();
        let mut index = 0;
        while schedule[index] != node_pubkey {
            index += 1;
            assert_ne!(index, genesis_config.epoch_schedule.slots_per_epoch);
        }
        expected_slot += index;

        // If the max root isn't set, we'll get None
        assert!(cache
            .next_leader_slot(&node_pubkey, 0, &bank, None)
            .is_none());

        cache.set_root(&bank);
        assert_eq!(
            cache
                .next_leader_slot(&node_pubkey, 0, &bank, None)
                .unwrap()
                .0,
            expected_slot
        );
    }

    #[test]
    fn test_schedule_for_unconfirmed_epoch() {
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
        let bank = Arc::new(Bank::new(&genesis_config));
        let cache = LeaderScheduleCache::new_from_bank(&bank);

        assert_eq!(*cache.max_epoch.read().unwrap(), 1);

        // Asking for the leader for the last slot in epoch 1 is ok b/c
        // epoch 1 is confirmed
        assert_eq!(bank.get_epoch_and_slot_index(95).0, 1);
        assert!(cache.slot_leader_at(95, Some(&bank)).is_some());

        // Asking for the leader for the first slot in epoch 2 is not ok
        // b/c epoch 2 is unconfirmed
        assert_eq!(bank.get_epoch_and_slot_index(96).0, 2);
        assert!(cache.slot_leader_at(96, Some(&bank)).is_none());

        let bank2 = Bank::new_from_parent(&bank, &Pubkey::new_rand(), 95);
        assert!(bank2.epoch_vote_accounts(2).is_some());

        // Set root for a slot in epoch 1, so that epoch 2 is now confirmed
        cache.set_root(&bank2);
        assert_eq!(*cache.max_epoch.read().unwrap(), 2);
        assert!(cache.slot_leader_at(96, Some(&bank2)).is_some());
        assert_eq!(bank2.get_epoch_and_slot_index(223).0, 2);
        assert!(cache.slot_leader_at(223, Some(&bank2)).is_some());
        assert_eq!(bank2.get_epoch_and_slot_index(224).0, 3);
        assert!(cache.slot_leader_at(224, Some(&bank2)).is_none());
    }

    #[test]
    fn test_set_max_schedules() {
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
        let bank = Arc::new(Bank::new(&genesis_config));
        let mut cache = LeaderScheduleCache::new_from_bank(&bank);

        // Max schedules must be greater than 0
        cache.set_max_schedules(0);
        assert_eq!(cache.max_schedules(), MAX_SCHEDULES);

        cache.set_max_schedules(std::usize::MAX);
        assert_eq!(cache.max_schedules(), std::usize::MAX);
    }
}
|