Make minimum warmup period 32 slots long (#4031)

* Make minimum warmup period 32 slots long

* PR fixes
carllin 2019-04-29 15:26:52 -07:00 committed by GitHub
parent bae0aadafa
commit 73f250f03a
8 changed files with 69 additions and 42 deletions


@@ -10,6 +10,7 @@ use crate::gossip_service::discover_nodes;
 use crate::locktower::VOTE_THRESHOLD_DEPTH;
 use crate::poh_service::PohServiceConfig;
 use solana_client::thin_client::create_client;
+use solana_runtime::bank::MINIMUM_SLOT_LENGTH;
 use solana_sdk::client::SyncClient;
 use solana_sdk::hash::Hash;
 use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
@@ -147,21 +148,23 @@ pub fn kill_entry_and_spend_and_verify_rest(
     entry_point_info: &ContactInfo,
     funding_keypair: &Keypair,
     nodes: usize,
+    slot_millis: u64,
 ) {
     solana_logger::setup();
     let cluster_nodes = discover_nodes(&entry_point_info.gossip, nodes).unwrap();
     assert!(cluster_nodes.len() >= nodes);
     let client = create_client(entry_point_info.client_facing_addr(), FULLNODE_PORT_RANGE);
+    let first_two_epoch_slots = MINIMUM_SLOT_LENGTH * 3;
     info!("sleeping for 2 leader fortnights");
     sleep(Duration::from_millis(
-        SLOT_MILLIS * NUM_CONSECUTIVE_LEADER_SLOTS * 2,
+        slot_millis * first_two_epoch_slots as u64,
     ));
-    info!("done sleeping for 2 fortnights");
+    info!("done sleeping for first 2 warmup epochs");
     info!("killing entry point");
     assert!(client.fullnode_exit().unwrap());
-    info!("sleeping for 2 leader fortnights");
+    info!("sleeping for some time");
     sleep(Duration::from_millis(
-        SLOT_MILLIS * NUM_CONSECUTIVE_LEADER_SLOTS,
+        slot_millis * NUM_CONSECUTIVE_LEADER_SLOTS,
    ));
     info!("done sleeping for 2 fortnights");
     for ingress_node in &cluster_nodes {
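
The new sleep length can be sanity-checked: with MINIMUM_SLOT_LENGTH = 32 (per the commit title), warmup epoch 0 lasts 32 slots and epoch 1 lasts 64, so MINIMUM_SLOT_LENGTH * 3 = 96 slots spans exactly the first two epochs. A standalone sketch, not part of the diff; the `slot_millis` value here is an assumed example:

```rust
// Sanity check, assuming MINIMUM_SLOT_LENGTH = 32 as in the commit title.
const MINIMUM_SLOT_LENGTH: u64 = 32;

fn main() {
    let epoch0_slots = MINIMUM_SLOT_LENGTH; // warmup epoch 0: 2^5 slots
    let epoch1_slots = MINIMUM_SLOT_LENGTH * 2; // warmup epoch 1: 2^6 slots
    assert_eq!(epoch0_slots + epoch1_slots, MINIMUM_SLOT_LENGTH * 3); // 96 slots

    let slot_millis = 80; // hypothetical: 8 ticks/slot at 10 ms/tick
    let sleep_ms = slot_millis * MINIMUM_SLOT_LENGTH * 3;
    assert_eq!(sleep_ms, 7680); // roughly 7.7 s to get past the first two epochs
}
```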


@@ -140,7 +140,7 @@ mod tests {
     use super::*;
     use crate::blocktree::tests::make_slot_entries;
     use crate::voting_keypair::tests::new_vote_account;
-    use solana_runtime::bank::{Bank, EpochSchedule};
+    use solana_runtime::bank::{Bank, EpochSchedule, MINIMUM_SLOT_LENGTH};
     use solana_sdk::genesis_block::{GenesisBlock, BOOTSTRAP_LEADER_LAMPORTS};
     use solana_sdk::signature::{Keypair, KeypairUtil};
     use std::sync::mpsc::channel;
@@ -151,11 +151,9 @@ mod tests {

     #[test]
     fn test_slot_leader_at_else_compute() {
-        let slots_per_epoch = 10;
-        let epoch_schedule = EpochSchedule::new(slots_per_epoch, slots_per_epoch / 2, true);
-        let cache = LeaderScheduleCache::new(epoch_schedule);
         let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
         let bank = Bank::new(&genesis_block);
+        let cache = LeaderScheduleCache::new_from_bank(&bank);

         // Nothing in the cache, should return None
         assert!(cache.slot_leader_at(bank.slot()).is_none());
@@ -195,7 +193,7 @@ mod tests {
     }

     fn run_thread_race() {
-        let slots_per_epoch = 10;
+        let slots_per_epoch = MINIMUM_SLOT_LENGTH as u64;
         let epoch_schedule = EpochSchedule::new(slots_per_epoch, slots_per_epoch / 2, true);
         let cache = Arc::new(LeaderScheduleCache::new(epoch_schedule));
         let (genesis_block, _mint_keypair) = GenesisBlock::new(2);


@@ -449,6 +449,7 @@ impl Drop for LocalCluster {
 #[cfg(test)]
 mod test {
     use super::*;
+    use solana_runtime::bank::MINIMUM_SLOT_LENGTH;

     #[test]
     fn test_local_cluster_start_and_exit() {
@@ -472,7 +473,7 @@ mod test {
             node_stakes: vec![3; NUM_NODES],
             cluster_lamports: 100,
             ticks_per_slot: 16,
-            slots_per_epoch: 16,
+            slots_per_epoch: MINIMUM_SLOT_LENGTH as u64,
             ..ClusterConfig::default()
         };
         let cluster = LocalCluster::new(&config);


@@ -2,7 +2,7 @@
 //! "ticks", a measure of time in the PoH stream
 use crate::poh_recorder::PohRecorder;
 use crate::service::Service;
-use solana_sdk::timing::NUM_TICKS_PER_SECOND;
+use solana_sdk::timing::{self, NUM_TICKS_PER_SECOND};
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc::SyncSender;
 use std::sync::{Arc, Mutex};
@@ -28,6 +28,15 @@ impl Default for PohServiceConfig {
     }
 }

+impl PohServiceConfig {
+    pub fn ticks_to_ms(&self, num_ticks: u64) -> u64 {
+        match self {
+            PohServiceConfig::Sleep(d) => timing::duration_as_ms(d) * num_ticks,
+            _ => panic!("Unsupported tick config"),
+        }
+    }
+}
+
 pub struct PohService {
     tick_producer: JoinHandle<()>,
 }
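
The new helper only handles the `Sleep` variant, where the configured `Duration` is the time spent per tick. A minimal standalone mirror for illustration (not the crate's actual type; the 10 ms tick is an assumed example):

```rust
// Mirror of the ticks_to_ms idea above: ms-per-tick times number of ticks.
use std::time::Duration;

enum PohServiceConfig {
    Sleep(Duration), // sleep this long between ticks
}

impl PohServiceConfig {
    fn ticks_to_ms(&self, num_ticks: u64) -> u64 {
        match self {
            PohServiceConfig::Sleep(d) => d.as_millis() as u64 * num_ticks,
        }
    }
}

fn main() {
    // e.g. a 10 ms tick and 8 ticks per slot give an 80 ms slot
    let config = PohServiceConfig::Sleep(Duration::from_millis(10));
    assert_eq!(config.ticks_to_ms(8), 80);
}
```

The integration tests below use this conversion to turn `ticks_per_slot` into the `slot_millis` argument that `kill_entry_and_spend_and_verify_rest` now takes.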


@@ -255,7 +255,7 @@ mod test {
     use crate::packet::{index_blobs, Blob};
     use crate::service::Service;
     use crate::streamer::{blob_receiver, responder};
-    use solana_runtime::bank::Bank;
+    use solana_runtime::bank::{Bank, MINIMUM_SLOT_LENGTH};
     use solana_sdk::genesis_block::GenesisBlock;
     use solana_sdk::hash::Hash;
     use std::fs::remove_dir_all;
@@ -320,8 +320,8 @@ mod test {
         );

         // with a Bank and no idea who leader is, we keep the blobs (for now)
-        // TODO: persistr in blocktree that we didn't know who the leader was at the time?
-        blob.set_slot(100);
+        // TODO: persist in blocktree that we didn't know who the leader was at the time?
+        blob.set_slot(MINIMUM_SLOT_LENGTH as u64 * 3);
         assert_eq!(
             should_retransmit_and_persist(&blob, Some(&bank), Some(&cache), &me_id),
             true


@@ -6,6 +6,7 @@ use solana::fullnode::FullnodeConfig;
 use solana::gossip_service::discover_nodes;
 use solana::local_cluster::{ClusterConfig, LocalCluster};
 use solana::poh_service::PohServiceConfig;
+use solana_runtime::bank::MINIMUM_SLOT_LENGTH;
 use solana_sdk::timing;
 use std::time::Duration;

@@ -96,7 +97,7 @@ fn test_leader_failure_4() {
     let config = ClusterConfig {
         cluster_lamports: 10_000,
         node_stakes: vec![100; 4],
-        fullnode_config,
+        fullnode_config: fullnode_config.clone(),
         ..ClusterConfig::default()
     };
     let local = LocalCluster::new(&config);
@@ -104,6 +105,9 @@ fn test_leader_failure_4() {
         &local.entry_point_info,
         &local.funding_keypair,
         num_nodes,
+        fullnode_config
+            .tick_config
+            .ticks_to_ms(config.ticks_per_slot as u64),
     );
 }
 #[test]
@@ -111,8 +115,8 @@ fn test_two_unbalanced_stakes() {
     solana_logger::setup();
     let mut fullnode_config = FullnodeConfig::default();
     let num_ticks_per_second = 100;
-    let num_ticks_per_slot = 160;
-    let num_slots_per_epoch = 16;
+    let num_ticks_per_slot = 40;
+    let num_slots_per_epoch = MINIMUM_SLOT_LENGTH as u64;
     fullnode_config.tick_config =
         PohServiceConfig::Sleep(Duration::from_millis(100 / num_ticks_per_second));
     fullnode_config.rpc_config.enable_fullnode_exit = true;
@@ -124,13 +128,13 @@ fn test_two_unbalanced_stakes() {
         slots_per_epoch: num_slots_per_epoch,
         ..ClusterConfig::default()
     });

     cluster_tests::sleep_n_epochs(
         10.0,
         &fullnode_config.tick_config,
         num_ticks_per_slot,
         num_slots_per_epoch,
     );
     cluster.close_preserve_ledgers();
     let leader_id = cluster.entry_point_info.id;
     let leader_ledger = cluster.fullnode_infos[&leader_id].ledger_path.clone();
@@ -163,7 +167,7 @@ fn test_forwarding() {
 #[test]
 fn test_restart_node() {
     let fullnode_config = FullnodeConfig::default();
-    let slots_per_epoch = 8;
+    let slots_per_epoch = MINIMUM_SLOT_LENGTH as u64;
     let ticks_per_slot = 16;
     let mut cluster = LocalCluster::new(&ClusterConfig {
         node_stakes: vec![3],


@@ -16,6 +16,7 @@ use solana::storage_stage::StorageState;
 use solana::storage_stage::STORAGE_ROTATE_TEST_COUNT;
 use solana::streamer;
 use solana::tvu::{Sockets, Tvu};
+use solana_runtime::bank::MINIMUM_SLOT_LENGTH;
 use solana_sdk::genesis_block::GenesisBlock;
 use solana_sdk::signature::{Keypair, KeypairUtil};
 use solana_sdk::system_transaction;
@@ -78,7 +79,7 @@ fn test_replay() {
     let (mut genesis_block, mint_keypair) =
         GenesisBlock::new_with_leader(total_balance, &leader.info.id, leader_balance);
     genesis_block.ticks_per_slot = 160;
-    genesis_block.slots_per_epoch = 16;
+    genesis_block.slots_per_epoch = MINIMUM_SLOT_LENGTH as u64;

     let (blocktree_path, blockhash) = create_new_tmp_ledger!(&genesis_block);
     let tvu_addr = target1.info.tvu;


@@ -24,12 +24,14 @@ use solana_sdk::signature::{Keypair, Signature};
 use solana_sdk::system_transaction;
 use solana_sdk::timing::{duration_as_ms, duration_as_us, MAX_RECENT_BLOCKHASHES};
 use solana_sdk::transaction::{Result, Transaction, TransactionError};
-use solana_vote_api::vote_state::{self, Vote};
+use solana_vote_api::vote_state::{self, Vote, MAX_LOCKOUT_HISTORY};
 use std::cmp;
 use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use std::sync::{Arc, RwLock};
 use std::time::Instant;

+pub const MINIMUM_SLOT_LENGTH: usize = MAX_LOCKOUT_HISTORY + 1;
+
 #[derive(Default, Debug, PartialEq, Eq, Clone, Copy)]
 pub struct EpochSchedule {
     /// The maximum number of slots in each epoch.
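
Why 32: the commit title pins MINIMUM_SLOT_LENGTH at 32, which together with the definition above implies MAX_LOCKOUT_HISTORY = 31, so an epoch is never shorter than a full vote lockout history plus the slot being voted on. A sketch of the relationship; the 31 is inferred from the title, not shown in this diff:

```rust
// Inferred from the commit title ("32 slots"): MAX_LOCKOUT_HISTORY = 31.
const MAX_LOCKOUT_HISTORY: usize = 31;
pub const MINIMUM_SLOT_LENGTH: usize = MAX_LOCKOUT_HISTORY + 1;

fn main() {
    assert_eq!(MINIMUM_SLOT_LENGTH, 32);
    // A power of two keeps the trailing_zeros()/log2 warmup math below exact.
    assert!(MINIMUM_SLOT_LENGTH.is_power_of_two());
}
```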
@@ -38,20 +40,26 @@ pub struct EpochSchedule {
     /// A number of slots before slot_index 0. Used to calculate finalized staked nodes.
     pub stakers_slot_offset: u64,

-    /// basically: log2(slots_per_epoch)
+    /// basically: log2(slots_per_epoch) - log2(MINIMUM_SLOT_LEN)
     pub first_normal_epoch: u64,

-    /// basically: 2.pow(first_normal_epoch)
+    /// basically: 2.pow(first_normal_epoch) - MINIMUM_SLOT_LEN
     pub first_normal_slot: u64,
 }

 impl EpochSchedule {
     pub fn new(slots_per_epoch: u64, stakers_slot_offset: u64, warmup: bool) -> Self {
+        assert!(slots_per_epoch >= MINIMUM_SLOT_LENGTH as u64);
         let (first_normal_epoch, first_normal_slot) = if warmup {
             let next_power_of_two = slots_per_epoch.next_power_of_two();
-            let log2_slots_per_epoch = next_power_of_two.trailing_zeros();
+            let log2_slots_per_epoch = next_power_of_two
+                .trailing_zeros()
+                .saturating_sub(MINIMUM_SLOT_LENGTH.trailing_zeros());

-            (u64::from(log2_slots_per_epoch), next_power_of_two - 1)
+            (
+                u64::from(log2_slots_per_epoch),
+                next_power_of_two.saturating_sub(MINIMUM_SLOT_LENGTH as u64),
+            )
         } else {
             (0, 0)
         };
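
A worked example of the new warmup bounds, as a standalone sketch assuming MINIMUM_SLOT_LENGTH = 32: for slots_per_epoch = 512 = 2^9, the warmup epochs run 32, 64, 128, 256 slots, so first_normal_epoch = 9 - 5 = 4 and first_normal_slot = 512 - 32 = 480:

```rust
// Standalone mirror of the warmup computation in EpochSchedule::new,
// assuming MINIMUM_SLOT_LENGTH = 32.
const MINIMUM_SLOT_LENGTH: u64 = 32;

fn warmup_bounds(slots_per_epoch: u64) -> (u64, u64) {
    assert!(slots_per_epoch >= MINIMUM_SLOT_LENGTH);
    let next_power_of_two = slots_per_epoch.next_power_of_two();
    let first_normal_epoch = u64::from(
        next_power_of_two
            .trailing_zeros()
            .saturating_sub(MINIMUM_SLOT_LENGTH.trailing_zeros()),
    );
    // sum of warmup epoch lengths 32 + 64 + ... + 2^(k-1) equals 2^k - 32
    let first_normal_slot = next_power_of_two.saturating_sub(MINIMUM_SLOT_LENGTH);
    (first_normal_epoch, first_normal_slot)
}

fn main() {
    assert_eq!(warmup_bounds(512), (4, 480)); // epochs 0..=3 are warmup
    assert_eq!(warmup_bounds(32), (0, 0)); // minimum-size epoch: no warmup at all
}
```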
@@ -66,7 +74,7 @@ impl EpochSchedule {
     /// get the length of the given epoch (in slots)
     pub fn get_slots_in_epoch(&self, epoch: u64) -> u64 {
         if epoch < self.first_normal_epoch {
-            2u64.pow(epoch as u32)
+            2u64.pow(epoch as u32 + MINIMUM_SLOT_LENGTH.trailing_zeros() as u32)
         } else {
             self.slots_per_epoch
         }
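
So during warmup, epoch e now contains 2^(e + log2(MINIMUM_SLOT_LENGTH)) slots rather than 2^e; with a 32-slot minimum that is 32, 64, 128, and so on. A quick standalone check:

```rust
// Sketch of the warmup branch of get_slots_in_epoch, assuming a 32-slot minimum.
fn slots_in_warmup_epoch(epoch: u32) -> u64 {
    2u64.pow(epoch + 32u64.trailing_zeros())
}

fn main() {
    assert_eq!(slots_in_warmup_epoch(0), 32);
    assert_eq!(slots_in_warmup_epoch(1), 64);
    assert_eq!(slots_in_warmup_epoch(2), 128); // matches the updated test below
}
```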
@@ -88,15 +96,18 @@ impl EpochSchedule {
     /// get epoch and offset into the epoch for the given slot
     pub fn get_epoch_and_slot_index(&self, slot: u64) -> (u64, u64) {
         if slot < self.first_normal_slot {
-            let epoch = if slot < 2 {
-                slot as u32
-            } else {
-                (slot + 2).next_power_of_two().trailing_zeros() - 1
-            };
+            let epoch = (slot + MINIMUM_SLOT_LENGTH as u64 + 1)
+                .next_power_of_two()
+                .trailing_zeros()
+                - MINIMUM_SLOT_LENGTH.trailing_zeros()
+                - 1;

-            let epoch_len = 2u64.pow(epoch);
+            let epoch_len = 2u64.pow(epoch + MINIMUM_SLOT_LENGTH.trailing_zeros());

-            (u64::from(epoch), slot - (epoch_len - 1))
+            (
+                u64::from(epoch),
+                slot - (epoch_len - MINIMUM_SLOT_LENGTH as u64),
+            )
         } else {
             (
                 self.first_normal_epoch + ((slot - self.first_normal_slot) / self.slots_per_epoch),
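
Tracing the new index math (standalone sketch, 32-slot minimum): warmup epoch boundaries fall at slots 0, 32, 96, 224, ..., and the epoch for a warmup slot comes from rounding slot + 33 up to a power of two:

```rust
// Check of the warmup slot -> (epoch, slot_index) mapping above,
// assuming MINIMUM_SLOT_LENGTH = 32 (called MIN here for brevity).
const MIN: u64 = 32;

fn epoch_and_index(slot: u64) -> (u64, u64) {
    let epoch =
        (slot + MIN + 1).next_power_of_two().trailing_zeros() - MIN.trailing_zeros() - 1;
    let epoch_len = 2u64.pow(epoch + MIN.trailing_zeros());
    // warmup epoch e starts at slot 2^(e + 5) - 32, i.e. epoch_len - MIN
    (u64::from(epoch), slot - (epoch_len - MIN))
}

fn main() {
    assert_eq!(epoch_and_index(0), (0, 0)); // epoch 0: slots 0..=31
    assert_eq!(epoch_and_index(31), (0, 31));
    assert_eq!(epoch_and_index(32), (1, 0)); // epoch 1: slots 32..=95
    assert_eq!(epoch_and_index(95), (1, 63));
    assert_eq!(epoch_and_index(96), (2, 0)); // epoch 2: slots 96..=223
}
```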
@@ -1651,9 +1662,9 @@ mod tests {
         let (mut genesis_block, _) = GenesisBlock::new_with_leader(5, &leader_id, leader_lamports);

         // set this up weird, forces future generation, odd mod(), etc.
-        // this says: "stakes for slot X should be generated at slot index 3 in slot X-2...
-        const SLOTS_PER_EPOCH: u64 = 8;
-        const STAKERS_SLOT_OFFSET: u64 = 21;
+        // this says: "stakes for epoch X should be generated at slot index 3 in epoch X-2...
+        const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOT_LENGTH as u64;
+        const STAKERS_SLOT_OFFSET: u64 = SLOTS_PER_EPOCH * 3 - 3;
         genesis_block.slots_per_epoch = SLOTS_PER_EPOCH;
         genesis_block.stakers_slot_offset = STAKERS_SLOT_OFFSET;
         genesis_block.epoch_warmup = false; // allows me to do the normal division stuff below
@@ -1734,8 +1745,8 @@ mod tests {
         let bank = Bank::new(&genesis_block);

-        assert_eq!(bank.get_slots_in_epoch(0), 1);
-        assert_eq!(bank.get_slots_in_epoch(2), 4);
+        assert_eq!(bank.get_slots_in_epoch(0), MINIMUM_SLOT_LENGTH as u64);
+        assert_eq!(bank.get_slots_in_epoch(2), (MINIMUM_SLOT_LENGTH * 4) as u64);
         assert_eq!(bank.get_slots_in_epoch(5000), genesis_block.slots_per_epoch);
     }
@@ -1744,16 +1755,16 @@ mod tests {
         // one week of slots at 8 ticks/slot, 10 ticks/sec is
        // (1 * 7 * 24 * 4500u64).next_power_of_two();

-        // test values between 1 and 16, should cover a good mix
-        for slots_per_epoch in 1..=16 {
+        // test values between MINIMUM_SLOT_LEN and MINIMUM_SLOT_LEN * 16, should cover a good mix
+        for slots_per_epoch in MINIMUM_SLOT_LENGTH as u64..=MINIMUM_SLOT_LENGTH as u64 * 16 {
             let epoch_schedule = EpochSchedule::new(slots_per_epoch, slots_per_epoch / 2, true);

             let mut last_stakers = 0;
             let mut last_epoch = 0;
-            let mut last_slots_in_epoch = 1;
+            let mut last_slots_in_epoch = MINIMUM_SLOT_LENGTH as u64;

             for slot in 0..(2 * slots_per_epoch) {
                 // verify that stakers_epoch is continuous over the warmup
                 // and into the first normal epoch
                 let stakers = epoch_schedule.get_stakers_epoch(slot);
                 if stakers != last_stakers {