Schedule node for consecutive slots as leader (#3353)

* Also tweak epoch and slot duration

* new test for leader schedule
Author: Pankaj Garg, 2019-03-19 06:36:45 -07:00 (committed by GitHub)
parent c70412d7bb
commit 56fcc93ef5
7 changed files with 158 additions and 19 deletions


@@ -12,11 +12,21 @@ pub struct LeaderSchedule {
impl LeaderSchedule {
// Note: passing in zero stakers will cause a panic.
-pub fn new(ids_and_stakes: &[(Pubkey, u64)], seed: [u8; 32], len: u64) -> Self {
+pub fn new(ids_and_stakes: &[(Pubkey, u64)], seed: [u8; 32], len: u64, repeat: u64) -> Self {
let (ids, stakes): (Vec<_>, Vec<_>) = ids_and_stakes.iter().cloned().unzip();
let rng = &mut ChaChaRng::from_seed(seed);
let weighted_index = WeightedIndex::new(stakes).unwrap();
-let slot_leaders = (0..len).map(|_| ids[weighted_index.sample(rng)]).collect();
+let mut current_node = Pubkey::default();
+let slot_leaders = (0..len)
+.map(|i| {
+if i % repeat == 0 {
+current_node = ids[weighted_index.sample(rng)];
+current_node
+} else {
+current_node
+}
+})
+.collect();
Self { slot_leaders }
}
}
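The `i % repeat == 0` check above means a fresh stake-weighted sample is drawn only on every `repeat`-th slot and the sampled node is reused for the following `repeat - 1` slots, so a leader always holds `repeat` consecutive slots (the last group is shorter when `len` is not a multiple of `repeat`). A minimal standalone sketch of that grouping; the `repeated_schedule` helper and the string ids are illustrative stand-ins for the crate's pubkeys and weighted sampler:

    // Sketch only: reuse one sampled value for `repeat` consecutive slots.
    fn repeated_schedule<T: Copy>(len: u64, repeat: u64, mut sample: impl FnMut() -> T) -> Vec<T> {
        let mut current = None;
        (0..len)
            .map(|i| {
                if i % repeat == 0 {
                    // Slot 0 always samples, so `current` is set before the first unwrap.
                    current = Some(sample());
                }
                current.unwrap()
            })
            .collect()
    }

    fn main() {
        // Toy sampler that alternates between two ids; the real code samples by stake.
        let mut pick_alice = false;
        let schedule = repeated_schedule(8, 4, || {
            pick_alice = !pick_alice;
            if pick_alice { "alice" } else { "bob" }
        });
        // Each draw covers four consecutive slots.
        assert_eq!(schedule, ["alice", "alice", "alice", "alice", "bob", "bob", "bob", "bob"]);
    }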
@@ -57,10 +67,76 @@ mod tests {
let mut seed_bytes = [0u8; 32];
seed_bytes.copy_from_slice(seed.as_ref());
let len = num_keys * 10;
-let leader_schedule = LeaderSchedule::new(&stakes, seed_bytes, len);
-let leader_schedule2 = LeaderSchedule::new(&stakes, seed_bytes, len);
+let leader_schedule = LeaderSchedule::new(&stakes, seed_bytes, len, 1);
+let leader_schedule2 = LeaderSchedule::new(&stakes, seed_bytes, len, 1);
assert_eq!(leader_schedule.slot_leaders.len() as u64, len);
// Check that the same schedule is reproducibly generated
assert_eq!(leader_schedule, leader_schedule2);
}
+#[test]
+fn test_repeated_leader_schedule() {
+let num_keys = 10;
+let stakes: Vec<_> = (0..num_keys)
+.map(|i| (Keypair::new().pubkey(), i))
+.collect();
+let seed = Keypair::new().pubkey();
+let mut seed_bytes = [0u8; 32];
+seed_bytes.copy_from_slice(seed.as_ref());
+let len = num_keys * 10;
+let repeat = 8;
+let leader_schedule = LeaderSchedule::new(&stakes, seed_bytes, len, repeat);
+assert_eq!(leader_schedule.slot_leaders.len() as u64, len);
+let mut leader_node = Pubkey::default();
+for (i, node) in leader_schedule.slot_leaders.iter().enumerate() {
+if i % repeat as usize == 0 {
+leader_node = *node;
+} else {
+assert_eq!(leader_node, *node);
+}
+}
+}
+#[test]
+fn test_repeated_leader_schedule_specific() {
+let alice_pubkey = Keypair::new().pubkey();
+let bob_pubkey = Keypair::new().pubkey();
+let stakes = vec![(alice_pubkey, 2), (bob_pubkey, 1)];
+let seed = Pubkey::default();
+let mut seed_bytes = [0u8; 32];
+seed_bytes.copy_from_slice(seed.as_ref());
+let len = 8;
+// What the schedule looks like without any repeats
+let leaders1 = LeaderSchedule::new(&stakes, seed_bytes, len, 1).slot_leaders;
+// What the schedule looks like with repeats
+let leaders2 = LeaderSchedule::new(&stakes, seed_bytes, len, 2).slot_leaders;
+assert_eq!(leaders1.len(), leaders2.len());
+let leaders1_expected = vec![
+alice_pubkey,
+alice_pubkey,
+alice_pubkey,
+bob_pubkey,
+alice_pubkey,
+alice_pubkey,
+alice_pubkey,
+alice_pubkey,
+];
+let leaders2_expected = vec![
+alice_pubkey,
+alice_pubkey,
+alice_pubkey,
+alice_pubkey,
+alice_pubkey,
+alice_pubkey,
+bob_pubkey,
+bob_pubkey,
+];
+assert_eq!(leaders1, leaders1_expected);
+assert_eq!(leaders2, leaders2_expected);
+}
}
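One non-obvious point in test_repeated_leader_schedule_specific above: with repeat = 2 the weighted sampler is consulted only on even slot indices, so only four draws are made from the seeded RNG instead of eight; leaders2 is a fresh sequence of four draws each held for two slots, not leaders1 with every entry duplicated, which is why the two expected vectors differ. A tiny standalone sketch of the draw count (the draws_needed helper is hypothetical, not part of the crate):

    // One fresh stake-weighted draw per group of `repeat` consecutive slots.
    fn draws_needed(len: u64, repeat: u64) -> u64 {
        (len + repeat - 1) / repeat // ceiling division
    }

    fn main() {
        assert_eq!(draws_needed(8, 1), 8); // repeat = 1: a draw for every slot
        assert_eq!(draws_needed(8, 2), 4); // repeat = 2: four draws, each held for two slots
    }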


@@ -2,6 +2,7 @@ use crate::leader_schedule::LeaderSchedule;
use crate::staking_utils;
use solana_runtime::bank::Bank;
use solana_sdk::pubkey::Pubkey;
+use solana_sdk::timing::NUM_CONSECUTIVE_LEADER_SLOTS;
/// Return the leader schedule for the given epoch.
fn leader_schedule(epoch_height: u64, bank: &Bank) -> Option<LeaderSchedule> {
@@ -10,7 +11,12 @@ fn leader_schedule(epoch_height: u64, bank: &Bank) -> Option<LeaderSchedule> {
seed[0..8].copy_from_slice(&epoch_height.to_le_bytes());
let mut stakes: Vec<_> = stakes.into_iter().collect();
sort_stakes(&mut stakes);
-LeaderSchedule::new(&stakes, seed, bank.get_slots_in_epoch(epoch_height))
+LeaderSchedule::new(
+&stakes,
+seed,
+bank.get_slots_in_epoch(epoch_height),
+NUM_CONSECUTIVE_LEADER_SLOTS,
+)
})
}
@@ -123,8 +129,12 @@ mod tests {
let ids_and_stakes: Vec<_> = staking_utils::delegated_stakes(&bank).into_iter().collect();
let seed = [0u8; 32];
-let leader_schedule =
-LeaderSchedule::new(&ids_and_stakes, seed, genesis_block.slots_per_epoch);
+let leader_schedule = LeaderSchedule::new(
+&ids_and_stakes,
+seed,
+genesis_block.slots_per_epoch,
+NUM_CONSECUTIVE_LEADER_SLOTS,
+);
assert_eq!(leader_schedule[0], pubkey);
assert_eq!(leader_schedule[1], pubkey);
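For context on the seeding in leader_schedule() above: the epoch height is written little-endian into the first eight bytes of an otherwise-zero 32-byte seed, so each epoch deterministically gets its own schedule from the same stakes. A standalone sketch of just that seed construction (no solana crates required):

    // Mirrors the seed construction shown in the diff above.
    fn epoch_seed(epoch_height: u64) -> [u8; 32] {
        let mut seed = [0u8; 32];
        seed[0..8].copy_from_slice(&epoch_height.to_le_bytes());
        seed
    }

    fn main() {
        assert_ne!(epoch_seed(1), epoch_seed(2)); // distinct epochs get distinct seeds
        assert!(epoch_seed(7)[8..].iter().all(|&b| b == 0)); // remaining bytes stay zero
    }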


@@ -11,6 +11,8 @@ use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction::SystemTransaction;
+use solana_sdk::timing::DEFAULT_SLOTS_PER_EPOCH;
+use solana_sdk::timing::DEFAULT_TICKS_PER_SLOT;
use solana_vote_api::vote_state::VoteState;
use solana_vote_api::vote_transaction::VoteTransaction;
use std::fs::remove_dir_all;
@@ -40,7 +42,31 @@ impl LocalCluster {
cluster_lamports: u64,
fullnode_config: &FullnodeConfig,
) -> Self {
-Self::new_with_config_replicators(node_stakes, cluster_lamports, fullnode_config, 0)
+Self::new_with_config_replicators(
+node_stakes,
+cluster_lamports,
+fullnode_config,
+0,
+DEFAULT_TICKS_PER_SLOT,
+DEFAULT_SLOTS_PER_EPOCH,
+)
}
+pub fn new_with_tick_config(
+node_stakes: &[u64],
+cluster_lamports: u64,
+fullnode_config: &FullnodeConfig,
+ticks_per_slot: u64,
+slots_per_epoch: u64,
+) -> Self {
+Self::new_with_config_replicators(
+node_stakes,
+cluster_lamports,
+fullnode_config,
+0,
+ticks_per_slot,
+slots_per_epoch,
+)
+}
pub fn new_with_config_replicators(
@@ -48,12 +74,16 @@ impl LocalCluster {
cluster_lamports: u64,
fullnode_config: &FullnodeConfig,
num_replicators: usize,
+ticks_per_slot: u64,
+slots_per_epoch: u64,
) -> Self {
let leader_keypair = Arc::new(Keypair::new());
let leader_pubkey = leader_keypair.pubkey();
let leader_node = Node::new_localhost_with_pubkey(&leader_keypair.pubkey());
-let (genesis_block, mint_keypair) =
+let (mut genesis_block, mint_keypair) =
GenesisBlock::new_with_leader(cluster_lamports, &leader_pubkey, node_stakes[0]);
+genesis_block.ticks_per_slot = ticks_per_slot;
+genesis_block.slots_per_epoch = slots_per_epoch;
let (genesis_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);
let leader_ledger_path = tmp_copy_blocktree!(&genesis_ledger_path);
let mut ledger_paths = vec![];
@@ -301,6 +331,8 @@ mod test {
100,
&fullnode_exit,
num_replicators,
+16,
+16,
);
assert_eq!(cluster.fullnodes.len(), NUM_NODES);
assert_eq!(cluster.replicators.len(), num_replicators);
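Usage-wise, new_with_tick_config just exposes the tick parameters that new_with_config now fills with the SDK defaults. A hedged sketch of how a test might call it, mirroring the signatures visible in this diff (the stake and lamport values and the 16/16 tick settings are illustrative):

    use solana::fullnode::FullnodeConfig;
    use solana::local_cluster::LocalCluster;

    fn small_fast_cluster() -> LocalCluster {
        let fullnode_config = FullnodeConfig::default();
        // 16 ticks per slot and 16 slots per epoch, matching the values in the test above.
        LocalCluster::new_with_tick_config(&[100, 100], 10_000, &fullnode_config, 16, 16)
    }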


@@ -5,7 +5,6 @@ use solana::fullnode::FullnodeConfig;
use solana::gossip_service::discover;
use solana::local_cluster::LocalCluster;
use solana::poh_service::PohServiceConfig;
-use solana_sdk::timing::{DEFAULT_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT};
use std::thread::sleep;
use std::time::Duration;
@@ -98,19 +97,27 @@ fn test_leader_failure_3() {
fn test_two_unbalanced_stakes() {
let mut fullnode_config = FullnodeConfig::default();
let num_ticks_per_second = 100;
+let num_ticks_per_slot = 160;
+let num_slots_per_epoch = 16;
fullnode_config.tick_config =
PohServiceConfig::Sleep(Duration::from_millis(100 / num_ticks_per_second));
fullnode_config.rpc_config.enable_fullnode_exit = true;
-let mut cluster = LocalCluster::new_with_config(&[999_990, 3], 1_000_000, &fullnode_config);
+let mut cluster = LocalCluster::new_with_tick_config(
+&[999_990, 3],
+1_000_000,
+&fullnode_config,
+num_ticks_per_slot,
+num_slots_per_epoch,
+);
let num_epochs_to_sleep = 10;
-let num_ticks_to_sleep = num_epochs_to_sleep * DEFAULT_TICKS_PER_SLOT * DEFAULT_SLOTS_PER_EPOCH;
+let num_ticks_to_sleep = num_epochs_to_sleep * num_ticks_per_slot * num_slots_per_epoch;
sleep(Duration::from_millis(
-num_ticks_to_sleep / num_ticks_per_second * 100,
+num_ticks_to_sleep / num_ticks_per_second as u64 * 100,
));
cluster.close_preserve_ledgers();
let leader_ledger = cluster.ledger_paths[1].clone();
-cluster_tests::verify_ledger_ticks(&leader_ledger, DEFAULT_TICKS_PER_SLOT as usize);
+cluster_tests::verify_ledger_ticks(&leader_ledger, num_ticks_per_slot as usize);
}
#[test]
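The sleep in test_two_unbalanced_stakes above is now sized from the per-test tick parameters rather than the SDK defaults; with the values in this test it works out to about 25.6 seconds, enough to cover roughly ten 16-slot epochs. The arithmetic, spelled out as a standalone sketch:

    fn main() {
        let num_epochs_to_sleep: u64 = 10;
        let num_ticks_per_slot: u64 = 160;
        let num_slots_per_epoch: u64 = 16;
        let num_ticks_per_second: u64 = 100;
        let num_ticks_to_sleep = num_epochs_to_sleep * num_ticks_per_slot * num_slots_per_epoch;
        let sleep_ms = num_ticks_to_sleep / num_ticks_per_second * 100;
        assert_eq!(num_ticks_to_sleep, 25_600);
        assert_eq!(sleep_ms, 25_600); // ~25.6 s of wall-clock sleep
    }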


@@ -13,6 +13,8 @@ use solana::replicator::Replicator;
use solana::storage_stage::STORAGE_ROTATE_TEST_COUNT;
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::signature::{Keypair, KeypairUtil};
+use solana_sdk::timing::DEFAULT_SLOTS_PER_EPOCH;
+use solana_sdk::timing::DEFAULT_TICKS_PER_SLOT;
use std::fs::remove_dir_all;
use std::sync::Arc;
use std::time::Duration;
@@ -25,8 +27,14 @@ fn test_replicator_startup_basic() {
const NUM_NODES: usize = 2;
let mut fullnode_config = FullnodeConfig::default();
fullnode_config.storage_rotate_count = STORAGE_ROTATE_TEST_COUNT;
-let _cluster =
-LocalCluster::new_with_config_replicators(&[100; NUM_NODES], 10_000, &fullnode_config, 1);
+let _cluster = LocalCluster::new_with_config_replicators(
+&[100; NUM_NODES],
+10_000,
+&fullnode_config,
+1,
+DEFAULT_TICKS_PER_SLOT,
+DEFAULT_SLOTS_PER_EPOCH,
+);
}
#[test]


@@ -75,8 +75,10 @@ fn test_replay() {
let total_balance = 10_000;
let leader_balance = 100;
let starting_mint_balance = total_balance - leader_balance;
-let (genesis_block, mint_keypair) =
+let (mut genesis_block, mint_keypair) =
GenesisBlock::new_with_leader(total_balance, &leader.info.id, leader_balance);
+genesis_block.ticks_per_slot = 160;
+genesis_block.slots_per_epoch = 16;
let (blocktree_path, blockhash) = create_new_tmp_ledger!(&genesis_block);
let tvu_addr = target1.info.tvu;


@@ -6,8 +6,12 @@ pub const NUM_TICKS_PER_SECOND: u64 = 10;
// At 10 ticks/s, 8 ticks per slot implies that leader rotation and voting will happen
// every 800 ms. A fast voting cadence ensures faster finality and convergence
-pub const DEFAULT_TICKS_PER_SLOT: u64 = 160;
-pub const DEFAULT_SLOTS_PER_EPOCH: u64 = 16;
+pub const DEFAULT_TICKS_PER_SLOT: u64 = 8;
+// 1 Epoch = 800 * 4096 ms ~= 55 minutes
+pub const DEFAULT_SLOTS_PER_EPOCH: u64 = 4096;
+pub const NUM_CONSECUTIVE_LEADER_SLOTS: u64 = 8;
/// The time window of recent block hash values that the bank will track the signatures
/// of over. Once the bank discards a block hash, it will reject any transactions that use