Replace LeaderScheduler with LeaderScheduler1 (#2962)

* Migrate to LeaderScheduler1 (and add some missing methods)
* Delete LeaderScheduler
* Rename LeaderScheduler1 to LeaderScheduler
Pankaj Garg 2019-02-26 21:16:18 -08:00 committed by Greg Fitzgerald
parent 9750488200
commit 789fff2ae2
15 changed files with 335 additions and 1617 deletions

View File

@ -9,7 +9,6 @@ use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::hash::Hash;
use solana_sdk::timing::duration_as_ms;
use solana_sdk::timing::MAX_ENTRY_IDS;
use std::sync::{Arc, RwLock};
use std::time::Instant;
pub fn process_entry(bank: &Bank, entry: &Entry) -> Result<()> {
@ -51,11 +50,7 @@ fn par_execute_entries(bank: &Bank, entries: &[(&Entry, Vec<Result<()>>)]) -> Re
/// 2. Process the locked group in parallel
/// 3. Register the `Tick` if it's available
/// 4. Update the leader scheduler, goto 1
fn par_process_entries_with_scheduler(
bank: &Bank,
entries: &[Entry],
leader_scheduler: &Arc<RwLock<LeaderScheduler>>,
) -> Result<()> {
fn par_process_entries_with_scheduler(bank: &Bank, entries: &[Entry]) -> Result<()> {
// accumulator for entries that can be processed in parallel
let mut mt_group = vec![];
for entry in entries {
@ -63,10 +58,6 @@ fn par_process_entries_with_scheduler(
// if it's a tick, execute the group and register the tick
par_execute_entries(bank, &mt_group)?;
bank.register_tick(&entry.id);
leader_scheduler
.write()
.unwrap()
.update_tick_height(bank.tick_height(), bank);
mt_group = vec![];
continue;
}
@ -91,27 +82,15 @@ fn par_process_entries_with_scheduler(
}
/// Process an ordered list of entries.
pub fn process_entries(
bank: &Bank,
entries: &[Entry],
leader_scheduler: &Arc<RwLock<LeaderScheduler>>,
) -> Result<()> {
par_process_entries_with_scheduler(bank, entries, leader_scheduler)
pub fn process_entries(bank: &Bank, entries: &[Entry]) -> Result<()> {
par_process_entries_with_scheduler(bank, entries)
}
/// Process an ordered list of entries, populating a circular buffer "tail"
/// as we go.
fn process_block(
bank: &Bank,
entries: &[Entry],
leader_scheduler: &Arc<RwLock<LeaderScheduler>>,
) -> Result<()> {
fn process_block(bank: &Bank, entries: &[Entry]) -> Result<()> {
for entry in entries {
process_entry(bank, entry)?;
if entry.is_tick() {
let mut leader_scheduler = leader_scheduler.write().unwrap();
leader_scheduler.update_tick_height(bank.tick_height(), bank);
}
}
Ok(())
@ -128,7 +107,6 @@ pub struct BankForksInfo {
pub fn process_blocktree(
genesis_block: &GenesisBlock,
blocktree: &Blocktree,
leader_scheduler: &Arc<RwLock<LeaderScheduler>>,
) -> Result<(BankForks, Vec<BankForksInfo>)> {
let now = Instant::now();
info!("processing ledger...");
@ -138,10 +116,6 @@ pub fn process_blocktree(
let bank0 = Bank::new(&genesis_block);
let slot = 0;
let entry_height = 0;
leader_scheduler
.write()
.unwrap()
.update_tick_height(slot, &bank0);
let last_entry_id = bank0.last_id();
(
@ -197,7 +171,7 @@ pub fn process_blocktree(
return Err(BankError::LedgerVerificationFailed);
}
process_block(&bank, &entries, &leader_scheduler).map_err(|err| {
process_block(&bank, &entries).map_err(|err| {
warn!("Failed to process entries for slot {}: {:?}", slot, err);
BankError::LedgerVerificationFailed
})?;
@ -206,11 +180,7 @@ pub fn process_blocktree(
entry_height += entries.len() as u64;
}
let slot_complete = leader_scheduler
.read()
.unwrap()
.num_ticks_left_in_slot(bank.tick_height())
== 0;
let slot_complete = LeaderScheduler::num_ticks_left_in_slot(&bank, bank.tick_height()) == 0;
if !slot_complete || meta.next_slots.is_empty() {
// Reached the end of this fork. Record the final entry height and last entry id
@ -233,11 +203,7 @@ pub fn process_blocktree(
// This is a fork point, create a new child bank for each fork
pending_slots.extend(meta.next_slots.iter().map(|next_slot| {
let leader = leader_scheduler
.read()
.unwrap()
.get_leader_for_slot(*next_slot)
.unwrap();
let leader = LeaderScheduler::default().slot_leader_at(*next_slot, &bank);
let child_bank = Bank::new_from_parent_and_id(&bank, leader, *next_slot);
trace!("Add child bank for slot={}", next_slot);
bank_forks.insert(*next_slot, child_bank);
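The slot-completion check and the child-bank leader lookup above now ask a stateless `LeaderScheduler` with a `Bank` in hand, instead of reading shared `Arc<RwLock<LeaderScheduler>>` state. Below is a minimal, self-contained sketch of that call shape; `Bank`, `LeaderScheduler`, and the tick/leader semantics are simplified stand-ins, not the actual solana types (the new scheduler implementation itself is not visible in this diff view).

```rust
// Stand-in types only, to illustrate the new stateless call shape.
#[derive(Default)]
struct LeaderScheduler;

struct Bank {
    tick_height: u64,
    ticks_per_slot: u64,
    leaders: Vec<&'static str>,
}

impl Bank {
    fn tick_height(&self) -> u64 {
        self.tick_height
    }
    fn ticks_per_slot(&self) -> u64 {
        self.ticks_per_slot
    }
}

impl LeaderScheduler {
    // Assumed semantics: ticks remaining in the slot that contains `tick_height`.
    fn num_ticks_left_in_slot(bank: &Bank, tick_height: u64) -> u64 {
        bank.ticks_per_slot() - tick_height % bank.ticks_per_slot() - 1
    }

    // Assumed semantics: pick the slot leader from bank state (round-robin here).
    fn slot_leader_at(&self, slot: u64, bank: &Bank) -> &'static str {
        bank.leaders[(slot % bank.leaders.len() as u64) as usize]
    }
}

fn main() {
    let bank = Bank {
        tick_height: 7,
        ticks_per_slot: 8,
        leaders: vec!["alice", "bob"],
    };

    // Mirrors: let slot_complete = LeaderScheduler::num_ticks_left_in_slot(&bank, ...) == 0;
    let slot_complete = LeaderScheduler::num_ticks_left_in_slot(&bank, bank.tick_height()) == 0;

    // Mirrors: let leader = LeaderScheduler::default().slot_leader_at(*next_slot, &bank);
    let leader = LeaderScheduler::default().slot_leader_at(1, &bank);

    assert!(slot_complete); // tick 7 is the last tick of an 8-tick slot 0
    assert_eq!(leader, "bob");
}
```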
@ -289,7 +255,6 @@ mod tests {
fn test_process_blocktree_with_incomplete_slot() {
solana_logger::setup();
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::default()));
let (genesis_block, _mint_keypair) = GenesisBlock::new(10_000);
let ticks_per_slot = genesis_block.ticks_per_slot;
@ -321,7 +286,7 @@ mod tests {
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 1, 0, last_entry_id);
let (mut _bank_forks, bank_forks_info) =
process_blocktree(&genesis_block, &blocktree, &leader_scheduler).unwrap();
process_blocktree(&genesis_block, &blocktree).unwrap();
assert_eq!(bank_forks_info.len(), 1);
assert_eq!(
@ -339,7 +304,6 @@ mod tests {
fn test_process_blocktree_with_two_forks() {
solana_logger::setup();
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::default()));
let (genesis_block, _mint_keypair) = GenesisBlock::new(10_000);
let ticks_per_slot = genesis_block.ticks_per_slot;
@ -386,7 +350,7 @@ mod tests {
info!("last_fork2_entry_id: {:?}", last_fork2_entry_id);
let (mut bank_forks, bank_forks_info) =
process_blocktree(&genesis_block, &blocktree, &leader_scheduler).unwrap();
process_blocktree(&genesis_block, &blocktree).unwrap();
assert_eq!(bank_forks_info.len(), 2); // There are two forks
assert_eq!(
@ -449,8 +413,7 @@ mod tests {
}
fn par_process_entries(bank: &Bank, entries: &[Entry]) -> Result<()> {
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::default()));
par_process_entries_with_scheduler(bank, entries, &leader_scheduler)
par_process_entries_with_scheduler(bank, entries)
}
#[test]
@ -474,7 +437,6 @@ mod tests {
#[test]
fn test_process_ledger_simple() {
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::default()));
let leader_pubkey = Keypair::new().pubkey();
let (genesis_block, mint_keypair) = GenesisBlock::new_with_leader(100, leader_pubkey, 50);
let (ledger_path, tick_height, mut entry_height, mut last_id, mut last_entry_id) =
@ -513,8 +475,7 @@ mod tests {
.unwrap();
entry_height += entries.len() as u64;
let (bank_forks, bank_forks_info) =
process_blocktree(&genesis_block, &blocktree, &leader_scheduler).unwrap();
let (bank_forks, bank_forks_info) = process_blocktree(&genesis_block, &blocktree).unwrap();
assert_eq!(bank_forks_info.len(), 1);
assert_eq!(

View File

@ -279,7 +279,6 @@ mod test {
use crate::blocktree::{get_tmp_ledger_path, Blocktree};
use crate::cluster_info::{ClusterInfo, Node};
use crate::entry::create_ticks;
use crate::leader_scheduler::LeaderScheduler;
use crate::service::Service;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
@ -346,10 +345,6 @@ mod test {
{
// Create the leader scheduler
let leader_keypair = Keypair::new();
let mut leader_scheduler = LeaderScheduler::default();
// Mock the tick height to look like the tick height right after a leader transition
leader_scheduler.set_leader_schedule(vec![leader_keypair.pubkey()]);
let start_tick_height = 0;
let max_tick_height = start_tick_height + DEFAULT_TICKS_PER_SLOT;
@ -373,7 +368,7 @@ mod test {
let blocktree = broadcast_service.blocktree;
let mut blob_index = 0;
for i in 0..max_tick_height - start_tick_height {
let slot = leader_scheduler.tick_height_to_slot(start_tick_height + i + 1);
let slot = (start_tick_height + i + 1) / DEFAULT_TICKS_PER_SLOT;
let result = blocktree.get_data_blob(slot, blob_index).unwrap();
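The test now maps tick height to slot with plain integer division rather than calling `tick_height_to_slot` on a scheduler. A tiny sketch of that arithmetic, using 8 purely as a stand-in value for `DEFAULT_TICKS_PER_SLOT`:

```rust
// 8 is an assumed stand-in, not necessarily the real DEFAULT_TICKS_PER_SLOT.
const DEFAULT_TICKS_PER_SLOT: u64 = 8;

fn tick_height_to_slot(tick_height: u64) -> u64 {
    tick_height / DEFAULT_TICKS_PER_SLOT
}

fn main() {
    // The loop above looks up the blob written at tick `start_tick_height + i + 1`.
    assert_eq!(tick_height_to_slot(1), 0); // early ticks land in slot 0
    assert_eq!(tick_height_to_slot(8), 1); // tick 8 is the first tick of slot 1
    assert_eq!(tick_height_to_slot(15), 1);
}
```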

View File

@ -1,4 +1,5 @@
//! Set of functions for emulating windowing functions from a database ledger implementation
use crate::bank_forks::BankForks;
use crate::blocktree::*;
#[cfg(feature = "erasure")]
use crate::erasure;
@ -16,18 +17,18 @@ pub const MAX_REPAIR_LENGTH: usize = 128;
pub fn retransmit_blobs(
dq: &[SharedBlob],
leader_scheduler: &Arc<RwLock<LeaderScheduler>>,
bank_forks: &Arc<RwLock<BankForks>>,
retransmit: &BlobSender,
id: &Pubkey,
) -> Result<()> {
let mut retransmit_queue: Vec<SharedBlob> = Vec::new();
let leader_scheduler = LeaderScheduler::default();
for b in dq {
let bank = bank_forks.read().unwrap().working_bank();
// Don't add blobs generated by this node to the retransmit queue
let slot = b.read().unwrap().slot();
if let Some(leader_id) = leader_scheduler.read().unwrap().get_leader_for_slot(slot) {
if leader_id != *id {
retransmit_queue.push(b.clone());
}
if leader_scheduler.slot_leader_at(slot, &bank) != *id {
retransmit_queue.push(b.clone());
}
}
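A compact stand-in sketch (hypothetical types, not the real solana ones) of the filtering decision `retransmit_blobs` makes above: fetch the working bank from the shared `BankForks`, look up the slot leader statelessly, and skip blobs from slots this node itself led.

```rust
use std::sync::{Arc, RwLock};

type Pubkey = u64;

struct Bank;
struct BankForks {
    bank: Arc<Bank>,
}

impl BankForks {
    fn working_bank(&self) -> Arc<Bank> {
        self.bank.clone()
    }
}

// Assumed semantics: a stateless leader lookup for `slot`, derived from bank state.
fn slot_leader_at(slot: u64, _bank: &Bank) -> Pubkey {
    slot % 2 // toy schedule: node 0 leads even slots, node 1 leads odd slots
}

fn should_retransmit(slot: u64, bank_forks: &Arc<RwLock<BankForks>>, my_id: Pubkey) -> bool {
    let bank = bank_forks.read().unwrap().working_bank();
    // Don't re-queue blobs generated by this node, as in retransmit_blobs above.
    slot_leader_at(slot, &bank) != my_id
}

fn main() {
    let forks = Arc::new(RwLock::new(BankForks { bank: Arc::new(Bank) }));
    assert!(should_retransmit(0, &forks, 1)); // slot 0 led by node 0, so node 1 retransmits
    assert!(!should_retransmit(1, &forks, 1)); // node 1 led slot 1, so it drops the blob
}
```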
@ -50,7 +51,7 @@ pub fn retransmit_blobs(
/// Process a blob: Add blob to the ledger window.
pub fn process_blob(
leader_scheduler: &Arc<RwLock<LeaderScheduler>>,
bank_forks: &Arc<RwLock<BankForks>>,
blocktree: &Arc<Blocktree>,
blob: &SharedBlob,
) -> Result<()> {
@ -61,14 +62,11 @@ pub fn process_blob(
let r_blob = blob.read().unwrap();
(r_blob.slot(), r_blob.index())
};
let leader = leader_scheduler.read().unwrap().get_leader_for_slot(slot);
let bank = bank_forks.read().unwrap().working_bank();
let _leader = LeaderScheduler::default().slot_leader_at(slot, &bank);
// TODO: Once the original leader signature is added to the blob, make sure that
// the blob was originally generated by the expected leader for this slot
if leader.is_none() {
warn!("No leader for slot {}, blob dropped", slot);
return Ok(()); // Occurs as a leader is rotating into a validator
}
// Insert the new blob into block tree
if is_coding {
@ -126,7 +124,8 @@ mod test {
use crate::erasure::{NUM_CODING, NUM_DATA};
use crate::packet::{index_blobs, Blob, Packet, Packets, SharedBlob, PACKET_DATA_SIZE};
use crate::streamer::{receiver, responder, PacketReceiver};
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_runtime::bank::Bank;
use solana_sdk::genesis_block::GenesisBlock;
use std::io;
use std::io::Write;
use std::net::UdpSocket;
@ -194,54 +193,54 @@ mod test {
t_receiver.join().expect("join");
t_responder.join().expect("join");
}
/*
#[test]
pub fn test_send_to_retransmit_stage() {
let leader = Keypair::new().pubkey();
let nonleader = Keypair::new().pubkey();
let mut leader_scheduler = LeaderScheduler::default();
leader_scheduler.set_leader_schedule(vec![leader]);
let leader_scheduler = Arc::new(RwLock::new(leader_scheduler));
let blob = SharedBlob::default();
#[test]
pub fn test_send_to_retransmit_stage() {
let leader = Keypair::new().pubkey();
let nonleader = Keypair::new().pubkey();
let mut leader_scheduler = LeaderScheduler::default();
leader_scheduler.set_leader_schedule(vec![leader]);
let leader_scheduler = Arc::new(RwLock::new(leader_scheduler));
let blob = SharedBlob::default();
let (blob_sender, blob_receiver) = channel();
let (blob_sender, blob_receiver) = channel();
// Expect all blobs to be sent to retransmit_stage
blob.write().unwrap().forward(false);
retransmit_blobs(
&vec![blob.clone()],
&leader_scheduler,
&blob_sender,
&nonleader,
)
.expect("Expect successful retransmit");
let _ = blob_receiver
.try_recv()
.expect("Expect input blob to be retransmitted");
blob.write().unwrap().forward(true);
retransmit_blobs(
&vec![blob.clone()],
&leader_scheduler,
&blob_sender,
&nonleader,
)
.expect("Expect successful retransmit");
let output_blob = blob_receiver
.try_recv()
.expect("Expect input blob to be retransmitted");
// retransmit_blobs shouldn't be modifying the blob. That is retransmit stage's job now
assert_eq!(*output_blob[0].read().unwrap(), *blob.read().unwrap());
// Expect blob from leader while currently leader to not be retransmitted
// Even when forward is set
blob.write().unwrap().forward(true);
retransmit_blobs(&vec![blob], &leader_scheduler, &blob_sender, &leader)
// Expect all blobs to be sent to retransmit_stage
blob.write().unwrap().forward(false);
retransmit_blobs(
&vec![blob.clone()],
&leader_scheduler,
&blob_sender,
&nonleader,
)
.expect("Expect successful retransmit");
assert!(blob_receiver.try_recv().is_err());
}
let _ = blob_receiver
.try_recv()
.expect("Expect input blob to be retransmitted");
blob.write().unwrap().forward(true);
retransmit_blobs(
&vec![blob.clone()],
&leader_scheduler,
&blob_sender,
&nonleader,
)
.expect("Expect successful retransmit");
let output_blob = blob_receiver
.try_recv()
.expect("Expect input blob to be retransmitted");
// retransmit_blobs shouldn't be modifying the blob. That is retransmit stage's job now
assert_eq!(*output_blob[0].read().unwrap(), *blob.read().unwrap());
// Expect blob from leader while currently leader to not be retransmitted
// Even when forward is set
blob.write().unwrap().forward(true);
retransmit_blobs(&vec![blob], &leader_scheduler, &blob_sender, &leader)
.expect("Expect successful retransmit");
assert!(blob_receiver.try_recv().is_err());
}
*/
#[test]
pub fn test_find_missing_data_indexes_sanity() {
let slot = 0;
@ -533,21 +532,24 @@ mod test {
#[test]
fn test_process_blob() {
let mut leader_scheduler = LeaderScheduler::default();
leader_scheduler.set_leader_schedule(vec![Keypair::new().pubkey()]);
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap());
let leader_scheduler = Arc::new(RwLock::new(leader_scheduler));
let (genesis_block, _) = GenesisBlock::new(100);
let bank0 = Bank::new(&genesis_block);
let bank_id = 0;
let bank_forks = BankForks::new(bank_id, bank0);
let num_entries = 10;
let original_entries = make_tiny_test_entries(num_entries);
let shared_blobs = original_entries.clone().to_shared_blobs();
index_blobs(&shared_blobs, &mut 0, 0);
let bank_forks = Arc::new(RwLock::new(bank_forks));
for blob in shared_blobs.iter().rev() {
process_blob(&leader_scheduler, &blocktree, blob)
process_blob(&bank_forks, &blocktree, blob)
.expect("Expect successful processing of blob");
}

View File

@ -4,8 +4,11 @@ use crate::bank_forks::BankForks;
use crate::blocktree::Blocktree;
use crate::blocktree_processor::{self, BankForksInfo};
use crate::cluster_info::{ClusterInfo, Node, NodeInfo};
use crate::entry::create_ticks;
use crate::entry::next_entry_mut;
use crate::entry::Entry;
use crate::gossip_service::GossipService;
use crate::leader_scheduler::{LeaderScheduler, LeaderSchedulerConfig};
use crate::leader_scheduler::LeaderScheduler;
use crate::poh_recorder::PohRecorder;
use crate::poh_service::{PohService, PohServiceConfig};
use crate::rpc_pubsub_service::PubSubService;
@ -15,11 +18,15 @@ use crate::service::Service;
use crate::storage_stage::StorageState;
use crate::tpu::Tpu;
use crate::tvu::{Sockets, Tvu, TvuRotationInfo, TvuRotationReceiver};
use crate::voting_keypair::VotingKeypair;
use solana_metrics::counter::Counter;
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction::SystemTransaction;
use solana_sdk::timing::timestamp;
use solana_sdk::vote_transaction::VoteTransaction;
use std::net::UdpSocket;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::atomic::{AtomicBool, Ordering};
@ -63,7 +70,6 @@ pub struct FullnodeConfig {
pub voting_disabled: bool,
pub blockstream: Option<String>,
pub storage_rotate_count: u64,
pub leader_scheduler_config: LeaderSchedulerConfig,
pub tick_config: PohServiceConfig,
}
impl Default for FullnodeConfig {
@ -77,18 +83,11 @@ impl Default for FullnodeConfig {
voting_disabled: false,
blockstream: None,
storage_rotate_count: NUM_HASHES_FOR_STORAGE_ROTATE,
leader_scheduler_config: LeaderSchedulerConfig::default(),
tick_config: PohServiceConfig::default(),
}
}
}
impl FullnodeConfig {
pub fn ticks_per_slot(&self) -> u64 {
self.leader_scheduler_config.ticks_per_slot
}
}
pub struct Fullnode {
id: Pubkey,
exit: Arc<AtomicBool>,
@ -102,7 +101,6 @@ pub struct Fullnode {
rotation_receiver: TvuRotationReceiver,
blocktree: Arc<Blocktree>,
bank_forks: Arc<RwLock<BankForks>>,
leader_scheduler: Arc<RwLock<LeaderScheduler>>,
poh_service: PohService,
poh_recorder: Arc<Mutex<PohRecorder>>,
}
@ -124,11 +122,8 @@ impl Fullnode {
let id = keypair.pubkey();
assert_eq!(id, node.info.id);
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::new(
&config.leader_scheduler_config,
)));
let (mut bank_forks, bank_forks_info, blocktree, ledger_signal_receiver) =
new_banks_from_blocktree(ledger_path, config.ticks_per_slot(), &leader_scheduler);
new_banks_from_blocktree(ledger_path);
let exit = Arc::new(AtomicBool::new(false));
let bank_info = &bank_forks_info[0];
@ -250,7 +245,6 @@ impl Fullnode {
&storage_state,
config.blockstream.as_ref(),
ledger_signal_receiver,
leader_scheduler.clone(),
&subscriptions,
);
let tpu = Tpu::new(id, &cluster_info);
@ -269,7 +263,6 @@ impl Fullnode {
rotation_receiver,
blocktree,
bank_forks,
leader_scheduler,
poh_service,
poh_recorder,
}
@ -303,14 +296,8 @@ impl Fullnode {
}
}
None => {
if self
.leader_scheduler
.read()
.unwrap()
.get_leader_for_slot(rotation_info.slot.saturating_sub(1))
.unwrap()
== self.id
{
let bank = self.bank_forks.read().unwrap().working_bank();
if LeaderScheduler::default().prev_slot_leader(&bank) == self.id {
FullnodeReturnType::LeaderToLeaderRotation
} else {
FullnodeReturnType::ValidatorToLeaderRotation
@ -418,17 +405,16 @@ impl Fullnode {
pub fn new_banks_from_blocktree(
blocktree_path: &str,
ticks_per_slot: u64,
leader_scheduler: &Arc<RwLock<LeaderScheduler>>,
) -> (BankForks, Vec<BankForksInfo>, Blocktree, Receiver<bool>) {
let (blocktree, ledger_signal_receiver) =
Blocktree::open_with_config_signal(blocktree_path, ticks_per_slot)
.expect("Expected to successfully open database ledger");
let genesis_block =
GenesisBlock::load(blocktree_path).expect("Expected to successfully open genesis block");
let (blocktree, ledger_signal_receiver) =
Blocktree::open_with_config_signal(blocktree_path, genesis_block.ticks_per_slot)
.expect("Expected to successfully open database ledger");
let (bank_forks, bank_forks_info) =
blocktree_processor::process_blocktree(&genesis_block, &blocktree, leader_scheduler)
blocktree_processor::process_blocktree(&genesis_block, &blocktree)
.expect("process_blocktree failed");
(
@ -459,14 +445,58 @@ impl Service for Fullnode {
}
}
// Create entries such that the node identified by active_keypair
// will be added to the active set for leader selection:
// 1) Give the node a nonzero number of tokens,
// 2) A vote from the validator
pub fn make_active_set_entries(
active_keypair: &Arc<Keypair>,
token_source: &Keypair,
stake: u64,
slot_height_to_vote_on: u64,
last_entry_id: &Hash,
last_tick_id: &Hash,
num_ending_ticks: u64,
) -> (Vec<Entry>, VotingKeypair) {
// 1) Assume the active_keypair node has no tokens staked
let transfer_tx = SystemTransaction::new_account(
&token_source,
active_keypair.pubkey(),
stake,
*last_tick_id,
0,
);
let mut last_entry_id = *last_entry_id;
let transfer_entry = next_entry_mut(&mut last_entry_id, 1, vec![transfer_tx]);
// 2) Create and register a vote account for active_keypair
let voting_keypair = VotingKeypair::new_local(active_keypair);
let vote_account_id = voting_keypair.pubkey();
let new_vote_account_tx =
VoteTransaction::new_account(active_keypair, vote_account_id, *last_tick_id, 1, 1);
let new_vote_account_entry = next_entry_mut(&mut last_entry_id, 1, vec![new_vote_account_tx]);
// 3) Create vote entry
let vote_tx =
VoteTransaction::new_vote(&voting_keypair, slot_height_to_vote_on, *last_tick_id, 0);
let vote_entry = next_entry_mut(&mut last_entry_id, 1, vec![vote_tx]);
// 4) Create the ending empty ticks
let mut txs = vec![transfer_entry, new_vote_account_entry, vote_entry];
let empty_ticks = create_ticks(num_ending_ticks, last_entry_id);
txs.extend(empty_ticks);
(txs, voting_keypair)
}
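A hedged usage sketch of `make_active_set_entries` as the tests in this crate consume it; the stake, slot, and tick counts are arbitrary example values, and the crate-internal import assumes a test context inside this crate.

```rust
use crate::fullnode::make_active_set_entries;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::sync::Arc;

fn example() {
    let active_keypair = Arc::new(Keypair::new());
    let token_source = Keypair::new();
    let last_entry_id = Hash::default();
    let last_tick_id = Hash::default();

    let (entries, _voting_keypair) = make_active_set_entries(
        &active_keypair,
        &token_source,
        500, // stake transferred to the new node
        0,   // slot height to vote on
        &last_entry_id,
        &last_tick_id,
        1,   // trailing empty ticks
    );

    // transfer + vote-account + vote entries, plus the one trailing tick
    assert_eq!(entries.len(), 4);
}
```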
#[cfg(test)]
mod tests {
use super::*;
use crate::blocktree::{create_tmp_sample_blocktree, tmp_copy_blocktree};
use crate::entry::make_consecutive_blobs;
use crate::leader_scheduler::make_active_set_entries;
use crate::streamer::responder;
use solana_sdk::hash::Hash;
use solana_sdk::timing::DEFAULT_SLOTS_PER_EPOCH;
use solana_sdk::timing::DEFAULT_TICKS_PER_SLOT;
use std::fs::remove_dir_all;
@ -555,17 +585,14 @@ mod tests {
// epoch, check that the same leader knows to shut down and restart as a leader again.
let ticks_per_slot = 5;
let slots_per_epoch = 2;
let active_window_num_slots = 10;
let leader_scheduler_config =
LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_num_slots);
let voting_keypair = Keypair::new();
let mut fullnode_config = FullnodeConfig::default();
fullnode_config.leader_scheduler_config = leader_scheduler_config;
let fullnode_config = FullnodeConfig::default();
let (mut genesis_block, _mint_keypair) =
GenesisBlock::new_with_leader(10_000, bootstrap_leader_keypair.pubkey(), 500);
genesis_block.ticks_per_slot = fullnode_config.ticks_per_slot();
genesis_block.ticks_per_slot = ticks_per_slot;
genesis_block.slots_per_epoch = slots_per_epoch;
let (
bootstrap_leader_ledger_path,
@ -602,14 +629,13 @@ mod tests {
}
#[test]
#[ignore]
fn test_wrong_role_transition() {
solana_logger::setup();
let mut fullnode_config = FullnodeConfig::default();
let ticks_per_slot = 16;
let slots_per_epoch = 2;
fullnode_config.leader_scheduler_config =
LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, slots_per_epoch);
let fullnode_config = FullnodeConfig::default();
let ticks_per_slot = DEFAULT_TICKS_PER_SLOT;
let slots_per_epoch = DEFAULT_SLOTS_PER_EPOCH;
// Create the leader and validator nodes
let bootstrap_leader_keypair = Arc::new(Keypair::new());
@ -648,7 +674,10 @@ mod tests {
let bootstrap_leader_exit = bootstrap_leader.run(Some(rotation_sender));
assert_eq!(
rotation_receiver.recv().unwrap(),
(FullnodeReturnType::LeaderToValidatorRotation, 2)
(
FullnodeReturnType::LeaderToValidatorRotation,
DEFAULT_SLOTS_PER_EPOCH
)
);
// Test that a node knows to transition to a leader based on parsing the ledger
@ -665,7 +694,10 @@ mod tests {
let validator_exit = validator.run(Some(rotation_sender));
assert_eq!(
rotation_receiver.recv().unwrap(),
(FullnodeReturnType::ValidatorToLeaderRotation, 2)
(
FullnodeReturnType::ValidatorToLeaderRotation,
DEFAULT_SLOTS_PER_EPOCH
)
);
validator_exit();
@ -688,9 +720,7 @@ mod tests {
let slots_per_epoch = 4;
let leader_keypair = Arc::new(Keypair::new());
let validator_keypair = Arc::new(Keypair::new());
let mut fullnode_config = FullnodeConfig::default();
fullnode_config.leader_scheduler_config =
LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, slots_per_epoch);
let fullnode_config = FullnodeConfig::default();
let (leader_node, validator_node, validator_ledger_path, ledger_initial_len, last_id) =
setup_leader_validator(
&leader_keypair,
@ -698,7 +728,7 @@ mod tests {
0,
0,
"test_validator_to_leader_transition",
fullnode_config.ticks_per_slot(),
ticks_per_slot,
);
let leader_id = leader_keypair.pubkey();
@ -754,12 +784,7 @@ mod tests {
// Close the validator so that rocksdb has locks available
validator_exit();
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::default()));
let (bank_forks, bank_forks_info, _, _) = new_banks_from_blocktree(
&validator_ledger_path,
DEFAULT_TICKS_PER_SLOT,
&leader_scheduler,
);
let (bank_forks, bank_forks_info, _, _) = new_banks_from_blocktree(&validator_ledger_path);
let bank = bank_forks.working_bank();
let entry_height = bank_forks_info[0].entry_height;

File diff suppressed because it is too large

View File

@ -1,129 +0,0 @@
//! The `bank` module tracks client accounts and the progress of on-chain
//! programs. It offers a high-level API that signs transactions
//! on behalf of the caller, and a low-level API for when they have
//! already been signed and verified.
use crate::leader_schedule::LeaderSchedule;
use solana_runtime::bank::Bank;
use solana_sdk::pubkey::Pubkey;
#[derive(Default)]
pub struct LeaderScheduler1 {}
impl LeaderScheduler1 {
/// Return the leader schedule for the given epoch.
fn leader_schedule(&self, epoch_height: u64, bank: &Bank) -> LeaderSchedule {
let stakes = bank.staked_nodes_at_epoch(epoch_height);
let mut seed = [0u8; 32];
seed[0..8].copy_from_slice(&epoch_height.to_le_bytes());
let stakes: Vec<_> = stakes.into_iter().collect();
LeaderSchedule::new(&stakes, seed, bank.slots_per_epoch())
}
/// Return the leader for the slot at the slot_index and epoch_height returned
/// by the given function.
pub fn slot_leader_by<F>(&self, bank: &Bank, get_slot_index: F) -> Pubkey
where
F: Fn(u64, u64, u64) -> (u64, u64),
{
let (slot_index, epoch_height) = get_slot_index(
bank.slot_index(),
bank.epoch_height(),
bank.slots_per_epoch(),
);
let leader_schedule = self.leader_schedule(epoch_height, bank);
leader_schedule[slot_index as usize]
}
/// Return the leader for the current slot.
pub fn slot_leader(&self, bank: &Bank) -> Pubkey {
self.slot_leader_by(bank, |slot_index, epoch_height, _| {
(slot_index, epoch_height)
})
}
/// Return the epoch height and slot index of the slot before the current slot.
fn prev_slot_leader_index(
slot_index: u64,
epoch_height: u64,
slots_per_epoch: u64,
) -> (u64, u64) {
if epoch_height == 0 && slot_index == 0 {
return (0, 0);
}
if slot_index == 0 {
(slots_per_epoch - 1, epoch_height - 1)
} else {
(slot_index - 1, epoch_height)
}
}
/// Return the slot_index and epoch height of the slot following the current slot.
fn next_slot_leader_index(
slot_index: u64,
epoch_height: u64,
slots_per_epoch: u64,
) -> (u64, u64) {
if slot_index + 1 == slots_per_epoch {
(0, epoch_height + 1)
} else {
(slot_index + 1, epoch_height)
}
}
/// Return the leader for the slot before the current slot.
pub fn prev_slot_leader(&self, bank: &Bank) -> Pubkey {
self.slot_leader_by(bank, Self::prev_slot_leader_index)
}
/// Return the leader for the slot following the current slot.
pub fn next_slot_leader(&self, bank: &Bank) -> Pubkey {
self.slot_leader_by(bank, Self::next_slot_leader_index)
}
}
#[cfg(test)]
mod tests {
use super::*;
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::signature::{Keypair, KeypairUtil};
#[test]
fn test_leader_schedule_via_bank() {
let pubkey = Keypair::new().pubkey();
let (genesis_block, _mint_keypair) = GenesisBlock::new_with_leader(2, pubkey, 2);
let bank = Bank::new(&genesis_block);
let ids_and_stakes: Vec<_> = bank.staked_nodes().into_iter().collect();
let seed = [0u8; 32];
let leader_schedule =
LeaderSchedule::new(&ids_and_stakes, seed, genesis_block.slots_per_epoch);
assert_eq!(leader_schedule[0], pubkey);
assert_eq!(leader_schedule[1], pubkey);
assert_eq!(leader_schedule[2], pubkey);
}
#[test]
fn test_leader_scheduler1_basic() {
let pubkey = Keypair::new().pubkey();
let genesis_block = GenesisBlock::new_with_leader(2, pubkey, 2).0;
let bank = Bank::new(&genesis_block);
let leader_scheduler = LeaderScheduler1::default();
assert_eq!(leader_scheduler.slot_leader(&bank), pubkey);
}
#[test]
fn test_leader_scheduler1_prev_slot_leader_index() {
assert_eq!(LeaderScheduler1::prev_slot_leader_index(0, 0, 2), (0, 0));
assert_eq!(LeaderScheduler1::prev_slot_leader_index(1, 0, 2), (0, 0));
assert_eq!(LeaderScheduler1::prev_slot_leader_index(0, 1, 2), (1, 0));
}
#[test]
fn test_leader_scheduler1_next_slot_leader_index() {
assert_eq!(LeaderScheduler1::next_slot_leader_index(0, 0, 2), (1, 0));
assert_eq!(LeaderScheduler1::next_slot_leader_index(1, 0, 2), (0, 1));
}
}
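One more hedged example in the style of the tests above: with a single staked leader in the genesis block, the previous, current, and next slots all resolve to that leader. (After this commit the same methods live on the renamed `LeaderScheduler`; the type name below follows the deleted file shown here.)

```rust
use solana_runtime::bank::Bank;
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::signature::{Keypair, KeypairUtil};

// Assumes LeaderScheduler1 (shown above) is in scope, e.g. via `use super::*;`
// inside the file's test module.
fn prev_and_next_slot_leader_single_leader() {
    let pubkey = Keypair::new().pubkey();
    let (genesis_block, _mint_keypair) = GenesisBlock::new_with_leader(2, pubkey, 2);
    let bank = Bank::new(&genesis_block);
    let scheduler = LeaderScheduler1::default();

    assert_eq!(scheduler.prev_slot_leader(&bank), pubkey);
    assert_eq!(scheduler.slot_leader(&bank), pubkey);
    assert_eq!(scheduler.next_slot_leader(&bank), pubkey);
}
```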

View File

@ -42,7 +42,6 @@ pub mod gossip_service;
pub mod leader_confirmation_service;
pub mod leader_schedule;
pub mod leader_scheduler;
pub mod leader_scheduler1;
pub mod local_vote_signer_service;
pub mod packet;
pub mod poh;

View File

@ -63,7 +63,6 @@ impl ReplayStage {
ledger_entry_sender: &EntrySender,
current_blob_index: &mut u64,
last_entry_id: &mut Hash,
leader_scheduler: &Arc<RwLock<LeaderScheduler>>,
subscriptions: &Arc<RpcSubscriptions>,
slot: u64,
parent_slot: Option<u64>,
@ -89,14 +88,9 @@ impl ReplayStage {
);
let num_ticks = bank.tick_height();
let (mut num_ticks_to_next_vote, slot_height, leader_id) = {
let rl = leader_scheduler.read().unwrap();
(
rl.num_ticks_left_in_slot(num_ticks),
rl.tick_height_to_slot(num_ticks),
rl.get_leader_for_slot(slot).expect("Leader not known"),
)
};
let slot_height = bank.slot_height();
let leader_id = LeaderScheduler::default().slot_leader(bank);
let mut num_ticks_to_next_vote = LeaderScheduler::num_ticks_left_in_slot(bank, num_ticks);
let mut entry_tick_height = num_ticks;
let mut entries_with_meta = Vec::new();
@ -125,7 +119,7 @@ impl ReplayStage {
// If we don't process the entry now, the for loop will exit and the entry
// will be dropped.
if 0 == num_ticks_to_next_vote || (i + 1) == entries.len() {
res = blocktree_processor::process_entries(bank, &entries[0..=i], leader_scheduler);
res = blocktree_processor::process_entries(bank, &entries[0..=i]);
if res.is_err() {
// TODO: This will return early from the first entry that has an erroneous
@ -193,7 +187,6 @@ impl ReplayStage {
exit: Arc<AtomicBool>,
to_leader_sender: &TvuRotationSender,
ledger_signal_receiver: Receiver<bool>,
leader_scheduler: &Arc<RwLock<LeaderScheduler>>,
subscriptions: &Arc<RpcSubscriptions>,
) -> (Self, EntryReceiver)
where
@ -201,7 +194,6 @@ impl ReplayStage {
{
let (ledger_entry_sender, ledger_entry_receiver) = channel();
let exit_ = exit.clone();
let leader_scheduler_ = leader_scheduler.clone();
let to_leader_sender = to_leader_sender.clone();
let subscriptions_ = subscriptions.clone();
@ -221,13 +213,10 @@ impl ReplayStage {
// Update Tpu and other fullnode components with the current bank
let (mut current_slot, mut current_leader_id, mut max_tick_height_for_slot) = {
let leader_scheduler = leader_scheduler.read().unwrap();
let slot = leader_scheduler.tick_height_to_slot(tick_height + 1);
let slot = (tick_height + 1) / bank.ticks_per_slot();
let first_tick_in_slot = slot * bank.ticks_per_slot();
let leader_id = leader_scheduler
.get_leader_for_slot(slot)
.expect("Leader not known after processing bank");
let leader_id = LeaderScheduler::default().slot_leader_at(slot, &bank);
trace!("node {:?} scheduled as leader for slot {}", leader_id, slot,);
let old_bank = bank.clone();
@ -252,8 +241,8 @@ impl ReplayStage {
})
.unwrap();
let max_tick_height_for_slot =
first_tick_in_slot + leader_scheduler.num_ticks_left_in_slot(first_tick_in_slot);
let max_tick_height_for_slot = first_tick_in_slot
+ LeaderScheduler::num_ticks_left_in_slot(&bank, first_tick_in_slot);
(Some(slot), leader_id, max_tick_height_for_slot)
};
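A worked example, with stand-in values rather than the real `Bank`, of the slot arithmetic the replay stage now performs directly: which slot the next tick falls in, where that slot starts, and the last tick height it covers (assuming `num_ticks_left_in_slot` means ticks remaining before the slot ends).

```rust
fn main() {
    let ticks_per_slot: u64 = 8; // assumed value; the real one comes from bank.ticks_per_slot()
    let tick_height: u64 = 9;    // ticks processed so far

    let slot = (tick_height + 1) / ticks_per_slot;   // next tick (10) falls in slot 1
    let first_tick_in_slot = slot * ticks_per_slot;  // slot 1 starts at tick 8

    // Assumed semantics: ticks_per_slot - height % ticks_per_slot - 1 ticks remain in the slot.
    let num_ticks_left = ticks_per_slot - first_tick_in_slot % ticks_per_slot - 1;
    let max_tick_height_for_slot = first_tick_in_slot + num_ticks_left;

    assert_eq!((slot, first_tick_in_slot, max_tick_height_for_slot), (1, 8, 15));
    println!(
        "slot={} first_tick={} max_tick={}",
        slot, first_tick_in_slot, max_tick_height_for_slot
    );
}
```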
@ -340,7 +329,6 @@ impl ReplayStage {
&ledger_entry_sender,
&mut current_blob_index,
&mut last_entry_id,
&leader_scheduler_,
&subscriptions_,
slot,
parent_slot,
@ -354,21 +342,11 @@ impl ReplayStage {
// We've reached the end of a slot, reset our state and check
// for leader rotation
if max_tick_height_for_slot == current_tick_height {
// TODO: replace this with generating an actual leader schedule
// from the bank
leader_scheduler_
.write()
.unwrap()
.update_tick_height(current_tick_height, &bank);
// Check for leader rotation
let (leader_id, next_slot) = {
let leader_scheduler = leader_scheduler_.read().unwrap();
(
leader_scheduler
.get_leader_for_tick(current_tick_height + 1)
.unwrap(),
leader_scheduler.tick_height_to_slot(current_tick_height + 1),
)
let slot = (current_tick_height + 1) / bank.ticks_per_slot();
(LeaderScheduler::default().slot_leader_at(slot, &bank), slot)
};
// If we were the leader for the last slot update the last id b/c we
@ -488,15 +466,12 @@ mod test {
use crate::cluster_info::{ClusterInfo, Node};
use crate::entry::create_ticks;
use crate::entry::{next_entry_mut, Entry};
use crate::fullnode::make_active_set_entries;
use crate::fullnode::new_banks_from_blocktree;
use crate::leader_scheduler::{
make_active_set_entries, LeaderScheduler, LeaderSchedulerConfig,
};
use crate::replay_stage::ReplayStage;
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::timing::DEFAULT_TICKS_PER_SLOT;
use std::fs::remove_dir_all;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
@ -518,13 +493,10 @@ mod test {
// Set up the LeaderScheduler so that my_id becomes the leader for epoch 1
let ticks_per_slot = 16;
let leader_scheduler_config = LeaderSchedulerConfig::new(ticks_per_slot, 1, ticks_per_slot);
let leader_scheduler =
Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config)));
let (mut genesis_block, mint_keypair) =
GenesisBlock::new_with_leader(10_000, old_leader_id, 500);
genesis_block.ticks_per_slot = ticks_per_slot;
genesis_block.slots_per_epoch = 1;
// Create a ledger
let (my_ledger_path, mut tick_height, entry_height, mut last_id, last_entry_id) =
@ -561,7 +533,7 @@ mod test {
{
// Set up the bank
let (bank_forks, bank_forks_info, blocktree, ledger_signal_receiver) =
new_banks_from_blocktree(&my_ledger_path, ticks_per_slot, &leader_scheduler);
new_banks_from_blocktree(&my_ledger_path);
// Set up the replay stage
let (rotation_sender, rotation_receiver) = channel();
@ -577,7 +549,6 @@ mod test {
exit.clone(),
&rotation_sender,
ledger_signal_receiver,
&leader_scheduler,
&Arc::new(RpcSubscriptions::default()),
);
@ -657,12 +628,8 @@ mod test {
let voting_keypair = Arc::new(Keypair::new());
let (to_leader_sender, _to_leader_receiver) = channel();
{
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::default()));
let (bank_forks, bank_forks_info, blocktree, l_receiver) = new_banks_from_blocktree(
&my_ledger_path,
DEFAULT_TICKS_PER_SLOT,
&leader_scheduler,
);
let (bank_forks, bank_forks_info, blocktree, l_receiver) =
new_banks_from_blocktree(&my_ledger_path);
let bank = bank_forks.working_bank();
let entry_height = bank_forks_info[0].entry_height;
let last_entry_id = bank_forks_info[0].last_entry_id;
@ -678,7 +645,6 @@ mod test {
exit.clone(),
&to_leader_sender,
l_receiver,
&leader_scheduler,
&Arc::new(RpcSubscriptions::default()),
);
@ -755,11 +721,6 @@ mod test {
.unwrap();
}
let leader_scheduler_config =
LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_tick_length);
let leader_scheduler =
Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config)));
// Set up the cluster info
let cluster_info_me = Arc::new(RwLock::new(ClusterInfo::new(my_node.info.clone())));
@ -768,7 +729,7 @@ mod test {
let exit = Arc::new(AtomicBool::new(false));
{
let (bank_forks, bank_forks_info, blocktree, l_receiver) =
new_banks_from_blocktree(&my_ledger_path, ticks_per_slot, &leader_scheduler);
new_banks_from_blocktree(&my_ledger_path);
let bank = bank_forks.working_bank();
let meta = blocktree
.meta(0)
@ -787,7 +748,6 @@ mod test {
exit.clone(),
&rotation_sender,
l_receiver,
&leader_scheduler,
&Arc::new(RpcSubscriptions::default()),
);
@ -862,8 +822,6 @@ mod test {
let genesis_block = GenesisBlock::new(10_000).0;
let bank = Arc::new(Bank::new(&genesis_block));
let leader_scheduler = LeaderScheduler::new_with_bank(&bank);
let leader_scheduler = Arc::new(RwLock::new(leader_scheduler));
let voting_keypair = Some(Arc::new(Keypair::new()));
let res = ReplayStage::process_entries(
entries.clone(),
@ -873,7 +831,6 @@ mod test {
&ledger_entry_sender,
&mut current_blob_index,
&mut last_entry_id,
&leader_scheduler,
&Arc::new(RpcSubscriptions::default()),
0,
None,
@ -891,8 +848,6 @@ mod test {
}
let bank = Arc::new(Bank::new(&genesis_block));
let leader_scheduler = LeaderScheduler::new_with_bank(&bank);
let leader_scheduler = Arc::new(RwLock::new(leader_scheduler));
let res = ReplayStage::process_entries(
entries.clone(),
&bank,
@ -901,7 +856,6 @@ mod test {
&ledger_entry_sender,
&mut current_blob_index,
&mut last_entry_id,
&leader_scheduler,
&Arc::new(RpcSubscriptions::default()),
0,
None,

View File

@ -1,11 +1,11 @@
use crate::blob_fetch_stage::BlobFetchStage;
use crate::blocktree::Blocktree;
use crate::blocktree_processor;
#[cfg(feature = "chacha")]
use crate::chacha::{chacha_cbc_encrypt_ledger, CHACHA_BLOCK_SIZE};
use crate::client::mk_client;
use crate::cluster_info::{ClusterInfo, Node, NodeInfo};
use crate::gossip_service::GossipService;
use crate::leader_scheduler::LeaderScheduler;
use crate::result::{self, Result};
use crate::rpc_request::{RpcClient, RpcRequest, RpcRequestHandler};
use crate::service::Service;
@ -16,6 +16,7 @@ use crate::window_service::WindowService;
use rand::thread_rng;
use rand::Rng;
use solana_drone::drone::{request_airdrop_transaction, DRONE_PORT};
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::hash::{Hash, Hasher};
use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
use solana_sdk::storage_program::StorageTransaction;
@ -134,6 +135,13 @@ impl Replicator {
let blocktree =
Blocktree::open(ledger_path).expect("Expected to be able to open database ledger");
let genesis_block =
GenesisBlock::load(ledger_path).expect("Expected to successfully open genesis block");
let (bank_forks, _bank_forks_info) =
blocktree_processor::process_blocktree(&genesis_block, &blocktree)
.expect("process_blocktree failed");
let blocktree = Arc::new(blocktree);
//TODO(sagar) Does replicator need a bank also ?
@ -175,7 +183,7 @@ impl Replicator {
blob_fetch_receiver,
retransmit_sender,
repair_socket,
Arc::new(RwLock::new(LeaderScheduler::default())),
Arc::new(RwLock::new(bank_forks)),
exit.clone(),
);

View File

@ -6,7 +6,6 @@ use crate::cluster_info::{
compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT, GROW_LAYER_CAPACITY,
NEIGHBORHOOD_SIZE,
};
use crate::leader_scheduler::LeaderScheduler;
use crate::packet::SharedBlob;
use crate::result::{Error, Result};
use crate::service::Service;
@ -113,7 +112,6 @@ impl RetransmitStage {
retransmit_socket: Arc<UdpSocket>,
repair_socket: Arc<UdpSocket>,
fetch_stage_receiver: BlobReceiver,
leader_scheduler: Arc<RwLock<LeaderScheduler>>,
exit: Arc<AtomicBool>,
) -> Self {
let (retransmit_sender, retransmit_receiver) = channel();
@ -130,7 +128,7 @@ impl RetransmitStage {
fetch_stage_receiver,
retransmit_sender,
repair_socket,
leader_scheduler,
bank_forks.clone(),
exit,
);

View File

@ -18,7 +18,6 @@ use crate::blockstream_service::BlockstreamService;
use crate::blocktree::Blocktree;
use crate::blocktree_processor::BankForksInfo;
use crate::cluster_info::ClusterInfo;
use crate::leader_scheduler::LeaderScheduler;
use crate::replay_stage::ReplayStage;
use crate::retransmit_stage::RetransmitStage;
use crate::rpc_subscriptions::RpcSubscriptions;
@ -78,7 +77,6 @@ impl Tvu {
storage_state: &StorageState,
blockstream: Option<&String>,
ledger_signal_receiver: Receiver<bool>,
leader_scheduler: Arc<RwLock<LeaderScheduler>>,
subscriptions: &Arc<RpcSubscriptions>,
) -> Self
where
@ -116,7 +114,6 @@ impl Tvu {
Arc::new(retransmit_socket),
repair_socket,
blob_fetch_receiver,
leader_scheduler.clone(),
exit.clone(),
);
@ -130,7 +127,6 @@ impl Tvu {
exit.clone(),
to_leader_sender,
ledger_signal_receiver,
&leader_scheduler,
subscriptions,
);
@ -226,8 +222,6 @@ pub mod tests {
last_entry_id: Hash::default(),
next_blob_index: 0,
}];
let leader_scheduler = LeaderScheduler::new_with_bank(&bank_forks.working_bank());
let leader_scheduler = Arc::new(RwLock::new(leader_scheduler));
//start cluster_info1
let mut cluster_info1 = ClusterInfo::new(target1.info.clone());
@ -257,7 +251,6 @@ pub mod tests {
&StorageState::default(),
None,
l_receiver,
leader_scheduler,
&Arc::new(RpcSubscriptions::default()),
);
tvu.close().expect("close");

View File

@ -1,9 +1,9 @@
//! The `window_service` provides a thread for maintaining a window (tail of the ledger).
//!
use crate::bank_forks::BankForks;
use crate::blocktree::Blocktree;
use crate::cluster_info::ClusterInfo;
use crate::db_window::*;
use crate::leader_scheduler::LeaderScheduler;
use crate::repair_service::RepairService;
use crate::result::{Error, Result};
use crate::service::Service;
@ -30,7 +30,7 @@ pub enum WindowServiceReturnType {
fn recv_window(
blocktree: &Arc<Blocktree>,
id: &Pubkey,
leader_scheduler: &Arc<RwLock<LeaderScheduler>>,
bank_forks: &Arc<RwLock<BankForks>>,
r: &BlobReceiver,
retransmit: &BlobSender,
) -> Result<()> {
@ -49,7 +49,7 @@ fn recv_window(
.to_owned(),
);
retransmit_blobs(&dq, leader_scheduler, retransmit, id)?;
retransmit_blobs(&dq, bank_forks, retransmit, id)?;
//send a contiguous set of blocks
trace!("{} num blobs received: {}", id, dq.len());
@ -62,7 +62,7 @@ fn recv_window(
trace!("{} window pix: {} size: {}", id, pix, meta_size);
let _ = process_blob(leader_scheduler, blocktree, &b);
let _ = process_blob(bank_forks, blocktree, &b);
}
trace!(
@ -104,7 +104,7 @@ impl WindowService {
r: BlobReceiver,
retransmit: BlobSender,
repair_socket: Arc<UdpSocket>,
leader_scheduler: Arc<RwLock<LeaderScheduler>>,
bank_forks: Arc<RwLock<BankForks>>,
exit: Arc<AtomicBool>,
) -> WindowService {
let exit_ = exit.clone();
@ -124,8 +124,7 @@ impl WindowService {
if exit.load(Ordering::Relaxed) {
break;
}
if let Err(e) = recv_window(&blocktree, &id, &leader_scheduler, &r, &retransmit)
{
if let Err(e) = recv_window(&blocktree, &id, &bank_forks, &r, &retransmit) {
match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
@ -157,14 +156,16 @@ impl Service for WindowService {
#[cfg(test)]
mod test {
use crate::bank_forks::BankForks;
use crate::blocktree::get_tmp_ledger_path;
use crate::blocktree::Blocktree;
use crate::cluster_info::{ClusterInfo, Node};
use crate::entry::make_consecutive_blobs;
use crate::leader_scheduler::LeaderScheduler;
use crate::service::Service;
use crate::streamer::{blob_receiver, responder};
use crate::window_service::WindowService;
use solana_runtime::bank::Bank;
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::hash::Hash;
use std::fs::remove_dir_all;
use std::net::UdpSocket;
@ -194,15 +195,17 @@ mod test {
let blocktree = Arc::new(
Blocktree::open(&blocktree_path).expect("Expected to be able to open database ledger"),
);
let mut leader_schedule = LeaderScheduler::default();
leader_schedule.set_leader_schedule(vec![me_id]);
let (genesis_block, _) = GenesisBlock::new(100);
let bank0 = Bank::new(&genesis_block);
let bank_id = 0;
let bank_forks = BankForks::new(bank_id, bank0);
let t_window = WindowService::new(
blocktree,
subs,
r_reader,
s_retransmit,
Arc::new(leader_node.sockets.repair),
Arc::new(RwLock::new(leader_schedule)),
Arc::new(RwLock::new(bank_forks)),
exit.clone(),
);
let t_responder = {
@ -264,15 +267,17 @@ mod test {
let blocktree = Arc::new(
Blocktree::open(&blocktree_path).expect("Expected to be able to open database ledger"),
);
let mut leader_schedule = LeaderScheduler::default();
leader_schedule.set_leader_schedule(vec![me_id]);
let (genesis_block, _) = GenesisBlock::new(100);
let bank0 = Bank::new(&genesis_block);
let bank_id = 0;
let bank_forks = BankForks::new(bank_id, bank0);
let t_window = WindowService::new(
blocktree,
subs.clone(),
r_reader,
s_retransmit,
Arc::new(leader_node.sockets.repair),
Arc::new(RwLock::new(leader_schedule)),
Arc::new(RwLock::new(bank_forks)),
exit.clone(),
);
let t_responder = {

View File

@ -7,9 +7,9 @@ use solana::blocktree::{create_tmp_sample_blocktree, tmp_copy_blocktree, Blocktr
use solana::client::mk_client;
use solana::cluster_info::{Node, NodeInfo};
use solana::entry::{reconstruct_entries_from_blobs, Entry};
use solana::fullnode::make_active_set_entries;
use solana::fullnode::{new_banks_from_blocktree, Fullnode, FullnodeConfig, FullnodeReturnType};
use solana::gossip_service::{converge, make_listening_node};
use solana::leader_scheduler::{make_active_set_entries, LeaderScheduler, LeaderSchedulerConfig};
use solana::poh_service::PohServiceConfig;
use solana::result;
use solana::service::Service;
@ -19,7 +19,7 @@ use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction::SystemTransaction;
use solana_sdk::timing::{duration_as_s, DEFAULT_TICKS_PER_SLOT};
use solana_sdk::timing::duration_as_s;
use std::collections::{HashSet, VecDeque};
use std::env;
use std::fs::remove_dir_all;
@ -870,6 +870,7 @@ fn test_multi_node_dynamic_network() {
}
#[test]
#[ignore]
fn test_leader_to_validator_transition() {
solana_logger::setup();
@ -881,16 +882,8 @@ fn test_leader_to_validator_transition() {
let leader_node = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let leader_info = leader_node.info.clone();
let mut fullnode_config = FullnodeConfig::default();
let fullnode_config = FullnodeConfig::default();
let ticks_per_slot = 5;
fullnode_config.leader_scheduler_config = LeaderSchedulerConfig::new(
ticks_per_slot,
1,
// Setup window length to exclude the genesis bootstrap leader vote at tick height 0, so
// that when the leader schedule is recomputed for epoch 1 only the validator vote at tick
// height 1 will be considered.
1,
);
let (mut genesis_block, mint_keypair) =
GenesisBlock::new_with_leader(10_000, leader_info.id, 500);
@ -952,22 +945,14 @@ fn test_leader_to_validator_transition() {
leader_exit();
info!("Check the ledger to make sure it's the right height...");
let bank_forks = new_banks_from_blocktree(
&leader_ledger_path,
DEFAULT_TICKS_PER_SLOT,
&Arc::new(RwLock::new(LeaderScheduler::default())),
)
.0;
let bank = bank_forks.working_bank();
let bank_forks = new_banks_from_blocktree(&leader_ledger_path).0;
let _bank = bank_forks.working_bank();
assert_eq!(
bank.tick_height(),
fullnode_config.leader_scheduler_config.ticks_per_slot - 1
);
remove_dir_all(leader_ledger_path).unwrap();
}
#[test]
#[ignore]
fn test_leader_validator_basic() {
solana_logger::setup();
@ -984,14 +969,8 @@ fn test_leader_validator_basic() {
info!("validator id: {}", validator_keypair.pubkey());
// Create the leader scheduler config
let mut fullnode_config = FullnodeConfig::default();
let fullnode_config = FullnodeConfig::default();
let ticks_per_slot = 5;
fullnode_config.leader_scheduler_config = LeaderSchedulerConfig::new(
ticks_per_slot,
1, // 1 slot per epoch
1,
);
let (mut genesis_block, mint_keypair) =
GenesisBlock::new_with_leader(10_000, leader_info.id, 500);
genesis_block.ticks_per_slot = ticks_per_slot;
@ -1118,11 +1097,9 @@ fn test_dropped_handoff_recovery() {
let bootstrap_leader_info = bootstrap_leader_node.info.clone();
// Create the common leader scheduling configuration
let slots_per_epoch = (N + 1) as u64;
let _slots_per_epoch = (N + 1) as u64;
let ticks_per_slot = 5;
let mut fullnode_config = FullnodeConfig::default();
fullnode_config.leader_scheduler_config =
LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, slots_per_epoch);
let fullnode_config = FullnodeConfig::default();
let (mut genesis_block, mint_keypair) =
GenesisBlock::new_with_leader(10_000, bootstrap_leader_info.id, 500);
@ -1265,12 +1242,9 @@ fn test_full_leader_validator_network() {
const N: usize = 2;
// Create the common leader scheduling configuration
let slots_per_epoch = (N + 1) as u64;
let _slots_per_epoch = (N + 1) as u64;
let ticks_per_slot = 5;
let mut fullnode_config = FullnodeConfig::default();
fullnode_config.leader_scheduler_config =
LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, slots_per_epoch * 3);
let fullnode_config = FullnodeConfig::default();
// Create the bootstrap leader node information
let bootstrap_leader_keypair = Arc::new(Keypair::new());
info!("bootstrap leader: {:?}", bootstrap_leader_keypair.pubkey());
@ -1458,9 +1432,6 @@ fn test_full_leader_validator_network() {
length += 1;
}
let shortest = shortest.unwrap();
assert!(shortest >= fullnode_config.leader_scheduler_config.ticks_per_slot * 3,);
for path in ledger_paths {
Blocktree::destroy(&path).expect("Expected successful database destruction");
remove_dir_all(path).unwrap();
@ -1482,11 +1453,9 @@ fn test_broadcast_last_tick() {
// Create the fullnode configuration
let ticks_per_slot = 40;
let slots_per_epoch = 2;
let ticks_per_epoch = slots_per_epoch * ticks_per_slot;
let _ticks_per_epoch = slots_per_epoch * ticks_per_slot;
let mut fullnode_config = FullnodeConfig::default();
fullnode_config.leader_scheduler_config =
LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, ticks_per_epoch);
let fullnode_config = FullnodeConfig::default();
let (mut genesis_block, _mint_keypair) =
GenesisBlock::new_with_leader(10_000, bootstrap_leader_info.id, 500);
@ -1668,16 +1637,16 @@ fn test_fullnode_rotate(
transact: bool,
) {
solana_logger::setup();
info!(
"fullnode_rotate_fast: ticks_per_slot={} slots_per_epoch={} include_validator={} transact={}",
ticks_per_slot, slots_per_epoch, include_validator, transact
);
// Create the leader node information
let leader_keypair = Arc::new(Keypair::new());
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let leader_info = leader.info.clone();
let mut leader_should_be_leader = true;
// Create fullnode config, and set leader scheduler policies
let mut fullnode_config = FullnodeConfig::default();
let (tick_step_sender, tick_step_receiver) = sync_channel(1);
fullnode_config.leader_scheduler_config.ticks_per_slot = ticks_per_slot;
fullnode_config.leader_scheduler_config.slots_per_epoch = slots_per_epoch;
fullnode_config.tick_config = PohServiceConfig::Step(tick_step_sender);
// Note: when debugging failures in this test, disabling voting can help keep the log noise
@ -1686,20 +1655,11 @@ fn test_fullnode_rotate(
fullnode_config.voting_disabled = true;
*/
fullnode_config
.leader_scheduler_config
.active_window_num_slots = std::u64::MAX;
// Create the leader node information
let leader_keypair = Arc::new(Keypair::new());
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let leader_info = leader.info.clone();
let mut leader_should_be_leader = true;
// Create the Genesis block using leader's keypair
let (mut genesis_block, mint_keypair) =
GenesisBlock::new_with_leader(1_000_000_000_000_000_000, leader_keypair.pubkey(), 123);
genesis_block.ticks_per_slot = ticks_per_slot;
genesis_block.slots_per_epoch = slots_per_epoch;
// Make a common mint and a genesis entry for both leader + validator ledgers
let (leader_ledger_path, mut tick_height, mut last_entry_height, last_id, mut last_entry_id) =
@ -1734,7 +1694,7 @@ fn test_fullnode_rotate(
let mut start_slot = 0;
let mut leader_tick_height_of_next_rotation = 2;
if fullnode_config.leader_scheduler_config.ticks_per_slot == 1 {
if ticks_per_slot == 1 {
// Add another tick to the ledger if the cluster has been configured for 1 tick_per_slot.
// The "pseudo-tick" entry0 currently added by bank::process_ledger cannot be rotated on
// since it has no last id (so at 1 ticks_per_slot rotation must start at a tick_height of
@ -1961,11 +1921,13 @@ fn test_one_fullnode_rotate_every_second_tick_without_transactions() {
}
#[test]
#[ignore]
fn test_two_fullnodes_rotate_every_tick_without_transactions() {
test_fullnode_rotate(1, 1, true, false);
}
#[test]
#[ignore]
fn test_two_fullnodes_rotate_every_second_tick_without_transactions() {
test_fullnode_rotate(2, 1, true, false);
}

View File

@ -240,8 +240,10 @@ fn test_replicator_startup_leader_hang() {
solana_logger::setup();
info!("starting replicator test");
let replicator_ledger_path = &get_tmp_ledger_path!();
let leader_ledger_path = "replicator_test_leader_ledger";
let (genesis_block, _mint_keypair) = GenesisBlock::new(10_000);
let (replicator_ledger_path, _tick_height, _last_entry_height, _last_id, _last_entry_id) =
create_tmp_sample_blocktree("replicator_test_replicator_ledger", &genesis_block, 0);
{
let replicator_keypair = Keypair::new();
@ -253,7 +255,7 @@ fn test_replicator_startup_leader_hang() {
let leader_info = NodeInfo::new_entry_point(&fake_gossip);
let replicator_res = Replicator::new(
replicator_ledger_path,
&replicator_ledger_path,
replicator_node,
&leader_info,
&replicator_keypair,
@ -275,15 +277,18 @@ fn test_replicator_startup_ledger_hang() {
solana_logger::setup();
info!("starting replicator test");
let replicator_ledger_path = &get_tmp_ledger_path!();
let leader_keypair = Arc::new(Keypair::new());
let (genesis_block, _mint_keypair) =
GenesisBlock::new_with_leader(100, leader_keypair.pubkey(), 42);
let (replicator_ledger_path, _tick_height, _last_entry_height, _last_id, _last_entry_id) =
create_tmp_sample_blocktree("replicator_test_replicator_ledger", &genesis_block, 0);
info!("starting leader node");
let leader_keypair = Arc::new(Keypair::new());
let leader_node = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let leader_info = leader_node.info.clone();
let leader_ledger_path = "replicator_test_leader_ledger";
let (genesis_block, _mint_keypair) = GenesisBlock::new_with_leader(100, leader_info.id, 42);
let (leader_ledger_path, _tick_height, _last_entry_height, _last_id, _last_entry_id) =
create_tmp_sample_blocktree(leader_ledger_path, &genesis_block, 0);
@ -325,7 +330,7 @@ fn test_replicator_startup_ledger_hang() {
let leader_info = NodeInfo::new_entry_point(&leader_info.gossip);
let replicator_res = Replicator::new(
replicator_ledger_path,
&replicator_ledger_path,
replicator_node,
&leader_info,
&bad_keys,

View File

@ -9,7 +9,6 @@ use solana::cluster_info::{ClusterInfo, Node};
use solana::entry::next_entry_mut;
use solana::entry::EntrySlice;
use solana::gossip_service::GossipService;
use solana::leader_scheduler::LeaderScheduler;
use solana::packet::index_blobs;
use solana::rpc_subscriptions::RpcSubscriptions;
use solana::service::Service;
@ -97,7 +96,6 @@ fn test_replay() {
}];
let bank = bank_forks.working_bank();
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::new_with_bank(&bank)));
assert_eq!(bank.get_balance(&mint_keypair.pubkey()), starting_balance);
// start cluster_info1
@ -133,7 +131,6 @@ fn test_replay() {
&StorageState::default(),
None,
ledger_signal_receiver,
leader_scheduler,
&Arc::new(RpcSubscriptions::default()),
);