Prune older epoch stakes
commit e782c26908 (parent cd65a1e172)
@@ -733,6 +733,7 @@ mod test {
             cluster_lamports: 100,
             ticks_per_slot: 8,
             slots_per_epoch: MINIMUM_SLOTS_PER_EPOCH as u64,
+            stakers_slot_offset: MINIMUM_SLOTS_PER_EPOCH as u64,
             ..ClusterConfig::default()
         };
         let cluster = LocalCluster::new(&config);
@@ -449,6 +449,7 @@ fn test_two_unbalanced_stakes() {
         validator_configs: vec![validator_config.clone(); 2],
         ticks_per_slot: num_ticks_per_slot,
         slots_per_epoch: num_slots_per_epoch,
+        stakers_slot_offset: num_slots_per_epoch,
         poh_config: PohConfig::new_sleep(Duration::from_millis(1000 / num_ticks_per_second)),
         ..ClusterConfig::default()
     });
@@ -497,15 +498,16 @@ fn test_forwarding() {
 fn test_restart_node() {
     solana_logger::setup();
     error!("test_restart_node");
-    let slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH as u64;
+    let slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH * 2 as u64;
     let ticks_per_slot = 16;
     let validator_config = ValidatorConfig::default();
     let mut cluster = LocalCluster::new(&ClusterConfig {
-        node_stakes: vec![3],
+        node_stakes: vec![100; 1],
         cluster_lamports: 100,
         validator_configs: vec![validator_config.clone()],
         ticks_per_slot,
         slots_per_epoch,
+        stakers_slot_offset: slots_per_epoch,
         ..ClusterConfig::default()
     });
     let nodes = cluster.get_node_pubkeys();
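
Note on the cluster-test configs touched above: each now pins stakers_slot_offset to one full epoch, presumably so the stake snapshot used for the leader schedule advances in lockstep with slots_per_epoch. Separately, the new `MINIMUM_SLOTS_PER_EPOCH * 2 as u64` reads oddly but is fine: `as` binds tighter than `*`, so it parses as `MINIMUM_SLOTS_PER_EPOCH * (2 as u64)` and simply doubles the epoch length. A standalone sketch of that precedence (the constant value here is a stand-in, not pulled from the sdk):

    fn main() {
        // stand-in for solana_sdk's MINIMUM_SLOTS_PER_EPOCH, assumed to be 32 for illustration
        const MINIMUM_SLOTS_PER_EPOCH: u64 = 32;

        // `as` has higher precedence than `*`, so these are the same value
        let doubled = MINIMUM_SLOTS_PER_EPOCH * 2 as u64;
        assert_eq!(doubled, MINIMUM_SLOTS_PER_EPOCH * (2 as u64));
        assert_eq!(doubled, 64);
    }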
@@ -60,6 +60,8 @@ use std::{
 
 pub const SECONDS_PER_YEAR: f64 = (365.25 * 24.0 * 60.0 * 60.0);
 
+pub const MAX_LEADER_SCHEDULE_STAKES: Epoch = 5;
+
 type BankStatusCache = StatusCache<Result<()>>;
 
 #[derive(Default)]
@@ -416,7 +418,6 @@ impl Bank {
         );
 
         let leader_schedule_epoch = epoch_schedule.get_leader_schedule_epoch(slot);
-
         if parent.epoch() < new.epoch() {
             if let Some(entered_epoch_callback) =
                 parent.entered_epoch_callback.read().unwrap().as_ref()
@@ -425,14 +426,7 @@ impl Bank {
             }
         }
 
-        // update epoch_stakes cache
-        //  if my parent didn't populate for this staker's epoch, we've
-        //  crossed a boundary
-        if new.epoch_stakes.get(&leader_schedule_epoch).is_none() {
-            new.epoch_stakes
-                .insert(leader_schedule_epoch, new.stakes.read().unwrap().clone());
-        }
-
+        new.update_epoch_stakes(leader_schedule_epoch);
         new.ancestors.insert(new.slot(), 0);
         new.parents().iter().enumerate().for_each(|(i, p)| {
             new.ancestors.insert(p.slot(), i + 1);
@@ -541,6 +535,20 @@ impl Bank {
         self.store_account(&sysvar::slot_hashes::id(), &slot_hashes.create_account(1));
     }
 
+    fn update_epoch_stakes(&mut self, leader_schedule_epoch: Epoch) {
+        // update epoch_stakes cache
+        //  if my parent didn't populate for this staker's epoch, we've
+        //  crossed a boundary
+        if self.epoch_stakes.get(&leader_schedule_epoch).is_none() {
+            self.epoch_stakes.retain(|&epoch, _| {
+                epoch >= leader_schedule_epoch.saturating_sub(MAX_LEADER_SCHEDULE_STAKES)
+            });
+
+            self.epoch_stakes
+                .insert(leader_schedule_epoch, self.stakes.read().unwrap().clone());
+        }
+    }
+
     fn update_fees(&self) {
         self.store_account(
             &sysvar::fees::id(),
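
The new update_epoch_stakes helper both fills and prunes the cache: the first time a leader-schedule epoch is seen, every snapshot more than MAX_LEADER_SCHEDULE_STAKES epochs old is evicted before the current stakes are recorded. A self-contained sketch of just that windowing logic, with a plain HashMap<u64, String> standing in for the bank's epoch_stakes map of Stakes (function and variable names here are illustrative, not the crate's API):

    use std::collections::HashMap;

    const MAX_LEADER_SCHEDULE_STAKES: u64 = 5;

    // Drop entries older than the retention window, then record a snapshot
    // for the new leader-schedule epoch.
    fn update_epoch_stakes(cache: &mut HashMap<u64, String>, leader_schedule_epoch: u64, snapshot: String) {
        if !cache.contains_key(&leader_schedule_epoch) {
            let oldest_kept = leader_schedule_epoch.saturating_sub(MAX_LEADER_SCHEDULE_STAKES);
            cache.retain(|&epoch, _| epoch >= oldest_kept);
            cache.insert(leader_schedule_epoch, snapshot);
        }
    }

    fn main() {
        let mut cache = HashMap::new();
        for epoch in 0..=7 {
            update_epoch_stakes(&mut cache, epoch, format!("stakes at epoch {}", epoch));
        }
        // Epochs 0 and 1 have been evicted; at most
        // MAX_LEADER_SCHEDULE_STAKES + 1 snapshots remain.
        let mut kept: Vec<u64> = cache.keys().copied().collect();
        kept.sort();
        assert_eq!(kept, vec![2, 3, 4, 5, 6, 7]);
    }

The saturating_sub matters early on: for leader-schedule epochs smaller than MAX_LEADER_SCHEDULE_STAKES the cutoff clamps to zero, so nothing is pruned until the cache has grown past the window.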
@@ -1963,6 +1971,68 @@ mod tests {
         assert_eq!(bank1.block_height(), 1);
     }
 
+    #[test]
+    fn test_bank_update_epoch_stakes() {
+        impl Bank {
+            fn epoch_stake_keys(&self) -> Vec<Epoch> {
+                let mut keys: Vec<Epoch> = self.epoch_stakes.keys().map(|k| *k).collect();
+                keys.sort();
+                keys
+            }
+
+            fn epoch_stake_key_info(&self) -> (Epoch, Epoch, usize) {
+                let mut keys: Vec<Epoch> = self.epoch_stakes.keys().map(|k| *k).collect();
+                keys.sort();
+                (*keys.first().unwrap(), *keys.last().unwrap(), keys.len())
+            }
+        }
+
+        let (genesis_config, _mint_keypair) = create_genesis_config(100_000);
+        let mut bank = Bank::new(&genesis_config);
+
+        let initial_epochs = bank.epoch_stake_keys();
+        assert_eq!(initial_epochs, vec![0, 1]);
+
+        for existing_epoch in &initial_epochs {
+            bank.update_epoch_stakes(*existing_epoch);
+            assert_eq!(bank.epoch_stake_keys(), initial_epochs);
+        }
+
+        for epoch in (initial_epochs.len() as Epoch)..MAX_LEADER_SCHEDULE_STAKES {
+            bank.update_epoch_stakes(epoch);
+            assert_eq!(bank.epoch_stakes.len() as Epoch, epoch + 1);
+        }
+
+        assert_eq!(
+            bank.epoch_stake_key_info(),
+            (
+                0,
+                MAX_LEADER_SCHEDULE_STAKES - 1,
+                MAX_LEADER_SCHEDULE_STAKES as usize
+            )
+        );
+
+        bank.update_epoch_stakes(MAX_LEADER_SCHEDULE_STAKES);
+        assert_eq!(
+            bank.epoch_stake_key_info(),
+            (
+                0,
+                MAX_LEADER_SCHEDULE_STAKES,
+                MAX_LEADER_SCHEDULE_STAKES as usize + 1
+            )
+        );
+
+        bank.update_epoch_stakes(MAX_LEADER_SCHEDULE_STAKES + 1);
+        assert_eq!(
+            bank.epoch_stake_key_info(),
+            (
+                1,
+                MAX_LEADER_SCHEDULE_STAKES + 1,
+                MAX_LEADER_SCHEDULE_STAKES as usize + 1
+            )
+        );
+    }
+
     #[test]
     fn test_bank_capitalization() {
         let bank = Arc::new(Bank::new(&GenesisConfig {
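
Working the new test's numbers with MAX_LEADER_SCHEDULE_STAKES = 5: a fresh bank already caches stakes for epochs 0 and 1; updates for epochs 2..=4 grow the cache one entry at a time; epoch 5 still evicts nothing because 5 - 5 = 0, leaving six entries (0..=5); epoch 6 finally drops epoch 0, sliding the window to 1..=6 while the size stays at six. A tiny sketch of that steady-state bound (plain arithmetic, no Bank involved):

    fn main() {
        const MAX_LEADER_SCHEDULE_STAKES: u64 = 5;

        // Past warm-up, the retained window for leader-schedule epoch `e` is
        // [e - MAX_LEADER_SCHEDULE_STAKES, e]: MAX_LEADER_SCHEDULE_STAKES + 1 entries.
        let leader_schedule_epoch: u64 = 6;
        let oldest_kept = leader_schedule_epoch.saturating_sub(MAX_LEADER_SCHEDULE_STAKES);
        assert_eq!(oldest_kept, 1);
        assert_eq!(
            (oldest_kept..=leader_schedule_epoch).count(),
            MAX_LEADER_SCHEDULE_STAKES as usize + 1
        );
    }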