slot_height considered harmful (#3135)
* slot_height considered harmful
* fix test_tick_slot_epoch_indexes
parent 33c4c7e511
commit b9e878ee80
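
This commit renames `slot_height` to `slot` throughout the blocktree, broadcast, cluster-info, and repair code, along with derived names such as `slot_height_from_key` -> `slot_from_key`. As a rough orientation before the diff, here is a minimal, self-contained sketch of the renamed metadata shape (illustrative only; the real `SlotMeta` in the hunks below carries additional fields such as `parent_slot`, `next_slots`, and `last_index`):

// Illustrative sketch, not code from the patch.
struct SlotMeta {
    slot: u64,     // was `slot_height` before this commit
    consumed: u64, // consecutive blobs received starting from index 0
    received: u64,
    is_rooted: bool,
}

fn describe(meta: &SlotMeta) -> String {
    format!(
        "slot {}: consumed={} received={} rooted={}",
        meta.slot, meta.consumed, meta.received, meta.is_rooted
    )
}

fn main() {
    let meta = SlotMeta { slot: 3, consumed: 8, received: 10, is_rooted: false };
    println!("{}", describe(&meta));
}
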
@@ -124,8 +124,8 @@ pub trait LedgerColumnFamilyRaw {
 // The Meta column family
 pub struct SlotMeta {
     // The number of slots above the root (the genesis block). The first
-    // slot has slot_height 0.
-    pub slot_height: u64,
+    // slot has slot 0.
+    pub slot: u64,
     // The total number of consecutive blobs starting from index 0
     // we have received for this slot.
     pub consumed: u64,
@@ -141,7 +141,7 @@ pub struct SlotMeta {
     // from this one.
     pub next_slots: Vec<u64>,
     // True if this slot is full (consumed == last_index + 1) and if every
-    // slot from 0..slot_height is also rooted.
+    // slot that is a parent of this slot is also rooted.
     pub is_rooted: bool,
 }

@@ -158,14 +158,14 @@ impl SlotMeta {
         self.consumed == self.last_index + 1
     }

-    fn new(slot_height: u64, parent_slot: u64) -> Self {
+    fn new(slot: u64, parent_slot: u64) -> Self {
         SlotMeta {
-            slot_height,
+            slot,
             consumed: 0,
             received: 0,
             parent_slot,
             next_slots: vec![],
-            is_rooted: slot_height == 0,
+            is_rooted: slot == 0,
             last_index: std::u64::MAX,
         }
     }
@@ -180,19 +180,19 @@ impl MetaCf {
         MetaCf { db }
     }

-    pub fn key(slot_height: u64) -> Vec<u8> {
+    pub fn key(slot: u64) -> Vec<u8> {
         let mut key = vec![0u8; 8];
-        BigEndian::write_u64(&mut key[0..8], slot_height);
+        BigEndian::write_u64(&mut key[0..8], slot);
         key
     }

-    pub fn get_slot_meta(&self, slot_height: u64) -> Result<Option<SlotMeta>> {
-        let key = Self::key(slot_height);
+    pub fn get_slot_meta(&self, slot: u64) -> Result<Option<SlotMeta>> {
+        let key = Self::key(slot);
         self.get(&key)
     }

-    pub fn put_slot_meta(&self, slot_height: u64, slot_meta: &SlotMeta) -> Result<()> {
-        let key = Self::key(slot_height);
+    pub fn put_slot_meta(&self, slot: u64, slot_meta: &SlotMeta) -> Result<()> {
+        let key = Self::key(slot);
         self.put(&key, slot_meta)
     }

@@ -225,34 +225,29 @@ impl DataCf {
         DataCf { db }
     }

-    pub fn get_by_slot_index(&self, slot_height: u64, index: u64) -> Result<Option<Vec<u8>>> {
-        let key = Self::key(slot_height, index);
+    pub fn get_by_slot_index(&self, slot: u64, index: u64) -> Result<Option<Vec<u8>>> {
+        let key = Self::key(slot, index);
         self.get(&key)
     }

-    pub fn delete_by_slot_index(&self, slot_height: u64, index: u64) -> Result<()> {
-        let key = Self::key(slot_height, index);
+    pub fn delete_by_slot_index(&self, slot: u64, index: u64) -> Result<()> {
+        let key = Self::key(slot, index);
         self.delete(&key)
     }

-    pub fn put_by_slot_index(
-        &self,
-        slot_height: u64,
-        index: u64,
-        serialized_value: &[u8],
-    ) -> Result<()> {
-        let key = Self::key(slot_height, index);
+    pub fn put_by_slot_index(&self, slot: u64, index: u64, serialized_value: &[u8]) -> Result<()> {
+        let key = Self::key(slot, index);
         self.put(&key, serialized_value)
     }

-    pub fn key(slot_height: u64, index: u64) -> Vec<u8> {
+    pub fn key(slot: u64, index: u64) -> Vec<u8> {
         let mut key = vec![0u8; 16];
-        BigEndian::write_u64(&mut key[0..8], slot_height);
+        BigEndian::write_u64(&mut key[0..8], slot);
         BigEndian::write_u64(&mut key[8..16], index);
         key
     }

-    pub fn slot_height_from_key(key: &[u8]) -> Result<u64> {
+    pub fn slot_from_key(key: &[u8]) -> Result<u64> {
         let mut rdr = io::Cursor::new(&key[0..8]);
         let height = rdr.read_u64::<BigEndian>()?;
         Ok(height)
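
For context on the key helpers above: the data and erasure column families key each blob by an 8-byte big-endian slot followed by an 8-byte big-endian blob index, so the database's lexicographic key order matches (slot, index) order. A standalone sketch of that layout, assuming the `byteorder` crate that the code above already uses (error handling simplified to `io::Result`):

use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
use std::io;

// Mirror of DataCf::key / slot_from_key / index_from_key, for illustration only.
fn key(slot: u64, index: u64) -> Vec<u8> {
    let mut key = vec![0u8; 16];
    BigEndian::write_u64(&mut key[0..8], slot);
    BigEndian::write_u64(&mut key[8..16], index);
    key
}

fn slot_from_key(key: &[u8]) -> io::Result<u64> {
    io::Cursor::new(&key[0..8]).read_u64::<BigEndian>()
}

fn index_from_key(key: &[u8]) -> io::Result<u64> {
    io::Cursor::new(&key[8..16]).read_u64::<BigEndian>()
}

fn main() -> io::Result<()> {
    let k = key(7, 42);
    assert_eq!(slot_from_key(&k)?, 7);
    assert_eq!(index_from_key(&k)?, 42);
    // Big-endian encoding keeps keys sorted by slot first, then index.
    assert!(key(7, 42) < key(8, 0));
    Ok(())
}
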
@@ -284,32 +279,27 @@ impl ErasureCf {
     pub fn new(db: Arc<DB>) -> Self {
         ErasureCf { db }
     }
-    pub fn delete_by_slot_index(&self, slot_height: u64, index: u64) -> Result<()> {
-        let key = Self::key(slot_height, index);
+    pub fn delete_by_slot_index(&self, slot: u64, index: u64) -> Result<()> {
+        let key = Self::key(slot, index);
         self.delete(&key)
     }

-    pub fn get_by_slot_index(&self, slot_height: u64, index: u64) -> Result<Option<Vec<u8>>> {
-        let key = Self::key(slot_height, index);
+    pub fn get_by_slot_index(&self, slot: u64, index: u64) -> Result<Option<Vec<u8>>> {
+        let key = Self::key(slot, index);
         self.get(&key)
     }

-    pub fn put_by_slot_index(
-        &self,
-        slot_height: u64,
-        index: u64,
-        serialized_value: &[u8],
-    ) -> Result<()> {
-        let key = Self::key(slot_height, index);
+    pub fn put_by_slot_index(&self, slot: u64, index: u64, serialized_value: &[u8]) -> Result<()> {
+        let key = Self::key(slot, index);
         self.put(&key, serialized_value)
     }

-    pub fn key(slot_height: u64, index: u64) -> Vec<u8> {
-        DataCf::key(slot_height, index)
+    pub fn key(slot: u64, index: u64) -> Vec<u8> {
+        DataCf::key(slot, index)
     }

-    pub fn slot_height_from_key(key: &[u8]) -> Result<u64> {
-        DataCf::slot_height_from_key(key)
+    pub fn slot_from_key(key: &[u8]) -> Result<u64> {
+        DataCf::slot_from_key(key)
     }

     pub fn index_from_key(key: &[u8]) -> Result<u64> {
@@ -413,15 +403,15 @@ impl Blocktree {
         Ok((blocktree, signal_receiver))
     }

-    pub fn meta(&self, slot_height: u64) -> Result<Option<SlotMeta>> {
-        self.meta_cf.get(&MetaCf::key(slot_height))
+    pub fn meta(&self, slot: u64) -> Result<Option<SlotMeta>> {
+        self.meta_cf.get(&MetaCf::key(slot))
     }

-    pub fn reset_slot_consumed(&self, slot_height: u64) -> Result<()> {
-        let meta_key = MetaCf::key(slot_height);
+    pub fn reset_slot_consumed(&self, slot: u64) -> Result<()> {
+        let meta_key = MetaCf::key(slot);
         if let Some(mut meta) = self.meta_cf.get(&meta_key)? {
             for index in 0..meta.received {
-                self.data_cf.delete_by_slot_index(slot_height, index)?;
+                self.data_cf.delete_by_slot_index(slot, index)?;
             }
             meta.consumed = 0;
             meta.received = 0;
@@ -440,9 +430,9 @@ impl Blocktree {
         Ok(())
     }

-    pub fn get_next_slot(&self, slot_height: u64) -> Result<Option<u64>> {
+    pub fn get_next_slot(&self, slot: u64) -> Result<Option<u64>> {
         let mut db_iterator = self.db.raw_iterator_cf(self.meta_cf.handle())?;
-        db_iterator.seek(&MetaCf::key(slot_height + 1));
+        db_iterator.seek(&MetaCf::key(slot + 1));
         if !db_iterator.valid() {
             Ok(None)
         } else {
@@ -537,7 +527,7 @@ impl Blocktree {
         I::Item: Borrow<Blob>,
     {
         let mut write_batch = WriteBatch::default();
-        // A map from slot_height to a 2-tuple of metadata: (working copy, backup copy),
+        // A map from slot to a 2-tuple of metadata: (working copy, backup copy),
         // so we can detect changes to the slot metadata later
         let mut slot_meta_working_set = HashMap::new();
         let new_blobs: Vec<_> = new_blobs.into_iter().collect();
@@ -596,14 +586,14 @@ impl Blocktree {

         // Check if any metadata was changed, if so, insert the new version of the
         // metadata into the write batch
-        for (slot_height, (meta_copy, meta_backup)) in slot_meta_working_set.iter() {
+        for (slot, (meta_copy, meta_backup)) in slot_meta_working_set.iter() {
             let meta: &SlotMeta = &RefCell::borrow(&*meta_copy);
             // Check if the working copy of the metadata has changed
             if Some(meta) != meta_backup.as_ref() {
                 should_signal = should_signal || Self::slot_has_updates(meta, &meta_backup);
                 write_batch.put_cf(
                     self.meta_cf.handle(),
-                    &MetaCf::key(*slot_height),
+                    &MetaCf::key(*slot),
                     &serialize(&meta)?,
                 )?;
             }
@@ -628,9 +618,9 @@ impl Blocktree {
         start_index: u64,
         num_blobs: u64,
         buf: &mut [u8],
-        slot_height: u64,
+        slot: u64,
     ) -> Result<(u64, u64)> {
-        let start_key = DataCf::key(slot_height, start_index);
+        let start_key = DataCf::key(slot, start_index);
         let mut db_iterator = self.db.raw_iterator_cf(self.data_cf.handle())?;
         db_iterator.seek(&start_key);
         let mut total_blobs = 0;
@@ -721,11 +711,11 @@ impl Blocktree {
         self.data_cf.put_by_slot_index(slot, index, bytes)
     }

-    pub fn get_data_blob(&self, slot_height: u64, blob_index: u64) -> Result<Option<Blob>> {
-        let bytes = self.get_data_blob_bytes(slot_height, blob_index)?;
+    pub fn get_data_blob(&self, slot: u64, blob_index: u64) -> Result<Option<Blob>> {
+        let bytes = self.get_data_blob_bytes(slot, blob_index)?;
         Ok(bytes.map(|bytes| {
             let blob = Blob::new(&bytes);
-            assert!(blob.slot() == slot_height);
+            assert!(blob.slot() == slot);
             assert!(blob.index() == blob_index);
             blob
         }))
@@ -742,14 +732,14 @@ impl Blocktree {

     // Given a start and end entry index, find all the missing
     // indexes in the ledger in the range [start_index, end_index)
-    // for the slot with slot_height == slot
+    // for the slot with the specified slot
     fn find_missing_indexes(
         db_iterator: &mut BlocktreeRawIterator,
         slot: u64,
         start_index: u64,
         end_index: u64,
         key: &dyn Fn(u64, u64) -> Vec<u8>,
-        slot_height_from_key: &dyn Fn(&[u8]) -> Result<u64>,
+        slot_from_key: &dyn Fn(&[u8]) -> Result<u64>,
         index_from_key: &dyn Fn(&[u8]) -> Result<u64>,
         max_missing: usize,
     ) -> Vec<u64> {
@@ -775,7 +765,7 @@ impl Blocktree {
                 break;
             }
             let current_key = db_iterator.key().expect("Expect a valid key");
-            let current_slot = slot_height_from_key(&current_key)
+            let current_slot = slot_from_key(&current_key)
                 .expect("Expect to be able to parse slot from valid key");
             let current_index = {
                 if current_slot > slot {
@@ -824,7 +814,7 @@ impl Blocktree {
             start_index,
             end_index,
             &DataCf::key,
-            &DataCf::slot_height_from_key,
+            &DataCf::slot_from_key,
             &DataCf::index_from_key,
             max_missing,
         )
@@ -845,7 +835,7 @@ impl Blocktree {
             start_index,
             end_index,
             &ErasureCf::key,
-            &ErasureCf::slot_height_from_key,
+            &ErasureCf::slot_from_key,
             &ErasureCf::index_from_key,
             max_missing,
         )
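
The `find_missing_indexes` plumbing above walks the keyspace of a single slot and reports which blob indexes in `[start_index, end_index)` are absent, stopping after `max_missing` entries. A simplified, self-contained model of that gap-finding logic (the real code drives a RocksDB iterator through the `key`/`slot_from_key`/`index_from_key` callbacks; here a sorted slice of present (slot, index) pairs stands in for the iterator):

// Simplified model, not the actual implementation.
fn find_missing_indexes(
    present: &[(u64, u64)], // sorted (slot, index) pairs that exist in the ledger
    slot: u64,
    start_index: u64,
    end_index: u64,
    max_missing: usize,
) -> Vec<u64> {
    let mut missing = vec![];
    let mut expected = start_index;
    for &(key_slot, index) in present {
        if key_slot < slot || (key_slot == slot && index < start_index) {
            continue;
        }
        if key_slot > slot || index >= end_index {
            break;
        }
        while expected < index && missing.len() < max_missing {
            missing.push(expected);
            expected += 1;
        }
        expected = index + 1;
    }
    while expected < end_index && missing.len() < max_missing {
        missing.push(expected);
        expected += 1;
    }
    missing
}

fn main() {
    // Slot 5 has blobs 0, 1, 4 and 5; indexes 2 and 3 are missing in [0, 6).
    let present = [(5, 0), (5, 1), (5, 4), (5, 5)];
    assert_eq!(find_missing_indexes(&present, 5, 0, 6, 10), vec![2, 3]);
}
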
@@ -854,42 +844,36 @@ impl Blocktree {
     /// Returns the entry vector for the slot starting with `blob_start_index`
     pub fn get_slot_entries(
         &self,
-        slot_height: u64,
+        slot: u64,
         blob_start_index: u64,
         max_entries: Option<u64>,
     ) -> Result<Vec<Entry>> {
-        self.get_slot_entries_with_blob_count(slot_height, blob_start_index, max_entries)
+        self.get_slot_entries_with_blob_count(slot, blob_start_index, max_entries)
             .map(|x| x.0)
     }

     pub fn get_slot_entries_with_blob_count(
         &self,
-        slot_height: u64,
+        slot: u64,
         blob_start_index: u64,
         max_entries: Option<u64>,
     ) -> Result<(Vec<Entry>, usize)> {
         // Find the next consecutive block of blobs.
-        let consecutive_blobs = self.get_slot_consecutive_blobs(
-            slot_height,
-            &HashMap::new(),
-            blob_start_index,
-            max_entries,
-        )?;
+        let consecutive_blobs =
+            self.get_slot_consecutive_blobs(slot, &HashMap::new(), blob_start_index, max_entries)?;
         let num = consecutive_blobs.len();
         Ok((Self::deserialize_blobs(&consecutive_blobs), num))
     }

-    // Returns slots connecting to any element of the list `slot_heights`.
-    pub fn get_slots_since(&self, slot_heights: &[u64]) -> Result<HashMap<u64, Vec<u64>>> {
+    // Returns slots connecting to any element of the list `slots`.
+    pub fn get_slots_since(&self, slots: &[u64]) -> Result<HashMap<u64, Vec<u64>>> {
         // Return error if there was a database error during lookup of any of the
         // slot indexes
-        let slot_metas: Result<Vec<Option<SlotMeta>>> = slot_heights
-            .iter()
-            .map(|slot_height| self.meta(*slot_height))
-            .collect();
+        let slot_metas: Result<Vec<Option<SlotMeta>>> =
+            slots.iter().map(|slot| self.meta(*slot)).collect();

         let slot_metas = slot_metas?;
-        let result: HashMap<u64, Vec<u64>> = slot_heights
+        let result: HashMap<u64, Vec<u64>> = slots
             .iter()
             .zip(slot_metas)
             .filter_map(|(height, meta)| meta.map(|meta| (*height, meta.next_slots)))
@@ -956,17 +940,17 @@ impl Blocktree {
         working_set: &HashMap<u64, (Rc<RefCell<SlotMeta>>, Option<SlotMeta>)>,
     ) -> Result<()> {
         let mut new_chained_slots = HashMap::new();
-        let working_set_slot_heights: Vec<_> = working_set.iter().map(|s| *s.0).collect();
-        for slot_height in working_set_slot_heights {
-            self.handle_chaining_for_slot(working_set, &mut new_chained_slots, slot_height)?;
+        let working_set_slots: Vec<_> = working_set.iter().map(|s| *s.0).collect();
+        for slot in working_set_slots {
+            self.handle_chaining_for_slot(working_set, &mut new_chained_slots, slot)?;
         }

         // Write all the newly changed slots in new_chained_slots to the write_batch
-        for (slot_height, meta_copy) in new_chained_slots.iter() {
+        for (slot, meta_copy) in new_chained_slots.iter() {
             let meta: &SlotMeta = &RefCell::borrow(&*meta_copy);
             write_batch.put_cf(
                 self.meta_cf.handle(),
-                &MetaCf::key(*slot_height),
+                &MetaCf::key(*slot),
                 &serialize(meta)?,
             )?;
         }
@@ -977,35 +961,32 @@ impl Blocktree {
         &self,
         working_set: &HashMap<u64, (Rc<RefCell<SlotMeta>>, Option<SlotMeta>)>,
         new_chained_slots: &mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
-        slot_height: u64,
+        slot: u64,
     ) -> Result<()> {
         let (meta_copy, meta_backup) = working_set
-            .get(&slot_height)
+            .get(&slot)
             .expect("Slot must exist in the working_set hashmap");
         {
             let mut slot_meta = meta_copy.borrow_mut();

             // If:
             // 1) This is a new slot
-            // 2) slot_height != 0
+            // 2) slot != 0
             // then try to chain this slot to a previous slot
-            if slot_height != 0 {
-                let prev_slot_height = slot_meta.parent_slot;
+            if slot != 0 {
+                let prev_slot = slot_meta.parent_slot;

                 // Check if slot_meta is a new slot
                 if meta_backup.is_none() {
-                    let prev_slot = self.find_slot_meta_else_create(
-                        working_set,
-                        new_chained_slots,
-                        prev_slot_height,
-                    )?;
+                    let prev_slot =
+                        self.find_slot_meta_else_create(working_set, new_chained_slots, prev_slot)?;

                     // This is a newly inserted slot so:
                     // 1) Chain to the previous slot, and also
                     // 2) Determine whether to set the is_rooted flag
                     self.chain_new_slot_to_prev_slot(
                         &mut prev_slot.borrow_mut(),
-                        slot_height,
+                        slot,
                         &mut slot_meta,
                     );
                 }
@@ -1018,7 +999,7 @@ impl Blocktree {
             // This is a newly inserted slot and slot.is_rooted is true, so go through
             // and update all child slots with is_rooted if applicable
             let mut next_slots: Vec<(u64, Rc<RefCell<(SlotMeta)>>)> =
-                vec![(slot_height, meta_copy.clone())];
+                vec![(slot, meta_copy.clone())];
             while !next_slots.is_empty() {
                 let (_, current_slot) = next_slots.pop().unwrap();
                 current_slot.borrow_mut().is_rooted = true;
@@ -1042,12 +1023,12 @@ impl Blocktree {

     fn chain_new_slot_to_prev_slot(
         &self,
-        prev_slot: &mut SlotMeta,
-        current_slot_height: u64,
-        current_slot: &mut SlotMeta,
+        prev_slot_meta: &mut SlotMeta,
+        current_slot: u64,
+        current_slot_meta: &mut SlotMeta,
     ) {
-        prev_slot.next_slots.push(current_slot_height);
-        current_slot.is_rooted = prev_slot.is_rooted && prev_slot.is_full();
+        prev_slot_meta.next_slots.push(current_slot);
+        current_slot_meta.is_rooted = prev_slot_meta.is_rooted && prev_slot_meta.is_full();
     }

     fn is_newly_completed_slot(
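
The chaining hunks above implement one rule worth calling out: a newly inserted slot is rooted only if its parent is rooted and the parent is full (`consumed == last_index + 1`), and rooting later propagates to children through `next_slots`. A self-contained sketch of just that rule (the real code operates on `SlotMeta` values held in `Rc<RefCell<...>>` inside a working set):

// Illustrative sketch of the rule in chain_new_slot_to_prev_slot.
#[derive(Default)]
struct Meta {
    consumed: u64,
    last_index: u64,
    is_rooted: bool,
    next_slots: Vec<u64>,
}

impl Meta {
    fn is_full(&self) -> bool {
        self.consumed == self.last_index + 1
    }
}

fn chain_new_slot_to_prev_slot(prev: &mut Meta, current_slot: u64, current: &mut Meta) {
    prev.next_slots.push(current_slot);
    current.is_rooted = prev.is_rooted && prev.is_full();
}

fn main() {
    let mut genesis = Meta { consumed: 8, last_index: 7, is_rooted: true, next_slots: vec![] };
    let mut slot_1 = Meta::default();
    chain_new_slot_to_prev_slot(&mut genesis, 1, &mut slot_1);
    assert!(slot_1.is_rooted); // parent is rooted and full, so the child is rooted too
    assert_eq!(genesis.next_slots, vec![1]);
}
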
@@ -1082,21 +1063,21 @@ impl Blocktree {
     // create a dummy placeholder slot in the database
     fn find_slot_meta_in_db_else_create<'a>(
         &self,
-        slot_height: u64,
+        slot: u64,
         insert_map: &'a mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
     ) -> Result<Rc<RefCell<SlotMeta>>> {
-        if let Some(slot) = self.meta(slot_height)? {
-            insert_map.insert(slot_height, Rc::new(RefCell::new(slot)));
-            Ok(insert_map.get(&slot_height).unwrap().clone())
+        if let Some(slot_meta) = self.meta(slot)? {
+            insert_map.insert(slot, Rc::new(RefCell::new(slot_meta)));
+            Ok(insert_map.get(&slot).unwrap().clone())
         } else {
             // If this slot doesn't exist, make a placeholder slot. This way we
             // remember which slots chained to this one when we eventually get a real blob
             // for this slot
             insert_map.insert(
-                slot_height,
-                Rc::new(RefCell::new(SlotMeta::new(slot_height, std::u64::MAX))),
+                slot,
+                Rc::new(RefCell::new(SlotMeta::new(slot, std::u64::MAX))),
             );
-            Ok(insert_map.get(&slot_height).unwrap().clone())
+            Ok(insert_map.get(&slot).unwrap().clone())
         }
     }

@@ -1105,11 +1086,11 @@ impl Blocktree {
         &self,
         working_set: &'a HashMap<u64, (Rc<RefCell<SlotMeta>>, Option<SlotMeta>)>,
         chained_slots: &'a HashMap<u64, Rc<RefCell<SlotMeta>>>,
-        slot_height: u64,
+        slot: u64,
     ) -> Result<Option<Rc<RefCell<SlotMeta>>>> {
-        if let Some((entry, _)) = working_set.get(&slot_height) {
+        if let Some((entry, _)) = working_set.get(&slot) {
             Ok(Some(entry.clone()))
-        } else if let Some(entry) = chained_slots.get(&slot_height) {
+        } else if let Some(entry) = chained_slots.get(&slot) {
             Ok(Some(entry.clone()))
         } else {
             Ok(None)
@@ -1185,7 +1166,7 @@ impl Blocktree {
     /// range
     fn get_slot_consecutive_blobs<'a>(
         &self,
-        slot_height: u64,
+        slot: u64,
         prev_inserted_blob_datas: &HashMap<(u64, u64), &'a [u8]>,
         mut current_index: u64,
         max_blobs: Option<u64>,
@@ -1196,13 +1177,9 @@ impl Blocktree {
                 break;
             }
             // Try to find the next blob we're looking for in the prev_inserted_blob_datas
-            if let Some(prev_blob_data) =
-                prev_inserted_blob_datas.get(&(slot_height, current_index))
-            {
+            if let Some(prev_blob_data) = prev_inserted_blob_datas.get(&(slot, current_index)) {
                 blobs.push(Cow::Borrowed(*prev_blob_data));
-            } else if let Some(blob_data) =
-                self.data_cf.get_by_slot_index(slot_height, current_index)?
-            {
+            } else if let Some(blob_data) = self.data_cf.get_by_slot_index(slot, current_index)? {
                 // Try to find the next blob we're looking for in the database
                 blobs.push(Cow::Owned(blob_data));
             } else {
@@ -1791,22 +1768,20 @@ pub mod tests {
         // Write entries
         let num_slots = 5 as u64;
         let mut index = 0;
-        for slot_height in 0..num_slots {
-            let entries = make_tiny_test_entries(slot_height as usize + 1);
+        for slot in 0..num_slots {
+            let entries = make_tiny_test_entries(slot as usize + 1);
             let last_entry = entries.last().unwrap().clone();
             let mut blobs = entries.clone().to_blobs();
             for b in blobs.iter_mut() {
                 b.set_index(index);
-                b.set_slot(slot_height as u64);
+                b.set_slot(slot as u64);
                 index += 1;
             }
             blocktree
                 .write_blobs(&blobs)
                 .expect("Expected successful write of blobs");
             assert_eq!(
-                blocktree
-                    .get_slot_entries(slot_height, index - 1, None)
-                    .unwrap(),
+                blocktree.get_slot_entries(slot, index - 1, None).unwrap(),
                 vec![last_entry],
             );
         }
@@ -2003,10 +1978,9 @@ pub mod tests {
         let num_slots = entries_per_slot;
         let mut blobs: Vec<Blob> = vec![];
         let mut missing_blobs = vec![];
-        for slot_height in 1..num_slots + 1 {
-            let (mut slot_blobs, _) =
-                make_slot_entries(slot_height, slot_height - 1, entries_per_slot);
-            let missing_blob = slot_blobs.remove(slot_height as usize - 1);
+        for slot in 1..num_slots + 1 {
+            let (mut slot_blobs, _) = make_slot_entries(slot, slot - 1, entries_per_slot);
+            let missing_blob = slot_blobs.remove(slot as usize - 1);
             blobs.extend(slot_blobs);
             missing_blobs.push(missing_blob);
         }
@@ -2018,8 +1992,8 @@ pub mod tests {
         // Insert a blob for each slot that doesn't make a consecutive block, we
         // should get no updates
         let blobs: Vec<_> = (1..num_slots + 1)
-            .flat_map(|slot_height| {
-                let (mut blob, _) = make_slot_entries(slot_height, slot_height - 1, 1);
+            .flat_map(|slot| {
+                let (mut blob, _) = make_slot_entries(slot, slot - 1, 1);
                 blob[0].set_index(2 * num_slots as u64);
                 blob
             })
@@ -2124,17 +2098,17 @@ pub mod tests {
         // Separate every other slot into two separate vectors
         let mut slots = vec![];
        let mut missing_slots = vec![];
-        for slot_height in 0..num_slots {
+        for slot in 0..num_slots {
             let parent_slot = {
-                if slot_height == 0 {
+                if slot == 0 {
                     0
                 } else {
-                    slot_height - 1
+                    slot - 1
                 }
             };
-            let (slot_blobs, _) = make_slot_entries(slot_height, parent_slot, entries_per_slot);
+            let (slot_blobs, _) = make_slot_entries(slot, parent_slot, entries_per_slot);

-            if slot_height % 2 == 1 {
+            if slot % 2 == 1 {
                 slots.extend(slot_blobs);
             } else {
                 missing_slots.extend(slot_blobs);
@@ -2205,8 +2179,8 @@ pub mod tests {
         let (blobs, _) = make_many_slot_entries(0, num_slots, entries_per_slot);

         // Write the blobs such that every 3rd slot has a gap in the beginning
-        for (slot_height, slot_ticks) in blobs.chunks(entries_per_slot as usize).enumerate() {
-            if slot_height % 3 == 0 {
+        for (slot, slot_ticks) in blobs.chunks(entries_per_slot as usize).enumerate() {
+            if slot % 3 == 0 {
                 blocktree
                     .write_blobs(&slot_ticks[1..entries_per_slot as usize])
                     .unwrap();
@@ -2296,17 +2270,17 @@ pub mod tests {

         // Get blobs for the slot
         slots.shuffle(&mut thread_rng());
-        for slot_height in slots {
-            // Get blobs for the slot "slot_height"
-            let slot_blobs = &mut blobs[(slot_height * entries_per_slot) as usize
-                ..((slot_height + 1) * entries_per_slot) as usize];
+        for slot in slots {
+            // Get blobs for the slot "slot"
+            let slot_blobs = &mut blobs
+                [(slot * entries_per_slot) as usize..((slot + 1) * entries_per_slot) as usize];
             for blob in slot_blobs.iter_mut() {
                 // Get the parent slot of the slot in the tree
                 let slot_parent = {
-                    if slot_height == 0 {
+                    if slot == 0 {
                         0
                     } else {
-                        (slot_height - 1) / branching_factor
+                        (slot - 1) / branching_factor
                     }
                 };
                 blob.set_parent(slot_parent);
@@ -2318,27 +2292,25 @@ pub mod tests {
         // Make sure everything chains correctly
         let last_level =
             (branching_factor.pow(num_tree_levels - 1) - 1) / (branching_factor - 1);
-        for slot_height in 0..num_slots {
-            let slot_meta = blocktree.meta(slot_height).unwrap().unwrap();
+        for slot in 0..num_slots {
+            let slot_meta = blocktree.meta(slot).unwrap().unwrap();
             assert_eq!(slot_meta.consumed, entries_per_slot);
             assert_eq!(slot_meta.received, entries_per_slot);
             let slot_parent = {
-                if slot_height == 0 {
+                if slot == 0 {
                     0
                 } else {
-                    (slot_height - 1) / branching_factor
+                    (slot - 1) / branching_factor
                 }
             };
             assert_eq!(slot_meta.parent_slot, slot_parent);

             let expected_children: HashSet<_> = {
-                if slot_height >= last_level {
+                if slot >= last_level {
                     HashSet::new()
                 } else {
-                    let first_child_slot =
-                        min(num_slots - 1, slot_height * branching_factor + 1);
-                    let last_child_slot =
-                        min(num_slots - 1, (slot_height + 1) * branching_factor);
+                    let first_child_slot = min(num_slots - 1, slot * branching_factor + 1);
+                    let last_child_slot = min(num_slots - 1, (slot + 1) * branching_factor);
                     (first_child_slot..last_child_slot + 1).collect()
                 }
             };
@@ -2402,17 +2374,17 @@ pub mod tests {
         let num_entries = 20 as u64;
         let mut entries = vec![];
         let mut blobs = vec![];
-        for slot_height in 0..num_entries {
+        for slot in 0..num_entries {
             let parent_slot = {
-                if slot_height == 0 {
+                if slot == 0 {
                     0
                 } else {
-                    slot_height - 1
+                    slot - 1
                 }
             };

-            let (mut blob, entry) = make_slot_entries(slot_height, parent_slot, 1);
-            blob[0].set_index(slot_height);
+            let (mut blob, entry) = make_slot_entries(slot, parent_slot, 1);
+            blob[0].set_index(slot);
             blobs.extend(blob);
             entries.extend(entry);
         }
@@ -2451,14 +2423,14 @@ pub mod tests {

     pub fn entries_to_blobs(
         entries: &Vec<Entry>,
-        slot_height: u64,
+        slot: u64,
         parent_slot: u64,
         is_full_slot: bool,
     ) -> Vec<Blob> {
         let mut blobs = entries.clone().to_blobs();
         for (i, b) in blobs.iter_mut().enumerate() {
             b.set_index(i as u64);
-            b.set_slot(slot_height);
+            b.set_slot(slot);
             b.set_parent(parent_slot);
         }
         if is_full_slot {
@@ -2468,27 +2440,26 @@ pub mod tests {
     }

     pub fn make_slot_entries(
-        slot_height: u64,
+        slot: u64,
         parent_slot: u64,
         num_entries: u64,
     ) -> (Vec<Blob>, Vec<Entry>) {
         let entries = make_tiny_test_entries(num_entries as usize);
-        let blobs = entries_to_blobs(&entries, slot_height, parent_slot, true);
+        let blobs = entries_to_blobs(&entries, slot, parent_slot, true);
         (blobs, entries)
     }

     pub fn make_many_slot_entries(
-        start_slot_height: u64,
+        start_slot: u64,
         num_slots: u64,
         entries_per_slot: u64,
     ) -> (Vec<Blob>, Vec<Entry>) {
         let mut blobs = vec![];
         let mut entries = vec![];
-        for slot_height in start_slot_height..start_slot_height + num_slots {
-            let parent_slot = if slot_height == 0 { 0 } else { slot_height - 1 };
+        for slot in start_slot..start_slot + num_slots {
+            let parent_slot = if slot == 0 { 0 } else { slot - 1 };

-            let (slot_blobs, slot_entries) =
-                make_slot_entries(slot_height, parent_slot, entries_per_slot);
+            let (slot_blobs, slot_entries) = make_slot_entries(slot, parent_slot, entries_per_slot);
             blobs.extend(slot_blobs);
             entries.extend(slot_entries);
         }
@ -52,7 +52,6 @@ impl Broadcast {
|
||||||
broadcast_table.truncate(DATA_PLANE_FANOUT);
|
broadcast_table.truncate(DATA_PLANE_FANOUT);
|
||||||
inc_new_counter_info!("broadcast_service-num_peers", broadcast_table.len() + 1);
|
inc_new_counter_info!("broadcast_service-num_peers", broadcast_table.len() + 1);
|
||||||
|
|
||||||
let slot_height = bank.slot();
|
|
||||||
let max_tick_height = (bank.slot() + 1) * bank.ticks_per_slot() - 1;
|
let max_tick_height = (bank.slot() + 1) * bank.ticks_per_slot() - 1;
|
||||||
// TODO: Fix BankingStage/BroadcastStage to operate on `slot` directly instead of
|
// TODO: Fix BankingStage/BroadcastStage to operate on `slot` directly instead of
|
||||||
// `max_tick_height`
|
// `max_tick_height`
|
||||||
|
@ -91,8 +90,7 @@ impl Broadcast {
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
// TODO: blob_index should be slot-relative...
|
index_blobs(&blobs, &self.id, &mut blob_index, bank.slot());
|
||||||
index_blobs(&blobs, &self.id, &mut blob_index, slot_height);
|
|
||||||
let parent = bank.parents().first().map(|bank| bank.slot()).unwrap_or(0);
|
let parent = bank.parents().first().map(|bank| bank.slot()).unwrap_or(0);
|
||||||
for b in blobs.iter() {
|
for b in blobs.iter() {
|
||||||
b.write().unwrap().set_parent(parent);
|
b.write().unwrap().set_parent(parent);
|
||||||
|
|
|
@ -720,26 +720,25 @@ impl ClusterInfo {
|
||||||
orders
|
orders
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn window_index_request_bytes(&self, slot_height: u64, blob_index: u64) -> Result<Vec<u8>> {
|
pub fn window_index_request_bytes(&self, slot: u64, blob_index: u64) -> Result<Vec<u8>> {
|
||||||
let req = Protocol::RequestWindowIndex(self.my_data().clone(), slot_height, blob_index);
|
let req = Protocol::RequestWindowIndex(self.my_data().clone(), slot, blob_index);
|
||||||
let out = serialize(&req)?;
|
let out = serialize(&req)?;
|
||||||
Ok(out)
|
Ok(out)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn window_highest_index_request_bytes(
|
pub fn window_highest_index_request_bytes(
|
||||||
&self,
|
&self,
|
||||||
slot_height: u64,
|
slot: u64,
|
||||||
blob_index: u64,
|
blob_index: u64,
|
||||||
) -> Result<Vec<u8>> {
|
) -> Result<Vec<u8>> {
|
||||||
let req =
|
let req = Protocol::RequestHighestWindowIndex(self.my_data().clone(), slot, blob_index);
|
||||||
Protocol::RequestHighestWindowIndex(self.my_data().clone(), slot_height, blob_index);
|
|
||||||
let out = serialize(&req)?;
|
let out = serialize(&req)?;
|
||||||
Ok(out)
|
Ok(out)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn window_index_request(
|
pub fn window_index_request(
|
||||||
&self,
|
&self,
|
||||||
slot_height: u64,
|
slot: u64,
|
||||||
blob_index: u64,
|
blob_index: u64,
|
||||||
get_highest: bool,
|
get_highest: bool,
|
||||||
) -> Result<(SocketAddr, Vec<u8>)> {
|
) -> Result<(SocketAddr, Vec<u8>)> {
|
||||||
|
@ -753,9 +752,9 @@ impl ClusterInfo {
|
||||||
let addr = valid[n].gossip; // send the request to the peer's gossip port
|
let addr = valid[n].gossip; // send the request to the peer's gossip port
|
||||||
let out = {
|
let out = {
|
||||||
if get_highest {
|
if get_highest {
|
||||||
self.window_highest_index_request_bytes(slot_height, blob_index)?
|
self.window_highest_index_request_bytes(slot, blob_index)?
|
||||||
} else {
|
} else {
|
||||||
self.window_index_request_bytes(slot_height, blob_index)?
|
self.window_index_request_bytes(slot, blob_index)?
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -909,12 +908,12 @@ impl ClusterInfo {
|
||||||
from_addr: &SocketAddr,
|
from_addr: &SocketAddr,
|
||||||
blocktree: Option<&Arc<Blocktree>>,
|
blocktree: Option<&Arc<Blocktree>>,
|
||||||
me: &NodeInfo,
|
me: &NodeInfo,
|
||||||
slot_height: u64,
|
slot: u64,
|
||||||
blob_index: u64,
|
blob_index: u64,
|
||||||
) -> Vec<SharedBlob> {
|
) -> Vec<SharedBlob> {
|
||||||
if let Some(blocktree) = blocktree {
|
if let Some(blocktree) = blocktree {
|
||||||
// Try to find the requested index in one of the slots
|
// Try to find the requested index in one of the slots
|
||||||
let blob = blocktree.get_data_blob(slot_height, blob_index);
|
let blob = blocktree.get_data_blob(slot, blob_index);
|
||||||
|
|
||||||
if let Ok(Some(mut blob)) = blob {
|
if let Ok(Some(mut blob)) = blob {
|
||||||
inc_new_counter_info!("cluster_info-window-request-ledger", 1);
|
inc_new_counter_info!("cluster_info-window-request-ledger", 1);
|
||||||
|
@ -929,7 +928,7 @@ impl ClusterInfo {
|
||||||
"{}: failed RequestWindowIndex {} {} {}",
|
"{}: failed RequestWindowIndex {} {} {}",
|
||||||
me.id,
|
me.id,
|
||||||
from.id,
|
from.id,
|
||||||
slot_height,
|
slot,
|
||||||
blob_index,
|
blob_index,
|
||||||
);
|
);
|
||||||
|
|
||||||
|
@ -939,17 +938,17 @@ impl ClusterInfo {
|
||||||
fn run_highest_window_request(
|
fn run_highest_window_request(
|
||||||
from_addr: &SocketAddr,
|
from_addr: &SocketAddr,
|
||||||
blocktree: Option<&Arc<Blocktree>>,
|
blocktree: Option<&Arc<Blocktree>>,
|
||||||
slot_height: u64,
|
slot: u64,
|
||||||
highest_index: u64,
|
highest_index: u64,
|
||||||
) -> Vec<SharedBlob> {
|
) -> Vec<SharedBlob> {
|
||||||
if let Some(blocktree) = blocktree {
|
if let Some(blocktree) = blocktree {
|
||||||
// Try to find the requested index in one of the slots
|
// Try to find the requested index in one of the slots
|
||||||
let meta = blocktree.meta(slot_height);
|
let meta = blocktree.meta(slot);
|
||||||
|
|
||||||
if let Ok(Some(meta)) = meta {
|
if let Ok(Some(meta)) = meta {
|
||||||
if meta.received > highest_index {
|
if meta.received > highest_index {
|
||||||
// meta.received must be at least 1 by this point
|
// meta.received must be at least 1 by this point
|
||||||
let blob = blocktree.get_data_blob(slot_height, meta.received - 1);
|
let blob = blocktree.get_data_blob(slot, meta.received - 1);
|
||||||
|
|
||||||
if let Ok(Some(mut blob)) = blob {
|
if let Ok(Some(mut blob)) = blob {
|
||||||
blob.meta.set_addr(from_addr);
|
blob.meta.set_addr(from_addr);
|
||||||
|
@ -1082,7 +1081,7 @@ impl ClusterInfo {
|
||||||
me: &Arc<RwLock<Self>>,
|
me: &Arc<RwLock<Self>>,
|
||||||
from: &ContactInfo,
|
from: &ContactInfo,
|
||||||
blocktree: Option<&Arc<Blocktree>>,
|
blocktree: Option<&Arc<Blocktree>>,
|
||||||
slot_height: u64,
|
slot: u64,
|
||||||
blob_index: u64,
|
blob_index: u64,
|
||||||
from_addr: &SocketAddr,
|
from_addr: &SocketAddr,
|
||||||
is_get_highest: bool,
|
is_get_highest: bool,
|
||||||
|
@ -1097,7 +1096,7 @@ impl ClusterInfo {
|
||||||
if from.id == me.read().unwrap().gossip.id {
|
if from.id == me.read().unwrap().gossip.id {
|
||||||
warn!(
|
warn!(
|
||||||
"{}: Ignored received RequestWindowIndex from ME {} {} {} ",
|
"{}: Ignored received RequestWindowIndex from ME {} {} {} ",
|
||||||
self_id, from.id, slot_height, blob_index,
|
self_id, from.id, slot, blob_index,
|
||||||
);
|
);
|
||||||
inc_new_counter_info!("cluster_info-window-request-address-eq", 1);
|
inc_new_counter_info!("cluster_info-window-request-address-eq", 1);
|
||||||
return vec![];
|
return vec![];
|
||||||
|
@ -1107,30 +1106,23 @@ impl ClusterInfo {
|
||||||
let my_info = me.read().unwrap().my_data().clone();
|
let my_info = me.read().unwrap().my_data().clone();
|
||||||
inc_new_counter_info!("cluster_info-window-request-recv", 1);
|
inc_new_counter_info!("cluster_info-window-request-recv", 1);
|
||||||
trace!(
|
trace!(
|
||||||
"{}: received RequestWindowIndex from: {} slot_height: {}, blob_index: {}",
|
"{}: received RequestWindowIndex from: {} slot: {}, blob_index: {}",
|
||||||
self_id,
|
self_id,
|
||||||
from.id,
|
from.id,
|
||||||
slot_height,
|
slot,
|
||||||
blob_index,
|
blob_index,
|
||||||
);
|
);
|
||||||
let res = {
|
let res = {
|
||||||
if is_get_highest {
|
if is_get_highest {
|
||||||
Self::run_highest_window_request(&from_addr, blocktree, slot_height, blob_index)
|
Self::run_highest_window_request(&from_addr, blocktree, slot, blob_index)
|
||||||
} else {
|
} else {
|
||||||
Self::run_window_request(
|
Self::run_window_request(&from, &from_addr, blocktree, &my_info, slot, blob_index)
|
||||||
&from,
|
|
||||||
&from_addr,
|
|
||||||
blocktree,
|
|
||||||
&my_info,
|
|
||||||
slot_height,
|
|
||||||
blob_index,
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
report_time_spent(
|
report_time_spent(
|
||||||
"RequestWindowIndex",
|
"RequestWindowIndex",
|
||||||
&now.elapsed(),
|
&now.elapsed(),
|
||||||
&format!("slot_height {}, blob_index: {}", slot_height, blob_index),
|
&format!("slot {}, blob_index: {}", slot, blob_index),
|
||||||
);
|
);
|
||||||
res
|
res
|
||||||
}
|
}
|
||||||
|
@ -1193,23 +1185,17 @@ impl ClusterInfo {
|
||||||
}
|
}
|
||||||
vec![]
|
vec![]
|
||||||
}
|
}
|
||||||
Protocol::RequestWindowIndex(from, slot_height, blob_index) => {
|
Protocol::RequestWindowIndex(from, slot, blob_index) => {
|
||||||
Self::handle_request_window_index(
|
Self::handle_request_window_index(
|
||||||
me,
|
me, &from, blocktree, slot, blob_index, from_addr, false,
|
||||||
&from,
|
|
||||||
blocktree,
|
|
||||||
slot_height,
|
|
||||||
blob_index,
|
|
||||||
from_addr,
|
|
||||||
false,
|
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
Protocol::RequestHighestWindowIndex(from, slot_height, highest_index) => {
|
Protocol::RequestHighestWindowIndex(from, slot, highest_index) => {
|
||||||
Self::handle_request_window_index(
|
Self::handle_request_window_index(
|
||||||
me,
|
me,
|
||||||
&from,
|
&from,
|
||||||
blocktree,
|
blocktree,
|
||||||
slot_height,
|
slot,
|
||||||
highest_index,
|
highest_index,
|
||||||
from_addr,
|
from_addr,
|
||||||
true,
|
true,
|
||||||
|
|
|
@ -360,10 +360,10 @@ mod test {
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
// For each slot, find all missing indexes in the range [0, num_entries_per_slot * nth]
|
// For each slot, find all missing indexes in the range [0, num_entries_per_slot * nth]
|
||||||
for slot_height in 0..num_slots {
|
for slot in 0..num_slots {
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
blocktree.find_missing_data_indexes(
|
blocktree.find_missing_data_indexes(
|
||||||
slot_height as u64,
|
slot as u64,
|
||||||
0,
|
0,
|
||||||
(num_entries_per_slot * nth) as u64,
|
(num_entries_per_slot * nth) as u64,
|
||||||
num_entries_per_slot * nth as usize
|
num_entries_per_slot * nth as usize
|
||||||
|
@ -373,10 +373,10 @@ mod test {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test with a limit on the number of returned entries
|
// Test with a limit on the number of returned entries
|
||||||
for slot_height in 0..num_slots {
|
for slot in 0..num_slots {
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
blocktree.find_missing_data_indexes(
|
blocktree.find_missing_data_indexes(
|
||||||
slot_height as u64,
|
slot as u64,
|
||||||
0,
|
0,
|
||||||
(num_entries_per_slot * nth) as u64,
|
(num_entries_per_slot * nth) as u64,
|
||||||
num_entries_per_slot * (nth - 1)
|
num_entries_per_slot * (nth - 1)
|
||||||
|
@ -392,10 +392,10 @@ mod test {
|
||||||
expected.extend(extra_entries);
|
expected.extend(extra_entries);
|
||||||
|
|
||||||
// For each slot, find all missing indexes in the range [0, num_entries_per_slot * nth]
|
// For each slot, find all missing indexes in the range [0, num_entries_per_slot * nth]
|
||||||
for slot_height in 0..num_slots {
|
for slot in 0..num_slots {
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
blocktree.find_missing_data_indexes(
|
blocktree.find_missing_data_indexes(
|
||||||
slot_height as u64,
|
slot as u64,
|
||||||
0,
|
0,
|
||||||
(num_entries_per_slot * (nth + 1)) as u64,
|
(num_entries_per_slot * (nth + 1)) as u64,
|
||||||
num_entries_per_slot * (nth + 1),
|
num_entries_per_slot * (nth + 1),
|
||||||
|
@ -441,8 +441,8 @@ mod test {
|
||||||
// Setup the window
|
// Setup the window
|
||||||
let offset = 0;
|
let offset = 0;
|
||||||
let num_blobs = NUM_DATA + 2;
|
let num_blobs = NUM_DATA + 2;
|
||||||
let slot_height = 0;
|
let slot = 0;
|
||||||
let mut window = setup_window_ledger(offset, num_blobs, false, slot_height);
|
let mut window = setup_window_ledger(offset, num_blobs, false, slot);
|
||||||
let end_index = (offset + num_blobs) % window.len();
|
let end_index = (offset + num_blobs) % window.len();
|
||||||
|
|
||||||
// Test erasing a data block and an erasure block
|
// Test erasing a data block and an erasure block
|
||||||
|
@ -466,7 +466,7 @@ mod test {
|
||||||
{
|
{
|
||||||
let data_blobs: Vec<_> = window[erased_index..end_index]
|
let data_blobs: Vec<_> = window[erased_index..end_index]
|
||||||
.iter()
|
.iter()
|
||||||
.map(|slot| slot.data.clone().unwrap())
|
.map(|entry| entry.data.clone().unwrap())
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
let locks: Vec<_> = data_blobs.iter().map(|blob| blob.read().unwrap()).collect();
|
let locks: Vec<_> = data_blobs.iter().map(|blob| blob.read().unwrap()).collect();
|
||||||
|
@ -490,7 +490,7 @@ mod test {
|
||||||
let erased_coding_l = erased_coding.read().unwrap();
|
let erased_coding_l = erased_coding.read().unwrap();
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
&blocktree
|
&blocktree
|
||||||
.get_coding_blob_bytes(slot_height, erased_index as u64)
|
.get_coding_blob_bytes(slot, erased_index as u64)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.unwrap()[BLOB_HEADER_SIZE..],
|
.unwrap()[BLOB_HEADER_SIZE..],
|
||||||
&erased_coding_l.data()[..erased_coding_l.size() as usize],
|
&erased_coding_l.data()[..erased_coding_l.size() as usize],
|
||||||
|
|
|
@ -331,7 +331,7 @@ pub fn make_active_set_entries(
|
||||||
active_keypair: &Arc<Keypair>,
|
active_keypair: &Arc<Keypair>,
|
||||||
token_source: &Keypair,
|
token_source: &Keypair,
|
||||||
stake: u64,
|
stake: u64,
|
||||||
slot_height_to_vote_on: u64,
|
slot_to_vote_on: u64,
|
||||||
blockhash: &Hash,
|
blockhash: &Hash,
|
||||||
num_ending_ticks: u64,
|
num_ending_ticks: u64,
|
||||||
) -> (Vec<Entry>, Keypair) {
|
) -> (Vec<Entry>, Keypair) {
|
||||||
|
@ -360,7 +360,7 @@ pub fn make_active_set_entries(
|
||||||
let new_vote_account_entry = next_entry_mut(&mut last_entry_hash, 1, vec![new_vote_account_tx]);
|
let new_vote_account_entry = next_entry_mut(&mut last_entry_hash, 1, vec![new_vote_account_tx]);
|
||||||
|
|
||||||
// 3) Create vote entry
|
// 3) Create vote entry
|
||||||
let vote_tx = VoteTransaction::new_vote(&voting_keypair, slot_height_to_vote_on, *blockhash, 0);
|
let vote_tx = VoteTransaction::new_vote(&voting_keypair, slot_to_vote_on, *blockhash, 0);
|
||||||
let vote_entry = next_entry_mut(&mut last_entry_hash, 1, vec![vote_tx]);
|
let vote_entry = next_entry_mut(&mut last_entry_hash, 1, vec![vote_tx]);
|
||||||
|
|
||||||
// 4) Create `num_ending_ticks` empty ticks
|
// 4) Create `num_ending_ticks` empty ticks
|
||||||
|
|
|
@ -37,7 +37,7 @@ impl LeaderConfirmationService {
|
||||||
if let Some(stake_and_state) = vote_state
|
if let Some(stake_and_state) = vote_state
|
||||||
.votes
|
.votes
|
||||||
.back()
|
.back()
|
||||||
.map(|vote| (vote.slot_height, account.tokens))
|
.map(|vote| (vote.slot, account.tokens))
|
||||||
{
|
{
|
||||||
slots_and_stakes.push(stake_and_state);
|
slots_and_stakes.push(stake_and_state);
|
||||||
}
|
}
|
||||||
|
@ -116,6 +116,7 @@ mod tests {
|
||||||
use bincode::serialize;
|
use bincode::serialize;
|
||||||
use solana_sdk::genesis_block::GenesisBlock;
|
use solana_sdk::genesis_block::GenesisBlock;
|
||||||
use solana_sdk::hash::hash;
|
use solana_sdk::hash::hash;
|
||||||
|
use solana_sdk::pubkey::Pubkey;
|
||||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||||
use solana_vote_api::vote_transaction::VoteTransaction;
|
use solana_vote_api::vote_transaction::VoteTransaction;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
@ -125,14 +126,22 @@ mod tests {
|
||||||
solana_logger::setup();
|
solana_logger::setup();
|
||||||
|
|
||||||
let (genesis_block, mint_keypair) = GenesisBlock::new(1234);
|
let (genesis_block, mint_keypair) = GenesisBlock::new(1234);
|
||||||
let bank = Arc::new(Bank::new(&genesis_block));
|
let mut tick_hash = genesis_block.hash();
|
||||||
|
|
||||||
|
let mut bank = Arc::new(Bank::new(&genesis_block));
|
||||||
|
|
||||||
// Move the bank up 10 slots
|
// Move the bank up 10 slots
|
||||||
let mut tick_hash = genesis_block.hash();
|
for slot in 1..=10 {
|
||||||
while bank.slot_height() < 10 {
|
let max_tick_height = slot * bank.ticks_per_slot() - 1;
|
||||||
tick_hash = hash(&serialize(&tick_hash).unwrap());
|
|
||||||
bank.register_tick(&tick_hash);
|
while bank.tick_height() != max_tick_height {
|
||||||
|
tick_hash = hash(&serialize(&tick_hash).unwrap());
|
||||||
|
bank.register_tick(&tick_hash);
|
||||||
|
}
|
||||||
|
|
||||||
|
bank = Arc::new(Bank::new_from_parent(&bank, Pubkey::default(), slot));
|
||||||
}
|
}
|
||||||
|
|
||||||
let blockhash = bank.last_blockhash();
|
let blockhash = bank.last_blockhash();
|
||||||
|
|
||||||
// Create a total of 10 vote accounts, each will have a balance of 1 (after giving 1 to
|
// Create a total of 10 vote accounts, each will have a balance of 1 (after giving 1 to
|
||||||
|
|
|
@ -62,7 +62,7 @@ impl RepairService {
|
||||||
let reqs: Vec<_> = repairs
|
let reqs: Vec<_> = repairs
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.filter_map(|repair_request| {
|
.filter_map(|repair_request| {
|
||||||
let (slot_height, blob_index, is_highest_request) = {
|
let (slot, blob_index, is_highest_request) = {
|
||||||
match repair_request {
|
match repair_request {
|
||||||
RepairType::Blob(s, i) => (s, i, false),
|
RepairType::Blob(s, i) => (s, i, false),
|
||||||
RepairType::HighestBlob(s, i) => (s, i, true),
|
RepairType::HighestBlob(s, i) => (s, i, true),
|
||||||
|
@ -71,20 +71,17 @@ impl RepairService {
|
||||||
cluster_info
|
cluster_info
|
||||||
.read()
|
.read()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.window_index_request(slot_height, blob_index, is_highest_request)
|
.window_index_request(slot, blob_index, is_highest_request)
|
||||||
.map(|result| (result, slot_height, blob_index))
|
.map(|result| (result, slot, blob_index))
|
||||||
.ok()
|
.ok()
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
for ((to, req), slot_height, blob_index) in reqs {
|
for ((to, req), slot, blob_index) in reqs {
|
||||||
if let Ok(local_addr) = repair_socket.local_addr() {
|
if let Ok(local_addr) = repair_socket.local_addr() {
|
||||||
submit(
|
submit(
|
||||||
influxdb::Point::new("repair_service")
|
influxdb::Point::new("repair_service")
|
||||||
.add_field(
|
.add_field("repair_slot", influxdb::Value::Integer(slot as i64))
|
||||||
"repair_slot",
|
|
||||||
influxdb::Value::Integer(slot_height as i64),
|
|
||||||
)
|
|
||||||
.to_owned()
|
.to_owned()
|
||||||
.add_field(
|
.add_field(
|
||||||
"repair_blob",
|
"repair_blob",
|
||||||
|
@@ -127,24 +124,24 @@ impl RepairService {
 
    fn process_slot(
        blocktree: &Blocktree,
-        slot_height: u64,
-        slot: &SlotMeta,
+        slot: u64,
+        slot_meta: &SlotMeta,
        max_repairs: usize,
    ) -> Vec<RepairType> {
-        if slot.is_full() {
+        if slot_meta.is_full() {
            vec![]
-        } else if slot.consumed == slot.received {
-            vec![RepairType::HighestBlob(slot_height, slot.received)]
+        } else if slot_meta.consumed == slot_meta.received {
+            vec![RepairType::HighestBlob(slot, slot_meta.received)]
        } else {
            let reqs = blocktree.find_missing_data_indexes(
-                slot_height,
-                slot.consumed,
-                slot.received,
+                slot,
+                slot_meta.consumed,
+                slot_meta.received,
                max_repairs,
            );
 
            reqs.into_iter()
-                .map(|i| RepairType::Blob(slot_height, i))
+                .map(|i| RepairType::Blob(slot, i))
                .collect()
        }
    }
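process_slot turns one slot's metadata into repair requests: a full slot needs nothing, a slot whose consumed count has caught up with received asks for the highest blob at or past received, and otherwise every missing data index becomes a Blob request. The following is a self-contained sketch of that decision only, using stand-in types: SlotInfo and the missing list stand in for SlotMeta and Blocktree::find_missing_data_indexes, which are not reproduced here.

// Stand-ins for SlotMeta / Blocktree queries, for illustration only.
#[derive(Debug, PartialEq)]
enum RepairType {
    Blob(u64, u64),        // (slot, blob index)
    HighestBlob(u64, u64), // (slot, start index)
}

struct SlotInfo {
    consumed: u64,
    received: u64,
    is_full: bool,
}

fn repairs_for_slot(slot: u64, info: &SlotInfo, missing: &[u64]) -> Vec<RepairType> {
    if info.is_full {
        vec![]
    } else if info.consumed == info.received {
        vec![RepairType::HighestBlob(slot, info.received)]
    } else {
        missing.iter().map(|&i| RepairType::Blob(slot, i)).collect()
    }
}

fn main() {
    // Slot 3 has received blobs up to index 7, but indexes 2 and 5 are missing.
    let info = SlotInfo { consumed: 2, received: 8, is_full: false };
    assert_eq!(
        repairs_for_slot(3, &info, &[2, 5]),
        vec![RepairType::Blob(3, 2), RepairType::Blob(3, 5)]
    );
}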
@@ -156,25 +153,25 @@ impl RepairService {
    ) -> Result<(Vec<RepairType>)> {
        // Slot height and blob indexes for blobs we want to repair
        let mut repairs: Vec<RepairType> = vec![];
-        let mut current_slot_height = Some(0);
-        while repairs.len() < max_repairs && current_slot_height.is_some() {
-            if current_slot_height.unwrap() > repair_info.max_slot {
+        let mut current_slot = Some(0);
+        while repairs.len() < max_repairs && current_slot.is_some() {
+            if current_slot.unwrap() > repair_info.max_slot {
                repair_info.repair_tries = 0;
-                repair_info.max_slot = current_slot_height.unwrap();
+                repair_info.max_slot = current_slot.unwrap();
            }
 
-            let slot = blocktree.meta(current_slot_height.unwrap())?;
+            let slot = blocktree.meta(current_slot.unwrap())?;
            if slot.is_some() {
                let slot = slot.unwrap();
                let new_repairs = Self::process_slot(
                    blocktree,
-                    current_slot_height.unwrap(),
+                    current_slot.unwrap(),
                    &slot,
                    max_repairs - repairs.len(),
                );
                repairs.extend(new_repairs);
            }
-            current_slot_height = blocktree.get_next_slot(current_slot_height.unwrap())?;
+            current_slot = blocktree.get_next_slot(current_slot.unwrap())?;
        }
 
        // Only increment repair_tries if the ledger contains every blob for every slot
@@ -308,10 +305,10 @@ mod test {
            .collect();
 
        let expected: Vec<RepairType> = (0..num_slots)
-            .flat_map(|slot_height| {
+            .flat_map(|slot| {
                missing_indexes_per_slot
                    .iter()
-                    .map(move |blob_index| RepairType::Blob(slot_height as u64, *blob_index))
+                    .map(move |blob_index| RepairType::Blob(slot as u64, *blob_index))
            })
            .collect();
 
@@ -114,9 +114,9 @@ pub mod tests {
        bank.process_transaction(&tx).unwrap();
    }
 
-    pub fn push_vote<T: KeypairUtil>(voting_keypair: &T, bank: &Bank, slot_height: u64) {
+    pub fn push_vote<T: KeypairUtil>(voting_keypair: &T, bank: &Bank, slot: u64) {
        let blockhash = bank.last_blockhash();
-        let tx = VoteTransaction::new_vote(voting_keypair, slot_height, blockhash, 0);
+        let tx = VoteTransaction::new_vote(voting_keypair, slot, blockhash, 0);
        bank.process_transaction(&tx).unwrap();
    }
 
@@ -125,9 +125,9 @@ pub mod tests {
        voting_keypair: &T,
        bank: &Bank,
        num_tokens: u64,
-        slot_height: u64,
+        slot: u64,
    ) {
        new_vote_account(from_keypair, &voting_keypair.pubkey(), bank, num_tokens);
-        push_vote(voting_keypair, bank, slot_height);
+        push_vote(voting_keypair, bank, slot);
    }
}
@@ -6,13 +6,13 @@ use solana_sdk::transaction_builder::BuilderInstruction;
#[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Vote {
    // TODO: add signature of the state here as well
-    /// A vote for height slot_height
-    pub slot_height: u64,
+    /// A vote for height slot
+    pub slot: u64,
}
 
impl Vote {
-    pub fn new(slot_height: u64) -> Self {
-        Self { slot_height }
+    pub fn new(slot: u64) -> Self {
+        Self { slot }
    }
}
 
@@ -17,14 +17,14 @@ pub const INITIAL_LOCKOUT: usize = 2;
 
#[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Lockout {
-    pub slot_height: u64,
+    pub slot: u64,
    pub confirmation_count: u32,
}
 
impl Lockout {
    pub fn new(vote: &Vote) -> Self {
        Self {
-            slot_height: vote.slot_height,
+            slot: vote.slot,
            confirmation_count: 1,
        }
    }
@@ -36,8 +36,8 @@ impl Lockout {
 
    // The slot height at which this vote expires (cannot vote for any slot
    // less than this)
-    pub fn expiration_slot_height(&self) -> u64 {
-        self.slot_height + self.lockout()
+    pub fn expiration_slot(&self) -> u64 {
+        self.slot + self.lockout()
    }
}
 
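expiration_slot() is simply the vote's slot plus its current lockout. The body of lockout() is not part of this hunk; the sketch below assumes it grows exponentially as INITIAL_LOCKOUT^confirmation_count (INITIAL_LOCKOUT = 2 appears in the hunk header above), which is how the lockout tower is normally described. The standalone functions are illustrative stand-ins, not the module's API.

// Illustration only: assumes lockout == INITIAL_LOCKOUT^confirmation_count.
const INITIAL_LOCKOUT: u64 = 2;

fn lockout(confirmation_count: u32) -> u64 {
    INITIAL_LOCKOUT.pow(confirmation_count)
}

fn expiration_slot(slot: u64, confirmation_count: u32) -> u64 {
    slot + lockout(confirmation_count)
}

fn main() {
    // Under that assumption, a vote at slot 10 confirmed 3 times expires at
    // slot 10 + 2^3 = 18; no vote for a slot below 18 may displace it.
    assert_eq!(expiration_slot(10, 3), 18);
    // A freshly-created Lockout starts with confirmation_count = 1 (see
    // Lockout::new above), so it expires at slot + 2.
    assert_eq!(expiration_slot(10, 1), 12);
}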
@@ -87,7 +87,7 @@ impl VoteState {
        if self
            .votes
            .back()
-            .map_or(false, |old_vote| old_vote.slot_height >= vote.slot_height)
+            .map_or(false, |old_vote| old_vote.slot >= vote.slot)
        {
            return;
        }
@@ -97,11 +97,11 @@ impl VoteState {
        // TODO: Integrity checks
        // Verify the vote's bank hash matches what is expected
 
-        self.pop_expired_votes(vote.slot_height);
+        self.pop_expired_votes(vote.slot);
        // Once the stack is full, pop the oldest vote and distribute rewards
        if self.votes.len() == MAX_LOCKOUT_HISTORY {
            let vote = self.votes.pop_front().unwrap();
-            self.root_slot = Some(vote.slot_height);
+            self.root_slot = Some(vote.slot);
            self.credits += 1;
        }
        self.votes.push_back(vote);
@@ -119,12 +119,12 @@ impl VoteState {
        self.credits = 0;
    }
 
-    fn pop_expired_votes(&mut self, slot_height: u64) {
+    fn pop_expired_votes(&mut self, slot: u64) {
        loop {
            if self
                .votes
                .back()
-                .map_or(false, |v| v.expiration_slot_height() < slot_height)
+                .map_or(false, |v| v.expiration_slot() < slot)
            {
                self.votes.pop_back();
            } else {
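pop_expired_votes treats the vote tower as a stack: before a new vote is pushed, every entry at the back whose expiration_slot is below the incoming slot is discarded. A self-contained sketch of just that pruning step follows, with a stand-in Lockout type whose expiration_slot is stored directly rather than computed from a lockout.

use std::collections::VecDeque;

// Stand-in for vote_state::Lockout, for illustration only.
#[derive(Debug, PartialEq)]
struct Lockout {
    slot: u64,
    expiration_slot: u64,
}

// Mirrors pop_expired_votes: drop back entries that expire before `slot`.
fn pop_expired_votes(votes: &mut VecDeque<Lockout>, slot: u64) {
    while votes
        .back()
        .map_or(false, |v| v.expiration_slot < slot)
    {
        votes.pop_back();
    }
}

fn main() {
    let mut votes: VecDeque<Lockout> = VecDeque::new();
    votes.push_back(Lockout { slot: 1, expiration_slot: 9 });
    votes.push_back(Lockout { slot: 2, expiration_slot: 6 });
    votes.push_back(Lockout { slot: 3, expiration_slot: 5 });

    // A vote for slot 7 expires the two most recent entries but not the first.
    pop_expired_votes(&mut votes, 7);
    assert_eq!(votes.len(), 1);
    assert_eq!(votes.front().unwrap().slot, 1);
}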
@@ -357,14 +357,14 @@ mod tests {
        // One more vote that confirms the entire stack,
        // the root_slot should change to the
        // second vote
-        let top_vote = vote_state.votes.front().unwrap().slot_height;
+        let top_vote = vote_state.votes.front().unwrap().slot;
        vote_state.process_vote(Vote::new(
-            vote_state.votes.back().unwrap().expiration_slot_height(),
+            vote_state.votes.back().unwrap().expiration_slot(),
        ));
        assert_eq!(Some(top_vote), vote_state.root_slot);
 
        // Expire everything except the first vote
-        let vote = Vote::new(vote_state.votes.front().unwrap().expiration_slot_height());
+        let vote = Vote::new(vote_state.votes.front().unwrap().expiration_slot());
        vote_state.process_vote(vote);
        // First vote and new vote are both stored for a total of 2 votes
        assert_eq!(vote_state.votes.len(), 2);
@@ -17,11 +17,11 @@ pub struct VoteTransaction {}
impl VoteTransaction {
    pub fn new_vote<T: KeypairUtil>(
        voting_keypair: &T,
-        slot_height: u64,
+        slot: u64,
        recent_blockhash: Hash,
        fee: u64,
    ) -> Transaction {
-        let vote = Vote { slot_height };
+        let vote = Vote { slot };
        TransactionBuilder::new(fee)
            .push(VoteInstruction::new_vote(voting_keypair.pubkey(), vote))
            .sign(&[voting_keypair], recent_blockhash)
@@ -94,12 +94,12 @@ mod tests {
    #[test]
    fn test_get_votes() {
        let keypair = Keypair::new();
-        let slot_height = 1;
+        let slot = 1;
        let recent_blockhash = Hash::default();
-        let transaction = VoteTransaction::new_vote(&keypair, slot_height, recent_blockhash, 0);
+        let transaction = VoteTransaction::new_vote(&keypair, slot, recent_blockhash, 0);
        assert_eq!(
            VoteTransaction::get_votes(&transaction),
-            vec![(keypair.pubkey(), Vote::new(slot_height), recent_blockhash)]
+            vec![(keypair.pubkey(), Vote::new(slot), recent_blockhash)]
        );
    }
}
@@ -342,7 +342,7 @@ impl Bank {
        // Sort by slot height
        slots_and_stakes.sort_by(|a, b| a.0.cmp(&b.0));
 
-        let max_slot = self.slot_height();
+        let max_slot = self.slot();
        let min_slot = max_slot.saturating_sub(MAX_RECENT_BLOCKHASHES as u64);
 
        let mut total_stake = 0;
@@ -757,7 +757,7 @@ impl Bank {
        self.stakers_slot_offset
    }
 
-    /// Return the number of ticks per slot that should be used calls to slot_height().
+    /// Return the number of ticks per slot
    pub fn ticks_per_slot(&self) -> u64 {
        self.ticks_per_slot
    }
@@ -776,11 +776,6 @@ impl Bank {
        self.tick_height() % self.ticks_per_slot()
    }
 
-    /// Return the slot_height of the last registered tick.
-    pub fn slot_height(&self) -> u64 {
-        self.tick_height() / self.ticks_per_slot()
-    }
-
    /// Return the number of slots per tick.
    pub fn slots_per_epoch(&self) -> u64 {
        self.slots_per_epoch
@@ -804,20 +799,22 @@ impl Bank {
 
    /// Return the number of slots since the last epoch boundary.
    pub fn slot_index(&self) -> u64 {
-        self.slot_height() % self.slots_per_epoch()
+        self.slot() % self.slots_per_epoch()
    }
 
    /// Return the epoch height of the last registered tick.
    pub fn epoch_height(&self) -> u64 {
-        self.slot_height() / self.slots_per_epoch()
+        self.slot() / self.slots_per_epoch()
    }
}
 
#[cfg(test)]
mod tests {
    use super::*;
+    use bincode::serialize;
    use hashbrown::HashSet;
-    use solana_sdk::genesis_block::BOOTSTRAP_LEADER_TOKENS;
+    use solana_sdk::genesis_block::{GenesisBlock, BOOTSTRAP_LEADER_TOKENS};
+    use solana_sdk::hash::hash;
    use solana_sdk::native_program::ProgramError;
    use solana_sdk::signature::{Keypair, KeypairUtil};
    use solana_sdk::system_instruction::SystemInstruction;
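With slot_height() removed, the epoch arithmetic hangs directly off bank.slot(): slot_index() is slot % slots_per_epoch and epoch_height() is slot / slots_per_epoch, while tick_index() remains tick_height % ticks_per_slot. A worked example with made-up parameters (these are not the defaults):

fn main() {
    // Made-up parameters, for illustration only.
    let ticks_per_slot: u64 = 8;
    let slots_per_epoch: u64 = 64;

    // A bank at slot 130, three ticks into its slot (assuming no skipped
    // slots in its ancestry, so tick_height = slot * ticks_per_slot + 3).
    let slot: u64 = 130;
    let tick_height: u64 = slot * ticks_per_slot + 3;

    let tick_index = tick_height % ticks_per_slot; // 3 ticks into the slot
    let slot_index = slot % slots_per_epoch; // 130 % 64 = 2 slots into the epoch
    let epoch_height = slot / slots_per_epoch; // 130 / 64 = epoch 2

    assert_eq!((tick_index, slot_index, epoch_height), (3, 2, 2));
}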
@@ -1163,9 +1160,20 @@ mod tests {
    }
 
    // Register n ticks and return the tick, slot and epoch indexes.
-    fn register_ticks(bank: &Bank, n: u64) -> (u64, u64, u64) {
+    fn register_ticks(bank: &mut Arc<Bank>, tick_hash: &mut Hash, n: u64) -> (u64, u64, u64) {
+        let mut max_tick_height = (bank.slot() + 1) * bank.ticks_per_slot() - 1;
        for _ in 0..n {
-            bank.register_tick(&Hash::default());
+            if bank.tick_height() == max_tick_height {
+                *bank = Arc::new(Bank::new_from_parent(
+                    &bank,
+                    Pubkey::default(),
+                    bank.slot() + 1,
+                ));
+                max_tick_height = (bank.slot() + 1) * bank.ticks_per_slot() - 1;
+            }
+
+            *tick_hash = hash(&serialize(tick_hash).unwrap());
+            bank.register_tick(&tick_hash);
        }
        (bank.tick_index(), bank.slot_index(), bank.epoch_height())
    }
@@ -1173,25 +1181,29 @@ mod tests {
    #[test]
    fn test_tick_slot_epoch_indexes() {
        let (genesis_block, _) = GenesisBlock::new(5);
-        let bank = Bank::new(&genesis_block);
+        let mut tick_hash = genesis_block.hash();
+        let mut bank = Arc::new(Bank::new(&genesis_block));
        let ticks_per_slot = bank.ticks_per_slot();
        let slots_per_epoch = bank.slots_per_epoch();
        let ticks_per_epoch = ticks_per_slot * slots_per_epoch;
 
        // All indexes are zero-based.
-        assert_eq!(register_ticks(&bank, 0), (0, 0, 0));
+        assert_eq!(register_ticks(&mut bank, &mut tick_hash, 0), (0, 0, 0));
 
        // Slot index remains zero through the last tick.
        assert_eq!(
-            register_ticks(&bank, ticks_per_slot - 1),
+            register_ticks(&mut bank, &mut tick_hash, ticks_per_slot - 1),
            (ticks_per_slot - 1, 0, 0)
        );
 
        // Cross a slot boundary.
-        assert_eq!(register_ticks(&bank, 1), (0, 1, 0));
+        assert_eq!(register_ticks(&mut bank, &mut tick_hash, 1), (0, 1, 0));
 
        // Cross an epoch boundary.
-        assert_eq!(register_ticks(&bank, ticks_per_epoch), (0, 1, 1));
+        assert_eq!(
+            register_ticks(&mut bank, &mut tick_hash, ticks_per_epoch),
+            (0, 1, 1)
+        );
    }
 
    #[test]