diff --git a/bucket_map/src/bucket.rs b/bucket_map/src/bucket.rs
index 1761adcef..922653957 100644
--- a/bucket_map/src/bucket.rs
+++ b/bucket_map/src/bucket.rs
@@ -13,7 +13,6 @@ use {
     std::{
         collections::hash_map::DefaultHasher,
         hash::{Hash, Hasher},
-        marker::PhantomData,
         ops::RangeBounds,
         path::PathBuf,
         sync::{
@@ -86,7 +85,6 @@ pub struct Bucket<T: 'static> {
     random: u64,
     //storage buckets to store SlotSlice up to a power of 2 in len
     pub data: Vec<BucketStorage<DataBucket>>,
-    _phantom: PhantomData<T>,
     stats: Arc<BucketMapStats>,
 
     pub reallocated: Reallocated<IndexBucket<T>, DataBucket>,
@@ -102,7 +100,7 @@ impl<'b, T: Clone + Copy + 'static> Bucket<T> {
         let index = BucketStorage::new(
             Arc::clone(&drives),
             1,
-            std::mem::size_of::<IndexEntry>() as u64,
+            std::mem::size_of::<IndexEntry<T>>() as u64,
             max_search,
             Arc::clone(&stats.index),
             count,
@@ -114,7 +112,6 @@ impl<'b, T: Clone + Copy + 'static> Bucket<T> {
             drives,
             index,
             data: vec![],
-            _phantom: PhantomData,
             stats,
             reallocated: Reallocated::default(),
         }
@@ -126,7 +123,7 @@ impl<'b, T: Clone + Copy + 'static> Bucket<T> {
             if self.index.is_free(i) {
                 continue;
             }
-            let ix: &IndexEntry = self.index.get(i);
+            let ix: &IndexEntry<T> = self.index.get(i);
             rv.push(ix.key);
         }
         rv
@@ -268,7 +265,7 @@ impl<'b, T: Clone + Copy + 'static> Bucket<T> {
         data_len: usize,
         ref_count: RefCount,
     ) -> Result<(), BucketMapError> {
-        let best_fit_bucket = IndexEntry::data_bucket_from_num_slots(data_len as u64);
+        let best_fit_bucket = IndexEntry::<T>::data_bucket_from_num_slots(data_len as u64);
         if self.data.get(best_fit_bucket as usize).is_none() {
             // fail early if the data bucket we need doesn't exist - we don't want the index entry partially allocated
             return Err(BucketMapError::DataNoSpace((best_fit_bucket, 0)));
@@ -375,7 +372,7 @@ impl<'b, T: Clone + Copy + 'static> Bucket<T> {
             let mut index = BucketStorage::new_with_capacity(
                 Arc::clone(&self.drives),
                 1,
-                std::mem::size_of::<IndexEntry>() as u64,
+                std::mem::size_of::<IndexEntry<T>>() as u64,
                 // *2 causes rapid growth of index buckets
                 self.index.capacity_pow2 + i, // * 2,
                 self.index.max_search,
@@ -386,14 +383,14 @@ impl<'b, T: Clone + Copy + 'static> Bucket<T> {
             let mut valid = true;
             for ix in 0..self.index.capacity() {
                 if !self.index.is_free(ix) {
-                    let elem: &IndexEntry = self.index.get(ix);
+                    let elem: &IndexEntry<T> = self.index.get(ix);
                     let new_ix = Self::bucket_create_key(&mut index, &elem.key, random, true);
                     if new_ix.is_err() {
                         valid = false;
                         break;
                     }
                     let new_ix = new_ix.unwrap();
-                    let new_elem: &mut IndexEntry = index.get_mut(new_ix);
+                    let new_elem: &mut IndexEntry<T> = index.get_mut(new_ix);
                     *new_elem = *elem;
                     /*
                     let dbg_elem: IndexEntry = *new_elem;
diff --git a/bucket_map/src/index_entry.rs b/bucket_map/src/index_entry.rs
index 8507de8b6..9d9daba31 100644
--- a/bucket_map/src/index_entry.rs
+++ b/bucket_map/src/index_entry.rs
@@ -68,12 +68,13 @@ pub struct IndexEntryPlaceInBucket<T: 'static> {
 #[derive(Debug, Copy, Clone, PartialEq, Eq)]
 // one instance of this per item in the index
 // stored in the index bucket
-pub struct IndexEntry {
+pub struct IndexEntry<T: 'static> {
     pub key: Pubkey, // can this be smaller if we have reduced the keys into buckets already?
     ref_count: RefCount, // can this be smaller? Do we ever need more than 4B refcounts?
     storage_cap_and_offset: PackedStorage, // if the bucket doubled, the index can be recomputed using create_bucket_capacity_pow2
     num_slots: Slot, // can this be smaller? epoch size should ~ be the max len. this is the num elements in the slot list
+    _phantom: PhantomData<&'static T>,
 }
 
 /// Pack the storage offset and capacity-when-crated-pow2 fields into a single u64
@@ -85,7 +86,7 @@ struct PackedStorage {
     offset: B56,
 }
 
-impl IndexEntry {
+impl<T: 'static> IndexEntry<T> {
     /// return closest bucket index fit for the slot slice.
     /// Since bucket size is 2^index, the return value is
     /// min index, such that 2^index >= num_slots
@@ -103,7 +104,7 @@ impl<T: 'static> IndexEntryPlaceInBucket<T> {
     pub fn init(&self, index_bucket: &mut BucketStorage<IndexBucket<T>>, pubkey: &Pubkey) {
-        let index_entry = index_bucket.get_mut::<IndexEntry>(self.ix);
+        let index_entry = index_bucket.get_mut::<IndexEntry<T>>(self.ix);
         index_entry.key = *pubkey;
         index_entry.ref_count = 0;
         index_entry.storage_cap_and_offset = PackedStorage::default();
@@ -116,7 +117,7 @@ impl<T: 'static> IndexEntryPlaceInBucket<T> {
         storage_capacity_when_created_pow2: u8,
     ) {
         index_bucket
-            .get_mut::<IndexEntry>(self.ix)
+            .get_mut::<IndexEntry<T>>(self.ix)
             .storage_cap_and_offset
             .set_capacity_when_created_pow2(storage_capacity_when_created_pow2)
     }
@@ -127,18 +128,18 @@ impl<T: 'static> IndexEntryPlaceInBucket<T> {
         storage_offset: u64,
     ) {
         index_bucket
-            .get_mut::<IndexEntry>(self.ix)
+            .get_mut::<IndexEntry<T>>(self.ix)
             .storage_cap_and_offset
             .set_offset_checked(storage_offset)
             .expect("New storage offset must fit into 7 bytes!");
     }
 
     pub fn data_bucket_ix(&self, index_bucket: &BucketStorage<IndexBucket<T>>) -> u64 {
-        IndexEntry::data_bucket_from_num_slots(self.num_slots(index_bucket))
+        IndexEntry::<T>::data_bucket_from_num_slots(self.num_slots(index_bucket))
     }
 
     pub fn ref_count(&self, index_bucket: &BucketStorage<IndexBucket<T>>) -> RefCount {
-        let index_entry = index_bucket.get::<IndexEntry>(self.ix);
+        let index_entry = index_bucket.get::<IndexEntry<T>>(self.ix);
         index_entry.ref_count
     }
 
@@ -146,7 +147,7 @@ impl<T: 'static> IndexEntryPlaceInBucket<T> {
         &self,
         index_bucket: &BucketStorage<IndexBucket<T>>,
     ) -> u8 {
-        let index_entry = index_bucket.get::<IndexEntry>(self.ix);
+        let index_entry = index_bucket.get::<IndexEntry<T>>(self.ix);
         index_entry
             .storage_cap_and_offset
             .capacity_when_created_pow2()
@@ -154,7 +155,7 @@ impl<T: 'static> IndexEntryPlaceInBucket<T> {
 
     pub fn storage_offset(&self, index_bucket: &BucketStorage<IndexBucket<T>>) -> u64 {
         index_bucket
-            .get::<IndexEntry>(self.ix)
+            .get::<IndexEntry<T>>(self.ix)
             .storage_cap_and_offset
             .offset()
     }
@@ -166,7 +167,7 @@ impl<T: 'static> IndexEntryPlaceInBucket<T> {
         index_bucket: &BucketStorage<IndexBucket<T>>,
         storage: &BucketStorage<DataBucket>,
     ) -> u64 {
-        let index_entry = index_bucket.get::<IndexEntry>(self.ix);
+        let index_entry = index_bucket.get::<IndexEntry<T>>(self.ix);
         self.storage_offset(index_bucket)
             << (storage.capacity_pow2
                 - index_entry
@@ -201,7 +202,7 @@ impl<T: 'static> IndexEntryPlaceInBucket<T> {
     }
 
     pub fn key<'a>(&self, index_bucket: &'a BucketStorage<IndexBucket<T>>) -> &'a Pubkey {
-        let entry: &IndexEntry = index_bucket.get(self.ix);
+        let entry: &IndexEntry<T> = index_bucket.get(self.ix);
         &entry.key
     }
 
@@ -210,16 +211,16 @@ impl<T: 'static> IndexEntryPlaceInBucket<T> {
         index_bucket: &mut BucketStorage<IndexBucket<T>>,
         ref_count: RefCount,
     ) {
-        let index_entry = index_bucket.get_mut::<IndexEntry>(self.ix);
+        let index_entry = index_bucket.get_mut::<IndexEntry<T>>(self.ix);
         index_entry.ref_count = ref_count;
     }
 
     pub fn num_slots(&self, index_bucket: &BucketStorage<IndexBucket<T>>) -> Slot {
-        index_bucket.get::<IndexEntry>(self.ix).num_slots
+        index_bucket.get::<IndexEntry<T>>(self.ix).num_slots
    }
 
     pub fn set_num_slots(&self, index_bucket: &mut BucketStorage<IndexBucket<T>>, num_slots: Slot) {
-        index_bucket.get_mut::<IndexEntry>(self.ix).num_slots = num_slots;
+        index_bucket.get_mut::<IndexEntry<T>>(self.ix).num_slots = num_slots;
     }
 }
 
@@ -231,13 +232,14 @@ mod tests {
         tempfile::tempdir,
     };
 
-    impl IndexEntry {
+    impl<T: 'static> IndexEntry<T> {
         pub fn new(key: Pubkey) -> Self {
             IndexEntry {
                 key,
                 ref_count: 0,
                 storage_cap_and_offset: PackedStorage::default(),
                 num_slots: 0,
+                _phantom: PhantomData,
             }
         }
     }
@@ -267,7 +269,7 @@ mod tests {
     #[test]
     fn test_size() {
         assert_eq!(std::mem::size_of::<PackedStorage>(), 1 + 7);
-        assert_eq!(std::mem::size_of::<IndexEntry>(), 32 + 8 + 8 + 8);
+        assert_eq!(std::mem::size_of::<IndexEntry<u64>>(), 32 + 8 + 8 + 8);
     }
 
     fn index_bucket_for_testing() -> BucketStorage<IndexBucket<u64>> {
@@ -279,7 +281,7 @@ mod tests {
         BucketStorage::<IndexBucket<u64>>::new(
             Arc::new(paths),
             1,
-            std::mem::size_of::<IndexEntry>() as u64,
+            std::mem::size_of::<IndexEntry<u64>>() as u64,
             1,
             Arc::default(),
             Arc::default(),
@@ -305,17 +307,20 @@ mod tests {
     fn test_data_bucket_from_num_slots() {
         for n in 0..512 {
             assert_eq!(
-                IndexEntry::data_bucket_from_num_slots(n),
+                IndexEntry::<u64>::data_bucket_from_num_slots(n),
                 (n as f64).log2().ceil() as u64
             );
         }
-        assert_eq!(IndexEntry::data_bucket_from_num_slots(u32::MAX as u64), 32);
         assert_eq!(
-            IndexEntry::data_bucket_from_num_slots(u32::MAX as u64 + 1),
+            IndexEntry::<u64>::data_bucket_from_num_slots(u32::MAX as u64),
             32
         );
         assert_eq!(
-            IndexEntry::data_bucket_from_num_slots(u32::MAX as u64 + 2),
+            IndexEntry::<u64>::data_bucket_from_num_slots(u32::MAX as u64 + 1),
+            32
+        );
+        assert_eq!(
+            IndexEntry::<u64>::data_bucket_from_num_slots(u32::MAX as u64 + 2),
             33
         );
     }
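For context, the pattern this patch applies is a zero-sized `PhantomData<&'static T>` marker: it lets `IndexEntry` carry the bucket map's value type `T` without adding any bytes to the entry layout, which is why `test_size` still asserts `32 + 8 + 8 + 8`. Below is a minimal standalone sketch of that technique; it is illustrative only and not code from this repository (`Entry` and its fields are made-up stand-ins for `IndexEntry`'s fields):

```rust
use std::marker::PhantomData;

/// Stand-in for a fixed-layout index record. `T` only tags the entry with the
/// value type it refers to; the marker field contributes no bytes.
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Entry<T: 'static> {
    key: [u8; 32], // plays the role of `Pubkey`
    ref_count: u64,
    packed: u64, // plays the role of `PackedStorage`
    num_slots: u64,
    _phantom: PhantomData<&'static T>, // zero-sized: layout is unchanged
}

impl<T: 'static> Entry<T> {
    fn new(key: [u8; 32]) -> Self {
        Self {
            key,
            ref_count: 0,
            packed: 0,
            num_slots: 0,
            _phantom: PhantomData,
        }
    }
}

fn main() {
    // The marker adds no size: the generic entry is still 32 + 8 + 8 + 8 bytes.
    assert_eq!(std::mem::size_of::<Entry<u64>>(), 32 + 8 + 8 + 8);
    // Callers now name the value type with a turbofish, mirroring the
    // `IndexEntry::<T>::...` and `size_of::<IndexEntry<T>>()` changes above.
    let e = Entry::<u64>::new([0u8; 32]);
    assert_eq!(e.num_slots, 0);
}
```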