uses first_coding_index for erasure meta obtained from coding shreds (#23974)

Now that nodes correctly populate the position field in coding shreds, and
the first_coding_index field in erasure meta, the old code maintained for
backward compatibility can be removed.
This commit works towards changing the erasure coding schema to 32:64.
This commit is contained in:
behzad nouri 2022-03-30 13:55:11 +00:00 committed by GitHub
parent 5636570d6d
commit cda3d66b21
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 17 additions and 30 deletions

View File

@ -1108,10 +1108,9 @@ impl Blockstore {
} }
fn erasure_mismatch(shred1: &Shred, shred2: &Shred) -> bool { fn erasure_mismatch(shred1: &Shred, shred2: &Shred) -> bool {
// TODO should also compare first-coding-index once position field is
// populated across cluster.
shred1.coding_header.num_coding_shreds != shred2.coding_header.num_coding_shreds shred1.coding_header.num_coding_shreds != shred2.coding_header.num_coding_shreds
|| shred1.coding_header.num_data_shreds != shred2.coding_header.num_data_shreds || shred1.coding_header.num_data_shreds != shred2.coding_header.num_data_shreds
|| shred1.first_coding_index() != shred2.first_coding_index()
} }
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
@ -6094,7 +6093,7 @@ pub mod tests {
); );
coding_shred.common_header.fec_set_index = std::u32::MAX - 1; coding_shred.common_header.fec_set_index = std::u32::MAX - 1;
coding_shred.coding_header.num_data_shreds = 2; coding_shred.coding_header.num_data_shreds = 2;
coding_shred.coding_header.num_coding_shreds = 3; coding_shred.coding_header.num_coding_shreds = 4;
coding_shred.coding_header.position = 1; coding_shred.coding_header.position = 1;
coding_shred.common_header.index = std::u32::MAX - 1; coding_shred.common_header.index = std::u32::MAX - 1;
assert!(!Blockstore::should_insert_coding_shred( assert!(!Blockstore::should_insert_coding_shred(

View File

@ -257,10 +257,6 @@ impl ErasureMeta {
None => return false, None => return false,
}; };
other.__unused_size = self.__unused_size; other.__unused_size = self.__unused_size;
// Ignore first_coding_index field for now to be backward compatible.
// TODO remove this once cluster is upgraded to always populate
// first_coding_index field.
other.first_coding_index = self.first_coding_index;
self == &other self == &other
} }
@ -275,16 +271,7 @@ impl ErasureMeta {
pub(crate) fn coding_shreds_indices(&self) -> Range<u64> { pub(crate) fn coding_shreds_indices(&self) -> Range<u64> {
let num_coding = self.config.num_coding() as u64; let num_coding = self.config.num_coding() as u64;
// first_coding_index == 0 may imply that the field is not populated. self.first_coding_index..self.first_coding_index + num_coding
// self.set_index to be backward compatible.
// TODO remove this once cluster is upgraded to always populate
// first_coding_index field.
let first_coding_index = if self.first_coding_index == 0 {
self.set_index
} else {
self.first_coding_index
};
first_coding_index..first_coding_index + num_coding
} }
pub(crate) fn status(&self, index: &Index) -> ErasureMetaStatus { pub(crate) fn status(&self, index: &Index) -> ErasureMetaStatus {

View File

@ -504,9 +504,10 @@ impl Shred {
pub(crate) fn first_coding_index(&self) -> Option<u32> { pub(crate) fn first_coding_index(&self) -> Option<u32> {
match self.shred_type() { match self.shred_type() {
ShredType::Data => None, ShredType::Data => None,
// TODO should be: self.index() - self.coding_header.position ShredType::Code => {
// once position field is populated. let position = u32::from(self.coding_header.position);
ShredType::Code => Some(self.fec_set_index()), self.index().checked_sub(position)
}
} }
} }
@ -536,25 +537,25 @@ impl Shred {
// Returns the block index within the erasure coding set. // Returns the block index within the erasure coding set.
fn erasure_block_index(&self) -> Option<usize> { fn erasure_block_index(&self) -> Option<usize> {
let index = self.index().checked_sub(self.fec_set_index())?;
let index = usize::try_from(index).ok()?;
match self.shred_type() { match self.shred_type() {
ShredType::Data => Some(index), ShredType::Data => {
let index = self.index().checked_sub(self.fec_set_index())?;
usize::try_from(index).ok()
}
ShredType::Code => { ShredType::Code => {
// TODO should use first_coding_index once position field is
// populated.
// Assert that the last shred index in the erasure set does not // Assert that the last shred index in the erasure set does not
// overshoot u32. // overshoot u32.
self.fec_set_index().checked_add(u32::from( self.fec_set_index().checked_add(u32::from(
self.coding_header self.coding_header.num_data_shreds.checked_sub(1)?,
.num_data_shreds ))?;
.max(self.coding_header.num_coding_shreds) self.first_coding_index()?.checked_add(u32::from(
.checked_sub(1)?, self.coding_header.num_coding_shreds.checked_sub(1)?,
))?; ))?;
let num_data_shreds = usize::from(self.coding_header.num_data_shreds); let num_data_shreds = usize::from(self.coding_header.num_data_shreds);
let num_coding_shreds = usize::from(self.coding_header.num_coding_shreds); let num_coding_shreds = usize::from(self.coding_header.num_coding_shreds);
let position = usize::from(self.coding_header.position);
let fec_set_size = num_data_shreds.checked_add(num_coding_shreds)?; let fec_set_size = num_data_shreds.checked_add(num_coding_shreds)?;
let index = index.checked_add(num_data_shreds)?; let index = position.checked_add(num_data_shreds)?;
(index < fec_set_size).then(|| index) (index < fec_set_size).then(|| index)
} }
} }