adds Merkle shred variant with retransmitter's signature (#35293)

Moving towards locking down the Turbine propagation path, this commit reserves a buffer within the shred payload for the retransmitter's signature.
behzad nouri 2024-02-28 20:31:40 +00:00 committed by GitHub
parent 98ec72e6ed
commit a7a41e7631
5 changed files with 224 additions and 81 deletions
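Before the per-file diffs, a note on what the new variant encodes: the shred-variant byte keeps the merkle proof size in its low 4 bits, while the high bits distinguish code vs. data shreds, the chained flag, and now the resigned flag (a shred that carries a retransmitter's signature at the tail of its payload). A minimal, self-contained Rust sketch of that byte encoding, mirroring the shred.rs hunks below (illustrative names only, not the crate's actual items):

// Illustrative stand-in for the variant-byte encoding added in this commit;
// the real enum lives in shred.rs and also carries the Legacy variants.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Variant {
    MerkleCode { proof_size: u8, chained: bool, resigned: bool }, // 0b01??_????
    MerkleData { proof_size: u8, chained: bool, resigned: bool }, // 0b10??_????
}

// Low 4 bits hold proof_size; high bits select code/data, chained, resigned.
fn to_byte(v: Variant) -> u8 {
    match v {
        Variant::MerkleCode { proof_size, chained: false, resigned: false } => proof_size | 0x40,
        Variant::MerkleCode { proof_size, chained: true, resigned: false } => proof_size | 0x60,
        Variant::MerkleCode { proof_size, chained: true, resigned: true } => proof_size | 0x70,
        Variant::MerkleData { proof_size, chained: false, resigned: false } => proof_size | 0x80,
        Variant::MerkleData { proof_size, chained: true, resigned: false } => proof_size | 0x90,
        Variant::MerkleData { proof_size, chained: true, resigned: true } => proof_size | 0xb0,
        // resigned implies chained; the remaining combinations are invalid.
        _ => panic!("invalid shred variant: {v:?}"),
    }
}

fn main() {
    // A chained, resigned data shred with a 6-entry merkle proof.
    assert_eq!(
        to_byte(Variant::MerkleData { proof_size: 6, chained: true, resigned: true }),
        0xb6,
    );
    assert_eq!(
        to_byte(Variant::MerkleCode { proof_size: 6, chained: false, resigned: false }),
        0x46,
    );
}

Resigned without chained is invalid; in the diff the From impl panics on those combinations and the TryFrom impl returns Error::InvalidShredVariant.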

View File

@@ -270,7 +270,7 @@ pub mod test {
             &mut processed_slots,
             1,
         );
-        assert_eq!(repairs, [ShredRepairType::Shred(1, 3)]);
+        assert_eq!(repairs, [ShredRepairType::Shred(1, 4)]);
     }

     fn add_tree_with_missing_shreds(

View File

@@ -7451,7 +7451,7 @@ pub mod tests {
     #[test]
     fn test_insert_multiple_is_last() {
         solana_logger::setup();
-        let (shreds, _) = make_slot_entries(0, 0, 19, /*merkle_variant:*/ true);
+        let (shreds, _) = make_slot_entries(0, 0, 18, /*merkle_variant:*/ true);
         let num_shreds = shreds.len() as u64;
         let ledger_path = get_tmp_ledger_path_auto_delete!();
         let blockstore = Blockstore::open(ledger_path.path()).unwrap();

View File

@@ -198,10 +198,20 @@ enum ShredVariant {
     // the shred variant:
     //   0b0100_???? MerkleCode
     //   0b0110_???? MerkleCode chained
+    //   0b0111_???? MerkleCode chained resigned
     //   0b1000_???? MerkleData
     //   0b1001_???? MerkleData chained
-    MerkleCode { proof_size: u8, chained: bool }, // 0b01?0_????
-    MerkleData { proof_size: u8, chained: bool }, // 0b100?_????
+    //   0b1011_???? MerkleData chained resigned
+    MerkleCode {
+        proof_size: u8,
+        chained: bool,
+        resigned: bool,
+    }, // 0b01??_????
+    MerkleData {
+        proof_size: u8,
+        chained: bool,
+        resigned: bool,
+    }, // 0b10??_????
 }

 /// A common header that is present in data and code shred headers
@@ -656,17 +666,19 @@ pub mod layout {
             ShredVariant::MerkleCode {
                 proof_size,
                 chained,
+                resigned,
             } => {
                 let merkle_root =
-                    self::merkle::ShredCode::get_merkle_root(shred, proof_size, chained)?;
+                    self::merkle::ShredCode::get_merkle_root(shred, proof_size, chained, resigned)?;
                 SignedData::MerkleRoot(merkle_root)
             }
             ShredVariant::MerkleData {
                 proof_size,
                 chained,
+                resigned,
             } => {
                 let merkle_root =
-                    self::merkle::ShredData::get_merkle_root(shred, proof_size, chained)?;
+                    self::merkle::ShredData::get_merkle_root(shred, proof_size, chained, resigned)?;
                 SignedData::MerkleRoot(merkle_root)
             }
         };
@@ -704,11 +716,13 @@ pub mod layout {
         ShredVariant::MerkleCode {
             proof_size,
             chained,
-        } => merkle::ShredCode::get_merkle_root(shred, proof_size, chained),
+            resigned,
+        } => merkle::ShredCode::get_merkle_root(shred, proof_size, chained, resigned),
         ShredVariant::MerkleData {
             proof_size,
             chained,
-        } => merkle::ShredData::get_merkle_root(shred, proof_size, chained),
+            resigned,
+        } => merkle::ShredData::get_merkle_root(shred, proof_size, chained, resigned),
     }
 }
@@ -725,10 +739,18 @@ pub mod layout {
             *byte = rng.gen::<u8>().max(1u8).wrapping_add(*byte);
         }
         let shred = get_shred(packet).unwrap();
-        let merkle_proof_size = match get_shred_variant(shred).unwrap() {
+        let merkle_variant = match get_shred_variant(shred).unwrap() {
             ShredVariant::LegacyCode | ShredVariant::LegacyData => None,
-            ShredVariant::MerkleCode { proof_size, .. }
-            | ShredVariant::MerkleData { proof_size, .. } => Some(proof_size),
+            ShredVariant::MerkleCode {
+                proof_size,
+                resigned,
+                ..
+            }
+            | ShredVariant::MerkleData {
+                proof_size,
+                resigned,
+                ..
+            } => Some((proof_size, resigned)),
         };
         let coin_flip: bool = rng.gen();
         if coin_flip {
@@ -736,12 +758,13 @@ pub mod layout {
             modify_packet(rng, packet, 0..SIGNATURE_BYTES);
         } else {
             // Corrupt one byte within the signed data offsets.
-            let offsets = merkle_proof_size
-                .map(|merkle_proof_size| {
+            let offsets = merkle_variant
+                .map(|(proof_size, resigned)| {
                     // Need to corrupt the merkle proof.
                     // Proof entries are each 20 bytes at the end of shreds.
-                    let offset = usize::from(merkle_proof_size) * 20;
-                    shred.len() - offset..shred.len()
+                    let offset = usize::from(proof_size) * 20;
+                    let size = shred.len() - if resigned { SIZE_OF_SIGNATURE } else { 0 };
+                    size - offset..size
                 })
                 .or_else(|| get_signed_data_offsets(shred));
             modify_packet(rng, packet, offsets.unwrap());
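The corrupted-packet test above reflects the new tail layout: in a resigned shred the merkle-proof entries no longer run to the very end of the payload but stop 64 bytes short, leaving room for the retransmitter's signature. A small stand-alone sketch of that offset arithmetic (the shred length here is a made-up number, not the crate's constant):

use std::ops::Range;

const SIZE_OF_SIGNATURE: usize = 64; // Ed25519 signature
const SIZE_OF_MERKLE_PROOF_ENTRY: usize = 20;

// Byte range of the merkle-proof entries within a shred of `shred_len` bytes.
// For a resigned shred the proof ends just before the trailing signature slot.
fn merkle_proof_range(shred_len: usize, proof_size: u8, resigned: bool) -> Range<usize> {
    let end = shred_len - if resigned { SIZE_OF_SIGNATURE } else { 0 };
    let start = end - usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY;
    start..end
}

fn main() {
    // Made-up 1200-byte shred with a 6-entry proof.
    assert_eq!(merkle_proof_range(1200, 6, false), 1080..1200);
    assert_eq!(merkle_proof_range(1200, 6, true), 1016..1136);
}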
@@ -823,19 +846,43 @@ impl From<ShredVariant> for u8 {
             ShredVariant::MerkleCode {
                 proof_size,
                 chained: false,
+                resigned: false,
             } => proof_size | 0x40,
             ShredVariant::MerkleCode {
                 proof_size,
                 chained: true,
+                resigned: false,
             } => proof_size | 0x60,
+            ShredVariant::MerkleCode {
+                proof_size,
+                chained: true,
+                resigned: true,
+            } => proof_size | 0x70,
             ShredVariant::MerkleData {
                 proof_size,
                 chained: false,
+                resigned: false,
             } => proof_size | 0x80,
             ShredVariant::MerkleData {
                 proof_size,
                 chained: true,
+                resigned: false,
             } => proof_size | 0x90,
+            ShredVariant::MerkleData {
+                proof_size,
+                chained: true,
+                resigned: true,
+            } => proof_size | 0xb0,
+            ShredVariant::MerkleCode {
+                proof_size: _,
+                chained: false,
+                resigned: true,
+            }
+            | ShredVariant::MerkleData {
+                proof_size: _,
+                chained: false,
+                resigned: true,
+            } => panic!("Invalid shred variant: {shred_variant:?}"),
         }
     }
 }
@@ -853,18 +900,32 @@ impl TryFrom<u8> for ShredVariant {
             0x40 => Ok(ShredVariant::MerkleCode {
                 proof_size,
                 chained: false,
+                resigned: false,
             }),
             0x60 => Ok(ShredVariant::MerkleCode {
                 proof_size,
                 chained: true,
+                resigned: false,
+            }),
+            0x70 => Ok(ShredVariant::MerkleCode {
+                proof_size,
+                chained: true,
+                resigned: true,
             }),
             0x80 => Ok(ShredVariant::MerkleData {
                 proof_size,
                 chained: false,
+                resigned: false,
             }),
             0x90 => Ok(ShredVariant::MerkleData {
                 proof_size,
                 chained: true,
+                resigned: false,
+            }),
+            0xb0 => Ok(ShredVariant::MerkleData {
+                proof_size,
+                chained: true,
+                resigned: true,
             }),
             _ => Err(Error::InvalidShredVariant),
         }
@@ -1058,7 +1119,9 @@ pub fn max_entries_per_n_shred(
     shred_data_size: Option<usize>,
 ) -> u64 {
     // Default 32:32 erasure batches yields 64 shreds; log2(64) = 6.
-    let merkle_variant = Some((/*proof_size:*/ 6, /*chained:*/ false));
+    let merkle_variant = Some((
+        /*proof_size:*/ 6, /*chained:*/ false, /*resigned:*/ false,
+    ));
     let data_buffer_size = ShredData::capacity(merkle_variant).unwrap();
     let shred_data_size = shred_data_size.unwrap_or(data_buffer_size) as u64;
     let vec_size = bincode::serialized_size(&vec![entry]).unwrap();
@@ -1163,6 +1226,7 @@ mod tests {
             bincode::serialized_size(&ShredVariant::MerkleCode {
                 proof_size: 15,
                 chained: true,
+                resigned: true
             })
             .unwrap() as usize
         );
@@ -1468,15 +1532,17 @@ mod tests {
         );
     }

-    #[test_case(false, 0b0100_0000)]
-    #[test_case(true, 0b0110_0000)]
-    fn test_shred_variant_compat_merkle_code(chained: bool, byte: u8) {
+    #[test_case(false, false, 0b0100_0000)]
+    #[test_case(true, false, 0b0110_0000)]
+    #[test_case(true, true, 0b0111_0000)]
+    fn test_shred_variant_compat_merkle_code(chained: bool, resigned: bool, byte: u8) {
         for proof_size in 0..=15u8 {
             let byte = byte | proof_size;
             assert_eq!(
                 u8::from(ShredVariant::MerkleCode {
                     proof_size,
                     chained,
+                    resigned,
                 }),
                 byte
             );
@@ -1484,6 +1550,7 @@ mod tests {
                 ShredType::from(ShredVariant::MerkleCode {
                     proof_size,
                     chained,
+                    resigned,
                 }),
                 ShredType::Code
             );
@@ -1492,11 +1559,13 @@ mod tests {
                 ShredVariant::MerkleCode {
                     proof_size,
                     chained,
+                    resigned,
                 },
             );
             let buf = bincode::serialize(&ShredVariant::MerkleCode {
                 proof_size,
                 chained,
+                resigned,
             })
             .unwrap();
             assert_eq!(buf, vec![byte]);
@@ -1505,20 +1574,23 @@ mod tests {
                 ShredVariant::MerkleCode {
                     proof_size,
                     chained,
+                    resigned,
                 }
             );
         }
     }

-    #[test_case(false, 0b1000_0000)]
-    #[test_case(true, 0b1001_0000)]
-    fn test_shred_variant_compat_merkle_data(chained: bool, byte: u8) {
+    #[test_case(false, false, 0b1000_0000)]
+    #[test_case(true, false, 0b1001_0000)]
+    #[test_case(true, true, 0b1011_0000)]
+    fn test_shred_variant_compat_merkle_data(chained: bool, resigned: bool, byte: u8) {
         for proof_size in 0..=15u8 {
             let byte = byte | proof_size;
             assert_eq!(
                 u8::from(ShredVariant::MerkleData {
                     proof_size,
                     chained,
+                    resigned,
                 }),
                 byte
             );
@@ -1526,6 +1598,7 @@ mod tests {
                 ShredType::from(ShredVariant::MerkleData {
                     proof_size,
                     chained,
+                    resigned,
                 }),
                 ShredType::Data
             );
@@ -1534,11 +1607,13 @@ mod tests {
                 ShredVariant::MerkleData {
                     proof_size,
                     chained,
+                    resigned
                 }
             );
             let buf = bincode::serialize(&ShredVariant::MerkleData {
                 proof_size,
                 chained,
+                resigned,
             })
             .unwrap();
             assert_eq!(buf, vec![byte]);
@@ -1547,6 +1622,7 @@ mod tests {
                 ShredVariant::MerkleData {
                     proof_size,
                     chained,
+                    resigned
                 }
             );
         }

View File

@@ -53,6 +53,7 @@ type MerkleProofEntry = [u8; 20];
 // Layout: {common, data} headers | data buffer
 //         | [Merkle root of the previous erasure batch if chained]
 //         | Merkle proof
+//         | [Retransmitter's signature if resigned]
 // The slice past signature till the end of the data buffer is erasure coded.
 // The slice past signature and before the merkle proof is hashed to generate
 // the Merkle tree. The root of the Merkle tree is signed.
@@ -66,6 +67,7 @@ pub struct ShredData {
 // Layout: {common, coding} headers | erasure coded shard
 //         | [Merkle root of the previous erasure batch if chained]
 //         | Merkle proof
+//         | [Retransmitter's signature if resigned]
 // The slice past signature and before the merkle proof is hashed to generate
 // the Merkle tree. The root of the Merkle tree is signed.
 #[derive(Clone, Debug, Eq, PartialEq)]
@@ -145,15 +147,17 @@ impl ShredData {
     // Maximum size of ledger data that can be embedded in a data-shred.
     // Also equal to:
-    //   ShredCode::capacity(proof_size).unwrap()
+    //   ShredCode::capacity(proof_size, chained, resigned).unwrap()
     //     - ShredData::SIZE_OF_HEADERS
     //     + SIZE_OF_SIGNATURE
-    pub(super) fn capacity(proof_size: u8, chained: bool) -> Result<usize, Error> {
+    pub(super) fn capacity(proof_size: u8, chained: bool, resigned: bool) -> Result<usize, Error> {
+        debug_assert!(chained || !resigned);
         Self::SIZE_OF_PAYLOAD
             .checked_sub(
                 Self::SIZE_OF_HEADERS
                     + if chained { SIZE_OF_MERKLE_ROOT } else { 0 }
-                    + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY,
+                    + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY
+                    + if resigned { SIZE_OF_SIGNATURE } else { 0 },
             )
             .ok_or(Error::InvalidProofSize(proof_size))
     }
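In both ShredData and ShredCode, each enabled feature carves a fixed-size region out of the fixed payload: 32 bytes for the chained merkle root, 20 bytes per proof entry, and now 64 bytes for the retransmitter's signature when resigned. A self-contained sketch of that subtraction (payload and header sizes below are made-up placeholders; the crate defines the real constants):

const SIZE_OF_SIGNATURE: usize = 64;
const SIZE_OF_MERKLE_ROOT: usize = 32;
const SIZE_OF_MERKLE_PROOF_ENTRY: usize = 20;

/// Sketch of the data-buffer capacity computation: start from the fixed payload
/// size, then subtract the headers, the optional chained merkle root, the merkle
/// proof, and (new in this commit) the optional retransmitter signature.
fn capacity(
    size_of_payload: usize,
    size_of_headers: usize,
    proof_size: u8,
    chained: bool,
    resigned: bool,
) -> Option<usize> {
    debug_assert!(chained || !resigned); // resigned implies chained
    size_of_payload.checked_sub(
        size_of_headers
            + if chained { SIZE_OF_MERKLE_ROOT } else { 0 }
            + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY
            + if resigned { SIZE_OF_SIGNATURE } else { 0 },
    )
}

fn main() {
    // With made-up payload/header sizes: a resigned shred loses exactly 64 more
    // bytes of data capacity than an otherwise identical chained shred.
    let chained = capacity(1200, 88, 6, true, false).unwrap();
    let resigned = capacity(1200, 88, 6, true, true).unwrap();
    assert_eq!(chained - resigned, SIZE_OF_SIGNATURE);
}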
@@ -163,16 +167,17 @@ impl ShredData {
         let ShredVariant::MerkleData {
             proof_size,
             chained,
+            resigned,
         } = self.common_header.shred_variant
         else {
             return Err(Error::InvalidShredVariant);
         };
-        Self::get_proof_offset(proof_size, chained)
+        Self::get_proof_offset(proof_size, chained, resigned)
     }

-    fn get_proof_offset(proof_size: u8, chained: bool) -> Result<usize, Error> {
+    fn get_proof_offset(proof_size: u8, chained: bool, resigned: bool) -> Result<usize, Error> {
         Ok(Self::SIZE_OF_HEADERS
-            + Self::capacity(proof_size, chained)?
+            + Self::capacity(proof_size, chained, resigned)?
             + if chained { SIZE_OF_MERKLE_ROOT } else { 0 })
     }
@@ -180,11 +185,12 @@ impl ShredData {
         let ShredVariant::MerkleData {
             proof_size,
             chained: true,
+            resigned,
         } = self.common_header.shred_variant
         else {
             return Err(Error::InvalidShredVariant);
         };
-        Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size, /*chained:*/ true)?)
+        Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size, /*chained:*/ true, resigned)?)
     }

     fn set_chained_merkle_root(&mut self, chained_merkle_root: &Hash) -> Result<(), Error> {
@@ -234,11 +240,12 @@ impl ShredData {
         let ShredVariant::MerkleData {
             proof_size,
             chained,
+            resigned,
         } = common_header.shred_variant
         else {
             return Err(Error::InvalidShredVariant);
         };
-        if ShredCode::capacity(proof_size, chained)? != shard_size {
+        if ShredCode::capacity(proof_size, chained, resigned)? != shard_size {
             return Err(Error::InvalidShardSize(shard_size));
         }
         let data_header = deserialize_from_with_limit(&mut cursor)?;
@@ -271,12 +278,18 @@ impl ShredData {
         Ok(())
     }

-    pub(super) fn get_merkle_root(shred: &[u8], proof_size: u8, chained: bool) -> Option<Hash> {
+    pub(super) fn get_merkle_root(
+        shred: &[u8],
+        proof_size: u8,
+        chained: bool,
+        resigned: bool,
+    ) -> Option<Hash> {
         debug_assert_eq!(
             shred::layout::get_shred_variant(shred).unwrap(),
             ShredVariant::MerkleData {
                 proof_size,
                 chained,
+                resigned,
             },
         );
         // Shred index in the erasure batch.
@@ -289,7 +302,7 @@ impl ShredData {
                 .map(usize::try_from)?
                 .ok()?
         };
-        let proof_offset = Self::get_proof_offset(proof_size, chained).ok()?;
+        let proof_offset = Self::get_proof_offset(proof_size, chained, resigned).ok()?;
         let proof = get_merkle_proof(shred, proof_offset, proof_size).ok()?;
         let node = get_merkle_node(shred, SIZE_OF_SIGNATURE..proof_offset).ok()?;
         get_merkle_root(index, node, proof).ok()
@@ -306,14 +319,16 @@ impl ShredCode {
     }

     // Size of buffer embedding erasure codes.
-    fn capacity(proof_size: u8, chained: bool) -> Result<usize, Error> {
+    fn capacity(proof_size: u8, chained: bool, resigned: bool) -> Result<usize, Error> {
+        debug_assert!(chained || !resigned);
         // Merkle proof is generated and signed after coding shreds are
         // generated. Coding shred headers cannot be erasure coded either.
         Self::SIZE_OF_PAYLOAD
             .checked_sub(
                 Self::SIZE_OF_HEADERS
                     + if chained { SIZE_OF_MERKLE_ROOT } else { 0 }
-                    + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY,
+                    + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY
+                    + if resigned { SIZE_OF_SIGNATURE } else { 0 },
             )
             .ok_or(Error::InvalidProofSize(proof_size))
     }
@@ -323,16 +338,17 @@ impl ShredCode {
         let ShredVariant::MerkleCode {
             proof_size,
             chained,
+            resigned,
         } = self.common_header.shred_variant
         else {
             return Err(Error::InvalidShredVariant);
         };
-        Self::get_proof_offset(proof_size, chained)
+        Self::get_proof_offset(proof_size, chained, resigned)
     }

-    fn get_proof_offset(proof_size: u8, chained: bool) -> Result<usize, Error> {
+    fn get_proof_offset(proof_size: u8, chained: bool, resigned: bool) -> Result<usize, Error> {
         Ok(Self::SIZE_OF_HEADERS
-            + Self::capacity(proof_size, chained)?
+            + Self::capacity(proof_size, chained, resigned)?
             + if chained { SIZE_OF_MERKLE_ROOT } else { 0 })
     }
@@ -340,11 +356,12 @@ impl ShredCode {
         let ShredVariant::MerkleCode {
             proof_size,
             chained: true,
+            resigned,
         } = self.common_header.shred_variant
         else {
             return Err(Error::InvalidShredVariant);
         };
-        Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size, /*chained:*/ true)?)
+        Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size, /*chained:*/ true, resigned)?)
     }

     fn chained_merkle_root(&self) -> Result<Hash, Error> {
@@ -393,12 +410,13 @@ impl ShredCode {
         let ShredVariant::MerkleCode {
             proof_size,
             chained,
+            resigned,
         } = common_header.shred_variant
         else {
             return Err(Error::InvalidShredVariant);
         };
         let shard_size = shard.len();
-        if Self::capacity(proof_size, chained)? != shard_size {
+        if Self::capacity(proof_size, chained, resigned)? != shard_size {
             return Err(Error::InvalidShardSize(shard_size));
         }
         if shard_size + Self::SIZE_OF_HEADERS > Self::SIZE_OF_PAYLOAD {
@@ -438,12 +456,18 @@ impl ShredCode {
         Ok(())
     }

-    pub(super) fn get_merkle_root(shred: &[u8], proof_size: u8, chained: bool) -> Option<Hash> {
+    pub(super) fn get_merkle_root(
+        shred: &[u8],
+        proof_size: u8,
+        chained: bool,
+        resigned: bool,
+    ) -> Option<Hash> {
         debug_assert_eq!(
             shred::layout::get_shred_variant(shred).unwrap(),
             ShredVariant::MerkleCode {
                 proof_size,
                 chained,
+                resigned,
             },
         );
         // Shred index in the erasure batch.
@@ -458,7 +482,7 @@ impl ShredCode {
                 .ok()?;
             num_data_shreds.checked_add(position)?
         };
-        let proof_offset = Self::get_proof_offset(proof_size, chained).ok()?;
+        let proof_offset = Self::get_proof_offset(proof_size, chained, resigned).ok()?;
         let proof = get_merkle_proof(shred, proof_offset, proof_size).ok()?;
         let node = get_merkle_node(shred, SIZE_OF_SIGNATURE..proof_offset).ok()?;
         get_merkle_root(index, node, proof).ok()
@@ -472,9 +496,10 @@ impl<'a> ShredTrait<'a> for ShredData {
     // Also equal to:
     //   ShredData::SIZE_OF_HEADERS
-    //     + ShredData::capacity(proof_size, chained).unwrap()
+    //     + ShredData::capacity(proof_size, chained, resigned).unwrap()
     //     + if chained { SIZE_OF_MERKLE_ROOT } else { 0 }
     //     + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY
+    //     + if resigned { SIZE_OF_SIGNATURE } else { 0 }
     const SIZE_OF_PAYLOAD: usize =
         ShredCode::SIZE_OF_PAYLOAD - ShredCode::SIZE_OF_HEADERS + SIZE_OF_SIGNATURE;
     const SIZE_OF_HEADERS: usize = SIZE_OF_DATA_SHRED_HEADERS;
@@ -514,11 +539,12 @@ impl<'a> ShredTrait<'a> for ShredData {
         let ShredVariant::MerkleData {
             proof_size,
             chained,
+            resigned,
         } = self.common_header.shred_variant
         else {
             return Err(Error::InvalidShredVariant);
         };
-        let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained)?;
+        let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained, resigned)?;
         let mut shard = self.payload;
         shard.truncate(offset);
         shard.drain(..SIZE_OF_SIGNATURE);
@@ -532,11 +558,12 @@ impl<'a> ShredTrait<'a> for ShredData {
         let ShredVariant::MerkleData {
             proof_size,
             chained,
+            resigned,
         } = self.common_header.shred_variant
         else {
             return Err(Error::InvalidShredVariant);
         };
-        let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained)?;
+        let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained, resigned)?;
         self.payload
             .get(SIZE_OF_SIGNATURE..offset)
             .ok_or(Error::InvalidPayloadSize(self.payload.len()))
@@ -598,11 +625,12 @@ impl<'a> ShredTrait<'a> for ShredCode {
         let ShredVariant::MerkleCode {
             proof_size,
             chained,
+            resigned,
         } = self.common_header.shred_variant
         else {
             return Err(Error::InvalidShredVariant);
         };
-        let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained)?;
+        let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained, resigned)?;
         let mut shard = self.payload;
         shard.truncate(offset);
         shard.drain(..Self::SIZE_OF_HEADERS);
@@ -616,11 +644,12 @@ impl<'a> ShredTrait<'a> for ShredCode {
         let ShredVariant::MerkleCode {
             proof_size,
             chained,
+            resigned,
         } = self.common_header.shred_variant
         else {
             return Err(Error::InvalidShredVariant);
         };
-        let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained)?;
+        let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained, resigned)?;
         self.payload
             .get(Self::SIZE_OF_HEADERS..offset)
             .ok_or(Error::InvalidPayloadSize(self.payload.len()))
@@ -650,11 +679,12 @@ impl ShredDataTrait for ShredData {
         let ShredVariant::MerkleData {
             proof_size,
             chained,
+            resigned,
         } = self.common_header.shred_variant
         else {
             return Err(Error::InvalidShredVariant);
         };
-        let data_buffer_size = Self::capacity(proof_size, chained)?;
+        let data_buffer_size = Self::capacity(proof_size, chained, resigned)?;
         let size = usize::from(self.data_header.size);
         if size > self.payload.len()
             || size < Self::SIZE_OF_HEADERS
@@ -786,11 +816,12 @@ pub(super) fn recover(
         })
         .ok_or(TooFewParityShards)?;
     debug_assert_matches!(common_header.shred_variant, ShredVariant::MerkleCode { .. });
-    let (proof_size, chained) = match common_header.shred_variant {
+    let (proof_size, chained, resigned) = match common_header.shred_variant {
         ShredVariant::MerkleCode {
             proof_size,
             chained,
-        } => (proof_size, chained),
+            resigned,
+        } => (proof_size, chained, resigned),
         ShredVariant::MerkleData { .. } | ShredVariant::LegacyCode | ShredVariant::LegacyData => {
             return Err(Error::InvalidShredVariant);
         }
@@ -816,6 +847,7 @@ pub(super) fn recover(
                 == &ShredVariant::MerkleData {
                     proof_size,
                     chained,
+                    resigned,
                 }
         }
         Shred::ShredCode(shred) => {
@@ -828,6 +860,7 @@ pub(super) fn recover(
                 == &ShredVariant::MerkleCode {
                     proof_size,
                     chained,
+                    resigned,
                 }
                 && num_data_shreds == coding_header.num_data_shreds
                 && num_coding_shreds == coding_header.num_coding_shreds
@@ -884,6 +917,7 @@ pub(super) fn recover(
         let expected_shred_variant = ShredVariant::MerkleData {
             proof_size,
             chained,
+            resigned,
         };
         if shred_variant != expected_shred_variant
             || common_header.slot != slot
@@ -992,16 +1026,18 @@ pub(super) fn make_shreds_from_data(
     }
     let now = Instant::now();
     let chained = chained_merkle_root.is_some();
+    let resigned = chained && is_last_in_slot;
     let erasure_batch_size =
         shredder::get_erasure_batch_size(DATA_SHREDS_PER_FEC_BLOCK, is_last_in_slot);
     let proof_size = get_proof_size(erasure_batch_size);
-    let data_buffer_size = ShredData::capacity(proof_size, chained)?;
+    let data_buffer_size = ShredData::capacity(proof_size, chained, resigned)?;
     let chunk_size = DATA_SHREDS_PER_FEC_BLOCK * data_buffer_size;
     let mut common_header = ShredCommonHeader {
         signature: Signature::default(),
         shred_variant: ShredVariant::MerkleData {
             proof_size,
             chained,
+            resigned,
         },
         slot,
         index: next_shred_index,
@@ -1044,7 +1080,7 @@ pub(super) fn make_shreds_from_data(
         // which can embed the remaining data.
         let (proof_size, data_buffer_size) = (1u8..32)
             .find_map(|proof_size| {
-                let data_buffer_size = ShredData::capacity(proof_size, chained).ok()?;
+                let data_buffer_size = ShredData::capacity(proof_size, chained, resigned).ok()?;
                 let num_data_shreds = (data.len() + data_buffer_size - 1) / data_buffer_size;
                 let num_data_shreds = num_data_shreds.max(1);
                 let erasure_batch_size =
@@ -1056,6 +1092,7 @@ pub(super) fn make_shreds_from_data(
         common_header.shred_variant = ShredVariant::MerkleData {
             proof_size,
             chained,
+            resigned,
         };
         common_header.fec_set_index = common_header.index;
         let chunks = if data.is_empty() {
@@ -1076,7 +1113,7 @@ pub(super) fn make_shreds_from_data(
     // Only the very last shred may have residual data buffer.
     debug_assert!(shreds.iter().rev().skip(1).all(|shred| {
         let proof_size = shred.proof_size().unwrap();
-        let capacity = ShredData::capacity(proof_size, chained).unwrap();
+        let capacity = ShredData::capacity(proof_size, chained, resigned).unwrap();
         shred.data().unwrap().len() == capacity
     }));
     // Adjust flags for the very last shred.
@@ -1196,6 +1233,7 @@ fn make_erasure_batch(
 ) -> Result<(/*merkle root:*/ Hash, Vec<Shred>), Error> {
     let num_data_shreds = shreds.len();
     let chained = chained_merkle_root.is_some();
+    let resigned = chained && is_last_in_slot;
     let erasure_batch_size = shredder::get_erasure_batch_size(num_data_shreds, is_last_in_slot);
     let num_coding_shreds = erasure_batch_size - num_data_shreds;
     let proof_size = get_proof_size(erasure_batch_size);
@@ -1203,6 +1241,7 @@ fn make_erasure_batch(
             == ShredVariant::MerkleData {
                 proof_size,
                 chained,
+                resigned
             }));
     let mut common_header = match shreds.first() {
         None => return Err(Error::from(TooFewShards)),
@@ -1230,6 +1269,7 @@ fn make_erasure_batch(
     common_header.shred_variant = ShredVariant::MerkleCode {
         proof_size,
         chained,
+        resigned,
     };
     let mut coding_header = CodingShredHeader {
         num_data_shreds: num_data_shreds as u16,
@@ -1300,59 +1340,69 @@ mod test {
     };

     // Total size of a data shred including headers and merkle proof.
-    fn shred_data_size_of_payload(proof_size: u8, chained: bool) -> usize {
+    fn shred_data_size_of_payload(proof_size: u8, chained: bool, resigned: bool) -> usize {
+        assert!(chained || !resigned);
         ShredData::SIZE_OF_HEADERS
-            + ShredData::capacity(proof_size, chained).unwrap()
+            + ShredData::capacity(proof_size, chained, resigned).unwrap()
             + if chained { SIZE_OF_MERKLE_ROOT } else { 0 }
             + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY
+            + if resigned { SIZE_OF_SIGNATURE } else { 0 }
     }

     // Merkle proof is generated and signed after coding shreds are generated.
     // All payload excluding merkle proof and the signature are erasure coded.
     // Therefore the data capacity is equal to erasure encoded shard size minus
     // size of erasure encoded header.
-    fn shred_data_capacity(proof_size: u8, chained: bool) -> usize {
+    fn shred_data_capacity(proof_size: u8, chained: bool, resigned: bool) -> usize {
         const SIZE_OF_ERASURE_ENCODED_HEADER: usize =
             ShredData::SIZE_OF_HEADERS - SIZE_OF_SIGNATURE;
-        ShredCode::capacity(proof_size, chained).unwrap() - SIZE_OF_ERASURE_ENCODED_HEADER
+        ShredCode::capacity(proof_size, chained, resigned).unwrap() - SIZE_OF_ERASURE_ENCODED_HEADER
     }

-    fn shred_data_size_of_erasure_encoded_slice(proof_size: u8, chained: bool) -> usize {
+    fn shred_data_size_of_erasure_encoded_slice(
+        proof_size: u8,
+        chained: bool,
+        resigned: bool,
+    ) -> usize {
         ShredData::SIZE_OF_PAYLOAD
             - SIZE_OF_SIGNATURE
             - if chained { SIZE_OF_MERKLE_ROOT } else { 0 }
             - usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY
+            - if resigned { SIZE_OF_SIGNATURE } else { 0 }
     }

-    #[test_case(false)]
-    #[test_case(true)]
-    fn test_shred_data_size_of_payload(chained: bool) {
+    #[test_case(false, false)]
+    #[test_case(true, false)]
+    #[test_case(true, true)]
+    fn test_shred_data_size_of_payload(chained: bool, resigned: bool) {
         for proof_size in 0..0x15 {
             assert_eq!(
                 ShredData::SIZE_OF_PAYLOAD,
-                shred_data_size_of_payload(proof_size, chained)
+                shred_data_size_of_payload(proof_size, chained, resigned)
             );
         }
     }

-    #[test_case(false)]
-    #[test_case(true)]
-    fn test_shred_data_capacity(chained: bool) {
+    #[test_case(false, false)]
+    #[test_case(true, false)]
+    #[test_case(true, true)]
+    fn test_shred_data_capacity(chained: bool, resigned: bool) {
         for proof_size in 0..0x15 {
             assert_eq!(
-                ShredData::capacity(proof_size, chained).unwrap(),
-                shred_data_capacity(proof_size, chained)
+                ShredData::capacity(proof_size, chained, resigned).unwrap(),
+                shred_data_capacity(proof_size, chained, resigned)
             );
         }
     }

-    #[test_case(false)]
-    #[test_case(true)]
-    fn test_shred_code_capacity(chained: bool) {
+    #[test_case(false, false)]
+    #[test_case(true, false)]
+    #[test_case(true, true)]
+    fn test_shred_code_capacity(chained: bool, resigned: bool) {
         for proof_size in 0..0x15 {
             assert_eq!(
-                ShredCode::capacity(proof_size, chained).unwrap(),
-                shred_data_size_of_erasure_encoded_slice(proof_size, chained),
+                ShredCode::capacity(proof_size, chained, resigned).unwrap(),
+                shred_data_size_of_erasure_encoded_slice(proof_size, chained, resigned),
             );
         }
     }
@@ -1393,13 +1443,16 @@ mod test {
         }
     }

-    #[test_case(37, false)]
-    #[test_case(37, true)]
-    #[test_case(64, false)]
-    #[test_case(64, true)]
-    #[test_case(73, false)]
-    #[test_case(73, true)]
-    fn test_recover_merkle_shreds(num_shreds: usize, chained: bool) {
+    #[test_case(37, false, false)]
+    #[test_case(37, true, false)]
+    #[test_case(37, true, true)]
+    #[test_case(64, false, false)]
+    #[test_case(64, true, false)]
+    #[test_case(64, true, true)]
+    #[test_case(73, false, false)]
+    #[test_case(73, true, false)]
+    #[test_case(73, true, true)]
+    fn test_recover_merkle_shreds(num_shreds: usize, chained: bool, resigned: bool) {
         let mut rng = rand::thread_rng();
         let reed_solomon_cache = ReedSolomonCache::default();
         for num_data_shreds in 1..num_shreds {
@@ -1407,6 +1460,7 @@ mod test {
                 run_recover_merkle_shreds(
                     &mut rng,
                     chained,
+                    resigned,
                     num_data_shreds,
                     num_coding_shreds,
                     &reed_solomon_cache,
@@ -1417,6 +1471,7 @@ mod test {
     fn run_recover_merkle_shreds<R: Rng + CryptoRng>(
         rng: &mut R,
         chained: bool,
+        resigned: bool,
         num_data_shreds: usize,
         num_coding_shreds: usize,
         reed_solomon_cache: &ReedSolomonCache,
@@ -1424,12 +1479,13 @@ mod test {
         let keypair = Keypair::new();
         let num_shreds = num_data_shreds + num_coding_shreds;
         let proof_size = get_proof_size(num_shreds);
-        let capacity = ShredData::capacity(proof_size, chained).unwrap();
+        let capacity = ShredData::capacity(proof_size, chained, resigned).unwrap();
         let common_header = ShredCommonHeader {
             signature: Signature::default(),
             shred_variant: ShredVariant::MerkleData {
                 proof_size,
                 chained,
+                resigned,
             },
             slot: 145_865_705,
             index: 1835,
@@ -1488,6 +1544,7 @@ mod test {
                 shred_variant: ShredVariant::MerkleCode {
                     proof_size,
                     chained,
+                    resigned,
                 },
                 index: common_header.index + i as u32 + 7,
                 ..common_header
@@ -1660,6 +1717,7 @@ mod test {
         let thread_pool = ThreadPoolBuilder::new().num_threads(2).build().unwrap();
         let keypair = Keypair::new();
         let chained_merkle_root = chained.then(|| Hash::new_from_array(rng.gen()));
+        let resigned = chained && is_last_in_slot;
         let slot = 149_745_689;
         let parent_slot = slot - rng.gen_range(1..65536);
         let shred_version = rng.gen();
@@ -1752,6 +1810,7 @@ mod test {
                 ShredVariant::MerkleCode {
                     proof_size,
                     chained,
+                    resigned
                 }
             );
             num_coding_shreds += 1;
@@ -1763,6 +1822,7 @@ mod test {
                 ShredVariant::MerkleData {
                     proof_size,
                     chained,
+                    resigned
                 }
             );
             assert!(common_header.fec_set_index <= common_header.index);

View File

@@ -114,11 +114,18 @@ impl ShredData {
     // merkle_proof_size is the number of merkle proof entries.
     // None indicates a legacy data-shred.
     pub fn capacity(
-        merkle_variant: Option<(/*proof_size:*/ u8, /*chained:*/ bool)>,
+        merkle_variant: Option<(
+            u8,   // proof_size
+            bool, // chained
+            bool, // resigned
+        )>,
     ) -> Result<usize, Error> {
         match merkle_variant {
             None => Ok(legacy::ShredData::CAPACITY),
-            Some((proof_size, chained)) => merkle::ShredData::capacity(proof_size, chained),
+            Some((proof_size, chained, resigned)) => {
+                debug_assert!(chained || !resigned);
+                merkle::ShredData::capacity(proof_size, chained, resigned)
+            }
         }
     }
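The public capacity helper now takes the full triple, and callers are expected to derive resigned from the chained flag and last-in-slot status, as make_shreds_from_data does above. A hypothetical caller-side sketch against a local stand-in for that API (names and numbers are placeholders, not the crate's):

// Stand-in for the updated API shape: None selects the legacy capacity,
// Some((proof_size, chained, resigned)) the merkle one. Placeholder numbers.
fn capacity(merkle_variant: Option<(u8, bool, bool)>) -> Result<usize, String> {
    match merkle_variant {
        None => Ok(1000), // placeholder legacy data-buffer size
        Some((proof_size, chained, resigned)) => {
            assert!(chained || !resigned, "resigned requires chained");
            1100usize // placeholder merkle payload budget
                .checked_sub(
                    usize::from(proof_size) * 20
                        + if chained { 32 } else { 0 }
                        + if resigned { 64 } else { 0 },
                )
                .ok_or_else(|| format!("invalid proof size: {proof_size}"))
        }
    }
}

fn main() {
    let (chained, is_last_in_slot) = (true, true);
    // Callers derive `resigned` rather than choosing it independently.
    let resigned = chained && is_last_in_slot;
    let data_buffer_size = capacity(Some((6, chained, resigned))).unwrap();
    assert_eq!(data_buffer_size, 1100 - 120 - 32 - 64);
}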