removes the merkle root from shreds binary (#29427)

https://github.com/solana-labs/solana/pull/29445
makes it unnecessary to embed merkle roots into the shreds binary. This
commit therefore removes the merkle root from the shreds binary.

This frees up 20 bytes in each shred's capacity to store more data.
Additionally, since the merkle root no longer has to be truncated to fit
into the payload, the signature is now taken over the full 32-byte hash
instead of the truncated 20-byte root.
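
To see where the 20 bytes come from, here is a minimal sketch of the
capacity arithmetic (the function names and the payload/headers parameters
are illustrative placeholders; the real values are ShredData::SIZE_OF_PAYLOAD
and ShredData::SIZE_OF_HEADERS, with 20-byte proof entries):

    const SIZE_OF_MERKLE_PROOF_ENTRY: usize = 20;

    // Old layout: headers | data buffer | 20-byte truncated root | proof entries.
    fn capacity_before(payload: usize, headers: usize, proof_size: u8) -> Option<usize> {
        let size_of_merkle_root = 20; // the embedded root removed by this commit
        payload.checked_sub(
            headers + size_of_merkle_root + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY,
        )
    }

    // New layout: headers | data buffer | proof entries.
    fn capacity_after(payload: usize, headers: usize, proof_size: u8) -> Option<usize> {
        payload.checked_sub(headers + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY)
    }

    // For any proof_size, capacity_after exceeds capacity_before by exactly 20 bytes.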

Signature verification now also effectively verifies the merkle proof,
because the signed root is recomputed from the proof embedded in the
payload, so the sanitize implementation no longer needs to verify the
merkle proof separately.
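
Concretely, the signed root is recovered by folding the leaf node up the
tree using the proof entries embedded in the payload; tampering with either
the data or the proof changes the recovered root and breaks the signature.
Below is a minimal sketch of that fold, assuming solana_sdk's hashv and the
20-byte proof entries and 0x01 node prefix used by the merkle shred code
(recover_root is a hypothetical name; the shred code does this in its own
get_merkle_root helper):

    use solana_sdk::hash::{hashv, Hash};

    const MERKLE_HASH_PREFIX_NODE: &[u8] = &[0x01];
    type MerkleProofEntry = [u8; 20];

    // Fold the leaf node up to the root. `index` is the shred's position in the
    // erasure batch; its low bit at each level says whether the node is the left
    // or right child of its parent. Nodes are truncated to 20 bytes when joined.
    fn recover_root<'a, I>(index: usize, node: Hash, proof: I) -> Option<Hash>
    where
        I: IntoIterator<Item = &'a MerkleProofEntry>,
    {
        let (index, root) = proof.into_iter().fold((index, node), |(index, node), sibling| {
            let node = &node.as_ref()[..20];
            let parent = if index % 2 == 0 {
                hashv(&[MERKLE_HASH_PREFIX_NODE, node, &sibling[..]])
            } else {
                hashv(&[MERKLE_HASH_PREFIX_NODE, &sibling[..], node])
            };
            (index >> 1, parent)
        });
        // A valid proof must walk all the way up to the root (index collapses to 0).
        (index == 0).then_some(root)
    }

The leaf node itself is the hash of the payload past the signature (with a
0x00 leaf prefix), so a signature over the recovered root covers both the
shred data and the embedded proof.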
behzad nouri 2023-02-15 21:17:24 +00:00 committed by GitHub
parent 4d289222c9
commit cf0a149add
4 changed files with 224 additions and 298 deletions

View File

@@ -49,7 +49,6 @@
 //! So, given a) - c), we must restrict data shred's payload length such that the entire coding
 //! payload can fit into one coding shred / packet.
-pub(crate) use self::merkle::{MerkleRoot, SIZE_OF_MERKLE_ROOT};
 #[cfg(test)]
 pub(crate) use self::shred_code::MAX_CODE_SHREDS_PER_SLOT;
 use {
@@ -193,7 +192,7 @@ pub enum ShredType {
 enum ShredVariant {
     LegacyCode, // 0b0101_1010
     LegacyData, // 0b1010_0101
-    // proof_size is the number of proof entries in the merkle tree branch.
+    // proof_size is the number of merkle proof entries.
     MerkleCode(/*proof_size:*/ u8), // 0b0100_????
     MerkleData(/*proof_size:*/ u8), // 0b1000_????
 }
@@ -231,16 +230,17 @@ pub enum Shred {
     ShredData(ShredData),
 }
+#[derive(Debug, PartialEq, Eq)]
 pub(crate) enum SignedData<'a> {
     Chunk(&'a [u8]), // Chunk of payload past signature.
-    MerkleRoot(MerkleRoot),
+    MerkleRoot(Hash),
 }
 impl<'a> AsRef<[u8]> for SignedData<'a> {
     fn as_ref(&self) -> &[u8] {
         match self {
             Self::Chunk(chunk) => chunk,
-            Self::MerkleRoot(root) => root,
+            Self::MerkleRoot(root) => root.as_ref(),
         }
     }
 }
@@ -656,16 +656,17 @@ pub mod layout {
     // Returns offsets within the shred payload which is signed.
     pub(crate) fn get_signed_data_offsets(shred: &[u8]) -> Option<Range<usize>> {
-        let offsets = match get_shred_variant(shred).ok()? {
-            ShredVariant::LegacyCode | ShredVariant::LegacyData => legacy::SIGNED_MESSAGE_OFFSETS,
-            ShredVariant::MerkleCode(proof_size) => {
-                merkle::ShredCode::get_signed_data_offsets(proof_size)?
-            }
-            ShredVariant::MerkleData(proof_size) => {
-                merkle::ShredData::get_signed_data_offsets(proof_size)?
-            }
-        };
-        (offsets.end <= shred.len()).then_some(offsets)
+        match get_shred_variant(shred).ok()? {
+            ShredVariant::LegacyCode | ShredVariant::LegacyData => {
+                let offsets = self::legacy::SIGNED_MESSAGE_OFFSETS;
+                (offsets.end <= shred.len()).then_some(offsets)
+            }
+            // Merkle shreds sign merkle tree root which can be recovered from
+            // the merkle proof embedded in the payload but itself is not
+            // stored the payload.
+            ShredVariant::MerkleCode(_) => None,
+            ShredVariant::MerkleData(_) => None,
+        }
     }
     pub fn get_reference_tick(shred: &[u8]) -> Result<u8, Error> {
@@ -679,7 +680,7 @@ pub mod layout {
         Ok(flags & ShredFlags::SHRED_TICK_REFERENCE_MASK.bits())
     }
-    pub(crate) fn get_merkle_root(shred: &[u8]) -> Option<MerkleRoot> {
+    pub(crate) fn get_merkle_root(shred: &[u8]) -> Option<Hash> {
         match get_shred_variant(shred).ok()? {
             ShredVariant::LegacyCode | ShredVariant::LegacyData => None,
             ShredVariant::MerkleCode(proof_size) => {
@@ -716,15 +717,15 @@ pub mod layout {
             modify_packet(rng, packet, 0..SIGNATURE_BYTES);
         } else {
             // Corrupt one byte within the signed data offsets.
-            let size = shred.len();
-            let offsets = get_signed_data_offsets(shred).unwrap();
-            modify_packet(rng, packet, offsets);
-            if let Some(proof_size) = merkle_proof_size {
-                // Also need to corrupt the merkle proof.
-                // Proof entries are each 20 bytes at the end of shreds.
-                let offset = usize::from(proof_size) * 20;
-                modify_packet(rng, packet, size - offset..size);
-            }
+            let offsets = merkle_proof_size
+                .map(|merkle_proof_size| {
+                    // Need to corrupt the merkle proof.
+                    // Proof entries are each 20 bytes at the end of shreds.
+                    let offset = usize::from(merkle_proof_size) * 20;
+                    shred.len() - offset..shred.len()
+                })
+                .or_else(|| get_signed_data_offsets(shred));
+            modify_packet(rng, packet, offsets.unwrap());
         }
         // Assert that the signature no longer verifies.
         let shred = get_shred(packet).unwrap();
@@ -734,8 +735,9 @@ pub mod layout {
             let pubkey = keypairs[&slot].pubkey();
             let data = get_signed_data(shred).unwrap();
             assert!(!signature.verify(pubkey.as_ref(), data.as_ref()));
-            let offsets = get_signed_data_offsets(shred).unwrap();
-            assert!(!signature.verify(pubkey.as_ref(), &shred[offsets]));
+            if let Some(offsets) = get_signed_data_offsets(shred) {
+                assert!(!signature.verify(pubkey.as_ref(), &shred[offsets]));
+            }
         } else {
             // Slot may have been corrupted and no longer mapping to a keypair.
             let pubkey = keypairs.get(&slot).map(Keypair::pubkey).unwrap_or_default();

View File

@@ -35,8 +35,6 @@ use {
     },
 };
-const_assert_eq!(SIZE_OF_MERKLE_ROOT, 20);
-pub(crate) const SIZE_OF_MERKLE_ROOT: usize = std::mem::size_of::<MerkleRoot>();
 const_assert_eq!(SIZE_OF_MERKLE_PROOF_ENTRY, 20);
 const SIZE_OF_MERKLE_PROOF_ENTRY: usize = std::mem::size_of::<MerkleProofEntry>();
 const_assert_eq!(ShredData::SIZE_OF_PAYLOAD, 1203);
@@ -46,11 +44,10 @@ const_assert_eq!(ShredData::SIZE_OF_PAYLOAD, 1203);
 const MERKLE_HASH_PREFIX_LEAF: &[u8] = &[0x00];
 const MERKLE_HASH_PREFIX_NODE: &[u8] = &[0x01];
-pub(crate) type MerkleRoot = MerkleProofEntry;
 type MerkleProofEntry = [u8; 20];
-// Layout: {common, data} headers | data buffer | merkle branch
-// The slice past signature and before merkle branch is erasure coded.
+// Layout: {common, data} headers | data buffer | merkle proof
+// The slice past signature and before the merkle proof is erasure coded.
 // Same slice is hashed to generate merkle tree.
 // The root of merkle tree is signed.
 #[derive(Clone, Debug, Eq, PartialEq)]
@@ -60,8 +57,8 @@ pub struct ShredData {
     payload: Vec<u8>,
 }
-// Layout: {common, coding} headers | erasure coded shard | merkle branch
-// The slice past signature and before merkle branch is hashed to generate
+// Layout: {common, coding} headers | erasure coded shard | merkle proof
+// The slice past signature and before the merkle proof is hashed to generate
 // merkle tree. The root of merkle tree is signed.
 #[derive(Clone, Debug, Eq, PartialEq)]
 pub struct ShredCode {
@@ -76,22 +73,23 @@ pub(super) enum Shred {
     ShredData(ShredData),
 }
-struct MerkleBranch<'a> {
-    root: &'a MerkleRoot,
-    proof: Vec<&'a MerkleProofEntry>,
-}
 impl Shred {
     dispatch!(fn common_header(&self) -> &ShredCommonHeader);
     dispatch!(fn erasure_shard_as_slice(&self) -> Result<&[u8], Error>);
     dispatch!(fn erasure_shard_index(&self) -> Result<usize, Error>);
-    dispatch!(fn merkle_root(&self) -> Result<&MerkleRoot, Error>);
-    dispatch!(fn merkle_tree_node(&self) -> Result<Hash, Error>);
+    dispatch!(fn merkle_node(&self) -> Result<Hash, Error>);
     dispatch!(fn payload(&self) -> &Vec<u8>);
-    dispatch!(fn sanitize(&self, verify_merkle_proof: bool) -> Result<(), Error>);
-    dispatch!(fn set_merkle_branch(&mut self, merkle_branch: &MerkleBranch) -> Result<(), Error>);
+    dispatch!(fn sanitize(&self) -> Result<(), Error>);
+    dispatch!(fn set_merkle_proof(&mut self, proof: &[&MerkleProofEntry]) -> Result<(), Error>);
     dispatch!(fn set_signature(&mut self, signature: Signature));
-    dispatch!(fn signed_data(&self) -> Result<MerkleRoot, Error>);
+    dispatch!(fn signed_data(&self) -> Result<Hash, Error>);
+    fn merkle_proof(&self) -> Result<impl Iterator<Item = &MerkleProofEntry>, Error> {
+        match self {
+            Self::ShredCode(shred) => shred.merkle_proof().map(Either::Left),
+            Self::ShredData(shred) => shred.merkle_proof().map(Either::Right),
+        }
+    }
     #[must_use]
     fn verify(&self, pubkey: &Pubkey) -> bool {
@@ -116,6 +114,8 @@ impl Shred {
 #[cfg(test)]
 impl Shred {
+    dispatch!(fn merkle_root(&self) -> Result<Hash, Error>);
+
     fn index(&self) -> u32 {
         self.common_header().index
     }
@@ -126,7 +126,7 @@ impl Shred {
 }
 impl ShredData {
-    // proof_size is the number of proof entries in the merkle tree branch.
+    // proof_size is the number of merkle proof entries.
     fn proof_size(&self) -> Result<u8, Error> {
         match self.common_header.shred_variant {
             ShredVariant::MerkleData(proof_size) => Ok(proof_size),
@@ -142,53 +142,35 @@ impl ShredData {
     pub(super) fn capacity(proof_size: u8) -> Result<usize, Error> {
         Self::SIZE_OF_PAYLOAD
             .checked_sub(
-                Self::SIZE_OF_HEADERS
-                    + SIZE_OF_MERKLE_ROOT
-                    + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY,
+                Self::SIZE_OF_HEADERS + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY,
             )
             .ok_or(Error::InvalidProofSize(proof_size))
     }
-    pub(super) fn get_signed_data_offsets(proof_size: u8) -> Option<Range<usize>> {
-        let data_buffer_size = Self::capacity(proof_size).ok()?;
-        let offset = Self::SIZE_OF_HEADERS + data_buffer_size;
-        Some(offset..offset + SIZE_OF_MERKLE_ROOT)
-    }
-    fn merkle_root(&self) -> Result<&MerkleRoot, Error> {
-        let proof_size = self.proof_size()?;
-        let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?;
-        let root = self
-            .payload
-            .get(offset..offset + SIZE_OF_MERKLE_ROOT)
-            .ok_or(Error::InvalidPayloadSize(self.payload.len()))?;
-        Ok(<&MerkleRoot>::try_from(root).unwrap())
-    }
-    fn merkle_branch(&self) -> Result<MerkleBranch, Error> {
-        let proof_size = self.proof_size()?;
-        let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?;
-        let size = SIZE_OF_MERKLE_ROOT + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY;
-        let merkle_branch = MerkleBranch::try_from(
-            self.payload
-                .get(offset..offset + size)
-                .ok_or(Error::InvalidPayloadSize(self.payload.len()))?,
-        )?;
-        if merkle_branch.proof.len() != usize::from(proof_size) {
-            return Err(Error::InvalidMerkleProof);
-        }
-        Ok(merkle_branch)
-    }
-    fn merkle_tree_node(&self) -> Result<Hash, Error> {
-        let chunk = self.erasure_shard_as_slice()?;
-        Ok(hashv(&[MERKLE_HASH_PREFIX_LEAF, chunk]))
-    }
-    fn verify_merkle_proof(&self) -> Result<bool, Error> {
-        let node = self.merkle_tree_node()?;
-        let index = self.erasure_shard_index()?;
-        Ok(verify_merkle_proof(index, node, &self.merkle_branch()?))
+    // Where the merkle proof starts in the shred binary.
+    fn proof_offset(proof_size: u8) -> Result<usize, Error> {
+        Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?)
+    }
+    fn merkle_root(&self) -> Result<Hash, Error> {
+        let proof_size = self.proof_size()?;
+        let index = self.erasure_shard_index()?;
+        let proof_offset = Self::proof_offset(proof_size)?;
+        let proof = get_merkle_proof(&self.payload, proof_offset, proof_size)?;
+        let node = get_merkle_node(&self.payload, SIZE_OF_SIGNATURE..proof_offset)?;
+        get_merkle_root(index, node, proof)
+    }
+    fn merkle_proof(&self) -> Result<impl Iterator<Item = &MerkleProofEntry>, Error> {
+        let proof_size = self.proof_size()?;
+        let proof_offset = Self::proof_offset(proof_size)?;
+        get_merkle_proof(&self.payload, proof_offset, proof_size)
+    }
+    fn merkle_node(&self) -> Result<Hash, Error> {
+        let proof_size = self.proof_size()?;
+        let proof_offset = Self::proof_offset(proof_size)?;
+        get_merkle_node(&self.payload, SIZE_OF_SIGNATURE..proof_offset)
     }
     fn from_recovered_shard(signature: &Signature, mut shard: Vec<u8>) -> Result<Self, Error> {
@@ -215,41 +197,28 @@ impl ShredData {
             data_header,
             payload: shard,
         };
-        // Merkle proof is not erasure coded and is not yet available here.
-        shred.sanitize(/*verify_merkle_proof:*/ false)?;
+        shred.sanitize()?;
         Ok(shred)
     }
-    fn set_merkle_branch(&mut self, merkle_branch: &MerkleBranch) -> Result<(), Error> {
+    fn set_merkle_proof(&mut self, proof: &[&MerkleProofEntry]) -> Result<(), Error> {
         let proof_size = self.proof_size()?;
-        if merkle_branch.proof.len() != usize::from(proof_size) {
+        if proof.len() != usize::from(proof_size) {
             return Err(Error::InvalidMerkleProof);
         }
-        let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?;
+        let proof_offset = Self::proof_offset(proof_size)?;
         let mut cursor = Cursor::new(
             self.payload
-                .get_mut(offset..)
+                .get_mut(proof_offset..)
                 .ok_or(Error::InvalidProofSize(proof_size))?,
         );
-        bincode::serialize_into(&mut cursor, &merkle_branch.root)?;
-        for entry in &merkle_branch.proof {
+        for entry in proof {
             bincode::serialize_into(&mut cursor, entry)?;
         }
         Ok(())
     }
-    fn sanitize(&self, verify_merkle_proof: bool) -> Result<(), Error> {
-        let shred_variant = self.common_header.shred_variant;
-        if !matches!(shred_variant, ShredVariant::MerkleData(_)) {
-            return Err(Error::InvalidShredVariant);
-        }
-        if verify_merkle_proof && !self.verify_merkle_proof()? {
-            return Err(Error::InvalidMerkleProof);
-        }
-        shred_data::sanitize(self)
-    }
-    pub(super) fn get_merkle_root(shred: &[u8], proof_size: u8) -> Option<MerkleRoot> {
+    pub(super) fn get_merkle_root(shred: &[u8], proof_size: u8) -> Option<Hash> {
         debug_assert_eq!(
             shred::layout::get_shred_variant(shred).unwrap(),
             ShredVariant::MerkleData(proof_size)
@@ -264,14 +233,15 @@ impl ShredData {
             .map(usize::try_from)?
             .ok()?
         };
-        // Where the merkle branch starts in the shred binary.
-        let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size).ok()?;
-        get_merkle_root(shred, proof_size, index, offset)
+        let proof_offset = Self::proof_offset(proof_size).ok()?;
+        let proof = get_merkle_proof(shred, proof_offset, proof_size).ok()?;
+        let node = get_merkle_node(shred, SIZE_OF_SIGNATURE..proof_offset).ok()?;
+        get_merkle_root(index, node, proof).ok()
     }
 }
 impl ShredCode {
-    // proof_size is the number of proof entries in the merkle tree branch.
+    // proof_size is the number of merkle proof entries.
     fn proof_size(&self) -> Result<u8, Error> {
         match self.common_header.shred_variant {
             ShredVariant::MerkleCode(proof_size) => Ok(proof_size),
@@ -281,61 +251,39 @@ impl ShredCode {
     // Size of buffer embedding erasure codes.
     fn capacity(proof_size: u8) -> Result<usize, Error> {
-        // Merkle branch is generated and signed after coding shreds are
+        // Merkle proof is generated and signed after coding shreds are
         // generated. Coding shred headers cannot be erasure coded either.
         Self::SIZE_OF_PAYLOAD
             .checked_sub(
-                Self::SIZE_OF_HEADERS
-                    + SIZE_OF_MERKLE_ROOT
-                    + SIZE_OF_MERKLE_PROOF_ENTRY * usize::from(proof_size),
+                Self::SIZE_OF_HEADERS + SIZE_OF_MERKLE_PROOF_ENTRY * usize::from(proof_size),
             )
             .ok_or(Error::InvalidProofSize(proof_size))
     }
-    fn merkle_root(&self) -> Result<&MerkleRoot, Error> {
-        let proof_size = self.proof_size()?;
-        let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?;
-        let root = self
-            .payload
-            .get(offset..offset + SIZE_OF_MERKLE_ROOT)
-            .ok_or(Error::InvalidPayloadSize(self.payload.len()))?;
-        Ok(<&MerkleRoot>::try_from(root).unwrap())
-    }
-    fn merkle_branch(&self) -> Result<MerkleBranch, Error> {
-        let proof_size = self.proof_size()?;
-        let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?;
-        let size = SIZE_OF_MERKLE_ROOT + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY;
-        let merkle_branch = MerkleBranch::try_from(
-            self.payload
-                .get(offset..offset + size)
-                .ok_or(Error::InvalidPayloadSize(self.payload.len()))?,
-        )?;
-        if merkle_branch.proof.len() != usize::from(proof_size) {
-            return Err(Error::InvalidMerkleProof);
-        }
-        Ok(merkle_branch)
-    }
-    fn merkle_tree_node(&self) -> Result<Hash, Error> {
-        let proof_size = self.proof_size()?;
-        let shard_size = Self::capacity(proof_size)?;
-        let chunk = self
-            .payload
-            .get(SIZE_OF_SIGNATURE..Self::SIZE_OF_HEADERS + shard_size)
-            .ok_or(Error::InvalidPayloadSize(self.payload.len()))?;
-        Ok(hashv(&[MERKLE_HASH_PREFIX_LEAF, chunk]))
-    }
-    fn verify_merkle_proof(&self) -> Result<bool, Error> {
-        let node = self.merkle_tree_node()?;
-        let index = self.erasure_shard_index()?;
-        Ok(verify_merkle_proof(index, node, &self.merkle_branch()?))
-    }
-    pub(super) fn get_signed_data_offsets(proof_size: u8) -> Option<Range<usize>> {
-        let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size).ok()?;
-        Some(offset..offset + SIZE_OF_MERKLE_ROOT)
+    // Where the merkle proof starts in the shred binary.
+    fn proof_offset(proof_size: u8) -> Result<usize, Error> {
+        Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?)
+    }
+    fn merkle_root(&self) -> Result<Hash, Error> {
+        let proof_size = self.proof_size()?;
+        let index = self.erasure_shard_index()?;
+        let proof_offset = Self::proof_offset(proof_size)?;
+        let proof = get_merkle_proof(&self.payload, proof_offset, proof_size)?;
+        let node = get_merkle_node(&self.payload, SIZE_OF_SIGNATURE..proof_offset)?;
+        get_merkle_root(index, node, proof)
+    }
+    fn merkle_proof(&self) -> Result<impl Iterator<Item = &MerkleProofEntry>, Error> {
+        let proof_size = self.proof_size()?;
+        let proof_offset = Self::proof_offset(proof_size)?;
+        get_merkle_proof(&self.payload, proof_offset, proof_size)
+    }
+    fn merkle_node(&self) -> Result<Hash, Error> {
+        let proof_size = self.proof_size()?;
+        let proof_offset = Self::proof_offset(proof_size)?;
+        get_merkle_node(&self.payload, SIZE_OF_SIGNATURE..proof_offset)
     }
     fn from_recovered_shard(
@@ -364,41 +312,28 @@ impl ShredCode {
             coding_header,
             payload: shard,
         };
-        // Merkle proof is not erasure coded and is not yet available here.
-        shred.sanitize(/*verify_merkle_proof:*/ false)?;
+        shred.sanitize()?;
         Ok(shred)
     }
-    fn set_merkle_branch(&mut self, merkle_branch: &MerkleBranch) -> Result<(), Error> {
+    fn set_merkle_proof(&mut self, proof: &[&MerkleProofEntry]) -> Result<(), Error> {
         let proof_size = self.proof_size()?;
-        if merkle_branch.proof.len() != usize::from(proof_size) {
+        if proof.len() != usize::from(proof_size) {
            return Err(Error::InvalidMerkleProof);
         }
-        let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?;
+        let proof_offset = Self::proof_offset(proof_size)?;
         let mut cursor = Cursor::new(
             self.payload
-                .get_mut(offset..)
+                .get_mut(proof_offset..)
                 .ok_or(Error::InvalidProofSize(proof_size))?,
         );
-        bincode::serialize_into(&mut cursor, &merkle_branch.root)?;
-        for entry in &merkle_branch.proof {
+        for entry in proof {
             bincode::serialize_into(&mut cursor, entry)?;
         }
         Ok(())
     }
-    fn sanitize(&self, verify_merkle_proof: bool) -> Result<(), Error> {
-        let shred_variant = self.common_header.shred_variant;
-        if !matches!(shred_variant, ShredVariant::MerkleCode(_)) {
-            return Err(Error::InvalidShredVariant);
-        }
-        if verify_merkle_proof && !self.verify_merkle_proof()? {
-            return Err(Error::InvalidMerkleProof);
-        }
-        shred_code::sanitize(self)
-    }
-    pub(super) fn get_merkle_root(shred: &[u8], proof_size: u8) -> Option<MerkleRoot> {
+    pub(super) fn get_merkle_root(shred: &[u8], proof_size: u8) -> Option<Hash> {
         debug_assert_eq!(
             shred::layout::get_shred_variant(shred).unwrap(),
             ShredVariant::MerkleCode(proof_size)
@@ -415,21 +350,21 @@
             .ok()?;
             num_data_shreds.checked_add(position)?
         };
-        // Where the merkle branch starts in the shred binary.
-        let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size).ok()?;
-        get_merkle_root(shred, proof_size, index, offset)
+        let proof_offset = Self::proof_offset(proof_size).ok()?;
+        let proof = get_merkle_proof(shred, proof_offset, proof_size).ok()?;
+        let node = get_merkle_node(shred, SIZE_OF_SIGNATURE..proof_offset).ok()?;
+        get_merkle_root(index, node, proof).ok()
     }
 }
 impl<'a> ShredTrait<'a> for ShredData {
-    type SignedData = MerkleRoot;
+    type SignedData = Hash;
     impl_shred_common!();
     // Also equal to:
     // ShredData::SIZE_OF_HEADERS
     //     + ShredData::capacity(proof_size).unwrap()
-    //     + SIZE_OF_MERKLE_ROOT
     //     + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY
     const SIZE_OF_PAYLOAD: usize =
         ShredCode::SIZE_OF_PAYLOAD - ShredCode::SIZE_OF_HEADERS + SIZE_OF_SIGNATURE;
@@ -452,7 +387,7 @@ impl<'a> ShredTrait<'a> for ShredData {
             data_header,
             payload,
         };
-        shred.sanitize(/*verify_merkle_proof:*/ true)?;
+        shred.sanitize()?;
         Ok(shred)
     }
@@ -468,9 +403,9 @@ impl<'a> ShredTrait<'a> for ShredData {
             return Err(Error::InvalidPayloadSize(self.payload.len()));
         }
         let proof_size = self.proof_size()?;
-        let data_buffer_size = Self::capacity(proof_size)?;
+        let proof_offset = Self::proof_offset(proof_size)?;
         let mut shard = self.payload;
-        shard.truncate(Self::SIZE_OF_HEADERS + data_buffer_size);
+        shard.truncate(proof_offset);
         shard.drain(0..SIZE_OF_SIGNATURE);
         Ok(shard)
     }
@@ -480,23 +415,28 @@ impl<'a> ShredTrait<'a> for ShredData {
             return Err(Error::InvalidPayloadSize(self.payload.len()));
         }
         let proof_size = self.proof_size()?;
-        let data_buffer_size = Self::capacity(proof_size)?;
+        let proof_offset = Self::proof_offset(proof_size)?;
         self.payload
-            .get(SIZE_OF_SIGNATURE..Self::SIZE_OF_HEADERS + data_buffer_size)
+            .get(SIZE_OF_SIGNATURE..proof_offset)
             .ok_or(Error::InvalidPayloadSize(self.payload.len()))
     }
     fn sanitize(&self) -> Result<(), Error> {
-        self.sanitize(/*verify_merkle_proof:*/ true)
+        let shred_variant = self.common_header.shred_variant;
+        if !matches!(shred_variant, ShredVariant::MerkleData(_)) {
+            return Err(Error::InvalidShredVariant);
+        }
+        let _ = self.merkle_proof()?;
+        shred_data::sanitize(self)
     }
     fn signed_data(&'a self) -> Result<Self::SignedData, Error> {
-        self.merkle_root().copied()
+        self.merkle_root()
     }
 }
 impl<'a> ShredTrait<'a> for ShredCode {
-    type SignedData = MerkleRoot;
+    type SignedData = Hash;
     impl_shred_common!();
     const SIZE_OF_PAYLOAD: usize = shred_code::ShredCode::SIZE_OF_PAYLOAD;
@@ -519,7 +459,7 @@ impl<'a> ShredTrait<'a> for ShredCode {
             coding_header,
             payload,
         };
-        shred.sanitize(/*verify_merkle_proof:*/ true)?;
+        shred.sanitize()?;
         Ok(shred)
     }
@@ -535,10 +475,10 @@ impl<'a> ShredTrait<'a> for ShredCode {
             return Err(Error::InvalidPayloadSize(self.payload.len()));
         }
         let proof_size = self.proof_size()?;
-        let shard_size = Self::capacity(proof_size)?;
+        let proof_offset = Self::proof_offset(proof_size)?;
         let mut shard = self.payload;
+        shard.truncate(proof_offset);
         shard.drain(..Self::SIZE_OF_HEADERS);
-        shard.truncate(shard_size);
         Ok(shard)
     }
@@ -547,18 +487,23 @@ impl<'a> ShredTrait<'a> for ShredCode {
             return Err(Error::InvalidPayloadSize(self.payload.len()));
         }
         let proof_size = self.proof_size()?;
-        let shard_size = Self::capacity(proof_size)?;
+        let proof_offset = Self::proof_offset(proof_size)?;
         self.payload
-            .get(Self::SIZE_OF_HEADERS..Self::SIZE_OF_HEADERS + shard_size)
+            .get(Self::SIZE_OF_HEADERS..proof_offset)
             .ok_or(Error::InvalidPayloadSize(self.payload.len()))
     }
     fn sanitize(&self) -> Result<(), Error> {
-        self.sanitize(/*verify_merkle_proof:*/ true)
+        let shred_variant = self.common_header.shred_variant;
+        if !matches!(shred_variant, ShredVariant::MerkleCode(_)) {
+            return Err(Error::InvalidShredVariant);
+        }
+        let _ = self.merkle_proof()?;
+        shred_code::sanitize(self)
     }
     fn signed_data(&'a self) -> Result<Self::SignedData, Error> {
-        self.merkle_root().copied()
+        self.merkle_root()
     }
 }
@@ -592,23 +537,6 @@ impl ShredCodeTrait for ShredCode {
     }
 }
-impl<'a> TryFrom<&'a [u8]> for MerkleBranch<'a> {
-    type Error = Error;
-    fn try_from(merkle_branch: &'a [u8]) -> Result<Self, Self::Error> {
-        if merkle_branch.len() < SIZE_OF_MERKLE_ROOT {
-            return Err(Error::InvalidMerkleProof);
-        }
-        let (root, proof) = merkle_branch.split_at(SIZE_OF_MERKLE_ROOT);
-        let root = <&MerkleRoot>::try_from(root).unwrap();
-        let proof = proof
-            .chunks(SIZE_OF_MERKLE_PROOF_ENTRY)
-            .map(<&MerkleProofEntry>::try_from)
-            .collect::<Result<_, _>>()
-            .map_err(|_| Error::InvalidMerkleProof)?;
-        Ok(Self { root, proof })
-    }
-}
 // Obtains parent's hash by joining two sibiling nodes in merkle tree.
 fn join_nodes<S: AsRef<[u8]>, T: AsRef<[u8]>>(node: S, other: T) -> Hash {
     let node = &node.as_ref()[..SIZE_OF_MERKLE_PROOF_ENTRY];
@@ -616,15 +544,9 @@ fn join_nodes<S: AsRef<[u8]>, T: AsRef<[u8]>>(node: S, other: T) -> Hash {
     hashv(&[MERKLE_HASH_PREFIX_NODE, node, other])
 }
-fn verify_merkle_proof(index: usize, node: Hash, merkle_branch: &MerkleBranch) -> bool {
-    let proof = merkle_branch.proof.iter().copied();
-    let root = fold_merkle_proof(index, node, proof);
-    root.as_ref() == Some(merkle_branch.root)
-}
 // Recovers root of the merkle tree from a leaf node
 // at the given index and the respective proof.
-fn fold_merkle_proof<'a, I>(index: usize, node: Hash, proof: I) -> Option<MerkleRoot>
+fn get_merkle_root<'a, I>(index: usize, node: Hash, proof: I) -> Result<Hash, Error>
 where
     I: IntoIterator<Item = &'a MerkleProofEntry>,
 {
@@ -638,29 +560,30 @@
         };
         (index >> 1, parent)
     });
-    (index == 0).then(|| {
-        let root = &root.as_ref()[..SIZE_OF_MERKLE_ROOT];
-        MerkleRoot::try_from(root).ok()
-    })?
+    (index == 0)
+        .then_some(root)
+        .ok_or(Error::InvalidMerkleProof)
 }
-fn get_merkle_root(
+fn get_merkle_proof(
     shred: &[u8],
-    proof_size: u8,
-    index: usize,  // Shred index in the erasure batch.
-    offset: usize, // Where the merkle branch starts in the shred binary.
-) -> Option<MerkleRoot> {
-    let node = shred.get(SIZE_OF_SIGNATURE..offset)?;
-    let node = hashv(&[MERKLE_HASH_PREFIX_LEAF, node]);
-    // Merkle proof embedded in the payload.
-    let offset = offset + SIZE_OF_MERKLE_ROOT;
-    let size = usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY;
-    let proof = shred
-        .get(offset..offset + size)?
+    proof_offset: usize, // Where the merkle proof starts.
+    proof_size: u8,      // Number of proof entries.
+) -> Result<impl Iterator<Item = &MerkleProofEntry>, Error> {
+    let proof_size = usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY;
+    Ok(shred
+        .get(proof_offset..proof_offset + proof_size)
+        .ok_or(Error::InvalidPayloadSize(shred.len()))?
         .chunks(SIZE_OF_MERKLE_PROOF_ENTRY)
         .map(<&MerkleProofEntry>::try_from)
-        .map(Result::unwrap);
-    fold_merkle_proof(index, node, proof)
+        .map(Result::unwrap))
+}
+fn get_merkle_node(shred: &[u8], offsets: Range<usize>) -> Result<Hash, Error> {
+    let node = shred
+        .get(offsets)
+        .ok_or(Error::InvalidPayloadSize(shred.len()))?;
+    Ok(hashv(&[MERKLE_HASH_PREFIX_LEAF, node]))
 }
 fn make_merkle_tree(mut nodes: Vec<Hash>) -> Vec<Hash> {
@@ -678,11 +601,11 @@ fn make_merkle_tree(mut nodes: Vec<Hash>) -> Vec<Hash> {
     nodes
 }
-fn make_merkle_branch(
+fn make_merkle_proof(
     mut index: usize, // leaf index ~ shred's erasure shard index.
     mut size: usize,  // number of leaves ~ erasure batch size.
     tree: &[Hash],
-) -> Option<MerkleBranch> {
+) -> Option<Vec<&MerkleProofEntry>> {
     if index >= size {
         return None;
     }
@@ -696,12 +619,7 @@ fn make_merkle_branch(
         size = (size + 1) >> 1;
         index >>= 1;
     }
-    if offset + 1 != tree.len() {
-        return None;
-    }
-    let root = &tree.last()?.as_ref()[..SIZE_OF_MERKLE_ROOT];
-    let root = <&MerkleRoot>::try_from(root).unwrap();
-    Some(MerkleBranch { root, proof })
+    (offset + 1 == tree.len()).then_some(proof)
 }
 pub(super) fn recover(
@@ -828,28 +746,25 @@ pub(super) fn recover(
             }
         })
         .collect::<Result<_, Error>>()?;
-    // Compute merkle tree and set the merkle branch on the recovered shreds.
+    // Compute merkle tree and set the merkle proof on the recovered shreds.
     let nodes: Vec<_> = shreds
         .iter()
-        .map(Shred::merkle_tree_node)
+        .map(Shred::merkle_node)
         .collect::<Result<_, _>>()?;
     let tree = make_merkle_tree(nodes);
-    let merkle_root = &tree.last().unwrap().as_ref()[..SIZE_OF_MERKLE_ROOT];
-    let merkle_root = MerkleRoot::try_from(merkle_root).unwrap();
     for (index, (shred, mask)) in shreds.iter_mut().zip(&mask).enumerate() {
+        let proof = make_merkle_proof(index, num_shards, &tree).ok_or(Error::InvalidMerkleProof)?;
+        if proof.len() != usize::from(proof_size) {
+            return Err(Error::InvalidMerkleProof);
+        }
         if *mask {
-            if shred.merkle_root()? != &merkle_root {
+            if shred.merkle_proof()?.ne(proof) {
                 return Err(Error::InvalidMerkleProof);
             }
         } else {
-            let merkle_branch =
-                make_merkle_branch(index, num_shards, &tree).ok_or(Error::InvalidMerkleProof)?;
-            if merkle_branch.proof.len() != usize::from(proof_size) {
-                return Err(Error::InvalidMerkleProof);
-            }
-            shred.set_merkle_branch(&merkle_branch)?;
+            shred.set_merkle_proof(&proof)?;
             // Already sanitized in Shred{Code,Data}::from_recovered_shard.
-            debug_assert_matches!(shred.sanitize(/*verify_merkle_proof:*/ true), Ok(()));
+            debug_assert_matches!(shred.sanitize(), Ok(()));
             // Assert that shred payload is fully populated.
             debug_assert_eq!(shred, {
                 let shred = shred.payload().clone();
@@ -865,7 +780,7 @@ pub(super) fn recover(
         .collect())
 }
-// Maps number of (code + data) shreds to MerkleBranch.proof.len().
+// Maps number of (code + data) shreds to merkle_proof.len().
 fn get_proof_size(num_shreds: usize) -> u8 {
     let bits = usize::BITS - num_shreds.leading_zeros();
     let proof_size = if num_shreds.is_power_of_two() {
@@ -1026,7 +941,7 @@ pub(super) fn make_shreds_from_data(
             out
         })
         .collect();
-    // Generate coding shreds, populate merkle branch
+    // Generate coding shreds, populate merkle proof
     // for all shreds and attach signature.
     let shreds: Result<Vec<_>, Error> = if shreds.len() <= 1 {
         shreds
@@ -1051,7 +966,7 @@
     shreds
 }
-// Generates coding shreds from data shreds, populates merke branch for all
+// Generates coding shreds from data shreds, populates merke proof for all
// shreds and attaches signature.
 fn make_erasure_batch(
     keypair: &Keypair,
@@ -1109,24 +1024,23 @@ fn make_erasure_batch(
     let tree = make_merkle_tree(
         shreds
             .iter()
-            .map(Shred::merkle_tree_node)
+            .map(Shred::merkle_node)
             .collect::<Result<_, _>>()?,
     );
     // Sign root of Merkle tree.
     let signature = {
         let root = tree.last().ok_or(Error::InvalidMerkleProof)?;
-        let root = &root.as_ref()[..SIZE_OF_MERKLE_ROOT];
-        keypair.sign_message(root)
+        keypair.sign_message(root.as_ref())
     };
-    // Populate merkle branch for all shreds and attach signature.
+    // Populate merkle proof for all shreds and attach signature.
     for (index, shred) in shreds.iter_mut().enumerate() {
-        let merkle_branch = make_merkle_branch(index, erasure_batch_size, &tree)
-            .ok_or(Error::InvalidMerkleProof)?;
-        debug_assert_eq!(merkle_branch.proof.len(), usize::from(proof_size));
-        shred.set_merkle_branch(&merkle_branch)?;
+        let proof =
+            make_merkle_proof(index, erasure_batch_size, &tree).ok_or(Error::InvalidMerkleProof)?;
+        debug_assert_eq!(proof.len(), usize::from(proof_size));
+        shred.set_merkle_proof(&proof)?;
         shred.set_signature(signature);
         debug_assert!(shred.verify(&keypair.pubkey()));
-        debug_assert_matches!(shred.sanitize(/*verify_merkle_proof:*/ true), Ok(()));
+        debug_assert_matches!(shred.sanitize(), Ok(()));
         // Assert that shred payload is fully populated.
         debug_assert_eq!(shred, {
             let shred = shred.payload().clone();
@@ -1140,7 +1054,7 @@
 mod test {
     use {
         super::*,
-        crate::shred::{ShredFlags, ShredId},
+        crate::shred::{ShredFlags, ShredId, SignedData},
         itertools::Itertools,
         matches::assert_matches,
         rand::{seq::SliceRandom, CryptoRng, Rng},
@@ -1150,16 +1064,15 @@ mod test {
        test_case::test_case,
     };
-    // Total size of a data shred including headers and merkle branch.
+    // Total size of a data shred including headers and merkle proof.
     fn shred_data_size_of_payload(proof_size: u8) -> usize {
         ShredData::SIZE_OF_HEADERS
             + ShredData::capacity(proof_size).unwrap()
-            + SIZE_OF_MERKLE_ROOT
             + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY
     }
-    // Merkle branch is generated and signed after coding shreds are generated.
-    // All payload excluding merkle branch and the signature are erasure coded.
+    // Merkle proof is generated and signed after coding shreds are generated.
+    // All payload excluding merkle proof and the signature are erasure coded.
     // Therefore the data capacity is equal to erasure encoded shard size minus
     // size of erasure encoded header.
     fn shred_data_capacity(proof_size: u8) -> usize {
@@ -1171,7 +1084,6 @@ mod test {
     fn shred_data_size_of_erasure_encoded_slice(proof_size: u8) -> usize {
         ShredData::SIZE_OF_PAYLOAD
             - SIZE_OF_SIGNATURE
-            - SIZE_OF_MERKLE_ROOT
             - usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY
     }
@@ -1220,13 +1132,16 @@ mod test {
         let nodes = repeat_with(|| rng.gen::<[u8; 32]>()).map(Hash::from);
         let nodes: Vec<_> = nodes.take(size).collect();
         let tree = make_merkle_tree(nodes.clone());
+        let root = tree.last().copied().unwrap();
         for index in 0..size {
-            let branch = make_merkle_branch(index, size, &tree).unwrap();
-            let root = &tree.last().unwrap().as_ref()[..SIZE_OF_MERKLE_ROOT];
-            assert_eq!(branch.root, root);
-            assert!(verify_merkle_proof(index, nodes[index], &branch));
-            for i in (0..size).filter(|&i| i != index) {
-                assert!(!verify_merkle_proof(i, nodes[i], &branch));
+            let proof = make_merkle_proof(index, size, &tree).unwrap();
+            for (k, &node) in nodes.iter().enumerate() {
+                let proof = proof.iter().copied();
+                if k == index {
+                    assert_eq!(root, get_merkle_root(k, node, proof).unwrap());
+                } else {
+                    assert_ne!(root, get_merkle_root(k, node, proof).unwrap());
+                }
             }
         }
     }
@@ -1268,15 +1183,18 @@ mod test {
         let common_header = ShredCommonHeader {
             signature: Signature::default(),
             shred_variant: ShredVariant::MerkleData(proof_size),
-            slot: 145865705,
+            slot: 145_865_705,
             index: 1835,
-            version: 4978,
+            version: rng.gen(),
             fec_set_index: 1835,
         };
-        let data_header = DataShredHeader {
-            parent_offset: 25,
-            flags: unsafe { ShredFlags::from_bits_unchecked(0b0010_1010) },
-            size: 0,
+        let data_header = {
+            let reference_tick = rng.gen_range(0, 0x40);
+            DataShredHeader {
+                parent_offset: rng.gen::<u16>().max(1),
+                flags: unsafe { ShredFlags::from_bits_unchecked(reference_tick) },
+                size: 0,
+            }
         };
         let coding_header = CodingShredHeader {
             num_data_shreds: num_data_shreds as u16,
@@ -1342,19 +1260,19 @@ mod test {
         }
         let nodes: Vec<_> = shreds
             .iter()
-            .map(Shred::merkle_tree_node)
+            .map(Shred::merkle_node)
             .collect::<Result<_, _>>()
             .unwrap();
         let tree = make_merkle_tree(nodes);
         for (index, shred) in shreds.iter_mut().enumerate() {
-            let merkle_branch = make_merkle_branch(index, num_shreds, &tree).unwrap();
-            assert_eq!(merkle_branch.proof.len(), usize::from(proof_size));
-            shred.set_merkle_branch(&merkle_branch).unwrap();
+            let proof = make_merkle_proof(index, num_shreds, &tree).unwrap();
+            assert_eq!(proof.len(), usize::from(proof_size));
+            shred.set_merkle_proof(&proof).unwrap();
             let data = shred.signed_data().unwrap();
             let signature = keypair.sign_message(data.as_ref());
             shred.set_signature(signature);
             assert!(shred.verify(&keypair.pubkey()));
-            assert_matches!(shred.sanitize(/*verify_merkle_proof:*/ true), Ok(()));
+            assert_matches!(shred.sanitize(), Ok(()));
         }
         assert_eq!(shreds.iter().map(Shred::signature).dedup().count(), 1);
         for size in num_data_shreds..num_shreds {
@@ -1486,7 +1404,7 @@ mod test {
         let pubkey = keypair.pubkey();
         for shred in &shreds {
             assert!(shred.verify(&pubkey));
-            assert_matches!(shred.sanitize(/*verify_merkle_proof:*/ true), Ok(()));
+            assert_matches!(shred.sanitize(), Ok(()));
             let ShredCommonHeader {
                 signature,
                 shred_variant,
@@ -1497,7 +1415,9 @@ mod test {
             } = *shred.common_header();
             let shred_type = ShredType::from(shred_variant);
             let key = ShredId::new(slot, index, shred_type);
-            let merkle_root = shred.merkle_root().copied().ok();
+            let merkle_root = shred.merkle_root().unwrap();
+            assert!(signature.verify(pubkey.as_ref(), merkle_root.as_ref()));
+            // Verify shred::layout api.
             let shred = shred.payload();
             assert_eq!(shred::layout::get_signature(shred), Some(signature));
             assert_eq!(
@@ -1509,11 +1429,10 @@ mod test {
             assert_eq!(shred::layout::get_index(shred), Some(index));
             assert_eq!(shred::layout::get_version(shred), Some(version));
             assert_eq!(shred::layout::get_shred_id(shred), Some(key));
-            assert_eq!(shred::layout::get_merkle_root(shred), merkle_root);
-            let offsets = shred::layout::get_signed_data_offsets(shred).unwrap();
-            let data = shred.get(offsets).unwrap();
-            assert!(signature.verify(pubkey.as_ref(), data));
+            assert_eq!(shred::layout::get_merkle_root(shred), Some(merkle_root));
+            assert_eq!(shred::layout::get_signed_data_offsets(shred), None);
             let data = shred::layout::get_signed_data(shred).unwrap();
+            assert_eq!(data, SignedData::MerkleRoot(merkle_root));
             assert!(signature.verify(pubkey.as_ref(), data.as_ref()));
         }
         // Verify common, data and coding headers.

View File

@@ -102,8 +102,8 @@ impl ShredData {
     }
     // Maximum size of ledger data that can be embedded in a data-shred.
-    // merkle_proof_size is the number of proof entries in the merkle tree
-    // branch. None indicates a legacy data-shred.
+    // merkle_proof_size is the number of merkle proof entries.
+    // None indicates a legacy data-shred.
     pub fn capacity(merkle_proof_size: Option<u8>) -> Result<usize, Error> {
         match merkle_proof_size {
             None => Ok(legacy::ShredData::CAPACITY),

View File

@@ -1,6 +1,6 @@
 #![allow(clippy::implicit_hasher)]
 use {
-    crate::shred::{self, MerkleRoot, SIZE_OF_MERKLE_ROOT},
+    crate::shred,
     itertools::{izip, Itertools},
     rayon::{prelude::*, ThreadPool},
     sha2::{Digest, Sha512},
@@ -15,13 +15,17 @@ use {
     solana_rayon_threadlimit::get_thread_count,
     solana_sdk::{
         clock::Slot,
+        hash::Hash,
         pubkey::Pubkey,
         signature::{Keypair, Signature, Signer},
     },
+    static_assertions::const_assert_eq,
     std::{collections::HashMap, fmt::Debug, iter::repeat, mem::size_of, ops::Range, sync::Arc},
 };
 const SIGN_SHRED_GPU_MIN: usize = 256;
+const_assert_eq!(SIZE_OF_MERKLE_ROOT, 32);
+const SIZE_OF_MERKLE_ROOT: usize = std::mem::size_of::<Hash>();
 lazy_static! {
     static ref SIGVERIFY_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new()
@@ -153,7 +157,7 @@ fn get_merkle_roots(
     PinnedVec<u8>,      // Merkle roots
     Vec<Option<usize>>, // Offsets
 ) {
-    let merkle_roots: Vec<Option<MerkleRoot>> = SIGVERIFY_THREAD_POOL.install(|| {
+    let merkle_roots: Vec<Option<Hash>> = SIGVERIFY_THREAD_POOL.install(|| {
         packets
             .par_iter()
             .flat_map(|packets| {
@@ -179,7 +183,7 @@ fn get_merkle_roots(
             let root = root?;
             let offset = next_offset;
             next_offset += SIZE_OF_MERKLE_ROOT;
-            buffer[offset..next_offset].copy_from_slice(&root);
+            buffer[offset..next_offset].copy_from_slice(root.as_ref());
             Some(offset)
         })
         .collect()
@@ -804,9 +808,10 @@ mod tests {
             let shred = shred.payload();
             let slot = shred::layout::get_slot(shred).unwrap();
             let signature = shred::layout::get_signature(shred).unwrap();
-            let offsets = shred::layout::get_signed_data_offsets(shred).unwrap();
             let pubkey = keypairs[&slot].pubkey();
-            assert!(signature.verify(pubkey.as_ref(), &shred[offsets]));
+            if let Some(offsets) = shred::layout::get_signed_data_offsets(shred) {
+                assert!(signature.verify(pubkey.as_ref(), &shred[offsets]));
+            }
             let data = shred::layout::get_signed_data(shred).unwrap();
             assert!(signature.verify(pubkey.as_ref(), data.as_ref()));
         }