shred: expose merkle root for use in blockstore (#34063)

* shred: expose merkle root for use in blockstore

* pr feedback: sorted, keep Result return type

* convert Result<Hash> -> Option<Hash>
Ashwin Sekar, 2023-11-15 15:13:50 -05:00 (committed by GitHub)
parent 7ea4b2dbf7
commit 6a5b8e86f3
5 changed files with 48 additions and 4 deletions
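For illustration, here is a minimal caller-side sketch of the accessor this commit exposes, assuming the crate paths solana_ledger::shred and solana_sdk::hash::Hash; merkle_root_for_storage is a hypothetical helper, not part of the change. It mirrors the Result<Hash, Error> -> Option<Hash> conversion that MerkleRootMeta::from_shred performs in the first hunk below.

use solana_ledger::shred::Shred;
use solana_sdk::hash::Hash;

// Hypothetical helper: convert the Result-returning accessor into the
// Option<Hash> that blockstore stores. Legacy (pre-merkle) shreds have no
// merkle root and return an error, which maps to None here.
fn merkle_root_for_storage(shred: &Shred) -> Option<Hash> {
    shred.merkle_root().ok()
}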


@@ -406,6 +406,35 @@ impl ErasureMeta {
     }
 }
 
+#[allow(dead_code)]
+impl MerkleRootMeta {
+    pub(crate) fn from_shred(shred: &Shred) -> Self {
+        Self {
+            // An error here after the shred has already sigverified
+            // can only indicate that the leader is sending
+            // legacy or malformed shreds. We should still store
+            // `None` for those cases in blockstore, as a later
+            // shred that contains a proper merkle root would constitute
+            // a valid duplicate shred proof.
+            merkle_root: shred.merkle_root().ok(),
+            first_received_shred_index: shred.index(),
+            first_received_shred_type: shred.shred_type(),
+        }
+    }
+
+    pub(crate) fn merkle_root(&self) -> Option<Hash> {
+        self.merkle_root
+    }
+
+    pub(crate) fn first_received_shred_index(&self) -> u32 {
+        self.first_received_shred_index
+    }
+
+    pub(crate) fn first_received_shred_type(&self) -> ShredType {
+        self.first_received_shred_type
+    }
+}
+
 impl DuplicateSlotProof {
     pub(crate) fn new(shred1: Vec<u8>, shred2: Vec<u8>) -> Self {
         DuplicateSlotProof { shred1, shred2 }
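The comment in from_shred above points at the intended use of the stored root: if a later shred in the same erasure set carries a different merkle root than the one first recorded, the pair constitutes a duplicate shred proof. A rough sketch of that comparison, assuming crate-internal access to MerkleRootMeta and Shred; is_merkle_root_conflict is a hypothetical helper and not part of this diff.

// Hypothetical crate-internal check: does a newly received shred disagree
// with the merkle root recorded for its erasure set?
fn is_merkle_root_conflict(meta: &MerkleRootMeta, new_shred: &Shred) -> bool {
    match (meta.merkle_root(), new_shred.merkle_root().ok()) {
        (Some(stored), Some(received)) => stored != received,
        // If either root is unavailable (legacy or malformed shred), this
        // sketch does not flag a conflict on its own.
        _ => false,
    }
}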


@@ -334,6 +334,7 @@ impl Shred {
     dispatch!(pub(crate) fn erasure_shard_index(&self) -> Result<usize, Error>);
     dispatch!(pub fn into_payload(self) -> Vec<u8>);
+    dispatch!(pub fn merkle_root(&self) -> Result<Hash, Error>);
     dispatch!(pub fn payload(&self) -> &Vec<u8>);
     dispatch!(pub fn sanitize(&self) -> Result<(), Error>);
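The dispatch! macro forwards the call to the underlying data or coding shred. The added line expands to roughly the following method on the Shred enum (an illustrative sketch, not the macro's literal output):

impl Shred {
    pub fn merkle_root(&self) -> Result<Hash, Error> {
        // Forward to the concrete variant; legacy shreds surface
        // Error::InvalidShredType from the inner implementations.
        match self {
            Self::ShredCode(shred) => shred.merkle_root(),
            Self::ShredData(shred) => shred.merkle_root(),
        }
    }
}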


@@ -154,7 +154,7 @@ impl ShredData {
         Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?)
     }
 
-    fn merkle_root(&self) -> Result<Hash, Error> {
+    pub(super) fn merkle_root(&self) -> Result<Hash, Error> {
         let proof_size = self.proof_size()?;
         let index = self.erasure_shard_index()?;
         let proof_offset = Self::proof_offset(proof_size)?;
@@ -266,7 +266,7 @@ impl ShredCode {
         Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?)
     }
 
-    fn merkle_root(&self) -> Result<Hash, Error> {
+    pub(super) fn merkle_root(&self) -> Result<Hash, Error> {
         let proof_size = self.proof_size()?;
         let index = self.erasure_shard_index()?;
         let proof_offset = Self::proof_offset(proof_size)?;


@@ -6,7 +6,7 @@ use {
         CodingShredHeader, Error, ShredCommonHeader, ShredType, SignedData,
         DATA_SHREDS_PER_FEC_BLOCK, MAX_DATA_SHREDS_PER_SLOT, SIZE_OF_NONCE,
     },
-    solana_sdk::{clock::Slot, packet::PACKET_DATA_SIZE, signature::Signature},
+    solana_sdk::{clock::Slot, hash::Hash, packet::PACKET_DATA_SIZE, signature::Signature},
     static_assertions::const_assert_eq,
 };
@@ -47,6 +47,13 @@ impl ShredCode {
         }
     }
 
+    pub(super) fn merkle_root(&self) -> Result<Hash, Error> {
+        match self {
+            Self::Legacy(_) => Err(Error::InvalidShredType),
+            Self::Merkle(shred) => shred.merkle_root(),
+        }
+    }
+
     pub(super) fn new_from_parity_shard(
         slot: Slot,
         index: u32,


@@ -7,7 +7,7 @@ use {
         DataShredHeader, Error, ShredCommonHeader, ShredFlags, ShredType, ShredVariant, SignedData,
         MAX_DATA_SHREDS_PER_SLOT,
     },
-    solana_sdk::{clock::Slot, signature::Signature},
+    solana_sdk::{clock::Slot, hash::Hash, signature::Signature},
 };
 
 #[derive(Clone, Debug, Eq, PartialEq)]
@@ -41,6 +41,13 @@ impl ShredData {
         }
     }
 
+    pub(super) fn merkle_root(&self) -> Result<Hash, Error> {
+        match self {
+            Self::Legacy(_) => Err(Error::InvalidShredType),
+            Self::Merkle(shred) => shred.merkle_root(),
+        }
+    }
+
     pub(super) fn new_from_data(
         slot: Slot,
         index: u32,