adds crds-value for broadcasting duplicate shreds through gossip (#14133)

In gossip, the header overhead we get from:
https://github.com/solana-labs/solana/blob/de9ac43eb/core/src/cluster_info.rs#L434-L435
https://github.com/solana-labs/solana/blob/de9ac43eb/core/src/crds_value.rs#L31-L36
https://github.com/solana-labs/solana/blob/de9ac43eb/core/src/crds_value.rs#L73
already exceeds SIZE_OF_NONCE in shreds. We also need additional
metadata (wallclock, source pubkey, ...), which means that, given
SHRED_PAYLOAD_SIZE, we cannot fit all of this in PACKET_DATA_SIZE:
https://github.com/solana-labs/solana/blob/de9ac43eb/ledger/src/shred.rs#L80
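
Rough arithmetic behind that claim (a sketch only; the concrete numbers
below are assumptions matching the constants around this commit and are
not part of the diff):

const PACKET_DATA_SIZE: usize = 1232; // assumed max UDP payload per packet
const SIZE_OF_NONCE: usize = 4; // repair nonce appended to shred packets
const SHRED_PAYLOAD_SIZE: usize = PACKET_DATA_SIZE - SIZE_OF_NONCE; // 1228

// Lower bound on gossip framing: 64-byte CrdsValue signature plus the
// Protocol and CrdsData enum tags (4 bytes each), before any
// DuplicateShred metadata -- already far more than the 4-byte nonce.
const GOSSIP_HEADER_SIZE: usize = 64 + 4 + 4;

fn main() {
    // One full shred payload plus gossip framing no longer fits in a
    // packet, and a duplicate proof needs two payloads, hence the chunking.
    assert!(SHRED_PAYLOAD_SIZE + GOSSIP_HEADER_SIZE > PACKET_DATA_SIZE);
}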

On top of that, we need two shred payloads as the duplicate proof. So
each DuplicateShred crds value includes only a chunk of the payload,
along with the metadata needed to reconstruct the full payload from the
chunks on the receiving end.
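
As a rough illustration of the intended flow, here is a sketch using the
new duplicate_shred API added in this commit. The gossip plumbing that
actually pushes and collects the crds values is not part of this commit,
and the 512-byte size limit and identity encoder/decoder (Ok) are
arbitrary choices for illustration:

use solana_core::duplicate_shred::{self, DuplicateShred, Error};
use solana_ledger::{blockstore_meta::DuplicateSlotProof, shred::Shred};
use solana_sdk::{clock::Slot, pubkey::Pubkey};

// Sender: split one proof into gossip-sized chunks, each of which becomes
// a CrdsData::DuplicateShred value pushed through gossip.
fn make_chunks(
    proof: &DuplicateSlotProof,
    self_pubkey: Pubkey,
    leader: impl FnOnce(Slot) -> Option<Pubkey>,
    wallclock: u64,
) -> Result<Vec<DuplicateShred>, Error> {
    let chunks = duplicate_shred::from_duplicate_slot_proof(
        proof,
        self_pubkey,
        leader,
        wallclock,
        512, // max_size: max serialized size of each DuplicateShred
        Ok,  // encoder: identity here; could compress
    )?;
    Ok(chunks.collect())
}

// Receiver: once all num_chunks chunks for one (slot, shred_index,
// shred_type) have arrived, rebuild the two conflicting shreds and
// re-verify their signatures against the slot leader.
fn rebuild(
    chunks: Vec<DuplicateShred>,
    leader: impl FnOnce(Slot) -> Option<Pubkey>,
) -> Result<(Shred, Shred), Error> {
    duplicate_shred::into_shreds(chunks, leader, Ok /*decoder*/)
}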
behzad nouri 2020-12-18 14:32:43 +00:00 committed by GitHub
parent 3c9b853268
commit 6a3797e164
11 changed files with 445 additions and 26 deletions

Cargo.lock generated

@ -4020,6 +4020,7 @@ dependencies = [
"reqwest",
"rustc_version",
"serde",
"serde_bytes",
"serde_derive",
"serde_json",
"serial_test",
@ -4339,11 +4340,14 @@ dependencies = [
"rayon",
"reed-solomon-erasure",
"rocksdb",
"rustc_version",
"serde",
"serde_bytes",
"sha2",
"solana-bpf-loader-program",
"solana-budget-program",
"solana-frozen-abi 1.6.0",
"solana-frozen-abi-macro 1.6.0",
"solana-logger 1.6.0",
"solana-measure",
"solana-merkle-tree",

core/Cargo.toml

@ -46,6 +46,7 @@ raptorq = "1.4.2"
rayon = "1.4.1"
regex = "1.3.9"
serde = "1.0.112"
serde_bytes = "0.11"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.6.0" }

core/src/cluster_info.rs

@ -424,7 +424,7 @@ pub fn make_accounts_hashes_message(
type Ping = ping_pong::Ping<[u8; GOSSIP_PING_TOKEN_SIZE]>;
// TODO These messages should go through the gpu pipeline for spam filtering
#[frozen_abi(digest = "6PpTdBvyX37y5ERokb8DejgKobpsuTbFJC39f8Eqz7Vy")]
#[frozen_abi(digest = "HAFjUDgiGthYTiAg6CYJxA8PqfwuhrC82NtHYYmee4vb")]
#[derive(Serialize, Deserialize, Debug, AbiEnumVisitor, AbiExample)]
#[allow(clippy::large_enum_variant)]
enum Protocol {

core/src/crds.rs

@ -267,6 +267,7 @@ impl Crds {
now: u64,
timeouts: &HashMap<Pubkey, u64>,
) -> Vec<CrdsValueLabel> {
// TODO: need custom logic for purging duplicate shreds.
let default_timeout = *timeouts
.get(&Pubkey::default())
.expect("must have default timeout");

core/src/crds_value.rs

@ -1,7 +1,10 @@
use crate::cluster_info::MAX_SNAPSHOT_HASHES;
use crate::contact_info::ContactInfo;
use crate::deprecated;
use crate::epoch_slots::EpochSlots;
use crate::{
cluster_info::MAX_SNAPSHOT_HASHES,
contact_info::ContactInfo,
deprecated,
duplicate_shred::{DuplicateShred, DuplicateShredIndex},
epoch_slots::EpochSlots,
};
use bincode::{serialize, serialized_size};
use rand::{CryptoRng, Rng};
use solana_sdk::sanitize::{Sanitize, SanitizeError};
@ -80,6 +83,7 @@ pub enum CrdsData {
LegacyVersion(LegacyVersion),
Version(Version),
NodeInstance(NodeInstance),
DuplicateShred(DuplicateShred),
}
impl Sanitize for CrdsData {
@ -109,6 +113,7 @@ impl Sanitize for CrdsData {
CrdsData::LegacyVersion(version) => version.sanitize(),
CrdsData::Version(version) => version.sanitize(),
CrdsData::NodeInstance(node) => node.sanitize(),
CrdsData::DuplicateShred(shred) => shred.sanitize(),
}
}
}
@ -145,9 +150,7 @@ pub struct SnapshotHash {
impl Sanitize for SnapshotHash {
fn sanitize(&self) -> Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
sanitize_wallclock(self.wallclock)?;
for (slot, _) in &self.hashes {
if *slot >= MAX_SLOT {
return Err(SanitizeError::ValueOutOfBounds);
@ -220,9 +223,7 @@ impl LowestSlot {
impl Sanitize for LowestSlot {
fn sanitize(&self) -> Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
sanitize_wallclock(self.wallclock)?;
if self.lowest >= MAX_SLOT {
return Err(SanitizeError::ValueOutOfBounds);
}
@ -248,9 +249,7 @@ pub struct Vote {
impl Sanitize for Vote {
fn sanitize(&self) -> Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
sanitize_wallclock(self.wallclock)?;
self.from.sanitize()?;
self.transaction.sanitize()
}
@ -275,9 +274,7 @@ pub struct LegacyVersion {
impl Sanitize for LegacyVersion {
fn sanitize(&self) -> Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
sanitize_wallclock(self.wallclock)?;
self.from.sanitize()?;
self.version.sanitize()
}
@ -292,9 +289,7 @@ pub struct Version {
impl Sanitize for Version {
fn sanitize(&self) -> Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
sanitize_wallclock(self.wallclock)?;
self.from.sanitize()?;
self.version.sanitize()
}
@ -370,9 +365,7 @@ impl NodeInstance {
impl Sanitize for NodeInstance {
fn sanitize(&self) -> Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
sanitize_wallclock(self.wallclock)?;
self.from.sanitize()
}
}
@ -390,6 +383,7 @@ pub enum CrdsValueLabel {
LegacyVersion(Pubkey),
Version(Pubkey),
NodeInstance(Pubkey, u64 /*token*/),
DuplicateShred(DuplicateShredIndex, Pubkey),
}
impl fmt::Display for CrdsValueLabel {
@ -403,7 +397,8 @@ impl fmt::Display for CrdsValueLabel {
CrdsValueLabel::AccountsHashes(_) => write!(f, "AccountsHashes({})", self.pubkey()),
CrdsValueLabel::LegacyVersion(_) => write!(f, "LegacyVersion({})", self.pubkey()),
CrdsValueLabel::Version(_) => write!(f, "Version({})", self.pubkey()),
CrdsValueLabel::NodeInstance(_, _) => write!(f, "NodeInstance({})", self.pubkey()),
CrdsValueLabel::NodeInstance(pk, token) => write!(f, "NodeInstance({}, {})", pk, token),
CrdsValueLabel::DuplicateShred(ix, pk) => write!(f, "DuplicateShred({:?}, {})", ix, pk),
}
}
}
@ -420,6 +415,7 @@ impl CrdsValueLabel {
CrdsValueLabel::LegacyVersion(p) => *p,
CrdsValueLabel::Version(p) => *p,
CrdsValueLabel::NodeInstance(p, _ /*token*/) => *p,
CrdsValueLabel::DuplicateShred(_, p) => *p,
}
}
}
@ -467,6 +463,7 @@ impl CrdsValue {
CrdsData::LegacyVersion(version) => version.wallclock,
CrdsData::Version(version) => version.wallclock,
CrdsData::NodeInstance(node) => node.wallclock,
CrdsData::DuplicateShred(shred) => shred.wallclock,
}
}
pub fn pubkey(&self) -> Pubkey {
@ -480,6 +477,7 @@ impl CrdsValue {
CrdsData::LegacyVersion(version) => version.from,
CrdsData::Version(version) => version.from,
CrdsData::NodeInstance(node) => node.from,
CrdsData::DuplicateShred(shred) => shred.from,
}
}
pub fn label(&self) -> CrdsValueLabel {
@ -492,7 +490,10 @@ impl CrdsValue {
CrdsData::EpochSlots(ix, _) => CrdsValueLabel::EpochSlots(*ix, self.pubkey()),
CrdsData::LegacyVersion(_) => CrdsValueLabel::LegacyVersion(self.pubkey()),
CrdsData::Version(_) => CrdsValueLabel::Version(self.pubkey()),
CrdsData::NodeInstance(node) => CrdsValueLabel::NodeInstance(self.pubkey(), node.token),
CrdsData::NodeInstance(node) => CrdsValueLabel::NodeInstance(node.from, node.token),
CrdsData::DuplicateShred(shred) => {
CrdsValueLabel::DuplicateShred(DuplicateShredIndex::from(shred), shred.from)
}
}
}
pub fn contact_info(&self) -> Option<&ContactInfo> {
@ -623,6 +624,14 @@ where
out.into_iter().map(|(_, (v, _))| v)
}
pub(crate) fn sanitize_wallclock(wallclock: u64) -> Result<(), SanitizeError> {
if wallclock >= MAX_WALLCLOCK {
Err(SanitizeError::ValueOutOfBounds)
} else {
Ok(())
}
}
#[cfg(test)]
mod test {
use super::*;

core/src/duplicate_shred.rs Normal file

@ -0,0 +1,367 @@
use crate::crds_value::sanitize_wallclock;
use itertools::Itertools;
use solana_ledger::{
blockstore_meta::DuplicateSlotProof,
shred::{Shred, ShredError, ShredType},
};
use solana_sdk::{
clock::Slot,
pubkey::Pubkey,
sanitize::{Sanitize, SanitizeError},
};
use std::{
collections::{hash_map::Entry, HashMap},
convert::TryFrom,
num::TryFromIntError,
};
use thiserror::Error;
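// Serialized size of a DuplicateShred whose chunk is empty (all metadata
// fields plus the length prefix of the empty chunk); verified by
// test_duplicate_shred_header_size below.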
const DUPLICATE_SHRED_HEADER_SIZE: usize = 63;
/// Function returning leader at a given slot.
pub trait LeaderScheduleFn: FnOnce(Slot) -> Option<Pubkey> {}
impl<F> LeaderScheduleFn for F where F: FnOnce(Slot) -> Option<Pubkey> {}
#[derive(Clone, Debug, PartialEq, AbiExample, Deserialize, Serialize)]
pub struct DuplicateShred {
pub(crate) from: Pubkey,
pub(crate) wallclock: u64,
slot: Slot,
shred_index: u32,
shred_type: ShredType,
// Serialized DuplicateSlotProof split into chunks.
num_chunks: u8,
chunk_index: u8,
#[serde(with = "serde_bytes")]
chunk: Vec<u8>,
}
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct DuplicateShredIndex {
slot: Slot,
shred_index: u32,
shred_type: ShredType,
num_chunks: u8,
chunk_index: u8,
}
#[derive(Debug, Error)]
pub enum Error {
#[error("data chunk mismatch")]
DataChunkMismatch,
#[error("decoding error")]
DecodingError(std::io::Error),
#[error("encoding error")]
EncodingError(std::io::Error),
#[error("invalid chunk index")]
InvalidChunkIndex,
#[error("invalid duplicate shreds")]
InvalidDuplicateShreds,
#[error("invalid duplicate slot proof")]
InvalidDuplicateSlotProof,
#[error("invalid signature")]
InvalidSignature,
#[error("invalid size limit")]
InvalidSizeLimit,
#[error("invalid shred")]
InvalidShred(#[from] ShredError),
#[error("number of chunks mismatch")]
NumChunksMismatch,
#[error("missing data chunk")]
MissingDataChunk,
#[error("(de)serialization error")]
SerializationError(#[from] bincode::Error),
#[error("shred index mismatch")]
ShredIndexMismatch,
#[error("shred type mismatch")]
ShredTypeMismatch,
#[error("slot mismatch")]
SlotMismatch,
#[error("type conversion error")]
TryFromIntError(#[from] TryFromIntError),
#[error("unknown slot leader")]
UnknownSlotLeader,
}
// Asserts that the two shreds form a duplicate proof for the same
// triplet of (slot, shred-index, shred-type), and that they have
// valid signatures from the slot leader.
fn check_shreds(
leader: impl LeaderScheduleFn,
shred1: &Shred,
shred2: &Shred,
) -> Result<(), Error> {
if shred1.slot() != shred2.slot() {
Err(Error::SlotMismatch)
} else if shred1.index() != shred2.index() {
Err(Error::ShredIndexMismatch)
} else if shred1.common_header.shred_type != shred2.common_header.shred_type {
Err(Error::ShredTypeMismatch)
} else if shred1.payload == shred2.payload {
Err(Error::InvalidDuplicateShreds)
} else {
let slot_leader = leader(shred1.slot()).ok_or(Error::UnknownSlotLeader)?;
if !shred1.verify(&slot_leader) || !shred2.verify(&slot_leader) {
Err(Error::InvalidSignature)
} else {
Ok(())
}
}
}
/// Splits a DuplicateSlotProof into DuplicateShred
/// chunks with a size limit on each chunk.
pub fn from_duplicate_slot_proof(
proof: &DuplicateSlotProof,
self_pubkey: Pubkey, // Pubkey of my node broadcasting crds value.
leader: impl LeaderScheduleFn,
wallclock: u64,
max_size: usize, // Maximum serialized size of each DuplicateShred.
encoder: impl FnOnce(Vec<u8>) -> Result<Vec<u8>, std::io::Error>,
) -> Result<impl Iterator<Item = DuplicateShred>, Error> {
if proof.shred1 == proof.shred2 {
return Err(Error::InvalidDuplicateSlotProof);
}
let shred1 = Shred::new_from_serialized_shred(proof.shred1.clone())?;
let shred2 = Shred::new_from_serialized_shred(proof.shred2.clone())?;
check_shreds(leader, &shred1, &shred2)?;
let (slot, shred_index, shred_type) = (
shred1.slot(),
shred1.index(),
shred1.common_header.shred_type,
);
let data = bincode::serialize(proof)?;
let data = encoder(data).map_err(Error::EncodingError)?;
let chunk_size = if DUPLICATE_SHRED_HEADER_SIZE < max_size {
max_size - DUPLICATE_SHRED_HEADER_SIZE
} else {
return Err(Error::InvalidSizeLimit);
};
let chunks: Vec<_> = data.chunks(chunk_size).map(Vec::from).collect();
let num_chunks = u8::try_from(chunks.len())?;
let chunks = chunks
.into_iter()
.enumerate()
.map(move |(i, chunk)| DuplicateShred {
from: self_pubkey,
wallclock,
slot,
shred_index,
shred_type,
num_chunks,
chunk_index: i as u8,
chunk,
});
Ok(chunks)
}
// Returns a predicate checking if a duplicate-shred chunk matches
// (slot, shred_index, shred_type) and has valid chunk_index.
fn check_chunk(
slot: Slot,
shred_index: u32,
shred_type: ShredType,
num_chunks: u8,
) -> impl Fn(&DuplicateShred) -> Result<(), Error> {
move |dup| {
if dup.slot != slot {
Err(Error::SlotMismatch)
} else if dup.shred_index != shred_index {
Err(Error::ShredIndexMismatch)
} else if dup.shred_type != shred_type {
Err(Error::ShredTypeMismatch)
} else if dup.num_chunks != num_chunks {
Err(Error::NumChunksMismatch)
} else if dup.chunk_index >= num_chunks {
Err(Error::InvalidChunkIndex)
} else {
Ok(())
}
}
}
/// Reconstructs the duplicate shreds from chunks of DuplicateShred.
pub fn into_shreds(
chunks: impl IntoIterator<Item = DuplicateShred>,
leader: impl LeaderScheduleFn,
decoder: impl FnOnce(Vec<u8>) -> Result<Vec<u8>, std::io::Error>,
) -> Result<(Shred, Shred), Error> {
let mut chunks = chunks.into_iter();
let DuplicateShred {
slot,
shred_index,
shred_type,
num_chunks,
chunk_index,
chunk,
..
} = match chunks.next() {
None => return Err(Error::InvalidDuplicateShreds),
Some(chunk) => chunk,
};
let slot_leader = leader(slot).ok_or(Error::UnknownSlotLeader)?;
let check_chunk = check_chunk(slot, shred_index, shred_type, num_chunks);
let mut data = HashMap::new();
data.insert(chunk_index, chunk);
for chunk in chunks {
check_chunk(&chunk)?;
match data.entry(chunk.chunk_index) {
Entry::Vacant(entry) => {
entry.insert(chunk.chunk);
}
Entry::Occupied(entry) => {
if *entry.get() != chunk.chunk {
return Err(Error::DataChunkMismatch);
}
}
}
}
if data.len() != num_chunks as usize {
return Err(Error::MissingDataChunk);
}
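    // Reassemble the chunks in index order; Itertools::concat (imported
    // above) flattens them into the serialized proof before decoding.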
let data = (0..num_chunks).map(|k| data.remove(&k).unwrap());
let data = decoder(data.concat()).map_err(Error::DecodingError)?;
let proof: DuplicateSlotProof = bincode::deserialize(&data)?;
if proof.shred1 == proof.shred2 {
return Err(Error::InvalidDuplicateSlotProof);
}
let shred1 = Shred::new_from_serialized_shred(proof.shred1)?;
let shred2 = Shred::new_from_serialized_shred(proof.shred2)?;
if shred1.slot() != slot || shred2.slot() != slot {
Err(Error::SlotMismatch)
} else if shred1.index() != shred_index || shred2.index() != shred_index {
Err(Error::ShredIndexMismatch)
} else if shred1.common_header.shred_type != shred_type
|| shred2.common_header.shred_type != shred_type
{
Err(Error::ShredTypeMismatch)
} else if shred1.payload == shred2.payload {
Err(Error::InvalidDuplicateShreds)
} else if !shred1.verify(&slot_leader) || !shred2.verify(&slot_leader) {
Err(Error::InvalidSignature)
} else {
Ok((shred1, shred2))
}
}
impl Sanitize for DuplicateShred {
fn sanitize(&self) -> Result<(), SanitizeError> {
sanitize_wallclock(self.wallclock)?;
if self.chunk_index >= self.num_chunks {
return Err(SanitizeError::IndexOutOfBounds);
}
self.from.sanitize()
}
}
impl From<&DuplicateShred> for DuplicateShredIndex {
fn from(shred: &DuplicateShred) -> Self {
Self {
slot: shred.slot,
shred_index: shred.shred_index,
shred_type: shred.shred_type,
num_chunks: shred.num_chunks,
chunk_index: shred.chunk_index,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use rand::Rng;
use solana_ledger::{entry::Entry, shred::Shredder};
use solana_sdk::{hash, signature::Keypair, signature::Signer, system_transaction};
use std::sync::Arc;
#[test]
fn test_duplicate_shred_header_size() {
let dup = DuplicateShred {
from: Pubkey::new_unique(),
wallclock: u64::MAX,
slot: Slot::MAX,
shred_index: u32::MAX,
shred_type: ShredType(u8::MAX),
num_chunks: u8::MAX,
chunk_index: u8::MAX,
chunk: Vec::default(),
};
assert_eq!(
bincode::serialize(&dup).unwrap().len(),
DUPLICATE_SHRED_HEADER_SIZE
);
assert_eq!(
bincode::serialized_size(&dup).unwrap(),
DUPLICATE_SHRED_HEADER_SIZE as u64
);
}
fn new_rand_shred<R: Rng>(rng: &mut R, next_shred_index: u32, shredder: &Shredder) -> Shred {
let entries: Vec<_> = std::iter::repeat_with(|| {
let tx = system_transaction::transfer(
&Keypair::new(), // from
&Pubkey::new_unique(), // to
rng.gen(), // lamports
hash::new_rand(rng), // recent blockhash
);
Entry::new(
&hash::new_rand(rng), // prev_hash
1, // num_hashes,
vec![tx], // transactions
)
})
.take(5)
.collect();
let (mut data_shreds, _coding_shreds, _last_shred_index) = shredder.entries_to_shreds(
&entries,
true, // is_last_in_slot
next_shred_index,
);
data_shreds.swap_remove(0)
}
#[test]
fn test_duplicate_shred_round_trip() {
let mut rng = rand::thread_rng();
let leader = Arc::new(Keypair::new());
let (slot, parent_slot, fec_rate, reference_tick, version) =
(53084024, 53084023, 0.0, 0, 0);
let shredder = Shredder::new(
slot,
parent_slot,
fec_rate,
leader.clone(),
reference_tick,
version,
)
.unwrap();
let next_shred_index = rng.gen();
let shred1 = new_rand_shred(&mut rng, next_shred_index, &shredder);
let shred2 = new_rand_shred(&mut rng, next_shred_index, &shredder);
let leader = |s| {
if s == slot {
Some(leader.pubkey())
} else {
None
}
};
let proof = DuplicateSlotProof {
shred1: shred1.payload.clone(),
shred2: shred2.payload.clone(),
};
let chunks: Vec<_> = from_duplicate_slot_proof(
&proof,
Pubkey::new_unique(), // self_pubkey
leader,
rng.gen(), // wallclock
512, // max_size
Ok, // encoder
)
.unwrap()
.collect();
assert!(chunks.len() > 4);
let (shred3, shred4) = into_shreds(chunks, leader, Ok).unwrap();
assert_eq!(shred1, shred3);
assert_eq!(shred2, shred4);
}
}

core/src/lib.rs

@ -31,6 +31,7 @@ pub mod crds_gossip_push;
pub mod crds_shards;
pub mod crds_value;
pub mod data_budget;
pub mod duplicate_shred;
pub mod epoch_slots;
pub mod fetch_stage;
pub mod fork_choice;

ledger/Cargo.toml

@ -33,6 +33,8 @@ serde = "1.0.112"
serde_bytes = "0.11.4"
sha2 = "0.8.2"
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.6.0" }
solana-frozen-abi = { path = "../frozen-abi", version = "1.6.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "1.6.0" }
solana-transaction-status = { path = "../transaction-status", version = "1.6.0" }
solana-logger = { path = "../logger", version = "1.6.0" }
solana-measure = { path = "../measure", version = "1.6.0" }
@ -63,6 +65,9 @@ assert_matches = "1.3.0"
matches = "0.1.6"
solana-budget-program = { path = "../programs/budget", version = "1.6.0" }
[build-dependencies]
rustc_version = "0.2"
[lib]
crate-type = ["lib"]
name = "solana_ledger"

ledger/build.rs Normal file

@ -0,0 +1,27 @@
extern crate rustc_version;
use rustc_version::{version_meta, Channel};
fn main() {
// Copied and adapted from
// https://github.com/Kimundi/rustc-version-rs/blob/1d692a965f4e48a8cb72e82cda953107c0d22f47/README.md#example
// Licensed under Apache-2.0 + MIT
match version_meta().unwrap().channel {
Channel::Stable => {
println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION");
}
Channel::Beta => {
println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION");
}
Channel::Nightly => {
println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION");
}
Channel::Dev => {
println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION");
// See https://github.com/solana-labs/solana/issues/11055
// We may be running the custom `rust-bpf-builder` toolchain,
// which currently needs `#![feature(proc_macro_hygiene)]` to
// be applied.
println!("cargo:rustc-cfg=RUSTC_NEEDS_PROC_MACRO_HYGIENE");
}
}
}

ledger/src/lib.rs

@ -1,3 +1,4 @@
#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))]
#[macro_use]
extern crate solana_bpf_loader_program;
@ -32,3 +33,6 @@ extern crate log;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate solana_frozen_abi_macro;

ledger/src/shred.rs

@ -119,7 +119,7 @@ pub enum ShredError {
pub type Result<T> = std::result::Result<T, ShredError>;
#[derive(Serialize, Clone, Deserialize, PartialEq, Debug)]
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, AbiExample, Deserialize, Serialize)]
pub struct ShredType(pub u8);
impl Default for ShredType {
fn default() -> Self {