2022-05-05 16:48:00 -07:00
|
|
|
use {
|
|
|
|
crate::shred::{
|
2022-05-30 05:51:19 -07:00
|
|
|
common::impl_shred_common,
|
|
|
|
shred_code, shred_data,
|
|
|
|
traits::{Shred, ShredCode as ShredCodeTrait, ShredData as ShredDataTrait},
|
2022-05-30 05:51:00 -07:00
|
|
|
CodingShredHeader, DataShredHeader, Error, ShredCommonHeader, ShredFlags, ShredVariant,
|
2022-05-30 05:51:19 -07:00
|
|
|
SIZE_OF_CODING_SHRED_HEADERS, SIZE_OF_COMMON_SHRED_HEADER, SIZE_OF_DATA_SHRED_HEADERS,
|
|
|
|
SIZE_OF_SIGNATURE,
|
2022-05-05 16:48:00 -07:00
|
|
|
},
|
|
|
|
solana_perf::packet::deserialize_from_with_limit,
|
|
|
|
solana_sdk::{clock::Slot, signature::Signature},
|
|
|
|
static_assertions::const_assert_eq,
|
2022-05-30 05:51:19 -07:00
|
|
|
std::{io::Cursor, ops::Range},
|
2022-05-05 16:48:00 -07:00
|
|
|
};
|
|
|
|
|
2022-05-30 05:51:19 -07:00
|
|
|
// All payload including any zero paddings are signed.
// Code and data shreds have the same payload size.
pub(super) const SIGNED_MESSAGE_RANGE: Range<usize> = SIZE_OF_SIGNATURE..ShredData::SIZE_OF_PAYLOAD;
// Compile-time checks pinning the legacy wire format: data and code shreds
// share a single payload size (1228 bytes) and data capacity is fixed.
const_assert_eq!(ShredData::SIZE_OF_PAYLOAD, ShredCode::SIZE_OF_PAYLOAD);
const_assert_eq!(ShredData::SIZE_OF_PAYLOAD, 1228);
const_assert_eq!(ShredData::CAPACITY, 1051);

// ShredCode::SIZE_OF_HEADERS bytes at the end of data shreds
// is never used and is not part of erasure coding.
const_assert_eq!(SIZE_OF_ERASURE_ENCODED_SLICE, 1139);
pub(super) const SIZE_OF_ERASURE_ENCODED_SLICE: usize =
    ShredCode::SIZE_OF_PAYLOAD - ShredCode::SIZE_OF_HEADERS;
|
2022-05-30 05:51:19 -07:00
|
|
|
|
|
|
|
// Layout: {common, data} headers | data | zero padding
// Everything up to ShredCode::SIZE_OF_HEADERS bytes at the end (which is part
// of zero padding) is erasure coded.
// All payload past signature, including the entirety of zero paddings, is
// signed.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ShredData {
    // Header shared by data and coding shreds (signature, variant, slot, ...).
    common_header: ShredCommonHeader,
    // Data-shred-specific header (parent_offset, flags, size).
    data_header: DataShredHeader,
    // Full serialized shred: headers followed by data and zero padding,
    // always ShredData::SIZE_OF_PAYLOAD bytes once constructed/sanitized.
    payload: Vec<u8>,
}
|
|
|
|
|
2022-05-30 05:51:19 -07:00
|
|
|
// Layout: {common, coding} headers | erasure coded shard
// All payload past signature is signed.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ShredCode {
    // Header shared by data and coding shreds (signature, variant, slot, ...).
    common_header: ShredCommonHeader,
    // Coding-shred-specific header (num_data_shreds, num_coding_shreds, position).
    coding_header: CodingShredHeader,
    // Full serialized shred: headers followed by the parity shard,
    // always ShredCode::SIZE_OF_PAYLOAD bytes once constructed/sanitized.
    payload: Vec<u8>,
}
|
|
|
|
|
|
|
|
impl Shred for ShredData {
    impl_shred_common!();
    // Legacy data shreds are always zero padded and
    // the same size as coding shreds.
    const SIZE_OF_PAYLOAD: usize = shred_code::ShredCode::SIZE_OF_PAYLOAD;
    const SIZE_OF_HEADERS: usize = SIZE_OF_DATA_SHRED_HEADERS;

    // Deserializes a data shred from raw bytes: reads the common and data
    // headers, rejects non-LegacyData variants, re-pads the payload to the
    // fixed size, and sanitizes before returning.
    fn from_payload(mut payload: Vec<u8>) -> Result<Self, Error> {
        let mut cursor = Cursor::new(&payload[..]);
        let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?;
        if common_header.shred_variant != ShredVariant::LegacyData {
            return Err(Error::InvalidShredVariant);
        }
        let data_header = deserialize_from_with_limit(&mut cursor)?;
        // Shreds stored to blockstore may have trailing zeros trimmed.
        // Repair packets have nonce at the end of packet payload; see:
        // https://github.com/solana-labs/solana/pull/10109
        // https://github.com/solana-labs/solana/pull/16602
        if payload.len() < Self::SIZE_OF_HEADERS {
            return Err(Error::InvalidPayloadSize(payload.len()));
        }
        // Restore (or trim to) the canonical payload size; extra bytes past
        // SIZE_OF_PAYLOAD (e.g. a repair nonce) are dropped, missing padding
        // is re-added as zeros.
        payload.resize(Self::SIZE_OF_PAYLOAD, 0u8);
        let shred = Self {
            common_header,
            data_header,
            payload,
        };
        shred.sanitize().map(|_| shred)
    }

    // Index of this shred within its erasure batch; errors with the shred's
    // headers attached if the header values are inconsistent.
    fn erasure_shard_index(&self) -> Result<usize, Error> {
        shred_data::erasure_shard_index(self).ok_or_else(|| {
            let headers = Box::new((self.common_header, self.data_header));
            Error::InvalidErasureShardIndex(headers)
        })
    }

    // Consumes the shred and returns the erasure-coded portion of the payload
    // (everything except the trailing bytes that are excluded from coding).
    fn erasure_shard(self) -> Result<Vec<u8>, Error> {
        if self.payload.len() != Self::SIZE_OF_PAYLOAD {
            return Err(Error::InvalidPayloadSize(self.payload.len()));
        }
        let mut shard = self.payload;
        shard.truncate(SIZE_OF_ERASURE_ENCODED_SLICE);
        Ok(shard)
    }

    // Borrowing variant of erasure_shard.
    fn erasure_shard_as_slice(&self) -> Result<&[u8], Error> {
        if self.payload.len() != Self::SIZE_OF_PAYLOAD {
            return Err(Error::InvalidPayloadSize(self.payload.len()));
        }
        Ok(&self.payload[..SIZE_OF_ERASURE_ENCODED_SLICE])
    }

    // Validates the shred variant, then delegates the remaining invariants
    // (index bounds, flags, size) to the shared shred_data::sanitize.
    fn sanitize(&self) -> Result<(), Error> {
        match self.common_header.shred_variant {
            ShredVariant::LegacyData => (),
            _ => return Err(Error::InvalidShredVariant),
        }
        shred_data::sanitize(self)
    }

    // The signed portion: everything past the signature, including zero
    // padding (see the layout comment on ShredData).
    fn signed_message(&self) -> &[u8] {
        debug_assert_eq!(self.payload.len(), Self::SIZE_OF_PAYLOAD);
        &self.payload[SIZE_OF_SIGNATURE..]
    }
}
|
|
|
|
|
|
|
|
impl Shred for ShredCode {
    impl_shred_common!();
    const SIZE_OF_PAYLOAD: usize = shred_code::ShredCode::SIZE_OF_PAYLOAD;
    const SIZE_OF_HEADERS: usize = SIZE_OF_CODING_SHRED_HEADERS;

    // Deserializes a coding shred from raw bytes: reads the common and coding
    // headers, rejects non-LegacyCode variants, trims any trailing bytes
    // beyond the fixed payload size, and sanitizes before returning.
    fn from_payload(mut payload: Vec<u8>) -> Result<Self, Error> {
        let mut cursor = Cursor::new(&payload[..]);
        let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?;
        if common_header.shred_variant != ShredVariant::LegacyCode {
            return Err(Error::InvalidShredVariant);
        }
        let coding_header = deserialize_from_with_limit(&mut cursor)?;
        // Repair packets have nonce at the end of packet payload:
        // https://github.com/solana-labs/solana/pull/10109
        payload.truncate(Self::SIZE_OF_PAYLOAD);
        let shred = Self {
            common_header,
            coding_header,
            payload,
        };
        shred.sanitize().map(|_| shred)
    }

    // Index of this shred within its erasure batch; errors with the shred's
    // headers attached if the header values are inconsistent.
    fn erasure_shard_index(&self) -> Result<usize, Error> {
        shred_code::erasure_shard_index(self).ok_or_else(|| {
            let headers = Box::new((self.common_header, self.coding_header));
            Error::InvalidErasureShardIndex(headers)
        })
    }

    // Consumes the shred and returns the parity shard (payload minus headers).
    fn erasure_shard(self) -> Result<Vec<u8>, Error> {
        if self.payload.len() != Self::SIZE_OF_PAYLOAD {
            return Err(Error::InvalidPayloadSize(self.payload.len()));
        }
        let mut shard = self.payload;
        // ShredCode::SIZE_OF_HEADERS bytes at the beginning of the coding
        // shreds contains the header and is not part of erasure coding.
        shard.drain(..Self::SIZE_OF_HEADERS);
        Ok(shard)
    }

    // Borrowing variant of erasure_shard.
    fn erasure_shard_as_slice(&self) -> Result<&[u8], Error> {
        if self.payload.len() != Self::SIZE_OF_PAYLOAD {
            return Err(Error::InvalidPayloadSize(self.payload.len()));
        }
        Ok(&self.payload[Self::SIZE_OF_HEADERS..])
    }

    // Validates the shred variant, then delegates the remaining invariants
    // to the shared shred_code::sanitize.
    fn sanitize(&self) -> Result<(), Error> {
        match self.common_header.shred_variant {
            ShredVariant::LegacyCode => (),
            _ => return Err(Error::InvalidShredVariant),
        }
        shred_code::sanitize(self)
    }

    // The signed portion: everything past the signature.
    fn signed_message(&self) -> &[u8] {
        debug_assert_eq!(self.payload.len(), Self::SIZE_OF_PAYLOAD);
        &self.payload[SIZE_OF_SIGNATURE..]
    }
}
|
|
|
|
|
2022-05-30 05:51:19 -07:00
|
|
|
impl ShredDataTrait for ShredData {
|
2022-05-05 16:48:00 -07:00
|
|
|
#[inline]
|
|
|
|
fn data_header(&self) -> &DataShredHeader {
|
|
|
|
&self.data_header
|
|
|
|
}
|
|
|
|
|
|
|
|
fn data(&self) -> Result<&[u8], Error> {
|
|
|
|
let size = usize::from(self.data_header.size);
|
2022-06-21 13:26:51 -07:00
|
|
|
#[allow(clippy::manual_range_contains)]
|
2022-05-30 05:51:19 -07:00
|
|
|
if size > self.payload.len()
|
2022-08-15 12:02:32 -07:00
|
|
|
|| size < Self::SIZE_OF_HEADERS
|
|
|
|
|| size > Self::SIZE_OF_HEADERS + Self::CAPACITY
|
2022-05-30 05:51:19 -07:00
|
|
|
{
|
2022-05-05 16:48:00 -07:00
|
|
|
return Err(Error::InvalidDataSize {
|
|
|
|
size: self.data_header.size,
|
|
|
|
payload: self.payload.len(),
|
|
|
|
});
|
|
|
|
}
|
2022-08-15 12:02:32 -07:00
|
|
|
Ok(&self.payload[Self::SIZE_OF_HEADERS..size])
|
2022-05-05 16:48:00 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-05-30 05:51:19 -07:00
|
|
|
impl ShredCodeTrait for ShredCode {
|
2022-05-05 16:48:00 -07:00
|
|
|
#[inline]
|
|
|
|
fn coding_header(&self) -> &CodingShredHeader {
|
|
|
|
&self.coding_header
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl ShredData {
    // Maximum size of ledger data that can be embedded in a data-shred.
    pub(super) const CAPACITY: usize =
        Self::SIZE_OF_PAYLOAD - Self::SIZE_OF_HEADERS - ShredCode::SIZE_OF_HEADERS;

    // Builds a new (unsigned) legacy data shred: serializes both headers into
    // a zero-initialized fixed-size payload via bincode, then copies `data`
    // in right after the headers. The signature is left as the default and is
    // expected to be filled in later.
    pub(super) fn new_from_data(
        slot: Slot,
        index: u32,
        parent_offset: u16,
        data: &[u8],
        flags: ShredFlags,
        reference_tick: u8,
        version: u16,
        fec_set_index: u32,
    ) -> Self {
        let mut payload = vec![0; Self::SIZE_OF_PAYLOAD];
        let common_header = ShredCommonHeader {
            signature: Signature::default(),
            shred_variant: ShredVariant::LegacyData,
            slot,
            index,
            version,
            fec_set_index,
        };
        // Recorded size covers headers plus the embedded data (padding excluded).
        let size = (data.len() + Self::SIZE_OF_HEADERS) as u16;
        let flags = flags
            | unsafe {
                // SAFETY: reference_tick is clamped by .min() to the bits of
                // SHRED_TICK_REFERENCE_MASK, so only bits belonging to the
                // tick mask can be set here.
                ShredFlags::from_bits_unchecked(
                    ShredFlags::SHRED_TICK_REFERENCE_MASK
                        .bits()
                        .min(reference_tick),
                )
            };
        let data_header = DataShredHeader {
            parent_offset,
            flags,
            size,
        };
        let mut cursor = Cursor::new(&mut payload[..]);
        bincode::serialize_into(&mut cursor, &common_header).unwrap();
        bincode::serialize_into(&mut cursor, &data_header).unwrap();
        // TODO: Need to check if data is too large!
        // (copy_from_slice below panics if data.len() > CAPACITY.)
        let offset = cursor.position() as usize;
        debug_assert_eq!(offset, Self::SIZE_OF_HEADERS);
        payload[offset..offset + data.len()].copy_from_slice(data);
        Self {
            common_header,
            data_header,
            payload,
        }
    }

    // The prefix of the payload worth persisting to blockstore.
    pub(super) fn bytes_to_store(&self) -> &[u8] {
        // Payload will be padded out to Self::SIZE_OF_PAYLOAD.
        // But only need to store the bytes within data_header.size.
        &self.payload[..self.data_header.size as usize]
    }

    // Re-pads a shred read back from storage to the canonical payload size,
    // rejecting buffers that are too short to hold the headers or longer than
    // a full payload.
    pub(super) fn resize_stored_shred(mut shred: Vec<u8>) -> Result<Vec<u8>, Error> {
        // Old shreds might have been extra zero padded.
        if !(Self::SIZE_OF_HEADERS..=Self::SIZE_OF_PAYLOAD).contains(&shred.len()) {
            return Err(Error::InvalidPayloadSize(shred.len()));
        }
        shred.resize(Self::SIZE_OF_PAYLOAD, 0u8);
        Ok(shred)
    }

    // Only for tests.
    // Sets LAST_SHRED_IN_SLOT and re-serializes the data header into the
    // payload so the in-memory header and serialized bytes stay consistent.
    pub(crate) fn set_last_in_slot(&mut self) {
        self.data_header.flags |= ShredFlags::LAST_SHRED_IN_SLOT;
        let buffer = &mut self.payload[SIZE_OF_COMMON_SHRED_HEADER..];
        bincode::serialize_into(buffer, &self.data_header).unwrap();
    }
}
|
|
|
|
|
|
|
|
impl ShredCode {
    // Builds a new (unsigned) legacy coding shred: serializes both headers
    // into a zero-initialized fixed-size payload via bincode, then copies the
    // parity shard in right after the headers. The signature is left as the
    // default and is expected to be filled in later.
    pub(super) fn new_from_parity_shard(
        slot: Slot,
        index: u32,
        parity_shard: &[u8],
        fec_set_index: u32,
        num_data_shreds: u16,
        num_coding_shreds: u16,
        position: u16,
        version: u16,
    ) -> Self {
        let common_header = ShredCommonHeader {
            signature: Signature::default(),
            shred_variant: ShredVariant::LegacyCode,
            index,
            slot,
            version,
            fec_set_index,
        };
        let coding_header = CodingShredHeader {
            num_data_shreds,
            num_coding_shreds,
            position,
        };
        let mut payload = vec![0; Self::SIZE_OF_PAYLOAD];
        let mut cursor = Cursor::new(&mut payload[..]);
        bincode::serialize_into(&mut cursor, &common_header).unwrap();
        bincode::serialize_into(&mut cursor, &coding_header).unwrap();
        // Tests may have an empty parity_shard.
        if !parity_shard.is_empty() {
            let offset = cursor.position() as usize;
            debug_assert_eq!(offset, Self::SIZE_OF_HEADERS);
            // copy_from_slice panics unless parity_shard exactly fills the
            // remainder of the payload.
            payload[offset..].copy_from_slice(parity_shard);
        }
        Self {
            common_header,
            coding_header,
            payload,
        }
    }
}
|
|
|
|
|
|
|
|
#[cfg(test)]
mod test {
    use {
        super::*,
        crate::shred::{shred_code::MAX_CODE_SHREDS_PER_SLOT, ShredType, MAX_DATA_SHREDS_PER_SLOT},
        matches::assert_matches,
    };

    // Exercises ShredData::sanitize against a valid shred and a series of
    // individually-corrupted clones (oversized payload, bad data_header.size,
    // out-of-range index, inconsistent flags).
    #[test]
    fn test_sanitize_data_shred() {
        let data = [0xa5u8; ShredData::CAPACITY];
        let mut shred = ShredData::new_from_data(
            420, // slot
            19,  // index
            5,   // parent_offset
            &data,
            ShredFlags::DATA_COMPLETE_SHRED,
            3,  // reference_tick
            1,  // version
            16, // fec_set_index
        );
        assert_matches!(shred.sanitize(), Ok(()));
        // Corrupt shred by making it too large
        {
            let mut shred = shred.clone();
            shred.payload.push(10u8);
            assert_matches!(shred.sanitize(), Err(Error::InvalidPayloadSize(1229)));
        }
        // data_header.size past the end of the legal data window.
        {
            let mut shred = shred.clone();
            shred.data_header.size += 1;
            assert_matches!(
                shred.sanitize(),
                Err(Error::InvalidDataSize {
                    size: 1140,
                    payload: 1228,
                })
            );
        }
        // data_header.size smaller than the headers themselves.
        {
            let mut shred = shred.clone();
            shred.data_header.size = 0;
            assert_matches!(
                shred.sanitize(),
                Err(Error::InvalidDataSize {
                    size: 0,
                    payload: 1228,
                })
            );
        }
        // Shred index at the per-slot limit is rejected.
        {
            let mut shred = shred.clone();
            shred.common_header.index = MAX_DATA_SHREDS_PER_SLOT as u32;
            assert_matches!(
                shred.sanitize(),
                Err(Error::InvalidShredIndex(ShredType::Data, 32768))
            );
        }
        // Flag combinations: LAST_SHRED_IN_SLOT alone is fine; clearing
        // DATA_COMPLETE_SHRED while LAST_SHRED_IN_SLOT is set is invalid,
        // as is saturating the tick-reference bits on top of that.
        {
            let mut shred = shred.clone();
            shred.data_header.flags |= ShredFlags::LAST_SHRED_IN_SLOT;
            assert_matches!(shred.sanitize(), Ok(()));
            shred.data_header.flags &= !ShredFlags::DATA_COMPLETE_SHRED;
            assert_matches!(shred.sanitize(), Err(Error::InvalidShredFlags(131u8)));
            shred.data_header.flags |= ShredFlags::SHRED_TICK_REFERENCE_MASK;
            assert_matches!(shred.sanitize(), Err(Error::InvalidShredFlags(191u8)));
        }
        // size exceeding the payload length.
        {
            shred.data_header.size = shred.payload().len() as u16 + 1;
            assert_matches!(
                shred.sanitize(),
                Err(Error::InvalidDataSize {
                    size: 1229,
                    payload: 1228,
                })
            );
        }
    }

    // Exercises ShredCode::sanitize against a valid shred and clones with
    // inconsistent erasure-batch header fields.
    #[test]
    fn test_sanitize_coding_shred() {
        let mut shred = ShredCode::new_from_parity_shard(
            1,   // slot
            12,  // index
            &[], // parity_shard
            11,  // fec_set_index
            11,  // num_data_shreds
            11,  // num_coding_shreds
            8,   // position
            0,   // version
        );
        assert_matches!(shred.sanitize(), Ok(()));
        // index < position is invalid.
        {
            let mut shred = shred.clone();
            let index = shred.common_header.index - shred.common_header.fec_set_index - 1;
            shred.set_index(index);
            assert_matches!(
                shred.sanitize(),
                Err(Error::InvalidErasureShardIndex { .. })
            );
        }
        // Zero coding shreds in the batch is invalid.
        {
            let mut shred = shred.clone();
            shred.coding_header.num_coding_shreds = 0;
            assert_matches!(
                shred.sanitize(),
                Err(Error::InvalidErasureShardIndex { .. })
            );
        }
        // Shred index at the per-slot code-shred limit is rejected.
        {
            let mut shred = shred.clone();
            shred.common_header.index = MAX_CODE_SHREDS_PER_SLOT as u32;
            assert_matches!(
                shred.sanitize(),
                Err(Error::InvalidShredIndex(ShredType::Code, 557_056))
            );
        }
        // pos >= num_coding is invalid.
        {
            let mut shred = shred.clone();
            let num_coding_shreds = shred.common_header.index - shred.common_header.fec_set_index;
            shred.coding_header.num_coding_shreds = num_coding_shreds as u16;
            assert_matches!(
                shred.sanitize(),
                Err(Error::InvalidErasureShardIndex { .. })
            );
        }
        // set_index with num_coding that would imply the last
        // shred has index > u32::MAX should fail.
        {
            let mut shred = shred.clone();
            shred.common_header.fec_set_index = MAX_DATA_SHREDS_PER_SLOT as u32 - 2;
            shred.coding_header.num_data_shreds = 3;
            shred.coding_header.num_coding_shreds = 4;
            shred.coding_header.position = 1;
            shred.common_header.index = MAX_DATA_SHREDS_PER_SLOT as u32 - 2;
            assert_matches!(
                shred.sanitize(),
                Err(Error::InvalidErasureShardIndex { .. })
            );

            shred.coding_header.num_data_shreds = 2;
            shred.coding_header.num_coding_shreds = 2000;
            assert_matches!(shred.sanitize(), Err(Error::InvalidNumCodingShreds(2000)));

            // Decreasing the number of num_coding_shreds will put it within
            // the allowed limit.
            shred.coding_header.num_coding_shreds = 2;
            assert_matches!(shred.sanitize(), Ok(()));
        }
        // u16::MAX coding shreds always exceeds the limit.
        {
            shred.coding_header.num_coding_shreds = u16::MAX;
            assert_matches!(shred.sanitize(), Err(Error::InvalidNumCodingShreds(65535)));
        }
    }
}
|