2021-04-16 12:04:46 -07:00
|
|
|
//! The `shred` module defines data structures and methods to pull MTU sized data frames from the
|
|
|
|
//! network. There are two types of shreds: data and coding. Data shreds contain entry information
|
|
|
|
//! while coding shreds provide redundancy to protect against dropped network packets (erasures).
|
|
|
|
//!
|
|
|
|
//! +---------------------------------------------------------------------------------------------+
|
|
|
|
//! | Data Shred |
|
|
|
|
//! +---------------------------------------------------------------------------------------------+
|
|
|
|
//! | common | data | payload |
|
|
|
|
//! | header | header | |
|
|
|
|
//! |+---+---+--- |+---+---+---|+----------------------------------------------------------+----+|
|
|
|
|
//! || s | s | . || p | f | s || data (ie ledger entries) | r ||
|
|
|
|
//! || i | h | . || a | l | i || | e ||
|
|
|
|
//! || g | r | . || r | a | z || See notes immediately after shred diagrams for an | s ||
|
|
|
|
//! || n | e | || e | g | e || explanation of the "restricted" section in this payload | t ||
|
|
|
|
//! || a | d | || n | s | || | r ||
|
|
|
|
//! || t | | || t | | || | i ||
|
|
|
|
//! || u | t | || | | || | c ||
|
|
|
|
//! || r | y | || o | | || | t ||
|
|
|
|
//! || e | p | || f | | || | e ||
|
|
|
|
//! || | e | || f | | || | d ||
|
|
|
|
//! |+---+---+--- |+---+---+---+|----------------------------------------------------------+----+|
|
|
|
|
//! +---------------------------------------------------------------------------------------------+
|
|
|
|
//!
|
|
|
|
//! +---------------------------------------------------------------------------------------------+
|
|
|
|
//! | Coding Shred |
|
|
|
|
//! +---------------------------------------------------------------------------------------------+
|
|
|
|
//! | common | coding | payload |
|
|
|
|
//! | header | header | |
|
|
|
|
//! |+---+---+--- |+---+---+---+----------------------------------------------------------------+|
|
|
|
|
//! || s | s | . || n | n | p || data (encoded data shred data) ||
|
|
|
|
//! || i | h | . || u | u | o || ||
|
|
|
|
//! || g | r | . || m | m | s || ||
|
|
|
|
//! || n | e | || | | i || ||
|
|
|
|
//! || a | d | || d | c | t || ||
|
|
|
|
//! || t | | || | | i || ||
|
|
|
|
//! || u | t | || s | s | o || ||
|
|
|
|
//! || r | y | || h | h | n || ||
|
|
|
|
//! || e | p | || r | r | || ||
|
|
|
|
//! || | e | || e | e | || ||
|
|
|
|
//! || | | || d | d | || ||
|
|
|
|
//! |+---+---+--- |+---+---+---+|+--------------------------------------------------------------+|
|
|
|
|
//! +---------------------------------------------------------------------------------------------+
|
|
|
|
//!
|
|
|
|
//! Notes:
|
|
|
|
//! a) Coding shreds encode entire data shreds: both of the headers AND the payload.
|
|
|
|
//! b) Coding shreds require their own headers for identification and etc.
|
|
|
|
//! c) The erasure algorithm requires data shred and coding shred bytestreams to be equal in length.
|
|
|
|
//!
|
|
|
|
//! So, given a) - c), we must restrict data shred's payload length such that the entire coding
|
|
|
|
//! payload can fit into one coding shred / packet.
|
|
|
|
|
2021-09-01 08:44:26 -07:00
|
|
|
use {
|
|
|
|
crate::{blockstore::MAX_DATA_SHREDS_PER_SLOT, erasure::Session},
|
|
|
|
bincode::config::Options,
|
2021-10-28 02:38:08 -07:00
|
|
|
rayon::{prelude::*, ThreadPool},
|
2021-09-01 08:44:26 -07:00
|
|
|
serde::{Deserialize, Serialize},
|
|
|
|
solana_entry::entry::{create_ticks, Entry},
|
|
|
|
solana_measure::measure::Measure,
|
|
|
|
solana_perf::packet::{limited_deserialize, Packet},
|
|
|
|
solana_rayon_threadlimit::get_thread_count,
|
|
|
|
solana_runtime::bank::Bank,
|
|
|
|
solana_sdk::{
|
|
|
|
clock::Slot,
|
|
|
|
feature_set,
|
|
|
|
hash::{hashv, Hash},
|
|
|
|
packet::PACKET_DATA_SIZE,
|
|
|
|
pubkey::Pubkey,
|
|
|
|
signature::{Keypair, Signature, Signer},
|
|
|
|
},
|
|
|
|
std::{cell::RefCell, convert::TryInto, mem::size_of},
|
|
|
|
thiserror::Error,
|
2019-11-02 00:38:30 -07:00
|
|
|
};
|
2019-08-02 15:53:42 -07:00
|
|
|
|
2020-11-09 23:04:27 -08:00
|
|
|
#[derive(Default, Clone)]
pub struct ProcessShredsStats {
    // Per-slot elapsed time
    pub shredding_elapsed: u64,
    pub receive_elapsed: u64,
    pub serialize_elapsed: u64,
    pub gen_data_elapsed: u64,
    pub gen_coding_elapsed: u64,
    pub sign_coding_elapsed: u64,
    pub coding_send_elapsed: u64,
    pub get_leader_schedule_elapsed: u64,
}

impl ProcessShredsStats {
    /// Accumulates every counter of `new_stats` into `self`, field by field.
    pub fn update(&mut self, new_stats: &ProcessShredsStats) {
        self.shredding_elapsed += new_stats.shredding_elapsed;
        self.receive_elapsed += new_stats.receive_elapsed;
        self.serialize_elapsed += new_stats.serialize_elapsed;
        self.gen_data_elapsed += new_stats.gen_data_elapsed;
        self.gen_coding_elapsed += new_stats.gen_coding_elapsed;
        self.sign_coding_elapsed += new_stats.sign_coding_elapsed;
        // Bug fix: this line previously accumulated
        // `new_stats.gen_coding_elapsed`, double-counting coding generation
        // time and silently dropping the send time.
        self.coding_send_elapsed += new_stats.coding_send_elapsed;
        self.get_leader_schedule_elapsed += new_stats.get_leader_schedule_elapsed;
    }

    /// Resets all counters back to zero.
    pub fn reset(&mut self) {
        *self = Self::default();
    }
}
|
|
|
|
|
2020-05-19 12:38:18 -07:00
|
|
|
pub type Nonce = u32;
|
|
|
|
|
2019-10-21 12:46:16 -07:00
|
|
|
/// The following constants are computed by hand, and hardcoded.
|
|
|
|
/// `test_shred_constants` ensures that the values are correct.
|
|
|
|
/// Constants are used over lazy_static for performance reasons.
|
2019-12-12 16:50:29 -08:00
|
|
|
pub const SIZE_OF_COMMON_SHRED_HEADER: usize = 83;
|
2020-05-19 12:38:18 -07:00
|
|
|
pub const SIZE_OF_DATA_SHRED_HEADER: usize = 5;
|
2019-10-21 12:46:16 -07:00
|
|
|
pub const SIZE_OF_CODING_SHRED_HEADER: usize = 6;
|
|
|
|
pub const SIZE_OF_SIGNATURE: usize = 64;
|
2019-12-30 07:42:09 -08:00
|
|
|
pub const SIZE_OF_SHRED_TYPE: usize = 1;
|
|
|
|
pub const SIZE_OF_SHRED_SLOT: usize = 8;
|
|
|
|
pub const SIZE_OF_SHRED_INDEX: usize = 4;
|
2020-05-19 12:38:18 -07:00
|
|
|
pub const SIZE_OF_NONCE: usize = 4;
|
2021-04-16 12:04:46 -07:00
|
|
|
pub const SIZE_OF_CODING_SHRED_HEADERS: usize =
|
2019-10-21 12:46:16 -07:00
|
|
|
SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_CODING_SHRED_HEADER;
|
|
|
|
pub const SIZE_OF_DATA_SHRED_PAYLOAD: usize = PACKET_DATA_SIZE
|
|
|
|
- SIZE_OF_COMMON_SHRED_HEADER
|
|
|
|
- SIZE_OF_DATA_SHRED_HEADER
|
2021-04-16 12:04:46 -07:00
|
|
|
- SIZE_OF_CODING_SHRED_HEADERS
|
2020-05-19 12:38:18 -07:00
|
|
|
- SIZE_OF_NONCE;
|
2019-09-14 21:05:54 -07:00
|
|
|
|
2020-05-15 13:23:56 -07:00
|
|
|
pub const OFFSET_OF_SHRED_TYPE: usize = SIZE_OF_SIGNATURE;
|
2019-12-30 07:42:09 -08:00
|
|
|
pub const OFFSET_OF_SHRED_SLOT: usize = SIZE_OF_SIGNATURE + SIZE_OF_SHRED_TYPE;
|
|
|
|
pub const OFFSET_OF_SHRED_INDEX: usize = OFFSET_OF_SHRED_SLOT + SIZE_OF_SHRED_SLOT;
|
2020-05-19 12:38:18 -07:00
|
|
|
pub const SHRED_PAYLOAD_SIZE: usize = PACKET_DATA_SIZE - SIZE_OF_NONCE;
|
2019-12-30 07:42:09 -08:00
|
|
|
|
2019-09-18 18:00:07 -07:00
|
|
|
thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
|
|
|
|
.num_threads(get_thread_count())
|
2020-01-20 20:08:19 -08:00
|
|
|
.thread_name(|ix| format!("shredder_{}", ix))
|
2019-09-18 18:00:07 -07:00
|
|
|
.build()
|
|
|
|
.unwrap()));
|
|
|
|
|
2019-09-16 20:28:54 -07:00
|
|
|
/// The constants that define if a shred is data or coding
|
2019-09-18 16:24:30 -07:00
|
|
|
pub const DATA_SHRED: u8 = 0b1010_0101;
|
|
|
|
pub const CODING_SHRED: u8 = 0b0101_1010;
|
2019-09-16 20:28:54 -07:00
|
|
|
|
2019-11-07 16:38:06 -08:00
|
|
|
pub const MAX_DATA_SHREDS_PER_FEC_BLOCK: u32 = 32;
|
2019-09-19 16:29:52 -07:00
|
|
|
|
2019-11-06 13:27:58 -08:00
|
|
|
pub const SHRED_TICK_REFERENCE_MASK: u8 = 0b0011_1111;
|
|
|
|
const LAST_SHRED_IN_SLOT: u8 = 0b1000_0000;
|
|
|
|
pub const DATA_COMPLETE_SHRED: u8 = 0b0100_0000;
|
2019-09-19 16:29:52 -07:00
|
|
|
|
2019-12-02 14:42:05 -08:00
|
|
|
/// Errors produced while constructing, validating, or deserializing shreds.
#[derive(Error, Debug)]
pub enum ShredError {
    #[error("invalid shred type")]
    InvalidShredType,

    #[error("invalid FEC rate; must be 0.0 < {0} < 1.0")]
    InvalidFecRate(f32),

    #[error("slot too low; current slot {slot} must be above parent slot {parent_slot}, but the difference must be below u16::MAX")]
    SlotTooLow { slot: Slot, parent_slot: Slot },

    #[error("serialization error")]
    Serialize(#[from] Box<bincode::ErrorKind>),

    // NOTE(review): this variant is returned when parent_offset EXCEEDS the
    // slot (see new_from_serialized_shred), so the message wording looks
    // inverted — confirm intent before changing the user-facing string.
    #[error(
        "invalid parent offset; parent_offset {parent_offset} must be larger than slot {slot}"
    )]
    InvalidParentOffset { slot: Slot, parent_offset: u16 },
}
|
|
|
|
|
2019-12-02 14:42:05 -08:00
|
|
|
pub type Result<T> = std::result::Result<T, ShredError>;
|
|
|
|
|
2020-12-18 06:32:43 -08:00
|
|
|
/// Wire discriminant distinguishing the two shred kinds; the expected
/// values are `DATA_SHRED` and `CODING_SHRED`.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, AbiExample, Deserialize, Serialize)]
pub struct ShredType(pub u8);
impl Default for ShredType {
    // Default to the data-shred discriminant.
    fn default() -> Self {
        ShredType(DATA_SHRED)
    }
}
|
2019-10-15 20:48:45 -07:00
|
|
|
|
|
|
|
/// A common header that is present in data and code shred headers
#[derive(Serialize, Clone, Deserialize, Default, PartialEq, Debug)]
pub struct ShredCommonHeader {
    // Signature over the payload past the signature field itself
    // (see Shredder::sign_shred / Shred::verify).
    pub signature: Signature,
    // DATA_SHRED or CODING_SHRED discriminant.
    pub shred_type: ShredType,
    pub slot: Slot,
    // Index of this shred within its slot.
    pub index: u32,
    pub version: u16,
    // Index of the first data shred in this shred's FEC set
    // (see Shredder::fec_set_index).
    pub fec_set_index: u32,
}
|
|
|
|
|
2019-10-15 20:48:45 -07:00
|
|
|
/// The data shred header has parent offset and flags
#[derive(Serialize, Clone, Default, Deserialize, PartialEq, Debug)]
pub struct DataShredHeader {
    // slot - parent_offset yields the parent slot (see Shred::parent).
    pub parent_offset: u16,
    // Bit flags: the low 6 bits (SHRED_TICK_REFERENCE_MASK) hold the
    // reference tick; DATA_COMPLETE_SHRED and LAST_SHRED_IN_SLOT occupy
    // the top two bits.
    pub flags: u8,
    // Serialized size: data length plus common + data header sizes
    // (set in Shred::new_from_data).
    pub size: u16,
}
|
|
|
|
|
|
|
|
/// The coding shred header has FEC information
#[derive(Serialize, Clone, Default, Deserialize, PartialEq, Debug)]
pub struct CodingShredHeader {
    pub num_data_shreds: u16,
    pub num_coding_shreds: u16,
    // Not read anywhere in this file; serialized under its historical name
    // "position" — presumably kept for wire-format compatibility (confirm
    // before removing).
    #[serde(rename = "position")]
    __unused: u16,
}
|
|
|
|
|
2019-09-17 18:22:46 -07:00
|
|
|
/// In-memory representation of a single shred: the parsed headers plus the
/// full serialized payload.
#[derive(Clone, Debug, PartialEq)]
pub struct Shred {
    pub common_header: ShredCommonHeader,
    // Meaningful only for data shreds; default-initialized for coding shreds.
    pub data_header: DataShredHeader,
    // Meaningful only for coding shreds; default-initialized for data shreds.
    pub coding_header: CodingShredHeader,
    // Serialized shred bytes (headers included), padded to SHRED_PAYLOAD_SIZE.
    pub payload: Vec<u8>,
}
|
|
|
|
|
2019-09-18 16:24:30 -07:00
|
|
|
impl Shred {
|
2019-10-18 22:55:59 -07:00
|
|
|
    /// Deserializes a `T` from `buf` starting at `*index`, then advances
    /// `*index` by `size` regardless of how many bytes bincode consumed
    /// (headers are fixed-width on the wire).
    fn deserialize_obj<'de, T>(index: &mut usize, size: usize, buf: &'de [u8]) -> bincode::Result<T>
    where
        T: Deserialize<'de>,
    {
        // Clamp the slice end so a short buffer surfaces as a bincode error
        // rather than a slice-index panic.
        let end = std::cmp::min(*index + size, buf.len());
        let ret = bincode::options()
            .with_limit(PACKET_DATA_SIZE as u64)
            .with_fixint_encoding()
            .allow_trailing_bytes()
            .deserialize(&buf[*index..end])?;
        *index += size;
        Ok(ret)
    }
|
|
|
|
|
|
|
|
    /// Serializes `obj` into `buf[*index..*index + size]` with bincode and
    /// advances `*index` by `size` on success.
    ///
    /// Panics (slice indexing) if the buffer is shorter than
    /// `*index + size`; callers allocate SHRED_PAYLOAD_SIZE buffers.
    fn serialize_obj_into<'de, T>(
        index: &mut usize,
        size: usize,
        buf: &'de mut [u8],
        obj: &T,
    ) -> bincode::Result<()>
    where
        T: Serialize,
    {
        bincode::serialize_into(&mut buf[*index..*index + size], obj)?;
        *index += size;
        Ok(())
    }
|
|
|
|
|
2019-12-12 13:27:33 -08:00
|
|
|
pub fn copy_to_packet(&self, packet: &mut Packet) {
|
|
|
|
let len = self.payload.len();
|
|
|
|
packet.data[..len].copy_from_slice(&self.payload[..]);
|
|
|
|
packet.meta.size = len;
|
|
|
|
}
|
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
    /// Builds a signed-later data shred: writes the common header, the data
    /// header, and `data` into a zeroed SHRED_PAYLOAD_SIZE buffer.
    ///
    /// `reference_tick` is capped to the 6-bit SHRED_TICK_REFERENCE_MASK;
    /// `is_last_data` / `is_last_in_slot` set the corresponding flag bits.
    /// The signature field is left defaulted — callers sign separately
    /// (see Shredder::sign_shred).
    pub fn new_from_data(
        slot: Slot,
        index: u32,
        parent_offset: u16,
        data: Option<&[u8]>,
        is_last_data: bool,
        is_last_in_slot: bool,
        reference_tick: u8,
        version: u16,
        fec_set_index: u32,
    ) -> Self {
        let payload_size = SHRED_PAYLOAD_SIZE;
        let mut payload = vec![0; payload_size];
        let common_header = ShredCommonHeader {
            slot,
            index,
            version,
            fec_set_index,
            ..ShredCommonHeader::default()
        };

        // Stored size covers the data plus both serialized headers.
        let size = (data.map(|d| d.len()).unwrap_or(0)
            + SIZE_OF_DATA_SHRED_HEADER
            + SIZE_OF_COMMON_SHRED_HEADER) as u16;
        let mut data_header = DataShredHeader {
            parent_offset,
            flags: reference_tick.min(SHRED_TICK_REFERENCE_MASK),
            size,
        };

        if is_last_data {
            data_header.flags |= DATA_COMPLETE_SHRED
        }

        if is_last_in_slot {
            data_header.flags |= LAST_SHRED_IN_SLOT
        }

        // Serialize headers in wire order: common header first, then the
        // data header, then the raw data immediately after.
        let mut start = 0;
        Self::serialize_obj_into(
            &mut start,
            SIZE_OF_COMMON_SHRED_HEADER,
            &mut payload,
            &common_header,
        )
        .expect("Failed to write common header into shred buffer");

        Self::serialize_obj_into(
            &mut start,
            SIZE_OF_DATA_SHRED_HEADER,
            &mut payload,
            &data_header,
        )
        .expect("Failed to write data header into shred buffer");

        if let Some(data) = data {
            payload[start..start + data.len()].clone_from_slice(data);
        }

        Self {
            common_header,
            data_header,
            coding_header: CodingShredHeader::default(),
            payload,
        }
    }
|
2019-10-08 00:42:51 -07:00
|
|
|
|
2020-05-19 12:38:18 -07:00
|
|
|
    /// Reconstructs a `Shred` from serialized bytes (e.g. from the network
    /// or blockstore), re-padding the payload and parsing the headers.
    ///
    /// Errors with `InvalidShredType` on an unknown discriminant and
    /// `InvalidParentOffset` when a data shred's parent_offset exceeds its
    /// slot.
    pub fn new_from_serialized_shred(mut payload: Vec<u8>) -> Result<Self> {
        let mut start = 0;
        let common_header: ShredCommonHeader =
            Self::deserialize_obj(&mut start, SIZE_OF_COMMON_SHRED_HEADER, &payload)?;

        let slot = common_header.slot;
        // Shreds should be padded out to SHRED_PAYLOAD_SIZE
        // so that erasure generation/recovery works correctly
        // But only the data_header.size is stored in blockstore.
        payload.resize(SHRED_PAYLOAD_SIZE, 0);
        let shred = if common_header.shred_type == ShredType(CODING_SHRED) {
            let coding_header: CodingShredHeader =
                Self::deserialize_obj(&mut start, SIZE_OF_CODING_SHRED_HEADER, &payload)?;
            Self {
                common_header,
                // Coding shreds carry no data header; leave it defaulted.
                data_header: DataShredHeader::default(),
                coding_header,
                payload,
            }
        } else if common_header.shred_type == ShredType(DATA_SHRED) {
            let data_header: DataShredHeader =
                Self::deserialize_obj(&mut start, SIZE_OF_DATA_SHRED_HEADER, &payload)?;
            // Reject shreds whose parent would underflow slot arithmetic
            // (see Shred::parent).
            if u64::from(data_header.parent_offset) > common_header.slot {
                return Err(ShredError::InvalidParentOffset {
                    slot,
                    parent_offset: data_header.parent_offset,
                });
            }
            Self {
                common_header,
                data_header,
                // Data shreds carry no coding header; leave it defaulted.
                coding_header: CodingShredHeader::default(),
                payload,
            }
        } else {
            return Err(ShredError::InvalidShredType);
        };

        Ok(shred)
    }
|
|
|
|
|
2020-12-15 16:50:40 -08:00
|
|
|
    /// Builds a coding shred whose payload contains only the serialized
    /// headers; the parity data is filled in later by erasure encoding.
    pub fn new_empty_coding(
        slot: Slot,
        index: u32,
        fec_set_index: u32,
        num_data: usize,
        num_code: usize,
        version: u16,
    ) -> Self {
        let (header, coding_header) = Shredder::new_coding_shred_header(
            slot,
            index,
            fec_set_index,
            num_data,
            num_code,
            version,
        );
        Shred::new_empty_from_header(header, DataShredHeader::default(), coding_header)
    }
|
|
|
|
|
2019-10-18 22:55:59 -07:00
|
|
|
pub fn new_empty_from_header(
|
|
|
|
common_header: ShredCommonHeader,
|
|
|
|
data_header: DataShredHeader,
|
|
|
|
coding_header: CodingShredHeader,
|
|
|
|
) -> Self {
|
2020-05-19 12:38:18 -07:00
|
|
|
let mut payload = vec![0; SHRED_PAYLOAD_SIZE];
|
2019-10-18 22:55:59 -07:00
|
|
|
let mut start = 0;
|
|
|
|
Self::serialize_obj_into(
|
|
|
|
&mut start,
|
2019-10-21 12:46:16 -07:00
|
|
|
SIZE_OF_COMMON_SHRED_HEADER,
|
2019-10-18 22:55:59 -07:00
|
|
|
&mut payload,
|
|
|
|
&common_header,
|
|
|
|
)
|
|
|
|
.expect("Failed to write header into shred buffer");
|
|
|
|
if common_header.shred_type == ShredType(DATA_SHRED) {
|
|
|
|
Self::serialize_obj_into(
|
|
|
|
&mut start,
|
2019-10-21 12:46:16 -07:00
|
|
|
SIZE_OF_DATA_SHRED_HEADER,
|
2019-10-18 22:55:59 -07:00
|
|
|
&mut payload,
|
|
|
|
&data_header,
|
|
|
|
)
|
|
|
|
.expect("Failed to write data header into shred buffer");
|
|
|
|
} else if common_header.shred_type == ShredType(CODING_SHRED) {
|
|
|
|
Self::serialize_obj_into(
|
|
|
|
&mut start,
|
2019-10-21 12:46:16 -07:00
|
|
|
SIZE_OF_CODING_SHRED_HEADER,
|
2019-10-18 22:55:59 -07:00
|
|
|
&mut payload,
|
|
|
|
&coding_header,
|
|
|
|
)
|
|
|
|
.expect("Failed to write data header into shred buffer");
|
|
|
|
}
|
|
|
|
Shred {
|
|
|
|
common_header,
|
|
|
|
data_header,
|
|
|
|
coding_header,
|
|
|
|
payload,
|
2019-09-16 20:28:54 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-18 22:55:59 -07:00
|
|
|
pub fn new_empty_data_shred() -> Self {
|
|
|
|
Self::new_empty_from_header(
|
|
|
|
ShredCommonHeader::default(),
|
|
|
|
DataShredHeader::default(),
|
|
|
|
CodingShredHeader::default(),
|
|
|
|
)
|
2019-09-16 20:28:54 -07:00
|
|
|
}
|
|
|
|
|
2019-11-02 00:38:30 -07:00
|
|
|
    /// Returns the slot this shred belongs to.
    pub fn slot(&self) -> Slot {
        self.common_header.slot
    }
|
|
|
|
|
2019-11-02 00:38:30 -07:00
|
|
|
    /// Returns the parent slot of a data shred (slot minus the header's
    /// parent_offset); coding shreds have no parent and yield u64::MAX as a
    /// sentinel.
    ///
    /// NOTE(review): the subtraction underflows for a data shred whose
    /// parent_offset exceeds its slot. new_from_serialized_shred rejects
    /// such shreds, but confirm all construction paths enforce this.
    pub fn parent(&self) -> Slot {
        if self.is_data() {
            self.common_header.slot - u64::from(self.data_header.parent_offset)
        } else {
            std::u64::MAX
        }
    }
|
|
|
|
|
|
|
|
    /// Returns this shred's index within its slot.
    pub fn index(&self) -> u32 {
        self.common_header.index
    }
|
|
|
|
|
2019-11-18 18:05:02 -08:00
|
|
|
    /// Returns the shred version from the common header.
    pub fn version(&self) -> u16 {
        self.common_header.version
    }
|
|
|
|
|
2019-09-17 18:22:46 -07:00
|
|
|
    /// Sets the shred index, rewriting the serialized common header in the
    /// payload so the in-memory and on-wire representations stay in sync.
    /// Note: this invalidates any existing signature over the payload.
    pub fn set_index(&mut self, index: u32) {
        self.common_header.index = index;
        Self::serialize_obj_into(
            &mut 0,
            SIZE_OF_COMMON_SHRED_HEADER,
            &mut self.payload,
            &self.common_header,
        )
        .unwrap();
    }
|
|
|
|
|
2019-11-02 00:38:30 -07:00
|
|
|
    /// Sets the shred slot, rewriting the serialized common header in the
    /// payload so the in-memory and on-wire representations stay in sync.
    /// Note: this invalidates any existing signature over the payload.
    pub fn set_slot(&mut self, slot: Slot) {
        self.common_header.slot = slot;
        Self::serialize_obj_into(
            &mut 0,
            SIZE_OF_COMMON_SHRED_HEADER,
            &mut self.payload,
            &self.common_header,
        )
        .unwrap();
    }
|
|
|
|
|
2019-09-16 20:28:54 -07:00
|
|
|
    /// Returns the signature stored in the common header.
    pub fn signature(&self) -> Signature {
        self.common_header.signature
    }
|
|
|
|
|
2021-09-01 08:44:26 -07:00
|
|
|
    /// Derives the 32-byte turbine seed for this shred.
    ///
    /// Once the deterministic-seed feature is active (see
    /// enable_deterministic_seed), the seed is a hash over
    /// (slot, index, leader pubkey); before that, it is the trailing 32
    /// bytes of the shred signature.
    pub fn seed(&self, leader_pubkey: Pubkey, root_bank: &Bank) -> [u8; 32] {
        if enable_deterministic_seed(self.slot(), root_bank) {
            hashv(&[
                &self.slot().to_le_bytes(),
                &self.index().to_le_bytes(),
                &leader_pubkey.to_bytes(),
            ])
            .to_bytes()
        } else {
            let signature = self.common_header.signature.as_ref();
            // Signatures are longer than 32 bytes, so this cannot underflow.
            let offset = signature.len().checked_sub(32).unwrap();
            signature[offset..].try_into().unwrap()
        }
    }
|
|
|
|
|
|
|
|
    /// True when the common header carries the data-shred discriminant.
    pub fn is_data(&self) -> bool {
        self.common_header.shred_type == ShredType(DATA_SHRED)
    }
|
2019-10-16 15:41:43 -07:00
|
|
|
    /// True when the common header carries the coding-shred discriminant.
    pub fn is_code(&self) -> bool {
        self.common_header.shred_type == ShredType(CODING_SHRED)
    }
|
2019-09-16 20:28:54 -07:00
|
|
|
|
|
|
|
pub fn last_in_slot(&self) -> bool {
|
|
|
|
if self.is_data() {
|
2019-10-18 22:55:59 -07:00
|
|
|
self.data_header.flags & LAST_SHRED_IN_SLOT == LAST_SHRED_IN_SLOT
|
2019-09-16 20:28:54 -07:00
|
|
|
} else {
|
|
|
|
false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-09-17 18:22:46 -07:00
|
|
|
    /// This is not a safe function. It only changes the meta information.
    /// Use this only for test code which doesn't care about actual shred
    ///
    /// Sets LAST_SHRED_IN_SLOT on the in-memory flags only; the serialized
    /// payload is NOT rewritten, so it diverges from the header.
    pub fn set_last_in_slot(&mut self) {
        if self.is_data() {
            self.data_header.flags |= LAST_SHRED_IN_SLOT
        }
    }
|
|
|
|
|
2020-11-16 21:30:38 -08:00
|
|
|
    /// Test-only helper: clears DATA_COMPLETE_SHRED and rewrites the data
    /// header into the payload so the serialized bytes match the change.
    #[cfg(test)]
    pub fn unset_data_complete(&mut self) {
        if self.is_data() {
            self.data_header.flags &= !DATA_COMPLETE_SHRED;
        }

        // Data header starts after the shred common header
        let mut start = SIZE_OF_COMMON_SHRED_HEADER;
        let size_of_data_shred_header = SIZE_OF_DATA_SHRED_HEADER;
        Self::serialize_obj_into(
            &mut start,
            size_of_data_shred_header,
            &mut self.payload,
            &self.data_header,
        )
        .expect("Failed to write data header into shred buffer");
    }
|
|
|
|
|
2019-09-16 20:28:54 -07:00
|
|
|
pub fn data_complete(&self) -> bool {
|
|
|
|
if self.is_data() {
|
2019-10-18 22:55:59 -07:00
|
|
|
self.data_header.flags & DATA_COMPLETE_SHRED == DATA_COMPLETE_SHRED
|
2019-09-16 20:28:54 -07:00
|
|
|
} else {
|
|
|
|
false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-06 13:27:58 -08:00
|
|
|
pub fn reference_tick(&self) -> u8 {
|
|
|
|
if self.is_data() {
|
|
|
|
self.data_header.flags & SHRED_TICK_REFERENCE_MASK
|
|
|
|
} else {
|
|
|
|
SHRED_TICK_REFERENCE_MASK
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-12 05:44:06 -08:00
|
|
|
    // Get slot from a shred packet with partial deserialize
    /// Reads only the slot field at its fixed offset instead of parsing the
    /// whole shred; returns None when the packet is too short to hold it.
    pub fn get_slot_from_packet(p: &Packet) -> Option<Slot> {
        let slot_start = OFFSET_OF_SHRED_SLOT;
        let slot_end = slot_start + SIZE_OF_SHRED_SLOT;

        if slot_end > p.meta.size {
            return None;
        }

        limited_deserialize::<Slot>(&p.data[slot_start..slot_end]).ok()
    }
|
|
|
|
|
2019-11-07 11:08:09 -08:00
|
|
|
    /// Extracts the reference tick directly from serialized data shred
    /// bytes without deserializing the headers.
    ///
    /// The flags byte sits at the end of the data header just before the
    /// trailing u16 `size` field — hence the offset arithmetic below. This
    /// is coupled to DataShredHeader's field layout; keep them in sync.
    pub fn reference_tick_from_data(data: &[u8]) -> u8 {
        let flags = data[SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER
            - size_of::<u8>()
            - size_of::<u16>()];
        flags & SHRED_TICK_REFERENCE_MASK
    }
|
|
|
|
|
2019-09-17 18:22:46 -07:00
|
|
|
    /// Verifies the header signature against everything in the payload
    /// after the signature field itself.
    pub fn verify(&self, pubkey: &Pubkey) -> bool {
        self.signature()
            .verify(pubkey.as_ref(), &self.payload[SIZE_OF_SIGNATURE..])
    }
|
2019-09-12 21:52:13 -07:00
|
|
|
}
|
|
|
|
|
2021-07-07 08:21:12 -07:00
|
|
|
/// Returns true when the deterministic-shred-seed feature should apply to
/// `shred_slot`: the feature must be activated, and the shred's epoch must
/// be strictly later than the activation epoch.
fn enable_deterministic_seed(shred_slot: Slot, bank: &Bank) -> bool {
    let feature_slot = bank
        .feature_set
        .activated_slot(&feature_set::deterministic_shred_seed_enabled::id());
    match feature_slot {
        None => false,
        Some(feature_slot) => {
            let epoch_schedule = bank.epoch_schedule();
            let feature_epoch = epoch_schedule.get_epoch(feature_slot);
            let shred_epoch = epoch_schedule.get_epoch(shred_slot);
            // Strict inequality delays the switch until the epoch after
            // activation.
            feature_epoch < shred_epoch
        }
    }
}
|
|
|
|
|
2019-09-14 21:05:54 -07:00
|
|
|
/// Converts ledger entries for one slot into signed data and coding shreds.
#[derive(Debug)]
pub struct Shredder {
    pub slot: Slot,
    pub parent_slot: Slot,
    // Stamped into every produced shred's common header.
    version: u16,
    // NOTE(review): not read anywhere in this view; confirm external users
    // before removing.
    pub signing_coding_time: u128,
    // Tick value recorded (capped to SHRED_TICK_REFERENCE_MASK) in each
    // data shred's flags.
    reference_tick: u8,
}
|
|
|
|
|
|
|
|
impl Shredder {
|
2021-06-21 13:12:38 -07:00
|
|
|
    /// Creates a shredder for `slot` whose parent is `parent_slot`.
    ///
    /// Errors with `SlotTooLow` unless
    /// parent_slot <= slot <= parent_slot + u16::MAX, since the parent is
    /// encoded as a u16 offset in each data shred header.
    pub fn new(slot: Slot, parent_slot: Slot, reference_tick: u8, version: u16) -> Result<Self> {
        if slot < parent_slot || slot - parent_slot > u64::from(std::u16::MAX) {
            Err(ShredError::SlotTooLow { slot, parent_slot })
        } else {
            Ok(Self {
                slot,
                parent_slot,
                signing_coding_time: 0,
                reference_tick,
                version,
            })
        }
    }
|
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
    /// Shreds `entries` into signed data shreds plus their coding shreds.
    ///
    /// Returns (data_shreds, coding_shreds, next_index) — note the third
    /// element is one past the last data shred index produced (see
    /// entries_to_data_shreds, which returns last_shred_index + 1).
    pub fn entries_to_shreds(
        &self,
        keypair: &Keypair,
        entries: &[Entry],
        is_last_in_slot: bool,
        next_shred_index: u32,
    ) -> (Vec<Shred>, Vec<Shred>, u32) {
        let mut stats = ProcessShredsStats::default();
        let (data_shreds, last_shred_index) = self.entries_to_data_shreds(
            keypair,
            entries,
            is_last_in_slot,
            next_shred_index,
            next_shred_index, // fec_set_offset
            &mut stats,
        );
        // data_shreds is non-empty here, so the only error path inside
        // data_shreds_to_coding_shreds (none currently) cannot trigger;
        // unwrap reflects that.
        let coding_shreds =
            Self::data_shreds_to_coding_shreds(keypair, &data_shreds, is_last_in_slot, &mut stats)
                .unwrap();
        (data_shreds, coding_shreds, last_shred_index)
    }
|
|
|
|
|
2021-03-23 07:52:38 -07:00
|
|
|
// Each FEC block has maximum MAX_DATA_SHREDS_PER_FEC_BLOCK shreds.
|
|
|
|
// "FEC set index" is the index of first data shred in that FEC block.
|
|
|
|
// Shred indices with the same value of:
|
|
|
|
// (shred_index - fec_set_offset) / MAX_DATA_SHREDS_PER_FEC_BLOCK
|
|
|
|
// belong to the same FEC set.
|
|
|
|
pub fn fec_set_index(shred_index: u32, fec_set_offset: u32) -> Option<u32> {
|
|
|
|
let diff = shred_index.checked_sub(fec_set_offset)?;
|
|
|
|
Some(shred_index - diff % MAX_DATA_SHREDS_PER_FEC_BLOCK)
|
|
|
|
}
|
|
|
|
|
2019-12-16 17:11:18 -08:00
|
|
|
    /// Serializes `entries` with bincode and slices the bytes into signed
    /// data shreds, generated in parallel on the shredder thread pool.
    ///
    /// Returns the shreds together with one past the last shred index
    /// produced. Timing is accumulated into `process_stats`.
    pub fn entries_to_data_shreds(
        &self,
        keypair: &Keypair,
        entries: &[Entry],
        is_last_in_slot: bool,
        next_shred_index: u32,
        // Shred index offset at which FEC sets are generated.
        fec_set_offset: u32,
        process_stats: &mut ProcessShredsStats,
    ) -> (Vec<Shred>, u32) {
        let mut serialize_time = Measure::start("shred_serialize");
        let serialized_shreds =
            bincode::serialize(entries).expect("Expect to serialize all entries");
        serialize_time.stop();

        let mut gen_data_time = Measure::start("shred_gen_data_time");
        let payload_capacity = SIZE_OF_DATA_SHRED_PAYLOAD;
        // Integer division to ensure we have enough shreds to fit all the data
        let num_shreds = (serialized_shreds.len() + payload_capacity - 1) / payload_capacity;
        let last_shred_index = next_shred_index + num_shreds as u32 - 1;
        // 1) Generate data shreds
        let make_data_shred = |shred_index: u32, data| {
            // Flags only apply to the final shred of the batch/slot.
            let is_last_data = shred_index == last_shred_index;
            let is_last_in_slot = is_last_data && is_last_in_slot;
            // Fits in u16 because Shredder::new validated the slot span.
            let parent_offset = self.slot - self.parent_slot;
            let fec_set_index = Self::fec_set_index(shred_index, fec_set_offset);
            let mut shred = Shred::new_from_data(
                self.slot,
                shred_index,
                parent_offset as u16,
                Some(data),
                is_last_data,
                is_last_in_slot,
                self.reference_tick,
                self.version,
                // Safe: shred_index >= next_shred_index >= fec_set_offset
                // for all indices generated below.
                fec_set_index.unwrap(),
            );
            Shredder::sign_shred(keypair, &mut shred);
            shred
        };
        let data_shreds: Vec<Shred> = PAR_THREAD_POOL.with(|thread_pool| {
            thread_pool.borrow().install(|| {
                serialized_shreds
                    .par_chunks(payload_capacity)
                    .enumerate()
                    .map(|(i, shred_data)| {
                        let shred_index = next_shred_index + i as u32;
                        make_data_shred(shred_index, shred_data)
                    })
                    .collect()
            })
        });
        gen_data_time.stop();

        process_stats.serialize_elapsed += serialize_time.as_us();
        process_stats.gen_data_elapsed += gen_data_time.as_us();

        (data_shreds, last_shred_index + 1)
    }
|
2019-08-05 16:32:34 -07:00
|
|
|
|
2020-11-09 23:04:27 -08:00
|
|
|
    /// Generates and signs erasure-coding shreds for `data_shreds`, one
    /// batch per FEC block of MAX_DATA_SHREDS_PER_FEC_BLOCK data shreds,
    /// using the shredder thread pool. Timing goes into `process_stats`.
    pub fn data_shreds_to_coding_shreds(
        keypair: &Keypair,
        data_shreds: &[Shred],
        is_last_in_slot: bool,
        process_stats: &mut ProcessShredsStats,
    ) -> Result<Vec<Shred>> {
        if data_shreds.is_empty() {
            return Ok(Vec::default());
        }
        let mut gen_coding_time = Measure::start("gen_coding_shreds");
        // 1) Generate coding shreds
        let mut coding_shreds: Vec<_> = PAR_THREAD_POOL.with(|thread_pool| {
            thread_pool.borrow().install(|| {
                data_shreds
                    .par_chunks(MAX_DATA_SHREDS_PER_FEC_BLOCK as usize)
                    .flat_map(|shred_data_batch| {
                        Shredder::generate_coding_shreds(shred_data_batch, is_last_in_slot)
                    })
                    .collect()
            })
        });
        gen_coding_time.stop();

        let mut sign_coding_time = Measure::start("sign_coding_shreds");
        // 2) Sign coding shreds
        PAR_THREAD_POOL.with(|thread_pool| {
            thread_pool.borrow().install(|| {
                coding_shreds.par_iter_mut().for_each(|coding_shred| {
                    Shredder::sign_shred(keypair, coding_shred);
                })
            })
        });
        sign_coding_time.stop();

        process_stats.gen_coding_elapsed += gen_coding_time.as_us();
        process_stats.sign_coding_elapsed += sign_coding_time.as_us();
        Ok(coding_shreds)
    }
|
2019-09-18 20:08:27 -07:00
|
|
|
|
2019-10-28 10:29:38 -07:00
|
|
|
pub fn sign_shred(signer: &Keypair, shred: &mut Shred) {
|
2019-10-21 12:46:16 -07:00
|
|
|
let signature = signer.sign_message(&shred.payload[SIZE_OF_SIGNATURE..]);
|
|
|
|
bincode::serialize_into(&mut shred.payload[..SIZE_OF_SIGNATURE], &signature)
|
2019-10-18 22:55:59 -07:00
|
|
|
.expect("Failed to generate serialized signature");
|
|
|
|
shred.common_header.signature = signature;
|
2019-08-02 15:53:42 -07:00
|
|
|
}
|
|
|
|
|
2019-09-18 20:08:27 -07:00
|
|
|
pub fn new_coding_shred_header(
|
2019-11-02 00:38:30 -07:00
|
|
|
slot: Slot,
|
2019-08-07 17:02:49 -07:00
|
|
|
index: u32,
|
2019-12-12 16:50:29 -08:00
|
|
|
fec_set_index: u32,
|
2019-08-07 17:02:49 -07:00
|
|
|
num_data: usize,
|
|
|
|
num_code: usize,
|
2019-11-18 18:05:02 -08:00
|
|
|
version: u16,
|
2019-10-18 22:55:59 -07:00
|
|
|
) -> (ShredCommonHeader, CodingShredHeader) {
|
2019-11-18 18:05:02 -08:00
|
|
|
let header = ShredCommonHeader {
|
|
|
|
shred_type: ShredType(CODING_SHRED),
|
|
|
|
index,
|
|
|
|
slot,
|
|
|
|
version,
|
2019-12-12 16:50:29 -08:00
|
|
|
fec_set_index,
|
2019-11-18 18:05:02 -08:00
|
|
|
..ShredCommonHeader::default()
|
|
|
|
};
|
2019-10-18 22:55:59 -07:00
|
|
|
(
|
|
|
|
header,
|
|
|
|
CodingShredHeader {
|
|
|
|
num_data_shreds: num_data as u16,
|
|
|
|
num_coding_shreds: num_code as u16,
|
2021-05-03 06:20:47 -07:00
|
|
|
..CodingShredHeader::default()
|
2019-10-18 22:55:59 -07:00
|
|
|
},
|
|
|
|
)
|
2019-08-07 17:02:49 -07:00
|
|
|
}
|
|
|
|
|
2019-08-05 16:32:34 -07:00
|
|
|
/// Generates coding shreds for the data shreds in the current FEC set.
///
/// All shreds in `data` must belong to the same slot, version, and FEC set,
/// and the set must start at its own `fec_set_index` (asserted below).
/// When `is_last_in_slot` is set, extra parity is produced so the final FEC
/// set of the slot is over-protected.
///
/// # Panics
/// Panics if `data` is empty, if the shreds are inconsistent, or if the
/// erasure session cannot be created/encoded (invariant violations).
pub fn generate_coding_shreds(data: &[Shred], is_last_in_slot: bool) -> Vec<Shred> {
    // Only this many bytes of each data shred payload participate in the
    // erasure encoding; the rest is the "restricted" section.
    const PAYLOAD_ENCODE_SIZE: usize = SHRED_PAYLOAD_SIZE - SIZE_OF_CODING_SHRED_HEADERS;
    let ShredCommonHeader {
        slot,
        index,
        version,
        fec_set_index,
        ..
    } = data.first().unwrap().common_header;
    // The first data shred of an FEC set defines the set's index.
    assert_eq!(fec_set_index, index);
    assert!(data.iter().all(|shred| shred.common_header.slot == slot
        && shred.common_header.version == version
        && shred.common_header.fec_set_index == fec_set_index));
    let num_data = data.len();
    // For the last set in the slot, pad parity up to a full double-sized
    // block (but never fewer coding shreds than data shreds).
    let num_coding = if is_last_in_slot {
        (2 * MAX_DATA_SHREDS_PER_FEC_BLOCK as usize)
            .saturating_sub(num_data)
            .max(num_data)
    } else {
        num_data
    };
    // Borrow only the encodable prefix of each data payload.
    let data: Vec<_> = data
        .iter()
        .map(|shred| &shred.payload[..PAYLOAD_ENCODE_SIZE])
        .collect();
    let mut parity = vec![vec![0u8; PAYLOAD_ENCODE_SIZE]; num_coding];
    Session::new(num_data, num_coding)
        .unwrap()
        .encode(&data, &mut parity[..])
        .unwrap();
    // Wrap each parity buffer in a coding shred; coding shred indices run
    // contiguously from the FEC set index.
    parity
        .iter()
        .enumerate()
        .map(|(i, parity)| {
            let mut shred = Shred::new_empty_coding(
                slot,
                fec_set_index + i as u32, // shred index
                fec_set_index,
                num_data,
                num_coding,
                version,
            );
            shred.payload[SIZE_OF_CODING_SHRED_HEADERS..].copy_from_slice(parity);
            shred
        })
        .collect()
}
|
2019-08-07 17:02:49 -07:00
|
|
|
|
|
|
|
fn fill_in_missing_shreds(
|
|
|
|
num_data: usize,
|
|
|
|
num_coding: usize,
|
2019-09-23 16:24:21 -07:00
|
|
|
first_index_in_fec_set: usize,
|
2019-08-07 17:02:49 -07:00
|
|
|
expected_index: usize,
|
2019-09-23 16:24:21 -07:00
|
|
|
index_found: usize,
|
2019-08-07 17:02:49 -07:00
|
|
|
present: &mut [bool],
|
2019-09-23 16:24:21 -07:00
|
|
|
) -> Vec<Vec<u8>> {
|
|
|
|
let end_index = index_found.saturating_sub(1);
|
2019-08-26 18:27:45 -07:00
|
|
|
// The index of current shred must be within the range of shreds that are being
|
|
|
|
// recovered
|
2019-09-23 16:24:21 -07:00
|
|
|
if !(first_index_in_fec_set..first_index_in_fec_set + num_data + num_coding)
|
|
|
|
.contains(&end_index)
|
|
|
|
{
|
|
|
|
return vec![];
|
2019-08-26 18:27:45 -07:00
|
|
|
}
|
|
|
|
|
2019-09-23 16:24:21 -07:00
|
|
|
let missing_blocks: Vec<Vec<u8>> = (expected_index..index_found)
|
2019-08-07 17:02:49 -07:00
|
|
|
.map(|missing| {
|
2019-09-23 16:24:21 -07:00
|
|
|
present[missing.saturating_sub(first_index_in_fec_set)] = false;
|
|
|
|
if missing < first_index_in_fec_set + num_data {
|
|
|
|
Shred::new_empty_data_shred().payload
|
|
|
|
} else {
|
2020-05-19 12:38:18 -07:00
|
|
|
vec![0; SHRED_PAYLOAD_SIZE]
|
2019-09-23 16:24:21 -07:00
|
|
|
}
|
2019-08-07 17:02:49 -07:00
|
|
|
})
|
|
|
|
.collect();
|
2019-09-23 16:24:21 -07:00
|
|
|
missing_blocks
|
2019-08-07 17:02:49 -07:00
|
|
|
}
|
|
|
|
|
2019-08-26 18:27:45 -07:00
|
|
|
/// Attempts to recover missing data shreds of one FEC set via erasure
/// decoding.
///
/// `shreds` are the data and coding shreds received so far (in index
/// order), `num_data`/`num_coding` describe the FEC set's shape, and
/// `first_index` is the index of the set's first data shred. Returns the
/// recovered *data* shreds only (coding shreds are never returned); an
/// empty vector when nothing was missing or recovery was not attempted.
///
/// # Errors
/// Fails when payload sizes are inconsistent, too few shards are present,
/// or the reed-solomon session rejects the inputs.
pub fn try_recovery(
    shreds: Vec<Shred>,
    num_data: usize,
    num_coding: usize,
    first_index: usize,
    slot: Slot,
) -> std::result::Result<Vec<Shred>, reed_solomon_erasure::Error> {
    Self::verify_consistent_shred_payload_sizes("try_recovery()", &shreds)?;
    let mut recovered_data = vec![];
    let fec_set_size = num_data + num_coding;

    // Only attempt recovery when something is actually missing and coding
    // shreds exist to recover it from.
    if num_coding > 0 && shreds.len() < fec_set_size {
        // Let's try recovering missing shreds using erasure
        let present = &mut vec![true; fec_set_size];
        let mut next_expected_index = first_index;
        // Flatten received shreds into a contiguous, gap-filled buffer list.
        // Positions 0..num_data are data shreds; coding shreds follow.
        let mut shred_bufs: Vec<Vec<u8>> = shreds
            .into_iter()
            .flat_map(|shred| {
                // Coding shreds are offset past the data region.
                let offset = if shred.is_data() { 0 } else { num_data };
                let index = offset + shred.index() as usize;
                // Fill any gap between what we expected and what arrived.
                let mut blocks = Self::fill_in_missing_shreds(
                    num_data,
                    num_coding,
                    first_index,
                    next_expected_index,
                    index,
                    present,
                );
                blocks.push(shred.payload);
                next_expected_index = index + 1;
                blocks
            })
            .collect();

        // Insert any other missing shreds after the last shred we have received in the
        // current FEC block
        let mut pending_shreds = Self::fill_in_missing_shreds(
            num_data,
            num_coding,
            first_index,
            next_expected_index,
            first_index + fec_set_size,
            present,
        );

        shred_bufs.append(&mut pending_shreds);

        if shred_bufs.len() != fec_set_size {
            return Err(reed_solomon_erasure::Error::TooFewShardsPresent);
        }

        let session = Session::new(num_data, num_coding)?;

        // All information (excluding the restricted section) from a data shred is encoded
        let valid_data_len = SHRED_PAYLOAD_SIZE - SIZE_OF_CODING_SHRED_HEADERS;
        let coding_block_offset = SIZE_OF_CODING_SHRED_HEADERS;
        // Pair each buffer's erasure-coded region with its presence flag,
        // as expected by the decoder.
        let mut blocks: Vec<(&mut [u8], bool)> = shred_bufs
            .iter_mut()
            .enumerate()
            .map(|(position, x)| {
                if position < num_data {
                    x[..valid_data_len].as_mut()
                } else {
                    x[coding_block_offset..].as_mut()
                }
            })
            .zip(present.clone())
            .collect();
        session.decode_blocks(&mut blocks)?;

        // Pull out the buffers that were reconstructed (absent + data
        // position) and turn them back into shreds. `num_drained` adjusts
        // positions as earlier removals shift the vector.
        let mut num_drained = 0;
        present
            .iter()
            .enumerate()
            .for_each(|(position, was_present)| {
                if !*was_present && position < num_data {
                    let drain_this = position - num_drained;
                    let shred_buf = shred_bufs.remove(drain_this);
                    num_drained += 1;
                    if let Ok(shred) = Shred::new_from_serialized_shred(shred_buf) {
                        let shred_index = shred.index() as usize;
                        // Valid shred must be in the same slot as the original shreds
                        if shred.slot() == slot {
                            // A valid data shred must be indexed between first_index and first+num_data index
                            if (first_index..first_index + num_data).contains(&shred_index) {
                                recovered_data.push(shred)
                            }
                        }
                    }
                }
            });
    }

    Ok(recovered_data)
}
|
|
|
|
|
|
|
|
/// Combines all shreds to recreate the original buffer.
///
/// `shreds` must be contiguous data shreds (strictly increasing indices
/// starting from the first shred's index) and the last one must mark the
/// end of the data set (or of the slot); otherwise `TooFewDataShards` is
/// returned.
pub fn deshred(shreds: &[Shred]) -> std::result::Result<Vec<u8>, reed_solomon_erasure::Error> {
    use reed_solomon_erasure::Error::TooFewDataShards;
    // Data bytes start after the common + data headers in each payload.
    const SHRED_DATA_OFFSET: usize = SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER;
    Self::verify_consistent_shred_payload_sizes("deshred()", shreds)?;
    let index = shreds.first().ok_or(TooFewDataShards)?.index();
    // Indices must be contiguous with no gaps or reordering.
    let aligned = shreds.iter().zip(index..).all(|(s, i)| s.index() == i);
    let data_complete = {
        let shred = shreds.last().unwrap();
        shred.data_complete() || shred.last_in_slot()
    };
    if !data_complete || !aligned {
        return Err(TooFewDataShards);
    }
    // Concatenate each shred's data section. `size` from the header is
    // clamped to the payload length, and the header offset is clamped to
    // `size`, so malformed headers cannot cause an out-of-range slice.
    let data: Vec<_> = shreds
        .iter()
        .flat_map(|shred| {
            let size = shred.data_header.size as usize;
            let size = shred.payload.len().min(size);
            let offset = SHRED_DATA_OFFSET.min(size);
            shred.payload[offset..size].iter()
        })
        .copied()
        .collect();
    if data.is_empty() {
        // For backward compatibility. This is needed when the data shred
        // payload is None, so that deserializing to Vec<Entry> results in
        // an empty vector.
        Ok(vec![0u8; SIZE_OF_DATA_SHRED_PAYLOAD])
    } else {
        Ok(data)
    }
}
|
2020-05-19 12:38:18 -07:00
|
|
|
|
|
|
|
fn verify_consistent_shred_payload_sizes(
|
|
|
|
caller: &str,
|
|
|
|
shreds: &[Shred],
|
|
|
|
) -> std::result::Result<(), reed_solomon_erasure::Error> {
|
|
|
|
if shreds.is_empty() {
|
|
|
|
return Err(reed_solomon_erasure::Error::TooFewShardsPresent);
|
|
|
|
}
|
|
|
|
let slot = shreds[0].slot();
|
|
|
|
for shred in shreds {
|
|
|
|
if shred.payload.len() != SHRED_PAYLOAD_SIZE {
|
|
|
|
error!(
|
|
|
|
"{} Shreds for slot: {} are inconsistent sizes. Expected: {} actual: {}",
|
|
|
|
caller,
|
|
|
|
slot,
|
|
|
|
SHRED_PAYLOAD_SIZE,
|
|
|
|
shred.payload.len()
|
|
|
|
);
|
|
|
|
return Err(reed_solomon_erasure::Error::IncorrectShardSize);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
2019-08-02 15:53:42 -07:00
|
|
|
}
|
|
|
|
|
2020-12-15 16:50:40 -08:00
|
|
|
/// Counters for shred-fetch packet filtering; see `get_shred_slot_index_type`
/// for the checks that bump several of these.
#[derive(Default, Debug, Eq, PartialEq)]
pub struct ShredFetchStats {
    // Packet too short to contain the shred index field.
    pub index_overrun: usize,
    // Total shreds observed; incremented by callers — TODO confirm.
    pub shred_count: usize,
    // Shred index field failed to deserialize.
    pub index_bad_deserialize: usize,
    // Shred index >= MAX_DATA_SHREDS_PER_SLOT.
    pub index_out_of_bounds: usize,
    // Slot field failed to deserialize.
    pub slot_bad_deserialize: usize,
    // NOTE(review): presumably counts shreds already seen; updated by
    // callers outside this view — confirm.
    pub duplicate_shred: usize,
    // NOTE(review): presumably slot outside the accepted window; updated
    // by callers outside this view — confirm.
    pub slot_out_of_range: usize,
    // Shred type byte was neither DATA_SHRED nor CODING_SHRED.
    pub bad_shred_type: usize,
}
|
|
|
|
|
|
|
|
// Get slot, index, and type from a packet with partial deserialize
|
|
|
|
pub fn get_shred_slot_index_type(
|
|
|
|
p: &Packet,
|
|
|
|
stats: &mut ShredFetchStats,
|
|
|
|
) -> Option<(Slot, u32, bool)> {
|
|
|
|
let index_start = OFFSET_OF_SHRED_INDEX;
|
|
|
|
let index_end = index_start + SIZE_OF_SHRED_INDEX;
|
|
|
|
let slot_start = OFFSET_OF_SHRED_SLOT;
|
|
|
|
let slot_end = slot_start + SIZE_OF_SHRED_SLOT;
|
|
|
|
|
|
|
|
debug_assert!(index_end > slot_end);
|
|
|
|
debug_assert!(index_end > OFFSET_OF_SHRED_TYPE);
|
|
|
|
|
|
|
|
if index_end > p.meta.size {
|
|
|
|
stats.index_overrun += 1;
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
|
|
|
|
let index;
|
|
|
|
match limited_deserialize::<u32>(&p.data[index_start..index_end]) {
|
|
|
|
Ok(x) => index = x,
|
|
|
|
Err(_e) => {
|
|
|
|
stats.index_bad_deserialize += 1;
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if index >= MAX_DATA_SHREDS_PER_SLOT as u32 {
|
|
|
|
stats.index_out_of_bounds += 1;
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
|
|
|
|
let slot;
|
|
|
|
match limited_deserialize::<Slot>(&p.data[slot_start..slot_end]) {
|
|
|
|
Ok(x) => {
|
|
|
|
slot = x;
|
|
|
|
}
|
|
|
|
Err(_e) => {
|
|
|
|
stats.slot_bad_deserialize += 1;
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
let shred_type = p.data[OFFSET_OF_SHRED_TYPE];
|
|
|
|
if shred_type == DATA_SHRED || shred_type == CODING_SHRED {
|
|
|
|
return Some((slot, index, shred_type == DATA_SHRED));
|
|
|
|
} else {
|
|
|
|
stats.bad_shred_type += 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
None
|
|
|
|
}
|
|
|
|
|
2020-05-19 12:38:18 -07:00
|
|
|
pub fn max_ticks_per_n_shreds(num_shreds: u64, shred_data_size: Option<usize>) -> u64 {
|
2019-10-31 13:38:50 -07:00
|
|
|
let ticks = create_ticks(1, 0, Hash::default());
|
2020-05-19 12:38:18 -07:00
|
|
|
max_entries_per_n_shred(&ticks[0], num_shreds, shred_data_size)
|
2019-10-08 00:42:51 -07:00
|
|
|
}
|
|
|
|
|
2020-05-19 12:38:18 -07:00
|
|
|
pub fn max_entries_per_n_shred(
|
|
|
|
entry: &Entry,
|
|
|
|
num_shreds: u64,
|
|
|
|
shred_data_size: Option<usize>,
|
|
|
|
) -> u64 {
|
|
|
|
let shred_data_size = shred_data_size.unwrap_or(SIZE_OF_DATA_SHRED_PAYLOAD) as u64;
|
2019-10-08 00:42:51 -07:00
|
|
|
let vec_size = bincode::serialized_size(&vec![entry]).unwrap();
|
|
|
|
let entry_size = bincode::serialized_size(entry).unwrap();
|
|
|
|
let count_size = vec_size - entry_size;
|
|
|
|
|
|
|
|
(shred_data_size * num_shreds - count_size) / entry_size
|
|
|
|
}
|
|
|
|
|
2019-12-11 11:10:21 -08:00
|
|
|
pub fn verify_test_data_shred(
|
|
|
|
shred: &Shred,
|
|
|
|
index: u32,
|
|
|
|
slot: Slot,
|
|
|
|
parent: Slot,
|
|
|
|
pk: &Pubkey,
|
|
|
|
verify: bool,
|
|
|
|
is_last_in_slot: bool,
|
2021-03-16 03:09:16 -07:00
|
|
|
is_last_data: bool,
|
2019-12-11 11:10:21 -08:00
|
|
|
) {
|
2020-05-19 12:38:18 -07:00
|
|
|
assert_eq!(shred.payload.len(), SHRED_PAYLOAD_SIZE);
|
2019-12-11 11:10:21 -08:00
|
|
|
assert!(shred.is_data());
|
|
|
|
assert_eq!(shred.index(), index);
|
|
|
|
assert_eq!(shred.slot(), slot);
|
|
|
|
assert_eq!(shred.parent(), parent);
|
|
|
|
assert_eq!(verify, shred.verify(pk));
|
|
|
|
if is_last_in_slot {
|
|
|
|
assert!(shred.last_in_slot());
|
|
|
|
} else {
|
|
|
|
assert!(!shred.last_in_slot());
|
|
|
|
}
|
2021-03-16 03:09:16 -07:00
|
|
|
if is_last_data {
|
2019-12-11 11:10:21 -08:00
|
|
|
assert!(shred.data_complete());
|
|
|
|
} else {
|
|
|
|
assert!(!shred.data_complete());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-08-02 15:53:42 -07:00
|
|
|
#[cfg(test)]
|
2019-10-08 00:42:51 -07:00
|
|
|
pub mod tests {
|
2019-08-02 15:53:42 -07:00
|
|
|
use super::*;
|
2019-10-21 12:46:16 -07:00
|
|
|
use bincode::serialized_size;
|
2019-10-18 09:28:51 -07:00
|
|
|
use matches::assert_matches;
|
2021-04-21 05:47:50 -07:00
|
|
|
use rand::{seq::SliceRandom, Rng};
|
|
|
|
use solana_sdk::{
|
|
|
|
hash::{self, hash},
|
|
|
|
shred_version, system_transaction,
|
|
|
|
};
|
2021-06-21 13:12:38 -07:00
|
|
|
use std::{collections::HashSet, convert::TryInto, iter::repeat_with, sync::Arc};
|
2019-08-02 15:53:42 -07:00
|
|
|
|
2019-10-21 12:46:16 -07:00
|
|
|
#[test]
fn test_shred_constants() {
    // Each SIZE_OF_* constant must equal the bincode serialized size of the
    // corresponding default value, since offsets are computed from them.
    assert_eq!(
        SIZE_OF_COMMON_SHRED_HEADER,
        serialized_size(&ShredCommonHeader::default()).unwrap() as usize
    );
    assert_eq!(
        SIZE_OF_CODING_SHRED_HEADER,
        serialized_size(&CodingShredHeader::default()).unwrap() as usize
    );
    assert_eq!(
        SIZE_OF_DATA_SHRED_HEADER,
        serialized_size(&DataShredHeader::default()).unwrap() as usize
    );
    // The data header size must not depend on the field values.
    let data_shred_header_with_size = DataShredHeader {
        size: 1000,
        ..DataShredHeader::default()
    };
    assert_eq!(
        SIZE_OF_DATA_SHRED_HEADER,
        serialized_size(&data_shred_header_with_size).unwrap() as usize
    );
    assert_eq!(
        SIZE_OF_SIGNATURE,
        bincode::serialized_size(&Signature::default()).unwrap() as usize
    );
    assert_eq!(
        SIZE_OF_SHRED_TYPE,
        bincode::serialized_size(&ShredType::default()).unwrap() as usize
    );
    assert_eq!(
        SIZE_OF_SHRED_SLOT,
        bincode::serialized_size(&Slot::default()).unwrap() as usize
    );
    assert_eq!(
        SIZE_OF_SHRED_INDEX,
        bincode::serialized_size(&ShredCommonHeader::default().index).unwrap() as usize
    );
}
|
|
|
|
|
2019-11-02 00:38:30 -07:00
|
|
|
// Test helper: asserts that `shred` is a coding shred with the expected
// index, slot, and signature-verification result.
fn verify_test_code_shred(shred: &Shred, index: u32, slot: Slot, pk: &Pubkey, verify: bool) {
    assert_eq!(shred.payload.len(), SHRED_PAYLOAD_SIZE);
    assert!(!shred.is_data());
    assert_eq!(shred.index(), index);
    assert_eq!(shred.slot(), slot);
    assert_eq!(verify, shred.verify(pk));
}
|
|
|
|
|
2020-05-19 12:38:18 -07:00
|
|
|
// End-to-end shredding test: builds entries, shreds them, checks every data
// and coding shred's identity and index coverage, then reassembles the
// entries and compares with the originals.
fn run_test_data_shredder(slot: Slot) {
    let keypair = Arc::new(Keypair::new());

    // Test that parent cannot be > current slot
    assert_matches!(
        Shredder::new(slot, slot + 1, 0, 0),
        Err(ShredError::SlotTooLow {
            slot: _,
            parent_slot: _,
        })
    );
    // Test that slot - parent cannot be > u16 MAX
    assert_matches!(
        Shredder::new(slot, slot - 1 - 0xffff, 0, 0),
        Err(ShredError::SlotTooLow {
            slot: _,
            parent_slot: _,
        })
    );
    let parent_slot = slot - 5;
    let shredder = Shredder::new(slot, parent_slot, 0, 0).unwrap();
    // A handful of simple transfer entries to shred.
    let entries: Vec<_> = (0..5)
        .map(|_| {
            let keypair0 = Keypair::new();
            let keypair1 = Keypair::new();
            let tx0 =
                system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
            Entry::new(&Hash::default(), 1, vec![tx0])
        })
        .collect();

    let size = serialized_size(&entries).unwrap();
    // Integer division to ensure we have enough shreds to fit all the data
    let payload_capacity = SIZE_OF_DATA_SHRED_PAYLOAD as u64;
    let num_expected_data_shreds = (size + payload_capacity - 1) / payload_capacity;
    // Mirrors the last-in-slot padding rule in generate_coding_shreds.
    let num_expected_coding_shreds = (2 * MAX_DATA_SHREDS_PER_FEC_BLOCK as usize)
        .saturating_sub(num_expected_data_shreds as usize)
        .max(num_expected_data_shreds as usize);
    let start_index = 0;
    let (data_shreds, coding_shreds, next_index) =
        shredder.entries_to_shreds(&keypair, &entries, true, start_index);
    assert_eq!(next_index as u64, num_expected_data_shreds);

    let mut data_shred_indexes = HashSet::new();
    let mut coding_shred_indexes = HashSet::new();
    for shred in data_shreds.iter() {
        assert_eq!(shred.common_header.shred_type, ShredType(DATA_SHRED));
        let index = shred.common_header.index;
        let is_last = index as u64 == num_expected_data_shreds - 1;
        verify_test_data_shred(
            shred,
            index,
            slot,
            parent_slot,
            &keypair.pubkey(),
            true,
            is_last,
            is_last,
        );
        // Indices must be unique.
        assert!(!data_shred_indexes.contains(&index));
        data_shred_indexes.insert(index);
    }

    for shred in coding_shreds.iter() {
        let index = shred.common_header.index;
        assert_eq!(shred.common_header.shred_type, ShredType(CODING_SHRED));
        verify_test_code_shred(shred, index, slot, &keypair.pubkey(), true);
        assert!(!coding_shred_indexes.contains(&index));
        coding_shred_indexes.insert(index);
    }

    // Index ranges must be fully covered, starting at start_index.
    for i in start_index..start_index + num_expected_data_shreds as u32 {
        assert!(data_shred_indexes.contains(&i));
    }

    for i in start_index..start_index + num_expected_coding_shreds as u32 {
        assert!(coding_shred_indexes.contains(&i));
    }

    assert_eq!(data_shred_indexes.len() as u64, num_expected_data_shreds);
    assert_eq!(coding_shred_indexes.len(), num_expected_coding_shreds);

    // Test reassembly
    let deshred_payload = Shredder::deshred(&data_shreds).unwrap();
    let deshred_entries: Vec<Entry> = bincode::deserialize(&deshred_payload).unwrap();
    assert_eq!(entries, deshred_entries);
}
|
2019-08-07 17:02:49 -07:00
|
|
|
|
2020-05-19 12:38:18 -07:00
|
|
|
#[test]
fn test_data_shredder() {
    // Arbitrary large slot value exercising the full u64 range.
    run_test_data_shredder(0x1234_5678_9abc_def0);
}
|
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
#[test]
fn test_deserialize_shred_payload() {
    // A shred rebuilt from its own serialized payload must equal the original.
    let keypair = Arc::new(Keypair::new());
    let slot = 1;
    let parent_slot = 0;
    let shredder = Shredder::new(slot, parent_slot, 0, 0).unwrap();
    let entries: Vec<_> = (0..5)
        .map(|_| {
            let keypair0 = Keypair::new();
            let keypair1 = Keypair::new();
            let tx0 =
                system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
            Entry::new(&Hash::default(), 1, vec![tx0])
        })
        .collect();

    let data_shreds = shredder.entries_to_shreds(&keypair, &entries, true, 0).0;

    let deserialized_shred =
        Shred::new_from_serialized_shred(data_shreds.last().unwrap().payload.clone()).unwrap();
    assert_eq!(deserialized_shred, *data_shreds.last().unwrap());
}
|
|
|
|
|
2019-11-06 13:27:58 -08:00
|
|
|
#[test]
fn test_shred_reference_tick() {
    // The reference tick passed to the Shredder must round-trip through
    // every data shred, both via the accessor and raw-payload extraction.
    let keypair = Arc::new(Keypair::new());
    let slot = 1;
    let parent_slot = 0;
    let shredder = Shredder::new(slot, parent_slot, 5, 0).unwrap();
    let entries: Vec<_> = (0..5)
        .map(|_| {
            let keypair0 = Keypair::new();
            let keypair1 = Keypair::new();
            let tx0 =
                system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
            Entry::new(&Hash::default(), 1, vec![tx0])
        })
        .collect();

    let data_shreds = shredder.entries_to_shreds(&keypair, &entries, true, 0).0;
    data_shreds.iter().for_each(|s| {
        assert_eq!(s.reference_tick(), 5);
        assert_eq!(Shred::reference_tick_from_data(&s.payload), 5);
    });

    // The tick must also survive serialization and deserialization.
    let deserialized_shred =
        Shred::new_from_serialized_shred(data_shreds.last().unwrap().payload.clone()).unwrap();
    assert_eq!(deserialized_shred.reference_tick(), 5);
}
|
|
|
|
|
|
|
|
#[test]
fn test_shred_reference_tick_overflow() {
    // A reference tick larger than the storable range must saturate to
    // SHRED_TICK_REFERENCE_MASK rather than wrap.
    let keypair = Arc::new(Keypair::new());
    let slot = 1;
    let parent_slot = 0;
    let shredder = Shredder::new(slot, parent_slot, u8::max_value(), 0).unwrap();
    let entries: Vec<_> = (0..5)
        .map(|_| {
            let keypair0 = Keypair::new();
            let keypair1 = Keypair::new();
            let tx0 =
                system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
            Entry::new(&Hash::default(), 1, vec![tx0])
        })
        .collect();

    let data_shreds = shredder.entries_to_shreds(&keypair, &entries, true, 0).0;
    data_shreds.iter().for_each(|s| {
        assert_eq!(s.reference_tick(), SHRED_TICK_REFERENCE_MASK);
        assert_eq!(
            Shred::reference_tick_from_data(&s.payload),
            SHRED_TICK_REFERENCE_MASK
        );
    });

    // Saturated value must also survive a serialization round trip.
    let deserialized_shred =
        Shred::new_from_serialized_shred(data_shreds.last().unwrap().payload.clone()).unwrap();
    assert_eq!(
        deserialized_shred.reference_tick(),
        SHRED_TICK_REFERENCE_MASK
    );
}
|
|
|
|
|
2020-05-19 12:38:18 -07:00
|
|
|
// Shreds enough entries to span multiple data shreds and verifies every
// resulting data and coding shred.
fn run_test_data_and_code_shredder(slot: Slot) {
    let keypair = Arc::new(Keypair::new());
    let shredder = Shredder::new(slot, slot - 5, 0, 0).unwrap();
    // Create enough entries to make > 1 shred
    let payload_capacity = SIZE_OF_DATA_SHRED_PAYLOAD;
    let num_entries = max_ticks_per_n_shreds(1, Some(payload_capacity)) + 1;
    let entries: Vec<_> = (0..num_entries)
        .map(|_| {
            let keypair0 = Keypair::new();
            let keypair1 = Keypair::new();
            let tx0 =
                system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
            Entry::new(&Hash::default(), 1, vec![tx0])
        })
        .collect();

    let (data_shreds, coding_shreds, _) =
        shredder.entries_to_shreds(&keypair, &entries, true, 0);

    // Only the final data shred carries the last-in-slot / data-complete flags.
    for (i, s) in data_shreds.iter().enumerate() {
        verify_test_data_shred(
            s,
            s.index(),
            slot,
            slot - 5,
            &keypair.pubkey(),
            true,
            i == data_shreds.len() - 1,
            i == data_shreds.len() - 1,
        );
    }

    for s in coding_shreds {
        verify_test_code_shred(&s, s.index(), slot, &keypair.pubkey(), true);
    }
}
|
|
|
|
|
2019-08-07 17:02:49 -07:00
|
|
|
#[test]
fn test_data_and_code_shredder() {
    // Arbitrary large slot value exercising the full u64 range.
    run_test_data_and_code_shredder(0x1234_5678_9abc_def0);
}
|
|
|
|
|
2021-04-21 05:47:50 -07:00
|
|
|
fn run_test_recovery_and_reassembly(slot: Slot, is_last_in_slot: bool) {
|
2019-08-07 17:02:49 -07:00
|
|
|
let keypair = Arc::new(Keypair::new());
|
2021-06-21 13:12:38 -07:00
|
|
|
let shredder = Shredder::new(slot, slot - 5, 0, 0).unwrap();
|
2019-10-08 00:42:51 -07:00
|
|
|
let keypair0 = Keypair::new();
|
|
|
|
let keypair1 = Keypair::new();
|
|
|
|
let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
|
|
|
|
let entry = Entry::new(&Hash::default(), 1, vec![tx0]);
|
|
|
|
|
|
|
|
let num_data_shreds: usize = 5;
|
2021-04-16 12:04:46 -07:00
|
|
|
let payload_capacity = SIZE_OF_DATA_SHRED_PAYLOAD;
|
2020-05-19 12:38:18 -07:00
|
|
|
let num_entries =
|
2021-04-16 12:04:46 -07:00
|
|
|
max_entries_per_n_shred(&entry, num_data_shreds as u64, Some(payload_capacity));
|
2019-10-08 00:42:51 -07:00
|
|
|
let entries: Vec<_> = (0..num_entries)
|
|
|
|
.map(|_| {
|
|
|
|
let keypair0 = Keypair::new();
|
|
|
|
let keypair1 = Keypair::new();
|
|
|
|
let tx0 =
|
|
|
|
system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
|
|
|
|
Entry::new(&Hash::default(), 1, vec![tx0])
|
|
|
|
})
|
|
|
|
.collect();
|
2019-08-07 17:02:49 -07:00
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
let serialized_entries = bincode::serialize(&entries).unwrap();
|
2021-04-21 05:47:50 -07:00
|
|
|
let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(
|
2021-06-21 13:12:38 -07:00
|
|
|
&keypair,
|
2021-04-21 05:47:50 -07:00
|
|
|
&entries,
|
|
|
|
is_last_in_slot,
|
|
|
|
0, // next_shred_index
|
|
|
|
);
|
2020-02-04 15:45:01 -08:00
|
|
|
let num_coding_shreds = coding_shreds.len();
|
2019-08-07 17:02:49 -07:00
|
|
|
|
2021-04-16 12:04:46 -07:00
|
|
|
// We should have 5 data shreds now
|
2019-10-08 00:42:51 -07:00
|
|
|
assert_eq!(data_shreds.len(), num_data_shreds);
|
2021-04-21 05:47:50 -07:00
|
|
|
if is_last_in_slot {
|
|
|
|
assert_eq!(
|
|
|
|
num_coding_shreds,
|
|
|
|
2 * MAX_DATA_SHREDS_PER_FEC_BLOCK as usize - num_data_shreds
|
|
|
|
);
|
|
|
|
} else {
|
|
|
|
// and an equal number of coding shreds
|
|
|
|
assert_eq!(num_data_shreds, num_coding_shreds);
|
|
|
|
}
|
2019-10-08 00:42:51 -07:00
|
|
|
|
|
|
|
let all_shreds = data_shreds
|
|
|
|
.iter()
|
|
|
|
.cloned()
|
|
|
|
.chain(coding_shreds.iter().cloned())
|
|
|
|
.collect::<Vec<_>>();
|
2019-08-07 17:02:49 -07:00
|
|
|
|
|
|
|
// Test0: Try recovery/reassembly with only data shreds, but not all data shreds. Hint: should fail
|
|
|
|
assert_matches!(
|
2019-08-26 18:27:45 -07:00
|
|
|
Shredder::try_recovery(
|
2019-10-08 00:42:51 -07:00
|
|
|
data_shreds[..data_shreds.len() - 1].to_vec(),
|
|
|
|
num_data_shreds,
|
2020-02-04 15:45:01 -08:00
|
|
|
num_coding_shreds,
|
2019-08-26 18:27:45 -07:00
|
|
|
0,
|
|
|
|
slot
|
|
|
|
),
|
|
|
|
Err(reed_solomon_erasure::Error::TooFewShardsPresent)
|
2019-08-07 17:02:49 -07:00
|
|
|
);
|
|
|
|
|
|
|
|
// Test1: Try recovery/reassembly with only data shreds. Hint: should work
|
2019-09-23 16:24:21 -07:00
|
|
|
let recovered_data = Shredder::try_recovery(
|
2019-10-08 00:42:51 -07:00
|
|
|
data_shreds[..].to_vec(),
|
|
|
|
num_data_shreds,
|
2020-02-04 15:45:01 -08:00
|
|
|
num_coding_shreds,
|
2019-08-26 18:27:45 -07:00
|
|
|
0,
|
|
|
|
slot,
|
|
|
|
)
|
|
|
|
.unwrap();
|
2019-09-23 16:24:21 -07:00
|
|
|
assert!(recovered_data.is_empty());
|
2019-08-07 17:02:49 -07:00
|
|
|
|
|
|
|
// Test2: Try recovery/reassembly with missing data shreds + coding shreds. Hint: should work
|
2019-10-08 00:42:51 -07:00
|
|
|
let mut shred_info: Vec<Shred> = all_shreds
|
2019-08-07 17:02:49 -07:00
|
|
|
.iter()
|
|
|
|
.enumerate()
|
2019-09-18 16:24:30 -07:00
|
|
|
.filter_map(|(i, b)| if i % 2 == 0 { Some(b.clone()) } else { None })
|
|
|
|
.collect();
|
2019-08-07 17:02:49 -07:00
|
|
|
|
2019-09-23 16:24:21 -07:00
|
|
|
let mut recovered_data = Shredder::try_recovery(
|
2019-09-17 18:22:46 -07:00
|
|
|
shred_info.clone(),
|
2019-10-08 00:42:51 -07:00
|
|
|
num_data_shreds,
|
2020-02-04 15:45:01 -08:00
|
|
|
num_coding_shreds,
|
2019-08-26 18:27:45 -07:00
|
|
|
0,
|
|
|
|
slot,
|
|
|
|
)
|
|
|
|
.unwrap();
|
|
|
|
|
2019-09-23 16:24:21 -07:00
|
|
|
assert_eq!(recovered_data.len(), 2); // Data shreds 1 and 3 were missing
|
|
|
|
let recovered_shred = recovered_data.remove(0);
|
2019-10-08 00:42:51 -07:00
|
|
|
verify_test_data_shred(
|
|
|
|
&recovered_shred,
|
|
|
|
1,
|
|
|
|
slot,
|
|
|
|
slot - 5,
|
|
|
|
&keypair.pubkey(),
|
|
|
|
true,
|
|
|
|
false,
|
|
|
|
false,
|
|
|
|
);
|
2019-09-17 18:22:46 -07:00
|
|
|
shred_info.insert(1, recovered_shred);
|
2019-08-20 17:16:06 -07:00
|
|
|
|
2019-09-23 16:24:21 -07:00
|
|
|
let recovered_shred = recovered_data.remove(0);
|
2019-10-08 00:42:51 -07:00
|
|
|
verify_test_data_shred(
|
|
|
|
&recovered_shred,
|
|
|
|
3,
|
2019-08-26 18:27:45 -07:00
|
|
|
slot,
|
2019-10-08 00:42:51 -07:00
|
|
|
slot - 5,
|
|
|
|
&keypair.pubkey(),
|
|
|
|
true,
|
|
|
|
false,
|
|
|
|
false,
|
2019-08-07 17:02:49 -07:00
|
|
|
);
|
2019-10-08 00:42:51 -07:00
|
|
|
shred_info.insert(3, recovered_shred);
|
2019-08-07 17:02:49 -07:00
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
let result = Shredder::deshred(&shred_info[..num_data_shreds]).unwrap();
|
|
|
|
assert!(result.len() >= serialized_entries.len());
|
|
|
|
assert_eq!(serialized_entries[..], result[..serialized_entries.len()]);
|
2019-08-07 17:02:49 -07:00
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
// Test3: Try recovery/reassembly with 3 missing data shreds + 2 coding shreds. Hint: should work
|
|
|
|
let mut shred_info: Vec<Shred> = all_shreds
|
2019-08-07 17:02:49 -07:00
|
|
|
.iter()
|
|
|
|
.enumerate()
|
2019-09-18 16:24:30 -07:00
|
|
|
.filter_map(|(i, b)| if i % 2 != 0 { Some(b.clone()) } else { None })
|
|
|
|
.collect();
|
2019-08-07 17:02:49 -07:00
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
let recovered_data = Shredder::try_recovery(
|
2019-09-17 18:22:46 -07:00
|
|
|
shred_info.clone(),
|
2019-10-08 00:42:51 -07:00
|
|
|
num_data_shreds,
|
2020-02-04 15:45:01 -08:00
|
|
|
num_coding_shreds,
|
2019-08-26 18:27:45 -07:00
|
|
|
0,
|
|
|
|
slot,
|
|
|
|
)
|
|
|
|
.unwrap();
|
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
assert_eq!(recovered_data.len(), 3); // Data shreds 0, 2, 4 were missing
|
|
|
|
for (i, recovered_shred) in recovered_data.into_iter().enumerate() {
|
|
|
|
let index = i * 2;
|
2021-04-21 05:47:50 -07:00
|
|
|
let is_last_data = recovered_shred.index() as usize == num_data_shreds - 1;
|
2019-10-08 00:42:51 -07:00
|
|
|
verify_test_data_shred(
|
|
|
|
&recovered_shred,
|
|
|
|
index.try_into().unwrap(),
|
|
|
|
slot,
|
|
|
|
slot - 5,
|
|
|
|
&keypair.pubkey(),
|
|
|
|
true,
|
2021-04-21 05:47:50 -07:00
|
|
|
is_last_data && is_last_in_slot,
|
|
|
|
is_last_data,
|
2019-10-08 00:42:51 -07:00
|
|
|
);
|
2019-08-20 17:16:06 -07:00
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
shred_info.insert(i * 2, recovered_shred);
|
|
|
|
}
|
2019-08-20 17:16:06 -07:00
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
let result = Shredder::deshred(&shred_info[..num_data_shreds]).unwrap();
|
|
|
|
assert!(result.len() >= serialized_entries.len());
|
|
|
|
assert_eq!(serialized_entries[..], result[..serialized_entries.len()]);
|
2019-08-07 17:02:49 -07:00
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
// Test4: Try reassembly with 2 missing data shreds, but keeping the last
|
|
|
|
// data shred. Hint: should fail
|
|
|
|
let shreds: Vec<Shred> = all_shreds[..num_data_shreds]
|
2019-08-07 17:02:49 -07:00
|
|
|
.iter()
|
|
|
|
.enumerate()
|
2019-09-18 16:24:30 -07:00
|
|
|
.filter_map(|(i, s)| {
|
2019-10-08 00:42:51 -07:00
|
|
|
if (i < 4 && i % 2 != 0) || i == num_data_shreds - 1 {
|
|
|
|
// Keep 1, 3, 4
|
2019-09-12 10:10:25 -07:00
|
|
|
Some(s.clone())
|
2019-08-07 17:02:49 -07:00
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
})
|
|
|
|
.collect();
|
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
assert_eq!(shreds.len(), 3);
|
2019-08-07 17:02:49 -07:00
|
|
|
assert_matches!(
|
|
|
|
Shredder::deshred(&shreds),
|
2019-08-26 18:27:45 -07:00
|
|
|
Err(reed_solomon_erasure::Error::TooFewDataShards)
|
|
|
|
);
|
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
// Test5: Try recovery/reassembly with non zero index full slot with 3 missing data shreds
|
|
|
|
// and 2 missing coding shreds. Hint: should work
|
|
|
|
let serialized_entries = bincode::serialize(&entries).unwrap();
|
2021-06-21 13:12:38 -07:00
|
|
|
let (data_shreds, coding_shreds, _) =
|
|
|
|
shredder.entries_to_shreds(&keypair, &entries, true, 25);
|
2020-02-04 15:45:01 -08:00
|
|
|
let num_coding_shreds = coding_shreds.len();
|
|
|
|
// We should have 10 shreds now
|
2019-10-08 00:42:51 -07:00
|
|
|
assert_eq!(data_shreds.len(), num_data_shreds);
|
2019-08-26 18:27:45 -07:00
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
let all_shreds = data_shreds
|
|
|
|
.iter()
|
|
|
|
.cloned()
|
|
|
|
.chain(coding_shreds.iter().cloned())
|
|
|
|
.collect::<Vec<_>>();
|
2019-08-26 18:27:45 -07:00
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
let mut shred_info: Vec<Shred> = all_shreds
|
2019-08-26 18:27:45 -07:00
|
|
|
.iter()
|
|
|
|
.enumerate()
|
2019-09-18 16:24:30 -07:00
|
|
|
.filter_map(|(i, b)| if i % 2 != 0 { Some(b.clone()) } else { None })
|
|
|
|
.collect();
|
2019-08-26 18:27:45 -07:00
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
let recovered_data = Shredder::try_recovery(
|
2019-09-16 20:28:54 -07:00
|
|
|
shred_info.clone(),
|
2019-10-08 00:42:51 -07:00
|
|
|
num_data_shreds,
|
2020-02-04 15:45:01 -08:00
|
|
|
num_coding_shreds,
|
2019-08-26 18:27:45 -07:00
|
|
|
25,
|
|
|
|
slot,
|
|
|
|
)
|
|
|
|
.unwrap();
|
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
assert_eq!(recovered_data.len(), 3); // Data shreds 25, 27, 29 were missing
|
|
|
|
for (i, recovered_shred) in recovered_data.into_iter().enumerate() {
|
|
|
|
let index = 25 + (i * 2);
|
|
|
|
verify_test_data_shred(
|
|
|
|
&recovered_shred,
|
|
|
|
index.try_into().unwrap(),
|
|
|
|
slot,
|
|
|
|
slot - 5,
|
|
|
|
&keypair.pubkey(),
|
|
|
|
true,
|
|
|
|
index == 25 + num_data_shreds - 1,
|
|
|
|
index == 25 + num_data_shreds - 1,
|
|
|
|
);
|
2019-08-26 18:27:45 -07:00
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
shred_info.insert(i * 2, recovered_shred);
|
|
|
|
}
|
2019-08-26 18:27:45 -07:00
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
let result = Shredder::deshred(&shred_info[..num_data_shreds]).unwrap();
|
|
|
|
assert!(result.len() >= serialized_entries.len());
|
|
|
|
assert_eq!(serialized_entries[..], result[..serialized_entries.len()]);
|
2019-08-26 18:27:45 -07:00
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
// Test6: Try recovery/reassembly with incorrect slot. Hint: does not recover any shreds
|
2019-09-23 16:24:21 -07:00
|
|
|
let recovered_data = Shredder::try_recovery(
|
2019-09-16 20:28:54 -07:00
|
|
|
shred_info.clone(),
|
2019-10-08 00:42:51 -07:00
|
|
|
num_data_shreds,
|
2020-02-04 15:45:01 -08:00
|
|
|
num_coding_shreds,
|
2019-08-26 18:27:45 -07:00
|
|
|
25,
|
|
|
|
slot + 1,
|
|
|
|
)
|
|
|
|
.unwrap();
|
2019-09-23 16:24:21 -07:00
|
|
|
assert!(recovered_data.is_empty());
|
2019-08-26 18:27:45 -07:00
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
// Test7: Try recovery/reassembly with incorrect index. Hint: does not recover any shreds
|
2019-08-26 18:27:45 -07:00
|
|
|
assert_matches!(
|
|
|
|
Shredder::try_recovery(
|
2019-09-16 20:28:54 -07:00
|
|
|
shred_info.clone(),
|
2019-10-08 00:42:51 -07:00
|
|
|
num_data_shreds,
|
2020-02-04 15:45:01 -08:00
|
|
|
num_coding_shreds,
|
2019-08-26 18:27:45 -07:00
|
|
|
15,
|
|
|
|
slot,
|
|
|
|
),
|
|
|
|
Err(reed_solomon_erasure::Error::TooFewShardsPresent)
|
|
|
|
);
|
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
// Test8: Try recovery/reassembly with incorrect index. Hint: does not recover any shreds
|
2019-08-26 18:27:45 -07:00
|
|
|
assert_matches!(
|
2021-04-23 05:00:37 -07:00
|
|
|
Shredder::try_recovery(shred_info, num_data_shreds, num_coding_shreds, 35, slot),
|
2019-08-07 17:02:49 -07:00
|
|
|
Err(reed_solomon_erasure::Error::TooFewShardsPresent)
|
|
|
|
);
|
|
|
|
}
|
2019-09-15 10:37:12 -07:00
|
|
|
|
2020-05-19 12:38:18 -07:00
|
|
|
#[test]
fn test_recovery_and_reassembly() {
    // Exercise shred recovery/reassembly for the same slot both mid-slot
    // and at slot end, since the two paths emit different coding-shred
    // counts.
    for is_last_in_slot in [false, true] {
        run_test_recovery_and_reassembly(0x1234_5678_9abc_def0, is_last_in_slot);
    }
}
|
|
|
|
|
|
|
|
// Shreds a single entry of `num_tx` randomized transfer transactions, drops a
// random subset of the data shreds (keeping `num_data_shreds` shreds total,
// mixing data and coding), and checks that try_recovery reconstructs exactly
// the data shreds that were dropped.
fn run_recovery_with_expanded_coding_shreds(num_tx: usize, is_last_in_slot: bool) {
    let mut rng = rand::thread_rng();
    // Build `num_tx` unsigned transfer transactions with random payers,
    // recipients and lamport amounts so shred payloads are not degenerate.
    let txs = repeat_with(|| {
        let from_pubkey = Pubkey::new_unique();
        let instruction = solana_sdk::system_instruction::transfer(
            &from_pubkey,
            &Pubkey::new_unique(), // to
            rng.gen(),             // lamports
        );
        let message = solana_sdk::message::Message::new(&[instruction], Some(&from_pubkey));
        let mut tx = solana_sdk::transaction::Transaction::new_unsigned(message);
        // Also randomize the signature bytes.
        let mut signature = [0u8; 64];
        rng.fill(&mut signature[..]);
        tx.signatures = vec![Signature::new(&signature)];
        tx
    })
    .take(num_tx)
    .collect();
    let entry = Entry::new(
        &hash::new_rand(&mut rng), // prev hash
        rng.gen_range(1, 64),      // num hashes
        txs,
    );
    let keypair = Arc::new(Keypair::new());
    let slot = 71489660;
    let shredder = Shredder::new(
        slot,
        slot - rng.gen_range(1, 27), // parent slot
        0,                           // reference tick
        rng.gen(),                   // version
    )
    .unwrap();
    // Non-zero start index so recovery must honor `first_index`.
    let next_shred_index = rng.gen_range(1, 1024);
    let (data_shreds, coding_shreds, _) =
        shredder.entries_to_shreds(&keypair, &[entry], is_last_in_slot, next_shred_index);
    let num_data_shreds = data_shreds.len();
    let num_coding_shreds = coding_shreds.len();
    // Pool all shreds, shuffle, and keep only num_data_shreds of them; the
    // survivors are a random mix of data and coding shreds.
    let mut shreds = coding_shreds;
    shreds.extend(data_shreds.iter().cloned());
    shreds.shuffle(&mut rng);
    shreds.truncate(num_data_shreds);
    // Order data shreds before coding shreds, each group by index, as
    // try_recovery expects.
    shreds.sort_by_key(|shred| {
        if shred.is_data() {
            shred.index()
        } else {
            shred.index() + num_data_shreds as u32
        }
    });
    // Indexes of the data shreds that survived the truncation; everything
    // else must be recovered.
    let exclude: HashSet<_> = shreds
        .iter()
        .filter(|shred| shred.is_data())
        .map(|shred| shred.index())
        .collect();
    let recovered_shreds = Shredder::try_recovery(
        shreds,
        num_data_shreds,
        num_coding_shreds,
        next_shred_index as usize, // first index
        slot,
    )
    .unwrap();
    // Recovery must yield exactly the dropped data shreds, in index order.
    assert_eq!(
        recovered_shreds,
        data_shreds
            .into_iter()
            .filter(|shred| !exclude.contains(&shred.index()))
            .collect::<Vec<_>>()
    );
}
|
|
|
|
|
|
|
|
#[test]
fn test_recovery_with_expanded_coding_shreds() {
    // Sweep transaction counts from empty upward, covering both the
    // mid-slot and end-of-slot code paths for each count.
    for num_tx in 0..50 {
        for is_last_in_slot in [false, true] {
            run_recovery_with_expanded_coding_shreds(num_tx, is_last_in_slot);
        }
    }
}
|
|
|
|
|
2019-11-18 18:05:02 -08:00
|
|
|
#[test]
fn test_shred_version() {
    // A shredder constructed with a non-zero version must stamp that
    // version on every data and coding shred it produces.
    let keypair = Arc::new(Keypair::new());
    let hash = hash(Hash::default().as_ref());
    let version = shred_version::version_from_hash(&hash);
    assert_ne!(version, 0);
    let shredder = Shredder::new(0, 0, 0, version).unwrap();
    // Five single-transfer entries; payload contents are irrelevant here.
    let make_entry = |_| {
        let payer = Keypair::new();
        let payee = Keypair::new();
        let tx = system_transaction::transfer(&payer, &payee.pubkey(), 1, Hash::default());
        Entry::new(&Hash::default(), 1, vec![tx])
    };
    let entries: Vec<_> = (0..5).map(make_entry).collect();

    let (data_shreds, coding_shreds, _next_index) =
        shredder.entries_to_shreds(&keypair, &entries, true, 0);
    assert!(data_shreds
        .iter()
        .chain(coding_shreds.iter())
        .all(|s| s.version() == version));
}
|
|
|
|
|
|
|
|
#[test]
fn test_version_from_hash() {
    // Pins version_from_hash's reduction of a 32-byte hash to a u16 shred
    // version on three crafted byte patterns. The expected values suggest
    // the hash bytes are folded (apparently XOR-style) into two bytes and
    // offset by one — confirm against shred_version's implementation.

    // Fully alternating 0xa5/0x5a pattern: the folded value cancels to
    // zero, yielding version 1 (never 0, which is reserved).
    let hash = [
        0xa5u8, 0xa5, 0x5a, 0x5a, 0xa5, 0xa5, 0x5a, 0x5a, 0xa5, 0xa5, 0x5a, 0x5a, 0xa5, 0xa5,
        0x5a, 0x5a, 0xa5, 0xa5, 0x5a, 0x5a, 0xa5, 0xa5, 0x5a, 0x5a, 0xa5, 0xa5, 0x5a, 0x5a,
        0xa5, 0xa5, 0x5a, 0x5a,
    ];
    let version = shred_version::version_from_hash(&Hash::new(&hash));
    assert_eq!(version, 1);
    // Pattern that folds to 0xffff: returned as 0xffff (the +1 offset does
    // not wrap it back to 0).
    let hash = [
        0xa5u8, 0xa5, 0x5a, 0x5a, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
    ];
    let version = shred_version::version_from_hash(&Hash::new(&hash));
    assert_eq!(version, 0xffff);
    // Pattern that folds to 0x5a5a: returned as 0x5a5b (folded value + 1).
    let hash = [
        0xa5u8, 0xa5, 0x5a, 0x5a, 0xa5, 0xa5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    ];
    let version = shred_version::version_from_hash(&Hash::new(&hash));
    assert_eq!(version, 0x5a5b);
}
|
2019-12-12 16:50:29 -08:00
|
|
|
|
|
|
|
#[test]
fn test_shred_fec_set_index() {
    // Shreds enough entries to span multiple FEC blocks and verifies that
    // every data and coding shred carries the fec_set_index of the FEC
    // block it belongs to (the index of that block's first data shred).
    let keypair = Arc::new(Keypair::new());
    let hash = hash(Hash::default().as_ref());
    let version = shred_version::version_from_hash(&hash);
    assert_ne!(version, 0);
    let shredder = Shredder::new(0, 0, 0, version).unwrap();
    // 500 single-transfer entries: enough payload to exceed one FEC block.
    let entries: Vec<_> = (0..500)
        .map(|_| {
            let keypair0 = Keypair::new();
            let keypair1 = Keypair::new();
            let tx0 =
                system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
            Entry::new(&Hash::default(), 1, vec![tx0])
        })
        .collect();

    // Non-zero start index so fec_set_index must track the offset too.
    let start_index = 0x12;
    let (data_shreds, coding_shreds, _next_index) =
        shredder.entries_to_shreds(&keypair, &entries, true, start_index);

    let max_per_block = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize;
    // Data shred i belongs to FEC block i / max_per_block, whose first
    // shred index is start_index + block * max_per_block.
    data_shreds.iter().enumerate().for_each(|(i, s)| {
        let expected_fec_set_index = start_index + ((i / max_per_block) * max_per_block) as u32;
        assert_eq!(s.common_header.fec_set_index, expected_fec_set_index);
    });

    coding_shreds.iter().enumerate().for_each(|(i, s)| {
        // Same block arithmetic as for data shreds...
        let mut expected_fec_set_index = start_index + (i - i % max_per_block) as u32;
        // ...except the final (partial) FEC block's coding shreds are
        // anchored to the last full block boundary within the data shreds.
        while expected_fec_set_index as usize > data_shreds.len() {
            expected_fec_set_index -= max_per_block as u32;
        }
        assert_eq!(s.common_header.fec_set_index, expected_fec_set_index);
    });
}
|
2020-02-04 15:45:01 -08:00
|
|
|
|
|
|
|
#[test]
fn test_max_coding_shreds() {
    // Verifies how many coding shreds data_shreds_to_coding_shreds emits:
    // normally one per data shred, but when is_last_in_slot the count is
    // expanded to pad the (final) FEC block out to
    // 2 * MAX_DATA_SHREDS_PER_FEC_BLOCK total shreds.
    let keypair = Arc::new(Keypair::new());
    let hash = hash(Hash::default().as_ref());
    let version = shred_version::version_from_hash(&hash);
    assert_ne!(version, 0);
    let shredder = Shredder::new(0, 0, 0, version).unwrap();
    // 500 single-transfer entries: enough data to exceed one FEC block.
    let entries: Vec<_> = (0..500)
        .map(|_| {
            let keypair0 = Keypair::new();
            let keypair1 = Keypair::new();
            let tx0 =
                system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
            Entry::new(&Hash::default(), 1, vec![tx0])
        })
        .collect();

    let mut stats = ProcessShredsStats::default();
    let start_index = 0x12;
    let (data_shreds, _next_index) = shredder.entries_to_data_shreds(
        &keypair,
        &entries,
        true, // is_last_in_slot
        start_index,
        start_index, // fec_set_offset
        &mut stats,
    );

    // Precondition: the partial-block cases below need more than one
    // FEC block's worth of data shreds.
    assert!(data_shreds.len() > MAX_DATA_SHREDS_PER_FEC_BLOCK as usize);

    // For every prefix length within a single FEC block:
    (1..=MAX_DATA_SHREDS_PER_FEC_BLOCK as usize).for_each(|count| {
        // Mid-slot: one coding shred per data shred.
        let coding_shreds = Shredder::data_shreds_to_coding_shreds(
            &keypair,
            &data_shreds[..count],
            false, // is_last_in_slot
            &mut stats,
        )
        .unwrap();
        assert_eq!(coding_shreds.len(), count);
        // End of slot: coding shreds are expanded so the block totals
        // 2 * MAX_DATA_SHREDS_PER_FEC_BLOCK shreds.
        let coding_shreds = Shredder::data_shreds_to_coding_shreds(
            &keypair,
            &data_shreds[..count],
            true, // is_last_in_slot
            &mut stats,
        )
        .unwrap();
        assert_eq!(
            coding_shreds.len(),
            2 * MAX_DATA_SHREDS_PER_FEC_BLOCK as usize - count
        );
    });

    // One data shred past a full block, mid-slot: still one coding shred
    // per data shred (no expansion).
    let coding_shreds = Shredder::data_shreds_to_coding_shreds(
        &keypair,
        &data_shreds[..MAX_DATA_SHREDS_PER_FEC_BLOCK as usize + 1],
        false, // is_last_in_slot
        &mut stats,
    )
    .unwrap();
    assert_eq!(
        coding_shreds.len(),
        MAX_DATA_SHREDS_PER_FEC_BLOCK as usize + 1
    );
    // Same prefix at end of slot: the full block keeps its 1:1 coding
    // shreds while the trailing single-shred block is expanded, for
    // 3 * MAX_DATA_SHREDS_PER_FEC_BLOCK - 1 total coding shreds.
    let coding_shreds = Shredder::data_shreds_to_coding_shreds(
        &keypair,
        &data_shreds[..MAX_DATA_SHREDS_PER_FEC_BLOCK as usize + 1],
        true, // is_last_in_slot
        &mut stats,
    )
    .unwrap();
    assert_eq!(
        coding_shreds.len(),
        3 * MAX_DATA_SHREDS_PER_FEC_BLOCK as usize - 1
    );
}
|
2020-04-19 21:15:09 -07:00
|
|
|
|
|
|
|
#[test]
fn test_invalid_parent_offset() {
    // A data shred whose parent_offset exceeds its slot cannot name a
    // valid parent; deserializing its wire form must be rejected with
    // InvalidParentOffset carrying the offending values.
    let shred = Shred::new_from_data(10, 0, 1000, Some(&[1, 2, 3]), false, false, 0, 1, 0);
    let mut packet = Packet::default();
    shred.copy_to_packet(&mut packet);
    let wire_bytes = packet.data.to_vec();
    let deserialized = Shred::new_from_serialized_shred(wire_bytes);
    assert_matches!(
        deserialized,
        Err(ShredError::InvalidParentOffset {
            slot: 10,
            parent_offset: 1000
        })
    );
}
|
2020-12-15 16:50:40 -08:00
|
|
|
|
|
|
|
#[test]
fn test_shred_offsets() {
    // Exercises get_shred_slot_index_type against a packet that is
    // progressively truncated below the header field offsets, plus coding
    // shreds and invalid shred types, checking the returned
    // (slot, index, is_data) triple and the failure counters in stats.
    solana_logger::setup();
    let mut packet = Packet::default();
    let shred = Shred::new_from_data(1, 3, 0, None, true, true, 0, 0, 0);
    shred.copy_to_packet(&mut packet);
    let mut stats = ShredFetchStats::default();
    // Well-formed data shred: parses, and no error counters move.
    let ret = get_shred_slot_index_type(&packet, &mut stats);
    assert_eq!(Some((1, 3, true)), ret);
    assert_eq!(stats, ShredFetchStats::default());

    // Truncate the packet before the shred-type byte: index_overrun.
    packet.meta.size = OFFSET_OF_SHRED_TYPE;
    assert_eq!(None, get_shred_slot_index_type(&packet, &mut stats));
    assert_eq!(stats.index_overrun, 1);

    // Truncate at the start of the index field.
    packet.meta.size = OFFSET_OF_SHRED_INDEX;
    assert_eq!(None, get_shred_slot_index_type(&packet, &mut stats));
    assert_eq!(stats.index_overrun, 2);

    // One byte into the index field — still short.
    packet.meta.size = OFFSET_OF_SHRED_INDEX + 1;
    assert_eq!(None, get_shred_slot_index_type(&packet, &mut stats));
    assert_eq!(stats.index_overrun, 3);

    // One byte short of a complete index field.
    packet.meta.size = OFFSET_OF_SHRED_INDEX + SIZE_OF_SHRED_INDEX - 1;
    assert_eq!(None, get_shred_slot_index_type(&packet, &mut stats));
    assert_eq!(stats.index_overrun, 4);

    // Exactly through the index field: parses again, counter unchanged.
    packet.meta.size = OFFSET_OF_SHRED_INDEX + SIZE_OF_SHRED_INDEX;
    assert_eq!(
        Some((1, 3, true)),
        get_shred_slot_index_type(&packet, &mut stats)
    );
    assert_eq!(stats.index_overrun, 4);

    // Coding shred: parses with is_data == false.
    let shred = Shred::new_empty_coding(8, 2, 10, 30, 4, 200);
    shred.copy_to_packet(&mut packet);
    assert_eq!(
        Some((8, 2, false)),
        get_shred_slot_index_type(&packet, &mut stats)
    );

    // Data shred index near u32::MAX: rejected as out of bounds.
    let shred = Shred::new_from_data(1, std::u32::MAX - 10, 0, None, true, true, 0, 0, 0);
    shred.copy_to_packet(&mut packet);
    assert_eq!(None, get_shred_slot_index_type(&packet, &mut stats));
    assert_eq!(1, stats.index_out_of_bounds);

    // Coding shred with a forged, unknown shred type byte: rejected and
    // counted as bad_shred_type.
    let (mut header, coding_header) = Shredder::new_coding_shred_header(8, 2, 10, 30, 4, 200);
    header.shred_type = ShredType(u8::MAX);
    let shred = Shred::new_empty_from_header(header, DataShredHeader::default(), coding_header);
    shred.copy_to_packet(&mut packet);

    assert_eq!(None, get_shred_slot_index_type(&packet, &mut stats));
    assert_eq!(1, stats.bad_shred_type);
}
|
2019-08-02 15:53:42 -07:00
|
|
|
}
|