limits to data_header.size when combining shreds' payloads (#16708)
Shredder::deshred is ignoring data_header.size when combining shreds' payloads:
https://github.com/solana-labs/solana/blob/37b8587d4/ledger/src/shred.rs#L940-L961

This change also adds more sanity checks on the alignment of data shred indices.
This commit is contained in:
parent 68d5aec55b
commit 0f3ac51cf1
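For orientation before the diff: the sketch below is not part of the patch; the types, the constant's value, and the function name are simplified stand-ins. It illustrates the two behaviors the commit introduces in deshred: rejecting data shreds whose indices are not consecutive, and clamping each shred's contribution to its data_header.size.

// Stand-in types; only the fields the sketch needs, named as in the patch.
struct DataShredHeader {
    // End of the valid region within the payload buffer; the patch slices
    // payload[offset..size], so a default of 0 yields an empty contribution.
    size: u16,
}

struct Shred {
    index: u32,
    data_header: DataShredHeader,
    payload: Vec<u8>,
}

// Stand-in for SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER.
const SHRED_DATA_OFFSET: usize = 88;

/// Sketch of the fixed logic: indices must be consecutive, and each payload is
/// clamped to the size recorded in its data header.
fn deshred_sketch(shreds: &[Shred]) -> Option<Vec<u8>> {
    let first = shreds.first()?.index;
    let aligned = shreds
        .iter()
        .zip(first..)
        .all(|(shred, index)| shred.index == index);
    if !aligned {
        return None;
    }
    let data = shreds
        .iter()
        .flat_map(|shred| {
            let size = (shred.data_header.size as usize).min(shred.payload.len());
            let offset = SHRED_DATA_OFFSET.min(size);
            shred.payload[offset..size].iter().copied()
        })
        .collect();
    Some(data)
}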
@@ -2948,6 +2948,8 @@ pub(crate) mod tests {
         let gibberish = [0xa5u8; PACKET_DATA_SIZE];
         let mut data_header = DataShredHeader::default();
         data_header.flags |= DATA_COMPLETE_SHRED;
+        // Need to provide the right size for Shredder::deshred.
+        data_header.size = SIZE_OF_DATA_SHRED_PAYLOAD as u16;
         let mut shred = Shred::new_empty_from_header(
             ShredCommonHeader::default(),
             data_header,
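Why the test above now sets data_header.size: with the bounds added to deshred further down, a header left at its default of zero clamps both the start and the end of the slice to zero, so the shred contributes no bytes. A throwaway illustration of that arithmetic (the constant and payload length are stand-in values, not the real ones):

fn main() {
    // Stand-in for SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER.
    const SHRED_DATA_OFFSET: usize = 88;
    let payload_len = 1228usize; // stand-in payload length

    // data_header.size left at its default of zero:
    let size = payload_len.min(0);
    let offset = SHRED_DATA_OFFSET.min(size);
    assert_eq!((offset, size), (0, 0)); // payload[0..0] is empty
}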
@@ -786,7 +786,7 @@ impl Shredder {
             .iter()
             .map(|shred| &shred.payload[..PAYLOAD_ENCODE_SIZE])
             .collect();
-        let mut parity = vec![vec![0; PAYLOAD_ENCODE_SIZE]; num_coding];
+        let mut parity = vec![vec![0u8; PAYLOAD_ENCODE_SIZE]; num_coding];
         Session::new(num_data, num_coding)
             .unwrap()
             .encode(&data, &mut parity[..])
@@ -938,37 +938,36 @@ impl Shredder {
 
     /// Combines all shreds to recreate the original buffer
     pub fn deshred(shreds: &[Shred]) -> std::result::Result<Vec<u8>, reed_solomon_erasure::Error> {
-        let num_data = shreds.len();
+        use reed_solomon_erasure::Error::TooFewDataShards;
+        const SHRED_DATA_OFFSET: usize = SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER;
         Self::verify_consistent_shred_payload_sizes(&"deshred()", shreds)?;
-        let data_shred_bufs = {
-            let first_index = shreds.first().unwrap().index() as usize;
-            let last_shred = shreds.last().unwrap();
-            let last_index = if last_shred.data_complete() || last_shred.last_in_slot() {
-                last_shred.index() as usize
-            } else {
-                0
-            };
-
-            if num_data.saturating_add(first_index) != last_index.saturating_add(1) {
-                return Err(reed_solomon_erasure::Error::TooFewDataShards);
-            }
-
-            shreds.iter().map(|shred| &shred.payload).collect()
+        let index = shreds.first().ok_or(TooFewDataShards)?.index();
+        let aligned = shreds.iter().zip(index..).all(|(s, i)| s.index() == i);
+        let data_complete = {
+            let shred = shreds.last().unwrap();
+            shred.data_complete() || shred.last_in_slot()
         };
-
-        Ok(Self::reassemble_payload(num_data, data_shred_bufs))
-    }
-
-    fn reassemble_payload(num_data: usize, data_shred_bufs: Vec<&Vec<u8>>) -> Vec<u8> {
-        let valid_data_len = SHRED_PAYLOAD_SIZE - SIZE_OF_CODING_SHRED_HEADERS;
-        data_shred_bufs[..num_data]
+        if !data_complete || !aligned {
+            return Err(TooFewDataShards);
+        }
+        let data: Vec<_> = shreds
             .iter()
-            .flat_map(|data| {
-                let offset = SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER;
-                data[offset..valid_data_len].iter()
+            .flat_map(|shred| {
+                let size = shred.data_header.size as usize;
+                let size = shred.payload.len().min(size);
+                let offset = SHRED_DATA_OFFSET.min(size);
+                shred.payload[offset..size].iter()
             })
-            .cloned()
-            .collect()
+            .copied()
+            .collect();
+        if data.is_empty() {
+            // For backward compatibility. This is needed when the data shred
+            // payload is None, so that deserializing to Vec<Entry> results in
+            // an empty vector.
+            Ok(vec![0u8; SIZE_OF_DATA_SHRED_PAYLOAD])
+        } else {
+            Ok(data)
+        }
     }
 
     fn verify_consistent_shred_payload_sizes(
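A hedged usage sketch of the changed function, assuming the solana_ledger crate paths and the reed_solomon_erasure dependency as of this commit (error handling simplified): with this patch, misaligned indices or a missing completion flag surface to the caller as TooFewDataShards rather than silently producing a malformed buffer.

use solana_ledger::shred::{Shred, Shredder};

// Reassemble the original serialized entries from a slot's data shreds,
// returning None when the shred set is incomplete or misaligned.
fn reassemble(shreds: &[Shred]) -> Option<Vec<u8>> {
    match Shredder::deshred(shreds) {
        Ok(payload) => Some(payload),
        // Gaps in indices or a missing DATA_COMPLETE/LAST_IN_SLOT flag now
        // report as TooFewDataShards.
        Err(reed_solomon_erasure::Error::TooFewDataShards) => None,
        Err(_) => None,
    }
}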