2018-04-18 19:34:57 -07:00
|
|
|
// Support erasure coding
|
2019-04-11 14:14:57 -07:00
|
|
|
use crate::packet::{Blob, SharedBlob};
|
2018-12-08 21:40:42 -08:00
|
|
|
use crate::result::{Error, Result};
|
2018-07-18 10:10:34 -07:00
|
|
|
use std::cmp;
|
2018-12-05 12:47:19 -08:00
|
|
|
use std::sync::{Arc, RwLock};
|
2018-04-18 19:34:57 -07:00
|
|
|
|
|
|
|
//TODO(sakridge) pick these values
/// Number of data blobs per erasure set.
pub const NUM_DATA: usize = 16; // number of data blobs
/// Number of coding blobs per erasure set; also the maximum number of blobs
/// (data + coding combined) that can go missing and still be recovered.
pub const NUM_CODING: usize = 4; // number of coding blobs, also the maximum number that can go missing
/// Total number of blobs in an erasure set, includes data and coding blobs.
pub const ERASURE_SET_SIZE: usize = NUM_DATA + NUM_CODING; // total number of blobs in an erasure set, includes data and coding blobs
/// Jerasure requires block sizes to be a multiple of 4 bytes.
pub const JERASURE_ALIGN: usize = 4; // data size has to be a multiple of 4 bytes
|
|
|
|
|
2018-08-14 11:27:15 -07:00
|
|
|
/// Rounds `$x` up to the nearest multiple of `$align`.
/// `$align` must be a power of two (used here with `JERASURE_ALIGN`).
macro_rules! align {
    ($x:expr, $align:expr) => {
        // The whole expansion is parenthesized so the macro stays correct
        // inside a larger expression: unparenthesized, `align!(x, 4) * 2`
        // would expand to `x + 3 & !3 * 2`, which groups as
        // `(x + 3) & ((!3) * 2)` because `&` binds looser than `+`/`*`.
        (($x) + (($align) - 1)) & !(($align) - 1)
    };
}
|
|
|
|
|
2018-04-18 19:34:57 -07:00
|
|
|
/// Errors surfaced by the erasure encoding/decoding routines.
#[derive(Debug, PartialEq, Eq)]
pub enum ErasureError {
    /// Too few surviving blocks to reconstruct the missing ones.
    /// (Not constructed in this file — presumably used by callers; verify.)
    NotEnoughBlocksToDecode,
    /// `jerasure_matrix_decode` reported failure (returned < 0).
    DecodeError,
    /// Encoding failed. (Not constructed in this file — verify usage elsewhere.)
    EncodeError,
    /// A data or coding block's length did not match the expected block size.
    InvalidBlockSize,
    /// Blob contents were unusable for erasure coding.
    /// (Not constructed in this file — verify usage elsewhere.)
    InvalidBlobData,
    /// A coding blob was found to be corrupt.
    /// (Not constructed in this file — verify usage elsewhere.)
    CorruptCoding,
}
|
|
|
|
|
|
|
|
// k = number of data devices
|
|
|
|
// m = number of coding devices
|
|
|
|
// w = word size
|
|
|
|
|
|
|
|
// FFI bindings into the jerasure / galois C libraries.
// Per the comments above: k = number of data devices, m = number of coding
// devices, w = word size (bits of the Galois field).
extern "C" {
    /// Fills `m` coding blocks from `k` data blocks of `size` bytes each,
    /// using the `m` x `k` coding `matrix` over GF(2^w).
    fn jerasure_matrix_encode(
        k: i32,
        m: i32,
        w: i32,
        matrix: *const i32,
        data_ptrs: *const *const u8,
        coding_ptrs: *const *mut u8,
        size: i32,
    );
    /// Reconstructs the blocks listed in `erasures` in place.
    /// Returns a negative value on failure (checked by `decode_blocks`).
    fn jerasure_matrix_decode(
        k: i32,
        m: i32,
        w: i32,
        matrix: *const i32,
        row_k_ones: i32,
        erasures: *const i32,
        data_ptrs: *const *mut u8,
        coding_ptrs: *const *mut u8,
        size: i32,
    ) -> i32;
    /// Division in GF(2^w); used by `get_matrix` to build Cauchy-style entries.
    fn galois_single_divide(a: i32, b: i32, w: i32) -> i32;
    /// One-time initialization of the default field tables for word size `w`;
    /// guarded by `ERASURE_W_ONCE` in `w()` below.
    fn galois_init_default_field(w: i32) -> i32;
}
|
|
|
|
|
|
|
|
use std::sync::Once;
|
|
|
|
static ERASURE_W_ONCE: Once = Once::new();
|
|
|
|
|
|
|
|
fn w() -> i32 {
|
|
|
|
let w = 32;
|
|
|
|
unsafe {
|
|
|
|
ERASURE_W_ONCE.call_once(|| {
|
|
|
|
galois_init_default_field(w);
|
|
|
|
()
|
|
|
|
});
|
|
|
|
}
|
|
|
|
w
|
2018-04-18 19:34:57 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
fn get_matrix(m: i32, k: i32, w: i32) -> Vec<i32> {
|
|
|
|
let mut matrix = vec![0; (m * k) as usize];
|
|
|
|
for i in 0..m {
|
|
|
|
for j in 0..k {
|
|
|
|
unsafe {
|
|
|
|
matrix[(i * k + j) as usize] = galois_single_divide(1, i ^ (m + j), w);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
matrix
|
|
|
|
}
|
|
|
|
|
|
|
|
// Generate coding blocks into coding
|
|
|
|
// There are some alignment restrictions, blocks should be aligned by 16 bytes
|
|
|
|
// which means their size should be >= 16 bytes
|
2019-03-04 15:16:01 -08:00
|
|
|
fn generate_coding_blocks(coding: &mut [&mut [u8]], data: &[&[u8]]) -> Result<()> {
|
2018-09-18 08:02:57 -07:00
|
|
|
if data.is_empty() {
|
2018-04-18 19:34:57 -07:00
|
|
|
return Ok(());
|
|
|
|
}
|
2018-07-17 13:02:38 -07:00
|
|
|
let k = data.len() as i32;
|
2018-04-18 19:34:57 -07:00
|
|
|
let m = coding.len() as i32;
|
2018-07-17 13:02:38 -07:00
|
|
|
let block_len = data[0].len() as i32;
|
2019-04-11 14:14:57 -07:00
|
|
|
let matrix: Vec<i32> = get_matrix(m, k, w());
|
2018-07-17 13:02:38 -07:00
|
|
|
let mut data_arg = Vec::with_capacity(data.len());
|
2018-04-18 19:34:57 -07:00
|
|
|
for block in data {
|
2018-07-17 13:02:38 -07:00
|
|
|
if block_len != block.len() as i32 {
|
2018-08-17 12:24:40 -07:00
|
|
|
error!(
|
2018-06-05 12:24:39 -07:00
|
|
|
"data block size incorrect {} expected {}",
|
|
|
|
block.len(),
|
|
|
|
block_len
|
|
|
|
);
|
2018-12-05 12:47:19 -08:00
|
|
|
return Err(Error::ErasureError(ErasureError::InvalidBlockSize));
|
2018-04-18 19:34:57 -07:00
|
|
|
}
|
|
|
|
data_arg.push(block.as_ptr());
|
|
|
|
}
|
2018-07-17 13:02:38 -07:00
|
|
|
let mut coding_arg = Vec::with_capacity(coding.len());
|
2018-12-08 21:44:20 -08:00
|
|
|
for block in coding {
|
2018-07-17 13:02:38 -07:00
|
|
|
if block_len != block.len() as i32 {
|
2018-08-17 12:24:40 -07:00
|
|
|
error!(
|
2018-06-05 12:24:39 -07:00
|
|
|
"coding block size incorrect {} expected {}",
|
|
|
|
block.len(),
|
|
|
|
block_len
|
|
|
|
);
|
2018-12-05 12:47:19 -08:00
|
|
|
return Err(Error::ErasureError(ErasureError::InvalidBlockSize));
|
2018-04-18 19:34:57 -07:00
|
|
|
}
|
|
|
|
coding_arg.push(block.as_mut_ptr());
|
|
|
|
}
|
|
|
|
|
|
|
|
unsafe {
|
|
|
|
jerasure_matrix_encode(
|
2018-07-17 13:02:38 -07:00
|
|
|
k,
|
2018-04-18 19:34:57 -07:00
|
|
|
m,
|
2019-04-11 14:14:57 -07:00
|
|
|
w(),
|
2018-04-18 19:34:57 -07:00
|
|
|
matrix.as_ptr(),
|
|
|
|
data_arg.as_ptr(),
|
|
|
|
coding_arg.as_ptr(),
|
2018-07-17 13:02:38 -07:00
|
|
|
block_len,
|
2018-04-18 19:34:57 -07:00
|
|
|
);
|
|
|
|
}
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
// Recover data + coding blocks into data blocks
|
|
|
|
// data: array of blocks to recover into
|
|
|
|
// coding: arry of coding blocks
|
|
|
|
// erasures: list of indices in data where blocks should be recovered
|
2019-04-11 14:14:57 -07:00
|
|
|
/// Recovers erased blocks in place using `jerasure_matrix_decode`.
///
/// `data`: blocks to recover into; `coding`: the coding blocks;
/// `erasures`: indices of the blocks to be reconstructed, terminated by -1
/// (jerasure convention — presumably indices >= data.len() address coding
/// blocks, as in the tests below; verify against jerasure docs).
/// All blocks must have the same length; empty `data` is a no-op.
///
/// # Errors
/// `InvalidBlockSize` if any block length differs from `data[0]`;
/// `DecodeError` if the jerasure call returns a negative value.
pub fn decode_blocks(
    data: &mut [&mut [u8]],
    coding: &mut [&mut [u8]],
    erasures: &[i32],
) -> Result<()> {
    if data.is_empty() {
        return Ok(());
    }
    // Every block must match the first data block's length.
    let block_len = data[0].len();
    let matrix: Vec<i32> = get_matrix(coding.len() as i32, data.len() as i32, w());

    // generate coding pointers, blocks should be the same size
    let mut coding_arg: Vec<*mut u8> = Vec::new();
    for x in coding.iter_mut() {
        if x.len() != block_len {
            return Err(Error::ErasureError(ErasureError::InvalidBlockSize));
        }
        coding_arg.push(x.as_mut_ptr());
    }

    // generate data pointers, blocks should be the same size
    let mut data_arg: Vec<*mut u8> = Vec::new();
    for x in data.iter_mut() {
        if x.len() != block_len {
            return Err(Error::ErasureError(ErasureError::InvalidBlockSize));
        }
        data_arg.push(x.as_mut_ptr());
    }
    // All pointers validated above reference live slices of `block_len` bytes.
    let ret = unsafe {
        jerasure_matrix_decode(
            data.len() as i32,
            coding.len() as i32,
            w(),
            matrix.as_ptr(),
            0,
            erasures.as_ptr(),
            data_arg.as_ptr(),
            coding_arg.as_ptr(),
            data[0].len() as i32,
        )
    };
    trace!("jerasure_matrix_decode ret: {}", ret);
    // NOTE(review): this trace indexes `data` with `erasures[0]` — it panics
    // if `erasures` is empty or its first entry is not a valid data index;
    // callers appear to always pass a data-block index first.
    for x in data[erasures[0] as usize][0..8].iter() {
        trace!("{} ", x)
    }
    trace!("");
    if ret < 0 {
        return Err(Error::ErasureError(ErasureError::DecodeError));
    }
    Ok(())
}
|
|
|
|
|
2018-07-18 22:10:01 -07:00
|
|
|
// Generate coding blocks in window starting from start_idx,
|
|
|
|
// for num_blobs.. For each block place the coding blobs
|
2019-04-11 14:14:57 -07:00
|
|
|
// at the start of the block like so:
|
2018-07-17 13:02:38 -07:00
|
|
|
//
|
2019-04-11 14:14:57 -07:00
|
|
|
// model of an erasure set, with top row being data blobs and second being coding
|
2018-07-17 13:02:38 -07:00
|
|
|
// |<======================= NUM_DATA ==============================>|
|
2019-04-11 14:14:57 -07:00
|
|
|
// |<==== NUM_CODING ===>|
|
2018-07-17 13:02:38 -07:00
|
|
|
// +---+ +---+ +---+ +---+ +---+ +---+ +---+ +---+ +---+ +---+
|
|
|
|
// | D | | D | | D | | D | | D | | D | | D | | D | | D | | D |
|
|
|
|
// +---+ +---+ +---+ +---+ +---+ . . . +---+ +---+ +---+ +---+ +---+
|
2019-04-11 14:14:57 -07:00
|
|
|
// | C | | C | | C | | C | | | | | | | | | | | | |
|
2018-07-17 13:02:38 -07:00
|
|
|
// +---+ +---+ +---+ +---+ +---+ +---+ +---+ +---+ +---+ +---+
|
2018-07-23 18:55:58 -07:00
|
|
|
//
|
|
|
|
// blob structure for coding, recover
|
|
|
|
//
|
|
|
|
// + ------- meta is set and used by transport, meta.size is actual length
|
|
|
|
// | of data in the byte array blob.data
|
|
|
|
// |
|
|
|
|
// | + -- data is stuff shipped over the wire, and has an included
|
|
|
|
// | | header
|
|
|
|
// V V
|
|
|
|
// +----------+------------------------------------------------------------+
|
|
|
|
// | meta | data |
|
|
|
|
// |+---+-- |+---+---+---+---+------------------------------------------+|
|
|
|
|
// || s | . || i | | f | s | ||
|
|
|
|
// || i | . || n | i | l | i | ||
|
|
|
|
// || z | . || d | d | a | z | blob.data(), or blob.data_mut() ||
|
|
|
|
// || e | || e | | g | e | ||
|
|
|
|
// |+---+-- || x | | s | | ||
|
|
|
|
// | |+---+---+---+---+------------------------------------------+|
|
|
|
|
// +----------+------------------------------------------------------------+
|
|
|
|
// | |<=== coding blob part for "coding" =======>|
|
|
|
|
// | |
|
|
|
|
// |<============== data blob part for "coding" ==============>|
|
|
|
|
//
|
|
|
|
//
|
|
|
|
//
|
2019-01-15 10:51:53 -08:00
|
|
|
/// Incrementally produces coding blobs from a stream of data blobs; see the
/// blob-layout diagram in the comments above. Data blobs that do not yet fill
/// a complete erasure set are carried over between calls to `next()`.
pub struct CodingGenerator {
    leftover: Vec<SharedBlob>, // SharedBlobs that couldn't be used in last call to next()
}
|
2019-01-14 15:11:18 -08:00
|
|
|
|
2019-04-11 14:14:57 -07:00
|
|
|
impl Default for CodingGenerator {
|
|
|
|
fn default() -> Self {
|
|
|
|
CodingGenerator {
|
2019-01-15 10:51:53 -08:00
|
|
|
leftover: Vec::with_capacity(NUM_DATA),
|
2018-07-18 17:59:44 -07:00
|
|
|
}
|
2019-01-15 10:51:53 -08:00
|
|
|
}
|
2019-04-11 14:14:57 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
impl CodingGenerator {
|
|
|
|
pub fn new() -> Self {
|
|
|
|
Self::default()
|
|
|
|
}
|
2018-07-18 10:10:34 -07:00
|
|
|
|
2019-01-15 10:51:53 -08:00
|
|
|
// must be called with consecutive data blobs from previous invocation
|
|
|
|
pub fn next(&mut self, next_data: &[SharedBlob]) -> Result<Vec<SharedBlob>> {
|
|
|
|
let mut next_coding =
|
|
|
|
Vec::with_capacity((self.leftover.len() + next_data.len()) / NUM_DATA * NUM_CODING);
|
2018-07-18 09:02:21 -07:00
|
|
|
|
2019-04-10 18:18:55 -07:00
|
|
|
if self.leftover.len() > 0 && next_data.len() > 0 {
|
|
|
|
if self.leftover[0].read().unwrap().slot() != next_data[0].read().unwrap().slot() {
|
|
|
|
self.leftover.clear(); // reset on slot boundaries
|
|
|
|
}
|
|
|
|
}
|
2019-01-15 10:51:53 -08:00
|
|
|
let next_data: Vec<_> = self.leftover.iter().chain(next_data).cloned().collect();
|
2018-07-19 15:01:15 -07:00
|
|
|
|
2019-01-15 10:51:53 -08:00
|
|
|
for data_blobs in next_data.chunks(NUM_DATA) {
|
|
|
|
if data_blobs.len() < NUM_DATA {
|
|
|
|
self.leftover = data_blobs.to_vec();
|
|
|
|
break;
|
2018-06-05 09:50:50 -07:00
|
|
|
}
|
2019-01-15 10:51:53 -08:00
|
|
|
self.leftover.clear();
|
|
|
|
|
|
|
|
// find max_data_size for the chunk
|
|
|
|
let max_data_size = align!(
|
|
|
|
data_blobs
|
|
|
|
.iter()
|
|
|
|
.fold(0, |max, blob| cmp::max(blob.read().unwrap().meta.size, max)),
|
|
|
|
JERASURE_ALIGN
|
|
|
|
);
|
2018-07-19 15:01:15 -07:00
|
|
|
|
2019-01-15 10:51:53 -08:00
|
|
|
let data_locks: Vec<_> = data_blobs.iter().map(|b| b.read().unwrap()).collect();
|
|
|
|
let data_ptrs: Vec<_> = data_locks
|
|
|
|
.iter()
|
|
|
|
.map(|l| &l.data[..max_data_size])
|
|
|
|
.collect();
|
2019-01-14 15:11:18 -08:00
|
|
|
|
2019-01-15 10:51:53 -08:00
|
|
|
let mut coding_blobs = Vec::with_capacity(NUM_CODING);
|
|
|
|
|
2019-04-11 14:14:57 -07:00
|
|
|
for data_blob in &data_locks[..NUM_CODING] {
|
2019-01-30 20:18:28 -08:00
|
|
|
let index = data_blob.index();
|
|
|
|
let slot = data_blob.slot();
|
2019-02-27 13:37:08 -08:00
|
|
|
let id = data_blob.id();
|
2019-02-12 10:56:48 -08:00
|
|
|
let should_forward = data_blob.should_forward();
|
2019-01-15 10:51:53 -08:00
|
|
|
|
2019-04-11 14:14:57 -07:00
|
|
|
let mut coding_blob = Blob::default();
|
|
|
|
coding_blob.set_index(index);
|
|
|
|
coding_blob.set_slot(slot);
|
|
|
|
coding_blob.set_id(&id);
|
|
|
|
coding_blob.forward(should_forward);
|
|
|
|
coding_blob.set_size(max_data_size);
|
|
|
|
coding_blob.set_coding();
|
|
|
|
|
|
|
|
coding_blobs.push(Arc::new(RwLock::new(coding_blob)));
|
2018-07-19 15:01:15 -07:00
|
|
|
}
|
2018-07-18 14:51:18 -07:00
|
|
|
|
2019-01-15 10:51:53 -08:00
|
|
|
{
|
|
|
|
let mut coding_locks: Vec<_> =
|
|
|
|
coding_blobs.iter().map(|b| b.write().unwrap()).collect();
|
2018-07-23 18:55:58 -07:00
|
|
|
|
2019-01-15 10:51:53 -08:00
|
|
|
let mut coding_ptrs: Vec<_> = coding_locks
|
|
|
|
.iter_mut()
|
|
|
|
.map(|l| &mut l.data_mut()[..max_data_size])
|
|
|
|
.collect();
|
2018-07-18 09:02:21 -07:00
|
|
|
|
2019-01-15 10:51:53 -08:00
|
|
|
generate_coding_blocks(coding_ptrs.as_mut_slice(), &data_ptrs)?;
|
2018-07-18 09:02:21 -07:00
|
|
|
}
|
2019-01-15 10:51:53 -08:00
|
|
|
next_coding.append(&mut coding_blobs);
|
2018-07-18 17:59:44 -07:00
|
|
|
}
|
2018-07-18 09:02:21 -07:00
|
|
|
|
2019-01-15 10:51:53 -08:00
|
|
|
Ok(next_coding)
|
2018-06-01 11:36:20 -07:00
|
|
|
}
|
2018-04-18 19:34:57 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(test)]
|
2018-12-05 12:47:19 -08:00
|
|
|
pub mod test {
|
|
|
|
use super::*;
|
2019-02-07 20:52:39 -08:00
|
|
|
use crate::blocktree::get_tmp_ledger_path;
|
2019-02-25 12:48:48 -08:00
|
|
|
use crate::blocktree::Blocktree;
|
2019-01-09 14:33:44 -08:00
|
|
|
use crate::entry::{make_tiny_test_entries, EntrySlice};
|
2019-04-11 14:14:57 -07:00
|
|
|
use crate::packet::{index_blobs, SharedBlob, BLOB_DATA_SIZE, BLOB_HEADER_SIZE};
|
2019-03-30 20:37:33 -07:00
|
|
|
use solana_sdk::pubkey::Pubkey;
|
2019-04-11 14:14:57 -07:00
|
|
|
use solana_sdk::signature::{Keypair, KeypairUtil};
|
|
|
|
use std::borrow::Borrow;
|
2019-03-27 23:55:51 -07:00
|
|
|
|
|
|
|
    /// Specifies the contents of a 16-data-blob and 4-coding-blob erasure set
    /// Exists to be passed to `generate_blocktree_with_coding`
    #[derive(Debug, Copy, Clone)]
    pub struct ErasureSpec {
        /// Which 16-blob erasure set this represents
        pub set_index: u64,
        /// How many data blobs of the set to actually generate/store.
        pub num_data: usize,
        /// How many coding blobs of the set to actually generate/store.
        pub num_coding: usize,
    }

    /// Specifies the contents of a slot
    /// Exists to be passed to `generate_blocktree_with_coding`
    #[derive(Debug, Clone)]
    pub struct SlotSpec {
        /// Slot number the sets below belong to.
        pub slot: u64,
        /// One spec per erasure set in the slot.
        pub set_specs: Vec<ErasureSpec>,
    }
|
2018-04-18 19:34:57 -07:00
|
|
|
|
2019-04-11 14:14:57 -07:00
|
|
|
    /// Model of a slot in 16-blob chunks with varying amounts of erasure and coding blobs
    /// present
    #[derive(Debug, Clone)]
    pub struct SlotModel {
        /// Slot number this model describes.
        pub slot: u64,
        /// The erasure sets ("chunks") generated for the slot.
        pub chunks: Vec<ErasureSetModel>,
    }

    /// Model of 16-blob chunk
    #[derive(Debug, Clone)]
    pub struct ErasureSetModel {
        /// Which erasure set within the slot.
        pub set_index: u64,
        /// Blob index of the first data blob in the set (set_index * NUM_DATA).
        pub start_index: u64,
        /// Coding blobs generated for the set (possibly truncated by the spec).
        pub coding: Vec<SharedBlob>,
        /// Data blobs of the set (possibly truncated by the spec).
        pub data: Vec<SharedBlob>,
    }
|
|
|
|
|
2018-04-18 19:34:57 -07:00
|
|
|
    /// Round-trips 4 data blocks through encode, erases one, decodes, and
    /// checks the erased block is restored byte-for-byte.
    #[test]
    fn test_coding() {
        let zero_vec = vec![0; 16];
        // Four 16-byte data blocks with distinct, recognizable contents.
        let mut vs: Vec<Vec<u8>> = (0..4).map(|i| (i..(16 + i)).collect()).collect();
        let v_orig: Vec<u8> = vs[0].clone();

        // Two coding blocks.
        let m = 2;
        let mut coding_blocks: Vec<_> = (0..m).map(|_| vec![0u8; 16]).collect();

        {
            let mut coding_blocks_slices: Vec<_> =
                coding_blocks.iter_mut().map(|x| x.as_mut_slice()).collect();
            let v_slices: Vec<_> = vs.iter().map(|x| x.as_slice()).collect();

            assert!(generate_coding_blocks(
                coding_blocks_slices.as_mut_slice(),
                v_slices.as_slice(),
            )
            .is_ok());
        }
        trace!("test_coding: coding blocks:");
        for b in &coding_blocks {
            trace!("test_coding: {:?}", b);
        }
        // Erase data block 1 (erasure list is -1 terminated).
        let erasure: i32 = 1;
        let erasures = vec![erasure, -1];
        // clear an entry
        vs[erasure as usize].copy_from_slice(zero_vec.as_slice());

        {
            let mut coding_blocks_slices: Vec<_> =
                coding_blocks.iter_mut().map(|x| x.as_mut_slice()).collect();
            let mut v_slices: Vec<_> = vs.iter_mut().map(|x| x.as_mut_slice()).collect();

            assert!(decode_blocks(
                v_slices.as_mut_slice(),
                coding_blocks_slices.as_mut_slice(),
                erasures.as_slice(),
            )
            .is_ok());
        }

        trace!("test_coding: vs:");
        for v in &vs {
            trace!("test_coding: {:?}", v);
        }
        // Block 0 was never erased; it must be untouched by decode.
        assert_eq!(v_orig, vs[0]);
    }
|
|
|
|
|
2019-01-15 10:51:53 -08:00
|
|
|
    /// Feeds blobs to `CodingGenerator` one at a time: coding must appear
    /// exactly at every NUM_DATA-th blob, and a full erase-one-data +
    /// erase-one-coding recovery must reproduce the original blobs.
    #[test]
    fn test_erasure_generate_coding() {
        solana_logger::setup();

        // trivial case
        let mut coding_generator = CodingGenerator::new();
        let blobs = Vec::new();
        for _ in 0..NUM_DATA * 2 {
            let coding = coding_generator.next(&blobs).unwrap();
            assert_eq!(coding.len(), 0);
        }

        // test coding by iterating one blob at a time
        let data_blobs = generate_test_blobs(0, NUM_DATA * 2);

        for (i, blob) in data_blobs.iter().cloned().enumerate() {
            let coding = coding_generator.next(&[blob]).unwrap();

            if !coding.is_empty() {
                // Coding only appears when a set completes.
                assert_eq!(i % NUM_DATA, NUM_DATA - 1);
                assert_eq!(coding.len(), NUM_CODING);

                let size = coding[0].read().unwrap().size();

                // toss one data and one coding
                let erasures: Vec<i32> = vec![0, NUM_DATA as i32, -1];

                let block_start_idx = i - (i % NUM_DATA);
                let mut blobs: Vec<SharedBlob> = Vec::with_capacity(ERASURE_SET_SIZE);

                blobs.push(SharedBlob::default()); // empty data, erasure at zero
                for blob in &data_blobs[block_start_idx + 1..block_start_idx + NUM_DATA] {
                    // skip first blob
                    blobs.push(blob.clone());
                }
                blobs.push(SharedBlob::default()); // empty coding, erasure at zero
                for blob in &coding[1..NUM_CODING] {
                    blobs.push(blob.clone());
                }

                let corrupt =
                    decode_blobs(&blobs, &erasures, size, block_start_idx as u64, 0).unwrap();

                assert!(!corrupt);

                // Untouched data blob survives...
                assert_eq!(
                    blobs[1].read().unwrap().meta,
                    data_blobs[block_start_idx + 1].read().unwrap().meta
                );
                assert_eq!(
                    blobs[1].read().unwrap().data(),
                    data_blobs[block_start_idx + 1].read().unwrap().data()
                );
                // ...and the erased data blob is fully reconstructed.
                assert_eq!(
                    blobs[0].read().unwrap().meta,
                    data_blobs[block_start_idx].read().unwrap().meta
                );
                assert_eq!(
                    blobs[0].read().unwrap().data(),
                    data_blobs[block_start_idx].read().unwrap().data()
                );
                // The erased coding blob is reconstructed too.
                assert_eq!(
                    blobs[NUM_DATA].read().unwrap().data(),
                    coding[0].read().unwrap().data()
                );
            }
        }
    }
|
|
|
|
|
2019-04-10 18:18:55 -07:00
|
|
|
    /// Leftover data blobs from one slot must be discarded when the next
    /// call's blobs come from a different slot: feeding 2*NUM_DATA - 1 blobs
    /// spanning a slot boundary yields exactly one set of coding blobs.
    #[test]
    fn test_erasure_generate_coding_reset_on_new_slot() {
        solana_logger::setup();

        let mut coding_generator = CodingGenerator::new();

        // test coding by iterating one blob at a time
        let data_blobs = generate_test_blobs(0, NUM_DATA * 2);

        // Second half of the blobs belong to slot 1.
        for i in NUM_DATA..NUM_DATA * 2 {
            data_blobs[i].write().unwrap().set_slot(1);
        }

        // Skipping blob 0 leaves NUM_DATA - 1 slot-0 blobs (never a full set,
        // dropped at the boundary) plus NUM_DATA slot-1 blobs (one full set).
        let coding = coding_generator.next(&data_blobs[1..]).unwrap();

        assert_eq!(coding.len(), NUM_CODING);
    }
|
|
|
|
|
2019-03-27 23:55:51 -07:00
|
|
|
    /// Builds blocktrees from full and partial erasure-set specs and verifies
    /// every specified data and coding blob is retrievable.
    #[test]
    fn test_generate_blocktree_with_coding() {
        // (num_data, num_coding, num_slots, num_sets_per_slot)
        let cases = vec![
            (NUM_DATA, NUM_CODING, 7, 5),
            (NUM_DATA - 6, NUM_CODING - 1, 5, 7),
        ];

        for (num_data, num_coding, num_slots, num_sets_per_slot) in cases {
            let ledger_path = get_tmp_ledger_path!();

            let specs = (0..num_slots)
                .map(|slot| {
                    let set_specs = (0..num_sets_per_slot)
                        .map(|set_index| ErasureSpec {
                            set_index,
                            num_data,
                            num_coding,
                        })
                        .collect();

                    SlotSpec { slot, set_specs }
                })
                .collect::<Vec<_>>();

            let blocktree = generate_blocktree_with_coding(&ledger_path, &specs);

            for spec in specs.iter() {
                let slot = spec.slot;

                for erasure_spec in spec.set_specs.iter() {
                    let start_index = erasure_spec.set_index * NUM_DATA as u64;
                    let (data_end, coding_end) = (
                        start_index + erasure_spec.num_data as u64,
                        start_index + erasure_spec.num_coding as u64,
                    );

                    // Every specified data blob must be present.
                    for idx in start_index..data_end {
                        let opt_bytes = blocktree.get_data_blob_bytes(slot, idx).unwrap();
                        assert!(opt_bytes.is_some());
                    }

                    // Every specified coding blob must be present.
                    for idx in start_index..coding_end {
                        let opt_bytes = blocktree.get_coding_blob_bytes(slot, idx).unwrap();
                        assert!(opt_bytes.is_some());
                    }
                }
            }

            drop(blocktree);
            Blocktree::destroy(&ledger_path).expect("Expect successful blocktree destruction");
        }
    }
|
2019-01-15 10:51:53 -08:00
|
|
|
|
2019-04-11 14:14:57 -07:00
|
|
|
    /// This test is ignored because if successful, it never stops running. It is useful for
    /// discovering an initialization race-condition in the erasure FFI bindings. If this bug
    /// re-emerges, running with `Z_THREADS = N` where `N > 1` should crash fairly rapidly.
    #[ignore]
    #[test]
    fn test_recovery_with_model() {
        use std::env;
        use std::sync::{Arc, Mutex};
        use std::thread;

        const MAX_ERASURE_SETS: u64 = 16;
        solana_logger::setup();
        let n_threads: usize = env::var("Z_THREADS")
            .unwrap_or("1".to_string())
            .parse()
            .unwrap();

        // Endless stream of slot specs; set count varies per slot.
        let specs = (0..).map(|slot| {
            let num_erasure_sets = slot % MAX_ERASURE_SETS;

            let set_specs = (0..num_erasure_sets)
                .map(|set_index| ErasureSpec {
                    set_index,
                    num_data: NUM_DATA,
                    num_coding: NUM_CODING,
                })
                .collect();

            SlotSpec { slot, set_specs }
        });

        // Serializes decode calls across threads (encode runs unserialized,
        // which is what exposes the init race when it exists).
        let decode_mutex = Arc::new(Mutex::new(()));
        let mut handles = vec![];

        for i in 0..n_threads {
            let specs = specs.clone();
            let decode_mutex = Arc::clone(&decode_mutex);

            let handle = thread::Builder::new()
                .name(i.to_string())
                .spawn(move || {
                    for slot_model in generate_ledger_model(specs) {
                        for erasure_set in slot_model.chunks {
                            // Erase the first coding blob and first 3 data blobs.
                            let erased_coding = erasure_set.coding[0].clone();
                            let erased_data = erasure_set.data[..3].to_vec();

                            let mut data = Vec::with_capacity(NUM_DATA);
                            let mut coding = Vec::with_capacity(NUM_CODING);
                            let erasures = vec![0, 1, 2, NUM_DATA as i32, -1];

                            data.push(SharedBlob::default());
                            data.push(SharedBlob::default());
                            data.push(SharedBlob::default());
                            for blob in erasure_set.data.into_iter().skip(3) {
                                data.push(blob);
                            }

                            coding.push(SharedBlob::default());
                            for blob in erasure_set.coding.into_iter().skip(1) {
                                coding.push(blob);
                            }

                            let size = erased_coding.read().unwrap().data_size() as usize;

                            let mut data_locks: Vec<_> =
                                data.iter().map(|shared| shared.write().unwrap()).collect();
                            let mut coding_locks: Vec<_> = coding
                                .iter()
                                .map(|shared| shared.write().unwrap())
                                .collect();

                            let mut data_ptrs: Vec<_> = data_locks
                                .iter_mut()
                                .map(|blob| &mut blob.data[..size])
                                .collect();
                            let mut coding_ptrs: Vec<_> = coding_locks
                                .iter_mut()
                                .map(|blob| &mut blob.data_mut()[..size])
                                .collect();

                            {
                                let _lock = decode_mutex.lock();

                                decode_blocks(
                                    data_ptrs.as_mut_slice(),
                                    coding_ptrs.as_mut_slice(),
                                    &erasures,
                                )
                                .expect("decoding must succeed");
                            }

                            drop(coding_locks);
                            drop(data_locks);

                            // Recovered data blobs must match what was erased.
                            for (expected, recovered) in erased_data.iter().zip(data.iter()) {
                                let expected = expected.read().unwrap();
                                let mut recovered = recovered.write().unwrap();
                                let data_size = recovered.data_size() as usize - BLOB_HEADER_SIZE;
                                recovered.set_size(data_size);
                                let corrupt = data_size > BLOB_DATA_SIZE;
                                assert!(!corrupt, "CORRUPTION");
                                assert_eq!(&*expected, &*recovered);
                            }

                            // The recovered coding blob must match as well.
                            assert_eq!(
                                erased_coding.read().unwrap().data(),
                                coding[0].read().unwrap().data()
                            );

                            debug!("passed set: {}", erasure_set.set_index);
                        }
                        debug!("passed slot: {}", slot_model.slot);
                    }
                })
                .expect("thread build error");

            handles.push(handle);
        }

        handles.into_iter().for_each(|h| h.join().unwrap());
    }
|
2018-05-31 11:21:07 -07:00
|
|
|
|
2019-04-11 14:14:57 -07:00
|
|
|
    /// Generates a model of a ledger containing certain data and coding blobs according to a spec
    pub fn generate_ledger_model<'a, I, IntoIt, S>(
        specs: I,
    ) -> impl Iterator<Item = SlotModel> + Clone + 'a
    where
        I: IntoIterator<Item = S, IntoIter = IntoIt>,
        IntoIt: Iterator<Item = S> + Clone + 'a,
        S: Borrow<SlotSpec>,
    {
        specs.into_iter().map(|spec| {
            let spec = spec.borrow();
            let slot = spec.slot;

            let chunks = spec
                .set_specs
                .iter()
                .map(|erasure_spec| {
                    let set_index = erasure_spec.set_index as usize;
                    let start_index = set_index * NUM_DATA;

                    // Always generate a full set, then truncate to the spec.
                    let mut blobs = make_tiny_test_entries(NUM_DATA).to_single_entry_shared_blobs();
                    index_blobs(
                        &blobs,
                        &Keypair::new().pubkey(),
                        start_index as u64,
                        slot,
                        0,
                    );

                    let mut coding_generator = CodingGenerator::new();
                    let mut coding_blobs = coding_generator.next(&blobs).unwrap();

                    // Drop blobs beyond the spec's requested counts.
                    blobs.drain(erasure_spec.num_data..);
                    coding_blobs.drain(erasure_spec.num_coding..);

                    ErasureSetModel {
                        start_index: start_index as u64,
                        set_index: set_index as u64,
                        data: blobs,
                        coding: coding_blobs,
                    }
                })
                .collect();

            SlotModel { slot, chunks }
        })
    }
|
|
|
|
|
2019-04-11 14:14:57 -07:00
|
|
|
    /// Generates a ledger according to the given specs.
    /// Blocktree should have correct SlotMeta and ErasureMeta and so on but will not have done any
    /// possible recovery.
    pub fn generate_blocktree_with_coding(ledger_path: &str, specs: &[SlotSpec]) -> Blocktree {
        let blocktree = Blocktree::open(ledger_path).unwrap();

        let model = generate_ledger_model(specs);
        for slot_model in model {
            let slot = slot_model.slot;

            for erasure_set in slot_model.chunks {
                blocktree.write_shared_blobs(erasure_set.data).unwrap();

                // Coding blobs are written raw (header + payload) so no
                // recovery is triggered on insert.
                for shared_coding_blob in erasure_set.coding.into_iter() {
                    let blob = shared_coding_blob.read().unwrap();
                    blocktree
                        .put_coding_blob_bytes_raw(
                            slot,
                            blob.index(),
                            &blob.data[..blob.size() + BLOB_HEADER_SIZE],
                        )
                        .unwrap();
                }
            }
        }

        blocktree
    }
|
|
|
|
|
2019-03-27 23:55:51 -07:00
|
|
|
fn generate_test_blobs(offset: usize, num_blobs: usize) -> Vec<SharedBlob> {
|
|
|
|
let blobs = make_tiny_test_entries(num_blobs).to_single_entry_shared_blobs();
|
2018-07-18 13:28:03 -07:00
|
|
|
|
2019-03-30 20:37:33 -07:00
|
|
|
index_blobs(&blobs, &Pubkey::new_rand(), offset as u64, 0, 0);
|
2019-03-27 23:55:51 -07:00
|
|
|
blobs
|
2018-05-28 10:25:15 -07:00
|
|
|
}
|
2019-03-27 23:55:51 -07:00
|
|
|
|
2019-04-11 14:14:57 -07:00
|
|
|
    /// Test helper: decodes a full erasure set of shared blobs in place.
    ///
    /// `blobs` must hold exactly ERASURE_SET_SIZE entries: NUM_DATA data blobs
    /// followed by NUM_CODING coding blobs. `erasures` is the -1-terminated
    /// erasure index list, `size` the common block length, `block_start_idx`
    /// the blob index of the first data blob, and `slot` the slot to stamp on
    /// recovered coding blobs. Returns Ok(true) when a recovered blob's size
    /// field is implausible (corruption detected).
    fn decode_blobs(
        blobs: &[SharedBlob],
        erasures: &[i32],
        size: usize,
        block_start_idx: u64,
        slot: u64,
    ) -> Result<bool> {
        let mut locks = Vec::with_capacity(ERASURE_SET_SIZE);
        let mut coding_ptrs: Vec<&mut [u8]> = Vec::with_capacity(NUM_CODING);
        let mut data_ptrs: Vec<&mut [u8]> = Vec::with_capacity(NUM_DATA);

        assert_eq!(blobs.len(), ERASURE_SET_SIZE);
        for b in blobs {
            locks.push(b.write().unwrap());
        }

        // First NUM_DATA locks are data blobs, the rest are coding blobs.
        for (i, l) in locks.iter_mut().enumerate() {
            if i < NUM_DATA {
                data_ptrs.push(&mut l.data[..size]);
            } else {
                coding_ptrs.push(&mut l.data_mut()[..size]);
            }
        }

        // Decode the blocks
        decode_blocks(
            data_ptrs.as_mut_slice(),
            coding_ptrs.as_mut_slice(),
            &erasures,
        )?;

        // Create the missing blobs from the reconstructed data
        let mut corrupt = false;

        // Skip the trailing -1 terminator.
        for i in &erasures[..erasures.len() - 1] {
            let n = *i as usize;
            let mut idx = n as u64 + block_start_idx;

            let mut data_size;
            if n < NUM_DATA {
                // Recovered data blob: trust (and sanity-check) its header size.
                data_size = locks[n].data_size() as usize;
                data_size -= BLOB_HEADER_SIZE;
                if data_size > BLOB_DATA_SIZE {
                    error!("corrupt data blob[{}] data_size: {}", idx, data_size);
                    corrupt = true;
                    break;
                }
            } else {
                // Recovered coding blob: restamp slot/index from the caller.
                data_size = size;
                idx -= NUM_DATA as u64;
                locks[n].set_slot(slot);
                locks[n].set_index(idx);

                if data_size - BLOB_HEADER_SIZE > BLOB_DATA_SIZE {
                    error!("corrupt coding blob[{}] data_size: {}", idx, data_size);
                    corrupt = true;
                    break;
                }
            }

            locks[n].set_size(data_size);
            trace!(
                "erasures[{}] ({}) size: {} data[0]: {}",
                *i,
                idx,
                data_size,
                locks[n].data()[0]
            );
        }

        Ok(corrupt)
    }
|
|
|
|
|
2018-04-18 19:34:57 -07:00
|
|
|
}
|