add logging, more conservative reset

commit 5dbecd6b6b
parent 877920e61b
@@ -508,6 +508,12 @@ pub fn recover(
         if let Some(b) = window[j].coding.clone() {
             if size.is_none() {
                 size = Some(b.read().unwrap().meta.size - BLOB_HEADER_SIZE);
+                trace!(
+                    "{:x} recover size {} from {}",
+                    debug_id,
+                    size.unwrap(),
+                    i as u64 + block_start_idx
+                );
             }
             blobs.push(b);
         } else {
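Note on the added logging: the new lines use the `log` crate's trace! macro, so they are effectively free unless a logger backend has the trace level enabled. Below is a minimal standalone sketch of the same call shape, assuming log plus env_logger as the backend (the backend choice and the stand-in values are assumptions, not taken from this repo):

use log::trace;

fn main() {
    // Assumed setup: env_logger reads RUST_LOG (e.g. RUST_LOG=trace) to decide
    // whether trace-level records are emitted at all.
    env_logger::init();

    // Stand-in values for the identifiers used in the hunk above.
    let debug_id: u64 = 0xdead_beef;
    let size: Option<u64> = Some(1024);
    let block_start_idx: u64 = 7;
    let i: usize = 2;

    trace!(
        "{:x} recover size {} from {}",
        debug_id,
        size.unwrap(),
        i as u64 + block_start_idx
    );
}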
@@ -518,12 +524,13 @@ pub fn recover(
             erasures.push(((i - coding_start) + NUM_DATA) as i32);
         }
     }

     // now that we have size (from coding), zero out data blob tails
+    let size = size.unwrap();
     for i in block_start..block_end {
         let j = i % window.len();

         if let Some(b) = &window[j].data {
-            let size = size.unwrap();
             let mut b_wl = b.write().unwrap();
             for i in b_wl.meta.size..size {
                 b_wl.data[i] = 0;
@@ -537,7 +544,7 @@ pub fn recover(
         "erasures[]: {:x} {:?} data_size: {}",
         debug_id,
         erasures,
-        size.unwrap(),
+        size,
     );
     //lock everything for write
     for b in &blobs {
@@ -550,10 +557,10 @@ pub fn recover(
     for (i, l) in locks.iter_mut().enumerate() {
         if i < NUM_DATA {
             trace!("{:x} pushing data: {}", debug_id, i);
-            data_ptrs.push(&mut l.data[..size.unwrap()]);
+            data_ptrs.push(&mut l.data[..size]);
         } else {
             trace!("{:x} pushing coding: {}", debug_id, i);
-            coding_ptrs.push(&mut l.data_mut()[..size.unwrap()]);
+            coding_ptrs.push(&mut l.data_mut()[..size]);
         }
     }
     trace!(
@@ -580,7 +587,7 @@ pub fn recover(
             data_size = locks[n].get_data_size().unwrap();
             data_size -= BLOB_HEADER_SIZE as u64;
         } else {
-            data_size = size.unwrap() as u64;
+            data_size = size as u64;
             idx -= NUM_CODING as u64;
             locks[n].set_index(idx).unwrap();
         }
@@ -596,6 +603,12 @@ pub fn recover(
             locks[n].data()[0]
         );
         if data_size > BLOB_DATA_SIZE as u64 {
+            trace!(
+                "{:x} corrupt blob[{}] data_size: {}",
+                debug_id,
+                idx,
+                data_size
+            );
             corrupt = true;
         }
     }
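The remaining hunks in recover() all follow from hoisting the single size.unwrap() in front of the loop: every later use slices with the plain `size` binding (l.data[..size], l.data_mut()[..size], size as u64), so a missing size can only panic in one place. Below is a minimal sketch of that pattern with illustrative types (not the crate's real Blob/window structures):

fn zero_tails(blobs: &mut [Vec<u8>], size: Option<usize>, filled: usize) {
    // Unwrap once, up front; every later use is the plain `size` binding.
    let size = size.expect("size must be recovered from a coding blob");
    for blob in blobs.iter_mut() {
        // Zero the tail between the already-filled region and the recovered
        // size, mirroring the "zero out data blob tails" loop above.
        for byte in &mut blob[filled..size] {
            *byte = 0;
        }
        // Downstream code would then slice &mut blob[..size] instead of
        // re-unwrapping the Option at each call site.
    }
}

fn main() {
    let mut blobs = vec![vec![1u8; 8]; 2];
    zero_tails(&mut blobs, Some(8), 4);
    assert_eq!(blobs[0][..], [1, 1, 1, 1, 0, 0, 0, 0]);
}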
@@ -165,6 +165,9 @@ impl Reset for Blob {
     fn reset(&mut self) {
         self.meta = Meta::default();
         self.data[..BLOB_HEADER_SIZE].copy_from_slice(&[0u8; BLOB_HEADER_SIZE]);
+        for i in 0..BLOB_SIZE {
+            self.data[i] = 0;
+        }
     }
 }
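The reset() change is the "more conservative reset" from the commit message: in addition to resetting the meta and the header prefix, the whole data buffer is zeroed so no stale payload bytes survive when a Blob is reused. A minimal sketch of the same idea on an illustrative struct (Blob and BLOB_SIZE here are stand-ins for the crate's real definitions; the iter_mut form is equivalent to the indexed loop in the hunk):

const BLOB_SIZE: usize = 64; // stand-in; the real constant lives in the crate

struct Blob {
    data: [u8; BLOB_SIZE],
}

impl Blob {
    fn reset(&mut self) {
        // Conservative reset: zero the entire buffer, not just the header,
        // so a reused blob cannot leak bytes from its previous contents.
        for byte in self.data.iter_mut() {
            *byte = 0;
        }
    }
}

fn main() {
    let mut b = Blob { data: [0xAA; BLOB_SIZE] };
    b.reset();
    assert!(b.data.iter().all(|&x| x == 0));
}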