solana/runtime/src/append_vec.rs

use bincode::{deserialize_from, serialize_into};
use memmap::MmapMut;
use serde::{Deserialize, Serialize};
use solana_sdk::{
account::Account,
clock::{Epoch, Slot},
hash::Hash,
pubkey::Pubkey,
};
use std::{
fmt,
fs::{remove_file, OpenOptions},
io,
io::{Cursor, Seek, SeekFrom, Write},
mem,
path::{Path, PathBuf},
sync::atomic::{AtomicUsize, Ordering},
sync::Mutex,
};
// Data placement should be aligned at the next boundary. Without alignment, accessing the
// memory may crash on some architectures.
const ALIGN_BOUNDARY_OFFSET: usize = mem::size_of::<u64>();
macro_rules! u64_align {
($addr: expr) => {
($addr + (ALIGN_BOUNDARY_OFFSET - 1)) & !(ALIGN_BOUNDARY_OFFSET - 1)
};
}
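// For example, with the 8-byte boundary above: u64_align!(0) == 0, u64_align!(1) == 8,
// u64_align!(8) == 8, and u64_align!(9) == 16.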
const MAXIMUM_APPEND_VEC_FILE_SIZE: usize = 16 * 1024 * 1024 * 1024; // 16 GiB
/// Meta contains enough context to recover the index from storage itself
/// This struct will be backed by mmaped and snapshotted data files.
/// So the data layout must be stable and consistent across the entire cluster!
#[derive(Clone, PartialEq, Debug)]
pub struct StoredMeta {
/// global write version
pub write_version: u64,
/// key for the account
pub pubkey: Pubkey,
pub data_len: u64,
}
/// This struct will be backed by mmaped and snapshotted data files.
/// So the data layout must be stable and consistent across the entire cluster!
#[derive(Serialize, Deserialize, Clone, Debug, Default, Eq, PartialEq)]
pub struct AccountMeta {
/// lamports in the account
pub lamports: u64,
/// the program that owns this account. If executable, the program that loads this account.
pub owner: Pubkey,
/// this account's data contains a loaded program (and is now read-only)
pub executable: bool,
/// the epoch at which this account will next owe rent
pub rent_epoch: Epoch,
}
/// References to Memory Mapped memory
/// The Account is stored separately from its data, so getting the actual account requires a clone
#[derive(PartialEq, Debug)]
pub struct StoredAccount<'a> {
pub meta: &'a StoredMeta,
/// account metadata
pub account_meta: &'a AccountMeta,
pub data: &'a [u8],
pub offset: usize,
pub hash: &'a Hash,
}
impl<'a> StoredAccount<'a> {
pub fn clone_account(&self) -> Account {
Account {
lamports: self.account_meta.lamports,
owner: self.account_meta.owner,
executable: self.account_meta.executable,
rent_epoch: self.account_meta.rent_epoch,
data: self.data.to_vec(),
hash: *self.hash,
}
}
fn sanitize(&self) -> bool {
self.sanitize_executable() && self.sanitize_lamports()
}
fn sanitize_executable(&self) -> bool {
// Sanitize the executable flag to ensure that the upper 7 bits are cleared.
self.ref_executable_byte() & !1 == 0
}
fn sanitize_lamports(&self) -> bool {
// Sanitize accounts with 0 lamports to ensure they are identical to Account::default()
self.account_meta.lamports != 0 || self.clone_account() == Account::default()
}
fn ref_executable_byte(&self) -> &u8 {
// Use an extra reference to avoid the value being silently clamped to 1 (= true) or 0 (= false)
// Yes, this really happens; see test_set_file_crafted_executable
let executable_bool: &bool = &self.account_meta.executable;
// UNSAFE: Force to interpret mmap-backed bool as u8 to really read the actual memory content
let executable_byte: &u8 = unsafe { &*(executable_bool as *const bool as *const u8) };
executable_byte
}
}
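// For example (see the tests at the bottom of this file): an entry whose executable byte was
// crafted to 0xFE fails sanitize_executable(), and a 0-lamport entry carrying non-empty data
// fails sanitize_lamports().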
#[derive(Debug)]
#[allow(clippy::mutex_atomic)]
pub struct AppendVec {
path: PathBuf,
map: MmapMut,
// This mutex forces append to be single threaded, but concurrent with reads
#[allow(clippy::mutex_atomic)]
append_offset: Mutex<usize>,
current_len: AtomicUsize,
file_size: u64,
}
impl Drop for AppendVec {
fn drop(&mut self) {
let _ignored = remove_file(&self.path);
}
}
impl AppendVec {
#[allow(clippy::mutex_atomic)]
pub fn new(file: &Path, create: bool, size: usize) -> Self {
let initial_len = 0;
AppendVec::sanitize_len_and_size(initial_len, size).unwrap();
if create {
let _ignored = remove_file(file);
}
let mut data = OpenOptions::new()
.read(true)
.write(true)
.create(create)
.open(file)
.map_err(|e| {
let mut msg = format!("in current dir {:?}\n", std::env::current_dir());
for ancestor in file.ancestors() {
msg.push_str(&format!(
"{:?} is {:?}\n",
ancestor,
std::fs::metadata(ancestor)
));
}
panic!(
"{}Unable to {} data file {}, err {:?}",
msg,
if create { "create" } else { "open" },
file.display(),
e
);
})
.unwrap();
data.seek(SeekFrom::Start((size - 1) as u64)).unwrap();
data.write_all(&[0]).unwrap();
data.seek(SeekFrom::Start(0)).unwrap();
data.flush().unwrap();
//UNSAFE: Required to create a Mmap
let map = unsafe { MmapMut::map_mut(&data).expect("failed to map the data file") };
AppendVec {
path: file.to_path_buf(),
map,
// This mutex forces append to be single threaded, but concurrent with reads
// See UNSAFE usage in `append_ptr`
append_offset: Mutex::new(initial_len),
current_len: AtomicUsize::new(initial_len),
file_size: size as u64,
}
}
#[allow(clippy::mutex_atomic)]
fn new_empty_map(current_len: usize) -> Self {
let map = MmapMut::map_anon(1).expect("failed to map the data file");
AppendVec {
path: PathBuf::from(String::default()),
map,
append_offset: Mutex::new(current_len),
current_len: AtomicUsize::new(current_len),
file_size: 0, // will be filled by set_file()
}
}
fn sanitize_len_and_size(current_len: usize, file_size: usize) -> io::Result<()> {
if file_size == 0 {
Err(std::io::Error::new(
std::io::ErrorKind::Other,
format!("too small file size {} for AppendVec", file_size),
))
} else if file_size > MAXIMUM_APPEND_VEC_FILE_SIZE {
Err(std::io::Error::new(
std::io::ErrorKind::Other,
format!("too large file size {} for AppendVec", file_size),
))
} else if current_len > file_size {
Err(std::io::Error::new(
std::io::ErrorKind::Other,
format!("current_len is larger than file size ({})", file_size),
))
} else {
Ok(())
}
}
pub fn flush(&self) -> io::Result<()> {
self.map.flush()
}
#[allow(clippy::mutex_atomic)]
pub fn reset(&self) {
// This mutex forces append to be single threaded, but concurrent with reads
// See UNSAFE usage in `append_ptr`
let mut offset = self.append_offset.lock().unwrap();
self.current_len.store(0, Ordering::Relaxed);
*offset = 0;
}
pub fn len(&self) -> usize {
self.current_len.load(Ordering::Relaxed)
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
pub fn capacity(&self) -> u64 {
self.file_size
}
// Get the file path relative to the top level accounts directory
pub fn get_relative_path<P: AsRef<Path>>(append_vec_path: P) -> Option<PathBuf> {
append_vec_path.as_ref().file_name().map(PathBuf::from)
}
pub fn new_relative_path(slot: Slot, id: usize) -> PathBuf {
PathBuf::from(&format!("{}.{}", slot, id))
}
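// For example, new_relative_path(4, 7) yields "4.7", and get_relative_path("/accounts/4.7")
// yields Some("4.7") (values here are purely illustrative).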
#[allow(clippy::mutex_atomic)]
pub fn set_file<P: AsRef<Path>>(&mut self, path: P) -> io::Result<()> {
// this AppendVec must not already hold an actual file
assert_eq!(self.file_size, 0);
let data = OpenOptions::new()
.read(true)
.write(true)
.create(false)
.open(&path)?;
let current_len = self.current_len.load(Ordering::Relaxed);
assert_eq!(current_len, *self.append_offset.lock().unwrap());
let file_size = std::fs::metadata(&path)?.len();
AppendVec::sanitize_len_and_size(current_len, file_size as usize)?;
let map = unsafe { MmapMut::map_mut(&data)? };
self.file_size = file_size;
self.path = path.as_ref().to_path_buf();
self.map = map;
if !self.sanitize_layout_and_length() {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
"incorrect layout/length/data",
));
}
Ok(())
}
fn sanitize_layout_and_length(&self) -> bool {
let mut offset = 0;
// Each account is discarded immediately after it is checked, at every loop iteration.
//
// This code should not reuse the AppendVec::accounts() method in its current form, nor
// should that method be extended for reuse here, because doing so would allow attackers
// to needlessly accumulate a measurable amount of memory.
while let Some((account, next_offset)) = self.get_account(offset) {
if !account.sanitize() {
return false;
}
offset = next_offset;
}
let aligned_current_len = u64_align!(self.current_len.load(Ordering::Relaxed));
offset == aligned_current_len
}
fn get_slice(&self, offset: usize, size: usize) -> Option<(&[u8], usize)> {
let (next, overflow) = offset.overflowing_add(size);
if overflow || next > self.len() {
return None;
}
let data = &self.map[offset..next];
let next = u64_align!(next);
Some((
//UNSAFE: This unsafe creates a slice that represents a chunk of self.map memory
//The lifetime of this slice is tied to &self, since it points to self.map memory
unsafe { std::slice::from_raw_parts(data.as_ptr() as *const u8, size) },
next,
))
}
fn append_ptr(&self, offset: &mut usize, src: *const u8, len: usize) {
let pos = u64_align!(*offset);
let data = &self.map[pos..(pos + len)];
//UNSAFE: This mut append is safe because only 1 thread can append at a time
//Mutex<append_offset> guarantees exclusive write access to the memory occupied in
//the range.
unsafe {
let dst = data.as_ptr() as *mut u8;
std::ptr::copy(src, dst, len);
};
*offset = pos + len;
}
fn append_ptrs_locked(&self, offset: &mut usize, vals: &[(*const u8, usize)]) -> Option<usize> {
let mut end = *offset;
for val in vals {
end = u64_align!(end);
end += val.1;
}
if (self.file_size as usize) < end {
return None;
}
let pos = u64_align!(*offset);
for val in vals {
self.append_ptr(offset, val.0, val.1)
}
self.current_len.store(*offset, Ordering::Relaxed);
Some(pos)
}
fn get_type<'a, T>(&self, offset: usize) -> Option<(&'a T, usize)> {
let (data, next) = self.get_slice(offset, mem::size_of::<T>())?;
let ptr: *const T = data.as_ptr() as *const T;
//UNSAFE: The cast is safe because the slice is aligned and fits into the memory
//and the lifetime of the &T is tied to self, which holds the underlying memory map
Some((unsafe { &*ptr }, next))
}
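// Layout of a single entry in the map, as written by append_accounts() and read back here:
// StoredMeta, then AccountMeta, then Hash, then the account data bytes, with each component
// starting at a u64-aligned offset.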
pub fn get_account<'a>(&'a self, offset: usize) -> Option<(StoredAccount<'a>, usize)> {
let (meta, next): (&'a StoredMeta, _) = self.get_type(offset)?;
let (account_meta, next): (&'a AccountMeta, _) = self.get_type(next)?;
let (hash, next): (&'a Hash, _) = self.get_type(next)?;
let (data, next) = self.get_slice(next, meta.data_len as usize)?;
Some((
StoredAccount {
meta,
account_meta,
data,
offset,
hash,
},
next,
))
}
pub fn get_account_test(&self, offset: usize) -> Option<(StoredMeta, Account)> {
let (stored_account, _) = self.get_account(offset)?;
let meta = stored_account.meta.clone();
Some((meta, stored_account.clone_account()))
}
pub fn get_path(&self) -> PathBuf {
self.path.clone()
}
pub fn accounts<'a>(&'a self, mut start: usize) -> Vec<StoredAccount<'a>> {
let mut accounts = vec![];
while let Some((account, next)) = self.get_account(start) {
accounts.push(account);
start = next;
}
accounts
}
#[allow(clippy::mutex_atomic)]
pub fn append_accounts(
&self,
accounts: &[(StoredMeta, &Account)],
hashes: &[Hash],
) -> Vec<usize> {
let mut offset = self.append_offset.lock().unwrap();
let mut rv = Vec::with_capacity(accounts.len());
for ((stored_meta, account), hash) in accounts.iter().zip(hashes) {
let meta_ptr = stored_meta as *const StoredMeta;
let account_meta = AccountMeta {
lamports: account.lamports,
owner: account.owner,
executable: account.executable,
rent_epoch: account.rent_epoch,
};
let account_meta_ptr = &account_meta as *const AccountMeta;
let data_len = stored_meta.data_len as usize;
let data_ptr = account.data.as_ptr();
let hash_ptr = hash.as_ref().as_ptr();
let ptrs = [
(meta_ptr as *const u8, mem::size_of::<StoredMeta>()),
(account_meta_ptr as *const u8, mem::size_of::<AccountMeta>()),
(hash_ptr as *const u8, mem::size_of::<Hash>()),
(data_ptr, data_len),
];
if let Some(res) = self.append_ptrs_locked(&mut offset, &ptrs) {
rv.push(res)
} else {
break;
}
}
rv
}
pub fn append_account(
&self,
storage_meta: StoredMeta,
account: &Account,
hash: Hash,
) -> Option<usize> {
self.append_accounts(&[(storage_meta, account)], &[hash])
.first()
.cloned()
}
}
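// A minimal usage sketch (paths, sizes, and bindings below are illustrative; compare the
// tests at the bottom of this file):
//
//     let av = AppendVec::new(Path::new("/tmp/0.0"), true, 1024 * 1024);
//     let offset = av.append_account(stored_meta, &account, hash).unwrap();
//     let (stored, _next) = av.get_account(offset).unwrap();
//     let recovered: Account = stored.clone_account();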
pub mod test_utils {
use super::StoredMeta;
use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};
use solana_sdk::account::Account;
use solana_sdk::pubkey::Pubkey;
use std::fs::create_dir_all;
use std::path::PathBuf;
pub struct TempFile {
pub path: PathBuf,
}
impl Drop for TempFile {
fn drop(&mut self) {
let mut path = PathBuf::new();
std::mem::swap(&mut path, &mut self.path);
let _ignored = std::fs::remove_file(path);
}
}
pub fn get_append_vec_dir() -> String {
std::env::var("FARF_DIR").unwrap_or_else(|_| "farf/append_vec_tests".to_string())
}
pub fn get_append_vec_path(path: &str) -> TempFile {
let out_dir = get_append_vec_dir();
let rand_string: String = thread_rng().sample_iter(&Alphanumeric).take(30).collect();
let dir = format!("{}/{}", out_dir, rand_string);
let mut buf = PathBuf::new();
buf.push(&format!("{}/{}", dir, path));
create_dir_all(dir).expect("Create directory failed");
TempFile { path: buf }
}
pub fn create_test_account(sample: usize) -> (StoredMeta, Account) {
let data_len = sample % 256;
let mut account = Account::new(sample as u64, 0, &Pubkey::default());
account.data = (0..data_len).map(|_| data_len as u8).collect();
let stored_meta = StoredMeta {
write_version: 0,
pubkey: Pubkey::default(),
data_len: data_len as u64,
};
(stored_meta, account)
}
}
#[allow(clippy::mutex_atomic)]
impl Serialize for AppendVec {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
use serde::ser::Error;
let len = std::mem::size_of::<usize>();
let mut buf = vec![0u8; len];
let mut wr = Cursor::new(&mut buf[..]);
serialize_into(&mut wr, &(self.current_len.load(Ordering::Relaxed) as u64))
.map_err(Error::custom)?;
let len = wr.position() as usize;
serializer.serialize_bytes(&wr.into_inner()[..len])
}
}
struct AppendVecVisitor;
impl<'a> serde::de::Visitor<'a> for AppendVecVisitor {
type Value = AppendVec;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("Expecting AppendVec")
}
fn visit_bytes<E>(self, data: &[u8]) -> std::result::Result<Self::Value, E>
where
E: serde::de::Error,
{
use serde::de::Error;
let mut rd = Cursor::new(&data[..]);
let current_len: usize = deserialize_from(&mut rd).map_err(Error::custom)?;
// Note: this does not initialize a valid Mmap in the AppendVec; that must be done
// externally
Ok(AppendVec::new_empty_map(current_len))
}
}
impl<'de> Deserialize<'de> for AppendVec {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: ::serde::Deserializer<'de>,
{
deserializer.deserialize_bytes(AppendVecVisitor)
}
}
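// Serialization sketch: only current_len is written (as a u64 via bincode); the mmap itself
// is never serialized. After deserializing, the caller is expected to re-attach the backing
// data file with set_file(). An illustrative round trip:
//
//     let bytes = bincode::serialize(&append_vec)?;
//     let mut restored: AppendVec = bincode::deserialize(&bytes)?;
//     restored.set_file(path)?; // re-establish the mmap from the existing file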
#[cfg(test)]
pub mod tests {
use super::test_utils::*;
use super::*;
use assert_matches::assert_matches;
use log::*;
use rand::{thread_rng, Rng};
use solana_sdk::timing::duration_as_ms;
use std::time::Instant;
impl AppendVec {
fn append_account_test(&self, data: &(StoredMeta, Account)) -> Option<usize> {
self.append_account(data.0.clone(), &data.1, Hash::default())
}
}
impl<'a> StoredAccount<'a> {
fn set_data_len_unsafe(&self, new_data_len: u64) {
let data_len: &u64 = &self.meta.data_len;
#[allow(mutable_transmutes)]
// UNSAFE: cast away the & (= const ref) to &mut to force mutation of the append-only (= effectively read-only) AppendVec
let data_len: &mut u64 = unsafe { &mut *(data_len as *const u64 as *mut u64) };
*data_len = new_data_len;
}
fn get_executable_byte(&self) -> u8 {
let executable_bool: bool = self.account_meta.executable;
// UNSAFE: Force to interpret mmap-backed bool as u8 to really read the actual memory content
let executable_byte: u8 = unsafe { std::mem::transmute::<bool, u8>(executable_bool) };
executable_byte
}
fn set_executable_as_byte(&self, new_executable_byte: u8) {
let executable_ref: &bool = &self.account_meta.executable;
#[allow(mutable_transmutes)]
// UNSAFE: Force to interpret mmap-backed &bool as &u8 to write some crafted value;
let executable_byte: &mut u8 =
unsafe { &mut *(executable_ref as *const bool as *mut u8) };
*executable_byte = new_executable_byte;
}
}
#[test]
#[should_panic(expected = "too small file size 0 for AppendVec")]
fn test_append_vec_new_bad_size() {
let path = get_append_vec_path("test_append_vec_new_bad_size");
let _av = AppendVec::new(&path.path, true, 0);
}
#[test]
fn test_append_vec_set_file_bad_size() {
let file = get_append_vec_path("test_append_vec_set_file_bad_size");
let path = &file.path;
let mut av = AppendVec::new_empty_map(0);
assert_eq!(av.accounts(0).len(), 0);
let _data = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)
.expect("create a test file for mmap");
let result = av.set_file(path);
assert_matches!(result, Err(ref message) if message.to_string() == *"too small file size 0 for AppendVec");
}
#[test]
fn test_append_vec_sanitize_len_and_size_too_small() {
let result = AppendVec::sanitize_len_and_size(0, 0);
assert_matches!(result, Err(ref message) if message.to_string() == *"too small file size 0 for AppendVec");
}
#[test]
fn test_append_vec_sanitize_len_and_size_maximum() {
let result = AppendVec::sanitize_len_and_size(0, 16 * 1024 * 1024 * 1024);
assert_matches!(result, Ok(_));
}
#[test]
fn test_append_vec_sanitize_len_and_size_too_large() {
let result = AppendVec::sanitize_len_and_size(0, 16 * 1024 * 1024 * 1024 + 1);
assert_matches!(result, Err(ref message) if message.to_string() == *"too large file size 17179869185 for AppendVec");
}
#[test]
fn test_append_vec_sanitize_len_and_size_full_and_same_as_current_len() {
let result = AppendVec::sanitize_len_and_size(1 * 1024 * 1024, 1 * 1024 * 1024);
assert_matches!(result, Ok(_));
}
#[test]
fn test_append_vec_sanitize_len_and_size_larger_current_len() {
let result = AppendVec::sanitize_len_and_size(1 * 1024 * 1024 + 1, 1 * 1024 * 1024);
assert_matches!(result, Err(ref message) if message.to_string() == *"current_len is larger than file size (1048576)");
}
#[test]
fn test_append_vec_one() {
let path = get_append_vec_path("test_append");
let av = AppendVec::new(&path.path, true, 1024 * 1024);
let account = create_test_account(0);
let index = av.append_account_test(&account).unwrap();
assert_eq!(av.get_account_test(index).unwrap(), account);
}
#[test]
fn test_append_vec_data() {
let path = get_append_vec_path("test_append_data");
let av = AppendVec::new(&path.path, true, 1024 * 1024);
let account = create_test_account(5);
let index = av.append_account_test(&account).unwrap();
assert_eq!(av.get_account_test(index).unwrap(), account);
let account1 = create_test_account(6);
let index1 = av.append_account_test(&account1).unwrap();
assert_eq!(av.get_account_test(index).unwrap(), account);
assert_eq!(av.get_account_test(index1).unwrap(), account1);
}
#[test]
fn test_append_vec_append_many() {
let path = get_append_vec_path("test_append_many");
let av = AppendVec::new(&path.path, true, 1024 * 1024);
let size = 1000;
let mut indexes = vec![];
let now = Instant::now();
for sample in 0..size {
let account = create_test_account(sample);
let pos = av.append_account_test(&account).unwrap();
assert_eq!(av.get_account_test(pos).unwrap(), account);
indexes.push(pos)
}
trace!("append time: {} ms", duration_as_ms(&now.elapsed()),);
let now = Instant::now();
for _ in 0..size {
let sample = thread_rng().gen_range(0, indexes.len());
let account = create_test_account(sample);
assert_eq!(av.get_account_test(indexes[sample]).unwrap(), account);
}
trace!("random read time: {} ms", duration_as_ms(&now.elapsed()),);
let now = Instant::now();
assert_eq!(indexes.len(), size);
assert_eq!(indexes[0], 0);
let mut accounts = av.accounts(indexes[0]);
assert_eq!(accounts.len(), size);
for (sample, v) in accounts.iter_mut().enumerate() {
let account = create_test_account(sample);
let recovered = v.clone_account();
assert_eq!(recovered, account.1)
}
trace!(
"sequential read time: {} ms",
duration_as_ms(&now.elapsed()),
);
}
#[test]
fn test_relative_path() {
let relative_path = AppendVec::new_relative_path(0, 2);
let full_path = Path::new("/tmp").join(&relative_path);
assert_eq!(
relative_path,
AppendVec::get_relative_path(full_path).unwrap()
);
}
#[test]
fn test_set_file_crafted_zero_lamport_account() {
let file = get_append_vec_path("test_append");
let path = &file.path;
let mut av = AppendVec::new(&path, true, 1024 * 1024);
let pubkey = Pubkey::new_rand();
let owner = Pubkey::default();
let data_len = 3 as u64;
let mut account = Account::new(0, data_len as usize, &owner);
account.data = b"abc".to_vec();
let stored_meta = StoredMeta {
write_version: 0,
pubkey,
data_len,
};
let account_with_meta = (stored_meta, account);
let index = av.append_account_test(&account_with_meta).unwrap();
assert_eq!(av.get_account_test(index).unwrap(), account_with_meta);
av.flush().unwrap();
av.file_size = 0;
let result = av.set_file(path);
assert_matches!(result, Err(ref message) if message.to_string() == *"incorrect layout/length/data");
}
#[test]
fn test_set_file_crafted_data_len() {
let file = get_append_vec_path("test_set_file_crafted_data_len");
let path = &file.path;
let mut av = AppendVec::new(&path, true, 1024 * 1024);
let crafted_data_len = 1;
av.append_account_test(&create_test_account(10)).unwrap();
let accounts = av.accounts(0);
let account = accounts.first().unwrap();
account.set_data_len_unsafe(crafted_data_len);
assert_eq!(account.meta.data_len, crafted_data_len);
// Reload accounts and observe crafted_data_len
let accounts = av.accounts(0);
let account = accounts.first().unwrap();
assert_eq!(account.meta.data_len, crafted_data_len);
av.flush().unwrap();
av.file_size = 0;
let result = av.set_file(path);
assert_matches!(result, Err(ref message) if message.to_string() == *"incorrect layout/length/data");
}
#[test]
fn test_set_file_too_large_data_len() {
let file = get_append_vec_path("test_set_file_too_large_data_len");
let path = &file.path;
let mut av = AppendVec::new(&path, true, 1024 * 1024);
let too_large_data_len = u64::max_value();
av.append_account_test(&create_test_account(10)).unwrap();
let accounts = av.accounts(0);
let account = accounts.first().unwrap();
account.set_data_len_unsafe(too_large_data_len);
assert_eq!(account.meta.data_len, too_large_data_len);
// Reload accounts and observe no account with bad offset
let accounts = av.accounts(0);
assert_matches!(accounts.first(), None);
av.flush().unwrap();
av.file_size = 0;
let result = av.set_file(path);
assert_matches!(result, Err(ref message) if message.to_string() == *"incorrect layout/length/data");
}
#[test]
fn test_set_file_crafted_executable() {
let file = get_append_vec_path("test_set_file_crafted_executable");
let path = &file.path;
let mut av = AppendVec::new(&path, true, 1024 * 1024);
av.append_account_test(&create_test_account(10)).unwrap();
{
let mut executable_account = create_test_account(10);
executable_account.1.executable = true;
av.append_account_test(&executable_account).unwrap();
}
// reload accounts
let accounts = av.accounts(0);
// ensure that false is actually 0u8 and true is 1u8
assert_eq!(*accounts[0].ref_executable_byte(), 0);
assert_eq!(*accounts[1].ref_executable_byte(), 1);
let account = &accounts[0];
let crafted_executable = u8::max_value() - 1;
account.set_executable_as_byte(crafted_executable);
// reload crafted accounts
let accounts = av.accounts(0);
let account = accounts.first().unwrap();
// we can observe crafted value by ref
{
let executable_bool: &bool = &account.account_meta.executable;
// Depending on use, *executable_bool can be truthy or falsy due to the direct memory manipulation:
// assert_eq! considers *executable_bool equal to false, yet the if condition below considers it not false, contradictorily.
assert_eq!(*executable_bool, false);
if *executable_bool == false {
panic!("This didn't occur if this test passed.");
}
assert_eq!(*account.ref_executable_byte(), crafted_executable);
}
// we can NOT observe crafted value by value
{
let executable_bool: bool = account.account_meta.executable;
assert_eq!(executable_bool, false);
assert_eq!(account.get_executable_byte(), 0); // Wow, not crafted_executable!
}
av.flush().unwrap();
av.file_size = 0;
let result = av.set_file(path);
assert_matches!(result, Err(ref message) if message.to_string() == *"incorrect layout/length/data");
}
}